import types
import warnings
from typing import Any, Optional, Union, overload

import numpy as np

from ..models.bert.tokenization_bert import BasicTokenizer
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import ArgumentHandler, ChunkPipeline, Dataset, build_pipeline_init_args


if is_tf_available():
    import tensorflow as tf

    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES

if is_torch_available():
    import torch

    from ..models.auto.modeling_auto import MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES


class TokenClassificationArgumentHandler(ArgumentHandler):
    """
    Handles arguments for token classification.
    """

    def __call__(self, inputs: Union[str, list[str]], **kwargs):
        is_split_into_words = kwargs.get("is_split_into_words", False)
        delimiter = kwargs.get("delimiter")

        if inputs is not None and isinstance(inputs, (list, tuple)) and len(inputs) > 0:
            inputs = list(inputs)
            batch_size = len(inputs)
        elif isinstance(inputs, str):
            inputs = [inputs]
            batch_size = 1
        elif Dataset is not None and isinstance(inputs, Dataset) or isinstance(inputs, types.GeneratorType):
            return inputs, None, is_split_into_words, delimiter
        else:
            raise ValueError("At least one input is required.")

        offset_mapping = kwargs.get("offset_mapping")
        if offset_mapping:
            if isinstance(offset_mapping, list) and isinstance(offset_mapping[0], tuple):
                offset_mapping = [offset_mapping]
            if len(offset_mapping) != batch_size:
                raise ValueError("offset_mapping should have the same batch size as the input")
        return inputs, offset_mapping, is_split_into_words, delimiter


class AggregationStrategy(ExplicitEnum):
    """All the valid aggregation strategies for TokenClassificationPipeline"""

    NONE = "none"
    SIMPLE = "simple"
    FIRST = "first"
    AVERAGE = "average"
    MAX = "max"


@add_end_docstrings(
    build_pipeline_init_args(has_tokenizer=True),
    r"""
        ignore_labels (`list[str]`, defaults to `["O"]`):
            A list of labels to ignore.
        grouped_entities (`bool`, *optional*, defaults to `False`):
            DEPRECATED, use `aggregation_strategy` instead. Whether or not to group the tokens corresponding to the
            same entity together in the predictions or not.
        stride (`int`, *optional*):
            If stride is provided, the pipeline is applied on all the text. The text is split into chunks of size
            model_max_length. Works only with fast tokenizers and `aggregation_strategy` different from `NONE`. The
            value of this argument defines the number of overlapping tokens between chunks. In other words, the model
            will shift forward by `tokenizer.model_max_length - stride` tokens each step.
        aggregation_strategy (`str`, *optional*, defaults to `"none"`):
            The strategy to fuse (or not) tokens based on the model prediction.

                - "none" : Will not do any aggregation and simply return the raw results from the model.
                - "simple" : Will attempt to group entities following the default schema. (A, B-TAG), (B, I-TAG), (C,
                  I-TAG), (D, B-TAG2), (E, B-TAG2) will end up being [{"word": ABC, "entity": "TAG"}, {"word": "D",
                  "entity": "TAG2"}, {"word": "E", "entity": "TAG2"}]. Notice that two consecutive B tags end up as
                  different entities. On word-based languages, we might end up splitting words undesirably: imagine
                  Microsoft being tagged as [{"word": "Micro", "entity": "ENTERPRISE"}, {"word": "soft", "entity":
                  "NAME"}]. Look at the FIRST, MAX, and AVERAGE options for ways to mitigate this and disambiguate
                  words (on languages that support that meaning, which is basically tokens separated by a space).
                  These mitigations will only work on real words; "New york" might still be tagged with two different
                  entities.
                - "first" : (works only on word based models) Will use the `SIMPLE` strategy except that words cannot
                  end up with different tags. Words will simply use the tag of the first token of the word when there
                  is ambiguity.
                - "average" : (works only on word based models) Will use the `SIMPLE` strategy except that words
                  cannot end up with different tags. The scores are averaged across tokens first, and then the maximum
                  label is applied.
                - "max" : (works only on word based models) Will use the `SIMPLE` strategy except that words cannot
                  end up with different tags. The word entity will simply be the token with the maximum score.""",
)
class TokenClassificationPipeline(ChunkPipeline):
    """
    Named Entity Recognition pipeline using any `ModelForTokenClassification`. See the [named entity recognition
    examples](../task_summary#named-entity-recognition) for more information.

    Example:

    ```python
    >>> from transformers import pipeline

    >>> token_classifier = pipeline(model="Jean-Baptiste/camembert-ner", aggregation_strategy="simple")
    >>> sentence = "Je m'appelle jean-baptiste et je vis à montréal"
    >>> tokens = token_classifier(sentence)
    >>> tokens
    [{'entity_group': 'PER', 'score': 0.9931, 'word': 'jean-baptiste', 'start': 12, 'end': 26}, {'entity_group': 'LOC', 'score': 0.998, 'word': 'montréal', 'start': 38, 'end': 47}]

    >>> token = tokens[0]
    >>> # Start and end provide an easy way to highlight words in the original text.
    >>> sentence[token["start"] : token["end"]]
    ' jean-baptiste'

    >>> # Some models use the same idea to do part of speech.
    >>> syntaxer = pipeline(model="vblagoje/bert-english-uncased-finetuned-pos", aggregation_strategy="simple")
    >>> syntaxer("My name is Sarah and I live in London")
    [{'entity_group': 'PRON', 'score': 0.999, 'word': 'my', 'start': 0, 'end': 2}, {'entity_group': 'NOUN', 'score': 0.997, 'word': 'name', 'start': 3, 'end': 7}, {'entity_group': 'AUX', 'score': 0.994, 'word': 'is', 'start': 8, 'end': 10}, {'entity_group': 'PROPN', 'score': 0.999, 'word': 'sarah', 'start': 11, 'end': 16}, {'entity_group': 'CCONJ', 'score': 0.999, 'word': 'and', 'start': 17, 'end': 20}, {'entity_group': 'PRON', 'score': 0.999, 'word': 'i', 'start': 21, 'end': 22}, {'entity_group': 'VERB', 'score': 0.998, 'word': 'live', 'start': 23, 'end': 27}, {'entity_group': 'ADP', 'score': 0.999, 'word': 'in', 'start': 28, 'end': 30}, {'entity_group': 'PROPN', 'score': 0.999, 'word': 'london', 'start': 31, 'end': 37}]
    ```

    Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial)

    This token recognition pipeline can currently be loaded from [`pipeline`] using the following task identifier:
    `"ner"` (for predicting the classes of tokens in a sequence: person, organisation, location or miscellaneous).

    The models that this pipeline can use are models that have been fine-tuned on a token classification task. See the
    up-to-date list of available models on
    [huggingface.co/models](https://huggingface.co/models?filter=token-classification).
    """

    default_input_names = "sequences"

    _load_processor = False
    _load_image_processor = False
    _load_feature_extractor = False
    _load_tokenizer = True

    def __init__(self, args_parser=TokenClassificationArgumentHandler(), *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(
            TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
            if self.framework == "tf"
            else MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
        )

        self._basic_tokenizer = BasicTokenizer(do_lower_case=False)
        self._args_parser = args_parser

    def _sanitize_parameters(
        self,
        ignore_labels=None,
        grouped_entities: Optional[bool] = None,
        ignore_subwords: Optional[bool] = None,
        aggregation_strategy: Optional[AggregationStrategy] = None,
        offset_mapping: Optional[list[tuple[int, int]]] = None,
        stride: Optional[int] = None,
        is_split_into_words: Optional[bool] = False,
        delimiter: Optional[str] = None,
    ):
        preprocess_params = {}
        preprocess_params["is_split_into_words"] = is_split_into_words
        if is_split_into_words:
            preprocess_params["delimiter"] = " " if delimiter is None else delimiter
        if offset_mapping is not None:
            preprocess_params["offset_mapping"] = offset_mapping

        postprocess_params = {}
        if grouped_entities is not None or ignore_subwords is not None:
            if grouped_entities and ignore_subwords:
                aggregation_strategy = AggregationStrategy.FIRST
            elif grouped_entities and not ignore_subwords:
                aggregation_strategy = AggregationStrategy.SIMPLE
            else:
                aggregation_strategy = AggregationStrategy.NONE

            if grouped_entities is not None:
                warnings.warn(
                    "`grouped_entities` is deprecated and will be removed in version v5.0.0, defaulted to"
                    f' `aggregation_strategy="{aggregation_strategy}"` instead.'
                )
            if ignore_subwords is not None:
                warnings.warn(
                    "`ignore_subwords` is deprecated and will be removed in version v5.0.0, defaulted to"
                    f' `aggregation_strategy="{aggregation_strategy}"` instead.'
                )

        if aggregation_strategy is not None:
            if isinstance(aggregation_strategy, str):
                aggregation_strategy = AggregationStrategy[aggregation_strategy.upper()]
            if (
                aggregation_strategy in {AggregationStrategy.FIRST, AggregationStrategy.MAX, AggregationStrategy.AVERAGE}
                and not self.tokenizer.is_fast
            ):
                raise ValueError(
                    "Slow tokenizers cannot handle subwords. Please set the `aggregation_strategy` option"
                    ' to `"simple"` or use a fast tokenizer.'
                )
            postprocess_params["aggregation_strategy"] = aggregation_strategy
        if ignore_labels is not None:
            postprocess_params["ignore_labels"] = ignore_labels
        if stride is not None:
            if stride >= self.tokenizer.model_max_length:
                raise ValueError(
                    "`stride` must be less than `tokenizer.model_max_length` (or even lower if the tokenizer adds special tokens)"
                )
            if aggregation_strategy == AggregationStrategy.NONE:
                raise ValueError(
                    "`stride` was provided to process all the text but `aggregation_strategy="
                    f'"{aggregation_strategy}"`, please select another one instead.'
                )
            else:
                if self.tokenizer.is_fast:
                    tokenizer_params = {
                        "return_overflowing_tokens": True,
                        "padding": True,
                        "stride": stride,
                    }
                    preprocess_params["tokenizer_params"] = tokenizer_params
                else:
                    raise ValueError(
                        "`stride` was provided to process all the text but you're using a slow tokenizer."
                        " Please use a fast tokenizer."
                    )
        return preprocess_params, {}, postprocess_params

    @overload
    def __call__(self, inputs: str, **kwargs: Any) -> list[dict[str, Any]]: ...

    @overload
    def __call__(self, inputs: list[str], **kwargs: Any) -> list[list[dict[str, Any]]]: ...

    def __call__(
        self, inputs: Union[str, list[str]], **kwargs: Any
    ) -> Union[list[dict[str, Any]], list[list[dict[str, Any]]]]:
        """
        Classify each token of the text(s) given as inputs.

        Args:
            inputs (`str` or `List[str]`):
                One or several texts (or one list of texts) for token classification. Can be pre-tokenized when
                `is_split_into_words=True`.

        Return:
            A list or a list of lists of `dict`: Each result comes as a list of dictionaries (one for each token in
            the corresponding input, or each entity if this pipeline was instantiated with an aggregation_strategy)
            with the following keys:

            - **word** (`str`) -- The token/word classified. This is obtained by decoding the selected tokens. If you
              want to have the exact string in the original sentence, use `start` and `end`.
            - **score** (`float`) -- The corresponding probability for `entity`.
            - **entity** (`str`) -- The entity predicted for that token/word (it is named *entity_group* when
              *aggregation_strategy* is not `"none"`).
            - **index** (`int`, only present when `aggregation_strategy="none"`) -- The index of the corresponding
              token in the sentence.
            - **start** (`int`, *optional*) -- The index of the start of the corresponding entity in the sentence.
              Only exists if the offsets are available within the tokenizer.
            - **end** (`int`, *optional*) -- The index of the end of the corresponding entity in the sentence. Only
              exists if the offsets are available within the tokenizer.
        """
        _inputs, offset_mapping, is_split_into_words, delimiter = self._args_parser(inputs, **kwargs)
        kwargs["is_split_into_words"] = is_split_into_words
        kwargs["delimiter"] = delimiter

        if is_split_into_words and not all(isinstance(input, list) for input in inputs):
            return super().__call__([inputs], **kwargs)

        if offset_mapping:
            kwargs["offset_mapping"] = offset_mapping

        return super().__call__(inputs, **kwargs)

    def preprocess(self, sentence, offset_mapping=None, **preprocess_params):
        tokenizer_params = preprocess_params.pop("tokenizer_params", {})
        truncation = True if self.tokenizer.model_max_length and self.tokenizer.model_max_length > 0 else False
        word_to_chars_map = None
        is_split_into_words = preprocess_params["is_split_into_words"]
        if is_split_into_words:
            delimiter = preprocess_params["delimiter"]
            if not isinstance(sentence, list):
                raise ValueError("When `is_split_into_words=True`, `sentence` must be a list of tokens.")
            words = sentence
            sentence = delimiter.join(words)
            # Map each word index to its character span in the re-joined sentence,
            # so offsets can later be reported against the original input.
            word_to_chars_map = []
            char_offset = 0
            delimiter_len = len(delimiter)
            for word in words:
                word_to_chars_map.append((char_offset, char_offset + len(word)))
                char_offset += len(word) + delimiter_len
            text_to_tokenize = words
            tokenizer_params["is_split_into_words"] = True
        else:
            if not isinstance(sentence, str):
                raise ValueError("When `is_split_into_words=False`, `sentence` must be an untokenized string.")
            text_to_tokenize = sentence
        inputs = self.tokenizer(
            text_to_tokenize,
            return_tensors=self.framework,
            truncation=truncation,
            return_special_tokens_mask=True,
            return_offsets_mapping=self.tokenizer.is_fast,
            **tokenizer_params,
        )
        if is_split_into_words and not self.tokenizer.is_fast:
            raise ValueError("is_split_into_words=True is only supported with fast tokenizers.")
        inputs.pop("overflow_to_sample_mapping", None)
        num_chunks = len(inputs["input_ids"])

        for i in range(num_chunks):
            if self.framework == "tf":
                model_inputs = {k: tf.expand_dims(v[i], 0) for k, v in inputs.items()}
            else:
                model_inputs = {k: v[i].unsqueeze(0) for k, v in inputs.items()}
            if offset_mapping is not None:
                model_inputs["offset_mapping"] = offset_mapping
            model_inputs["sentence"] = sentence if i == 0 else None
            model_inputs["is_last"] = i == num_chunks - 1
            if is_split_into_words:
                model_inputs["word_ids"] = inputs.word_ids(i)
            model_inputs["word_to_chars_map"] = word_to_chars_map

            yield model_inputs

    def _forward(self, model_inputs):
        # Forward
        special_tokens_mask = model_inputs.pop("special_tokens_mask")
        offset_mapping = model_inputs.pop("offset_mapping", None)
        sentence = model_inputs.pop("sentence")
        is_last = model_inputs.pop("is_last")
        word_ids = model_inputs.pop("word_ids", None)
        word_to_chars_map = model_inputs.pop("word_to_chars_map", None)
        if self.framework == "tf":
            logits = self.model(**model_inputs)[0]
        else:
            output = self.model(**model_inputs)
            logits = output["logits"] if isinstance(output, dict) else output[0]

        return {
            "logits": logits,
            "special_tokens_mask": special_tokens_mask,
            "offset_mapping": offset_mapping,
            "sentence": sentence,
            "is_last": is_last,
            "word_ids": word_ids,
            "word_to_chars_map": word_to_chars_map,
            **model_inputs,
        }

    def postprocess(self, all_outputs, aggregation_strategy=AggregationStrategy.NONE, ignore_labels=None):
        if ignore_labels is None:
            ignore_labels = ["O"]
        all_entities = []
        word_to_chars_map = all_outputs[0].get("word_to_chars_map")
        for model_outputs in all_outputs:
            if self.framework == "pt" and model_outputs["logits"][0].dtype in (torch.bfloat16, torch.float16):
                logits = model_outputs["logits"][0].to(torch.float32).numpy()
            else:
                logits = model_outputs["logits"][0].numpy()
            sentence = all_outputs[0]["sentence"]
            input_ids = model_outputs["input_ids"][0]
            offset_mapping = (
                model_outputs["offset_mapping"][0] if model_outputs["offset_mapping"] is not None else None
            )
            special_tokens_mask = model_outputs["special_tokens_mask"][0].numpy()
            word_ids = model_outputs["word_ids"]

            # Numerically stable softmax over the label dimension
            maxes = np.max(logits, axis=-1, keepdims=True)
            shifted_exp = np.exp(logits - maxes)
            scores = shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)

            if self.framework == "tf":
                input_ids = input_ids.numpy()
                offset_mapping = offset_mapping.numpy() if offset_mapping is not None else None

            pre_entities = self.gather_pre_entities(
                sentence,
                input_ids,
                scores,
                offset_mapping,
                special_tokens_mask,
                aggregation_strategy,
                word_ids=word_ids,
                word_to_chars_map=word_to_chars_map,
            )
            grouped_entities = self.aggregate(pre_entities, aggregation_strategy)
            # Filter anything that is in ignore_labels
            entities = [
                entity
                for entity in grouped_entities
                if entity.get("entity", None) not in ignore_labels
                and entity.get("entity_group", None) not in ignore_labels
            ]
            all_entities.extend(entities)
        num_chunks = len(all_outputs)
        if num_chunks > 1:
            all_entities = self.aggregate_overlapping_entities(all_entities)
        return all_entities

    def aggregate_overlapping_entities(self, entities):
        if len(entities) == 0:
            return entities
        entities = sorted(entities, key=lambda x: x["start"])
        aggregated_entities = []
        previous_entity = entities[0]
        for entity in entities:
            if previous_entity["start"] <= entity["start"] < previous_entity["end"]:
                current_length = entity["end"] - entity["start"]
                previous_length = previous_entity["end"] - previous_entity["start"]
                if current_length > previous_length:
                    previous_entity = entity
                elif current_length == previous_length and entity["score"] > previous_entity["score"]:
                    previous_entity = entity
            else:
                aggregated_entities.append(previous_entity)
                previous_entity = entity
        aggregated_entities.append(previous_entity)
        return aggregated_entities

    def gather_pre_entities(
        self,
        sentence: str,
        input_ids: np.ndarray,
        scores: np.ndarray,
        offset_mapping: Optional[list[tuple[int, int]]],
        special_tokens_mask: np.ndarray,
        aggregation_strategy: AggregationStrategy,
        word_ids: Optional[list[Optional[int]]] = None,
        word_to_chars_map: Optional[list[tuple[int, int]]] = None,
    ) -> list[dict]:
        """Fuse various numpy arrays into dicts with all the information needed for aggregation"""
        pre_entities = []
        for idx, token_scores in enumerate(scores):
            # Filter special_tokens
            if special_tokens_mask[idx]:
                continue

            word = self.tokenizer.convert_ids_to_tokens(int(input_ids[idx]))
            if offset_mapping is not None:
                start_ind, end_ind = offset_mapping[idx]
                # With pre-tokenized inputs, shift the token offsets by the word's
                # character offset in the re-joined sentence.
                if word_ids is not None and word_to_chars_map is not None:
                    word_index = word_ids[idx]
                    if word_index is not None:
                        word_start, _ = word_to_chars_map[word_index]
                        start_ind += word_start
                        end_ind += word_start
                if not isinstance(start_ind, int):
                    if self.framework == "pt":
                        start_ind = start_ind.item()
                        end_ind = end_ind.item()
                word_ref = sentence[start_ind:end_ind]
                if getattr(self.tokenizer, "_tokenizer", None) and getattr(
                    self.tokenizer._tokenizer.model, "continuing_subword_prefix", None
                ):
                    # This is a BPE, word-aware tokenizer, there is a correct way to fuse tokens
                    is_subword = len(word) != len(word_ref)
                else:
                    # Fallback heuristic: guess subwords from surrounding whitespace.
                    if aggregation_strategy in {
                        AggregationStrategy.FIRST,
                        AggregationStrategy.AVERAGE,
                        AggregationStrategy.MAX,
                    }:
                        warnings.warn(
                            "Tokenizer does not support real words, using fallback heuristic",
                            UserWarning,
                        )
                    is_subword = start_ind > 0 and " " not in sentence[start_ind - 1 : start_ind + 1]

                if int(input_ids[idx]) == self.tokenizer.unk_token_id:
                    word = word_ref
                    is_subword = False
            else:
                start_ind = None
                end_ind = None
                is_subword = False

            pre_entity = {
                "word": word,
                "scores": token_scores,
                "start": start_ind,
                "end": end_ind,
                "index": idx,
                "is_subword": is_subword,
            }
            pre_entities.append(pre_entity)
        return pre_entities

    def aggregate(self, pre_entities: list[dict], aggregation_strategy: AggregationStrategy) -> list[dict]:
        if aggregation_strategy in {AggregationStrategy.NONE, AggregationStrategy.SIMPLE}:
            entities = []
            for pre_entity in pre_entities:
                entity_idx = pre_entity["scores"].argmax()
                score = pre_entity["scores"][entity_idx]
                entity = {
                    "entity": self.model.config.id2label[entity_idx],
                    "score": score,
                    "index": pre_entity["index"],
                    "word": pre_entity["word"],
                    "start": pre_entity["start"],
                    "end": pre_entity["end"],
                }
                entities.append(entity)
        else:
            entities = self.aggregate_words(pre_entities, aggregation_strategy)

        if aggregation_strategy == AggregationStrategy.NONE:
            return entities
        return self.group_entities(entities)

    def aggregate_word(self, entities: list[dict], aggregation_strategy: AggregationStrategy) -> dict:
        word = self.tokenizer.convert_tokens_to_string([entity["word"] for entity in entities])
        if aggregation_strategy == AggregationStrategy.FIRST:
            scores = entities[0]["scores"]
            idx = scores.argmax()
            score = scores[idx]
            entity = self.model.config.id2label[idx]
        elif aggregation_strategy == AggregationStrategy.MAX:
            max_entity = max(entities, key=lambda entity: entity["scores"].max())
            scores = max_entity["scores"]
            idx = scores.argmax()
            score = scores[idx]
            entity = self.model.config.id2label[idx]
        elif aggregation_strategy == AggregationStrategy.AVERAGE:
            scores = np.stack([entity["scores"] for entity in entities])
            average_scores = np.nanmean(scores, axis=0)
            entity_idx = average_scores.argmax()
            entity = self.model.config.id2label[entity_idx]
            score = average_scores[entity_idx]
        else:
            raise ValueError("Invalid aggregation_strategy")
        new_entity = {
            "entity": entity,
            "score": score,
            "word": word,
            "start": entities[0]["start"],
            "end": entities[-1]["end"],
        }
        return new_entity

    def aggregate_words(self, entities: list[dict], aggregation_strategy: AggregationStrategy) -> list[dict]:
        """
        Override tokens from a given word that disagree to force agreement on word boundaries.

        Example: micro|soft| com|pany| B-ENT I-NAME I-ENT I-ENT will be rewritten with first strategy as microsoft|
        company| B-ENT I-ENT
        """
        if aggregation_strategy in {AggregationStrategy.NONE, AggregationStrategy.SIMPLE}:
            raise ValueError("NONE and SIMPLE strategies are invalid for word aggregation")

        word_entities = []
        word_group = None
        for entity in entities:
            if word_group is None:
                word_group = [entity]
            elif entity["is_subword"]:
                word_group.append(entity)
            else:
                word_entities.append(self.aggregate_word(word_group, aggregation_strategy))
                word_group = [entity]
        # Last item
        if word_group is not None:
            word_entities.append(self.aggregate_word(word_group, aggregation_strategy))
        return word_entities

    def group_sub_entities(self, entities: list[dict]) -> dict:
        """
        Group together the adjacent tokens with the same entity predicted.

        Args:
            entities (`dict`): The entities predicted by the pipeline.
        """
        # Get the first entity in the entity group
        entity = entities[0]["entity"].split("-", 1)[-1]
        scores = np.nanmean([entity["score"] for entity in entities])
        tokens = [entity["word"] for entity in entities]

        entity_group = {
            "entity_group": entity,
            "score": np.mean(scores),
            "word": self.tokenizer.convert_tokens_to_string(tokens),
            "start": entities[0]["start"],
            "end": entities[-1]["end"],
        }
        return entity_group

    def get_tag(self, entity_name: str) -> tuple[str, str]:
        if entity_name.startswith("B-"):
            bi = "B"
            tag = entity_name[2:]
        elif entity_name.startswith("I-"):
            bi = "I"
            tag = entity_name[2:]
        else:
            # It's not in B-, I- format; default to I- for continuation.
            bi = "I"
            tag = entity_name
        return bi, tag

    def group_entities(self, entities: list[dict]) -> list[dict]:
        """
        Find and group together the adjacent tokens with the same entity predicted.

        Args:
            entities (`dict`): The entities predicted by the pipeline.
        """

        entity_groups = []
        entity_group_disagg = []

        for entity in entities:
            if not entity_group_disagg:
                entity_group_disagg.append(entity)
                continue

            # If the current entity is similar and adjacent to the previous entity,
            # append it to the disaggregated entity group. The split accounts for
            # the "B" and "I" prefixes; two B-tags of the same type must not merge.
            bi, tag = self.get_tag(entity["entity"])
            last_bi, last_tag = self.get_tag(entity_group_disagg[-1]["entity"])

            if tag == last_tag and bi != "B":
                entity_group_disagg.append(entity)
            else:
                # The current entity starts a new group; aggregate the previous one
                entity_groups.append(self.group_sub_entities(entity_group_disagg))
                entity_group_disagg = [entity]
        if entity_group_disagg:
            # It's the last entity, add it to the entity groups
            entity_groups.append(self.group_sub_entities(entity_group_disagg))

        return entity_groups


NerPipeline = TokenClassificationPipeline
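
# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the library API): run this module directly
# to exercise the pipeline end to end. It assumes the "Jean-Baptiste/camembert-ner"
# checkpoint shown in the class docstring above can be downloaded in your
# environment; any token-classification checkpoint would work the same way.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from transformers import pipeline

    token_classifier = pipeline("ner", model="Jean-Baptiste/camembert-ner", aggregation_strategy="simple")
    sentence = "Je m'appelle jean-baptiste et je vis à montréal"
    for entity in token_classifier(sentence):
        # Each dict carries `entity_group`, `score`, `word`, `start`, and `end`;
        # `start`/`end` index directly into the original sentence.
        print(entity["entity_group"], repr(sentence[entity["start"] : entity["end"]]))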