import warnings
from typing import List, Union

from ..utils import (
    add_end_docstrings,
    is_tf_available,
    is_torch_available,
    is_vision_available,
    logging,
    requires_backends,
)
from .base import Pipeline, build_pipeline_init_args


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_tf_available():
    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES

if is_torch_available():
    import torch

    from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES

logger = logging.get_logger(__name__)


@add_end_docstrings(build_pipeline_init_args(has_tokenizer=True, has_image_processor=True))
class ImageToTextPipeline(Pipeline):
    """
    Image To Text pipeline using an `AutoModelForVision2Seq`. This pipeline predicts a caption for a given image.

    Example:

    ```python
    >>> from transformers import pipeline

    >>> captioner = pipeline(model="ydshieh/vit-gpt2-coco-en")
    >>> captioner("https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png")
    [{'generated_text': 'two birds are standing next to each other '}]
    ```
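
    Generation arguments such as `max_new_tokens` are forwarded to `generate`. A minimal sketch reusing the
    checkpoint above (illustrative; the output depends on the checkpoint):

    ```python
    >>> captioner(
    ...     "https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png",
    ...     max_new_tokens=5,
    ... )  # doctest: +SKIP
    ```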

    Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial)

    This image to text pipeline can currently be loaded from pipeline() using the following task identifier:
    "image-to-text".

    See the list of available models on
    [huggingface.co/models](https://huggingface.co/models?pipeline_tag=image-to-text).
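
    Some checkpoints (for example GIT- or Pix2Struct-based ones) also support conditional generation from a text
    prompt. A sketch, assuming such a checkpoint is loaded; the model name and image path here are illustrative only:

    ```python
    >>> captioner = pipeline("image-to-text", model="microsoft/git-base-coco")  # doctest: +SKIP
    >>> captioner("parrots.png", prompt="a photography of")  # doctest: +SKIP
    ```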
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES if self.framework == "tf" else MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES
        )

    def _sanitize_parameters(self, max_new_tokens=None, generate_kwargs=None, prompt=None, timeout=None):
        forward_params = {}
        preprocess_params = {}

        if prompt is not None:
            preprocess_params["prompt"] = prompt
        if timeout is not None:
            warnings.warn(
                "The `timeout` argument is deprecated and will be removed in version 5 of Transformers", FutureWarning
            )
            preprocess_params["timeout"] = timeout

        if max_new_tokens is not None:
            forward_params["max_new_tokens"] = max_new_tokens
        if generate_kwargs is not None:
            if max_new_tokens is not None and "max_new_tokens" in generate_kwargs:
                raise ValueError(
                    "`max_new_tokens` is defined both as an argument and inside `generate_kwargs` argument, please use"
                    " only 1 version"
                )
            forward_params.update(generate_kwargs)

        return preprocess_params, forward_params, {}

    def __call__(self, inputs: Union[str, List[str], "Image.Image", List["Image.Image"]] = None, **kwargs):
        """
        Assign labels to the image(s) passed as inputs.

        Args:
            inputs (`str`, `List[str]`, `PIL.Image` or `List[PIL.Image]`):
                The pipeline handles three types of images:

                - A string containing a HTTP(s) link pointing to an image
                - A string containing a local path to an image
                - An image loaded in PIL directly

                The pipeline accepts either a single image or a batch of images.

            max_new_tokens (`int`, *optional*):
                The maximum number of tokens to generate. By default, the `generate` default is used.

            generate_kwargs (`Dict`, *optional*):
                A dictionary of keyword arguments forwarded directly to `generate`, allowing full control of that function.

        Return:
            A list or a list of lists of `dict`: Each result comes as a dictionary with the following key:

            - **generated_text** (`str`) -- The generated text.
        """
        # The `images` keyword is still accepted for backward compatibility
        if "images" in kwargs:
            inputs = kwargs.pop("images")
        if inputs is None:
            raise ValueError("Cannot call the image-to-text pipeline without an inputs argument!")
        return super().__call__(inputs, **kwargs)

    def preprocess(self, image, prompt=None, timeout=None):
        image = load_image(image, timeout=timeout)

        if prompt is not None:
            if not isinstance(prompt, str):
                raise ValueError(
                    f"Received an invalid text input, got - {type(prompt)} - but expected a single string. "
                    "Note also that one single text can be provided for conditional image to text generation."
                )

            model_type = self.model.config.model_type

            if model_type == "git":
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                if self.framework == "pt":
                    model_inputs = model_inputs.to(self.torch_dtype)
                input_ids = self.tokenizer(text=prompt, add_special_tokens=False).input_ids
                # GIT expects the prompt to be prefixed with the CLS token
                input_ids = [self.tokenizer.cls_token_id] + input_ids
                input_ids = torch.tensor(input_ids).unsqueeze(0)
                model_inputs.update({"input_ids": input_ids})
            elif model_type == "pix2struct":
                model_inputs = self.image_processor(images=image, header_text=prompt, return_tensors=self.framework)
                if self.framework == "pt":
                    model_inputs = model_inputs.to(self.torch_dtype)
            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                if self.framework == "pt":
                    model_inputs = model_inputs.to(self.torch_dtype)
                text_inputs = self.tokenizer(prompt, return_tensors=self.framework)
                model_inputs.update(text_inputs)
            else:
                raise ValueError(f"Model type {model_type} does not support conditional text generation")
        else:
            model_inputs = self.image_processor(images=image, return_tensors=self.framework)
            if self.framework == "pt":
                model_inputs = model_inputs.to(self.torch_dtype)

        if self.model.config.model_type == "git" and prompt is None:
            model_inputs["input_ids"] = None

        return model_inputs

    def _forward(self, model_inputs, **generate_kwargs):
        # GIT models set `input_ids = None` in `preprocess` when no prompt is provided. In batched mode, the
        # pipeline collates these into a list of `None`s, which `generate` cannot handle; flatten it back to `None`.
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs["input_ids"], list)
            and all(x is None for x in model_inputs["input_ids"])
        ):
            model_inputs["input_ids"] = None

        if "generation_config" not in generate_kwargs:
            generate_kwargs["generation_config"] = self.generation_config

        inputs = model_inputs.pop(self.model.main_input_name)
        model_outputs = self.model.generate(inputs, **model_inputs, **generate_kwargs)
        return model_outputs

    def postprocess(self, model_outputs):
        records = []
        for output_ids in model_outputs:
            record = {
                "generated_text": self.tokenizer.decode(
                    output_ids,
                    skip_special_tokens=True,
                )
            }
            records.append(record)
        return records