
"""Pix2Struct model configuration"""

import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)


class Pix2StructTextConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`Pix2StructTextModel`]. It is used to instantiate
    a Pix2Struct text model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the Pix2Struct text decoder used by
    the [google/pix2struct-base](https://huggingface.co/google/pix2struct-base) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 50244):
            Vocabulary size of the `Pix2Struct` text model. Defines the number of different tokens that can be
            represented by the `inputs_ids` passed when calling [`Pix2StructTextModel`].
        hidden_size (`int`, *optional*, defaults to 768):
            Dimensionality of the encoder layers and the pooler layer.
        d_kv (`int`, *optional*, defaults to 64):
            Dimensionality of the key, query, value projections in each attention head.
        d_ff (`int`, *optional*, defaults to 2048):
            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        num_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_heads (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer encoder.
        relative_attention_num_buckets (`int`, *optional*, defaults to 32):
            The number of buckets to use for each attention layer.
        relative_attention_max_distance (`int`, *optional*, defaults to 128):
            The maximum relative distance used for bucket separation in longer sequences.
        dropout_rate (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        layer_norm_epsilon (`float`, *optional*, defaults to 1e-6):
            The epsilon used by the layer normalization layers.
        initializer_factor (`float`, *optional*, defaults to 1.0):
            A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
            testing).
        dense_act_fn (`Union[Callable, str]`, *optional*, defaults to `"gelu_new"`):
            The non-linear activation function (function or string).
        decoder_start_token_id (`int`, *optional*, defaults to 0):
            The id of the token used to start decoder sequence generation.
        use_cache (`bool`, *optional*, defaults to `False`):
            Whether or not the model should return the last key/values attentions (not used by all models).
        pad_token_id (`int`, *optional*, defaults to 0):
            The id of the `padding` token.
        eos_token_id (`int`, *optional*, defaults to 1):
            The id of the `end-of-sequence` token.

    Example:

    ```python
    >>> from transformers import Pix2StructTextConfig, Pix2StructTextModel

    >>> # Initializing a Pix2StructTextConfig with google/pix2struct-base style configuration
    >>> configuration = Pix2StructTextConfig()

    >>> # Initializing a Pix2StructTextModel (with random weights) from the google/pix2struct-base style configuration
    >>> model = Pix2StructTextModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
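    >>> # Editor's sketch (not part of the original example): any argument documented above can be
    >>> # overridden at construction time, e.g. to build a smaller decoder
    >>> small_configuration = Pix2StructTextConfig(num_layers=6, num_heads=8)
    >>> small_configuration.num_layers
    6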
    ```"""

    model_type = "pix2struct_text_model"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "hidden_size",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=50244,
        hidden_size=768,
        d_kv=64,
        d_ff=2048,
        num_layers=12,
        num_heads=12,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        dense_act_fn="gelu_new",
        decoder_start_token_id=0,
        use_cache=False,
        pad_token_id=0,
        eos_token_id=1,
        tie_word_embeddings=False,
        is_decoder=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.use_cache = use_cache

        self.eos_token_id = eos_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.dense_act_fn = dense_act_fn

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            tie_word_embeddings=tie_word_embeddings,
            is_decoder=is_decoder,
            **kwargs,
        )

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class Pix2StructVisionConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`Pix2StructVisionModel`]. It is used to
    instantiate a Pix2Struct vision model according to the specified arguments, defining the model architecture.
    Instantiating a configuration with the defaults will yield a similar configuration to that of the Pix2Struct-base
    [google/pix2struct-base](https://huggingface.co/google/pix2struct-base) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        hidden_size (`int`, *optional*, defaults to 768):
            Dimensionality of the encoder layers and the pooler layer.
        patch_embed_hidden_size (`int`, *optional*, defaults to 768):
            Dimensionality of the input patch_embedding layer in the Transformer encoder.
        d_ff (`int`, *optional*, defaults to 2048):
            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        d_kv (`int`, *optional*, defaults to 64):
            Dimensionality of the key, query, value projections per attention head.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer encoder.
        dense_act_fn (`str` or `function`, *optional*, defaults to `"gelu_new"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"` and `"gelu_new"` `"gelu"` are supported.
        layer_norm_eps (`float`, *optional*, defaults to 1e-06):
            The epsilon used by the layer normalization layers.
        dropout_rate (`float`, *optional*, defaults to 0.0):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        initializer_range (`float`, *optional*, defaults to 1e-10):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        initializer_factor (`float`, *optional*, defaults to 1.0):
            A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
            testing).
        seq_len (`int`, *optional*, defaults to 4096):
            Maximum sequence length (here number of patches) supported by the model.
        relative_attention_num_buckets (`int`, *optional*, defaults to 32):
            The number of buckets to use for each attention layer.
        relative_attention_max_distance (`int`, *optional*, defaults to 128):
            The maximum distance (in tokens) to use for each attention layer.

    Example:

    ```python
    >>> from transformers import Pix2StructVisionConfig, Pix2StructVisionModel

    >>> # Initializing a Pix2StructVisionConfig with google/pix2struct-base style configuration
    >>> configuration = Pix2StructVisionConfig()

    >>> # Initializing a Pix2StructVisionModel (with random weights) from the google/pix2struct-base style configuration
    >>> model = Pix2StructVisionModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
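    >>> # Editor's sketch (not part of the original example): the documented defaults can be overridden
    >>> custom_configuration = Pix2StructVisionConfig(hidden_size=512, num_hidden_layers=6)
    >>> custom_configuration.hidden_size
    512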
    ```"""

    model_type = "pix2struct_vision_model"

    def __init__(
        self,
        hidden_size=768,
        patch_embed_hidden_size=768,
        d_ff=2048,
        d_kv=64,
        num_hidden_layers=12,
        num_attention_heads=12,
        dense_act_fn="gelu_new",
        layer_norm_eps=1e-6,
        dropout_rate=0.0,
        attention_dropout=0.0,
        initializer_range=1e-10,
        initializer_factor=1.0,
        seq_len=4096,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.patch_embed_hidden_size = patch_embed_hidden_size
        self.d_ff = d_ff
        self.d_kv = d_kv
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.dense_act_fn = dense_act_fn
        self.seq_len = seq_len
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class Pix2StructConfig(PretrainedConfig):
    r"""
    [`Pix2StructConfig`] is the configuration class to store the configuration of a
    [`Pix2StructForConditionalGeneration`]. It is used to instantiate a Pix2Struct model according to the specified
    arguments, defining the text model and vision model configs. Instantiating a configuration with the defaults will
    yield a similar configuration to that of the Pix2Struct-base
    [google/pix2struct-base](https://huggingface.co/google/pix2struct-base) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        text_config (`dict`, *optional*):
            Dictionary of configuration options used to initialize [`Pix2StructTextConfig`].
        vision_config (`dict`, *optional*):
            Dictionary of configuration options used to initialize [`Pix2StructVisionConfig`].
        initializer_factor (`float`, *optional*, defaults to 1.0):
            Factor to multiply the initialization range with.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        is_vqa (`bool`, *optional*, defaults to `False`):
            Whether the model has been fine-tuned for VQA or not.
        kwargs (*optional*):
            Dictionary of keyword arguments.

    Example:

    ```python
    >>> from transformers import Pix2StructConfig, Pix2StructForConditionalGeneration

    >>> # Initializing a Pix2StructConfig with google/pix2struct-base style configuration
    >>> configuration = Pix2StructConfig()

    >>> # Initializing a Pix2StructForConditionalGeneration (with random weights) from the google/pix2struct-base style configuration
    >>> model = Pix2StructForConditionalGeneration(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config

    >>> # We can also initialize a Pix2StructConfig from a Pix2StructTextConfig and a Pix2StructVisionConfig

    >>> # Initializing a Pix2Struct text and Pix2Struct vision configuration
    >>> config_text = Pix2StructTextConfig()
    >>> config_vision = Pix2StructVisionConfig()

    >>> config = Pix2StructConfig.from_text_vision_configs(config_text, config_vision)
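    >>> # Editor's sketch (not part of the original example): the composite config exposes both sub-configs
    >>> config.text_config.hidden_size == config_text.hidden_size
    True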
    ```"""

    model_type = "pix2struct"

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        initializer_factor=1.0,
        initializer_range=0.02,
        is_vqa=False,
        tie_word_embeddings=False,
        is_encoder_decoder=True,
        **kwargs,
    ):
        super().__init__(tie_word_embeddings=tie_word_embeddings, is_encoder_decoder=is_encoder_decoder, **kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values.")

        self.text_config = Pix2StructTextConfig(**text_config)
        self.vision_config = Pix2StructVisionConfig(**vision_config)

        self.decoder_start_token_id = self.text_config.decoder_start_token_id
        self.pad_token_id = self.text_config.pad_token_id
        self.eos_token_id = self.text_config.eos_token_id

        self.initializer_factor = initializer_factor
        self.initializer_range = initializer_range

        self.text_config.initializer_range = self.initializer_range
        self.vision_config.initializer_range = self.initializer_range

        self.is_vqa = is_vqa

    @classmethod
    def from_text_vision_configs(
        cls, text_config: Pix2StructTextConfig, vision_config: Pix2StructVisionConfig, **kwargs
    ):
        r"""
        Instantiate a [`Pix2StructConfig`] (or a derived class) from pix2struct text model configuration and pix2struct
        vision model configuration.

        Returns:
            [`Pix2StructConfig`]: An instance of a configuration object
        """
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)