"""CLIPSeg model configuration"""

import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)


class CLIPSegTextConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`CLIPSegTextModel`]. It is used to instantiate a
    CLIPSeg text model according to the specified arguments, defining the model architecture. Instantiating a configuration
    with the defaults will yield a similar configuration to that of the CLIPSeg
    [CIDAS/clipseg-rd64](https://huggingface.co/CIDAS/clipseg-rd64) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 49408):
            Vocabulary size of the CLIPSeg text model. Defines the number of different tokens that can be represented
            by the `input_ids` passed when calling [`CLIPSegModel`].
        hidden_size (`int`, *optional*, defaults to 512):
            Dimensionality of the encoder layers and the pooler layer.
        intermediate_size (`int`, *optional*, defaults to 2048):
            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 8):
            Number of attention heads for each attention layer in the Transformer encoder.
        max_position_embeddings (`int`, *optional*, defaults to 77):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        hidden_act (`str` or `function`, *optional*, defaults to `"quick_gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"`, `"gelu_new"` and `"quick_gelu"` are supported.
        layer_norm_eps (`float`, *optional*, defaults to 1e-05):
            The epsilon used by the layer normalization layers.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        initializer_factor (`float`, *optional*, defaults to 1.0):
            A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
            testing).
        pad_token_id (`int`, *optional*, defaults to 1):
            Padding token id.
        bos_token_id (`int`, *optional*, defaults to 49406):
            Beginning of stream token id.
        eos_token_id (`int`, *optional*, defaults to 49407):
            End of stream token id.

    Example:

    ```python
    >>> from transformers import CLIPSegTextConfig, CLIPSegTextModel

    >>> # Initializing a CLIPSegTextConfig with CIDAS/clipseg-rd64 style configuration
    >>> configuration = CLIPSegTextConfig()

    >>> # Initializing a CLIPSegTextModel (with random weights) from the CIDAS/clipseg-rd64 style configuration
    >>> model = CLIPSegTextModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
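    >>> # Keyword overrides are also accepted; the smaller sizes below are
    >>> # illustrative values, not those of a released checkpoint
    >>> small_configuration = CLIPSegTextConfig(hidden_size=256, num_hidden_layers=6)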
    ```"""

    model_type = "clipseg_text_model"

    def __init__(
        self,
        vocab_size=49408,
        hidden_size=512,
        intermediate_size=2048,
        num_hidden_layers=12,
        num_attention_heads=8,
        max_position_embeddings=77,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-05,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        pad_token_id=1,
        bos_token_id=49406,
        eos_token_id=49407,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from CLIPSegConfig
        if config_dict.get("model_type") == "clipseg":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class CLIPSegVisionConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`CLIPSegVisionModel`]. It is used to instantiate a
    CLIPSeg vision model according to the specified arguments, defining the model architecture. Instantiating a configuration
    with the defaults will yield a similar configuration to that of the CLIPSeg
    [CIDAS/clipseg-rd64](https://huggingface.co/CIDAS/clipseg-rd64) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        hidden_size (`int`, *optional*, defaults to 768):
            Dimensionality of the encoder layers and the pooler layer.
        intermediate_size (`int`, *optional*, defaults to 3072):
            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer encoder.
        num_channels (`int`, *optional*, defaults to 3):
            The number of input channels.
        image_size (`int`, *optional*, defaults to 224):
            The size (resolution) of each image.
        patch_size (`int`, *optional*, defaults to 32):
            The size (resolution) of each patch.
        hidden_act (`str` or `function`, *optional*, defaults to `"quick_gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"`, `"gelu_new"` and `"quick_gelu"` are supported.
        layer_norm_eps (`float`, *optional*, defaults to 1e-05):
            The epsilon used by the layer normalization layers.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        initializer_factor (`float`, *optional*, defaults to 1.0):
            A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
            testing).

    Example:

    ```python
    >>> from transformers import CLIPSegVisionConfig, CLIPSegVisionModel

    >>> # Initializing a CLIPSegVisionConfig with CIDAS/clipseg-rd64 style configuration
    >>> configuration = CLIPSegVisionConfig()

    >>> # Initializing a CLIPSegVisionModel (with random weights) from the CIDAS/clipseg-rd64 style configuration
    >>> model = CLIPSegVisionModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
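    >>> # Keyword overrides work the same way here; the values below are
    >>> # illustrative, not those of a released checkpoint
    >>> small_configuration = CLIPSegVisionConfig(image_size=112, patch_size=16)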
    ```"""

    model_type = "clipseg_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=32,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-05,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from CLIPSegConfig
        if config_dict.get("model_type") == "clipseg":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class CLIPSegConfig(PretrainedConfig):
    r"""
    [`CLIPSegConfig`] is the configuration class to store the configuration of a [`CLIPSegModel`]. It is used to
    instantiate a CLIPSeg model according to the specified arguments, defining the text model and vision model configs.
    Instantiating a configuration with the defaults will yield a similar configuration to that of the CLIPSeg
    [CIDAS/clipseg-rd64](https://huggingface.co/CIDAS/clipseg-rd64) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        text_config (`dict`, *optional*):
            Dictionary of configuration options used to initialize [`CLIPSegTextConfig`].
        vision_config (`dict`, *optional*):
            Dictionary of configuration options used to initialize [`CLIPSegVisionConfig`].
        projection_dim (`int`, *optional*, defaults to 512):
            Dimensionality of text and vision projection layers.
        logit_scale_init_value (`float`, *optional*, defaults to 2.6592):
            The initial value of the *logit_scale* parameter. Default is used as per the original CLIPSeg implementation.
        extract_layers (`List[int]`, *optional*, defaults to `[3, 6, 9]`):
            Layers to extract when forwarding the query image through the frozen visual backbone of CLIP.
        reduce_dim (`int`, *optional*, defaults to 64):
            Dimensionality to which the CLIP vision embeddings are reduced.
        decoder_num_attention_heads (`int`, *optional*, defaults to 4):
            Number of attention heads in the decoder of CLIPSeg.
        decoder_attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        decoder_hidden_act (`str` or `function`, *optional*, defaults to `"quick_gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"` and `"gelu_new"` `"quick_gelu"` are supported.
        decoder_intermediate_size (`int`, *optional*, defaults to 2048):
            Dimensionality of the "intermediate" (i.e., feed-forward) layers in the Transformer decoder.
        conditional_layer (`int`, *optional*, defaults to 0):
            The layer of the Transformer encoder whose activations will be combined with the condition embeddings
            using FiLM (Feature-wise Linear Modulation). If 0, the last layer is used.
        use_complex_transposed_convolution (`bool`, *optional*, defaults to `False`):
            Whether to use a more complex transposed convolution in the decoder, enabling more fine-grained
            segmentation.
        kwargs (*optional*):
            Dictionary of keyword arguments.

    Example:

    ```python
    >>> from transformers import CLIPSegConfig, CLIPSegModel

    >>> # Initializing a CLIPSegConfig with CIDAS/clipseg-rd64 style configuration
    >>> configuration = CLIPSegConfig()

    >>> # Initializing a CLIPSegModel (with random weights) from the CIDAS/clipseg-rd64 style configuration
    >>> model = CLIPSegModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config

    >>> # We can also initialize a CLIPSegConfig from a CLIPSegTextConfig and a CLIPSegVisionConfig

    >>> # Initializing a CLIPSegText and CLIPSegVision configuration
    >>> config_text = CLIPSegTextConfig()
    >>> config_vision = CLIPSegVisionConfig()

    >>> config = CLIPSegConfig.from_text_vision_configs(config_text, config_vision)
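    >>> # The composed config exposes both sub-configs as attributes
    >>> config.text_config.vocab_size
    49408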
    ```"""

    model_type = "clipseg"

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        projection_dim=512,
        logit_scale_init_value=2.6592,
        extract_layers=[3, 6, 9],
        reduce_dim=64,
        decoder_num_attention_heads=4,
        decoder_attention_dropout=0.0,
        decoder_hidden_act="quick_gelu",
        decoder_intermediate_size=2048,
        conditional_layer=0,
        use_complex_transposed_convolution=False,
        **kwargs,
    ):
        # Pop the legacy `*_config_dict` arguments before calling `super().__init__` so they are not saved.
        text_config_dict = kwargs.pop("text_config_dict", None)
        vision_config_dict = kwargs.pop("vision_config_dict", None)

        super().__init__(**kwargs)

        # For backward compatibility, values in `text_config_dict` take precedence over those in `text_config`.
        if text_config_dict is not None:
            if text_config is None:
                text_config = {}

            # This is the complete result when using `text_config_dict`.
            _text_config_dict = CLIPSegTextConfig(**text_config_dict).to_dict()

            for key, value in _text_config_dict.items():
                if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
                    if key in text_config_dict:
                        message = (
                            f"`{key}` is found in both `text_config_dict` and `text_config` but with different "
                            f'values. The value `text_config_dict["{key}"]` will be used instead.'
                        )
                    else:
                        message = (
                            f"`text_config_dict` is provided which will be used to initialize `CLIPSegTextConfig`. "
                            f'The value `text_config["{key}"]` will be overridden.'
                        )
                    logger.info(message)

            text_config.update(_text_config_dict)

        if vision_config_dict is not None:
            if vision_config is None:
                vision_config = {}

            # This is the complete result when using `vision_config_dict`.
            _vision_config_dict = CLIPSegVisionConfig(**vision_config_dict).to_dict()
            # Convert `id2label` keys to strings instead of integers.
            if "id2label" in _vision_config_dict:
                _vision_config_dict["id2label"] = {
                    str(key): value for key, value in _vision_config_dict["id2label"].items()
                }

            for key, value in _vision_config_dict.items():
                if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
                    if key in vision_config_dict:
                        message = (
                            f"`{key}` is found in both `vision_config_dict` and `vision_config` but with different "
                            f'values. The value `vision_config_dict["{key}"]` will be used instead.'
                        )
                    else:
                        message = (
                            f"`vision_config_dict` is provided which will be used to initialize "
                            f'`CLIPSegVisionConfig`. The value `vision_config["{key}"]` will be overridden.'
                        )
                    logger.info(message)

            vision_config.update(_vision_config_dict)

        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `CLIPSegTextConfig` with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. Initializing the `CLIPSegVisionConfig` with default values.")

        self.text_config = CLIPSegTextConfig(**text_config)
        self.vision_config = CLIPSegVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.extract_layers = extract_layers
        self.reduce_dim = reduce_dim
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_attention_dropout = decoder_attention_dropout
        self.decoder_hidden_act = decoder_hidden_act
        self.decoder_intermediate_size = decoder_intermediate_size
        self.conditional_layer = conditional_layer
        self.initializer_factor = 1.0
        self.use_complex_transposed_convolution = use_complex_transposed_convolution

    @classmethod
    def from_text_vision_configs(cls, text_config: CLIPSegTextConfig, vision_config: CLIPSegVisionConfig, **kwargs):
        r"""
        Instantiate a [`CLIPSegConfig`] (or a derived class) from CLIPSeg text model configuration and CLIPSeg vision
        model configuration.

        Returns:
            [`CLIPSegConfig`]: An instance of a configuration object
        """

        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)