"""DBRX model configuration"""

from typing import Any, Optional

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)


class DbrxAttentionConfig(PretrainedConfig):
    """Configuration class for Dbrx Attention.

    [`DbrxAttention`] class. It is used to instantiate attention layers
    according to the specified arguments, defining the layers' architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        attn_pdrop (`float`, *optional*, defaults to 0.0):
            The dropout probability for the attention layers.
        clip_qkv (`float`, *optional*):
            If set, clip the queries, keys, and values in the attention layer to this value.
        kv_n_heads (`int`, *optional*, defaults to 1): For grouped-query attention only; allows the user to specify the number of KV heads.
        rope_theta (`float`, *optional*, defaults to 10000.0): The base frequency for rope.
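
    Example (an illustrative sketch; the values below are arbitrary, not the DBRX defaults):
    ```python
    >>> from transformers.models.dbrx.configuration_dbrx import DbrxAttentionConfig

    >>> # Grouped-query attention with 8 KV heads and clipped QKV projections
    >>> attn_config = DbrxAttentionConfig(attn_pdrop=0.0, clip_qkv=8.0, kv_n_heads=8, rope_theta=500000.0)
    ```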
    """

    def __init__(
        self,
        attn_pdrop: float = 0.0,
        clip_qkv: Optional[float] = None,
        kv_n_heads: int = 1,
        rope_theta: float = 10000.0,
        **kwargs: Any,
    ):
        super().__init__(**kwargs)
        self.attn_pdrop = attn_pdrop
        self.clip_qkv = clip_qkv
        self.kv_n_heads = kv_n_heads
        self.rope_theta = rope_theta

        # Ignore `model_type` if it was carried over from a serialized config, then reject anything else.
        for k in ["model_type"]:
            if k in kwargs:
                kwargs.pop(k)
        if len(kwargs) != 0:
            raise ValueError(f"Found unknown {kwargs=}")

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: str, **kwargs: Any) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # When loading from a full DBRX checkpoint, pull out the nested attention sub-config.
        if config_dict.get("model_type") == "dbrx":
            config_dict = config_dict["attn_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class DbrxFFNConfig(PretrainedConfig):
    """Configuration class for Dbrx FFN.

    [`DbrxFFN`] class. It is used to instantiate feedforward layers according to
    the specified arguments, defining the layers' architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        ffn_act_fn (`dict`, *optional*, defaults to `None`): A dict specifying activation function for the FFN.
            The dict should have a key 'name' with the value being the name of the activation function along with
            any additional keyword arguments. If `None`, then set to `{"name": "silu"}`.
        ffn_hidden_size (`int`, *optional*, defaults to 3584): The hidden size of the feedforward network.
        moe_num_experts (`int`, *optional*, defaults to 4): The number of experts in the mixture of experts layer.
        moe_top_k (`int`, *optional*, defaults to 1): The number of experts to use in the mixture of experts layer.
        moe_jitter_eps (`float`, *optional*, defaults to `None`): If not `None`, the jitter epsilon for the mixture of experts layer.
        moe_loss_weight (`float`, *optional*, defaults to 0.01): The loss weight for the mixture of experts layer.
        moe_normalize_expert_weights (`float`, *optional*, defaults to 1.0): The normalization factor for the expert weights.
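
    Example (an illustrative sketch; the values below are arbitrary, not the DBRX defaults):
    ```python
    >>> from transformers.models.dbrx.configuration_dbrx import DbrxFFNConfig

    >>> # A mixture-of-experts FFN with 16 experts, 4 active per token, and SiLU activation
    >>> ffn_config = DbrxFFNConfig(ffn_act_fn={"name": "silu"}, moe_num_experts=16, moe_top_k=4)
    ```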
    """

    def __init__(
        self,
        ffn_act_fn: Optional[dict] = None,
        ffn_hidden_size: int = 3584,
        moe_num_experts: int = 4,
        moe_top_k: int = 1,
        moe_jitter_eps: Optional[float] = None,
        moe_loss_weight: float = 0.01,
        moe_normalize_expert_weights: Optional[float] = 1.0,
        **kwargs: Any,
    ):
        super().__init__()
        if ffn_act_fn is None:
            ffn_act_fn = {"name": "silu"}
        self.ffn_act_fn = ffn_act_fn
        self.ffn_hidden_size = ffn_hidden_size
        self.moe_num_experts = moe_num_experts
        self.moe_top_k = moe_top_k
        self.moe_jitter_eps = moe_jitter_eps
        self.moe_loss_weight = moe_loss_weight
        self.moe_normalize_expert_weights = moe_normalize_expert_weights

        # Ignore `model_type` if it was carried over from a serialized config, then reject anything else.
        for k in ["model_type"]:
            if k in kwargs:
                kwargs.pop(k)
        if len(kwargs) != 0:
            raise ValueError(f"Found unknown {kwargs=}")

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: str, **kwargs: Any) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # When loading from a full DBRX checkpoint, pull out the nested FFN sub-config.
        if config_dict.get("model_type") == "dbrx":
            config_dict = config_dict["ffn_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class DbrxConfig(PretrainedConfig):
    r"""

    This is the configuration class to store the configuration of a [`DbrxModel`]. It is used to instantiate a Dbrx model according to the
    specified arguments, defining the model architecture. Instantiating a configuration with the
    defaults will yield a different configuration to that of the [databricks/dbrx-instruct](https://huggingface.co/databricks/dbrx-instruct) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.


    Args:
        d_model (`int`, *optional*, defaults to 2048):
            Dimensionality of the embeddings and hidden states.
        n_heads (`int`, *optional*, defaults to 16):
            Number of attention heads for each attention layer in the Transformer encoder.
        n_layers (`int`, *optional*, defaults to 24):
            Number of hidden layers in the Transformer encoder.
        max_seq_len (`int`, *optional*, defaults to 2048):
            The maximum sequence length of the model.
        vocab_size (`int`, *optional*, defaults to 32000):
            Vocabulary size of the Dbrx model. Defines the maximum number of different tokens that can be represented by
            the `input_ids` passed when calling [`DbrxModel`].
        resid_pdrop (`float`, *optional*, defaults to 0.0):
            The dropout probability applied to the attention output before combining with residual.
        emb_pdrop (`float`, *optional*, defaults to 0.0):
            The dropout probability for the embedding layer.
        attn_config (`dict`, *optional*):
            A dictionary used to configure the model's attention module.
        ffn_config (`dict`, *optional*):
            A dictionary used to configure the model's FFN module.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models).
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        output_router_logits (`bool`, *optional*, defaults to `False`):
            Whether or not the router logits should be returned by the model. Enabling this will also
            allow the model to output the auxiliary loss. See [here]() for more details.


    Example:
    ```python
    >>> from transformers import DbrxConfig, DbrxModel

    >>> # Initializing a Dbrx configuration
    >>> configuration = DbrxConfig(n_layers=2, d_model=256, n_heads=8, vocab_size=128)

    >>> # Initializing a model (with random weights) from the configuration
    >>> model = DbrxModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
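
    >>> # The nested attention/FFN sub-configs may also be given as plain dicts
    >>> # (illustrative values; any DbrxAttentionConfig / DbrxFFNConfig argument is accepted)
    >>> configuration = DbrxConfig(
    ...     attn_config={"kv_n_heads": 8, "rope_theta": 500000.0},
    ...     ffn_config={"moe_num_experts": 16, "moe_top_k": 4},
    ... )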
    ```
    """

    model_type = "dbrx"
    attribute_map = {
        "num_attention_heads": "n_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "n_layers",
        "max_position_embeddings": "max_seq_len",
    }

    def __init__(
        self,
        d_model: int = 2048,
        n_heads: int = 16,
        n_layers: int = 24,
        max_seq_len: int = 2048,
        vocab_size: int = 32000,
        resid_pdrop: float = 0.0,
        emb_pdrop: float = 0.0,
        attn_config: Optional[DbrxAttentionConfig] = None,
        ffn_config: Optional[DbrxFFNConfig] = None,
        use_cache: bool = True,
        initializer_range: float = 0.02,
        output_router_logits: bool = False,
        **kwargs: Any,
    ):
        # The sub-configs may be passed as instances or as plain dicts (e.g. when loading from JSON).
        if attn_config is None:
            self.attn_config = DbrxAttentionConfig()
        elif isinstance(attn_config, dict):
            self.attn_config = DbrxAttentionConfig(**attn_config)
        else:
            self.attn_config = attn_config

        if ffn_config is None:
            self.ffn_config = DbrxFFNConfig()
        elif isinstance(ffn_config, dict):
            self.ffn_config = DbrxFFNConfig(**ffn_config)
        else:
            self.ffn_config = ffn_config

        self.d_model = d_model
        self.n_heads = n_heads
        self.n_layers = n_layers
        self.max_seq_len = max_seq_len
        self.vocab_size = vocab_size
        self.resid_pdrop = resid_pdrop
        self.emb_pdrop = emb_pdrop
        self.use_cache = use_cache
        self.initializer_range = initializer_range
        self.output_router_logits = output_router_logits
        self.num_key_value_heads = self.attn_config.kv_n_heads

        # Tied input/output embeddings are not supported for DBRX.
        tie_word_embeddings = kwargs.pop("tie_word_embeddings", False)
        if tie_word_embeddings:
            raise ValueError("tie_word_embeddings is not supported for DBRX models.")

        super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)