"""PyTorch Cohere model."""

from typing import Callable, List, Optional, Tuple, Union

import torch
import torch.utils.checkpoint
from torch import nn

from ...cache_utils import Cache
from ...modeling_flash_attention_utils import FlashAttentionKwargs
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
from ...modeling_rope_utils import dynamic_rope_update
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS
from ...processing_utils import Unpack
from ...pytorch_utils import ALL_LAYERNORM_LAYERS
from ...utils import LossKwargs, logging
from ..llama.modeling_llama import (
    LlamaAttention,
    LlamaForCausalLM,
    LlamaMLP,
    LlamaModel,
    LlamaPreTrainedModel,
    LlamaRotaryEmbedding,
    eager_attention_forward,
)
from .configuration_cohere import CohereConfig


logger = logging.get_logger(__name__)


class CohereLayerNorm(nn.Module):
    def __init__(self, hidden_size=None, eps=1e-5, bias=False):
        """The hidden size can be a tuple or an int. The tuple is used for QKNorm to normalize across head_dim"""
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        input_dtype = hidden_states.dtype
        hidden_states = hidden_states.to(torch.float32)
        mean = hidden_states.mean(-1, keepdim=True)
        variance = (hidden_states - mean).pow(2).mean(-1, keepdim=True)
        hidden_states = (hidden_states - mean) * torch.rsqrt(variance + self.variance_epsilon)
        hidden_states = self.weight.to(torch.float32) * hidden_states
        return hidden_states.to(input_dtype)


ALL_LAYERNORM_LAYERS.append(CohereLayerNorm)


class CohereRotaryEmbedding(LlamaRotaryEmbedding):
    @torch.no_grad()
    @dynamic_rope_update
    def forward(self, x, position_ids):
        inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1)
        position_ids_expanded = position_ids[:, None, :].float()

        device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
        with torch.autocast(device_type=device_type, enabled=False):  # force float32
            freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
            # Cohere interleaves the frequencies instead of concatenating two halves as Llama does
            emb = torch.repeat_interleave(freqs, 2, dim=-1)
            cos = emb.cos() * self.attention_scaling
            sin = emb.sin() * self.attention_scaling

        return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)


def rotate_half(x):
    # Split and rotate over interleaved pairs; this differs from the half-split rotation used by Llama.
    x1 = x[..., ::2]
    x2 = x[..., 1::2]
    rot_x = torch.stack([-x2, x1], dim=-1).flatten(-2)
    return rot_x


def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
    """Applies Rotary Position Embedding to the query and key tensors.

    Args:
        q (`torch.Tensor`): The query tensor.
        k (`torch.Tensor`): The key tensor.
        cos (`torch.Tensor`): The cosine part of the rotary embedding.
        sin (`torch.Tensor`): The sine part of the rotary embedding.
        position_ids (`torch.Tensor`, *optional*):
            Deprecated and unused.
        unsqueeze_dim (`int`, *optional*, defaults to 1):
            The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
            sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
            that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
            k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
            cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
            the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
    Returns:
        `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
    """
    dtype = q.dtype
    q = q.float()
    k = k.float()
    cos = cos.unsqueeze(unsqueeze_dim)
    sin = sin.unsqueeze(unsqueeze_dim)
    q_embed = (q * cos) + (rotate_half(q) * sin)
    k_embed = (k * cos) + (rotate_half(k) * sin)
    return q_embed.to(dtype=dtype), k_embed.to(dtype=dtype)


class CohereMLP(LlamaMLP):
    def __init__(self, config):
        super().__init__(config)
        self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
        self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
        self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)


class CohereAttention(LlamaAttention):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config: CohereConfig, layer_idx: Optional[int] = None):
        super().__init__(config, layer_idx)
        self.use_qk_norm = config.use_qk_norm
        if self.use_qk_norm:
            self.q_norm = CohereLayerNorm(
                hidden_size=(config.num_attention_heads, self.head_dim), eps=config.layer_norm_eps
            )
            self.k_norm = CohereLayerNorm(
                hidden_size=(config.num_key_value_heads, self.head_dim), eps=config.layer_norm_eps
            )

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: Tuple[torch.Tensor, torch.Tensor],
        attention_mask: Optional[torch.Tensor],
        past_key_value: Optional[Cache] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        input_shape = hidden_states.shape[:-1]
        hidden_shape = (*input_shape, -1, self.head_dim)

        query_states = self.q_proj(hidden_states).view(hidden_shape)
        key_states = self.k_proj(hidden_states).view(hidden_shape)
        value_states = self.v_proj(hidden_states).view(hidden_shape)

        if self.use_qk_norm:  # main diff from Llama
            query_states = self.q_norm(query_states)
            key_states = self.k_norm(key_states)

        query_states = query_states.transpose(1, 2)
        key_states = key_states.transpose(1, 2)
        value_states = value_states.transpose(1, 2)

        cos, sin = position_embeddings
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)

        if past_key_value is not None:
            # sin and cos are specific to RoPE models; cache_position is needed for the static cache
            cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
            key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)

        attention_interface: Callable = eager_attention_forward
        if self.config._attn_implementation != "eager":
            if self.config._attn_implementation == "sdpa" and kwargs.get("output_attentions", False):
                logger.warning_once(
                    "`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. "
                    "Falling back to eager attention. This warning can be removed using the argument "
                    '`attn_implementation="eager"` when loading the model.'
                )
            else:
                attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]

        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            attention_mask,
            dropout=0.0 if not self.training else self.attention_dropout,
            scaling=self.scaling,
            **kwargs,
        )

        attn_output = attn_output.reshape(*input_shape, -1).contiguous()
        attn_output = self.o_proj(attn_output)
        return attn_output, attn_weights


class CohereDecoderLayer(GradientCheckpointingLayer):
    def __init__(self, config: CohereConfig, layer_idx: int):
        super().__init__()
        self.hidden_size = config.hidden_size
        self.self_attn = CohereAttention(config=config, layer_idx=layer_idx)
        self.mlp = CohereMLP(config)
        self.input_layernorm = CohereLayerNorm(hidden_size=(config.hidden_size), eps=config.layer_norm_eps)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Cache] = None,
        output_attentions: Optional[bool] = False,
        use_cache: Optional[bool] = False,
        cache_position: Optional[torch.LongTensor] = None,
        position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
        """
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`, *optional*):
                attention mask of size `(batch_size, sequence_length)` if flash attention is used or `(batch_size, 1,
                query_sequence_length, key_sequence_length)` if default attention is used.
            past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            use_cache (`bool`, *optional*):
                If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
                (see `past_key_values`).
            cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
                Indices depicting the position of the input sequence tokens in the sequence
            position_embeddings (`Tuple[torch.FloatTensor, torch.FloatTensor]`, *optional*):
                Tuple containing the cosine and sine positional embeddings of shape `(batch_size, seq_len, head_dim)`,
                with `head_dim` being the embedding dimension of each attention head.
        """
        residual = hidden_states

        hidden_states = self.input_layernorm(hidden_states)

        # Self Attention
        hidden_states_attention, self_attn_weights = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_value=past_key_value,
            output_attentions=output_attentions,
            use_cache=use_cache,
            cache_position=cache_position,
            position_embeddings=position_embeddings,
            **kwargs,
        )

        # Fully Connected
        hidden_states_mlp = self.mlp(hidden_states)

        # Parallel residual: the attention and MLP branches are both added to the same residual stream
        hidden_states = residual + hidden_states_attention + hidden_states_mlp

        outputs = (hidden_states,)
        if output_attentions:
            outputs += (self_attn_weights,)

        return outputs


class CoherePreTrainedModel(LlamaPreTrainedModel):
    def _init_weights(self, module):
        std = self.config.initializer_range
        if isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, CohereLayerNorm):
            module.weight.data.fill_(1.0)


class CohereModel(LlamaModel):
    def __init__(self, config: CohereConfig):
        super().__init__(config)
        self.layers = nn.ModuleList(
            [CohereDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
        )
        self.rotary_emb = CohereRotaryEmbedding(config=config)
        self.norm = CohereLayerNorm(hidden_size=(config.hidden_size), eps=config.layer_norm_eps)


class KwargsForCausalLM(FlashAttentionKwargs, LossKwargs): ...


class CohereForCausalLM(LlamaForCausalLM):
    def __init__(self, config):
        super().__init__(config)
        self.model = CohereModel(config)
        self.logit_scale = config.logit_scale
        self.tie_word_embeddings = config.tie_word_embeddings

    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        logits_to_keep: Union[int, torch.Tensor] = 0,
        **kwargs: Unpack[KwargsForCausalLM],
    ) -> CausalLMOutputWithPast:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Example:

        ```python
        >>> from transformers import AutoTokenizer, CohereForCausalLM

        >>> model = CohereForCausalLM.from_pretrained("CohereForAI/c4ai-command-r-v01")
        >>> tokenizer = AutoTokenizer.from_pretrained("CohereForAI/c4ai-command-r-v01")

        >>> prompt = "Hey, are you conscious? Can you talk to me?"
        >>> inputs = tokenizer(prompt, return_tensors="pt")

        >>> # Generate
        >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
        >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        outputs: BaseModelOutputWithPast = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            cache_position=cache_position,
        )

        hidden_states = outputs.last_hidden_state
        # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
        slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
        logits = self.lm_head(hidden_states[:, slice_indices, :])
        logits = logits * self.logit_scale  # main diff from Llama

        loss = None
        if labels is not None:
            loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


__all__ = ["CohereForCausalLM", "CohereModel", "CoherePreTrainedModel"]