
from typing import Callable, Optional, Tuple, Union

import torch
import torch.utils.checkpoint
from torch import nn

from ...activations import ACT2FN
from ...cache_utils import Cache, DynamicCache
from ...generation import GenerationMixin
from ...modeling_attn_mask_utils import AttentionMaskConverter
from ...modeling_flash_attention_utils import FlashAttentionKwargs
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_outputs import (
    BaseModelOutputWithPast,
    CausalLMOutputWithPast,
    QuestionAnsweringModelOutput,
    SequenceClassifierOutputWithPast,
    TokenClassifierOutput,
)
from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...processing_utils import Unpack
from ...pytorch_utils import ALL_LAYERNORM_LAYERS
from ...utils import (
    LossKwargs,
    auto_docstring,
    can_return_tuple,
    is_torch_flex_attn_available,
    logging,
)
from .configuration_llama import LlamaConfig


if is_torch_flex_attn_available():
    from torch.nn.attention.flex_attention import BlockMask

    from ...integrations.flex_attention import make_flex_block_causal_mask

from ...integrations import use_kernel_forward_from_hub


logger = logging.get_logger(__name__)


@use_kernel_forward_from_hub("RMSNorm")
class LlamaRMSNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        """
        LlamaRMSNorm is equivalent to T5LayerNorm
        """
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        input_dtype = hidden_states.dtype
        hidden_states = hidden_states.to(torch.float32)
        variance = hidden_states.pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        return self.weight * hidden_states.to(input_dtype)

    def extra_repr(self):
        return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"


ALL_LAYERNORM_LAYERS.append(LlamaRMSNorm)


class LlamaRotaryEmbedding(nn.Module):
    def __init__(self, config: LlamaConfig, device=None):
        super().__init__()
        # BC: "rope_type" was originally "type"
        if hasattr(config, "rope_scaling") and config.rope_scaling is not None:
            self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type"))
        else:
            self.rope_type = "default"
        self.max_seq_len_cached = config.max_position_embeddings
        self.original_max_seq_len = config.max_position_embeddings

        self.config = config
        self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]

        inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
        self.register_buffer("inv_freq", inv_freq, persistent=False)
        self.original_inv_freq = self.inv_freq

    @torch.no_grad()
    @dynamic_rope_update  # power user: used with advanced RoPE types (e.g. dynamic rope)
    def forward(self, x, position_ids):
        inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
        position_ids_expanded = position_ids[:, None, :].float()

        device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
        with torch.autocast(device_type=device_type, enabled=False):  # Force float32
            freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
            emb = torch.cat((freqs, freqs), dim=-1)
            cos = emb.cos() * self.attention_scaling
            sin = emb.sin() * self.attention_scaling

        return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)


def rotate_half(x):
    """Rotates half the hidden dims of the input."""
    x1 = x[..., : x.shape[-1] // 2]
    x2 = x[..., x.shape[-1] // 2 :]
    return torch.cat((-x2, x1), dim=-1)


def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
    """Applies Rotary Position Embedding to the query and key tensors.
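
    In other words, with per-position angles derived from the rotary frequencies, both tensors are transformed as
    `x_embed = (x * cos) + (rotate_half(x) * sin)` (a summary of the code below, not an additional code path).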

    Args:
        q (`torch.Tensor`): The query tensor.
        k (`torch.Tensor`): The key tensor.
        cos (`torch.Tensor`): The cosine part of the rotary embedding.
        sin (`torch.Tensor`): The sine part of the rotary embedding.
        position_ids (`torch.Tensor`, *optional*):
            Deprecated and unused.
        unsqueeze_dim (`int`, *optional*, defaults to 1):
            The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
            sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
            that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
            k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
            cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
            the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
    Returns:
        `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
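
    Example (illustrative only; the shapes below are hypothetical and chosen just to show the broadcasting):

        >>> q = torch.randn(1, 8, 6, 64)   # [batch, heads, seq_len, head_dim]
        >>> k = torch.randn(1, 8, 6, 64)
        >>> cos = torch.randn(1, 6, 64)    # rotary features per position: [batch, seq_len, head_dim]
        >>> sin = torch.randn(1, 6, 64)
        >>> q_embed, k_embed = apply_rotary_pos_emb(q, k, cos, sin)  # unsqueeze_dim=1 broadcasts over heads
        >>> q_embed.shape
        torch.Size([1, 8, 6, 64])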
    """
    cos = cos.unsqueeze(unsqueeze_dim)
    sin = sin.unsqueeze(unsqueeze_dim)
    q_embed = (q * cos) + (rotate_half(q) * sin)
    k_embed = (k * cos) + (rotate_half(k) * sin)
    return q_embed, k_embed


class LlamaMLP(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.hidden_size = config.hidden_size
        self.intermediate_size = config.intermediate_size
        self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.mlp_bias)
        self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.mlp_bias)
        self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=config.mlp_bias)
        self.act_fn = ACT2FN[config.hidden_act]

    def forward(self, x):
        down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
        return down_proj


def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
    """
    This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
    num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
    """
    batch, num_key_value_heads, slen, head_dim = hidden_states.shape
    if n_rep == 1:
        return hidden_states
    hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
    return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)


def eager_attention_forward(
    module: nn.Module,
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    attention_mask: Optional[torch.Tensor],
    scaling: float,
    dropout: float = 0.0,
    **kwargs,
):
    key_states = repeat_kv(key, module.num_key_value_groups)
    value_states = repeat_kv(value, module.num_key_value_groups)

    attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
    if attention_mask is not None:
        causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
        attn_weights = attn_weights + causal_mask

    attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
    attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
    attn_output = torch.matmul(attn_weights, value_states)
    attn_output = attn_output.transpose(1, 2).contiguous()

    return attn_output, attn_weights


class LlamaAttention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config: LlamaConfig, layer_idx: int):
        super().__init__()
        self.config = config
        self.layer_idx = layer_idx
        self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
        self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
        self.scaling = self.head_dim**-0.5
        self.attention_dropout = config.attention_dropout
        self.is_causal = True

        self.q_proj = nn.Linear(
            config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.attention_bias
        )
        self.k_proj = nn.Linear(
            config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
        )
        self.v_proj = nn.Linear(
            config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
        )
        self.o_proj = nn.Linear(
            config.num_attention_heads * self.head_dim, config.hidden_size, bias=config.attention_bias
        )

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: Tuple[torch.Tensor, torch.Tensor],
        attention_mask: Optional[torch.Tensor],
        past_key_value: Optional[Cache] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        input_shape = hidden_states.shape[:-1]
        hidden_shape = (*input_shape, -1, self.head_dim)

        query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
        key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2)
        value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)

        cos, sin = position_embeddings
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)

        if past_key_value is not None:
            # sin and cos are specific to RoPE models; cache_position needed for the static cache
            cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
            key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)

        attention_interface: Callable = eager_attention_forward
        if self.config._attn_implementation != "eager":
            if self.config._attn_implementation == "sdpa" and kwargs.get("output_attentions", False):
                logger.warning_once(
                    "`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`."
                    " Falling back to eager attention. This warning can be removed using the argument"
                    ' `attn_implementation="eager"` when loading the model.'
                )
            else:
                attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]

        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            attention_mask,
            dropout=0.0 if not self.training else self.attention_dropout,
            scaling=self.scaling,
            **kwargs,
        )

        attn_output = attn_output.reshape(*input_shape, -1).contiguous()
        attn_output = self.o_proj(attn_output)
        return attn_output, attn_weights


class LlamaDecoderLayer(GradientCheckpointingLayer):
    def __init__(self, config: LlamaConfig, layer_idx: int):
        super().__init__()
        self.hidden_size = config.hidden_size
        self.self_attn = LlamaAttention(config=config, layer_idx=layer_idx)
        self.mlp = LlamaMLP(config)
        self.input_layernorm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.post_attention_layernorm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Cache] = None,
        output_attentions: Optional[bool] = False,
        use_cache: Optional[bool] = False,
        cache_position: Optional[torch.LongTensor] = None,
        position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
        residual = hidden_states
        hidden_states = self.input_layernorm(hidden_states)

        # Self Attention
        hidden_states, self_attn_weights = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_value=past_key_value,
            output_attentions=output_attentions,
            use_cache=use_cache,
            cache_position=cache_position,
            position_embeddings=position_embeddings,
            **kwargs,
        )
        hidden_states = residual + hidden_states

        # Fully Connected
        residual = hidden_states
        hidden_states = self.post_attention_layernorm(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + hidden_states

        outputs = (hidden_states,)
        if output_attentions:
            outputs += (self_attn_weights,)

        return outputs


@auto_docstring
class LlamaPreTrainedModel(PreTrainedModel):
    config_class = LlamaConfig
    base_model_prefix = "model"
    supports_gradient_checkpointing = True
    _no_split_modules = ["LlamaDecoderLayer"]
    _skip_keys_device_placement = ["past_key_values"]
    _supports_flash_attn_2 = True
    _supports_sdpa = True
    _supports_flex_attn = True
    _supports_cache_class = True
    _supports_quantized_cache = True
    _supports_static_cache = True
    _supports_attention_backend = True

    def _init_weights(self, module):
        std = self.config.initializer_range
        if isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, LlamaRMSNorm):
            module.weight.data.fill_(1.0)


@auto_docstring
class LlamaModel(LlamaPreTrainedModel):
    def __init__(self, config: LlamaConfig):
        super().__init__(config)
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size

        self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
        self.layers = nn.ModuleList(
            [LlamaDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
        )
        self.norm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.rotary_emb = LlamaRotaryEmbedding(config=config)
        self.gradient_checkpointing = False

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embed_tokens

    def set_input_embeddings(self, value):
        self.embed_tokens = value

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **flash_attn_kwargs: Unpack[FlashAttentionKwargs],
    ) -> BaseModelOutputWithPast:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache

        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")

        if self.gradient_checkpointing and self.training and use_cache:
            logger.warning_once(
                "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
            )
            use_cache = False

        if not isinstance(past_key_values, (type(None), Cache)):
            raise ValueError("The `past_key_values` should be either a `Cache` object or `None`.")

        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)

        if use_cache and past_key_values is None:
            past_key_values = DynamicCache()

        if cache_position is None:
            past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
            cache_position = torch.arange(
                past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
            )

        if position_ids is None:
            position_ids = cache_position.unsqueeze(0)

        causal_mask = self._update_causal_mask(
            attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions
        )

        hidden_states = inputs_embeds

        # create position embeddings to be shared across the decoder layers
        position_embeddings = self.rotary_emb(hidden_states, position_ids)

        # decoder layers
        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None

        for decoder_layer in self.layers[: self.config.num_hidden_layers]:
            if output_hidden_states:
                all_hidden_states += (hidden_states,)

            layer_outputs = decoder_layer(
                hidden_states,
                attention_mask=causal_mask,
                position_ids=position_ids,
                past_key_value=past_key_values,
                output_attentions=output_attentions,
                use_cache=use_cache,
                cache_position=cache_position,
                position_embeddings=position_embeddings,
                **flash_attn_kwargs,
            )

            hidden_states = layer_outputs[0]

            if output_attentions:
                all_self_attns += (layer_outputs[1],)

        hidden_states = self.norm(hidden_states)

        # add hidden states from the last decoder layer
        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=past_key_values if use_cache else None,
            hidden_states=all_hidden_states,
            attentions=all_self_attns,
        )

    def _update_causal_mask(
        self,
        attention_mask: Union[torch.Tensor, "BlockMask"],
        input_tensor: torch.Tensor,
        cache_position: torch.Tensor,
        past_key_values: Cache,
        output_attentions: bool = False,
    ):
        if self.config._attn_implementation == "flash_attention_2":
            if attention_mask is not None and (attention_mask == 0.0).any():
                return attention_mask
            return None
        if self.config._attn_implementation == "flex_attention":
            if isinstance(attention_mask, torch.Tensor):
                attention_mask = make_flex_block_causal_mask(attention_mask)
            return attention_mask

        # For SDPA, when possible, we will rely on its `is_causal` argument instead of its `attn_mask` argument, in
        # order to dispatch on Flash Attention 2. This feature is not compatible with static cache, as SDPA will fail
        # to infer the attention mask.
        past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
        using_compilable_cache = past_key_values.is_compileable if past_key_values is not None else False

        # When output attentions is True, sdpa implementation's forward method calls the eager implementation's forward
        if self.config._attn_implementation == "sdpa" and not using_compilable_cache and not output_attentions:
            if AttentionMaskConverter._ignore_causal_mask_sdpa(
                attention_mask,
                inputs_embeds=input_tensor,
                past_key_values_length=past_seen_tokens,
                is_training=self.training,
            ):
                return None

        dtype = input_tensor.dtype
        sequence_length = input_tensor.shape[1]
        if using_compilable_cache:
            target_length = past_key_values.get_max_cache_shape()
        else:
            target_length = (
                attention_mask.shape[-1]
                if isinstance(attention_mask, torch.Tensor)
                else past_seen_tokens + sequence_length + 1
            )

        # In case the provided `attention` mask is 2D, we generate a causal mask here (4D).
        causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position(
            attention_mask,
            sequence_length=sequence_length,
            target_length=target_length,
            dtype=dtype,
            cache_position=cache_position,
            batch_size=input_tensor.shape[0],
        )

        if (
            self.config._attn_implementation == "sdpa"
            and attention_mask is not None
            and attention_mask.device.type in ["cuda", "xpu", "npu"]
            and not output_attentions
        ):
            # Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when
            # using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path.
            # Details: https://github.com/pytorch/pytorch/issues/110213
            min_dtype = torch.finfo(dtype).min
            causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)

        return causal_mask

    @staticmethod
    def _prepare_4d_causal_attention_mask_with_cache_position(
        attention_mask: torch.Tensor,
        sequence_length: int,
        target_length: int,
        dtype: torch.dtype,
        cache_position: torch.Tensor,
        batch_size: int,
        **kwargs,
    ):
        """
        Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
        `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.

        Args:
            attention_mask (`torch.Tensor`):
                A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape
                `(batch_size, 1, query_length, key_value_length)`.
            sequence_length (`int`):
                The sequence length being processed.
            target_length (`int`):
                The target length: when generating with static cache, the mask should be as long as the static cache,
                to account for the 0 padding, the part of the cache that is not filled yet.
            dtype (`torch.dtype`):
                The dtype to use for the 4D attention mask.
            cache_position (`torch.Tensor`):
                Indices depicting the position of the input sequence tokens in the sequence.
            batch_size (`torch.Tensor`):
                Batch size.
        """
        if attention_mask is not None and attention_mask.dim() == 4:
            # In this case we assume that the mask comes already in inverted form and requires no inversion or slicing.
            causal_mask = attention_mask
        else:
            min_dtype = torch.finfo(dtype).min
            causal_mask = torch.full(
                (sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=cache_position.device
            )
            if sequence_length != 1:
                causal_mask = torch.triu(causal_mask, diagonal=1)
            causal_mask *= torch.arange(target_length, device=cache_position.device) > cache_position.reshape(-1, 1)
            causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)
            if attention_mask is not None:
                causal_mask = causal_mask.clone()  # copy to contiguous memory for in-place edit
                mask_length = attention_mask.shape[-1]
                padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :].to(
                    causal_mask.device
                )
                padding_mask = padding_mask == 0
                causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(
                    padding_mask, min_dtype
                )

        return causal_mask


class KwargsForCausalLM(FlashAttentionKwargs, LossKwargs): ...


@auto_docstring
class LlamaForCausalLM(LlamaPreTrainedModel, GenerationMixin):
    _tied_weights_keys = ["lm_head.weight"]
    _tp_plan = {"lm_head": "colwise_rep"}
    _pp_plan = {"lm_head": (["hidden_states"], ["logits"])}

    def __init__(self, config):
        super().__init__(config)
        self.model = LlamaModel(config)
        self.vocab_size = config.vocab_size
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.embed_tokens

    def set_input_embeddings(self, value):
        self.model.embed_tokens = value

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    def set_decoder(self, decoder):
        self.model = decoder

    def get_decoder(self):
        return self.model

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        logits_to_keep: Union[int, torch.Tensor] = 0,
        **kwargs: Unpack[KwargsForCausalLM],
    ) -> CausalLMOutputWithPast:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Example:

        ```python
        >>> from transformers import AutoTokenizer, LlamaForCausalLM

        >>> model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf")
        >>> tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-hf")

        >>> prompt = "Hey, are you conscious? Can you talk to me?"
        >>> inputs = tokenizer(prompt, return_tensors="pt")

        >>> # Generate
        >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
        >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        outputs: BaseModelOutputWithPast = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            cache_position=cache_position,
            **kwargs,
        )

        hidden_states = outputs.last_hidden_state
        # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
        slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
        logits = self.lm_head(hidden_states[:, slice_indices, :])

        loss = None
        if labels is not None:
            loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


@auto_docstring(
    custom_intro="""
    The LLaMa Model transformer with a sequence classification head on top (linear layer).

    [`LlamaForSequenceClassification`] uses the last token in order to do the classification, as other causal models
    (e.g. GPT-2) do.

    Since it does classification on the last token, it requires to know the position of the last token. If a
    `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
    no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
    padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
    each row of the batch).
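
    As an illustrative sketch (hypothetical inputs, not part of the API): with `pad_token_id = 0` and the
    right-padded batch `[[5, 6, 7, 0, 0], [8, 9, 10, 11, 12]]`, logits are computed for every position and then
    pooled at the last non-pad positions, here 2 and 4, before the loss is applied.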
    """
)
class LlamaForSequenceClassification(LlamaPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.model = LlamaModel(config)
        self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.embed_tokens

    def set_input_embeddings(self, value):
        self.model.embed_tokens = value

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
    ) -> SequenceClassifierOutputWithPast:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        transformer_outputs: BaseModelOutputWithPast = self.model(
            input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
        )
        hidden_states = transformer_outputs.last_hidden_state
        logits = self.score(hidden_states)

        if input_ids is not None:
            batch_size = input_ids.shape[0]
        else:
            batch_size = inputs_embeds.shape[0]

        if self.config.pad_token_id is None and batch_size != 1:
            raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
        if self.config.pad_token_id is None:
            last_non_pad_token = -1
        elif input_ids is not None:
            # To handle both left- and right- padding, we take the rightmost token that is not equal to pad_token_id
            non_pad_mask = (input_ids != self.config.pad_token_id).to(logits.device, torch.int32)
            token_indices = torch.arange(input_ids.shape[-1], device=logits.device, dtype=torch.int32)
            last_non_pad_token = (token_indices * non_pad_mask).argmax(-1)
        else:
            last_non_pad_token = -1
            logger.warning_once(
                f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be "
                "unexpected if using padding tokens in conjunction with `inputs_embeds.`"
            )

        pooled_logits = logits[torch.arange(batch_size, device=logits.device), last_non_pad_token]

        loss = None
        if labels is not None:
            loss = self.loss_function(logits=logits, labels=labels, pooled_logits=pooled_logits, config=self.config)

        return SequenceClassifierOutputWithPast(
            loss=loss,
            logits=pooled_logits,
            past_key_values=transformer_outputs.past_key_values,
            hidden_states=transformer_outputs.hidden_states,
            attentions=transformer_outputs.attentions,
        )


@auto_docstring
class LlamaForQuestionAnswering(LlamaPreTrainedModel):
    base_model_prefix = "transformer"

    def __init__(self, config):
        super().__init__(config)
        self.transformer = LlamaModel(config)
        self.qa_outputs = nn.Linear(config.hidden_size, 2)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.transformer.embed_tokens

    def set_input_embeddings(self, value):
        self.transformer.embed_tokens = value

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        start_positions: Optional[torch.LongTensor] = None,
        end_positions: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        **kwargs,
    ) -> QuestionAnsweringModelOutput:
        outputs: BaseModelOutputWithPast = self.transformer(
            input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
        )

        sequence_output = outputs.last_hidden_state

        logits = self.qa_outputs(sequence_output)
        start_logits, end_logits = logits.split(1, dim=-1)
        start_logits = start_logits.squeeze(-1).contiguous()
        end_logits = end_logits.squeeze(-1).contiguous()

        loss = None
        if start_positions is not None and end_positions is not None:
            loss = self.loss_function(start_logits, end_logits, start_positions, end_positions, **kwargs)

        return QuestionAnsweringModelOutput(
            loss=loss,
            start_logits=start_logits,
            end_logits=end_logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


@auto_docstring
class LlamaForTokenClassification(LlamaPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.model = LlamaModel(config)
        if getattr(config, "classifier_dropout", None) is not None:
            classifier_dropout = config.classifier_dropout
        elif getattr(config, "hidden_dropout", None) is not None:
            classifier_dropout = config.hidden_dropout
        else:
            classifier_dropout = 0.1
        self.dropout = nn.Dropout(classifier_dropout)
        self.score = nn.Linear(config.hidden_size, config.num_labels)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.embed_tokens

    def set_input_embeddings(self, value):
        self.model.embed_tokens = value

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
    ) -> TokenClassifierOutput:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        outputs: BaseModelOutputWithPast = self.model(
            input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
        )
        sequence_output = outputs.last_hidden_state
        sequence_output = self.dropout(sequence_output)
        logits = self.score(sequence_output)

        loss = None
        if labels is not None:
            loss = self.loss_function(logits, labels, self.config)

        return TokenClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


__all__ = [
    "LlamaForCausalLM",
    "LlamaModel",
    "LlamaPreTrainedModel",
    "LlamaForSequenceClassification",
    "LlamaForQuestionAnswering",
    "LlamaForTokenClassification",
]