# coding=utf-8
# Copyright 2024 Microsoft and the HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import Callable, Optional, Tuple, Union

import torch
from torch import nn

from ...activations import ACT2FN
from ...cache_utils import Cache, DynamicCache, SlidingWindowCache, StaticCache
from ...generation import GenerationMixin
from ...integrations import use_kernel_forward_from_hub
from ...modeling_attn_mask_utils import AttentionMaskConverter
from ...modeling_flash_attention_utils import FlashAttentionKwargs
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_outputs import (
    BaseModelOutputWithPast,
    CausalLMOutputWithPast,
    SequenceClassifierOutputWithPast,
    TokenClassifierOutput,
)
from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...processing_utils import Unpack
from ...utils import LossKwargs, auto_docstring, can_return_tuple, is_torch_flex_attn_available, logging
from .configuration_phi3 import Phi3Config


if is_torch_flex_attn_available():
    from torch.nn.attention.flex_attention import BlockMask

    from ...integrations.flex_attention import make_flex_block_causal_mask


logger = logging.get_logger(__name__)


class Phi3MLP(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.gate_up_proj = nn.Linear(config.hidden_size, 2 * config.intermediate_size, bias=False)
        self.down_proj = nn.Linear(config.intermediate_size, config.hidden_size, bias=False)
        self.activation_fn = ACT2FN[config.hidden_act]

    def forward(self, hidden_states: torch.FloatTensor) -> torch.FloatTensor:
        up_states = self.gate_up_proj(hidden_states)

        gate, up_states = up_states.chunk(2, dim=-1)
        up_states = up_states * self.activation_fn(gate)

        return self.down_proj(up_states)


def rotate_half(x):
    """Rotates half the hidden dims of the input."""
    x1 = x[..., : x.shape[-1] // 2]
    x2 = x[..., x.shape[-1] // 2 :]
    return torch.cat((-x2, x1), dim=-1)
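# NOTE: the `_sketch_*` helpers in this file are illustrative editor additions, not
# upstream transformers API. This one pins down the rotation convention used by
# `rotate_half`: the second half of the last dimension is negated and moved in front
# of the first half, which is how RoPE realizes a 2D rotation on paired features.
def _sketch_rotate_half():
    x = torch.arange(4.0).reshape(1, 1, 1, 4)  # last dim: [0., 1., 2., 3.]
    return rotate_half(x)  # last dim: [-2., -3., 0., 1.]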
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
    """
    This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
    num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
    """
    batch, num_key_value_heads, slen, head_dim = hidden_states.shape
    if n_rep == 1:
        return hidden_states
    hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
    return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)


def eager_attention_forward(
    module: nn.Module,
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    attention_mask: Optional[torch.Tensor],
    scaling: float,
    dropout: float = 0.0,
    **kwargs,
):
    key_states = repeat_kv(key, module.num_key_value_groups)
    value_states = repeat_kv(value, module.num_key_value_groups)

    attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
    if attention_mask is not None:
        causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
        attn_weights = attn_weights + causal_mask

    attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
    attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
    attn_output = torch.matmul(attn_weights, value_states)
    attn_output = attn_output.transpose(1, 2).contiguous()

    return attn_weights if False else (attn_output, attn_weights)
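# NOTE: illustrative editor sketch, not upstream API. `eager_attention_forward` only
# reads `num_key_value_groups` and `training` from `module`, so a stand-in namespace
# is enough to exercise the grouped-query attention path above.
def _sketch_eager_attention():
    from types import SimpleNamespace

    batch, num_heads, num_kv_heads, seq_len, head_dim = 1, 4, 2, 6, 8
    module = SimpleNamespace(num_key_value_groups=num_heads // num_kv_heads, training=False)
    query = torch.randn(batch, num_heads, seq_len, head_dim)
    key = torch.randn(batch, num_kv_heads, seq_len, head_dim)
    value = torch.randn(batch, num_kv_heads, seq_len, head_dim)
    attn_output, attn_weights = eager_attention_forward(
        module, query, key, value, attention_mask=None, scaling=head_dim**-0.5
    )
    # attn_output: (1, 6, 4, 8) after the final transpose; attn_weights: (1, 4, 6, 6)
    return attn_output.shape, attn_weights.shape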
def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
    """Applies Rotary Position Embedding to the query and key tensors.

    Args:
        q (`torch.Tensor`): The query tensor.
        k (`torch.Tensor`): The key tensor.
        cos (`torch.Tensor`): The cosine part of the rotary embedding.
        sin (`torch.Tensor`): The sine part of the rotary embedding.
        position_ids (`torch.Tensor`, *optional*):
            Deprecated and unused.
        unsqueeze_dim (`int`, *optional*, defaults to 1):
            The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
            sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
            that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
            k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
            cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
            the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
    Returns:
        `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
    """
    cos = cos.unsqueeze(unsqueeze_dim)
    sin = sin.unsqueeze(unsqueeze_dim)
    rotary_dim = cos.shape[-1]
    q_rot, q_pass = q[..., :rotary_dim], q[..., rotary_dim:]
    k_rot, k_pass = k[..., :rotary_dim], k[..., rotary_dim:]

    q_embed = torch.cat([(q_rot * cos) + (rotate_half(q_rot) * sin), q_pass], dim=-1)
    k_embed = torch.cat([(k_rot * cos) + (rotate_half(k_rot) * sin), k_pass], dim=-1)
    return q_embed, k_embed
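# NOTE: illustrative editor sketch, not upstream API. Phi-3 rotates only the first
# `rotary_dim` features of each head and passes the rest through, which is why
# `apply_rotary_pos_emb` splits q/k before calling `rotate_half`. Here the rotary
# dimension equals the head dimension, so nothing is passed through.
def _sketch_apply_rotary():
    batch, heads, seq, head_dim, rotary_dim = 1, 2, 4, 8, 8
    q = torch.randn(batch, heads, seq, head_dim)
    k = torch.randn(batch, heads, seq, head_dim)
    inv_freq = 1.0 / (10000 ** (torch.arange(0, rotary_dim, 2).float() / rotary_dim))
    freqs = torch.outer(torch.arange(seq).float(), inv_freq)
    emb = torch.cat((freqs, freqs), dim=-1)[None, :, :]  # (1, seq, rotary_dim)
    q_embed, k_embed = apply_rotary_pos_emb(q, k, emb.cos(), emb.sin())
    return q_embed.shape, k_embed.shape  # both (1, 2, 4, 8)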
class Phi3Attention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config: Phi3Config, layer_idx: Optional[int] = None):
        super().__init__()
        self.config = config
        self.layer_idx = layer_idx
        self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
        self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
        self.scaling = self.head_dim**-0.5
        self.attention_dropout = config.attention_dropout
        self.is_causal = True

        op_size = config.num_attention_heads * self.head_dim + 2 * (config.num_key_value_heads * self.head_dim)
        self.o_proj = nn.Linear(config.num_attention_heads * self.head_dim, config.hidden_size, bias=False)
        self.qkv_proj = nn.Linear(config.hidden_size, op_size, bias=False)

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: Tuple[torch.Tensor, torch.Tensor],
        attention_mask: Optional[torch.Tensor],
        past_key_value: Optional[Cache] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        input_shape = hidden_states.shape[:-1]
        hidden_shape = (*input_shape, -1, self.head_dim)

        qkv = self.qkv_proj(hidden_states)
        query_pos = self.config.num_attention_heads * self.head_dim
        query_states = qkv[..., :query_pos]
        key_states = qkv[..., query_pos : query_pos + self.config.num_key_value_heads * self.head_dim]
        value_states = qkv[..., query_pos + self.config.num_key_value_heads * self.head_dim :]

        query_states = query_states.view(hidden_shape).transpose(1, 2)
        key_states = key_states.view(hidden_shape).transpose(1, 2)
        value_states = value_states.view(hidden_shape).transpose(1, 2)

        cos, sin = position_embeddings
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)

        if past_key_value is not None:
            # sin and cos are specific to RoPE models; cache_position needed for the static cache
            cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
            key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)

        attention_interface: Callable = eager_attention_forward
        if self.config._attn_implementation != "eager":
            if self.config._attn_implementation == "sdpa" and kwargs.get("output_attentions", False):
                logger.warning_once(
                    "`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`."
                    " Falling back to eager attention. This warning can be removed using the argument"
                    ' `attn_implementation="eager"` when loading the model.'
                )
            else:
                attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]

        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            attention_mask,
            dropout=0.0 if not self.training else self.attention_dropout,
            scaling=self.scaling,
            sliding_window=getattr(self.config, "sliding_window", None),
            **kwargs,
        )

        attn_output = attn_output.reshape(*input_shape, -1).contiguous()
        attn_output = self.o_proj(attn_output)
        return attn_output, attn_weights
@use_kernel_forward_from_hub("RMSNorm")
class Phi3RMSNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        """
        Phi3RMSNorm is equivalent to T5LayerNorm
        """
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        input_dtype = hidden_states.dtype
        hidden_states = hidden_states.to(torch.float32)
        variance = hidden_states.pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        return self.weight * hidden_states.to(input_dtype)

    def extra_repr(self):
        return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"
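# NOTE: illustrative editor sketch, not upstream API. With unit weights, RMSNorm
# divides each feature vector by its root-mean-square, so a constant vector maps to
# all ones.
def _sketch_rmsnorm():
    norm = Phi3RMSNorm(hidden_size=4, eps=0.0)
    x = torch.tensor([[2.0, 2.0, 2.0, 2.0]])
    # RMS of x is 2.0 and the weight is initialized to 1, so the output is all ones.
    return norm(x)  # tensor([[1., 1., 1., 1.]])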
class Phi3DecoderLayer(GradientCheckpointingLayer):
    def __init__(self, config: Phi3Config, layer_idx: int):
        super().__init__()
        self.config = config
        self.self_attn = Phi3Attention(config=config, layer_idx=layer_idx)

        self.mlp = Phi3MLP(config)
        self.input_layernorm = Phi3RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.post_attention_layernorm = Phi3RMSNorm(config.hidden_size, eps=config.rms_norm_eps)

        self.resid_attn_dropout = nn.Dropout(config.resid_pdrop)
        self.resid_mlp_dropout = nn.Dropout(config.resid_pdrop)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Cache] = None,
        output_attentions: Optional[bool] = False,
        use_cache: Optional[bool] = False,
        cache_position: Optional[torch.LongTensor] = None,
        position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
        """
        Args:
            hidden_states (`torch.FloatTensor`):
                input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
                `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
            position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
                Indices of positions of each input sequence tokens in the position embeddings. Selected in the range
                `[0, config.n_positions - 1]`. [What are position IDs?](../glossary#position-ids)
            past_key_value (`Cache`, *optional*): cached past key and value projection states
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            use_cache (`bool`, *optional*):
                If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
                (see `past_key_values`).
            cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
                Indices depicting the position of the input sequence tokens in the sequence
            kwargs (`dict`, *optional*):
                Arbitrary kwargs to be ignored, used for FSDP and other methods that injects code
                into the model
        """
        residual = hidden_states

        hidden_states = self.input_layernorm(hidden_states)

        # Self Attention
        hidden_states, self_attn_weights = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_value=past_key_value,
            output_attentions=output_attentions,
            use_cache=use_cache,
            cache_position=cache_position,
            position_embeddings=position_embeddings,
            **kwargs,
        )
        hidden_states = residual + self.resid_attn_dropout(hidden_states)

        residual = hidden_states
        hidden_states = self.post_attention_layernorm(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + self.resid_mlp_dropout(hidden_states)

        outputs = (hidden_states,)
        if output_attentions:
            outputs += (self_attn_weights,)

        return outputs
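# NOTE: illustrative editor sketch, not upstream API. Runs one decoder layer end to
# end to show the pre-norm residual flow above (attention, then MLP, each followed by
# a dropout-regularized residual add). The causal mask is omitted here for brevity,
# so attention in this sketch is unmasked.
def _sketch_decoder_layer():
    config = Phi3Config(hidden_size=64, intermediate_size=128, num_attention_heads=4, num_key_value_heads=2)
    layer = Phi3DecoderLayer(config, layer_idx=0).eval()
    rotary = Phi3RotaryEmbedding(config=config)  # defined further down in this module

    hidden_states = torch.randn(1, 5, config.hidden_size)
    position_ids = torch.arange(5).unsqueeze(0)
    position_embeddings = rotary(hidden_states, position_ids)
    (output,) = layer(hidden_states, position_embeddings=position_embeddings)
    return output.shape  # (1, 5, 64)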
@auto_docstring
class Phi3PreTrainedModel(PreTrainedModel):
    config_class = Phi3Config
    base_model_prefix = "model"
    supports_gradient_checkpointing = True
    _no_split_modules = ["Phi3DecoderLayer"]
    _skip_keys_device_placement = ["past_key_values"]
    _supports_flash_attn_2 = True
    _supports_sdpa = True
    _supports_flex_attn = True
    _supports_cache_class = True
    _supports_quantized_cache = True
    _supports_static_cache = True
    _supports_attention_backend = True
    _version = "0.0.5"

    def _init_weights(self, module):
        std = self.config.initializer_range
        if isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, Phi3RMSNorm):
            module.weight.data.fill_(1.0)


class Phi3RotaryEmbedding(nn.Module):
    def __init__(self, config: Phi3Config, device=None):
        super().__init__()
        # BC: "rope_type" was originally "type"
        if hasattr(config, "rope_scaling") and config.rope_scaling is not None:
            self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type"))
        else:
            self.rope_type = "default"
        self.max_seq_len_cached = config.max_position_embeddings
        self.original_max_seq_len = config.max_position_embeddings

        self.config = config
        self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]

        inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
        self.register_buffer("inv_freq", inv_freq, persistent=False)
        self.original_inv_freq = self.inv_freq

    @torch.no_grad()
    @dynamic_rope_update
    def forward(self, x, position_ids):
        inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
        position_ids_expanded = position_ids[:, None, :].float()

        device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
        with torch.autocast(device_type=device_type, enabled=False):  # force float32
            freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
            emb = torch.cat((freqs, freqs), dim=-1)
            cos = emb.cos() * self.attention_scaling
            sin = emb.sin() * self.attention_scaling

        return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)


@auto_docstring
class Phi3Model(Phi3PreTrainedModel):
    def __init__(self, config: Phi3Config):
        super().__init__(config)
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size

        self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
        self.layers = nn.ModuleList(
            [Phi3DecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
        )
        self.norm = Phi3RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.rotary_emb = Phi3RotaryEmbedding(config=config)
        self.gradient_checkpointing = False

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embed_tokens

    def set_input_embeddings(self, value):
        self.embed_tokens = value

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **flash_attn_kwargs: Unpack[FlashAttentionKwargs],
    ) -> BaseModelOutputWithPast:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache

        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")

        if self.gradient_checkpointing and self.training and use_cache:
            logger.warning_once(
                "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
            )
            use_cache = False

        if not isinstance(past_key_values, (type(None), Cache)):
            raise ValueError("The `past_key_values` should be either a `Cache` object or `None`.")

        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)

        if use_cache and past_key_values is None:
            past_key_values = DynamicCache()

        if cache_position is None:
            past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
            cache_position = torch.arange(
                past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
            )

        if position_ids is None:
            position_ids = cache_position.unsqueeze(0)

        causal_mask = self._update_causal_mask(
            attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions
        )

        hidden_states = inputs_embeds
        position_embeddings = self.rotary_emb(hidden_states, position_ids)

        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None

        for decoder_layer in self.layers[: self.config.num_hidden_layers]:
            if output_hidden_states:
                all_hidden_states += (hidden_states,)

            layer_outputs = decoder_layer(
                hidden_states,
                attention_mask=causal_mask,
                position_ids=position_ids,
                past_key_value=past_key_values,
                output_attentions=output_attentions,
                use_cache=use_cache,
                cache_position=cache_position,
                position_embeddings=position_embeddings,
                **flash_attn_kwargs,
            )

            hidden_states = layer_outputs[0]

            if output_attentions:
                all_self_attns += (layer_outputs[1],)

        hidden_states = self.norm(hidden_states)

        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=past_key_values if use_cache else None,
            hidden_states=all_hidden_states,
            attentions=all_self_attns,
        )

    def _update_causal_mask(
        self,
        attention_mask: Union[torch.Tensor, "BlockMask"],
        input_tensor: torch.Tensor,
        cache_position: torch.Tensor,
        past_key_values: Cache,
        output_attentions: bool = False,
    ):
        if self.config._attn_implementation == "flash_attention_2":
            if attention_mask is not None and past_key_values is not None:
                is_padding_right = attention_mask[:, -1].sum().item() != input_tensor.size()[0]
                if is_padding_right:
                    raise ValueError(
                        "You are attempting to perform batched generation with padding_side='right'"
                        " this may lead to unexpected behaviour for Flash Attention version of Phi3. Make sure to "
                        " call `tokenizer.padding_side  = 'left'` before tokenizing the input. "
                    )
            if attention_mask is not None and 0.0 in attention_mask:
                return attention_mask
            return None
        if self.config._attn_implementation == "flex_attention":
            if isinstance(attention_mask, torch.Tensor):
                attention_mask = make_flex_block_causal_mask(attention_mask)
            return attention_mask

        past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
        using_static_cache = isinstance(past_key_values, StaticCache)
        using_sliding_window_cache = isinstance(past_key_values, SlidingWindowCache)

        # When output_attentions is True, the sdpa implementation falls back to eager
        if (
            self.config._attn_implementation == "sdpa"
            and not (using_static_cache or using_sliding_window_cache)
            and not output_attentions
        ):
            if AttentionMaskConverter._ignore_causal_mask_sdpa(
                attention_mask,
                inputs_embeds=input_tensor,
                past_key_values_length=past_seen_tokens,
                sliding_window=self.config.sliding_window,
                is_training=self.training,
            ):
                return None

        dtype = input_tensor.dtype
        min_dtype = torch.finfo(dtype).min
        sequence_length = input_tensor.shape[1]
        # SlidingWindowCache or StaticCache
        if using_sliding_window_cache or using_static_cache:
            target_length = past_key_values.get_max_cache_shape()
        # DynamicCache or no cache
        else:
            target_length = (
                attention_mask.shape[-1]
                if isinstance(attention_mask, torch.Tensor)
                else past_seen_tokens + sequence_length + 1
            )

        causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position(
            attention_mask,
            sequence_length=sequence_length,
            target_length=target_length,
            dtype=dtype,
            cache_position=cache_position,
            batch_size=input_tensor.shape[0],
            config=self.config,
            past_key_values=past_key_values,
        )

        if (
            self.config._attn_implementation == "sdpa"
            and attention_mask is not None
            and attention_mask.device.type in ["cuda", "xpu", "npu"]
            and not output_attentions
        ):
            # Attend to all tokens in fully masked rows, as required by the
            # memory-efficient path of F.scaled_dot_product_attention.
            # Details: https://github.com/pytorch/pytorch/issues/110213
            causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)

        return causal_mask
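    # NOTE (editor comment, not upstream): `_update_causal_mask` above dispatches per
    # attention backend -- flash-attention-2 keeps the raw 2D mask (or returns None),
    # flex attention wraps tensors into a `BlockMask`, and SDPA may drop the mask when
    # it can rely on `is_causal` instead. Only when an explicit 4D float mask is needed
    # does it delegate to `_prepare_4d_causal_attention_mask_with_cache_position` below.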
    @staticmethod
    def _prepare_4d_causal_attention_mask_with_cache_position(
        attention_mask: torch.Tensor,
        sequence_length: int,
        target_length: int,
        dtype: torch.dtype,
        cache_position: torch.Tensor,
        batch_size: int,
        config: Phi3Config,
        past_key_values: Cache,
    ):
        """
        Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
        `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.

        Args:
            attention_mask (`torch.Tensor`):
                A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape `(batch_size, 1, query_length, key_value_length)`.
            sequence_length (`int`):
                The sequence length being processed.
            target_length (`int`):
                The target length: when generating with static cache, the mask should be as long as the static cache, to account for the 0 padding, the part of the cache that is not filled yet.
            dtype (`torch.dtype`):
                The dtype to use for the 4D attention mask.
            cache_position (`torch.Tensor`):
                Indices depicting the position of the input sequence tokens in the sequence.
            batch_size (`torch.Tensor`):
                Batch size.
            config (`Phi3Config`):
                The model's configuration class
            past_key_values (`Cache`):
                The cache class that is being used currently to generate
        """
        if attention_mask is not None and attention_mask.dim() == 4:
            # In this case we assume that the mask comes already in inverted form and requires no inversion or slicing.
            causal_mask = attention_mask
        else:
            min_dtype = torch.finfo(dtype).min
            causal_mask = torch.full(
                (sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=cache_position.device
            )
            diagonal_attend_mask = torch.arange(target_length, device=cache_position.device) > cache_position.reshape(
                -1, 1
            )
            text_config = config.get_text_config()
            if getattr(text_config, "use_sliding_window", True) and text_config.sliding_window is not None:
                # If we have a sliding window, we should not attend to tokens beyond the sliding window length, so we
                # mask them out as well. The check verifies whether the current checkpoint was trained with one.
                if not isinstance(past_key_values, SlidingWindowCache) or sequence_length > target_length:
                    sliding_attend_mask = torch.arange(target_length, device=cache_position.device) <= (
                        cache_position.reshape(-1, 1) - text_config.sliding_window
                    )
                    diagonal_attend_mask.bitwise_or_(sliding_attend_mask)
            causal_mask *= diagonal_attend_mask
            causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)
            if attention_mask is not None:
                causal_mask = causal_mask.clone()  # copy to contiguous memory for in-place edit
                if attention_mask.shape[-1] > target_length:
                    attention_mask = attention_mask[:, :target_length]
                mask_length = attention_mask.shape[-1]
                padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :].to(
                    causal_mask.device
                )
                padding_mask = padding_mask == 0
                causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(
                    padding_mask, min_dtype
                )
        return causal_mask


class KwargsForCausalLM(FlashAttentionKwargs, LossKwargs): ...
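# NOTE: illustrative editor sketch, not upstream API. Calls the static mask builder in
# `Phi3Model` above with tiny sizes: with no sliding window, position i may attend to
# positions <= i; disallowed entries hold the dtype's most negative value, allowed
# entries hold 0.
def _sketch_causal_mask():
    config = Phi3Config(sliding_window=None)
    mask = Phi3Model._prepare_4d_causal_attention_mask_with_cache_position(
        attention_mask=None,
        sequence_length=4,
        target_length=4,
        dtype=torch.float32,
        cache_position=torch.arange(4),
        batch_size=1,
        config=config,
        past_key_values=None,
    )
    return mask.shape  # (1, 1, 4, 4); upper triangle filled with torch.finfo(torch.float32).min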
@auto_docstring
class Phi3ForCausalLM(Phi3PreTrainedModel, GenerationMixin):
    _tied_weights_keys = ["lm_head.weight"]
    _tp_plan = {"lm_head": "colwise_rep"}
    _pp_plan = {"lm_head": (["hidden_states"], ["logits"])}

    def __init__(self, config):
        super().__init__(config)
        self.model = Phi3Model(config)
        self.vocab_size = config.vocab_size
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.embed_tokens

    def set_input_embeddings(self, value):
        self.model.embed_tokens = value

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    def set_decoder(self, decoder):
        self.model = decoder

    def get_decoder(self):
        return self.model

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        logits_to_keep: Union[int, torch.Tensor] = 0,
        **kwargs: Unpack[KwargsForCausalLM],
    ) -> CausalLMOutputWithPast:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Example:

        ```python
        >>> from transformers import AutoTokenizer, Phi3ForCausalLM

        >>> model = Phi3ForCausalLM.from_pretrained("meta-phi3/Phi3-2-7b-hf")
        >>> tokenizer = AutoTokenizer.from_pretrained("meta-phi3/Phi3-2-7b-hf")

        >>> prompt = "Hey, are you conscious? Can you talk to me?"
        >>> inputs = tokenizer(prompt, return_tensors="pt")

        >>> # Generate
        >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
        >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        outputs: BaseModelOutputWithPast = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            cache_position=cache_position,
        )

        hidden_states = outputs.last_hidden_state
        # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
        slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
        logits = self.lm_head(hidden_states[:, slice_indices, :])

        loss = None
        if labels is not None:
            loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )

    def prepare_inputs_for_generation(
        self,
        input_ids,
        past_key_values=None,
        attention_mask=None,
        inputs_embeds=None,
        cache_position=None,
        position_ids=None,
        use_cache=True,
        logits_to_keep=None,
        **kwargs,
    ):
        # Overwritten -- this model may need to switch between short and long rope, invalidating the cache in the
        # process. When the input length first crosses the long/short factor switching point, force a cache
        # re-computation: slower at that single position, but better than the failure mode otherwise.
        if (
            past_key_values
            and self.config.rope_scaling
            and input_ids.shape[1] >= self.config.original_max_position_embeddings + 1
        ):
            past_length = cache_position[0]
            if past_length <= self.config.original_max_position_embeddings:
                past_key_values = None

        model_inputs = super().prepare_inputs_for_generation(
            input_ids=input_ids,
            past_key_values=past_key_values,
            attention_mask=attention_mask,
            inputs_embeds=inputs_embeds,
            cache_position=cache_position,
            position_ids=position_ids,
            use_cache=use_cache,
            logits_to_keep=logits_to_keep,
            **kwargs,
        )
        return model_inputs
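# NOTE: illustrative editor sketch, not upstream API. Exercises the full causal-LM
# stack with a deliberately tiny, randomly initialized configuration; real use loads
# trained weights via `Phi3ForCausalLM.from_pretrained(...)` as in the docstring
# example above. The special-token ids are overridden so they fit the tiny vocabulary.
def _sketch_tiny_generate():
    config = Phi3Config(
        vocab_size=128,
        hidden_size=64,
        intermediate_size=128,
        num_hidden_layers=2,
        num_attention_heads=4,
        num_key_value_heads=2,
        max_position_embeddings=64,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
    )
    model = Phi3ForCausalLM(config).eval()
    input_ids = torch.randint(3, config.vocab_size, (1, 8))
    generated = model.generate(input_ids, max_new_tokens=4, do_sample=False)
    return generated.shape  # (1, 12)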
@auto_docstring(
    custom_intro="""
    The Phi3 Model transformer with a sequence classification head on top (linear layer).

    [`Phi3ForSequenceClassification`] uses the last token in order to do the classification, as other causal models
    (e.g. GPT-2) do.

    Since it does classification on the last token, it requires to know the position of the last token. If a
    `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
    no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
    padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
    each row of the batch).
    """
)
class Phi3ForSequenceClassification(Phi3PreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.model = Phi3Model(config)
        self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.embed_tokens

    def set_input_embeddings(self, value):
        self.model.embed_tokens = value
    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
    ) -> SequenceClassifierOutputWithPast:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        transformer_outputs: BaseModelOutputWithPast = self.model(
            input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
        )
        hidden_states = transformer_outputs.last_hidden_state
        logits = self.score(hidden_states)

        if input_ids is not None:
            batch_size = input_ids.shape[0]
        else:
            batch_size = inputs_embeds.shape[0]

        if self.config.pad_token_id is None and batch_size != 1:
            raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
        if self.config.pad_token_id is None:
            last_non_pad_token = -1
        elif input_ids is not None:
            # To handle both left- and right-padding, take the rightmost token that is not equal to pad_token_id
            non_pad_mask = (input_ids != self.config.pad_token_id).to(logits.device, torch.int32)
            token_indices = torch.arange(input_ids.shape[-1], device=logits.device, dtype=torch.int32)
            last_non_pad_token = (token_indices * non_pad_mask).argmax(-1)
        else:
            last_non_pad_token = -1
            logger.warning_once(
                f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be "
                "unexpected if using padding tokens in conjunction with `inputs_embeds.`"
            )

        pooled_logits = logits[torch.arange(batch_size, device=logits.device), last_non_pad_token]

        loss = None
        if labels is not None:
            loss = self.loss_function(logits=logits, labels=labels, pooled_logits=pooled_logits, config=self.config)

        return SequenceClassifierOutputWithPast(
            loss=loss,
            logits=pooled_logits,
            past_key_values=transformer_outputs.past_key_values,
            hidden_states=transformer_outputs.hidden_states,
            attentions=transformer_outputs.attentions,
        )


@auto_docstring
class Phi3ForTokenClassification(Phi3PreTrainedModel):
    def __init__(self, config: Phi3Config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.model = Phi3Model(config)
        if getattr(config, "classifier_dropout", None) is not None:
            classifier_dropout = config.classifier_dropout
        elif getattr(config, "hidden_dropout", None) is not None:
            classifier_dropout = config.hidden_dropout
        else:
            classifier_dropout = 0.1
        self.dropout = nn.Dropout(classifier_dropout)
        self.score = nn.Linear(config.hidden_size, config.num_labels)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.embed_tokens

    def set_input_embeddings(self, value):
        self.model.embed_tokens = value

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
    ) -> TokenClassifierOutput:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        outputs: BaseModelOutputWithPast = self.model(
            input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
        )
        sequence_output = outputs.last_hidden_state
        sequence_output = self.dropout(sequence_output)
        logits = self.score(sequence_output)

        loss = None
        if labels is not None:
            loss = self.loss_function(logits, labels, self.config)

        return TokenClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


__all__ = [
    "Phi3PreTrainedModel",
    "Phi3Model",
    "Phi3ForCausalLM",
    "Phi3ForSequenceClassification",
    "Phi3ForTokenClassification",
]