
"""PyTorch Persimmon model."""

import math
from typing import List, Optional, Tuple, Union

import torch
import torch.utils.checkpoint
from torch import nn

from ...activations import ACT2FN
from ...cache_utils import Cache, DynamicCache
from ...generation import GenerationMixin
from ...modeling_attn_mask_utils import AttentionMaskConverter
from ...modeling_outputs import (
    BaseModelOutputWithPast,
    CausalLMOutputWithPast,
    SequenceClassifierOutputWithPast,
    TokenClassifierOutput,
)
from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
from ...modeling_utils import PreTrainedModel
from ...utils import auto_docstring, can_return_tuple, is_torch_flex_attn_available, logging
from .configuration_persimmon import PersimmonConfig


if is_torch_flex_attn_available():
    from torch.nn.attention.flex_attention import BlockMask

    from ...integrations.flex_attention import make_flex_block_causal_mask


logger = logging.get_logger(__name__)


class PersimmonRotaryEmbedding(nn.Module):
    def __init__(self, config: PersimmonConfig, device=None):
        super().__init__()
        # BC: "rope_type" was originally "type"
        if hasattr(config, "rope_scaling") and config.rope_scaling is not None:
            self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type"))
        else:
            self.rope_type = "default"
        self.max_seq_len_cached = config.max_position_embeddings
        self.original_max_seq_len = config.max_position_embeddings

        self.config = config
        self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]

        inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
        self.register_buffer("inv_freq", inv_freq, persistent=False)
        self.original_inv_freq = self.inv_freq

    @torch.no_grad()
    @dynamic_rope_update
    def forward(self, x, position_ids):
        inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
        position_ids_expanded = position_ids[:, None, :].float()

        device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
        with torch.autocast(device_type=device_type, enabled=False):  # force float32
            freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
            emb = torch.cat((freqs, freqs), dim=-1)
            cos = emb.cos() * self.attention_scaling
            sin = emb.sin() * self.attention_scaling

        return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)


def rotate_half(x):
    """Rotates half the hidden dims of the input."""
    x1 = x[..., : x.shape[-1] // 2]
    x2 = x[..., x.shape[-1] // 2 :]
    return torch.cat((-x2, x1), dim=-1)


def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
    """Applies Rotary Position Embedding to the query and key tensors.

    Args:
        q (`torch.Tensor`): The query tensor.
        k (`torch.Tensor`): The key tensor.
        cos (`torch.Tensor`): The cosine part of the rotary embedding.
        sin (`torch.Tensor`): The sine part of the rotary embedding.
        position_ids (`torch.Tensor`, *optional*):
            Deprecated and unused.
        unsqueeze_dim (`int`, *optional*, defaults to 1):
            The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
            sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
            that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
            k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
            cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
            the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
    Returns:
        `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
    """
    cos = cos.unsqueeze(unsqueeze_dim)
    sin = sin.unsqueeze(unsqueeze_dim)
    q_embed = (q * cos) + (rotate_half(q) * sin)
    k_embed = (k * cos) + (rotate_half(k) * sin)
    return q_embed, k_embed


class PersimmonMLP(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense_h_to_4h = nn.Linear(config.hidden_size, config.intermediate_size)
        self.dense_4h_to_h = nn.Linear(config.intermediate_size, config.hidden_size)
        self.act = ACT2FN[config.hidden_act]

    def forward(self, hidden_states):
        hidden_states = self.dense_h_to_4h(hidden_states)
        hidden_states = self.act(hidden_states)
        hidden_states = self.dense_4h_to_h(hidden_states)
        return hidden_states


class PersimmonAttention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config: PersimmonConfig, layer_idx: Optional[int] = None):
        super().__init__()
        self.config = config
        self.layer_idx = layer_idx
        if layer_idx is None:
            logger.warning_once(
                f"Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will "
                "lead to errors during the forward call if caching is used. Please make sure to provide a `layer_idx` "
                "when creating this class."
            )

        self.hidden_size = config.hidden_size
        self.num_heads = config.num_attention_heads
        self.head_dim = self.hidden_size // self.num_heads
        self.rope_theta = config.rope_theta
        self.rotary_ndims = int(self.head_dim * config.partial_rotary_factor)
        self.is_causal = True

        if (self.head_dim * self.num_heads) != self.hidden_size:
            raise ValueError(
                f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}"
                f" and `num_heads`: {self.num_heads})."
            )
        self.query_key_value = nn.Linear(self.hidden_size, 3 * self.hidden_size, bias=True)
        self.dense = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=True)
        self.qk_layernorm = config.qk_layernorm

        if self.qk_layernorm:
            self.q_layernorm = nn.LayerNorm(
                config.hidden_size // self.num_heads, eps=config.layer_norm_eps, elementwise_affine=True
            )
            self.k_layernorm = nn.LayerNorm(
                config.hidden_size // self.num_heads, eps=config.layer_norm_eps, elementwise_affine=True
            )
        self.attention_dropout = nn.Dropout(config.attention_dropout)
        self.rotary_emb = PersimmonRotaryEmbedding(config=self.config)

    def _split_heads(self, fused_qkv: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """
        Split the last dimension into (num_heads, head_dim) without making any copies, results share same memory
        storage as `fused_qkv`

        Args:
            fused_qkv (`torch.tensor`): [batch_size, seq_length, num_heads * 3 * head_dim]

        Returns:
            query: [batch_size, seq_length, num_heads, head_dim] key: [batch_size, seq_length, num_heads, head_dim]
            value: [batch_size, seq_length, num_heads, head_dim]
        """
        batch_size, seq_length, three_times_hidden_size = fused_qkv.shape
        fused_qkv = fused_qkv.view(batch_size, seq_length, self.num_heads, 3, self.head_dim)
        return fused_qkv[..., 0, :], fused_qkv[..., 1, :], fused_qkv[..., 2, :]

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Cache] = None,
        output_attentions: bool = False,
        use_cache: bool = False,
        cache_position: Optional[torch.LongTensor] = None,
        position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        bsz, q_len, _ = hidden_states.size()

        # [batch_size, seq_length, 3 x hidden_size]
        fused_qkv = self.query_key_value(hidden_states)

        # 3 x [batch_size, seq_length, num_heads, head_dim]
        (query_states, key_states, value_states) = self._split_heads(fused_qkv)

        if self.qk_layernorm:
            query_states = self.q_layernorm(query_states)
            key_states = self.k_layernorm(key_states)

        # [batch_size, seq_length, num_heads, head_dim] -> [batch_size, num_heads, seq_length, head_dim]
        query_states = query_states.transpose(1, 2)
        key_states = key_states.transpose(1, 2)
        value_states = value_states.transpose(1, 2)

        cos, sin = position_embeddings
        # Partial rotary embedding
        query_rot, query_pass = (
            query_states[..., : self.rotary_ndims],
            query_states[..., self.rotary_ndims :],
        )
        key_rot, key_pass = (
            key_states[..., : self.rotary_ndims],
            key_states[..., self.rotary_ndims :],
        )
        # [batch_size, num_heads, seq_length, rotary_ndims]
        query_rot, key_rot = apply_rotary_pos_emb(query_rot, key_rot, cos, sin)

        # [batch_size, num_heads, seq_length, head_dim]
        query_states = torch.cat((query_rot, query_pass), dim=-1)
        key_states = torch.cat((key_rot, key_pass), dim=-1)

        if past_key_value is not None:
            # Specific to RoPE models with partial rotation
            cache_kwargs = {
                "sin": sin,
                "cos": cos,
                "partial_rotation_size": self.rotary_ndims,
                "cache_position": cache_position,
            }
            key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)

        attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)

        if attention_mask is not None:  # no matter the length, we just slice it
            causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
            attn_weights = attn_weights + causal_mask

        # upcast attention to fp32
        attn_weights = nn.functional.softmax(attn_weights, dtype=torch.float32, dim=-1).to(query_states.dtype)
        attn_weights = self.attention_dropout(attn_weights)

        attn_output = torch.matmul(attn_weights, value_states)

        if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
            raise ValueError(
                f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is"
                f" {attn_output.size()}"
            )

        attn_output = attn_output.transpose(1, 2).contiguous()
        attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)

        attn_output = self.dense(attn_output)

        if not output_attentions:
            attn_weights = None

        return attn_output, attn_weights, past_key_value
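

# Minimal sketch of the fused-QKV layout `_split_heads` relies on: the joint projection
# packs the last dimension as (num_heads, 3, head_dim), so Q, K and V live interleaved
# per head rather than in three contiguous blocks. The sizes below are assumptions for
# illustration only.
def _example_fused_qkv_layout():
    num_heads, head_dim = 4, 16
    fused_qkv = torch.randn(2, 5, num_heads * 3 * head_dim)  # (batch, seq, 3 * hidden)
    packed = fused_qkv.view(2, 5, num_heads, 3, head_dim)
    query, key, value = packed[..., 0, :], packed[..., 1, :], packed[..., 2, :]
    return query.shape, key.shape, value.shape  # each is (2, 5, 4, 16)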


class PersimmonDecoderLayer(nn.Module):
    def __init__(self, config: PersimmonConfig, layer_idx: int):
        super().__init__()
        self.hidden_size = config.hidden_size
        self.self_attn = PersimmonAttention(config=config, layer_idx=layer_idx)
        self.mlp = PersimmonMLP(config)
        self.input_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.post_attention_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Tuple[torch.Tensor]] = None,
        output_attentions: Optional[bool] = False,
        use_cache: Optional[bool] = False,
        cache_position: Optional[torch.LongTensor] = None,
        position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
    ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
        """
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
                `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
            position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
                Indices of positions of each input sequence tokens in the position embeddings. Selected in the range
                `[0, config.n_positions - 1]`.
                [What are position IDs?](../glossary#position-ids)
            past_key_value (`Tuple(torch.FloatTensor)`, *optional*):
                cached past key and value projection states
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            use_cache (`bool`, *optional*):
                If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
                (see `past_key_values`).
            cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
                Indices depicting the position of the input sequence tokens in the sequence
            position_embeddings (`Tuple[torch.FloatTensor, torch.FloatTensor]`, *optional*):
                Tuple containing the cosine and sine positional embeddings of shape `(batch_size, seq_len, head_dim)`,
                with `head_dim` being the embedding dimension of each attention head.
        """
        residual = hidden_states

        hidden_states = self.input_layernorm(hidden_states)

        # Self Attention
        hidden_states, self_attn_weights, present_key_value = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_value=past_key_value,
            output_attentions=output_attentions,
            use_cache=use_cache,
            cache_position=cache_position,
            position_embeddings=position_embeddings,
        )
        hidden_states = residual + hidden_states

        residual = hidden_states
        hidden_states = self.post_attention_layernorm(hidden_states)
        hidden_states = self.mlp(hidden_states)

        hidden_states = self.dropout(hidden_states)
        hidden_states = hidden_states + residual

        outputs = (hidden_states,)

        if output_attentions:
            outputs += (self_attn_weights,)

        if use_cache:
            outputs += (present_key_value,)

        return outputs


@auto_docstring
class PersimmonPreTrainedModel(PreTrainedModel):
    config_class = PersimmonConfig
    base_model_prefix = "model"
    supports_gradient_checkpointing = True
    _no_split_modules = ["PersimmonDecoderLayer"]
    _skip_keys_device_placement = "past_key_values"
    _supports_cache_class = True
    _supports_quantized_cache = True
    _supports_static_cache = True

    def _init_weights(self, module):
        std = self.config.initializer_range
        if isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, nn.LayerNorm):
            module.weight.data.fill_(1.0)
            module.bias.data.zero_()
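

# Minimal end-to-end sketch of a single decoder layer call. The config values are
# assumptions picked to keep tensors small; `position_embeddings` must be produced by
# `PersimmonRotaryEmbedding`, exactly as `PersimmonModel.forward` does once per pass.
def _example_decoder_layer_call():
    config = PersimmonConfig(hidden_size=64, intermediate_size=256, num_attention_heads=4)
    layer = PersimmonDecoderLayer(config, layer_idx=0)
    rope = PersimmonRotaryEmbedding(config)
    hidden_states = torch.randn(1, 5, config.hidden_size)
    position_ids = torch.arange(5).unsqueeze(0)
    position_embeddings = rope(hidden_states, position_ids)  # shared (cos, sin) tuple
    (hidden_states,) = layer(hidden_states, position_embeddings=position_embeddings)
    return hidden_states.shape  # (1, 5, 64)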


@auto_docstring
class PersimmonModel(PersimmonPreTrainedModel):
    """
    Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`PersimmonDecoderLayer`]

    Args:
        config: PersimmonConfig
    """

    def __init__(self, config: PersimmonConfig):
        super().__init__(config)
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size

        self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
        self.layers = nn.ModuleList(
            [PersimmonDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
        )
        self.final_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.rotary_emb = PersimmonRotaryEmbedding(config=config)

        self.gradient_checkpointing = False
        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embed_tokens

    def set_input_embeddings(self, value):
        self.embed_tokens = value

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
    ) -> BaseModelOutputWithPast:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache

        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")

        if self.gradient_checkpointing and self.training and use_cache:
            logger.warning_once(
                "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
            )
            use_cache = False

        return_legacy_cache = False
        if use_cache and not isinstance(past_key_values, Cache):
            return_legacy_cache = True
            if past_key_values is None:
                past_key_values = DynamicCache()
            else:
                past_key_values = DynamicCache.from_legacy_cache(past_key_values)
                logger.warning_once(
                    "We detected that you are passing `past_key_values` as a tuple of tuples. This is deprecated and "
                    "will be removed in v4.47. Please convert your cache or use an appropriate `Cache` class "
                    "(https://huggingface.co/docs/transformers/kv_cache#legacy-cache-format)"
                )

        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)

        if cache_position is None:
            past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
            cache_position = torch.arange(
                past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
            )
        if position_ids is None:
            position_ids = cache_position.unsqueeze(0)

        causal_mask = self._update_causal_mask(
            attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions
        )

        hidden_states = inputs_embeds

        # create position embeddings to be shared across the decoder layers
        position_embeddings = self.rotary_emb(hidden_states, position_ids)

        # decoder layers
        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None
        next_decoder_cache = None

        for decoder_layer in self.layers:
            if output_hidden_states:
                all_hidden_states += (hidden_states,)

            if self.gradient_checkpointing and self.training:
                layer_outputs = self._gradient_checkpointing_func(
                    decoder_layer.__call__,
                    hidden_states,
                    causal_mask,
                    position_ids,
                    past_key_values,
                    output_attentions,
                    use_cache,
                    cache_position,
                    position_embeddings,
                )
            else:
                layer_outputs = decoder_layer(
                    hidden_states,
                    attention_mask=causal_mask,
                    position_ids=position_ids,
                    past_key_value=past_key_values,
                    output_attentions=output_attentions,
                    use_cache=use_cache,
                    cache_position=cache_position,
                    position_embeddings=position_embeddings,
                )

            hidden_states = layer_outputs[0]

            if use_cache:
                next_decoder_cache = layer_outputs[2 if output_attentions else 1]

            if output_attentions:
                all_self_attns += (layer_outputs[1],)

        hidden_states = self.final_layernorm(hidden_states)

        # add hidden states from the last decoder layer
        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        next_cache = next_decoder_cache if use_cache else None
        if return_legacy_cache:
            next_cache = next_cache.to_legacy_cache()

        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=next_cache,
            hidden_states=all_hidden_states,
            attentions=all_self_attns,
        )

    def _update_causal_mask(
        self,
        attention_mask: Union[torch.Tensor, "BlockMask"],
        input_tensor: torch.Tensor,
        cache_position: torch.Tensor,
        past_key_values: Cache,
        output_attentions: bool = False,
    ):
        if self.config._attn_implementation == "flash_attention_2":
            if attention_mask is not None and (attention_mask == 0.0).any():
                return attention_mask
            return None
        if self.config._attn_implementation == "flex_attention":
            if isinstance(attention_mask, torch.Tensor):
                attention_mask = make_flex_block_causal_mask(attention_mask)
            return attention_mask

        # For SDPA, when possible, we will rely on its `is_causal` argument instead of its `attn_mask` argument, in
        # order to dispatch on Flash Attention 2. This feature is not compatible with static cache, as SDPA will fail
        # to infer the attention mask.
        past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
        using_compilable_cache = past_key_values.is_compileable if past_key_values is not None else False

        # When output attentions is True, sdpa implementation's forward method calls the eager implementation's forward
        if self.config._attn_implementation == "sdpa" and not using_compilable_cache and not output_attentions:
            if AttentionMaskConverter._ignore_causal_mask_sdpa(
                attention_mask,
                inputs_embeds=input_tensor,
                past_key_values_length=past_seen_tokens,
                is_training=self.training,
            ):
                return None

        dtype = input_tensor.dtype
        sequence_length = input_tensor.shape[1]
        if using_compilable_cache:
            target_length = past_key_values.get_max_cache_shape()
        else:
            target_length = (
                attention_mask.shape[-1]
                if isinstance(attention_mask, torch.Tensor)
                else past_seen_tokens + sequence_length + 1
            )

        # In case the provided `attention` mask is 2D, we generate a causal mask here (4D).
        causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position(
            attention_mask,
            sequence_length=sequence_length,
            target_length=target_length,
            dtype=dtype,
            cache_position=cache_position,
            batch_size=input_tensor.shape[0],
        )

        if (
            self.config._attn_implementation == "sdpa"
            and attention_mask is not None
            and attention_mask.device.type in ["cuda", "xpu", "npu"]
            and not output_attentions
        ):
            # Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when
            # using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path.
            # Details: https://github.com/pytorch/pytorch/issues/110213
            min_dtype = torch.finfo(dtype).min
            causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)

        return causal_mask

    @staticmethod
    def _prepare_4d_causal_attention_mask_with_cache_position(
        attention_mask: torch.Tensor,
        sequence_length: int,
        target_length: int,
        dtype: torch.dtype,
        cache_position: torch.Tensor,
        batch_size: int,
        **kwargs,
    ):
        """
        Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
        `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.

        Args:
            attention_mask (`torch.Tensor`):
                A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape
                `(batch_size, 1, query_length, key_value_length)`.
            sequence_length (`int`):
                The sequence length being processed.
            target_length (`int`):
                The target length: when generating with static cache, the mask should be as long as the static cache,
                to account for the 0 padding, the part of the cache that is not filled yet.
            dtype (`torch.dtype`):
                The dtype to use for the 4D attention mask.
            cache_position (`torch.Tensor`):
                Indices depicting the position of the input sequence tokens in the sequence.
            batch_size (`torch.Tensor`):
                Batch size.
        """
        if attention_mask is not None and attention_mask.dim() == 4:
            # In this case we assume that the mask comes already in inverted form and requires no inversion or slicing.
            causal_mask = attention_mask
        else:
            min_dtype = torch.finfo(dtype).min
            causal_mask = torch.full(
                (sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=cache_position.device
            )
            if sequence_length != 1:
                causal_mask = torch.triu(causal_mask, diagonal=1)
            causal_mask *= torch.arange(target_length, device=cache_position.device) > cache_position.reshape(-1, 1)
            causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)
            if attention_mask is not None:
                causal_mask = causal_mask.clone()  # copy to contiguous memory for in-place edit
                mask_length = attention_mask.shape[-1]
                padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :].to(
                    causal_mask.device
                )
                padding_mask = padding_mask == 0
                causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(
                    padding_mask, min_dtype
                )

        return causal_mask
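

# Minimal sketch of the 4D mask helper above. A single left-padded row is assumed; the
# returned mask is additive: 0.0 where attention is allowed, `torch.finfo(dtype).min`
# everywhere else.
def _example_causal_mask():
    attention_mask = torch.tensor([[0, 1, 1, 1]])  # first position is padding
    mask_4d = PersimmonModel._prepare_4d_causal_attention_mask_with_cache_position(
        attention_mask=attention_mask,
        sequence_length=4,
        target_length=4,
        dtype=torch.float32,
        cache_position=torch.arange(4),
        batch_size=1,
    )
    return mask_4d.shape  # (1, 1, 4, 4)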


@auto_docstring
class PersimmonForCausalLM(PersimmonPreTrainedModel, GenerationMixin):
    _tied_weights_keys = ["lm_head.weight"]

    def __init__(self, config):
        super().__init__(config)
        self.model = PersimmonModel(config)
        self.vocab_size = config.vocab_size
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.embed_tokens

    def set_input_embeddings(self, value):
        self.model.embed_tokens = value

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    def set_decoder(self, decoder):
        self.model = decoder

    def get_decoder(self):
        return self.model

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        logits_to_keep: Union[int, torch.Tensor] = 0,
        **kwargs,
    ) -> CausalLMOutputWithPast:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Example:

        ```python
        >>> from transformers import AutoTokenizer, PersimmonForCausalLM

        >>> model = PersimmonForCausalLM.from_pretrained("adept/persimmon-8b-base")
        >>> tokenizer = AutoTokenizer.from_pretrained("adept/persimmon-8b-base")

        >>> prompt = "human: Hey, what should I eat for dinner?"
        >>> inputs = tokenizer(prompt, return_tensors="pt")

        >>> # Generate
        >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
        >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        'human: Hey, what should I eat for dinner?\n\ncat: 🐱\n\nhuman: 😐\n\n'
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        outputs: BaseModelOutputWithPast = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            cache_position=cache_position,
        )

        hidden_states = outputs.last_hidden_state
        # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
        slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
        logits = self.lm_head(hidden_states[:, slice_indices, :])

        loss = None
        if labels is not None:
            loss = self.loss_function(
                logits,
                labels,
                vocab_size=self.config.vocab_size,
                **kwargs,
            )

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
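

# Minimal sketch of `logits_to_keep`: during decoding only the final position's logits
# are needed, so slicing before the LM head avoids materializing a full
# (batch, seq_len, vocab_size) tensor. The tiny, randomly initialized model is an
# assumption to keep the sketch self-contained.
def _example_logits_to_keep():
    config = PersimmonConfig(
        hidden_size=64, intermediate_size=256, num_attention_heads=4, num_hidden_layers=2, vocab_size=100
    )
    model = PersimmonForCausalLM(config)
    input_ids = torch.randint(0, config.vocab_size, (1, 8))
    outputs = model(input_ids=input_ids, logits_to_keep=1)
    return outputs.logits.shape  # (1, 1, 100): logits for the last token only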


@auto_docstring(
    custom_intro="""
    The Persimmon transformer with a sequence classification head on top (linear layer).

    [`PersimmonForSequenceClassification`] uses the last token in order to do the classification, as other causal
    models (e.g. GPT-2) do.

    Since it does classification on the last token, it requires to know the position of the last token. If a
    `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
    no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
    padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
    each row of the batch).
    """
)
class PersimmonForSequenceClassification(PersimmonPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.model = PersimmonModel(config)
        self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.embed_tokens

    def set_input_embeddings(self, value):
        self.model.embed_tokens = value

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
    ) -> SequenceClassifierOutputWithPast:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        transformer_outputs: BaseModelOutputWithPast = self.model(
            input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
        )
        hidden_states = transformer_outputs.last_hidden_state
        logits = self.score(hidden_states)

        if input_ids is not None:
            batch_size = input_ids.shape[0]
        else:
            batch_size = inputs_embeds.shape[0]

        if self.config.pad_token_id is None and batch_size != 1:
            raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
        if self.config.pad_token_id is None:
            last_non_pad_token = -1
        elif input_ids is not None:
            # To handle both left- and right-padding, we take the rightmost token that is not equal to pad_token_id
            non_pad_mask = (input_ids != self.config.pad_token_id).to(logits.device, torch.int32)
            token_indices = torch.arange(input_ids.shape[-1], device=logits.device, dtype=torch.int32)
            last_non_pad_token = (token_indices * non_pad_mask).argmax(-1)
        else:
            last_non_pad_token = -1
            logger.warning_once(
                f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be "
                "unexpected if using padding tokens in conjunction with `inputs_embeds.`"
            )

        pooled_logits = logits[torch.arange(batch_size, device=logits.device), last_non_pad_token]

        loss = None
        if labels is not None:
            loss = self.loss_function(logits=logits, labels=labels, pooled_logits=pooled_logits, config=self.config)

        return SequenceClassifierOutputWithPast(
            loss=loss,
            logits=pooled_logits,
            past_key_values=transformer_outputs.past_key_values,
            hidden_states=transformer_outputs.hidden_states,
            attentions=transformer_outputs.attentions,
        )
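

# Minimal sketch of the pooling rule used above: pick the rightmost non-pad token per
# row, which handles both left- and right-padding. `pad_token_id=0` is an assumption.
def _example_last_non_pad_token():
    input_ids = torch.tensor([[5, 7, 0, 0], [0, 0, 9, 3]])  # right- and left-padded rows
    pad_token_id = 0
    non_pad_mask = (input_ids != pad_token_id).to(torch.int32)
    token_indices = torch.arange(input_ids.shape[-1], dtype=torch.int32)
    last_non_pad_token = (token_indices * non_pad_mask).argmax(-1)
    return last_non_pad_token  # tensor([1, 3])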


@auto_docstring
class PersimmonForTokenClassification(PersimmonPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.model = PersimmonModel(config)
        if getattr(config, "classifier_dropout", None) is not None:
            classifier_dropout = config.classifier_dropout
        elif getattr(config, "hidden_dropout", None) is not None:
            classifier_dropout = config.hidden_dropout
        else:
            classifier_dropout = 0.1
        self.dropout = nn.Dropout(classifier_dropout)
        self.score = nn.Linear(config.hidden_size, config.num_labels)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.embed_tokens

    def set_input_embeddings(self, value):
        self.model.embed_tokens = value

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
    ) -> TokenClassifierOutput:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        outputs: BaseModelOutputWithPast = self.model(
            input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
        )
        sequence_output = outputs.last_hidden_state
        sequence_output = self.dropout(sequence_output)
        logits = self.score(sequence_output)

        loss = None
        if labels is not None:
            loss = self.loss_function(logits, labels, self.config)

        return TokenClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


__all__ = [
    "PersimmonForCausalLM",
    "PersimmonModel",
    "PersimmonPreTrainedModel",
    "PersimmonForSequenceClassification",
    "PersimmonForTokenClassification",
]
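

# Minimal usage sketch for the token classification head; the tiny, randomly initialized
# model is an assumption, and real checkpoints would be loaded via `from_pretrained`.
def _example_token_classification():
    config = PersimmonConfig(
        hidden_size=64,
        intermediate_size=256,
        num_attention_heads=4,
        num_hidden_layers=2,
        vocab_size=100,
        num_labels=3,
    )
    model = PersimmonForTokenClassification(config)
    input_ids = torch.randint(0, config.vocab_size, (1, 6))
    outputs = model(input_ids=input_ids)
    return outputs.logits.shape  # (1, 6, 3): one label distribution per token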