
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import torch
import torch.utils.checkpoint

from transformers.models.instructblip.configuration_instructblip import (
    InstructBlipQFormerConfig,
    InstructBlipVisionConfig,
)
from transformers.models.instructblip.modeling_instructblip import (
    InstructBlipForConditionalGeneration,
    InstructBlipForConditionalGenerationModelOutput,
    InstructBlipModel,
    InstructBlipPreTrainedModel,
    InstructBlipQFormerModel,
    InstructBlipVisionModel,
    KwargsForCausalLM,
)

from ...configuration_utils import PretrainedConfig
from ...modeling_flash_attention_utils import FlashAttentionKwargs
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...processing_utils import Unpack
from ...utils import logging
from ..auto import CONFIG_MAPPING, AutoConfig


logger = logging.get_logger(__name__)


class InstructBlipVideoVisionConfig(InstructBlipVisionConfig):
    pass


class InstructBlipVideoQFormerConfig(InstructBlipQFormerConfig):
    pass


class InstructBlipVideoConfig(PretrainedConfig):
    r"""
    [`InstructBlipVideoConfig`] is the configuration class to store the configuration of an
    [`InstructBlipVideoForConditionalGeneration`]. It is used to instantiate an InstructBlipVideo model according to the
    specified arguments, defining the vision model, Q-Former model and language model configs. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the InstructBlipVideo
    [Salesforce/instruct-blip-flan-t5](https://huggingface.co/Salesforce/instruct-blip-flan-t5) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vision_config (`dict`, *optional*):
            Dictionary of configuration options used to initialize [`InstructBlipVideoVisionConfig`].
        qformer_config (`dict`, *optional*):
            Dictionary of configuration options used to initialize [`InstructBlipVideoQFormerConfig`].
        text_config (`dict`, *optional*):
            Dictionary of configuration options used to initialize any [`PretrainedConfig`].
        num_query_tokens (`int`, *optional*, defaults to 32):
            The number of query tokens passed through the Transformer.
        video_token_index (`int`, *optional*):
            Token index of special video token.
        kwargs (*optional*):
            Dictionary of keyword arguments.

    Example:

    ```python
    >>> from transformers import (
    ...     InstructBlipVideoVisionConfig,
    ...     InstructBlipVideoQFormerConfig,
    ...     OPTConfig,
    ...     InstructBlipVideoConfig,
    ...     InstructBlipVideoForConditionalGeneration,
    ... )

    >>> # Initializing an InstructBlipVideoConfig with Salesforce/instruct-blip-flan-t5 style configuration
    >>> configuration = InstructBlipVideoConfig()

    >>> # Initializing an InstructBlipVideoForConditionalGeneration (with random weights) from the Salesforce/instruct-blip-flan-t5 style configuration
    >>> model = InstructBlipVideoForConditionalGeneration(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config

    >>> # We can also initialize an InstructBlipVideoConfig from an InstructBlipVideoVisionConfig, InstructBlipVideoQFormerConfig and any PretrainedConfig

    >>> # Initializing InstructBlipVideo vision, InstructBlipVideo Q-Former and language model configurations
    >>> vision_config = InstructBlipVideoVisionConfig()
    >>> qformer_config = InstructBlipVideoQFormerConfig()
    >>> text_config = OPTConfig()

    >>> config = InstructBlipVideoConfig.from_vision_qformer_text_configs(vision_config, qformer_config, text_config)
    ```"""

    model_type = "instructblipvideo"
    attribute_map = {"video_token_id": "video_token_index"}
    sub_configs = {
        "text_config": AutoConfig,
        "qformer_config": InstructBlipVideoQFormerConfig,
        "vision_config": InstructBlipVideoVisionConfig,
    }

    def __init__(
        self,
        vision_config=None,
        qformer_config=None,
        text_config=None,
        num_query_tokens=32,
        video_token_index=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the InstructBlipVideoVisionConfig with default values.")

        if qformer_config is None:
            qformer_config = {}
            logger.info("qformer_config is None. Initializing the InstructBlipVideoQFormerConfig with default values.")

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`).")

        self.vision_config = InstructBlipVideoVisionConfig(**vision_config)
        self.qformer_config = InstructBlipVideoQFormerConfig(**qformer_config)
        text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)

        self.num_query_tokens = num_query_tokens
        self.video_token_index = video_token_index
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs(
        cls,
        vision_config: InstructBlipVideoVisionConfig,
        qformer_config: InstructBlipVideoQFormerConfig,
        text_config: PretrainedConfig,
        **kwargs,
    ):
        r"""
        Instantiate an [`InstructBlipVideoConfig`] (or a derived class) from an InstructBlipVideo vision model, Q-Former and
        language model configurations.

        Returns:
            [`InstructBlipVideoConfig`]: An instance of a configuration object
        """
        return cls(
            vision_config=vision_config.to_dict(),
            qformer_config=qformer_config.to_dict(),
            text_config=text_config.to_dict(),
            **kwargs,
        )


class InstructBlipVideoPreTrainedModel(InstructBlipPreTrainedModel):
    pass


class InstructBlipVideoVisionModel(InstructBlipVisionModel):
    pass


class InstructBlipVideoQFormerModel(InstructBlipQFormerModel):
    pass


@dataclass
class InstructBlipVideoForConditionalGenerationModelOutput(InstructBlipForConditionalGenerationModelOutput):
    pass


class InstructBlipVideoModel(InstructBlipModel):
    def forward(
        self,
        pixel_values: torch.FloatTensor,
        qformer_input_ids: torch.FloatTensor,
        qformer_attention_mask: Optional[torch.LongTensor] = None,
        input_ids: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.LongTensor] = None,
        decoder_input_ids: Optional[torch.LongTensor] = None,
        decoder_attention_mask: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        interpolate_pos_encoding: bool = False,
        use_cache: Optional[bool] = None,
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> Union[Tuple, InstructBlipVideoForConditionalGenerationModelOutput]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # step 1: forward the videos through the vision encoder,
        # folding the frame axis into the batch axis so every frame is encoded independently
        batch_size, frames, channel, height, width = pixel_values.shape
        pixel_values = pixel_values.reshape(batch_size * frames, channel, height, width)

        vision_outputs = self.vision_model(
            pixel_values=pixel_values,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            interpolate_pos_encoding=interpolate_pos_encoding,
        )
        image_embeds = vision_outputs[0]

        # step 2: forward the query tokens through the Q-Former, using the frame embeddings for cross-attention
        image_attention_mask = torch.ones(image_embeds.size()[:-1], dtype=torch.long, device=image_embeds.device)

        query_tokens = self.query_tokens.expand(image_embeds.shape[0], -1, -1)
        query_attention_mask = torch.ones(query_tokens.size()[:-1], dtype=torch.long, device=image_embeds.device)
        if qformer_attention_mask is None:
            qformer_attention_mask = torch.ones_like(qformer_input_ids)

        # the Q-Former text prompt is repeated per frame to match the folded batch
        qformer_input_ids = qformer_input_ids.repeat_interleave(frames, dim=0)
        qformer_attention_mask = qformer_attention_mask.repeat_interleave(frames, dim=0)
        qformer_attention_mask = torch.cat([query_attention_mask, qformer_attention_mask], dim=1)
        query_outputs = self.qformer(
            input_ids=qformer_input_ids,
            attention_mask=qformer_attention_mask,
            query_embeds=query_tokens,
            encoder_hidden_states=image_embeds,
            encoder_attention_mask=image_attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        query_output = query_outputs[0][:, : query_tokens.size(1), :]

        # step 3: project the Q-Former output and feed it to the language model,
        # unfolding frames back so each video contributes `num_query_tokens * frames` embeddings
        language_model_inputs = self.language_projection(query_output)
        language_model_inputs = language_model_inputs.reshape(batch_size, self.config.num_query_tokens * frames, -1)

        inputs_embeds = self.language_model.get_input_embeddings()(input_ids)
        if attention_mask is None:
            attention_mask = torch.ones_like(input_ids)

        special_image_mask = (input_ids == self.config.video_token_id).unsqueeze(-1).expand_as(inputs_embeds)
        inputs_embeds[special_image_mask] = language_model_inputs.flatten()

        if self.config.use_decoder_only_language_model:
            outputs = self.language_model(
                inputs_embeds=inputs_embeds,
                attention_mask=attention_mask,
                output_attentions=output_attentions,
                output_hidden_states=output_hidden_states,
                return_dict=return_dict,
                use_cache=use_cache,
                **kwargs,
            )
        else:
            outputs = self.language_model(
                inputs_embeds=inputs_embeds,
                attention_mask=attention_mask,
                decoder_input_ids=decoder_input_ids,
                decoder_attention_mask=decoder_attention_mask,
                output_attentions=output_attentions,
                output_hidden_states=output_hidden_states,
                return_dict=return_dict,
                use_cache=use_cache,
                **kwargs,
            )

        return InstructBlipVideoForConditionalGenerationModelOutput(
            vision_outputs=vision_outputs,
            qformer_outputs=query_outputs,
            language_model_outputs=outputs,
        )


class InstructBlipVideoForConditionalGeneration(InstructBlipForConditionalGeneration):
    def get_video_features(
        self,
        pixel_values: torch.FloatTensor,
        qformer_input_ids: torch.LongTensor,
        qformer_attention_mask: Optional[torch.LongTensor] = None,
        interpolate_pos_encoding: Optional[bool] = False,
        return_dict: Optional[bool] = False,
    ):
        """
        Encodes videos into continuous embeddings that can be forwarded to the language model.

        Args:
            pixel_values (`torch.FloatTensor` of shape `(batch_size, num_frames, num_channels, image_size, image_size)`):
                The tensors corresponding to the input videos.
        """
        batch_size, frames, channel, height, width = pixel_values.shape
        pixel_values = pixel_values.reshape(batch_size * frames, channel, height, width)

        vision_outputs = self.vision_model(
            pixel_values=pixel_values,
            interpolate_pos_encoding=interpolate_pos_encoding,
            return_dict=True,
        )
        image_embeds = vision_outputs[0]

        image_attention_mask = torch.ones(image_embeds.size()[:-1], dtype=torch.long, device=image_embeds.device)

        query_tokens = self.query_tokens.expand(image_embeds.shape[0], -1, -1)
        query_attention_mask = torch.ones(query_tokens.size()[:-1], dtype=torch.long, device=image_embeds.device)
        if qformer_attention_mask is None:
            qformer_attention_mask = torch.ones_like(qformer_input_ids)

        qformer_input_ids = qformer_input_ids.repeat_interleave(frames, dim=0)
        qformer_attention_mask = qformer_attention_mask.repeat_interleave(frames, dim=0)
        qformer_attention_mask = torch.cat([query_attention_mask, qformer_attention_mask], dim=1)
        query_outputs = self.qformer(
            input_ids=qformer_input_ids,
            attention_mask=qformer_attention_mask,
            query_embeds=query_tokens,
            encoder_hidden_states=image_embeds,
            encoder_attention_mask=image_attention_mask,
            return_dict=True,
        )
        query_output = query_outputs[0][:, : query_tokens.size(1), :]

        language_model_inputs = self.language_projection(query_output)
        language_model_inputs = language_model_inputs.reshape(batch_size, self.config.num_query_tokens * frames, -1)

        if return_dict:
            return language_model_inputs, vision_outputs, query_outputs
        return language_model_inputs

    def get_image_features(
        self,
        pixel_values: torch.FloatTensor,
        qformer_input_ids: torch.LongTensor,
        qformer_attention_mask: Optional[torch.LongTensor] = None,
        interpolate_pos_encoding: Optional[bool] = False,
        return_dict: Optional[bool] = False,
    ):
        # single images are not supported by the video model; use `get_video_features` instead
        pass

    def forward(
        self,
        pixel_values: torch.FloatTensor,
        qformer_input_ids: torch.FloatTensor,
        qformer_attention_mask: Optional[torch.LongTensor] = None,
        input_ids: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.LongTensor] = None,
        decoder_input_ids: Optional[torch.LongTensor] = None,
        decoder_attention_mask: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        labels: Optional[torch.LongTensor] = None,
        return_dict: Optional[bool] = None,
        interpolate_pos_encoding: bool = False,
        use_cache: Optional[bool] = None,
        **kwargs: Unpack[KwargsForCausalLM],
    ) -> Union[Tuple, InstructBlipVideoForConditionalGenerationModelOutput]:
        r"""
        ```python
        >>> from transformers import InstructBlipVideoProcessor, InstructBlipVideoForConditionalGeneration
        >>> import torch
        >>> from huggingface_hub import hf_hub_download
        >>> import av
        >>> import numpy as np

        >>> def read_video_pyav(container, indices):
        ...     '''
        ...     Decode the video with PyAV decoder.
        ...     Args:
        ...         container (`av.container.input.InputContainer`): PyAV container.
        ...         indices (`List[int]`): List of frame indices to decode.
        ...     Returns:
        ...         result (np.ndarray): np array of decoded frames of shape (num_frames, height, width, 3).
        ...     '''
        ...     frames = []
        ...     container.seek(0)
        ...     start_index = indices[0]
        ...     end_index = indices[-1]
        ...     for i, frame in enumerate(container.decode(video=0)):
        ...         if i > end_index:
        ...             break
        ...         if i >= start_index and i in indices:
        ...             frames.append(frame)
        ...     return np.stack([x.to_ndarray(format="rgb24") for x in frames])

        >>> model = InstructBlipVideoForConditionalGeneration.from_pretrained("Salesforce/instructblip-vicuna-7b", device_map="auto")
        >>> processor = InstructBlipVideoProcessor.from_pretrained("Salesforce/instructblip-vicuna-7b")

        >>> file_path = hf_hub_download(
        ...       repo_id="nielsr/video-demo", filename="eating_spaghetti.mp4", repo_type="dataset"
        ... )
        >>> container = av.open(file_path)

        >>> # sample uniformly 4 frames from the video
        >>> total_frames = container.streams.video[0].frames
        >>> indices = np.arange(0, total_frames, total_frames / 4).astype(int)
        >>> clip = read_video_pyav(container, indices)

        >>> prompt = "What is happening in the video?"
        >>> inputs = processor(text=prompt, images=clip, return_tensors="pt").to(model.device)

        >>> outputs = model.generate(
        ...     **inputs,
        ...     do_sample=False,
        ...     num_beams=5,
        ...     max_length=256,
        ...     repetition_penalty=1.5,
        ...     length_penalty=1.0,
        ... )
        >>> generated_text = processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
        >>> print(generated_text)
        "A person is eating a bowl of pasta, and they are using a fork to eat it. The person is sitting at a table, and the plate of pasta is on the table in front"
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        language_model_inputs, vision_outputs, query_outputs = self.get_video_features(
            pixel_values,
            qformer_input_ids=qformer_input_ids,
            qformer_attention_mask=qformer_attention_mask,
            interpolate_pos_encoding=interpolate_pos_encoding,
            return_dict=True,
        )
        vision_outputs = vision_outputs.to_tuple() if not return_dict else vision_outputs
        query_outputs = query_outputs.to_tuple() if not return_dict else query_outputs

        language_model_attention_mask = torch.ones(
            language_model_inputs.size()[:-1], dtype=torch.long, device=language_model_inputs.device
        )

        inputs_embeds = self.language_model.get_input_embeddings()(input_ids)
        if attention_mask is None:
            attention_mask = torch.ones_like(input_ids)

        # if the model already has a `video_token_id`, the prompt was expanded in processing
        # and the video embeddings are scattered into the placeholder positions
        if getattr(self.config, "video_token_id", None) is not None:
            special_image_mask = (input_ids == self.config.video_token_id).unsqueeze(-1).expand_as(inputs_embeds)
            inputs_embeds[special_image_mask] = language_model_inputs.flatten().to(inputs_embeds.device)
        else:
            logger.warning_once(
                "Expanding inputs for video tokens in InstructBLIPVideo should be done in processing. "
                "Please follow instruction here (https://gist.github.com/zucchini-nlp/65f22892b054dc0d68228af56fbeaac2) "
                "to update your InstructBLIPVideo model. Using processors without these attributes in the config is "
                "deprecated and will throw an error in v4.47."
            )
            inputs_embeds = torch.cat([language_model_inputs, inputs_embeds.to(language_model_inputs.device)], dim=1)
            attention_mask = torch.cat(
                [language_model_attention_mask, attention_mask.to(language_model_attention_mask.device)], dim=1
            )

        if self.config.use_decoder_only_language_model:
            outputs = self.language_model(
                inputs_embeds=inputs_embeds,
                attention_mask=attention_mask,
                output_attentions=output_attentions,
                output_hidden_states=output_hidden_states,
                return_dict=return_dict,
                use_cache=use_cache,
                **kwargs,
            )
            logits = outputs.logits if return_dict else outputs[0]
            loss = None
            if labels is not None:
                loss = self.loss_function(
                    logits=logits, labels=labels, vocab_size=self.config.text_config.vocab_size, **kwargs
                )
        else:
            outputs = self.language_model(
                inputs_embeds=inputs_embeds,
                attention_mask=attention_mask,
                decoder_input_ids=decoder_input_ids,
                decoder_attention_mask=decoder_attention_mask,
                output_attentions=output_attentions,
                output_hidden_states=output_hidden_states,
                return_dict=return_dict,
                labels=labels,
                use_cache=use_cache,
            )
            loss = outputs.loss if return_dict else outputs[0]
            logits = outputs.logits if return_dict else outputs[1]

        return InstructBlipVideoForConditionalGenerationModelOutput(
            loss=loss,
            logits=logits,
            vision_outputs=vision_outputs,
            qformer_outputs=query_outputs,
            language_model_outputs=outputs,
        )

    @torch.no_grad()
    def generate(
        self,
        pixel_values: torch.FloatTensor,
        qformer_input_ids: Optional[torch.LongTensor] = None,
        qformer_attention_mask: Optional[torch.LongTensor] = None,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.LongTensor] = None,
        interpolate_pos_encoding: bool = False,
        **generate_kwargs,
    ) -> torch.LongTensor:
        r"""
        Overrides `generate` function to be able to use the model as a conditional generator.

        Args:
            pixel_values (`torch.FloatTensor` of shape (batch_size, num_channels, height, width) or
                (batch_size, num_frames, num_channels, height, width)): Input images or videos to be processed.
            qformer_input_ids (`torch.LongTensor` of shape (batch_size, sequence_length), *optional*):
                The sequence used as a prompt to be fed to the Q-Former module.
            qformer_attention_mask (`torch.LongTensor` of shape (batch_size, sequence_length), *optional*):
                Mask to avoid performing attention on padding token indices.
            input_ids (`torch.LongTensor` of shape (batch_size, sequence_length), *optional*):
                The sequence used as a prompt for the generation.
            attention_mask (`torch.LongTensor` of shape (batch_size, sequence_length), *optional*):
                Mask to avoid performing attention on padding token indices.
            interpolate_pos_encoding (`bool`, *optional*, defaults to `False`):
                Whether to interpolate the positional encoding of the image embeddings.

        Returns:
            captions (list): A list of strings of length batch_size * num_captions.
        """
        if hasattr(self, "hf_device_map"):
            # preprocess for `accelerate`
            self._preprocess_accelerate()

        batch_size = pixel_values.shape[0]
        language_model_inputs, vision_outputs, query_outputs = self.get_video_features(
            pixel_values,
            qformer_input_ids=qformer_input_ids,
            qformer_attention_mask=qformer_attention_mask,
            interpolate_pos_encoding=interpolate_pos_encoding,
            return_dict=True,
        )
        language_attention_mask = torch.ones(
            language_model_inputs.size()[:-1], dtype=torch.long, device=language_model_inputs.device
        )

        if input_ids is None:
            start_tokens = [self.config.text_config.bos_token_id]
            if getattr(self.config, "video_token_id", None) is not None:
                # each of the 4 sampled frames contributes `num_query_tokens` placeholder tokens
                start_tokens = [self.config.video_token_id] * self.config.num_query_tokens * 4 + start_tokens
            input_ids = torch.tensor([start_tokens], dtype=torch.long, device=pixel_values.device)
            input_ids = input_ids.repeat(batch_size, 1)

        if attention_mask is None:
            attention_mask = torch.ones_like(input_ids)

        inputs_embeds = self.get_input_embeddings()(input_ids)

        if getattr(self.config, "video_token_id", None) is not None:
            special_image_mask = (input_ids == self.config.video_token_id).unsqueeze(-1).expand_as(inputs_embeds)
            inputs_embeds[special_image_mask] = language_model_inputs.flatten().to(inputs_embeds.device)
        else:
            logger.warning_once(
                "Expanding inputs for video tokens in InstructBLIPVideo should be done in processing. "
                "Please follow instruction here (https://gist.github.com/zucchini-nlp/65f22892b054dc0d68228af56fbeaac2) "
                "to update your InstructBLIPVideo model. Using processors without these attributes in the config is "
                "deprecated and will throw an error in v4.47."
            )
            inputs_embeds = torch.cat([language_model_inputs, inputs_embeds.to(language_model_inputs.device)], dim=1)
            attention_mask = torch.cat(
                [language_attention_mask, attention_mask.to(language_attention_mask.device)], dim=1
            )

            # add the video embeds length to max_length, so that the final max_length is counted only on token embeds
            if not self.language_model.config.is_encoder_decoder:
                generate_kwargs["max_length"] = (
                    generate_kwargs.get("max_length", 20) + language_model_inputs.shape[1] - 1
                )
                generate_kwargs["min_length"] = generate_kwargs.get("min_length", 0) + language_model_inputs.shape[1]

        inputs = {"inputs_embeds": inputs_embeds, "attention_mask": attention_mask}
        if not self.language_model.config.is_encoder_decoder:
            inputs["input_ids"] = input_ids

        outputs = self.language_model.generate(**inputs, **generate_kwargs)

        return outputs


__all__ = [
    "InstructBlipVideoConfig",
    "InstructBlipVideoQFormerConfig",
    "InstructBlipVideoVisionConfig",
    "InstructBlipVideoForConditionalGeneration",
    "InstructBlipVideoModel",
    "InstructBlipVideoPreTrainedModel",
    "InstructBlipVideoQFormerModel",
    "InstructBlipVideoVisionModel",
]