
"""PyTorch ViViT model."""

from typing import Callable, Optional

import torch
from torch import nn

from ...activations import ACT2FN
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, ImageClassifierOutput
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...processing_utils import Unpack
from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer
from ...utils import TransformersKwargs, auto_docstring, logging, torch_int
from ...utils.generic import can_return_tuple, check_model_inputs
from .configuration_vivit import VivitConfig


logger = logging.get_logger(__name__)


class VivitTubeletEmbeddings(nn.Module):
    """
    Construct Vivit Tubelet embeddings.

    This module turns a batch of videos of shape (batch_size, num_frames, num_channels, height, width) into a tensor of
    shape (batch_size, seq_len, hidden_size) to be consumed by a Transformer encoder.

    The seq_len (the number of patches) equals (number of frames // tubelet_size[0]) * (height // tubelet_size[1]) *
    (width // tubelet_size[2]).
    """

    def __init__(self, config: VivitConfig):
        super().__init__()
        self.num_frames = config.num_frames
        self.image_size = config.image_size
        self.patch_size = config.tubelet_size
        self.num_patches = (
            (self.image_size // self.patch_size[2])
            * (self.image_size // self.patch_size[1])
            * (self.num_frames // self.patch_size[0])
        )
        self.embed_dim = config.hidden_size

        self.projection = nn.Conv3d(
            config.num_channels, config.hidden_size, kernel_size=config.tubelet_size, stride=config.tubelet_size
        )

    def forward(self, pixel_values: torch.Tensor, interpolate_pos_encoding: bool = False) -> torch.Tensor:
        batch_size, num_frames, num_channels, height, width = pixel_values.shape
        if not interpolate_pos_encoding and (height != self.image_size or width != self.image_size):
            raise ValueError(
                f"Image image size ({height}*{width}) doesn't match model ({self.image_size}*{self.image_size})."
            )

        # permute to (batch_size, num_channels, num_frames, height, width)
        pixel_values = pixel_values.permute(0, 2, 1, 3, 4)

        x = self.projection(pixel_values)
        # flatten the (frames, height, width) patch grid into one sequence dimension
        x = x.flatten(2).transpose(1, 2)
        return x
ej        de
dej        fdZ xZS )VivitEmbeddingsz
    Vivit Embeddings.

    Creates embeddings from a video using VivitTubeletEmbeddings, adds CLS token and positional embeddings.
    r   c                    t                                                       t          j        t	          j        dd|j                            | _        t          |          | _	        t          j        t	          j        d| j	        j
        dz   |j                            | _        t          j        |j                  | _        |j        dd          | _        || _        d S )Nr   )r    r!   r   	ParameterrF   zerosr'   	cls_tokenr   patch_embeddingsr&   position_embeddingsDropouthidden_dropout_probdropoutr$   r%   r   r,   s     r/   r!   zVivitEmbeddings.__init__W   s    ek!Q8J&K&KLL 6v > >#%<K40<q@&BTUU$
 $
  z&"<== -abb1r0   
embeddingsr=   r>   r3   c                    |j         d         dz
  }| j        j         d         dz
  }t          j                                        s||k    r||k    r| j        S | j        ddddf         }| j        ddddf         }|j         d         }|| j        d         z  }	|| j        d         z  }
t          |dz            }|                    d|||          }|                    dddd          }t          j
                            ||	|
fdd	
          }|                    dddd                              dd|          }t          j        ||fd          S )a   
        This method allows to interpolate the pre-trained position encodings, to be able to use the model on higher resolution
        images. This method is also adapted to support torch.jit tracing.

        Adapted from:
        - https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174-L194, and
        - https://github.com/facebookresearch/dinov2/blob/e1277af2ba9496fbadf7aec6eba56e8d882d1e35/dinov2/models/vision_transformer.py#L179-L211
        r   Nr   g      ?r   r   bicubicF)sizemodealign_cornersdim)r7   rR   rF   jit
is_tracingr%   r   reshaper9   r   
functionalinterpolateviewcat)r-   rV   r=   r>   r&   num_positionsclass_pos_embedpatch_pos_embedr^   
new_height	new_widthsqrt_num_positionss               r/   r2   z(VivitEmbeddings.interpolate_pos_encodinge   s|    !&q)A-06q9A= y##%% 	,+*F*F6UZ??++2111bqb592111abb59r"tq11
T_Q//	&}c'9::)11!5GI[]`aa)11!Q1==-33i(	 4 
 
 *11!Q1==BB1b#NNy/?;CCCCr0   Fr1   r2   c                 0   |j         \  }}}}}|                     ||          }| j                            |ddg          }	t	          j        |	|fd          }|r||                     |||          z   }n
|| j        z   }|                     |          }|S )Nr2   r   r]   )	r7   rQ   rP   tilerF   re   r2   rR   rU   )
r-   r1   r2   r<   r"   r*   r=   r>   rV   
cls_tokenss
             r/   r@   zVivitEmbeddings.forward   s    >J>P;
Jfe**<Rj*kk
^((*a);<<
Y
J7Q???
 $ 	?#d&C&CJPVX]&^&^^JJ#d&>>J\\*--
r0   rA   )rB   rC   rD   rE   r   r!   rF   rG   intr2   rH   r@   rI   rJ   s   @r/   rL   rL   P   s         {      &D5< &D &DUX &D]b]i &D &D &D &DP EL D ]b]i        r0   rL           modulequerykeyvalueattention_maskscalingrU   c                    t          j        ||                    dd                    |z  }t          j                            |dt           j                                      |j                  }t          j        	                    ||| j
                  }|||z  }t          j        ||          }	|	                    dd                                          }	|	|fS )NrX   )r^   dtype)ptrainingr   r   )rF   matmulr;   r   rb   softmaxfloat32torz   rU   r|   
contiguous)
rr   rs   rt   ru   rv   rw   rU   kwargsattn_weightsattn_outputs
             r/   eager_attention_forwardr      s     <s}}R'<'<==GL =((2U](SSVVW\WbccL =((6?([[L !#n4,|U33K''1--88::K$$r0   c            	            e Zd Zdef fdZ	 ddej        deej                 deej        ej        f         fdZ	 xZ


class VivitSelfAttention(nn.Module):
    def __init__(self, config: VivitConfig):
        super().__init__()
        if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
            raise ValueError(
                f"The hidden size {config.hidden_size} is not a multiple of the number of attention heads "
                f"{config.num_attention_heads}."
            )

        self.config = config
        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        self.dropout_prob = config.attention_probs_dropout_prob
        self.scaling = self.attention_head_size**-0.5
        self.is_causal = False

        self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
        self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
        self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)

    def forward(
        self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor] = None
    ) -> tuple[torch.Tensor, torch.Tensor]:
        batch_size = hidden_states.shape[0]
        new_shape = batch_size, -1, self.num_attention_heads, self.attention_head_size

        key_layer = self.key(hidden_states).view(*new_shape).transpose(1, 2)
        value_layer = self.value(hidden_states).view(*new_shape).transpose(1, 2)
        query_layer = self.query(hidden_states).view(*new_shape).transpose(1, 2)

        attention_interface: Callable = eager_attention_forward
        if self.config._attn_implementation != "eager":
            attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]

        context_layer, attention_probs = attention_interface(
            self,
            query_layer,
            key_layer,
            value_layer,
            head_mask,
            is_causal=self.is_causal,
            scaling=self.scaling,
            dropout=0.0 if not self.training else self.dropout_prob,
        )

        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.reshape(new_context_layer_shape)

        return context_layer, attention_probs


class VivitSelfOutput(nn.Module):
    """
    The residual connection is defined in VivitLayer instead of here (as is the case with other models), due to the
    layernorm applied before each block.
    """

    def __init__(self, config: VivitConfig):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)

        return hidden_states


class VivitAttention(nn.Module):
    def __init__(self, config: VivitConfig):
        super().__init__()
        self.attention = VivitSelfAttention(config)
        self.output = VivitSelfOutput(config)
        self.pruned_heads = set()

    def prune_heads(self, heads: set[int]) -> None:
        if len(heads) == 0:
            return
        heads, index = find_pruneable_heads_and_indices(
            heads, self.attention.num_attention_heads, self.attention.attention_head_size, self.pruned_heads
        )

        # Prune linear layers
        self.attention.query = prune_linear_layer(self.attention.query, index)
        self.attention.key = prune_linear_layer(self.attention.key, index)
        self.attention.value = prune_linear_layer(self.attention.value, index)
        self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)

        # Update hyper params and store pruned heads
        self.attention.num_attention_heads = self.attention.num_attention_heads - len(heads)
        self.attention.all_head_size = self.attention.attention_head_size * self.attention.num_attention_heads
        self.pruned_heads = self.pruned_heads.union(heads)

    def forward(self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor] = None) -> torch.Tensor:
        self_attn_output, _ = self.attention(hidden_states, head_mask)
        attention_output = self.output(self_attn_output, hidden_states)
        return attention_output


class VivitIntermediate(nn.Module):
    def __init__(self, config: VivitConfig):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.intermediate_act_fn(hidden_states)
        hidden_states = self.dropout(hidden_states)

        return hidden_states


class VivitOutput(nn.Module):
    def __init__(self, config: VivitConfig):
        super().__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)

        hidden_states = hidden_states + input_tensor

        return hidden_states
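

# Note on the residual wiring (see the VivitSelfOutput docstring above): the attention and MLP
# residual additions happen inside VivitLayer below, because each sub-block is preceded by its
# own LayerNorm (pre-norm), matching the EncoderBlock of the original scenic/vivit implementation.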


class VivitLayer(GradientCheckpointingLayer):
    """This corresponds to the EncoderBlock class in the scenic/vivit implementation."""

    def __init__(self, config: VivitConfig):
        super().__init__()
        self.chunk_size_feed_forward = config.chunk_size_feed_forward
        self.seq_len_dim = 1
        self.attention = VivitAttention(config)
        self.intermediate = VivitIntermediate(config)
        self.output = VivitOutput(config)
        self.layernorm_before = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.layernorm_after = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

    def forward(self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor] = None) -> torch.Tensor:
        # in Vivit, layernorm is applied before self-attention
        hidden_states_norm = self.layernorm_before(hidden_states)
        attention_output = self.attention(hidden_states_norm, head_mask)

        # first residual connection
        hidden_states = attention_output + hidden_states

        # in Vivit, layernorm is also applied after self-attention
        layer_output = self.layernorm_after(hidden_states)
        layer_output = self.intermediate(layer_output)

        # second residual connection is done here
        layer_output = self.output(layer_output, hidden_states)

        return layer_output


class VivitEncoder(nn.Module):
    def __init__(self, config: VivitConfig):
        super().__init__()
        self.config = config
        self.layer = nn.ModuleList([VivitLayer(config) for _ in range(config.num_hidden_layers)])
        self.gradient_checkpointing = False

    def forward(self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor] = None) -> BaseModelOutput:
        for i, layer_module in enumerate(self.layer):
            layer_head_mask = head_mask[i] if head_mask is not None else None
            hidden_states = layer_module(hidden_states, layer_head_mask)

        return BaseModelOutput(last_hidden_state=hidden_states)


class VivitPooler(nn.Module):
    def __init__(self, config: VivitConfig):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.activation = nn.Tanh()

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        # We "pool" the model by simply taking the hidden state corresponding to the first token.
        first_token_tensor = hidden_states[:, 0]
        pooled_output = self.dense(first_token_tensor)
        pooled_output = self.activation(pooled_output)
        return pooled_output


@auto_docstring
class VivitPreTrainedModel(PreTrainedModel):
    config: VivitConfig
    base_model_prefix = "vivit"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True
    _no_split_modules = []
    _supports_sdpa = True
    _supports_flash_attn = True
    _supports_flex_attn = True
    _supports_attention_backend = True

    _can_record_outputs = {
        "hidden_states": VivitLayer,
        "attentions": VivitSelfAttention,
    }

    def _init_weights(self, module):
        """Initialize the weights"""
        if isinstance(module, (nn.Linear, nn.Conv3d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        elif isinstance(module, VivitEmbeddings):
            module.cls_token.data.zero_()
            module.position_embeddings.data.zero_()


@auto_docstring
class VivitModel(VivitPreTrainedModel):
    def __init__(self, config: VivitConfig, add_pooling_layer: bool = True):
        r"""
        add_pooling_layer (bool, *optional*, defaults to `True`):
            Whether to add a pooling layer
        """
        super().__init__(config)
        self.config = config

        self.embeddings = VivitEmbeddings(config)
        self.encoder = VivitEncoder(config)

        self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.pooler = VivitPooler(config) if add_pooling_layer else None

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self) -> VivitTubeletEmbeddings:
        return self.embeddings.patch_embeddings

    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model.

        Args:
            heads_to_prune:
                dict of {layer_num: list of heads to prune in this layer}
        """
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    @check_model_inputs
    @auto_docstring
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        interpolate_pos_encoding: bool = False,
        **kwargs: Unpack[TransformersKwargs],
    ) -> BaseModelOutputWithPooling:
        r"""
        Examples:

        ```python
        >>> import av
        >>> import numpy as np

        >>> from transformers import VivitImageProcessor, VivitModel
        >>> from huggingface_hub import hf_hub_download

        >>> np.random.seed(0)


        >>> def read_video_pyav(container, indices):
        ...     '''
        ...     Decode the video with PyAV decoder.
        ...     Args:
        ...         container (`av.container.input.InputContainer`): PyAV container.
        ...         indices (`list[int]`): List of frame indices to decode.
        ...     Returns:
        ...         result (np.ndarray): np array of decoded frames of shape (num_frames, height, width, 3).
        ...     '''
        ...     frames = []
        ...     container.seek(0)
        ...     start_index = indices[0]
        ...     end_index = indices[-1]
        ...     for i, frame in enumerate(container.decode(video=0)):
        ...         if i > end_index:
        ...             break
        ...         if i >= start_index and i in indices:
        ...             frames.append(frame)
        ...     return np.stack([x.to_ndarray(format="rgb24") for x in frames])


        >>> def sample_frame_indices(clip_len, frame_sample_rate, seg_len):
        ...     '''
        ...     Sample a given number of frame indices from the video.
        ...     Args:
        ...         clip_len (`int`): Total number of frames to sample.
        ...         frame_sample_rate (`int`): Sample every n-th frame.
        ...         seg_len (`int`): Maximum allowed index of sample's last frame.
        ...     Returns:
        ...         indices (`list[int]`): List of sampled frame indices
        ...     '''
        ...     converted_len = int(clip_len * frame_sample_rate)
        ...     end_idx = np.random.randint(converted_len, seg_len)
        ...     start_idx = end_idx - converted_len
        ...     indices = np.linspace(start_idx, end_idx, num=clip_len)
        ...     indices = np.clip(indices, start_idx, end_idx - 1).astype(np.int64)
        ...     return indices


        >>> # video clip consists of 300 frames (10 seconds at 30 FPS)
        >>> file_path = hf_hub_download(
        ...     repo_id="nielsr/video-demo", filename="eating_spaghetti.mp4", repo_type="dataset"
        ... )
        >>> container = av.open(file_path)

        >>> # sample 32 frames
        >>> indices = sample_frame_indices(clip_len=32, frame_sample_rate=1, seg_len=container.streams.video[0].frames)
        >>> video = read_video_pyav(container=container, indices=indices)

        >>> image_processor = VivitImageProcessor.from_pretrained("google/vivit-b-16x2-kinetics400")
        >>> model = VivitModel.from_pretrained("google/vivit-b-16x2-kinetics400")

        >>> # prepare video for the model
        >>> inputs = image_processor(list(video), return_tensors="pt")

        >>> # forward pass
        >>> outputs = model(**inputs)
        >>> last_hidden_states = outputs.last_hidden_state
        >>> list(last_hidden_states.shape)
        [1, 3137, 768]
        ```"""
        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        # Prepare head mask if needed: 1.0 in head_mask indicates we keep the head
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding)

        encoder_outputs = self.encoder(embedding_output, head_mask=head_mask)
        sequence_output = encoder_outputs.last_hidden_state
        sequence_output = self.layernorm(sequence_output)
        pooled_output = self.pooler(sequence_output) if self.pooler is not None else None

        return BaseModelOutputWithPooling(last_hidden_state=sequence_output, pooler_output=pooled_output)
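

# The attention backends advertised on VivitPreTrainedModel can be selected at load time; a
# hedged usage sketch (the checkpoint name is taken from the docstring examples, and
# `attn_implementation` is the standard `from_pretrained` argument):
#
#     model = VivitModel.from_pretrained("google/vivit-b-16x2-kinetics400", attn_implementation="sdpa")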


@auto_docstring(
    custom_intro="""
    ViViT Transformer model with a video classification head on top (a linear layer on top of the final hidden state of the
    [CLS] token) e.g. for Kinetics-400.

    <Tip>

    Note that it's possible to fine-tune ViT on higher resolution images than the ones it has been trained on, by
    setting `interpolate_pos_encoding` to `True` in the forward of the model. This will interpolate the pre-trained
    position embeddings to the higher resolution.

    </Tip>
    """
)
class VivitForVideoClassification(VivitPreTrainedModel):
    def __init__(self, config: VivitConfig):
        super().__init__(config)

        self.num_labels = config.num_labels
        self.vivit = VivitModel(config, add_pooling_layer=False)

        # Classifier head
        self.classifier = nn.Linear(config.hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()

        # Initialize weights and apply final processing
        self.post_init()

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        interpolate_pos_encoding: bool = False,
        **kwargs: Unpack[TransformersKwargs],
    ) -> ImageClassifierOutput:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).

        Examples:

        ```python
        >>> import av
        >>> import numpy as np
        >>> import torch

        >>> from transformers import VivitImageProcessor, VivitForVideoClassification
        >>> from huggingface_hub import hf_hub_download

        >>> np.random.seed(0)


        >>> def read_video_pyav(container, indices):
        ...     '''
        ...     Decode the video with PyAV decoder.
        ...     Args:
        ...         container (`av.container.input.InputContainer`): PyAV container.
        ...         indices (`list[int]`): List of frame indices to decode.
        ...     Returns:
        ...         result (np.ndarray): np array of decoded frames of shape (num_frames, height, width, 3).
        ...     '''
        ...     frames = []
        ...     container.seek(0)
        ...     start_index = indices[0]
        ...     end_index = indices[-1]
        ...     for i, frame in enumerate(container.decode(video=0)):
        ...         if i > end_index:
        ...             break
        ...         if i >= start_index and i in indices:
        ...             frames.append(frame)
        ...     return np.stack([x.to_ndarray(format="rgb24") for x in frames])


        >>> def sample_frame_indices(clip_len, frame_sample_rate, seg_len):
        ...     '''
        ...     Sample a given number of frame indices from the video.
        ...     Args:
        ...         clip_len (`int`): Total number of frames to sample.
        ...         frame_sample_rate (`int`): Sample every n-th frame.
        ...         seg_len (`int`): Maximum allowed index of sample's last frame.
        ...     Returns:
        ...         indices (`list[int]`): List of sampled frame indices
        ...     '''
        ...     converted_len = int(clip_len * frame_sample_rate)
        ...     end_idx = np.random.randint(converted_len, seg_len)
        ...     start_idx = end_idx - converted_len
        ...     indices = np.linspace(start_idx, end_idx, num=clip_len)
        ...     indices = np.clip(indices, start_idx, end_idx - 1).astype(np.int64)
        ...     return indices


        >>> # video clip consists of 300 frames (10 seconds at 30 FPS)
        >>> file_path = hf_hub_download(
        ...     repo_id="nielsr/video-demo", filename="eating_spaghetti.mp4", repo_type="dataset"
        ... )
        >>> container = av.open(file_path)

        >>> # sample 32 frames
        >>> indices = sample_frame_indices(clip_len=32, frame_sample_rate=4, seg_len=container.streams.video[0].frames)
        >>> video = read_video_pyav(container=container, indices=indices)

        >>> image_processor = VivitImageProcessor.from_pretrained("google/vivit-b-16x2-kinetics400")
        >>> model = VivitForVideoClassification.from_pretrained("google/vivit-b-16x2-kinetics400")

        >>> inputs = image_processor(list(video), return_tensors="pt")

        >>> with torch.no_grad():
        ...     outputs = model(**inputs)
        ...     logits = outputs.logits

        >>> # model predicts one of the 400 Kinetics-400 classes
        >>> predicted_label = logits.argmax(-1).item()
        >>> print(model.config.id2label[predicted_label])
        LABEL_116
        ```"""
        outputs = self.vivit(
            pixel_values,
            head_mask=head_mask,
            interpolate_pos_encoding=interpolate_pos_encoding,
            **kwargs,
        )
        sequence_output = outputs.last_hidden_state

        logits = self.classifier(sequence_output[:, 0, :])

        loss = None
        if labels is not None:
            loss = self.loss_function(labels, logits, self.config, **kwargs)

        return ImageClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


__all__ = ["VivitModel", "VivitPreTrainedModel", "VivitForVideoClassification"]
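

# Minimal fine-tuning sketch (illustrative; assumes the Kinetics-400 checkpoint used in the
# docstring examples, a `pixel_values` tensor of shape (batch, 32, 3, 224, 224) produced by
# VivitImageProcessor, and integer class `labels`):
#
#     model = VivitForVideoClassification.from_pretrained("google/vivit-b-16x2-kinetics400")
#     outputs = model(pixel_values=pixel_values, labels=labels)
#     outputs.loss.backward()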