
     `i                        d dl Z d dlZd dlZd dlmZ d dlmZ d dlmZm	Z	m
Z
mZ d dlZddlmZ ddlmZmZ ddlmZ dd	lmZmZmZ dd
lmZmZ ddlmZmZmZm Z m!Z!m"Z"m#Z#m$Z$m%Z%m&Z&m'Z'm(Z(m)Z) ddl*m+Z+ ddl,m-Z- ddl.m/Z/m0Z0m1Z1m2Z2m3Z3m4Z4m5Z5m6Z6m7Z7  e&            rd dl8Z8 e(            rd dl9m:Z;  e)j<        e=          Z>dZ? e!de?           e-d           G d de                                  Z@ e"e@jA                  e@_A        e@jA        jB        .e@jA        jB        C                    ddd          e@jA        _B        dS dS )    N)deepcopy)partial)AnyCallableOptionalUnion   )custom_object_save)BatchFeatureget_size_dict)BaseImageProcessorFast)ChannelDimensionSizeDictvalidate_kwargs)UnpackVideosKwargs)IMAGE_PROCESSOR_NAMEPROCESSOR_NAMEVIDEO_PROCESSOR_NAME
TensorTypeadd_start_docstrings	copy_funcdownload_urlis_offline_modeis_remote_urlis_torch_availableis_torchcodec_availableis_torchvision_v2_availableloggingcached_file)requires)	
VideoInputVideoMetadatagroup_videos_by_shapeis_valid_video
load_videomake_batched_metadatamake_batched_videosreorder_videosto_channel_dimension_format)
functionala  
    Args:
        do_resize (`bool`, *optional*, defaults to `self.do_resize`):
            Whether to resize the video's (height, width) dimensions to the specified `size`. Can be overridden by the
            `do_resize` parameter in the `preprocess` method.
        size (`dict`, *optional*, defaults to `self.size`):
            Size of the output video after resizing. Can be overridden by the `size` parameter in the `preprocess`
            method.
        size_divisor (`int`, *optional*, defaults to `self.size_divisor`):
            The size by which to make sure both the height and width can be divided.
        default_to_square (`bool`, *optional*, defaults to `self.default_to_square`):
            Whether to default to a square video when resizing, if size is an int.
        resample (`PILImageResampling`, *optional*, defaults to `self.resample`):
            Resampling filter to use if resizing the video. Only has an effect if `do_resize` is set to `True`. Can be
            overridden by the `resample` parameter in the `preprocess` method.
        do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):
            Whether to center crop the video to the specified `crop_size`. Can be overridden by `do_center_crop` in the
            `preprocess` method.
        crop_size (`dict[str, int]` *optional*, defaults to `self.crop_size`):
            Size of the output video after applying `center_crop`. Can be overridden by `crop_size` in the `preprocess`
            method.
        do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
            Whether to rescale the video by the specified scale `rescale_factor`. Can be overridden by the
            `do_rescale` parameter in the `preprocess` method.
        rescale_factor (`int` or `float`, *optional*, defaults to `self.rescale_factor`):
            Scale factor to use if rescaling the video. Only has an effect if `do_rescale` is set to `True`. Can be
            overridden by the `rescale_factor` parameter in the `preprocess` method.
        do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
            Whether to normalize the video. Can be overridden by the `do_normalize` parameter in the `preprocess`
            method.
        image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`):
            Mean to use if normalizing the video. This is a float or list of floats the length of the number of
            channels in the video. Can be overridden by the `image_mean` parameter in the `preprocess` method.
        image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`):
            Standard deviation to use if normalizing the video. This is a float or list of floats the length of the
            number of channels in the video. Can be overridden by the `image_std` parameter in the `preprocess` method.
        do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):
            Whether to convert the video to RGB.
        video_metadata (`VideoMetadata`, *optional*):
            Metadata of the video containing information about total duration, fps and total number of frames.
        do_sample_frames (`bool`, *optional*, defaults to `self.do_sample_frames`):
            Whether to sample frames from the video before processing or to process the whole video.
        num_frames (`int`, *optional*, defaults to `self.num_frames`):
            Maximum number of frames to sample when `do_sample_frames=True`.
        fps (`int` or `float`, *optional*, defaults to `self.fps`):
            Target frames to sample per second when `do_sample_frames=True`.
        return_tensors (`str` or `TensorType`, *optional*):
            Returns stacked tensors if set to `pt`, otherwise returns a list of tensors.
        data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
            The channel dimension format for the output video. Can be one of:
            - `"channels_first"` or `ChannelDimension.FIRST`: video in (num_channels, height, width) format.
            - `"channels_last"` or `ChannelDimension.LAST`: video in (height, width, num_channels) format.
            - Unset: Use the channel dimension format of the input video.
        input_data_format (`ChannelDimension` or `str`, *optional*):
            The channel dimension format for the input video. If unset, the channel dimension format is inferred
            from the input video. Can be one of:
            - `"channels_first"` or `ChannelDimension.FIRST`: video in (num_channels, height, width) format.
            - `"channels_last"` or `ChannelDimension.LAST`: video in (height, width, num_channels) format.
            - `"none"` or `ChannelDimension.NONE`: video in (height, width) format.
        device (`torch.device`, *optional*):
            The device to process the videos on. If unset, the device is inferred from the input videos.
        return_metadata (`bool`, *optional*):
            Whether to return video metadata or not.
        z!Constructs a base VideoProcessor.)visiontorchvision)backendsc                       e Zd ZdZdZdZdZdZdZdZ	dZ
dZdZdZdZdZdZdZdZdZdZdZeZdgZdee         ddf fdZdefd	Zd
ddefdZ	 	 dAdede e!         de e"e!e#f                  fdZ$	 	 dAdede"ee%f         de e&         de e'         de(d         f
dZ)	 	 dAdede e"e*e+f                  de e*         de(d         fdZ, e-e.          dedee         defd            Z/	 dBde(d         de&de&de0de d         de&d e0d!e&d"e#d#e&d$e e"e#e(e#         f                  d%e e"e#e(e#         f                  d&e e"e*e1f                  defd'Z2e3	 	 	 	 	 dCd)e"e*e4j5        f         d*e e"e*e4j5        f                  d+e&d,e&d-e e"e*e&f                  d.e*fd/            Z6dDd0e"e*e4j5        f         d1e&fd2Z7e3d)e"e*e4j5        f         de8e%e*e9f         e%e*e9f         f         fd3            Z:e3d4e%e*e9f         fd5            Z;de%e*e9f         fd6Z<de*fd7Z=d8e"e*e4j5        f         fd9Z>d: Z?e3d;e"e*e4j5        f         fd<            Z@e3dEd>            ZAdBd?e"e*e(e*         e(e(e*                  f         fd@ZB xZCS )FBaseVideoProcessorNTgp?Fpixel_values_videoskwargsreturnc                 r   t                                                       |                    dd           | _        |                                D ]N\  }}	 t          | ||           # t          $ r*}t                              d| d| d|             |d }~ww xY w|                    d| j	                  }|*t          ||                    d| j                            nd | _	        |                    d| j                  }|t          |d	          nd | _        t          | j        j                                                  | _        | j        D ]\}|                    |          t          | |||                    /t          | |t'          t)          | |d                                ]d S )
Nprocessor_classz
Can't set z with value z for sizedefault_to_square)r7   r8   	crop_size)
param_name)super__init__pop_processor_classitemssetattrAttributeErrorloggererrorr7   r   r8   r9   listvalid_kwargs__annotations__keysmodel_valid_processing_keysgetr   getattr)selfr3   keyvalueerrr7   r9   	__class__s          w/home/jaya/work/projects/VOICE-AGENT/VIET/agent-env/lib/python3.11/site-packages/transformers/video_processing_utils.pyr<   zBaseVideoProcessor.__init__   s    &

+<d C C !,,.. 	 	JCc5))))!   M#MM5MMtMMNNN	
 zz&$),,  tvzzBUW[Wm7n7noooo 		
 JJ{DN;;	MVMby[IIIIhl ,00A0Q0V0V0X0X+Y+Y(3 	G 	GCzz#*c6#;////c8GD#t,D,D#E#EFFFF		G 	Gs   A))
B3%BBc                      | j         |fi |S N)
preprocess)rK   videosr3   s      rP   __call__zBaseVideoProcessor.__call__   s    tv00000    videoztorch.Tensorc                 :   t          j        |          }|j        d         dk    s&|ddddddf         dk                                     s|S |ddddddf         dz  }d|ddddddf         z
  dz  |ddddddf         |dddddddf         z  z   }|S )z
        Converts a video to RGB format.

        Args:
            video (`"torch.Tensor"`):
                The video to convert.

        Returns:
            `torch.Tensor`: The converted video.
           .N   g     o@r	   )Fgrayscale_to_rgbshapeany)rK   rW   alphas      rP   convert_to_rgbz!BaseVideoProcessor.convert_to_rgb   s     "5));r?ac1aaal(;c(A'F'F'H'HL c1aaal#e+U3aaa?++s2U3aaa?5KeTWY[Z[Y[]^]^]^`a`a`aTaNb5bbrV   metadata
num_framesfpsc                    ||t          d          ||n| j        }||n| j        }|j        }|4|2||j        t          d          t	          ||j        z  |z            }||k    rt          d| d| d          |,t          j        d|||z                                            }n't          j        d|                                          }|S )a%  
        Default sampling function which uniformly samples the desired number of frames between 0 and total number of frames.
        If `fps` is passed along with metadata, `fps` frames per second are sampled uniformty. Arguments `num_frames`
        and `fps` are mutually exclusive.

        Args:
            metadata (`VideoMetadata`):
                Metadata of the video containing information about total duration, fps and total number of frames.
            num_frames (`int`, *optional*):
                Maximum number of frames to sample. Defaults to `self.num_frames`.
            fps (`int` or `float`, *optional*):
                Target frames to sample per second. Defaults to `self.fps`.

        Returns:
            np.ndarray:
                Indices to sample video frames.
        Nzc`num_frames`, `fps`, and `sample_indices_fn` are mutually exclusive arguments, please use only one!zAsked to sample `fps` frames per second but no video metadata was provided which is required when sampling with `fps`. Please pass in `VideoMetadata` object or use a fixed `num_frames` per input videoz(Video can't be sampled. The `num_frames=z` exceeds `total_num_frames=z`. r   )
ValueErrorrc   rd   total_num_framesinttorcharange)rK   rb   rc   rd   r3   rg   indicess          rP   sample_framesz BaseVideoProcessor.sample_frames   s"   0 ?z5u   $.#9ZZt
_cc$(#4 #/8<#7 h   -<sBCCJ(((x:xxcsxxx   !l1&68H:8UVVZZ\\GGl1&677;;==GrV   rT   video_metadatado_sample_framessample_indices_fnc                 2   t          |          }t          ||          }t          |d                   rd|rbg }g }t          ||          D ]H\  }} ||          }	|	|_        |                    ||	                    |                    |           I|}|}n{t          |d                   sft          |d         t                    r1d |                     |          D             }|rt          d          n| 
                    ||          \  }}||fS )zB
        Decode input videos and sample frames if needed.
        )rm   r   )rb   c                 N    g | ]"}t          j        d  |D             d          #S )c                 6    g | ]}t          j        |          S  )r\   pil_to_tensor).0images     rP   
<listcomp>zKBaseVideoProcessor._decode_and_sample_videos.<locals>.<listcomp>.<listcomp>:  s"     L L LE!7!7 L L LrV   r   dim)ri   stack)ru   imagess     rP   rw   z@BaseVideoProcessor._decode_and_sample_videos.<locals>.<listcomp>9  sG        K L LV L L LRSTTT  rV   zUSampling frames from a list of images is not supported! Set `do_sample_frames=False`.ro   )r)   r(   r&   zipframes_indicesappend
isinstancerD   fetch_imagesrf   fetch_videos)
rK   rT   rm   rn   ro   sampled_videossampled_metadatarW   rb   rk   s
             rP   _decode_and_sample_videosz,BaseVideoProcessor._decode_and_sample_videos  sh    %V,,.vnUUU &)$$ 	h)9 	hN!#&v~#>#> 2 2x++X>>>*1'%%eGn555 ''1111#F-NNq	** 	h&)T** h "&"3"3F";";   $ $o  
 *.):):6Uf):)g)g&~%%rV   input_data_formatdevicec                 "   g }|D ]}t          |t          j                  rAt          |t          j        |          }t          j        |                                          }||	                    |          }|
                    |           |S )z:
        Prepare the input videos for processing.
        )r   npndarrayr+   r   FIRSTri   
from_numpy
contiguoustor   )rK   rT   r   r   processed_videosrW   s         rP   _prepare_input_videosz(BaseVideoProcessor._prepare_input_videosF  s      
	+ 
	+E%,, =3E;K;QSdee(//::<<!((##E****rV   c           	         t          |                                t          | j        j                                                  dgz              | j        j        D ]'}|                    |t          | |d                      (|                    d          }|                    d          }|                    d          }|                    d          }|rt          | j	        fi |nd }| 
                    ||||          \  }}|                     |||          } | j        di |} | j        di | |                    d	           |                    d
          }	 | j        dd|i|}
|	r||
d<   |
S )Nreturn_tensors)captured_kwargsvalid_processor_keysr   rn   r   rm   )rm   rn   ro   )rT   r   r   data_formatreturn_metadatarT   rs   )r   rG   rD   rE   rF   
setdefaultrJ   r=   r   rl   r   r   _further_process_kwargs_validate_preprocess_kwargs_preprocess)rK   rT   r3   
kwarg_namer   rn   r   rm   ro   r   preprocessed_videoss              rP   rS   zBaseVideoProcessor.preprocess]  s    	"KKMM!%d&7&G&L&L&N&N!O!OScRd!d	
 	
 	
 	
 +; 	K 	KJj'$
D*I*IJJJJ"JJ':;;!::&899H%%$455EU_GD$6AA&AAA[_!%!?!?)-/	 "@ "
 "
 ++6M^gm+nn--7777((226222 	

=!!! **%677.d.GGfGGG 	C4B 01""rV   do_convert_rgb	do_resizer7   interpolationzF.InterpolationModedo_center_cropr9   
do_rescalerescale_factordo_normalize
image_mean	image_stdr   c           	      .   t          |          \  }}i }|                                D ];\  }}|r|                     |          }|r|                     |||          }|||<   <t	          ||          }t          |          \  }}i }|                                D ]<\  }}|r|                     ||          }|                     |||	|
||          }|||<   =t	          ||          }|rt          j        |d          n|}t          d|i|          S )N)r7   r   r   rx   r2   )datatensor_type)
r%   r?   ra   resizer*   center_croprescale_and_normalizeri   rz   r   )rK   rT   r   r   r7   r   r   r9   r   r   r   r   r   r   r3   grouped_videosgrouped_videos_indexresized_videos_groupedr^   stacked_videosresized_videosprocessed_videos_groupedr   s                          rP   r   zBaseVideoProcessor._preprocess  su   $ 0EV/L/L,,!#%3%9%9%;%; 	; 	;!E> E!%!4!4^!D!D e!%^$Vc!d!d,:"5))'(>@TUU 0E^/T/T,,#% %3%9%9%;%; 	= 	=!E> M!%!1!1.)!L!L!77
NL*V_ N /=$U++)*BDXYYCQg5;'7Q????Wg"79I!JXfggggrV   mainpretrained_model_name_or_path	cache_dirforce_downloadlocal_files_onlytokenrevisionc                     ||d<   ||d<   ||d<   ||d<   |                     dd          }|-t          j        dt                     |t	          d          |}|||d	<    | j        |fi |\  }	} | j        |	fi |S )
a  
        Instantiate a type of [`~video_processing_utils.VideoProcessorBase`] from an video processor.

        Args:
            pretrained_model_name_or_path (`str` or `os.PathLike`):
                This can be either:

                - a string, the *model id* of a pretrained video hosted inside a model repo on
                  huggingface.co.
                - a path to a *directory* containing a video processor file saved using the
                  [`~video_processing_utils.VideoProcessorBase.save_pretrained`] method, e.g.,
                  `./my_model_directory/`.
                - a path or url to a saved video processor JSON *file*, e.g.,
                  `./my_model_directory/video_preprocessor_config.json`.
            cache_dir (`str` or `os.PathLike`, *optional*):
                Path to a directory in which a downloaded pretrained model video processor should be cached if the
                standard cache should not be used.
            force_download (`bool`, *optional*, defaults to `False`):
                Whether or not to force to (re-)download the video processor files and override the cached versions if
                they exist.
            resume_download:
                Deprecated and ignored. All downloads are now resumed by default when possible.
                Will be removed in v5 of Transformers.
            proxies (`dict[str, str]`, *optional*):
                A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
                'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request.
            token (`str` or `bool`, *optional*):
                The token to use as HTTP bearer authorization for remote files. If `True`, or not specified, will use
                the token generated when running `hf auth login` (stored in `~/.huggingface`).
            revision (`str`, *optional*, defaults to `"main"`):
                The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
                git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
                identifier allowed by git.


                <Tip>

                To test a pull request you made on the Hub, you can pass `revision="refs/pr/<pr_number>"`.

                </Tip>

            return_unused_kwargs (`bool`, *optional*, defaults to `False`):
                If `False`, then this function returns just the final video processor object. If `True`, then this
                functions returns a `Tuple(video_processor, unused_kwargs)` where *unused_kwargs* is a dictionary
                consisting of the key/value pairs whose keys are not video processor attributes: i.e., the part of
                `kwargs` which has not been used to update `video_processor` and is otherwise ignored.
            subfolder (`str`, *optional*, defaults to `""`):
                In case the relevant files are located inside a subfolder of the model repo on huggingface.co, you can
                specify the folder name here.
            kwargs (`dict[str, Any]`, *optional*):
                The values in kwargs of any keys which are video processor attributes will be used to override the
                loaded values. Behavior concerning key/value pairs whose keys are *not* video processor attributes is
                controlled by the `return_unused_kwargs` keyword parameter.

        Returns:
            A video processor of type [`~video_processing_utils.ImagVideoProcessorBase`].

        Examples:

        ```python
        # We can't instantiate directly the base class *VideoProcessorBase* so let's show the examples on a
        # derived class: *LlavaOnevisionVideoProcessor*
        video_processor = LlavaOnevisionVideoProcessor.from_pretrained(
            "llava-hf/llava-onevision-qwen2-0.5b-ov-hf"
        )  # Download video_processing_config from huggingface.co and cache.
        video_processor = LlavaOnevisionVideoProcessor.from_pretrained(
            "./test/saved_model/"
        )  # E.g. video processor (or model) was saved using *save_pretrained('./test/saved_model/')*
        video_processor = LlavaOnevisionVideoProcessor.from_pretrained("./test/saved_model/video_preprocessor_config.json")
        video_processor = LlavaOnevisionVideoProcessor.from_pretrained(
            "llava-hf/llava-onevision-qwen2-0.5b-ov-hf", do_normalize=False, foo=False
        )
        assert video_processor.do_normalize is False
        video_processor, unused_kwargs = LlavaOnevisionVideoProcessor.from_pretrained(
            "llava-hf/llava-onevision-qwen2-0.5b-ov-hf", do_normalize=False, foo=False, return_unused_kwargs=True
        )
        assert video_processor.do_normalize is False
        assert unused_kwargs == {"foo": False}
        ```r   r   r   r   use_auth_tokenNrThe `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.V`token` and `use_auth_token` are both specified. Please set only the argument `token`.r   )r=   warningswarnFutureWarningrf   get_video_processor_dict	from_dict)
clsr   r   r   r   r   r   r3   r   video_processor_dicts
             rP   from_pretrainedz"BaseVideoProcessor.from_pretrained  s    t ({#1 %5!"%z$4d;;%M E     l   #E#F7O'Cs'CDa'l'lek'l'l$fs}1<<V<<<rV   save_directorypush_to_hubc           	      ~   |                     dd          }|Ct          j        dt                     |                    d          t          d          ||d<   t          j                            |          rt          d| d          t          j
        |d	           |rw|                     d
d          }|                     d|                    t          j        j                  d                   } | j        |fi |}|                     |          }| j        t!          | ||            t          j                            |t$                    }|                     |           t(                              d|            |r-|                     |||||                    d                     |gS )aq  
        Save an video processor object to the directory `save_directory`, so that it can be re-loaded using the
        [`~video_processing_utils.VideoProcessorBase.from_pretrained`] class method.

        Args:
            save_directory (`str` or `os.PathLike`):
                Directory where the video processor JSON file will be saved (will be created if it does not exist).
            push_to_hub (`bool`, *optional*, defaults to `False`):
                Whether or not to push your model to the Hugging Face model hub after saving it. You can specify the
                repository you want to push to with `repo_id` (will default to the name of `save_directory` in your
                namespace).
            kwargs (`dict[str, Any]`, *optional*):
                Additional key word arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method.
        r   Nr   r   r   zProvided path (z#) should be a directory, not a fileT)exist_okcommit_messagerepo_id)configzVideo processor saved in )r   r   )r=   r   r   r   rI   rf   ospathisfileAssertionErrormakedirssplitsep_create_repo_get_files_timestamps_auto_classr
   joinr   to_json_filerB   info_upload_modified_files)	rK   r   r   r3   r   r   r   files_timestampsoutput_video_processor_files	            rP   save_pretrainedz"BaseVideoProcessor.save_pretrained(  s     $4d;;%M E   zz'"". l   -F7O7>>.)) 	h !f>!f!f!fggg
NT2222 	J#ZZ(8$??NjjN,@,@,M,Mb,QRRG'd'::6::G#99.II 't^DAAAA ')gll>CW&X&X#5666M0KMMNNN 	'' -jj)) (    ,,,rV   c                 :   |                     dd          |                     dd          |                     dd          |                     dd          |                     dd          |                     dd          }|                     d	d          |                     d
d          |                     dd          |                     dd          }|                     dd          }|-t          j        dt                     t	          d          |d|d||d<   t                      rst                              d           dt                    t          j
                                      }t          j
                                      r}d}nt                    r}t                    }nqt          }	 fdt          t           t"          fD             }	|	d         }n6# t$          $ r  t&          $ r t%          d d dt           d          w xY w	 t)          |dd          5 }
|
                                }ddd           n# 1 swxY w Y   t-          j        |          }|                    d|          }n&# t,          j        $ r t%          d | d!          w xY w|rt                              d"|            n t                              d"| d#|            ||fS )$a  
        From a `pretrained_model_name_or_path`, resolve to a dictionary of parameters, to be used for instantiating a
        video processor of type [`~video_processing_utils.VideoProcessorBase`] using `from_dict`.

        Parameters:
            pretrained_model_name_or_path (`str` or `os.PathLike`):
                The identifier of the pre-trained checkpoint from which we want the dictionary of parameters.
            subfolder (`str`, *optional*, defaults to `""`):
                In case the relevant files are located inside a subfolder of the model repo on huggingface.co, you can
                specify the folder name here.

        Returns:
            `tuple[Dict, Dict]`: The dictionary(ies) that will be used to instantiate the video processor object.
        r   Nr   Fresume_downloadproxiesr   r   r   r   	subfolder _from_pipeline
_from_autor   r   video processor)	file_typefrom_auto_classusing_pipelinez+Offline mode: forcing local_files_only=TrueTc                 P    g | ]"}t          |	
d           x	  #S )F)filenamer   r   r   r   r   r   
user_agentr   r   %_raise_exceptions_for_missing_entriesr    )ru   r   r   r   r   r   r   resolved_filer   r   r   r   r   s     rP   rw   z?BaseVideoProcessor.get_video_processor_dict.<locals>.<listcomp>  sp     2 2 2 )49%-&/+9$+,;-="''1%-&/BG* * *   !  "     rV   r   z Can't load video processor for 'z'. If you were trying to load it from 'https://huggingface.co/models', make sure you don't have a local directory with the same name. Otherwise, make sure 'z2' is the correct path to a directory containing a z filerutf-8encodingvideo_processorz"It looks like the config file at 'z' is not a valid JSON file.zloading configuration file z from cache at )r=   r   r   r   rf   r   rB   r   strr   r   isdirr   r   r   r   r   r   OSError	ExceptionopenreadjsonloadsrI   JSONDecodeError)r   r   r3   r   from_pipeliner   is_localresolved_video_processor_filevideo_processor_fileresolved_video_processor_filesreadertextr   r   r   r   r   r   r   r   r   r   r   s    `           @@@@@@@@@@rP   r   z+BaseVideoProcessor.get_video_processor_dicte  s/   $ JJ{D11	$4e<< **%6==**Y--

7D))$4d;;!::&8%@@::j$//JJ{B//	

#3T:: **\599%M E     l   #E#4YY
$+8J'( 	$%5 	$KKEFFF#(+,I(J(J%7==!>??7>>788 ,	,I)HH899 )	#@ ,89V,W,W))#7 $2 2 2 2 2 2 2 2 2 2 2 2 2 2%9;OQ_$`2 2 2.* 1Oq0Q--       K7T K K9VK K 0DK K K  
	3S7KKK %v{{}}% % % % % % % % % % % % % % %#':d#3#3 #7#;#;<MOc#d#d  # 	 	 	o5Rooo  	
  	KKU6SUUVVVVKKr.BrrSprr   $V++s<    2H 3I
J3 I=1J3 =JJ3 J-J3 3#Kr   c                 
   |                                 }|                    dd          }d|v rd|v r|                    d          |d<   d|v rd|v r|                    d          |d<    | di |}g }|                                D ];\  }}t          ||          r&t	          |||           |                    |           <|D ]}|                    |d           t                              d|            |r||fS |S )a  
        Instantiates a type of [`~video_processing_utils.VideoProcessorBase`] from a Python dictionary of parameters.

        Args:
            video_processor_dict (`dict[str, Any]`):
                Dictionary that will be used to instantiate the video processor object. Such a dictionary can be
                retrieved from a pretrained checkpoint by leveraging the
                [`~video_processing_utils.VideoProcessorBase.to_dict`] method.
            kwargs (`dict[str, Any]`):
                Additional parameters from which to initialize the video processor object.

        Returns:
            [`~video_processing_utils.VideoProcessorBase`]: The video processor object instantiated from those
            parameters.
        return_unused_kwargsFr7   r9   NzVideo processor rs   )copyr=   r?   hasattrr@   r   rB   r   )r   r   r3   r  r   	to_removerL   rM   s           rP   r   zBaseVideoProcessor.from_dict  sR   "  488::%zz*@%HH
 V*> > >+1::f+=+= (&  [4H%H%H06

;0G0G -#55 455 	 ,,.. 	& 	&JC,, &e444  %%% 	" 	"CJJsD!!!!888999 	#"F**""rV   c                     t          | j                  }|                    dd           |                    dd           | j        j        |d<   |S )z
        Serializes this instance to a Python dictionary.

        Returns:
            `dict[str, Any]`: Dictionary of all the attributes that make up this video processor instance.
        rH   N_valid_kwargs_namesvideo_processor_type)r   __dict__r=   rO   __name__)rK   outputs     rP   to_dictzBaseVideoProcessor.to_dict  sR     $-((

0$777

($///)-)@%&rV   c                 .   |                                  }|                                D ]6\  }}t          |t          j                  r|                                ||<   7|                    dd          }|||d<   t          j        |dd          dz   S )z
        Serializes this instance to a JSON string.

        Returns:
            `str`: String containing all the attributes that make up this feature_extractor instance in JSON format.
        r>   Nr6      T)indent	sort_keys
)	r  r?   r   r   r   tolistr=   r   dumps)rK   
dictionaryrL   rM   r>   s        rP   to_json_stringz!BaseVideoProcessor.to_json_string  s     \\^^
$**,, 	1 	1JC%,, 1"',,..
3 &>>*<dCC',<J()z*Q$???$FFrV   json_file_pathc                     t          |dd          5 }|                    |                                            ddd           dS # 1 swxY w Y   dS )z
        Save this instance to a JSON file.

        Args:
            json_file_path (`str` or `os.PathLike`):
                Path to the JSON file in which this image_processor instance's parameters will be saved.
        wr   r   N)r   writer  )rK   r  writers      rP   r   zBaseVideoProcessor.to_json_file+  s     .#888 	0FLL,,..///	0 	0 	0 	0 	0 	0 	0 	0 	0 	0 	0 	0 	0 	0 	0 	0 	0 	0s   (AAAc                 H    | j         j         d|                                  S )N )rO   r  r  )rK   s    rP   __repr__zBaseVideoProcessor.__repr__6  s'    .)CCD,?,?,A,ACCCrV   	json_filec                     t          |dd          5 }|                                }ddd           n# 1 swxY w Y   t          j        |          } | di |S )a  
        Instantiates a video processor of type [`~video_processing_utils.VideoProcessorBase`] from the path to a JSON
        file of parameters.

        Args:
            json_file (`str` or `os.PathLike`):
                Path to the JSON file containing the parameters.

        Returns:
            A video processor of type [`~video_processing_utils.VideoProcessorBase`]: The video_processor object
            instantiated from that JSON file.
        r   r   r   Nrs   )r   r   r   r   )r   r   r  r  r   s        rP   from_json_filez!BaseVideoProcessor.from_json_file9  s     )S7333 	!v;;==D	! 	! 	! 	! 	! 	! 	! 	! 	! 	! 	! 	! 	! 	! 	!#z$//s**)***s   488AutoVideoProcessorc                     t          |t                    s|j        }ddlmc m} t          ||          st          | d          || _        dS )a	  
        Register this class with a given auto class. This should only be used for custom video processors as the ones
        in the library are already mapped with `AutoVideoProcessor `.

        <Tip warning={true}>

        This API is experimental and may have some slight breaking changes in the next releases.

        </Tip>

        Args:
            auto_class (`str` or `type`, *optional*, defaults to `"AutoVideoProcessor "`):
                The auto class to register this new video processor with.
        r   Nz is not a valid auto class.)	r   r   r  transformers.models.automodelsautor  rf   r   )r   
auto_classauto_modules      rP   register_for_auto_classz*BaseVideoProcessor.register_for_auto_classL  sn      *c** 	-#,J666666666{J// 	I
GGGHHH$rV   video_url_or_urlsc                      d}t                      st          j        d           d}t          |t                    r#t	          t           fd|D                        S t          ||          S )z
        Convert a single or a list of urls into the corresponding `np.array` objects.

        If a single url is passed, the return value will be a single object. If a list is passed a list of objects is
        returned.
        
torchcodecz`torchcodec` is not installed and cannot be used to decode the video by default. Falling back to `torchvision`. Note that `torchvision` decoding is deprecated and will be removed in future versions. r.   c                 >    g | ]}                     |           S )r|   )r   )ru   xro   rK   s     rP   rw   z3BaseVideoProcessor.fetch_videos.<locals>.<listcomp>v  s.    sss\]d//EV/WWsssrV   )backendro   )r   r   r   r   rD   r}   r'   )rK   r+  ro   r0  s   ` ` rP   r   zBaseVideoProcessor.fetch_videosf  s     &(( 	$MI   $G'.. 	gsssssarssstuuu/TeffffrV   )NNrR   )NFFNr   )F)r#  )Dr  
__module____qualname__r   resampler   r   r7   size_divisorr8   r9   r   r   r   r   r   r   rn   rd   rc   rm   r   r   rE   model_input_namesr   r<   r   rU   r#   ra   r$   r   rh   r   floatrl   dictboolr   rD   r   r   r   r   r   BASE_VIDEO_PROCESSOR_DOCSTRINGrS   r   r   r   classmethodr   PathLiker   r   tupler   r   r   r  r  r   r  r"  r*  r   __classcell__)rO   s   @rP   r1   r1      s        KHJIDLIINJNLN
CJNOL./G!5 G$ G G G G G G>1L 1 1 1 1 
   8 %)+/	3 33 SM3 eCJ'(	3 3 3 3r ,004&& &&&& mT12&& #4.	&&
 $H-&& 
n	&& && && &&V EI $	     $E#/?*?$@A  	 
 
n	       . & &#&# &&# 
	&# &# &# &#l <@,h ,h^$,h ,h 	,h
 ,h   56,h ,h ,h ,h ,h ,h U5$u+#567,h E%e"456,h !sJ!78,h  
!,h ,h ,h ,h\  8<$!&,0o= o=',S"+-='>o= E#r{"234o= 	o=
 o= c4i()o= o= o= o= [o=b;- ;-eC4D.E ;-TX ;- ;- ;- ;-z s,,1#r{2B,Cs,	tCH~tCH~-	.s, s, s, [s,j *#T#s(^ *# *# *# [*#Xc3h    G G G G G*	05bk1A+B 	0 	0 	0 	0D D D +uS"+-='> + + + [+$ % % % [%2g geCcDcO4S.T g g g g g g g grV   r1   r   r#  zvideo processor file)objectobject_classobject_files)Dr   r   r   r  r   	functoolsr   typingr   r   r   r   numpyr   dynamic_module_utilsr
   image_processing_utilsr   r   image_processing_utils_fastr   image_utilsr   r   r   processing_utilsr   r   utilsr   r   r   r   r   r   r   r   r   r   r   r   r   	utils.hubr!   utils.import_utilsr"   video_utilsr#   r$   r%   r&   r'   r(   r)   r*   r+   ri   torchvision.transforms.v2r,   r\   
get_loggerr  rB   r9  r1   r   __doc__formatrs   rV   rP   <module>rQ     su     				              1 1 1 1 1 1 1 1 1 1 1 1     4 4 4 4 4 4        @ ? ? ? ? ?         
 3 2 2 2 2 2 2 2                              # " " " " " ( ( ( ( ( (
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  LLL   :999999 
	H	%	%A" H '"  
,---bg bg bg bg bg/ bg bg .-	 
bgJ "++=+I!J!J  !)5-?-K-S-Z-Z /CRh .[ . ."*** 65rV   