
import json
import logging
import os
import platform
from abc import ABCMeta, abstractmethod
from typing import (
    Any,
    Callable,
    Collection,
    Dict,
    List,
    Optional,
    Tuple,
    Type,
    Union,
)

import gymnasium as gym
import numpy as np
import tree  # pip install dm_tree
from gymnasium.spaces import Box
from packaging import version

import ray
import ray.cloudpickle as pickle
from ray.actor import ActorHandle
from ray.rllib.models.action_dist import ActionDistribution
from ray.rllib.models.catalog import ModelCatalog
from ray.rllib.models.modelv2 import ModelV2
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.policy.view_requirement import ViewRequirement
from ray.rllib.utils.annotations import (
    OldAPIStack,
    OverrideToImplementCustomLogic,
    OverrideToImplementCustomLogic_CallToSuperRecommended,
    is_overridden,
)
from ray.rllib.utils.checkpoints import (
    CHECKPOINT_VERSION,
    get_checkpoint_info,
    try_import_msgpack,
)
from ray.rllib.utils.deprecation import DEPRECATED_VALUE, deprecation_warning
from ray.rllib.utils.exploration.exploration import Exploration
from ray.rllib.utils.framework import try_import_tf, try_import_torch
from ray.rllib.utils.from_config import from_config
from ray.rllib.utils.numpy import convert_to_numpy
from ray.rllib.utils.serialization import (
    deserialize_type,
    space_from_dict,
    space_to_dict,
)
from ray.rllib.utils.spaces.space_utils import (
    get_base_struct_from_space,
    get_dummy_batch_for_space,
    unbatch,
)
from ray.rllib.utils.tensor_dtype import get_np_dtype
from ray.rllib.utils.tf_utils import get_tf_eager_cls_if_necessary
from ray.rllib.utils.typing import (
    AgentID,
    AlgorithmConfigDict,
    ModelGradients,
    ModelWeights,
    PolicyID,
    PolicyState,
    T,
    TensorStructType,
    TensorType,
)
from ray.train import Checkpoint

tf1, tf, tfv = try_import_tf()
torch, _ = try_import_torch()

logger = logging.getLogger(__name__)


@OldAPIStack
class PolicySpec:
    """A policy spec used in the "config.multiagent.policies" specification dict.

    As values (keys are the policy IDs (str)). E.g.:
    config:
        multiagent:
            policies: {
                "pol1": PolicySpec(None, Box, Discrete(2), {"lr": 0.0001}),
                "pol2": PolicySpec(config={"lr": 0.001}),
            }
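
    A usage sketch (hypothetical spaces and learning rate; assumes a standard
    multi-agent config as above):

    .. testcode::
        :skipif: True

        from gymnasium.spaces import Box, Discrete

        spec = PolicySpec(
            policy_class=None,  # None -> use the Algorithm's default policy class.
            observation_space=Box(-1.0, 1.0, (4,)),
            action_space=Discrete(2),
            config={"lr": 0.0001},
        )
        # `get_state()`/`from_state()` round-trip the spec as a plain dict.
        restored = PolicySpec.from_state(spec.get_state())
        assert restored == spec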
    """

    def __init__(
        self,
        policy_class=None,
        observation_space=None,
        action_space=None,
        config=None,
    ):
        self.policy_class = policy_class
        self.observation_space = observation_space
        self.action_space = action_space
        self.config = config

    def __eq__(self, other: "PolicySpec"):
        return (
            self.policy_class == other.policy_class
            and self.observation_space == other.observation_space
            and self.action_space == other.action_space
            and self.config == other.config
        )

    def get_state(self) -> Dict[str, Any]:
        """Returns the state of a `PolicySpec` as a dict."""
        return {
            "policy_class": self.policy_class,
            "observation_space": self.observation_space,
            "action_space": self.action_space,
            "config": self.config,
        }

    @classmethod
    def from_state(cls, state: Dict[str, Any]) -> "PolicySpec":
        """Builds a `PolicySpec` from a state."""
        policy_spec = cls()
        policy_spec.__dict__.update(state)
        return policy_spec

    def serialize(self) -> Dict:
        from ray.rllib.algorithms.registry import get_policy_class_name

        # Try to figure out a durable name for this policy's class; fall back
        # to the raw class object (with a warning) for unregistered custom
        # policies.
        cls = get_policy_class_name(self.policy_class)
        if cls is None:
            logger.warning(
                f"Can not figure out a durable policy name for "
                f"{self.policy_class}. You are probably trying to checkpoint "
                "a custom policy. Raw policy class may cause problems when "
                "the checkpoint needs to be loaded in the future. To fix "
                "this, make sure you add your custom policy in "
                "rllib.algorithms.registry.POLICIES."
            )
            cls = self.policy_class

        return {
            "policy_class": cls,
            "observation_space": space_to_dict(self.observation_space),
            "action_space": space_to_dict(self.action_space),
            "config": self.config,
        }

    @classmethod
    def deserialize(cls, spec: Dict) -> "PolicySpec":
        if isinstance(spec["policy_class"], str):
            # Try to recover the actual policy class from its durable name.
            from ray.rllib.algorithms.registry import get_policy_class

            policy_class = get_policy_class(spec["policy_class"])
        elif isinstance(spec["policy_class"], type):
            policy_class = spec["policy_class"]
        else:
            raise AttributeError(
                f"Unknown policy class spec {spec['policy_class']}"
            )

        return cls(
            policy_class=policy_class,
            observation_space=space_from_dict(spec["observation_space"]),
            action_space=space_from_dict(spec["action_space"]),
            config=spec["config"],
        )


@OldAPIStack
class Policy(metaclass=ABCMeta):
    """RLlib's base class for all Policy implementations.

    Policy is the abstract superclass for all DL-framework specific sub-classes
    (e.g. TFPolicy or TorchPolicy). It exposes APIs to

    1. Compute actions from observation (and possibly other) inputs.

    2. Manage the Policy's NN model(s), like exporting and loading their weights.

    3. Postprocess a given trajectory from the environment or other input via the
        `postprocess_trajectory` method.

    4. Compute losses from a train batch.

    5. Perform updates from a train batch on the NN-models (this normally includes loss
        calculations) either:

        a. in one monolithic step (`learn_on_batch`)

        b. via batch pre-loading, then n steps of actual loss computations  and updates
            (`load_batch_into_buffer` + `learn_on_loaded_batch`).
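
    A minimal interaction sketch tying the numbered APIs above together
    (assumes an already-built framework-specific sub-class instance, e.g. a
    PPO policy, plus a valid `obs` and `train_batch`):

    .. testcode::
        :skipif: True

        # 1. Compute a single action for one observation.
        action, state_outs, extra_fetches = policy.compute_single_action(obs)

        # 2. Export/restore model weights.
        weights = policy.get_weights()
        policy.set_weights(weights)

        # 4. + 5. Compute losses and update the NN-model(s) in one step.
        learn_stats = policy.learn_on_batch(train_batch)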
    r<   r=   r>   c                    || _         || _        |                    d          | _        t	          |          | _        t	          |          | _        || _        | j                            d          | _        ddl	m
} | j                            d          }t          ||          r |            | _        nqt          |t          t          f          rF	  t          | j                            d                                | _        n# t           $ r Y nw xY w |            | _        d| _        d| _        d| _        |                                  d| _        d| _        d| _        dS )aC  Initializes a Policy instance.

        Args:
            observation_space: Observation space of the policy.
            action_space: Action space of the policy.
            config: A complete Algorithm/Policy config dict. For the default
                config keys and values, see rllib/algorithm/algorithm.py.
        __policy_id	frameworkr   )RLlibCallback	callbacksNF)r<   r=   get_Policy__policy_idr'   observation_space_structaction_space_structr>   ri   ray.rllib.callbacks.callbacksrj   rZ   rk   r[   r\   r$   	Exceptionglobal_timestepnum_grad_updates
dist_classinit_view_requirements%_model_init_state_automatically_addedagent_connectorsaction_connectors)r?   r<   r=   r>   rj   rk   s         r@   rA   zPolicy.__init__   s    ->'3!::m44 )CCT(U(U%#=l#K#K +155 	@?????KOOK00	i// 
	>&Y[[DNN	C;// 	>32BKOOK003 3 3 3     /<mooDN %&%& +/ 	##%%% 6;2 !%!%s   4D	 	
DDN
checkpoint
policy_idsrF   c                 0   t          |           }|d         dk    rddlm} i }|d         t          j        d          k     rt          |d         d          5 }t          j        |          }d	d	d	           n# 1 swxY w Y   t          j        |d
                   }|d         }|	                                D ]\  }	}
|d         |	         }|
                    |d         |d                   }|                    d|i           |
                    d|i           t                              |
          ||	<   n|d         |d         D ]}|||v rt          t          j                            |d         d|                    }|d         dk    sJ t          |d         d          5 }t          j        |          }
d	d	d	           n# 1 swxY w Y   t                              |
          ||<   |S d	}|                    d          dk    rt%          d          }t          |d         d          5 }||                    |          }nt          j        |          }d	d	d	           n# 1 swxY w Y   t                              |          S )a  Creates new Policy instance(s) from a given Policy or Algorithm checkpoint.

        Note: This method must remain backward compatible from 2.1.0 on, wrt.
        checkpoints created with Ray 2.0.0 or later.

        Args:
            checkpoint: The path (str) to a Policy or Algorithm checkpoint directory
                or an AIR Checkpoint (Policy or Algorithm) instance to restore
                from.
                If checkpoint is a Policy checkpoint, `policy_ids` must be None
                and only the Policy in that checkpoint is restored and returned.
                If checkpoint is an Algorithm checkpoint and `policy_ids` is None,
                will return a list of all Policy objects found in
                the checkpoint, otherwise a list of those policies in `policy_ids`.
            policy_ids: List of policy IDs to extract from a given Algorithm checkpoint.
                If None and an Algorithm checkpoint is provided, will restore all
                policies found in that checkpoint. If a Policy checkpoint is given,
                this arg must be None.

        Returns:
            An instantiated Policy, if `checkpoint` is a Policy checkpoint. A dict
            mapping PolicyID to Policies, if `checkpoint` is an Algorithm checkpoint.
            In the latter case, returns all policies within the Algorithm if
            `policy_ids` is None, else a dict of only those Policies that are in
            `policy_ids`.
        r\   	Algorithmr   )r|   checkpoint_versionz1.0
state_filerbNworkerrJ   policy_specspolicy_configr>   rO   rz   checkpoint_dirpoliciesrf   formatmsgpackTerror)r   ray.rllib.algorithms.algorithmr|   r   Versionopenpickleloadloadsitemsmerge_algorithm_configsrM   rf   rP   ospathjoinrl   r   )ry   rz   checkpoint_infor|   r   frJ   worker_statepolicy_statespidpolicy_stateserialized_policy_specr   	policy_idpolicy_checkpoint_infor   s                   r@   from_checkpointzPolicy.from_checkpoint  sw   > .j99 6"k11@@@@@@H 34wu7M7MMM/,7>> +!"KNNE+ + + + + + + + + + + + + + +  &|E(O<< ,W 5)6)<)<)>)> D D%C-9.-I#-N*$-$E$E$_57Mh7W% %M +118]2KLLL ''8N(OPPP$*$5$5l$C$CHSMMD !.:!0!> N NI!)Y*-D-D1DGLL /0@ A * ) 2 2.  6f=IIII!"8"FMM :QR+1;q>>L: : : : : : : : : : : : : : :.4.?.?.M.M+O G""8,,	99,4888ol3T:: +a&#LLOOEE"KNNE	+ + + + + + + + + + + + + + +
 $$U+++s6   A::A>A>-GG	G	:-I33I7:I7rJ   c                 t   |                      d          }|t          d          t                              |          }t	          |j        |j                  }|j        d         dk    rddlm} |	                    |           S  ||j
        |j        |j                  }|                    |            |S )a:  Recovers a Policy from a state object.

        The `state` of an instantiated Policy can be retrieved by calling its
        `get_state` method. This only works for the V2 Policy classes (EagerTFPolicyV2,
        SynamicTFPolicyV2, and TorchPolicyV2). It contains all information necessary
        to create the Policy. No access to the original code (e.g. configs, knowledge of
        the policy's class, etc..) is needed.

        Args:
            state: The state to recover a new Policy instance from.

        Returns:
            A new Policy instance.
        rO   NzJNo `policy_spec` key was found in given `state`! Cannot create new Policy.ri   tfr   )TFPolicy)rl   
ValueErrorr7   r^   r+   r;   r>   ray.rllib.policy.tf_policyr   _tf1_from_state_helperr<   r=   	set_state)rJ   serialized_pol_specpol_specactual_classr   
new_policys         r@   rP   zPolicy.from_stateg  s      /4ii.F.F&,   ))*=>>4!O
 

 ?;'4//;;;;;;225999 "\ &!O
 

 	U###rB   c                     |                                  }t          | d          s	|| _        dS |                                D ]\  }}|| j        vr
|| j        |<   dS )zMaximal view requirements dict for `learn_on_batch()` and
        `compute_actions` calls.
        Specific policies can override this function to provide custom
        list of view requirements.
        view_requirementsN)_get_default_view_requirementshasattrr   r   )r?   	view_reqskvs       r@   ru   zPolicy.init_view_requirements  sy     7799	t011 	2%.D"""!)) 2 21D22201D*1-2 2rB   c                     d | j         j                                        D             d | j         j                                        D             dS )z&Get metrics on timing from connectors.c                 0    i | ]\  }}|d z   d|j         z  S _msi  mean.0nametimers      r@   
<dictcomp>z0Policy.get_connector_metrics.<locals>.<dictcomp>  s9     ! ! !D% udUZ/! ! !rB   c                 0    i | ]\  }}|d z   d|j         z  S r   r   r   s      r@   r   z0Policy.get_connector_metrics.<locals>.<dictcomp>  s9     " " "D% udUZ/" " "rB   )rw   rx   )rw   timersr   rH   s    r@   get_connector_metricszPolicy.get_connector_metrics  sm    ! !#'#8#?#E#E#G#G! ! !" "#'#8#?#E#E#G#G" " "	
 	
 		
rB   c                 r    | j                             |           | j                            |           dS )z3Reset action- and agent-connectors for this policy.)env_idN)rw   resetrx   )r?   r   s     r@   reset_connectorszPolicy.reset_connectors  s<    ##6#222$$F$33333rB   )prev_actionprev_rewardinfo
input_dictepisodeexploretimestepobsr   r   r   r   r   r   c                B   |`t           j        |i}|t          |          D ]\  }}||d| <   |||t           j        <   |||t           j        <   |||t           j        <   t          j        d |          }d}||g}|                     t          |          |||	          }t          |t                    s|}g }i }n|\  }}}t          |          }t          |          dk    sJ |d         }|t          j        d |          t          j        d |          fS )	a  Computes and returns a single (B=1) action value.

        Takes an input dict (usually a SampleBatch) as its main data input.
        This allows for using this method in case a more complex input pattern
        (view requirements) is needed, for example when the Model requires the
        last n observations, the last m actions/rewards, or a combination
        of any of these.
        Alternatively, in case no complex inputs are required, takes a single
        `obs` values (and possibly single state values, prev-action/reward
        values, etc..).

        Args:
            obs: Single observation.
            state: List of RNN state inputs, if any.
            prev_action: Previous action value, if any.
            prev_reward: Previous reward, if any.
            info: Info object, if any.
            input_dict: A SampleBatch or input dict containing the
                single (unbatched) Tensors to compute actions. If given, it'll
                be used instead of `obs`, `state`, `prev_action|reward`, and
                `info`.
            episode: This provides access to all of the internal episode state,
                which may be useful for model-based or multi-agent algorithms.
            explore: Whether to pick an exploitation or
                exploration action
                (default: None -> use self.config["explore"]).
            timestep: The current (sampling) time step.

        Keyword Args:
            kwargs: Forward compatibility placeholder.

        Returns:
            Tuple consisting of the action, the list of RNN state outputs (if
            any), and a dictionary of extra features (if any).
        N	state_in_c                     | dk    r|nJt           r/t          |t           j                  r|                    d          nt	          j        |d          S )Nseq_lensr   )torchrZ   Tensor	unsqueezenpexpand_dims)pss     r@   <lambda>z.Policy.compute_single_action.<locals>.<lambda>  sR    
??  *'5<88*Q[[^^^^Aq)) rB   )r   episodesr   r      r   c                     | d         S Nr   rd   xs    r@   r   z.Policy.compute_single_action.<locals>.<lambda>$  
    1 rB   c                     | d         S r   rd   r   s    r@   r   z.Policy.compute_single_action.<locals>.<lambda>%  r   rB   )r   OBS	enumeratePREV_ACTIONSPREV_REWARDSINFOStreemap_structure_with_pathcompute_actions_from_input_dictrZ   tupler)   lenmap_structure)r?   r   rJ   r   r   r   r   r   r   r   kwargsir   r   outsingle_action	state_outbatched_actions                     r@   compute_single_actionzPolicy.compute_single_action  s   h %/3/J %e,, 4 4DAq23J1//&7B
;34&7B
;3404
;,- 1  	
 	

 yH22":..	 3 
 
 #u%% 	4MIDD /2+NIt#N33M=!!Q&&&&%a( ~~y99~~t44
 	
rB   c           
      8   d |                                 D             } | j        |t          j                 |f|                    t          j                  |                    t          j                  |                    t          j                  |||d|S )a  Computes actions from collected samples (across multiple-agents).

        Takes an input dict (usually a SampleBatch) as its main data input.
        This allows for using this method in case a more complex input pattern
        (view requirements) is needed, for example when the Model requires the
        last n observations, the last m actions/rewards, or a combination
        of any of these.

        Args:
            input_dict: A SampleBatch or input dict containing the Tensors
                to compute actions. `input_dict` already abides to the
                Policy's as well as the Model's view requirements and can
                thus be passed to the Model as-is.
            explore: Whether to pick an exploitation or exploration
                action (default: None -> use self.config["explore"]).
            timestep: The current (sampling) time step.
            episodes: This provides access to all of the internal episodes'
                state, which may be useful for model-based or multi-agent
                algorithms.

        Keyword Args:
            kwargs: Forward compatibility placeholder.

        Returns:
            actions: Batch of output actions, with shape like
                [BATCH_SIZE, ACTION_SHAPE].
            state_outs: List of RNN state output
                batches, if any, each with shape [BATCH_SIZE, STATE_SIZE].
            info: Dictionary of extra feature batches, if any, with shape like
                {"f1": [BATCH_SIZE, ...], "f2": [BATCH_SIZE, ...]}.
        c                 B    g | ]\  }}|                     d           |S )state_in)
startswith)r   r   r   s      r@   
<listcomp>z:Policy.compute_actions_from_input_dict.<locals>.<listcomp>Q  s-    VVVtq!Q\\*=U=UVVVVrB   )prev_action_batchprev_reward_batch
info_batchr   r   r   )r   compute_actionsr   r   rl   r   r   r   )r?   r   r   r   r   r   state_batchess          r@   r   z&Policy.compute_actions_from_input_dict(  s    R WVz'7'7'9'9VVV#t#{'

 )nn[-EFF(nn[-EFF!~~k&788

 

 

 

 
	
rB   	obs_batchr   r   r   r   r   c	                     t           )aF  Computes actions for the current policy.

        Args:
            obs_batch: Batch of observations.
            state_batches: List of RNN state input batches, if any.
            prev_action_batch: Batch of previous action values.
            prev_reward_batch: Batch of previous rewards.
            info_batch: Batch of info objects.
            episodes: List of Episode objects, one for each obs in
                obs_batch. This provides access to all of the internal
                episode state, which may be useful for model-based or
                multi-agent algorithms.
            explore: Whether to pick an exploitation or exploration action.
                Set to None (default) for using the value of
                `self.config["explore"]`.
            timestep: The current (sampling) time step.

        Keyword Args:
            kwargs: Forward compatibility placeholder

        Returns:
            actions: Batch of output actions, with shape like
                [BATCH_SIZE, ACTION_SHAPE].
            state_outs (List[TensorType]): List of RNN state output
                batches, if any, each with shape [BATCH_SIZE, STATE_SIZE].
            info (List[dict]): Dictionary of extra feature batches, if any,
                with shape like
                {"f1": [BATCH_SIZE, ...], "f2": [BATCH_SIZE, ...]}.
        NotImplementedError)
r?   r   r   r   r   r   r   r   r   r   s
             r@   r   zPolicy.compute_actions^  s    T "!rB   Tactionsactions_normalizedin_trainingc                     t           )aE  Computes the log-prob/likelihood for a given action and observation.

        The log-likelihood is calculated using this Policy's action
        distribution class (self.dist_class).

        Args:
            actions: Batch of actions, for which to retrieve the
                log-probs/likelihoods (given all other inputs: obs,
                states, ..).
            obs_batch: Batch of observations.
            state_batches: List of RNN state input batches, if any.
            prev_action_batch: Batch of previous action values.
            prev_reward_batch: Batch of previous rewards.
            actions_normalized: Is the given `actions` already normalized
                (between -1.0 and 1.0) or not? If not and
                `normalize_actions=True`, we need to normalize the given
                actions first, before calculating log likelihoods.
            in_training: Whether to use the forward_train() or forward_exploration() of
                the underlying RLModule.
        Returns:
            Batch of log probs/likelihoods, with shape: [BATCH_SIZE].
        r   )r?   r   r   r   r   r   r   r   s           r@   compute_log_likelihoodszPolicy.compute_log_likelihoods  s    @ "!rB   sample_batchother_agent_batchesc                     |S )a$  Implements algorithm-specific trajectory postprocessing.

        This will be called on each trajectory fragment computed during policy
        evaluation. Each fragment is guaranteed to be only from one episode.
        The given fragment may or may not contain the end of this episode,
        depending on the `batch_mode=truncate_episodes|complete_episodes`,
        `rollout_fragment_length`, and other settings.

        Args:
            sample_batch: batch of experiences for the policy,
                which will contain at most one episode trajectory.
            other_agent_batches: In a multi-agent env, this contains a
                mapping of agent ids to (policy, agent_batch) tuples
                containing the policy and experiences of the other agents.
            episode: An optional multi-agent episode object to provide
                access to all of the internal episode state, which may
                be useful for model-based or multi-agent algorithms.

        Returns:
            The postprocessed sample batch.
        rd   )r?   r  r  r   s       r@   postprocess_trajectoryzPolicy.postprocess_trajectory  s
    > rB   modelrt   train_batchc                     t           )a  Loss function for this Policy.

        Override this method in order to implement custom loss computations.

        Args:
            model: The model to calculate the loss(es).
            dist_class: The action distribution class to sample actions
                from the model's outputs.
            train_batch: The input batch on which to calculate the loss.

        Returns:
            Either a single loss tensor or a list of loss tensors.
        r   )r?   r  rt   r  s       r@   losszPolicy.loss  s
    " "!rB   samplesc                 `    |                      |          \  }}|                     |           |S )a  Perform one learning update, given `samples`.

        Either this method or the combination of `compute_gradients` and
        `apply_gradients` must be implemented by subclasses.

        Args:
            samples: The SampleBatch object to learn from.

        Returns:
            Dictionary of extra metadata from `compute_gradients()`.

        .. testcode::
            :skipif: True

            policy, sample_batch = ...
            policy.learn_on_batch(sample_batch)
        )compute_gradientsapply_gradients)r?   r  grads	grad_infos       r@   learn_on_batchzPolicy.learn_on_batch  s6    (  11'::yU###rB   replay_actorr   c                 :   t          j        |j                            |                    }|i S t	          | d          rFt          | j                  dk    r.|                     |d           |                     dd          S | 	                    |          S )a  Samples a batch from given replay actor and performs an update.

        Args:
            replay_actor: The replay buffer actor to sample from.
            policy_id: The ID of this policy.

        Returns:
            Dictionary of extra metadata from `compute_gradients()`.
        )r   Ndevicesr   r   )buffer_index)offsetr  )
rayrl   replayremoter   r   r  load_batch_into_bufferlearn_on_loaded_batchr  )r?   r  r   batchs       r@   !learn_on_batch_from_replay_bufferz(Policy.learn_on_batch_from_replay_buffer  s    " +22Y2GGHH=I 4## 	.DL(9(9A(=(=''A'>>>--QQ-GGG&&u---rB   r   r  r  c                     t           )a\  Bulk-loads the given SampleBatch into the devices' memories.

        The data is split equally across all the Policy's devices.
        If the data is not evenly divisible by the batch size, excess data
        should be discarded.

        Args:
            batch: The SampleBatch to load.
            buffer_index: The index of the buffer (a MultiGPUTowerStack) to use
                on the devices. The number of buffers on each device depends
                on the value of the `num_multi_gpu_tower_stacks` config key.

        Returns:
            The number of tuples loaded per device.
        r   )r?   r  r  s      r@   r  zPolicy.load_batch_into_buffer  
      "!rB   c                     t           )a  Returns the number of currently loaded samples in the given buffer.

        Args:
            buffer_index: The index of the buffer (a MultiGPUTowerStack)
                to use on the devices. The number of buffers on each device
                depends on the value of the `num_multi_gpu_tower_stacks` config
                key.

        Returns:
            The number of tuples loaded per device.
        r   )r?   r  s     r@   "get_num_samples_loaded_into_bufferz)Policy.get_num_samples_loaded_into_buffer'  
     "!rB   r  c                     t           )a  Runs a single step of SGD on an already loaded data in a buffer.

        Runs an SGD step over a slice of the pre-loaded batch, offset by
        the `offset` argument (useful for performing n minibatch SGD
        updates repeatedly on the same, already pre-loaded data).

        Updates the model weights based on the averaged per-device gradients.

        Args:
            offset: Offset into the preloaded data. Used for pre-loading
                a train-batch once to a device, then iterating over
                (subsampling through) this batch n times doing minibatch SGD.
            buffer_index: The index of the buffer (a MultiGPUTowerStack)
                to take the already pre-loaded data from. The number of buffers
                on each device depends on the value of the
                `num_multi_gpu_tower_stacks` config key.

        Returns:
            The outputs of extra_ops evaluated over the batch.
        r   )r?   r  r  s      r@   r  zPolicy.learn_on_loaded_batch5  s
    * "!rB   postprocessed_batchc                     t           )a  Computes gradients given a batch of experiences.

        Either this in combination with `apply_gradients()` or
        `learn_on_batch()` must be implemented by subclasses.

        Args:
            postprocessed_batch: The SampleBatch object to use
                for calculating gradients.

        Returns:
            grads: List of gradient output values.
            grad_info: Extra policy-specific info values.
        r   )r?   r$  s     r@   r  zPolicy.compute_gradientsL  r  rB   	gradientsc                     t           )a%  Applies the (previously) computed gradients.

        Either this in combination with `compute_gradients()` or
        `learn_on_batch()` must be implemented by subclasses.

        Args:
            gradients: The already calculated gradients to apply to this
                Policy.
        r   )r?   r&  s     r@   r  zPolicy.apply_gradients^  
     "!rB   c                     t           )a  Returns model weights.

        Note: The return value of this method will reside under the "weights"
        key in the return value of Policy.get_state(). Model weights are only
        one part of a Policy's state. Other state information contains:
        optimizer variables, exploration state, and global state vars such as
        the sampling timestep.

        Returns:
            Serializable copy or view of model weights.
        r   rH   s    r@   get_weightszPolicy.get_weightsj  r"  rB   weightsc                     t           )aO  Sets this Policy's model's weights.

        Note: Model weights are only one part of a Policy's state. Other
        state information contains: optimizer variables, exploration state,
        and global state vars such as the sampling timestep.

        Args:
            weights: Serializable copy or view of model weights.
        r   )r?   r+  s     r@   set_weightszPolicy.set_weightsx  r(  rB   c                 4    | j                                         S )zReturns the state of this Policy's exploration component.

        Returns:
            Serializable information on the `self.exploration` object.
        )explorationrI   rH   s    r@   get_exploration_statezPolicy.get_exploration_state  s     ))+++rB   c                     dS )zWhether this Policy holds a recurrent Model.

        Returns:
            True if this Policy has an RNN-based Model.
        Frd   rH   s    r@   is_recurrentzPolicy.is_recurrent  s	     urB   c                     dS )zThe number of internal states needed by the RNN-Model of the Policy.

        Returns:
            int: The number of RNN internal states kept by this Policy's Model.
        r   rd   rH   s    r@   num_state_tensorszPolicy.num_state_tensors  s	     qrB   c                     g S )zReturns initial RNN state for the current policy.

        Returns:
            List[TensorType]: Initial RNN state for the current policy.
        rd   rH   s    r@   get_initial_statezPolicy.get_initial_state  s	     	rB   c                 p   |                                  | j        | j        d}t          t	          |           | j        | j        | j                  }|                                |d<   i }| j	        r| j	        
                                |d<   | j        r| j        
                                |d<   ||d<   |S )a  Returns the entire current state of this Policy.

        Note: Not to be confused with an RNN model's internal state.
        State includes the Model(s)' weights, optimizer weights,
        the exploration component's state, as well as global variables, such
        as sampling timesteps.

        Note that the state may contain references to the original variables.
        This means that you may need to deepcopy() the state before mutating it.

        Returns:
            Serialized local state.
        )r+  rr   rs   r:   rO   agentactionconnector_configs)r*  rr   rs   r7   r\   r<   r=   r>   rV   rw   to_staterx   )r?   rJ   rO   r:  s       r@   rI   zPolicy.get_state  s    " ''))#3 $ 5
 
 !d"4*;	
 
 
  +4466m   	J)-)>)G)G)I)Ig&! 	L*.*@*I*I*K*Kh'%6!"rB   c                    ddl m} |                    di           }d|v rd || |d                   | _        t                              d           t                              | j                            d                     d|v rf || |d                   | _        t                              d	           t                              | j                            d                     d
S d
S )zRestore agent and action connectors if configs available.

        Args:
            state: The new state to set this policy to. Can be
                obtained by calling `self.get_state()`.
        r   )restore_connectors_for_policyr:  r8  zrestoring agent connectors:   )indentationr9  zrestoring action connectors:N)ray.rllib.connectors.utilr=  rl   rw   rT   debug__str__rx   )r?   rJ   r=  r:  s       r@   restore_connectorszPolicy.restore_connectors  s    	LKKKKK!II&92>>'''$A$A'0% %D! LL6777LL.6616EEFFF(((%B%B'1& &D" LL7888LL/77A7FFGGGGG )(rB   c                    d|v rt                               |d                   }|j        ;|j        | j        k    r+t                              d|j         d| j         d           |j        ;|j        | j        k    r+t                              d|j         d| j         d           |j        r|j        | _        |                     |d                    |                     |           dS )	zRestores the entire current state of this Policy from `state`.

        Args:
            state: The new state to set this policy to. Can be
                obtained by calling `self.get_state()`.
        rO   Nz+`observation_space` in given policy state (z2) does not match this Policy's observation space (z).z&`action_space` in given policy state (z-) does not match this Policy's action space (r+  )	r7   r^   r<   rT   rU   r=   r>   r-  rC  )r?   rJ   rO   s      r@   r   zPolicy.set_state  s1    E!!$00}1EFFK -91T5KKKE"4E E*.*@E E E   (4,0AAA;"/; ;%)%6; ; ;   ! 1)0 	y)***&&&&&rB   funcc                      || g|R i |S )ah  Calls the given function with this Policy instance.

        Useful for when the Policy class has been converted into a ActorHandle
        and the user needs to execute some functionality (e.g. add a property)
        on the underlying policy object.

        Args:
            func: The function to call, with this Policy as first
                argument, followed by args, and kwargs.
            args: Optional additional args to pass to the function call.
            kwargs: Optional additional kwargs to pass to the function call.

        Returns:
            The return value of the function call.
        rd   )r?   rE  argsr   s       r@   applyzPolicy.apply  s$    * tD*4***6***rB   global_varsc                     | j         dk    r!| j                            |d                    n|d         | _        |                    d          }|	|| _        dS dS )zCalled on an update to global vars.

        Args:
            global_vars: Global variables by str key, broadcast from the
                driver.
        tf2r   rs   N)ri   rr   assignrl   rs   )r?   rI  rs   s      r@   on_global_var_updatezPolicy.on_global_var_update$  sl     >U"" ''J(?@@@@#.z#:D &??+=>>'$4D!!! ('rB   cloudpickle)r   checkpoint_format
export_dirr   rO  c          
         |t           k    rt          dd           |dvrt          d| d          ||                                 }t	          j        |d           |d	k    rht          |d
<   d}t          t          j        	                    ||          d          5 }t          j        ||           ddd           n# 1 swxY w Y   nddlm} t          d          }t          t                    |d
<   |                    |d         d                   |d         d<   d}t          t          j        	                    ||          d          5 }|                    ||           ddd           n# 1 swxY w Y   t          t          j        	                    |d          d          5 }t#          j        dt          |d
                   ||t$          j        t$          j        d|           ddd           n# 1 swxY w Y   | j        d         r5|                     t          j        	                    |d                     dS dS )a  Exports Policy checkpoint to a local directory and returns an AIR Checkpoint.

        Args:
            export_dir: Local writable directory to store the AIR Checkpoint
                information into.
            policy_state: An optional PolicyState to write to disk. Used by
                `Algorithm.save_checkpoint()` to save on the additional
                `self.get_state()` calls of its different Policies.
            checkpoint_format: Either one of 'cloudpickle' or 'msgpack'.

        .. testcode::
            :skipif: True

            from ray.rllib.algorithms.ppo import PPOTorchPolicy
            policy = PPOTorchPolicy(...)
            policy.export_checkpoint("/tmp/export_dir")
        z-Policy.export_checkpoint(filename_prefix=...)T)oldr   )rN  r   zValue of `checkpoint_format` (z,) must either be 'cloudpickle' or 'msgpack'!N)exist_okrN  r}   zpolicy_state.pklzw+br   )AlgorithmConfigr   rO   r>   zpolicy_state.msgpckzrllib_checkpoint.jsonwrf   )r\   r}   r   r~   ray_version
ray_commitexport_native_model_filesr  )r   r   r   rI   r   makedirsr   r   r   r   r   dump%ray.rllib.algorithms.algorithm_configrT  r   r[   _serialize_dictjsonr  __version__
__commit__r>   export_model)	r?   rP  filename_prefixr   rO  r~   r   rT  r   s	            r@   export_checkpointzPolicy.export_checkpoint8  s)   6 ...C    $>>>.1B . . .  
 >>++L 	J....--1CL-.+Jbgll:z::EBB -aL!,,,- - - - - - - - - - - - - - - NMMMMM(t444G145G1H1HL-.4C4S4S]+H55 5L'1 /Jbgll:z::EBB .a\1---. . . . . . . . . . . . . . . "',,z+BCCSII 	QI$*-l;O.P*Q*Q/",#&?"%.  
 
 
	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 ;23 	Abgll:w??@@@@@	A 	As7    CC	CE88E<?E<1AHHHonnxc                     t           )al  Exports the Policy's Model to local directory for serving.

        Note: The file format will depend on the deep learning framework used.
        See the child classed of Policy and their `export_model`
        implementations for more details.

        Args:
            export_dir: Local writable directory.
            onnx: If given, will export model in ONNX format. The
                value of this parameter set the ONNX OpSet version to use.

        Raises:
            ValueError: If a native DL-framework based model (e.g. a keras Model)
                cannot be saved to disk for various reasons.
        r   )r?   rP  rc  s      r@   r`  zPolicy.export_model  r  rB   import_filec                     t           )zeImports Policy from local file.

        Args:
            import_file: Local readable file.
        r   )r?   re  s     r@   import_model_from_h5zPolicy.import_model_from_h5  s
     "!rB   ztf1.Sessionc                     dS )a\  Returns tf.Session object to use for computing actions or None.

        Note: This method only applies to TFPolicy sub-classes. All other
        sub-classes should expect a None to be returned from this method.

        Returns:
            The tf Session to use for computing actions and losses with
                this policy or None.
        Nrd   rH   s    r@   get_sessionzPolicy.get_session  s	     trB   c                 (    t          j                    S )zReturns the computer's network name.

        Returns:
            The computer's networks name or an empty string, if the network
            name could not be determined.
        )platformnoderH   s    r@   get_hostzPolicy.get_host  s     }rB   c                    | j                             dd          }| j                             dd          }t          j        j                                        t          j        j        j        k    r|sd}n!|dk    r| j         d         }n| j         d         }|dk    rd}nd                    ||rd	nd
          }t          	                    d                    |dk    r|nd|                     |S )zDecide on the number of CPU/GPU nodes this policy should run on.

        Return:
            0 if policy should run on CPU. >0 if policy should run on 1 or
            more GPUs.
        worker_indexr   
_fake_gpusFnum_gpusnum_gpus_per_env_runnerCPUz{} {}z	fake-GPUsGPUsz!Policy (worker={}) running on {}.local)
r>   rl   r  _privater   _mode
LOCAL_MODEr   rT   r   )r?   
worker_idx	fake_gpusrq  devs        r@   _get_num_gpus_for_policyzPolicy._get_num_gpus_for_policy  s     [__^Q77
KOOL%88	 L%%''3<+>+III J HH1__{:.HH {#<=Hq==CC..)+O;;PPC/66(1nn

'3 	
 	
 	
 rB   c                    t          | dd          | j        S t          t          | j                            dddi          | j        | j        t          | dd          | j                            dd          | j                            d	d          t          | d
| j                            d
d                              }|S )aR  Creates the Policy's Exploration object.

        This method only exists b/c some Algorithms do not use TfPolicy nor
        TorchPolicy, but inherit directly from Policy. Others inherit from
        TfPolicy w/o using DynamicTFPolicy.

        Returns:
            Exploration: The Exploration object to be used by this Policy.
        r/  Nexploration_configr\   StochasticSamplingr  num_env_runnersr   ro  ri   r   )r=   r   r  num_workersro  ri   )getattrr/  r"   r   r>   rl   r=   )r?   r/  s     r@   _create_explorationzPolicy._create_exploration  s     4--9##!KOO06;O2PQQ*+$..(91==;;dKd1S1STT	
 	
 	
 rB   c                     t           j        t          | j                  t           j        t          t           j        d| j        d          t           j        t          | j        d          t           j        t          t           j        d| j                  t           j        t                      t           j	        t          t           j        d          t           j
        t                      t           j        t                      t           j        t          d	          t           j        t                      t           j        t                      t           j        t                      t           j        t                      iS )
aV  Returns a default ViewRequirements dict.

        Note: This is the base/maximum requirement dict, from which later
        some requirements will be subtracted again automatically to streamline
        data collection, batch creation, and data transfer.

        Returns:
            ViewReqDict: The default view requirements dict.
        spacer   F)data_colshiftr  used_for_compute_actionsr  r  )r  r  r  )r  r  r  )r   r   r   r<   NEXT_OBSACTIONSr=   r   REWARDSr   TERMINATEDS
TRUNCATEDSr   EPS_ID	UNROLL_IDAGENT_INDEXr2   rH   s    r@   r   z%Policy._get_default_view_requirements  s+    O_43IJJJ /$,).	# # # '%" " "
 $o$,Bd>O' ' ' !2!2 $o$,B' ' ' #_%6%6"O$5$5NNN 1 1!?#4#4#_%6%6M?,,9
 	
rB   auto_remove_unneeded_view_reqsc           
         | j                             dd          rdS d| _        t          t	          | j                            }t          t          | j        dz  d          | j         d                   }| 	                    |          | _
        |                     | j
                   d}|                     | j
        |          \  }}}| j                                        D ]\  }	}
|	| j
        j        vrd|
_        |                                D ]q\  }	}|| j
        |	<   |	| j        vrYt#          |t$          t&          j        f          r)t+          |          }t-          |d	          | j        |	<   bt/          d
          r| j
        j        D ]3}	|	| j        vr(t-                      | j        |	<   d| j        |	         _        4| 	                    |          }| j
                            d           |D ]}|| j
        vr||         | j
        |<   | j
        j                                         | j
        j                                         | j
        j                                         | j        r | j                            | | j
                   |                     | j
                  }d}|rd}d}d                    |          |v r|d                    |                   d|         |d                    |          <   d                    |          |v r9|d                    |                   d|         |d                    |          <   |dz  }d                    |          |v ||z  t'          j        fdtA          |          D             t&          j!                  }||tD          j#        <   |                     |          }|$                    d           |||tD          j#        <   | j
        j%        |_%        | j&        #| &                    | | j'        | j(        |           nBtS          | j*                  r.| j         d         s!| *                    | j'        | j(        |           | || |           tW          | d          r"| j         d         s| ,                    |           d| _        |r|j        | j
        j        z  | j
        j        z  }|D ]3}	|	| j        vr(|	tD          j#        k    rt-          d          | j        |	<   4| j&        stS          | j*                  r| j
        j        D ]}	|	|j        vr|	| j        v r||	| j'        j        vrn|	tD          j-        tD          j.        tD          j/        tD          j0        tD          j1        tD          j2        tD          j3        tD          j4        fvrd| j        |	         _5        tm          | j        7                                          D ]}	|	|vr|	tD          j-        tD          j.        tD          j/        tD          j0        tD          j1        tD          j2        tD          j3        tD          j4        fvr_|	| j'        j        vrQ|	| j
        j        v r.tp          9                    d                    |	                     | j         d         | j        |	= tu          | j                  t          u r	|| _        dS t#          | j        tv          j<                  r| j        =                    |           dS t/          d                    | tu          | j                                      )ar  Performs test calls through policy's model and loss.

        NOTE: This base method should work for define-by-run Policies such as
        torch and tf-eager policies.

        If required, will thereby detect automatically, which data views are
        required by a) the forward pass, b) the postprocessing, and c) the loss
        functions, and remove those from self.view_requirements that are not
        necessary for these computations (to save data storage and transfer).

        Args:
            auto_remove_unneeded_view_reqs: Whether to automatically
                remove those ViewRequirements records from
                self.view_requirements that are not needed.
                stats_fn (Optional[Callable[[Policy, SampleBatch], Dict[str,
                    TensorType]]]): An optional stats function to be called after
                    the loss.
        )_disable_initialize_loss_from_dummy_batchFNTr>      train_batch_size)r   r  zqpolicy.compute_actions_from_input_dict() returns an extra action output that is neither a numpy array nor a dict.r   state_in_{}state_out_{}r   c                     g | ]}S rd   rd   )r   _seq_lens     r@   r   z<Policy._initialize_loss_from_dummy_batch.<locals>.<listcomp>  s     ; ; ;Q ; ; ;rB   )dtypein_evaluationstats_fnr  zSampleBatch key '{}' was deleted manually in postprocessing function! RLlib will automatically remove non-used items from the data stream. Remove the `del` from your postprocessing function.outputzpVariable self.global_timestep of policy {} needs to be either of type `int` or `tf.Variable`, but is of type {}.)>r>   rl   _no_tracingintr#   rr   minmaxbatch_divisibility_req'_get_dummy_batch_from_view_requirements_dummy_batch_lazy_tensor_dictr   r   r   accessed_keysr  rZ   dictr   ndarray$get_gym_space_from_struct_of_tensorsr   r   set_get_interceptorcleardeleted_keys
added_keysr/  r  r   arrayrangeint32r   SEQ_LENSset_trainingcount_lossr  rt   r   r
  r   r  r  r  r  r  r  r  r   r2   used_for_traininglistkeysrT   rU   r\   r   VariablerL  )r?   r  r  global_ts_before_initsample_batch_sizer   r   
state_outs
extra_outskeyview_reqvaluer  	new_batchr   r$  r   Br   r  all_accessed_keysr  s                        @r@   !_initialize_loss_from_dummy_batchz(Policy._initialize_loss_from_dummy_batch  s   0 ;??FNN 	F
   #$4T5I$J$J K K+a/44K*+
 
 !HH
 
 	t0111*.*N*Nw +O +
 +
'Z "399;; 	: 	:MC$+999491 %**,, 	 	JC%*Dc"$000edBJ%788 
@GGE2A#e3 3 3D*3// %X   1 $2 	M 	MC$000.=.?.?&s+GL&s+D@@ARSS	--d333  	4 	4A)))'0|!!$ 	'--///&,,...$**,,, 	M33D$:KLLL"99$:KLL 	AAA&&q))-@@@?R!((++@1"@#M$8$8$;$;< "((++/BBBDW&--a00EqbE'(=(=a(@(@A Q  &&q))-@@@ (1,Gx ; ; ; ;%(( ; ; ;28LLLH8@ 45 ,,-@AA  &&&08K,- -3
 :!JJtTZ+FFFF49%% 	@dk/.J 	@IIdj$/;??? HT;'''4$$ 	'T[-I 	'MM+&&& ! * B	< )#12#./ 
 )  d444@T9T9T2A163 3 3D*3/ z 6<]4955 6<  ,: N NC;#<<<4#999tz'CCC'.'3'1'3'2'/'-'M	 
 
 IN.s3E   6 ; ; = =>> < <C#444'.'3'1'3'2'/'-'M	 
 
  tz'CCC
 $"3"@@@"NN!; <B6#;;    "[2: $ 6s ;$%%,,#8D   ,bk:: 	 ''(=>>>>>%%+VD$t7K2L2L%M%M  rB   c                     t           )zRemoves a time dimension for recurrent RLModules.

        Args:
            input_dict: The input dict.

        Returns:
            The input dict with a possibly removed time dimension.
        r   )r?   r   s     r@   maybe_remove_time_dimensionz"Policy.maybe_remove_time_dimension  s
     "!rB   r   
    def _get_dummy_batch_from_view_requirements(
        self, batch_size: int = 1
    ) -> SampleBatch:
        """Creates a numpy dummy batch based on the Policy's view requirements.

        Args:
            batch_size: The size of the batch to create.

        Returns:
            Dict[str, TensorType]: The dummy batch containing all zero values.
        """
        ret = {}
        for view_col, view_req in self.view_requirements.items():
            data_col = view_req.data_col or view_col
            # Flattened dummy batch.
            if isinstance(
                view_req.space, (gym.spaces.Tuple, gym.spaces.Dict)
            ) and (
                (
                    data_col == SampleBatch.OBS
                    and not self.config["_disable_preprocessor_api"]
                )
                or (
                    data_col == SampleBatch.ACTIONS
                    and not self.config.get("_disable_action_flattening")
                )
            ):
                _, shape = ModelCatalog.get_action_shape(
                    view_req.space, framework=self.config["framework"]
                )
                ret[view_col] = np.zeros((batch_size,) + shape[1:], np.float32)
            # Non-flattened dummy batch.
            elif isinstance(view_req.space, gym.Space):
                # Range of indices on the time axis, e.g. "-50:-1".
                time_size = (
                    len(view_req.shift_arr) if len(view_req.shift_arr) > 1 else None
                )
                ret[view_col] = get_dummy_batch_for_space(
                    view_req.space,
                    batch_size=batch_size,
                    time_size=time_size,
                )
            # The `space` attribute holds a constant value to repeat.
            else:
                ret[view_col] = [view_req.space for _ in range(batch_size)]

        return SampleBatch(ret)
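    # Example (illustrative, hypothetical policy `pol`): for view
    # requirements containing OBS=Box(-1.0, 1.0, (4,)), the returned dummy
    # batch is all zeros with a leading batch axis:
    #
    #     batch = pol._get_dummy_batch_from_view_requirements(batch_size=2)
    #     # batch[SampleBatch.OBS].shape -> (2, 4)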
    def _update_model_view_requirements_from_init_state(self):
        """Uses Model's (or this Policy's) init state to add needed ViewReqs.

        Can be called from within a Policy to make sure RNNs automatically
        update their internal state-related view requirements.
        Changes the `self.view_requirements` dict.
        """
        self._model_init_state_automatically_added = True
        model = getattr(self, "model", None)

        obj = model or self
        if model and not hasattr(model, "view_requirements"):
            model.view_requirements = {
                SampleBatch.OBS: ViewRequirement(space=self.observation_space)
            }
        view_reqs = obj.view_requirements
        # Add state-ins to this model's view.
        init_state = []
        if hasattr(obj, "get_initial_state") and callable(obj.get_initial_state):
            init_state = obj.get_initial_state()
        else:
            # Add this functionality automatically for the new native model
            # API.
            if (
                tf
                and isinstance(model, tf.keras.Model)
                and "state_in_0" not in view_reqs
            ):
                obj.get_initial_state = lambda: [
                    np.zeros_like(view_req.space.sample())
                    for k, view_req in model.view_requirements.items()
                    if k.startswith("state_in_")
                ]
            else:
                obj.get_initial_state = lambda: []
                if "state_in_0" in view_reqs:
                    self.is_recurrent = lambda: True

        # Make sure auto-generated init-state view requirements get added
        # to both Policy and Model, no matter what.
        view_reqs = [view_reqs] + (
            [self.view_requirements] if hasattr(self, "view_requirements") else []
        )

        for i, state in enumerate(init_state):
            # Allow `state` to be either a Space (use zeros as initial
            # values) or any value (e.g. a dict or a non-zero tensor).
            fw = (
                np
                if isinstance(state, np.ndarray)
                else torch
                if torch and torch.is_tensor(state)
                else None
            )
            if fw:
                space = (
                    Box(-1.0, 1.0, shape=state.shape)
                    if fw.all(state == 0.0)
                    else state
                )
            else:
                space = state
            for vr in view_reqs:
                # Only override if the user has not already provided a
                # custom view requirement for `state_in_{i}`.
                if "state_in_{}".format(i) not in vr:
                    vr["state_in_{}".format(i)] = ViewRequirement(
                        "state_out_{}".format(i),
                        shift=-1,
                        used_for_compute_actions=True,
                        batch_repeat_value=self.config.get("model", {}).get(
                            "max_seq_len", 1
                        ),
                        space=space,
                    )
                # Only override if the user has not already provided a
                # custom view requirement for `state_out_{i}`.
                if "state_out_{}".format(i) not in vr:
                    vr["state_out_{}".format(i)] = ViewRequirement(
                        space=space, used_for_training=True
                    )

    def __repr__(self):
        return type(self).__name__
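# Illustrative sketch: for an RNN whose `get_initial_state()` returns a
# single zero tensor of shape (256,), the call
# `pol._update_model_view_requirements_from_init_state()` adds (assuming
# `max_seq_len=20` in the model config):
#
#     # view_requirements["state_in_0"]: data_col="state_out_0", shift=-1,
#     #     batch_repeat_value=20, space=Box(-1.0, 1.0, (256,))
#     # view_requirements["state_out_0"]: space=Box(-1.0, 1.0, (256,)),
#     #     used_for_training=True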
xh011	2U, U, U, \U,n .+ .( . . . \.`2 2 2&
t 
 
 
 
4 4 4 4 +/,0i

 3726,0"&"&i
 i
 i
&'i
 Z()i

 ./i
 ./i
 i
 [)i
 $i
 3-i
 
j!14Z3HH	Ii
 i
 i
 i
\ #'"&4
 4
+tC1A,A'BBC4
 $4
 3-	4
 
z4
+T#z/-BB	C4
 4
 4
 4
l  59MQMQ04#'"&"&)" )"./1AAB)"  Z 01)" !&6!79I!IJ	)"
 !&6!79I!IJ)" T#t)_-)" 4.)" $)" 3-)" 
z4
+T#z/-BB	C)" )" )" ^)"^ 59KOKO#'  "  "tJ'34 " j):56 "  Z 01	 "
 $E$z*:J*F$GH " $E$z*:J*F$GH " ! "  " 
 "  "  "  "D ;  ! &%+ 5667
 
   ;:@ $""*<"KV"	z4
++	," " " $#"$k d3
?6K    0.'.4<.	c:o	. . . .:" "K "s "SV " " " "$" "s "3 " " " "" "C "3 " " " "."#."	~tCO44	5" " " "$
" 
"D 
" 
" 
" 
""\ " " " "
"< 
"D 
" 
" 
" 
",tCO'< , , , ,d    3    4
#3     ;); ) ) ) ;:)VH H H H H0 ;"'{ "'t "' "' "' ;:"'H+(3-#?BC+
 
+ + + +.5S*_0E 5$ 5 5 5 5. )MA
 /3!.MA MA MAMA
 {+MA MA 
MA MA MA MA^" "s "(3- "4 " " " "$" " " " " "
Xm4 
 
 
 
#    "# " " " "H[    2*
 *
 *
\ 04V V(,V 
	V V V Vp	"d3
@OldAPIStack
def get_gym_space_from_struct_of_tensors(
    value: Union[Dict, Tuple, List, TensorType],
    batched_input: bool = True,
) -> gym.Space:
    # With batched inputs, strip the batch axis (axis 0) from the inferred
    # per-element space shapes.
    start_idx = 1 if batched_input else 0
    struct = tree.map_structure(
        lambda x: gym.spaces.Box(
            -1.0, 1.0, shape=x.shape[start_idx:], dtype=get_np_dtype(x)
        ),
        value,
    )
    return get_gym_space_from_struct_of_spaces(struct)


@OldAPIStack
def get_gym_space_from_struct_of_spaces(value: Union[Dict, Tuple]) -> gym.spaces.Dict:
    if isinstance(value, dict):
        return gym.spaces.Dict(
            {k: get_gym_space_from_struct_of_spaces(v) for k, v in value.items()}
        )
    elif isinstance(value, (list, tuple)):
        return gym.spaces.Tuple(
            [get_gym_space_from_struct_of_spaces(s) for s in value]
        )
    else:
        assert isinstance(value, gym.spaces.Space), (
            "The struct of spaces should only contain dicts, tuples and "
            "primitive gym spaces. Space is of type {}".format(type(value))
        )
        return value