from typing import Tuple

import torch
import torch.nn as nn
import torch.nn.functional as F


class DPOLoss(nn.Module):
    """
    Direct Preference Optimization (DPO) Loss module: https://arxiv.org/abs/2305.18290
    Simply stated from the paper:

        Intuitively, the DPO update increases the relative log probability of preferred to dispreferred responses,
        but it incorporates a dynamic, per-example importance weight that prevents
        the model degeneration that we find occurs with a naive probability ratio objective.

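    In terms of this module's parameters, the (optionally label-smoothed) loss computed in
    ``forward`` for each example in the batch is:

        logits = (policy_chosen_logps - policy_rejected_logps)
                 - (reference_chosen_logps - reference_rejected_logps)
        loss = -logsigmoid(beta * logits) * (1 - label_smoothing)
               - logsigmoid(-beta * logits) * label_smoothing
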
    Based on the implementation in HF's TRL library:
    https://github.com/huggingface/trl/blob/5d1deb1445828cfd0e947cb3a7925b1c03a283fc/trl/trainer/dpo_trainer.py#L844

    DPO retains similarities to PPO (https://arxiv.org/abs/2009.01325), where it optimizes a policy
    (language) model to align with human preferences, and regularizes the loss function using a baseline
    reference (the frozen, initial language model) to prevent over-fitting to the preference dataset.
    It differs from PPO by optimizing the policy model directly using labelled preference data, rather
    than using an additional reward model to provide feedback.
    This significantly simplifies training and reduces compute overhead.

    Args:
        beta (float): Temperature parameter for the DPO loss, typically in the range of 0.1 to 0.5. Default is 0.1.
        label_smoothing (float): Parameter encoding uncertainty about the labels. Default is 0.
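
    The example below is illustrative only: random tensors stand in for the per-example
    log probabilities produced by the policy and reference models.

    Example:
        >>> import torch
        >>> loss_fn = DPOLoss(beta=0.1, label_smoothing=0.0)
        >>> policy_chosen_logps = torch.randn(4)
        >>> policy_rejected_logps = torch.randn(4)
        >>> reference_chosen_logps = torch.randn(4)
        >>> reference_rejected_logps = torch.randn(4)
        >>> losses, chosen_rewards, rejected_rewards = loss_fn(
        ...     policy_chosen_logps,
        ...     policy_rejected_logps,
        ...     reference_chosen_logps,
        ...     reference_rejected_logps,
        ... )
        >>> losses.shape
        torch.Size([4])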
    """

    def __init__(
        self,
        beta: float = 0.1,
        label_smoothing: float = 0.0,
    ):
        super().__init__()
        self.beta = beta
        self.label_smoothing = label_smoothing

    def forward(
        self,
        policy_chosen_logps: torch.Tensor,
        policy_rejected_logps: torch.Tensor,
        reference_chosen_logps: torch.Tensor,
        reference_rejected_logps: torch.Tensor,
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """
        Compute the DPO loss for a batch of policy and reference model log probabilities.

        Args:
            policy_chosen_logps (torch.Tensor): Log probabilities of the policy model
                for the chosen responses. Shape: (batch_size)
            policy_rejected_logps (torch.Tensor): Log probabilities of the policy model
                for the rejected responses. Shape: (batch_size)
            reference_chosen_logps (torch.Tensor): Log probabilities of the reference model
                for the chosen responses. Shape: (batch_size)
            reference_rejected_logps (torch.Tensor): Log probabilities of the reference model
                for the rejected responses. Shape: (batch_size)

        Returns:
            Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: A tuple of three tensors:
                - losses: The DPO loss for each example in the batch.
                - chosen_rewards: Rewards for the chosen responses.
                - rejected_rewards: Rewards for the rejected responses.

        """
        pi_logratios = policy_chosen_logps - policy_rejected_logps
        ref_logratios = reference_chosen_logps - reference_rejected_logps

        logits = pi_logratios - ref_logratios

        # Label-smoothed (conservative) DPO loss; label_smoothing = 0 recovers the standard DPO loss.
        losses = (
            -F.logsigmoid(self.beta * logits) * (1 - self.label_smoothing)
            - F.logsigmoid(-self.beta * logits) * self.label_smoothing
        )

        chosen_rewards = (
            self.beta * (policy_chosen_logps - reference_chosen_logps).detach()
        )
        rejected_rewards = (
            self.beta * (policy_rejected_logps - reference_rejected_logps).detach()
        )

        return losses, chosen_rewards, rejected_rewards


class RSOLoss(nn.Module):
    """
    Statistical Rejection Sampling Optimization (RSO) or "hinge" loss module: https://arxiv.org/abs/2309.06657.
    Intuition from the paper:

        DPO is a logistic regression on human preference data, and SLiC (https://arxiv.org/abs/2305.10425) is almost
        equivalent to a support vector machine (SVM) with hinge loss. [RSO] improve[s] SLiC as the SVM counterpart of DPO.

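    Concretely, ``forward`` applies a hinge loss to the same policy/reference log-ratio margin
    used by DPO:

        logits = (policy_chosen_logps - policy_rejected_logps)
                 - (reference_chosen_logps - reference_rejected_logps)
        loss = relu(1 - gamma * logits)
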
    Based on the implementation in HF's TRL library:
    https://github.com/huggingface/trl/blob/4dce042a3863db1d375358e8c8092b874b02934b/trl/trainer/dpo_trainer.py#L1141

    Args:
        gamma (float): Equivalent temperature parameter (from DPO) for the RSO loss.
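
    The example below is illustrative only: random tensors stand in for the per-example
    log probabilities produced by the policy and reference models.

    Example:
        >>> import torch
        >>> loss_fn = RSOLoss(gamma=0.1)
        >>> policy_chosen_logps = torch.randn(4)
        >>> policy_rejected_logps = torch.randn(4)
        >>> reference_chosen_logps = torch.randn(4)
        >>> reference_rejected_logps = torch.randn(4)
        >>> losses, chosen_rewards, rejected_rewards = loss_fn(
        ...     policy_chosen_logps,
        ...     policy_rejected_logps,
        ...     reference_chosen_logps,
        ...     reference_rejected_logps,
        ... )
        >>> losses.shape
        torch.Size([4])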
    """

    def __init__(
        self,
        gamma: float = 0.1,
    ):
        super().__init__()
        self.gamma = gamma

    def forward(
        self,
        policy_chosen_logps: torch.Tensor,
        policy_rejected_logps: torch.Tensor,
        reference_chosen_logps: torch.Tensor,
        reference_rejected_logps: torch.Tensor,
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """
        Compute the RSO loss for a batch of policy and reference model log probabilities.

        Args:
            policy_chosen_logps (torch.Tensor): Log probabilities of the policy model
                for the chosen responses. Shape: (batch_size)
            policy_rejected_logps (torch.Tensor): Log probabilities of the policy model
                for the rejected responses. Shape: (batch_size)
            reference_chosen_logps (torch.Tensor): Log probabilities of the reference model
                for the chosen responses. Shape: (batch_size)
            reference_rejected_logps (torch.Tensor): Log probabilities of the reference model
                for the rejected responses. Shape: (batch_size)

        Returns:
            Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: A tuple of three tensors:
                - losses: The RSO loss for each example in the batch.
                - chosen_rewards: Rewards for the chosen responses.
                - rejected_rewards: Rewards for the rejected responses.

        """
        pi_logratios = policy_chosen_logps - policy_rejected_logps
        ref_logratios = reference_chosen_logps - reference_rejected_logps

        logits = pi_logratios - ref_logratios

        # Hinge loss on the scaled margin between policy and reference log-ratios.
        losses = torch.relu(1 - self.gamma * logits)

        chosen_rewards = (
            self.gamma * (policy_chosen_logps - reference_chosen_logps).detach()
        )
        rejected_rewards = (
            self.gamma * (policy_rejected_logps - reference_rejected_logps).detach()
        )

        return losses, chosen_rewards, rejected_rewards