import sys
import time

from functools import partial
from typing import Any, Dict, List, Optional, Tuple, Union
from warnings import warn

import torch
from omegaconf import DictConfig, ListConfig

from torch import nn
from torch.distributed import destroy_process_group, init_process_group
from torch.optim import Optimizer
from torch.utils.data import DataLoader, DistributedSampler
from torchtune import config, modules, training, utils
from torchtune.config._utils import _get_component_from_path
from torchtune.data import padded_collate_packed
from torchtune.datasets import ConcatDataset
from torchtune.recipe_interfaces import FTRecipeInterface
from torchtune.training import DummyProfiler, PROFILER_KEY
from torchtune.training.activations import apply_selective_activation_checkpointing
from torchtune.training.lr_schedulers import get_lr

from tqdm import tqdm

log = utils.get_logger("DEBUG")


class QATRecipeDistributed(FTRecipeInterface):
    """
    Quantization-aware training (QAT) recipe for dense transformer-based LLMs such as Llama2.
    This recipe supports distributed training and can be run on a single node (1 to 8 GPUs).
    Only compatible with PyTorch 2.4+.

    Features:
        - Quantization-aware training (QAT). Perform fake quantization on weights and/or activations
            during finetuning, with the goal of ultimately producing a quantized model with minimal
            accuracy degradation. This recipe produces an unquantized model in the original dtype,
            which can then be quantized separately.

        - Delayed fake quantization. Optionally specify the step after which fake quantization occurs.
            Empirically, allowing the model to finetune without fake quantization initially allows the
            weight and activation values to stabilize before fake quantizing them, potentially leading
            to improved quantized accuracy. This can be specified through ``fake_quant_after_n_steps``.

        - FSDP. Supported using PyTorch's FSDP APIs. CPU offload of parameters, gradients, and optimizer states
            is supported via ``fsdp_cpu_offload``. Resharding of parameters after the forward pass is
            done by default (corresponding to FULL_SHARD sharding strategy), but can be disabled by setting the config
            ``fsdp_reshard_after_forward`` to False (this corresponds to SHARD_GRAD_OP sharding strategy).
            DDP is currently not supported. Training on CPU is not supported.

        - Activation Checkpointing. This can be controlled using the ``enable_activation_checkpointing``
            flag. Activation checkpointing helps reduce the memory footprint since we no longer keep
            activations in memory and instead recompute them during the backward pass. This is especially
            helpful for larger batch sizes when you're memory constrained. But these savings in memory
            come at the cost of training performance. In most cases, training can slow down quite a bit as
            a result of this activation recomputation.

        - Activation Offloading. This can be controlled using the ``enable_activation_offloading``
            flag. Activation offloading is a technique similar to activation checkpointing that helps
            reduce the memory footprint to prevent OOMs on CUDA and enable bigger batches. Where activation
            checkpointing drops the activation in the forward pass to recompute it later in the backward pass,
            activation offloading moves the activation to the CPU in the forward pass and brings it
            back during the backward pass. As always, there is a tradeoff--these savings in memory can
            come at the cost of training performance and CPU resources. To recover some runtime cost,
            we've added an option to enable offloading on a different stream to permit overlapping with
            the computation. This option is currently only available on PyTorch 2.5 or later and will
            be enabled by default if an acceptable torch version is found. Activation offloading can be
            used in conjunction with activation checkpointing.
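
            Enabling both features in the config looks roughly like this (an
            illustrative snippet; both flags are read by this recipe):

            .. code-block:: yaml

                enable_activation_checkpointing: True
                enable_activation_offloading: True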

        - Precision. Full fp32 and bf16 training are supported. Precision is controlled using the ``dtype``
            flag. When ``dtype=bf16``, all activations, gradients and optimizer states are in bfloat16. In
            most cases this should halve the memory footprint of full precision (fp32) training, without
            loss in model quality (will depend on the model, training data and other settings). For
            GPUs which do not support bfloat16, we fall back to fp32. Mixed precision training and fp16
            precision are currently not supported.

        - Gradient Accumulation. You can simulate larger batch sizes by accumulating gradients. This is
            controlled using the ``gradient_accumulation_steps`` flag.

                Total Batch Size = batch_size * number of GPUs * gradient accumulation steps.

            For example: with batch_size=1, nproc_per_node=2 and gradient_accumulation_steps=32 we get a
            total batch size of 64.

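            In code terms (illustrative): the recipe only steps the optimizer when
            ``(batch_idx + 1) % gradient_accumulation_steps == 0``, so each weight
            update has seen batch_size * nproc_per_node * gradient_accumulation_steps
            examples.
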
            Gradient accumulation is especially useful when you are memory constrained. In this case,
            accumulating gradients might give you better training speed than enabling activation
            checkpointing.

        - Checkpointing. Model weights are checkpointed both at the end of each epoch and at the end of
            training. Optimizer state and recipe state (seed, total_epochs, number of epochs run, etc.) are
            only saved at the end of a given epoch and used in case of resuming training.

            Resuming training is controlled by the ``resume_from_checkpoint`` flag. Mid-epoch checkpointing is
            currently not supported.

            For more details on the checkpointer, please take a look at
            our checkpointer deepdive (https://pytorch.org/torchtune/main/deep_dives/checkpointer.html).

        - Logging. Terminal, Disk, WandB and TensorBoard are all supported.

        - Gradient Clipping. Gradient clipping is supported using the ``clip_grad_norm`` flag. By default,
            ``clip_grad_norm`` is set to ``None``. If you only want to log the grad norm, you can set
            ``clip_grad_norm='inf'``.

    For a full list of example configs for this recipe, run ``tune ls`` on the command line. Each config
    has example commands for how to kick-off training.

    Args:
        cfg (DictConfig): OmegaConf object parsed from yaml file

    Raises:
        ValueError: If ``dtype`` is set to fp16.
        ValueError: If ``compile`` is set to True.
        RuntimeError: If ``dtype`` is set to bf16 and the hardware does not support bf16.
        RuntimeError: If ``left_pad_sequence`` is set as the data collator.
        RuntimeError: If ``enable_activation_offloading`` is True and device is not CUDA.
        RuntimeError: If ``enable_activation_offloading`` is True and ``enable_activation_checkpointing`` is False.
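
    A typical ``quantizer`` config section, as an illustrative sketch (the
    component path and ``groupsize`` follow torchtune's shipped QAT configs;
    treat the exact names and values as assumptions to check against your
    installed version):

    .. code-block:: yaml

        quantizer:
          _component_: torchtune.training.quantization.Int8DynActInt4WeightQATQuantizer
          groupsize: 256

        fake_quant_after_n_steps: 1000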
    """

    def __init__(self, cfg: DictConfig) -> None:
        self._device = utils.get_device(device=cfg.device)
        self._dtype = training.get_dtype(cfg.dtype, device=self._device)

        if self._dtype == torch.float16:
            raise ValueError(
                "full fp16 training is not supported with this recipe. Please use bf16 or fp32 instead."
            )

        if cfg.get("compile", False):
            raise ValueError(
                "Compile is not yet supported for QAT. Please set compile=False."
            )

        self._output_dir = cfg.output_dir
        self._log_every_n_steps = cfg.get("log_every_n_steps", 1)
        self._log_peak_memory_stats = cfg.get("log_peak_memory_stats", False)

        if self._log_peak_memory_stats and self._device.type != "cuda":
            log.info(
                "log_peak_memory_stats was set to True, however, training does not use cuda. Setting log_peak_memory_stats=False."
            )
            self._log_peak_memory_stats = False

        self.world_size, self.rank = utils.get_world_size_and_rank()
        self._is_rank_zero = self.rank == 0

        # Training config
        self._resume_from_checkpoint = cfg.resume_from_checkpoint
        self._gradient_accumulation_steps = cfg.gradient_accumulation_steps
        self._optimizer_in_bwd = cfg.get("optimizer_in_bwd", False)
        self._clip_grad_norm = cfg.get("clip_grad_norm", None)
        self._fake_quant_after_n_steps = cfg.get("fake_quant_after_n_steps", None)
        self._quantizer_mode = None

        # Optimizer-in-backward is not compatible with gradient clipping or
        # gradient accumulation
        if self._optimizer_in_bwd:
            if self._clip_grad_norm is not None:
                raise RuntimeError(
                    "Gradient clipping is not supported with optimizer in bwd."
                    "Please set clip_grad_norm=None, or optimizer_in_bwd=False."
                )
            if self._gradient_accumulation_steps > 1:
                raise RuntimeError(
                    "Gradient accumulation is not supported with optimizer in bwd."
                    "Please set gradient_accumulation_steps=1, or optimizer_in_bwd=False."
                )

        # Activation checkpointing and offloading
        self._enable_activation_checkpointing = cfg.get(
            "enable_activation_checkpointing", False
        )
        self._enable_activation_offloading = cfg.get(
            "enable_activation_offloading", False
        )
        if self._enable_activation_offloading:
            if self._device.type != "cuda":
                raise RuntimeError(
                    "enable_activation_offloading should only be True when training on CUDA"
                )
            if not self._enable_activation_checkpointing:
                raise RuntimeError(
                    "enable_activation_offloading should only be True when enable_activation_checkpointing is True"
                )
        elif (
            self._enable_activation_checkpointing
            and cfg.checkpointer.model_type != "LLAMA3_VISION"
        ):
            utils.log_rank_zero(
                log,
                "Hint: enable_activation_checkpointing is True, but enable_activation_offloading isn't. "
                "Enabling activation offloading should reduce memory further.",
            )

        # These attributes constitute the recipe state and are updated by
        # ``load_checkpoint`` when ``resume_from_checkpoint`` is True
        self.seed = training.set_seed(
            seed=cfg.seed, debug_mode=cfg.get("cudnn_deterministic_mode", None)
        )
        self.epochs_run = 0
        self.total_epochs = cfg.epochs
        self.max_steps_per_epoch = cfg.max_steps_per_epoch
        self.global_step = 0

    def load_checkpoint(self, cfg_checkpointer: DictConfig) -> Dict[str, Any]:
        """
        Extract the checkpoint state from file and validate. If resume_from_checkpoint
        is True, this also includes the recipe state.
        """
        self._checkpointer = config.instantiate(
            cfg_checkpointer,
            should_load_recipe_state=self._resume_from_checkpoint,
        )
        checkpoint_dict = self._checkpointer.load_checkpoint()

        if self._resume_from_checkpoint:
            self._update_recipe_state(checkpoint_dict)
        return checkpoint_dict

    def _update_recipe_state(self, ckpt_dict: Dict[str, Any]) -> None:
        """
        Updates the recipe state from checkpoint.
        """
        try:
            self.epochs_run = ckpt_dict[training.EPOCHS_KEY]

            # on mismatch, warn the user and prevent the override
            if self.seed != ckpt_dict[training.SEED_KEY]:
                warn(
                    message=(
                        "Config value for seed does not match the checkpoint value, "
                        f"using the checkpoint value: {ckpt_dict[training.SEED_KEY]}"
                    )
                )
                self.seed = ckpt_dict[training.SEED_KEY]
            if self.max_steps_per_epoch != ckpt_dict[training.MAX_STEPS_KEY]:
                warn(
                    message=(
                        "Config value for max_steps_per_epoch does not match the checkpoint value, "
                        f"using the checkpoint value: {ckpt_dict[training.MAX_STEPS_KEY]}"
                    )
                )
                self.max_steps_per_epoch = ckpt_dict[training.MAX_STEPS_KEY]

            # on mismatch, warn the user but allow the override
            if self.total_epochs != ckpt_dict[training.TOTAL_EPOCHS_KEY]:
                warn(
                    message=(
                        "Config value for total_epochs does not match the checkpoint value, "
                        f"using the config value: {self.total_epochs}"
                    )
                )
        except KeyError as e:
            raise KeyError(
                "Checkpoint does not contain the required keys needed for updating recipe state. "
                "Are you sure you passed in the right recipe checkpoint?"
            ) from e

    def setup(self, cfg: DictConfig) -> None:
        """
        Setup the recipe. This includes training state (if resume_from_checkpoint is True),
        model, tokenizer, loss, optimizer, sampler, and dataloader.
        """
        if self._is_rank_zero:
            self._metric_logger = config.instantiate(cfg.metric_logger)

            # log config with parameter override
            self._metric_logger.log_config(cfg)

        checkpoint_dict = self.load_checkpoint(cfg_checkpointer=cfg.checkpointer)
        self._compile = cfg.get("compile", False)

        self._model = self._setup_model(
            cfg_model=cfg.model,
            enable_activation_checkpointing=self._enable_activation_checkpointing,
            enable_activation_offloading=self._enable_activation_offloading,
            custom_sharded_layers=cfg.get("custom_sharded_layers", None),
            fsdp_cpu_offload=cfg.get("fsdp_cpu_offload", False),
            reshard_after_forward=cfg.get("fsdp_reshard_after_forward", True),
            model_state_dict=checkpoint_dict[training.MODEL_KEY],
            ac_mode=cfg.get("ac_mode", None),
            ac_option=cfg.get("ac_option", None),
            quantizer_cfg=cfg.get("quantizer", None),
        )
        self._tokenizer = config.instantiate(cfg.tokenizer)

        self._optimizer = self._setup_optimizer(
            cfg_optimizer=cfg.optimizer,
            optimizer_in_bwd=self._optimizer_in_bwd,
            opt_state_dict=(
                checkpoint_dict[training.OPT_KEY]
                if self._resume_from_checkpoint
                else None
            ),
        )

        # initialize loss
        self._loss_fn = config.instantiate(cfg.loss)

        if self._compile:
            training.compile_loss(self._loss_fn, verbose=self._is_rank_zero)

        if self._loss_fn.__class__.__name__ == "CEWithChunkedOutputLoss":
            # set num_output_chunks for model
            self._model.set_num_output_chunks(self._loss_fn.num_output_chunks)

        utils.log_rank_zero(log, "Loss is initialized.")

        # sampler and dataloader depend on the tokenizer and loss_fn and should
        # be set up after both of these are initialized
        collate_name = cfg.get("collate_fn", "torchtune.data.padded_collate_sft")
        self._sampler, self._dataloader = self._setup_data(
            cfg_dataset=cfg.dataset,
            shuffle=cfg.shuffle,
            batch_size=cfg.batch_size,
            collate_fn=collate_name,
        )

        # Finally update the recipe state, which can only be correctly set after
        # all of the other components have been initialized and updated.
        #
        # The number of training steps in each epoch depends on the number of
        # batches produced by the dataloader, the max_steps_per_epoch param set by
        # the user, and the gradient_accumulation_steps param. This value is used
        # for logging and tracking training state, and should be computed after the
        # dataloader has been set up.
        self._steps_per_epoch = (
            len(self._dataloader) // self._gradient_accumulation_steps
        )
        if (
            self.max_steps_per_epoch is not None
            and self.max_steps_per_epoch < self._steps_per_epoch
        ):
            self._steps_per_epoch = self.max_steps_per_epoch
        self.global_step = self.epochs_run * self._steps_per_epoch

        # Set up profiler; returns DummyProfiler (a nullcontext with no-op `start`,
        # `step`, and `stop` methods) if the profiler key is missing from the config
        # or `cfg.profiler.enabled` is False
        self._profiler = self._setup_profiler(cfg.get(PROFILER_KEY, None))

        # Used to ignore labels for loss computation
        self.ignore_labels_cache = torch.full(
            (cfg.batch_size, 1), self._loss_fn.ignore_index, device=self._device
        )

    def _setup_profiler(
        self, cfg_profiler: Optional[DictConfig] = None
    ) -> Union[torch.profiler.profile, DummyProfiler]:
        """
        Parses the `profiler` section of the top-level `cfg` and sets up the profiler.

        Args:
            cfg_profiler (Optional[DictConfig]): ``profiler`` section of the top-level ``cfg`` (the main config passed to
                `recipe.main`). Default None.

        Returns:
            profiler: Union[torch.profiler.profile, DummyProfiler] - DummyProfiler is a nullcontext with no-op methods
            for `start`, `stop`, and `step` that can be used in place of `torch.profiler.profile` if the profiler is not
            enabled, so that the instrumented training loop does not need to be changed when profiling is disabled.

        The profiler config can be provided in configs under the `profiler` key with the following layout:

        .. code-block:: yaml

            profiler:
                enabled: bool

                # Output directory of trace artifacts
                output_dir: str

                # `torch.profiler.ProfilerActivity` types to trace
                cpu: bool
                cuda: bool

                # Trace options
                profile_memory: bool
                with_stack: bool
                record_shapes: bool
                with_flops: bool

                # `torch.profiler.schedule` options:
                # wait_steps -> wait, warmup_steps -> warmup, active_steps -> active, num_cycles -> repeat
                wait_steps: int
                warmup_steps: int
                active_steps: int
                num_cycles: int
        """
        # Missing profiler section in config, assume disabled
        if cfg_profiler is None:
            cfg_profiler = DictConfig({"enabled": False})

        # Check that component is included and set correctly
        if cfg_profiler.get("_component_", None) is None:
            cfg_profiler["_component_"] = "torchtune.training.setup_torch_profiler"
        else:
            assert (
                cfg_profiler.get("_component_")
                == "torchtune.training.setup_torch_profiler"
            ), "Only torch profiler supported currently: component must be `torchtune.training.setup_torch_profiler`"

        profiler, profiler_cfg = config.instantiate(cfg_profiler)

        utils.log_rank_zero(
            log, f" Profiler config after instantiation: {profiler_cfg}"
        )
        if self._is_rank_zero:
            self.profiler_profile_memory = profiler_cfg.get("profile_memory", False)
            if profiler_cfg["enabled"]:
                self.profiler_wait_steps = profiler_cfg["wait_steps"]
                self.profiler_warmup_steps = profiler_cfg["warmup_steps"]
                self.profiler_active_steps = profiler_cfg["active_steps"]

        return profiler

    def _setup_model(
        self,
        cfg_model: DictConfig,
        enable_activation_checkpointing: bool,
        enable_activation_offloading: bool,
        fsdp_cpu_offload: bool,
        reshard_after_forward: bool,
        model_state_dict: Dict[str, Any],
        custom_sharded_layers: Optional[List[str]] = None,
        ac_mode: Optional[str] = None,
        ac_option: Optional[int] = None,
        quantizer_cfg: Optional[DictConfig] = None,
    ) -> nn.Module:
        """
        Model initialization has some important considerations:
           a. To minimize GPU peak memory, we initialize the model on the meta device with
              the right dtype
           b. All ranks call ``load_state_dict`` without spiking CPU RAM usage, since
              full state dicts are loaded with ``torch.load(mmap=True)``
        """
        utils.log_rank_zero(
            log,
            "FSDP is enabled. Instantiating model and loading checkpoint on Rank 0 ...",
        )
        init_start = time.perf_counter()

        with training.set_default_dtype(self._dtype), torch.device("meta"):
            model = config.instantiate(cfg_model)

        if self._compile:
            training.compile_model(model, verbose=self._is_rank_zero)

        # We currently have two versions of activation checkpointing in this recipe
        # for testing and BC purposes. ``enable_activation_checkpointing`` controls
        # the older version of AC, and this behavior is unchanged. ``ac_mode`` and
        # ``ac_option`` together control selective AC, which is only enabled when
        # these are set AND ``enable_activation_checkpointing`` is False.
        if (not enable_activation_checkpointing) and (ac_mode is not None):
            apply_selective_activation_checkpointing(
                model,
                ac_mode,
                ac_option,
            )

        # original full activation checkpointing
        if enable_activation_checkpointing and ac_mode is None:
            training.set_activation_checkpointing(
                model, auto_wrap_policy={modules.TransformerSelfAttentionLayer}
            )

        # Apply quantization-aware training during finetuning
        if quantizer_cfg is None:
            raise ValueError("Quantizer must be specified for QAT recipe.")
        quantizer = config.instantiate(quantizer_cfg)
        quantizer.precision = self._dtype
        quantizer_mode = training.quantization.get_quantizer_mode(quantizer)
        if "qat" not in quantizer_mode:
            raise ValueError(
                "Quantizer mode '%s' is not supported for finetuning" % quantizer_mode
            )
        self._quantizer_mode = quantizer_mode
        model = quantizer.prepare(model)

        # For FSDP sharding
        fsdp_shard_conditions = [
            partial(
                training.get_shard_conditions,
                names_to_match=custom_sharded_layers,
            )
        ]
        training.shard_model(
            model=model,
            shard_conditions=fsdp_shard_conditions,
            cpu_offload=fsdp_cpu_offload,
            reshard_after_forward=reshard_after_forward,
        )

        with training.set_default_dtype(self._dtype), self._device:
            for m in model.modules():
                # RoPE is not covered in the state dict
                if hasattr(m, "rope_init"):
                    m.rope_init()

        # This method will convert the full model state dict into a sharded state
        # dict and load into the model
        training.load_from_full_model_state_dict(
            model,
            model_state_dict,
            self._device,
            strict=True,
            cpu_offload=fsdp_cpu_offload,
        )

        # activation offloading
        self.activations_handling_ctx = training.get_act_offloading_ctx_manager(
            model, enable_activation_offloading
        )

        # Ensure no params and buffers are on the meta device
        training.validate_no_params_on_meta_device(model)

        utils.log_rank_zero(
            log,
            f"Instantiating model and loading checkpoint took {time.perf_counter() - init_start:.2f} secs",
        )
        if self._is_rank_zero:
            memory_stats = training.get_memory_stats(device=self._device)
            training.log_memory_stats(memory_stats)

        # synchronize before training begins
        torch.distributed.barrier()

        return model

    def _setup_optimizer(
        self,
        cfg_optimizer: DictConfig,
        optimizer_in_bwd: bool = False,
        opt_state_dict: Optional[Dict[str, Any]] = None,
    ) -> Optional[Optimizer]:
        if optimizer_in_bwd:
            # Maintain a dict of optims for every parameter.
            optim_dict = {
                param: config.instantiate(cfg_optimizer, [param])
                for param in self._model.parameters()
            }

            # Register optimizer step hooks on the model to run optimizer in backward.
            training.register_optim_in_bwd_hooks(
                model=self._model, optim_dict=optim_dict
            )
            # Create a wrapper for checkpoint save/load of optimizer states when
            # running in backward.
            self._optim_ckpt_wrapper = training.create_optim_in_bwd_wrapper(
                model=self._model, optim_dict=optim_dict
            )
            # Load optimizer states. If optimizer states are being restored in an
            # optimizer-in-backward run, they need to have been saved with the same
            # setting. Cannot restore from runs that did not use optimizer in backward.
            if opt_state_dict is not None:
                for param in opt_state_dict.keys():
                    try:
                        training.load_from_full_optimizer_state_dict(
                            self._optim_ckpt_wrapper.optim_map[param],
                            opt_state_dict[param],
                            self._device,
                        )
                    except BaseException as e:
                        raise RuntimeError(
                            "Failed loading in-backward optimizer checkpoints."
                            "Please make sure run being restored from was using in-backward optimizer."
                        ) from e
            utils.log_rank_zero(log, "In-backward optimizers are set up.")
            return None
        else:
            optimizer = config.instantiate(cfg_optimizer, self._model.parameters())
            if opt_state_dict:
                training.load_from_full_optimizer_state_dict(
                    optimizer,
                    opt_state_dict,
                    self._device,
                )

            utils.log_rank_zero(log, "Optimizer is initialized.")
            return optimizer

    def _setup_data(
        self,
        cfg_dataset: DictConfig,
        shuffle: bool,
        batch_size: int,
        collate_fn: str,
    ) -> Tuple[DistributedSampler, DataLoader]:
        """
        All data-related setup happens here. Currently this recipe only supports
        DistributedSamplers with map-style datasets which fit into memory. Other
        samplers, iterable datasets, and streaming datasets are not supported.
        """
        if isinstance(cfg_dataset, ListConfig):
            datasets = [
                config.instantiate(single_cfg_dataset, self._tokenizer)
                for single_cfg_dataset in cfg_dataset
            ]
            ds = ConcatDataset(datasets=datasets)
            packed = getattr(ds, "packed", False)
        else:
            ds = config.instantiate(cfg_dataset, self._tokenizer)
            packed = cfg_dataset.get("packed", False)

        # Instantiate the collate_fn
        if "left_pad_sequence" in collate_fn:
            raise RuntimeError("left_pad_sequence collator is only for inference.")
        collate_fn = _get_component_from_path(collate_fn)

        sampler = DistributedSampler(
            ds,
            num_replicas=self.world_size,
            rank=self.rank,
            shuffle=shuffle,
            seed=0,
        )
        dataloader = DataLoader(
            dataset=ds,
            batch_size=batch_size,
            sampler=sampler,
            # dropping last avoids shape issues with compile + flex attention
            drop_last=True,
            collate_fn=(
                partial(
                    collate_fn,
                    padding_idx=self._tokenizer.pad_id,
                    ignore_idx=self._loss_fn.ignore_index,
                )
                if not packed
                else padded_collate_packed
            ),
        )

        utils.log_rank_zero(log, "Dataset and Sampler are initialized.")

        return sampler, dataloader

    def save_checkpoint(
        self,
        epoch: int,
    ) -> None:
        """
        Checkpoint the state of the recipe. The constructed checkpoint state dict
        contains the following information:
        - Model weights with key training.MODEL_KEY
        - Relevant recipe state if training is not complete

        The checkpointer will save the model weights and recipe state in
        different checkpoint files. To correctly resume training from an intermediate
        checkpoint, the model weights and recipe state must be provided.
        """
        # final dict passed onto the checkpointer
        checkpoint_dict = {}

        intermediate_checkpoint = epoch + 1 < self.total_epochs

        utils.log_rank_zero(
            log,
            "Saving checkpoint. This may take some time. Retrieving full model state dict...",
        )
        start = time.perf_counter()

        # To prevent GPU memory from spiking during checkpoint save,
        # we consolidate the full model and optim state dicts on CPU for rank 0
        cpu_state_dict = training.gather_cpu_state_dict(
            self._model,
            self._is_rank_zero,
            device=self._device,
        )

        utils.log_rank_zero(
            log,
            f"Getting full model state dict took {time.perf_counter() - start:.2f} secs",
        )

        if intermediate_checkpoint:
            start = time.perf_counter()
            utils.log_rank_zero(log, "Getting optimizer state dict...")
            if not self._optimizer_in_bwd:
                opt_state_dict = training.get_full_optimizer_state_dict(
                    self._optimizer,
                    self._is_rank_zero,
                    device=self._device,
                )
            else:
                opt_state_dict = {}
                for param, opt in self._optim_ckpt_wrapper.optim_map.items():
                    opt_state_dict[param] = training.get_full_optimizer_state_dict(
                        opt, self._is_rank_zero, device=self._device
                    )
            utils.log_rank_zero(
                log,
                f"Getting optimizer state dict took {time.perf_counter() - start:.2f} secs",
            )
        else:
            opt_state_dict = None

        # Now that we have the model and opt state dicts, create the actual checkpoint
        # dict to be sent to the checkpointer and ultimately written to file
        if self._is_rank_zero:
            start = time.perf_counter()
            checkpoint_dict.update({training.MODEL_KEY: cpu_state_dict})

            # if training is in progress, checkpoint the optimizer state and recipe
            # state as well
            if intermediate_checkpoint:
                checkpoint_dict.update(
                    {
                        training.OPT_KEY: opt_state_dict,
                        training.SEED_KEY: self.seed,
                        training.EPOCHS_KEY: self.epochs_run,
                        training.TOTAL_EPOCHS_KEY: self.total_epochs,
                        training.MAX_STEPS_KEY: self.max_steps_per_epoch,
                    }
                )

            self._checkpointer.save_checkpoint(
                checkpoint_dict,
                epoch=epoch,
                intermediate_checkpoint=intermediate_checkpoint,
            )
            log.info(f"Saving checkpoint took {time.perf_counter() - start:.2f} secs")

        torch.distributed.barrier()

    def train(self) -> None:
        """
        The core training loop.
        """
        # clean up before training begins
        training.cleanup_before_training()

        # zero out the gradients before starting training
        if not self._optimizer_in_bwd:
            self._optimizer.zero_grad()
        else:
            for opt in self._optim_ckpt_wrapper.optim_map.values():
                opt.zero_grad()

        # Initialize tokens count and running loss (for grad accumulation)
        t0 = time.perf_counter()
        running_loss = 0
        num_tokens = 0

        self._profiler.start()
        # self.epochs_run should be non-zero when we're resuming from a checkpoint
        for curr_epoch in range(self.epochs_run, self.total_epochs):
            # Update the sampler to ensure data is correctly shuffled across epochs
            # in case shuffle is True
            self._sampler.set_epoch(curr_epoch)

            pbar = tqdm(total=self._steps_per_epoch, disable=not (self.rank == 0))
            for idx, batch in enumerate(self._dataloader):
                if (
                    self.max_steps_per_epoch is not None
                    and (idx // self._gradient_accumulation_steps)
                    == self.max_steps_per_epoch
                ):
                    break

                # Start tracking CUDA memory for active steps for just the first epoch
                if (
                    self._is_rank_zero
                    and curr_epoch == 0
                    and self.profiler_profile_memory
                    and idx == self.profiler_wait_steps + self.profiler_warmup_steps
                ):
                    torch.cuda.memory._record_memory_history()

                # Optionally let the model stabilize before enabling fake quant
                if self._fake_quant_after_n_steps is not None:
                    if self.global_step == 0:
                        log.info(
                            "Step 0: Disabling fake quant, will re-enable in step %s"
                            % self._fake_quant_after_n_steps
                        )
                        disable_fq = training.quantization._get_disable_fake_quant(
                            self._quantizer_mode
                        )
                        self._model.apply(disable_fq)
                    elif self.global_step == self._fake_quant_after_n_steps:
                        log.info(
                            "Step %s: Enabling fake quant"
                            % self._fake_quant_after_n_steps
                        )
                        enable_fq = training.quantization._get_enable_fake_quant(
                            self._quantizer_mode
                        )
                        self._model.apply(enable_fq)

                utils.batch_to_device(batch, self._device)

                # Calculate the number of unmasked tokens in the current batch
                # and increment the total number of tokens seen in the step
                current_num_tokens = (
                    batch["labels"] != self._loss_fn.ignore_index
                ).sum()
                num_tokens += current_num_tokens

                # Shape [b, s], needed for the loss, not the model
                labels = batch.pop("labels")

                with self.activations_handling_ctx:
                    logits = self._model(**batch)

                # Shift labels to compute loss: equivalent to labels[..., 1:] and
                # logits[..., :-1, :], but this way we don't need to slice the
                # logits; we just append an ignore index to labels.
                labels = torch.hstack(
                    (labels[..., 1:], self.ignore_labels_cache[: labels.shape[0]])
                )
                if not isinstance(logits, list):
                    labels = labels.reshape(-1)
                    logits = logits.reshape(-1, logits.size(-1))

                # The loss is normalized by default, so we multiply by the number of
                # tokens; this way we can normalize by the total number of tokens
                # when accumulating gradients
                current_loss = self._loss_fn(logits, labels) * current_num_tokens

                # free logits, otherwise they peak backward memory
                del logits

                running_loss += current_loss

                # For optimizer in backward, we need to normalize before calling
                # backward. This case and gradient accumulation are mutually exclusive.
                if self._optimizer_in_bwd:
                    torch.distributed.all_reduce(num_tokens)
                    torch.distributed.all_reduce(running_loss)
                    current_loss = current_loss * (self.world_size / num_tokens)

                current_loss.backward()

                # Step with optimizer
                if (idx + 1) % self._gradient_accumulation_steps == 0:
                    if not self._optimizer_in_bwd:
                        # Get total number of tokens across all ranks to normalize gradients
                        torch.distributed.all_reduce(num_tokens)
                        # This will ensure that the logged loss matches what we're optimizing
                        torch.distributed.all_reduce(running_loss)
                        # Manually scale the gradients from unnormalized loss by the total number of tokens
                        training.scale_grads(self._model, self.world_size / num_tokens)
                        if self._clip_grad_norm is not None:
                            grad_norm = torch.nn.utils.clip_grad_norm_(
                                self._model.parameters(),
                                max_norm=float(self._clip_grad_norm),
                            ).full_tensor()
                        self._optimizer.step()
                        self._optimizer.zero_grad(set_to_none=True)

                    # Update the number of steps when the weights are updated
                    self.global_step += 1

                    loss_to_log = running_loss.item() / num_tokens
                    pbar.update(1)
                    pbar.set_description(
                        f"{curr_epoch + 1}|{self.global_step}|Loss: {loss_to_log}"
                    )

                    # Log per-step metrics
                    if (
                        self.global_step % self._log_every_n_steps == 0
                        and self._is_rank_zero
                    ):
                        time_per_step = time.perf_counter() - t0
                        log_dict = {
                            "loss": loss_to_log,
                            "lr": get_lr(
                                self._optimizer
                                if not self._optimizer_in_bwd
                                else self._optim_ckpt_wrapper
                            ),
                            "tokens_per_second_per_gpu": num_tokens
                            / (time_per_step * self.world_size),
                        }
                        if self._log_peak_memory_stats:
                            log_dict.update(
                                training.get_memory_stats(device=self._device)
                            )
                        if self._clip_grad_norm is not None:
                            log_dict.update({"grad_norm": grad_norm})
                        self._metric_logger.log_dict(
                            log_dict,
                            step=self.global_step,
                        )

                    # Reset running stats for the next step
                    running_loss = 0
                    num_tokens = 0
                    t0 = time.perf_counter()

                    # Stop tracking CUDA memory now that active steps are complete
                    if (
                        self._is_rank_zero
                        and curr_epoch == 0
                        and self.profiler_profile_memory
                        and idx
                        == self.profiler_wait_steps
                        + self.profiler_warmup_steps
                        + self.profiler_active_steps
                    ):
                        torch.cuda.memory._record_memory_history(enabled=None)

                    # Step the profiler. Note that this is called within the gradient
                    # accumulation block, hence will include multiple forward/backward
                    # passes if gradient_accumulation_steps > 1.
                    self._profiler.step()

            self.epochs_run += 1
            self.save_checkpoint(epoch=curr_epoch)

        self._profiler.stop()

    def cleanup(self) -> None:
        if self._is_rank_zero:
            self._metric_logger.close()
        destroy_process_group()


@config.parse
def recipe_main(cfg: DictConfig) -> None:
    """
    Entry point for the recipe.

    Configurable parameters are read in the following order:
        - Parameters specified in config (see available configs through ``tune ls``)
        - Overwritten by arguments from the command-line
    """
    if not training.is_distributed():
        raise RuntimeError(
            "Distributed finetune recipe should be run via a distributed launcher."
            "If using tune CLI, please specify --nnodes 1 and --nproc_per_node [num_gpus]"
        )
    init_process_group("cuda:nccl,cpu:gloo")
    if cfg.get("fsdp_cpu_offload", False):
        # Utilize all available CPU cores for intra-op parallelism. This provides ~2x
        # speed up when benchmarking fused AdamW on CPU.
        training.set_torch_num_threads()

    config.log_config(recipe_name="QATRecipeDistributed", cfg=cfg)

    recipe = QATRecipeDistributed(cfg=cfg)
    recipe.setup(cfg=cfg)
    recipe.train()
    recipe.cleanup()


if __name__ == "__main__":
    sys.exit(recipe_main())


        : : : : : : : : : : : : : : : :        , , , , , , , ,       G G G G G G G G ! ! ! ! ! ! ; ; ; ; ; ; ; ; 6 6 6 6 6 6 6 6 6 6 6 6 < < < < < < 0 0 0 0 0 0 , , , , , , 9 9 9 9 9 9 : : : : : : : : S S S S S S 3 3 3 3 3 3      ewA  A  A  A  A , A  A  A H Z D    6 zCH[[]] ra   