
import sys
import time
from functools import partial
from typing import Any, Dict, Optional, Union
from warnings import warn

import torch
import torchtune.modules.common_utils as common_utils
from omegaconf import DictConfig, ListConfig
from torch import nn
from torch.optim import Optimizer
from torchdata.stateful_dataloader import StatefulDataLoader
from torchdata.stateful_dataloader.sampler import StatefulDistributedSampler

from torchtune import config, modules, training, utils
from torchtune.config._utils import _get_component_from_path
from torchtune.data import padded_collate_packed
from torchtune.datasets import ConcatDataset
from torchtune.modules.peft import (
    get_adapter_params,
    get_adapter_state_dict,
    get_lora_module_names,
    get_merged_lora_ckpt,
    set_trainable_params,
    validate_missing_and_unexpected_for_lora,
)
from torchtune.recipe_interfaces import FTRecipeInterface
from torchtune.training import DummyProfiler, PROFILER_KEY
from tqdm import tqdm

log = utils.get_logger("DEBUG")


class LoRAFinetuneRecipeSingleDevice(FTRecipeInterface):
    """
    LoRA finetuning recipe for dense transformer-based LLMs such as Llama2. This recipe is optimized
    for single GPU training. Training on CPU is not supported.

    Features:
        - Activation Checkpointing. This can be controlled using the ``enable_activation_checkpointing``
            flag. Activation checkpointing helps reduce the memory footprint since we no longer keep
            activations in memory and instead recompute them during the backward pass. This is especially
            helpful for larger batch sizes when you're memory constrained. But these savings in memory
            come at the cost of training performance. In most cases training can slow-down quite a bit as
            a result of this activation recomputation.

        - Activation Offloading. This can be controlled using the ``enable_activation_offloading``
            flag. Activation offloading is a technique similar to activation checkpointing that helps
            reduce the memory footprint to prevent OOMs on CUDA and enable bigger batches. Where activation
            checkpointing drops the activation in the forward pass to recompute it later in the backward pass,
            activation offloading moves the activation in the forward pass to the CPU and brings it
            back during the backward pass. As always, there is a tradeoff--these savings in memory can
            come at the cost of training performance and CPU resources. To recover some runtime cost,
            we've added an option to enable offloading on a different stream to permit overlapping with
            the computation. This option is currently only available on PyTorch 2.5 or later and will
            be enabled by default if an acceptable torch version is found. Activation offloading can be
            used in conjunction with activation checkpointing.
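
            As a sketch of the relevant config keys (values illustrative, not defaults):

            .. code-block:: yaml
                enable_activation_checkpointing: True
                enable_activation_offloading: True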

        - Precision. Full fp32 and bf16 training are supported. Precision is controlled using the ``dtype``
            flag. When ``dtype=bf16``, all activations, gradients and optimizer states are in bfloat16. In
            most cases this should halve the memory footprint of full precision (fp32) training, without
            loss in model quality (will depend on the model, training data and other settings). For
            GPUs which do not support bfloat16, we fall back to fp32. Mixed precision training and fp16
            precision are currently not supported.
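
            As a sketch, bf16 training is selected with a single config key (value illustrative):

            .. code-block:: yaml
                dtype: bf16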

        - Gradient Accumulation. You can simulate larger batch sizes by accumulating gradients. This is
            controlled using the ``gradient_accumulation_steps`` flag.

                total_batch_size = batch_size * gradient_accumulation_steps

            For example: with batch_size=1 and gradient_accumulation_steps=32 we get a total batch size of 32.

            Gradient accumulation is especially useful when you are memory constrained. In this case,
            accumulating gradients might give you better training speed than enabling activation
            checkpointing.
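
            As a sketch of the config keys involved (values illustrative):

            .. code-block:: yaml
                batch_size: 1
                gradient_accumulation_steps: 32  # total batch size of 32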

        - Lower precision optimizers. This recipe supports lower-precision optimizers from the bitsandbytes
            library (https://huggingface.co/docs/bitsandbytes/main/en/index). We've tested the recipe with
            8-bit AdamW and Paged AdamW.
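
            As a sketch, a low-precision optimizer can be swapped in through the optimizer
            component (component path from the bitsandbytes library; lr illustrative):

            .. code-block:: yaml
                optimizer:
                    _component_: bitsandbytes.optim.PagedAdamW8bit
                    lr: 2e-5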

        - Checkpointing. Model weights are checkpointed both at the end of each epoch and at the end of
            training. Currently we checkpoint both the adapter weights (trainable params only) and the
            complete merged weights (adapter weights added back to the base model). For more details
            please take a look at our LoRA tutorial
            (https://pytorch.org/torchtune/main/tutorials/lora_finetune.html).

            Optimizer state and recipe state (seed, total_epochs, number of epochs run, etc.) are
            only saved at the end of a given epoch and used in case of resuming training. Resuming
            training is controlled by the ``resume_from_checkpoint`` flag. Mid-epoch checkpointing is
            currently not supported.

            For more details on the checkpointer, please take a look at
            our checkpointer deepdive (https://pytorch.org/torchtune/main/tutorials/checkpointer.html).
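
            As a sketch, resuming is driven by two config keys (values illustrative):

            .. code-block:: yaml
                resume_from_checkpoint: True
                save_adapter_weights_only: False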

        - Logging. Terminal, Disk, WandB and TensorBoard are all supported.

        - Gradient Clipping. Gradient clipping is supported using the ``clip_grad_norm`` flag. By default,
            ``clip_grad_norm`` is set to ``None``. If you only want to log the grad norm, you can set
            ``clip_grad_norm='inf'``.
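
            As a sketch (value illustrative):

            .. code-block:: yaml
                clip_grad_norm: 1.0  # or 'inf' to log the norm without clipping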

    For a full list of example configs for this recipe, run ``tune ls`` on the command line. Each config
    has example commands for how to kick-off training.

    Args:
        cfg (DictConfig): OmegaConf object parsed from yaml file

    Raises:
        ValueError: If ``dtype`` is set to fp16.
        RuntimeError: If ``dtype`` is set to bf16 and the hardware does not support bf16.
        RuntimeError: If ``enable_activation_offloading`` is True and device is not CUDA.
        RuntimeError: If ``enable_activation_offloading`` is True and ``enable_activation_checkpointing`` is False.
        RuntimeError: If ``left_pad_sequence`` is set as the data collator

    """

    def __init__(self, cfg: DictConfig) -> None:
        self._device = utils.get_device(device=cfg.device)
        # Reduced precision logic
        self._dtype = training.get_dtype(cfg.dtype, device=self._device)
        if self._dtype == torch.float16:
            raise ValueError(
                "fp16 precision is not supported in this recipe. Please use fp32 or bf16."
            )

        # logging attributes
        self._output_dir = cfg.output_dir
        self._log_every_n_steps = cfg.get("log_every_n_steps", 1)
        self._log_peak_memory_stats = cfg.get("log_peak_memory_stats", False)

        if self._log_peak_memory_stats and self._device.type == "cpu":
            log.info(
                "log_peak_memory_stats was set to True, however, training uses cpu. "
                "Setting log_peak_memory_stats=False."
            )
            self._log_peak_memory_stats = False

        # These attributes constitute the recipe state and are updated by
        # ``load_checkpoint`` when ``resume_from_checkpoint=True``
        self.seed = training.set_seed(
            seed=cfg.seed, debug_mode=cfg.get("cudnn_deterministic_mode", None)
        )
        self.epochs_run = 0
        self.total_epochs = cfg.epochs
        self.max_steps_per_epoch = cfg.max_steps_per_epoch
        self.global_step = 0

        self._resume_from_checkpoint = cfg.resume_from_checkpoint
        self._save_adapter_weights_only = cfg.get("save_adapter_weights_only", False)
        self._gradient_accumulation_steps = cfg.gradient_accumulation_steps
        self._clip_grad_norm = cfg.get("clip_grad_norm", None)

        # activation checkpointing/offloading
        self._enable_activation_checkpointing = cfg.get(
            "enable_activation_checkpointing", False
        )
        self._enable_activation_offloading = cfg.get(
            "enable_activation_offloading", False
        )
        if self._enable_activation_offloading:
            if self._device.type != "cuda":
                raise RuntimeError(
                    "enable_activation_offloading should only be True when training on CUDA"
                )
            if not self._enable_activation_checkpointing:
                raise RuntimeError(
                    "enable_activation_offloading should only be True when "
                    "enable_activation_checkpointing is True"
                )
        elif (
            self._enable_activation_checkpointing
            and cfg.checkpointer.model_type != "LLAMA3_VISION"
        ):
            utils.log_rank_zero(
                log,
                "Hint: enable_activation_checkpointing is True, but "
                "enable_activation_offloading isn't. Enabling activation offloading "
                "should reduce memory further.",
            )

    def load_checkpoint(self, cfg_checkpointer: DictConfig) -> Dict[str, Any]:
        """
        Extract the checkpoint state from file and validate. This includes the
        base model weights. If resume_from_checkpoint is True, this also includes
        the adapter weights and recipe state.
        """
        self._checkpointer = config.instantiate(
            cfg_checkpointer,
            should_load_recipe_state=self._resume_from_checkpoint,
        )
        checkpoint_dict = self._checkpointer.load_checkpoint()

        if self._resume_from_checkpoint:
            if training.ADAPTER_KEY not in checkpoint_dict:
                raise ValueError(
                    "Adapter weights not found. Please ensure a valid adapter checkpoint is provided."
                )
            self._update_recipe_state(checkpoint_dict)
        return checkpoint_dict

    def _update_recipe_state(self, ckpt_dict: Dict[str, Any]) -> None:
        """
        Updates the recipe state from checkpoint.
        """
        try:
            self.epochs_run = ckpt_dict[training.EPOCHS_KEY]

            # on mismatch, warn the user and prevent the override
            if self.seed != ckpt_dict[training.SEED_KEY]:
                warn(
                    message=(
                        "Config value for seed does not match the checkpoint value, "
                        f"using the checkpoint value: {ckpt_dict[training.SEED_KEY]}"
                    )
                )
                self.seed = ckpt_dict[training.SEED_KEY]
            if self.max_steps_per_epoch != ckpt_dict[training.MAX_STEPS_KEY]:
                warn(
                    message=(
                        "Config value for max_steps_per_epoch does not match the checkpoint value, "
                        f"using the checkpoint value: {ckpt_dict[training.MAX_STEPS_KEY]}"
                    )
                )
                self.max_steps_per_epoch = ckpt_dict[training.MAX_STEPS_KEY]

            # on mismatch, warn the user but allow the override
            if self.total_epochs != ckpt_dict[training.TOTAL_EPOCHS_KEY]:
                warn(
                    message=(
                        "Config value for total_epochs does not match the checkpoint value, "
                        f"using the config value: {self.total_epochs}"
                    )
                )
        except KeyError as e:
            raise KeyError(
                "Checkpoint does not contain the required keys needed for updating recipe state. "
                "Are you sure you passed in the right recipe checkpoint?"
            ) from e

    def setup(self, cfg: DictConfig) -> None:
        """
        Setup the recipe state. This includes recipe state (if resume_from_checkpoint is True),
        model, tokenizer, loss, optimizer, learning rate scheduler, sampler, and dataloader.
        """
        self._metric_logger = config.instantiate(cfg.metric_logger)

        # log config with parameter override
        self._metric_logger.log_config(cfg)

        self._compile = cfg.compile
        if cfg.device == "npu" and cfg.compile:
            raise ValueError(
                "NPU does not support model compilation. Please set `compile: False` in the config."
            )

        checkpoint_dict = self.load_checkpoint(cfg_checkpointer=cfg.checkpointer)

        # hack to toggle to the low cpu ram version of the reparametrize_as_dtype
        # hook based on the config.
        common_utils._use_low_cpu_ram = cfg.get("low_cpu_ram", False)

        # set up model
        self._model = self._setup_model(
            cfg_model=cfg.model,
            enable_activation_checkpointing=self._enable_activation_checkpointing,
            enable_activation_offloading=self._enable_activation_offloading,
            compile_model=self._compile,
            base_model_state_dict=checkpoint_dict[training.MODEL_KEY],
            lora_weights_state_dict=(
                checkpoint_dict[training.ADAPTER_KEY]
                if self._resume_from_checkpoint
                else None
            ),
        )

        self._tokenizer = config.instantiate(cfg.tokenizer)
        log.info("Tokenizer is initialized from file.")

        self._optimizer = self._setup_optimizer(
            cfg_optimizer=cfg.optimizer,
            opt_state_dict=(
                checkpoint_dict[training.OPT_KEY]
                if self._resume_from_checkpoint
                else None
            ),
        )

        # initialize loss
        self._loss_fn = config.instantiate(cfg.loss)
        if self._compile:
            self._loss_fn = training.compile_loss(self._loss_fn)

        if self._loss_fn.__class__.__name__ == "CEWithChunkedOutputLoss":
            # set num_output_chunks for model
            self._model.set_num_output_chunks(self._loss_fn.num_output_chunks)
        log.info("Loss is initialized.")

        # Dataloader depends on the tokenizer and loss_fn and should be
        # setup after both of these are initialized
        collate_name = cfg.get("collate_fn", "torchtune.data.padded_collate_sft")
        self._dataloader = self._setup_data(
            cfg_dataset=cfg.dataset,
            shuffle=cfg.shuffle,
            batch_size=cfg.batch_size,
            collate_fn=collate_name,
            dataloader_state_dict=(
                checkpoint_dict[training.DATALOADER_KEY]
                if self._resume_from_checkpoint
                else None
            ),
        )

        # Finally update the recipe state which can only be correctly set after all of the
        # other components have been initialized and updated.
        self._steps_per_epoch = (
            len(self._dataloader) // self._gradient_accumulation_steps
        )
        if (
            self.max_steps_per_epoch is not None
            and self.max_steps_per_epoch < self._steps_per_epoch
        ):
            self._steps_per_epoch = self.max_steps_per_epoch
        self.global_step = self.epochs_run * self._steps_per_epoch

        # Setup lr scheduler
        self._lr_scheduler = self._setup_lr_scheduler(
            cfg_lr_scheduler=cfg.lr_scheduler,
            num_training_steps=self.total_epochs * self._steps_per_epoch,
            last_epoch=self.global_step - 1,
        )

        # Set up profiler, returns DummyProfiler (nullcontext object with no-op `step` and
        # `stop` methods) if cfg is missing profiler key or if `cfg.profiler.enabled = False`
        self._profiler = self._setup_profiler(cfg.get(PROFILER_KEY, None))

        # Used to ignore labels for loss computation
        self.ignore_labels_cache = torch.full(
            (cfg.batch_size, 1), self._loss_fn.ignore_index, device=self._device
        )

    def _setup_profiler(
        self, cfg_profiler: Optional[DictConfig] = None
    ) -> Union[torch.profiler.profile, DummyProfiler]:
        """
        Parses the `profiler` section of top-level `cfg` and sets up profiler

        Args:
            cfg_profiler (Optional[DictConfig]): ``profiler`` section of the top-level ``cfg`` (the main config passed to
                `recipe.main`). Default None.

        Returns:
            profiler: Union[torch.profiler.profile, DummyProfiler] - DummyProfiler is a nullcontext with no-op methods
            for `start`, `stop`, and `step` that can be used in place of `torch.profiler.profile` when the profiler
            is disabled, so that the instrumented training loop does not need to be changed.

        The profiler config can be provided in configs under the `profiler` key with the following layout:

        .. code-block:: yaml
            profiler:
                enabled: bool

                # Output directory of trace artifacts
                output_dir: str

                # `torch.profiler.ProfilerActivity` types to trace
                cpu: bool
                cuda: bool

                # Trace options
                profile_memory: bool
                with_stack: bool
                record_shapes: bool
                with_flops: bool

                # `torch.profiler.schedule` options:
                # wait_steps -> wait, warmup_steps -> warmup, active_steps -> active, num_cycles -> repeat
                wait_steps: int
                warmup_steps: int
                active_steps: int
                num_cycles: int
        """
        # Missing profiler section in config, assume disabled
        if cfg_profiler is None:
            cfg_profiler = DictConfig({"enabled": False})

        # Check that component is included and set correctly
        if cfg_profiler.get("_component_", None) is None:
            cfg_profiler["_component_"] = "torchtune.training.setup_torch_profiler"
        else:
            assert (
                cfg_profiler.get("_component_")
                == "torchtune.training.setup_torch_profiler"
            ), "Only torch profiler supported currently: component must be `torchtune.training.setup_torch_profiler`"

        profiler, profiler_cfg = config.instantiate(cfg_profiler)

        log.info(f" Profiler config after instantiation: {profiler_cfg}")

        self.profiler_profile_memory = profiler_cfg.get("profile_memory", False)
        if profiler_cfg["enabled"]:
            self.profiler_wait_steps = profiler_cfg["wait_steps"]
            self.profiler_warmup_steps = profiler_cfg["warmup_steps"]
            self.profiler_active_steps = profiler_cfg["active_steps"]

        return profiler

    def _setup_model(
        self,
        cfg_model: DictConfig,
        enable_activation_checkpointing: bool,
        enable_activation_offloading: bool,
        compile_model: bool,
        base_model_state_dict: Dict[str, Any],
        lora_weights_state_dict: Optional[Dict[str, Any]] = None,
    ) -> nn.Module:
        with training.set_default_dtype(self._dtype), self._device:
            model = config.instantiate(cfg_model)

        self._lora_rank = cfg_model.lora_rank
        self._lora_alpha = cfg_model.lora_alpha
        self._lora_attn_modules = list(cfg_model.lora_attn_modules)
        self._apply_lora_to_mlp = cfg_model.apply_lora_to_mlp
        self._apply_lora_to_output = getattr(cfg_model, "apply_lora_to_output", False)
        self.adapter_params = get_adapter_params(model)
        self._is_dora = any(["magnitude" in k for k in self.adapter_params.keys()])
        set_trainable_params(model, self.adapter_params)

        if compile_model:
            training.compile_model(model)

        if enable_activation_checkpointing:
            training.set_activation_checkpointing(
                model, auto_wrap_policy={modules.TransformerSelfAttentionLayer}
            )

        base_missing, base_unexpected = model.load_state_dict(
            base_model_state_dict, strict=False
        )

        # This is for any adapters that need to be initialized after base weights
        # have been loaded (e.g. DoRA).
        if self._is_dora:
            for m in model.modules():
                if hasattr(m, "initialize_dora_magnitude"):
                    m.initialize_dora_magnitude()

        if lora_weights_state_dict:
            lora_missing, lora_unexpected = model.load_state_dict(
                lora_weights_state_dict, strict=False
            )
        else:
            lora_missing, lora_unexpected = None, None
        validate_missing_and_unexpected_for_lora(
            lora_attn_modules=self._lora_attn_modules,
            apply_lora_to_mlp=self._apply_lora_to_mlp,
            apply_lora_to_output=self._apply_lora_to_output,
            base_missing=base_missing,
            base_unexpected=base_unexpected,
            lora_missing=lora_missing,
            lora_unexpected=lora_unexpected,
        )
        # Validate model adapter params were loaded in with the expected dtype
        training.validate_expected_param_dtype(
            self.adapter_params.items(), dtype=self._dtype
        )

        # Enable activation offloading; returns a no-op context manager when disabled
        self.activations_handling_ctx = training.get_act_offloading_ctx_manager(
            model, enable_activation_offloading
        )

        log.info(f"Model is initialized with precision {self._dtype}.")

        if self._device.type == "cuda":
            memory_stats = training.get_memory_stats(device=self._device)
            training.log_memory_stats(memory_stats)
        return model

    def _setup_optimizer(
        self,
        cfg_optimizer: DictConfig,
        opt_state_dict: Optional[Dict[str, Any]] = None,
    ) -> Optimizer:
        optimizer = config.instantiate(cfg_optimizer, self._model.parameters())
        if opt_state_dict:
            optimizer.load_state_dict(opt_state_dict)
        log.info("Optimizer and loss are initialized.")
        return optimizer

    def _setup_lr_scheduler(
        self,
        cfg_lr_scheduler: DictConfig,
        num_training_steps: int,
        last_epoch: int,
    ) -> Optimizer:
        lr_scheduler = config.instantiate(
            cfg_lr_scheduler,
            self._optimizer,
            num_training_steps=num_training_steps,
            last_epoch=last_epoch,
        )
        log.info("Learning rate scheduler is initialized.")
        return lr_scheduler

    def _setup_data(
        self,
        cfg_dataset: DictConfig,
        shuffle: bool,
        batch_size: int,
        collate_fn: str,
        dataloader_state_dict: Optional[Dict[str, Any]] = None,
    ) -> StatefulDataLoader:
        """
        All data related setup happens here. This recipe currently supports only
        map-style datasets. If a state_dict is provided (meaning we are resuming a training run),
        it is loaded into the dataloader.
        """
        if isinstance(cfg_dataset, ListConfig):
            datasets = [
                config.instantiate(single_cfg_dataset, self._tokenizer)
                for single_cfg_dataset in cfg_dataset
            ]
            ds = ConcatDataset(datasets=datasets)
            packed = getattr(ds, "packed", False)
        else:
            ds = config.instantiate(cfg_dataset, self._tokenizer)
            packed = cfg_dataset.get("packed", False)

        # Instantiate collate_fn
        if "left_pad_sequence" in collate_fn:
            raise RuntimeError("left_pad_sequence collator is only for inference.")
        collate_fn = _get_component_from_path(collate_fn)

        sampler = StatefulDistributedSampler(
            ds, num_replicas=1, rank=0, shuffle=shuffle, seed=0
        )
        dataloader = StatefulDataLoader(
            dataset=ds,
            batch_size=batch_size,
            sampler=sampler,
            collate_fn=(
                partial(
                    collate_fn,
                    padding_idx=self._tokenizer.pad_id,
                    ignore_idx=self._loss_fn.ignore_index,
                )
                if not packed
                else padded_collate_packed
            ),
            # dropping last avoids shape issues with compile + flex attention
            drop_last=True,
        )
        if dataloader_state_dict is not None:
            dataloader.load_state_dict(dataloader_state_dict)

        return dataloader

    def save_checkpoint(self, epoch: int) -> None:
        """
        Checkpoint the state of the recipe. The constructed checkpoint state dict
        contains the following information:
        - Merged weights with key MODEL_KEY
        - Adapter weights with key ADAPTER_KEY
        - Relevant recipe state if training is not complete
        - If the `self._save_adapter_weights_only` option is True, the checkpointer will save only the adapter weights

        To correctly resume from training, the adapter weights and recipe state must be provided along with the base model weights.
        """
        ckpt_dict = {}

        intermediate_checkpoint = epoch + 1 < self.total_epochs
        # if training is in-progress, checkpoint the optimizer state as well
        if intermediate_checkpoint:
            ckpt_dict.update(
                {
                    training.OPT_KEY: self._optimizer.state_dict(),
                    training.SEED_KEY: self.seed,
                    training.EPOCHS_KEY: self.epochs_run,
                    training.TOTAL_EPOCHS_KEY: self.total_epochs,
                    training.MAX_STEPS_KEY: self.max_steps_per_epoch,
                    training.DATALOADER_KEY: self._dataloader.state_dict(),
                }
            )

        adapter_state_dict = get_adapter_state_dict(self._model.state_dict())
        ckpt_dict.update({training.ADAPTER_KEY: adapter_state_dict})

        if not self._save_adapter_weights_only:
            # Construct the full state dict with LoRA weights merged into base LLM weights
            # Move to CPU to avoid a copy on GPU
            state_dict = {k: v.cpu() for k, v in self._model.state_dict().items()}

            merged_state_dict = get_merged_lora_ckpt(
                state_dict,
                rank=self._lora_rank,
                alpha=self._lora_alpha,
            )
            ckpt_dict.update({training.MODEL_KEY: merged_state_dict})

        adapter_config = {
            "r": self._lora_rank,
            "lora_alpha": self._lora_alpha,
            "target_modules": get_lora_module_names(
                self._lora_attn_modules,
                self._apply_lora_to_mlp,
                self._apply_lora_to_output,
            ),
            "peft_type": "LORA",
        }
        ckpt_dict.update({training.ADAPTER_CONFIG: adapter_config})
        self._checkpointer.save_checkpoint(
            ckpt_dict,
            epoch=epoch,
            intermediate_checkpoint=intermediate_checkpoint,
            adapter_only=self._save_adapter_weights_only,
        )

    def _loss_step(self, batch: Dict[str, torch.Tensor]) -> torch.Tensor:
        # Shape [b, s], needed for the loss not the model
        labels = batch.pop("labels")

        with self.activations_handling_ctx:
            logits = self._model(**batch)

        # Shift labels to compute loss: equivalent to doing labels[..., 1:] and
        # logits[..., :-1, :], but this way we don't need to slice the logits --
        # we just append an ignore index to labels.
        labels = torch.hstack(
            (labels[..., 1:], self.ignore_labels_cache[: labels.shape[0]])
        )
        if not isinstance(logits, list):
            labels = labels.reshape(-1)
            logits = logits.reshape(-1, logits.size(-1))

        loss = self._loss_fn(logits, labels)

        # free logits otherwise it peaks backward memory
        del logits

        return loss

    def train(self) -> None:
        """
        The core training loop.
        """
        if self._compile:
            log.info(
                "NOTE: torch.compile is enabled and model is compiled in first forward. "
                "Expect a relatively slow first iteration."
            )

        # Initialize tokens count and running loss (for grad accumulation)
        t0 = time.perf_counter()
        running_loss = 0
        num_tokens = 0

        with self._profiler as prof:
            # self.epochs_run should be non-zero when we're resuming from a checkpoint
            for curr_epoch in range(self.epochs_run, self.total_epochs):
                pbar = tqdm(total=self._steps_per_epoch)
                self._dataloader.sampler.set_epoch(curr_epoch)
                for idx, batch in enumerate(self._dataloader):
                    # Start tracking CUDA memory for active steps for just the first epoch
                    if (
                        curr_epoch == 0
                        and self.profiler_profile_memory
                        and idx == self.profiler_wait_steps + self.profiler_warmup_steps
                        and self._device.type == "cuda"
                    ):
                        torch.cuda.memory._record_memory_history()

                    utils.batch_to_device(batch, self._device)

                    # Calculate the number of unmasked tokens in the current batch
                    # and increment the total number of tokens seen in the step
                    current_num_tokens = (
                        batch["labels"] != self._loss_fn.ignore_index
                    ).sum()
                    num_tokens += current_num_tokens

                    # Loss is normalized by default so we multiply by the number of tokens
                    # This way we can normalize by the total number of tokens if we're accumulating gradients
                    current_loss = self._loss_step(batch) * current_num_tokens
                    running_loss += current_loss
                    current_loss.backward()

                    # Step with optimizer
                    if (idx + 1) % self._gradient_accumulation_steps == 0:
                        training.scale_grads(self._model, 1 / num_tokens)
                        if self._clip_grad_norm is not None:
                            grad_norm = torch.nn.utils.clip_grad_norm_(
                                self._model.parameters(),
                                max_norm=float(self._clip_grad_norm),
                            )
                        self._optimizer.step()
                        self._optimizer.zero_grad(set_to_none=True)
                        self._lr_scheduler.step()

                        # Update the number of steps when the weights are updated
                        self.global_step += 1

                        loss_to_log = running_loss.item() / num_tokens
                        pbar.update(1)
                        pbar.set_description(
                            f"{curr_epoch + 1}|{self.global_step}|Loss: {loss_to_log}"
                        )

                        # Log per-step metrics
                        if self.global_step % self._log_every_n_steps == 0:
                            time_per_step = time.perf_counter() - t0
                            log_dict = {
                                "loss": loss_to_log,
                                "lr": self._optimizer.param_groups[0]["lr"],
                                "tokens_per_second_per_gpu": num_tokens / time_per_step,
                            }
                            if (
                                self._device.type == "cuda"
                                and self._log_peak_memory_stats
                            ):
                                log_dict.update(
                                    training.get_memory_stats(device=self._device)
                                )
                            if self._clip_grad_norm is not None:
                                log_dict.update({"grad_norm": grad_norm})
                            self._metric_logger.log_dict(
                                log_dict, step=self.global_step
                            )

                        # Reset running stats for the next step
                        running_loss = 0
                        num_tokens = 0
                        t0 = time.perf_counter()

                    # Stop tracking CUDA memory now that active steps are complete
                    if (
                        curr_epoch == 0
                        and self.profiler_profile_memory
                        and idx
                        == self.profiler_wait_steps
                        + self.profiler_warmup_steps
                        + self.profiler_active_steps
                        and self._device.type == "cuda"
                    ):
                        torch.cuda.memory._record_memory_history(enabled=None)

                    # Step the profiler (once per batch, so a profiler schedule cycle
                    # may span multiple forward/backward passes when accumulating gradients)
                    prof.step()

                    if (
                        (idx + 1) // self._gradient_accumulation_steps
                    ) == self.max_steps_per_epoch:
                        break

                self.epochs_run += 1
                start_save_checkpoint = time.perf_counter()
                log.info("Starting checkpoint save...")
                self.save_checkpoint(epoch=curr_epoch)
                log.info(
                    "Checkpoint saved in {:.2f} seconds.".format(
                        time.perf_counter() - start_save_checkpoint
                    )
                )

    def cleanup(self) -> None:
        self._metric_logger.close()


@config.parse
def recipe_main(cfg: DictConfig) -> None:
    """
    Entry point for the recipe.

    Configurable parameters are read in the following order:
        - Parameters specified in config (see available configs through ``tune ls``)
        - Overwritten by arguments from the command-line
    """
    config.log_config(recipe_name="LoRAFinetuneRecipeSingleDevice", cfg=cfg)
    recipe = LoRAFinetuneRecipeSingleDevice(cfg=cfg)
    recipe.setup(cfg=cfg)
    recipe.train()
    recipe.cleanup()


if __name__ == "__main__":
    sys.exit(recipe_main())

        - - - - - - - - - - - -        5 5 5 5 5 5 5 5 5 , , , , , , , ,       ! ! ! ! ! ! < < < < < < L L L L L L 6 6 6 6 6 6 6 6 6 6 6 6 < < < < < < 0 0 0 0 0 0 , , , , , ,                : 9 9 9 9 9 : : : : : : : :      ewj$ j$ j$ j$ j$%6 j$ j$ j$Z Z D     zCH[[]] rZ   