import logging
from functools import partial
from typing import Any, Callable, Dict, Optional, Union

import xgboost
from packaging.version import Version

import ray.train
from ray.train import Checkpoint
from ray.train.constants import TRAIN_DATASET_KEY
from ray.train.trainer import GenDataset
from ray.train.utils import _log_deprecation_warning
from ray.train.xgboost import RayTrainReportCallback, XGBoostConfig
from ray.train.xgboost.v2 import XGBoostTrainer as SimpleXGBoostTrainer
from ray.util.annotations import PublicAPI

logger = logging.getLogger(__name__)

LEGACY_XGBOOST_TRAINER_DEPRECATION_MESSAGE = (
    "Passing in `xgboost.train` kwargs such as `params`, `num_boost_round`, "
    "`label_column`, etc. to `XGBoostTrainer` is deprecated in favor of the "
    "new API which accepts a training function, similar to the other "
    "DataParallelTrainer APIs (ex: TorchTrainer). See this issue for more "
    "context: https://github.com/ray-project/ray/issues/50042"
)


def _xgboost_train_fn_per_worker(
    config: dict,
    label_column: str,
    num_boost_round: int,
    dataset_keys: set,
    xgboost_train_kwargs: dict,
):
    checkpoint = ray.train.get_checkpoint()
    starting_model = None
    remaining_iters = num_boost_round
    if checkpoint:
        starting_model = RayTrainReportCallback.get_model(checkpoint)
        starting_iteration = starting_model.num_boosted_rounds()
        # Train only the *remaining* iterations so that the total number of
        # boosting rounds matches the requested `num_boost_round`.
        remaining_iters = num_boost_round - starting_iteration
        logger.info(
            f"Model loaded from checkpoint will train for additional "
            f"{remaining_iters} iterations (trees) in order to achieve "
            f"the target number of iterations ({num_boost_round=})."
        )

    train_ds_iter = ray.train.get_dataset_shard(TRAIN_DATASET_KEY)
    train_df = train_ds_iter.materialize().to_pandas()

    eval_ds_iters = {
        k: ray.train.get_dataset_shard(k)
        for k in dataset_keys
        if k != TRAIN_DATASET_KEY
    }
    eval_dfs = {k: d.materialize().to_pandas() for k, d in eval_ds_iters.items()}

    train_X, train_y = train_df.drop(label_column, axis=1), train_df[label_column]
    dtrain = xgboost.DMatrix(train_X, label=train_y)

    # Include the training dataset in the evaluation sets so that
    # `train-*` metrics are also computed and reported.
    evals = [(dtrain, TRAIN_DATASET_KEY)]
    for eval_name, eval_df in eval_dfs.items():
        eval_X, eval_y = eval_df.drop(label_column, axis=1), eval_df[label_column]
        evals.append((xgboost.DMatrix(eval_X, label=eval_y), eval_name))

    evals_result = {}
    xgboost.train(
        config,
        dtrain=dtrain,
        evals=evals,
        evals_result=evals_result,
        num_boost_round=remaining_iters,
        xgb_model=starting_model,
        **xgboost_train_kwargs,
    )


@PublicAPI(stability="beta")
class XGBoostTrainer(SimpleXGBoostTrainer):
    """A Trainer for distributed data-parallel XGBoost training.

    Example
    -------

    .. testcode::
        :skipif: True

        import xgboost

        import ray.data
        import ray.train
        from ray.train.xgboost import RayTrainReportCallback, XGBoostTrainer

        def train_fn_per_worker(config: dict):
            # (Optional) Add logic to resume training state from a checkpoint.
            # ray.train.get_checkpoint()
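            #
            # A minimal sketch of that resume logic (for illustration only;
            # it assumes the checkpoint was saved by RayTrainReportCallback):
            #     checkpoint = ray.train.get_checkpoint()
            #     starting_model = (
            #         RayTrainReportCallback.get_model(checkpoint)
            #         if checkpoint
            #         else None
            #     )
            # Then pass `xgb_model=starting_model` to `xgboost.train` below.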

            # 1. Get the dataset shard for the worker and convert it to an `xgboost.DMatrix`.
            train_ds_iter, eval_ds_iter = (
                ray.train.get_dataset_shard("train"),
                ray.train.get_dataset_shard("validation"),
            )
            train_ds, eval_ds = train_ds_iter.materialize(), eval_ds_iter.materialize()

            train_df, eval_df = train_ds.to_pandas(), eval_ds.to_pandas()
            train_X, train_y = train_df.drop("y", axis=1), train_df["y"]
            eval_X, eval_y = eval_df.drop("y", axis=1), eval_df["y"]

            dtrain = xgboost.DMatrix(train_X, label=train_y)
            deval = xgboost.DMatrix(eval_X, label=eval_y)

            params = {
                "tree_method": "approx",
                "objective": "reg:squarederror",
                "eta": 1e-4,
                "subsample": 0.5,
                "max_depth": 2,
            }

            # 2. Do distributed data-parallel training.
            # Ray Train sets up the necessary coordinator processes and
            # environment variables for your workers to communicate with each other.
            bst = xgboost.train(
                params,
                dtrain=dtrain,
                evals=[(deval, "validation")],
                num_boost_round=10,
                callbacks=[RayTrainReportCallback()],
            )

        train_ds = ray.data.from_items([{"x": x, "y": x + 1} for x in range(32)])
        eval_ds = ray.data.from_items([{"x": x, "y": x + 1} for x in range(16)])
        trainer = XGBoostTrainer(
            train_fn_per_worker,
            datasets={"train": train_ds, "validation": eval_ds},
            scaling_config=ray.train.ScalingConfig(num_workers=4),
        )
        result = trainer.fit()
        booster = RayTrainReportCallback.get_model(result.checkpoint)
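
        # The metrics reported by RayTrainReportCallback during training
        # (e.g. the "validation" eval metrics) are also available on the
        # result object via `result.metrics`.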

    Args:
        train_loop_per_worker: The training function to execute on each worker.
            This function can either take in zero arguments or a single ``Dict``
            argument which is set by defining ``train_loop_config``.
            Within this function you can use any of the
            :ref:`Ray Train Loop utilities <train-loop-api>`.
        train_loop_config: A configuration ``Dict`` to pass in as an argument to
            ``train_loop_per_worker``.
            This is typically used for specifying hyperparameters.
        xgboost_config: The configuration for setting up the distributed xgboost
            backend. Defaults to using the "rabit" backend.
            See :class:`~ray.train.xgboost.XGBoostConfig` for more info.
        datasets: The Ray Datasets to use for training and validation.
        dataset_config: The configuration for ingesting the input ``datasets``.
            By default, all the Ray Datasets are split equally across workers.
            See :class:`~ray.train.DataConfig` for more details.
        scaling_config: The configuration for how to scale data parallel training.
            ``num_workers`` determines how many Python processes are used for training,
            and ``use_gpu`` determines whether or not each process should use GPUs.
            See :class:`~ray.train.ScalingConfig` for more info.
        run_config: The configuration for the execution of the training run.
            See :class:`~ray.train.RunConfig` for more info.
        resume_from_checkpoint: A checkpoint to resume training from.
            This checkpoint can be accessed from within ``train_loop_per_worker``
            by calling ``ray.train.get_checkpoint()``.
        metadata: Dict that should be made available via
            `ray.train.get_context().get_metadata()` and in `checkpoint.get_metadata()`
            for checkpoints saved from this Trainer. Must be JSON-serializable.
        label_column: [Deprecated] Name of the label column. A column with this name
            must be present in the training dataset.
        params: [Deprecated] XGBoost training parameters.
            Refer to `XGBoost documentation <https://xgboost.readthedocs.io/>`_
            for a list of possible parameters.
        num_boost_round: [Deprecated] Target number of boosting iterations (trees in the model).
            Note that unlike in ``xgboost.train``, this is the target number
            of trees, meaning that if you set ``num_boost_round=10`` and pass a model
            that has already been trained for 5 iterations, it will be trained for 5
            iterations more, instead of 10 more.
        **train_kwargs: [Deprecated] Additional kwargs passed to the ``xgboost.train()`` function.
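
    A legacy-API call such as
    ``XGBoostTrainer(datasets={"train": train_ds}, label_column="y",
    params=params, num_boost_round=10)`` maps onto the new API by moving
    ``params`` and ``num_boost_round`` into ``train_loop_per_worker``,
    as in the example above.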
    """

    _handles_checkpoint_freq = True
    _handles_checkpoint_at_end = True

    def __init__(
        self,
        train_loop_per_worker: Optional[
            Union[Callable[[], None], Callable[[Dict], None]]
        ] = None,
        *,
        train_loop_config: Optional[Dict] = None,
        xgboost_config: Optional[XGBoostConfig] = None,
        scaling_config: Optional[ray.train.ScalingConfig] = None,
        run_config: Optional[ray.train.RunConfig] = None,
        datasets: Optional[Dict[str, GenDataset]] = None,
        dataset_config: Optional[ray.train.DataConfig] = None,
        resume_from_checkpoint: Optional[Checkpoint] = None,
        metadata: Optional[Dict[str, Any]] = None,
        # Deprecated legacy API arguments.
        label_column: Optional[str] = None,
        params: Optional[Dict[str, Any]] = None,
        num_boost_round: Optional[int] = None,
        **train_kwargs,
    ):
        if Version(xgboost.__version__) < Version("1.7.0"):
            raise ImportError(
                "`XGBoostTrainer` requires the `xgboost` version to be >= 1.7.0. "
                'Upgrade with: `pip install -U "xgboost>=1.7"`'
            )

        legacy_api = train_loop_per_worker is None
        if legacy_api:
            train_loop_per_worker = self._get_legacy_train_fn_per_worker(
                xgboost_train_kwargs=train_kwargs,
                run_config=run_config,
                datasets=datasets,
                label_column=label_column,
                num_boost_round=num_boost_round,
            )
            train_loop_config = params or {}
        elif train_kwargs:
            _log_deprecation_warning(
                "Passing `xgboost.train` kwargs to `XGBoostTrainer` is "
                "deprecated. In your training function, you can call "
                "`xgboost.train(**kwargs)` with arbitrary arguments. "
                + LEGACY_XGBOOST_TRAINER_DEPRECATION_MESSAGE
            )

        super(XGBoostTrainer, self).__init__(
            train_loop_per_worker=train_loop_per_worker,
            train_loop_config=train_loop_config,
            xgboost_config=xgboost_config,
            scaling_config=scaling_config,
            run_config=run_config,
            datasets=datasets,
            dataset_config=dataset_config,
            resume_from_checkpoint=resume_from_checkpoint,
            metadata=metadata,
        )

    def _get_legacy_train_fn_per_worker(
        self,
        xgboost_train_kwargs: dict,
        run_config: Optional[ray.train.RunConfig],
        datasets: Optional[Dict[str, GenDataset]],
        label_column: Optional[str],
        num_boost_round: Optional[int],
    ) -> Callable[[dict], None]:
        """Get the training function for the legacy XGBoostTrainer API."""
        datasets = datasets or {}
        if not datasets.get(TRAIN_DATASET_KEY):
            raise ValueError(
                "`datasets` must be provided for the XGBoostTrainer API if "
                "`train_loop_per_worker` is not provided. This dict must "
                f"contain the training dataset under the key: "
                f"'{TRAIN_DATASET_KEY}'. Got keys: {list(datasets.keys())}"
            )
        if not label_column:
            raise ValueError(
                "`label_column` must be provided for the XGBoostTrainer API "
                "if `train_loop_per_worker` is not provided. "
                "This is the column name of the label in the dataset."
            )
        num_boost_round = num_boost_round or 10

        _log_deprecation_warning(LEGACY_XGBOOST_TRAINER_DEPRECATION_MESSAGE)

        # Attach a default checkpointing callback unless the user already
        # supplied a `RayTrainReportCallback` of their own.
        callbacks = xgboost_train_kwargs.get("callbacks", [])
        user_supplied_callback = any(
            isinstance(callback, RayTrainReportCallback) for callback in callbacks
        )
        callback_kwargs = {}
        if run_config:
            checkpoint_frequency = run_config.checkpoint_config.checkpoint_frequency
            checkpoint_at_end = run_config.checkpoint_config.checkpoint_at_end

            callback_kwargs["frequency"] = checkpoint_frequency
            # Default `checkpoint_at_end=True` unless it is explicitly set.
            callback_kwargs["checkpoint_at_end"] = (
                checkpoint_at_end if checkpoint_at_end is not None else True
            )

        if not user_supplied_callback:
            callbacks.append(RayTrainReportCallback(**callback_kwargs))
        xgboost_train_kwargs["callbacks"] = callbacks

        train_fn_per_worker = partial(
            _xgboost_train_fn_per_worker,
            label_column=label_column,
            num_boost_round=num_boost_round,
            dataset_keys=set(datasets),
            xgboost_train_kwargs=xgboost_train_kwargs,
        )
        return train_fn_per_worker

    @classmethod
    def get_model(cls, checkpoint: Checkpoint) -> xgboost.Booster:
        """Retrieve the XGBoost model stored in this checkpoint."""
        return RayTrainReportCallback.get_model(checkpoint)