
import logging
from typing import Any, Callable, Dict, Optional, Union

import ray.train
from ray.train import Checkpoint
from ray.train.data_parallel_trainer import DataParallelTrainer
from ray.train.trainer import GenDataset
from ray.train.xgboost import XGBoostConfig

logger = logging.getLogger(__name__)


class XGBoostTrainer(DataParallelTrainer):
    """A Trainer for distributed data-parallel XGBoost training.

    Example
    -------

    .. testcode::
        :skipif: True

        import xgboost

        import ray.data
        import ray.train
        from ray.train.xgboost import RayTrainReportCallback, XGBoostTrainer

        def train_fn_per_worker(config: dict):
            # (Optional) Add logic to resume training state from a checkpoint.
            # ray.train.get_checkpoint()
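            #
            # A sketch (assumes prior checkpoints were saved by
            # `RayTrainReportCallback`, as in the training loop below):
            #     checkpoint = ray.train.get_checkpoint()
            #     xgb_model = (
            #         RayTrainReportCallback.get_model(checkpoint)
            #         if checkpoint
            #         else None
            #     )
            # Passing `xgb_model=xgb_model` to `xgboost.train` below would
            # then continue boosting from the restored model.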

            # 1. Get the dataset shard for the worker and convert to an `xgboost.DMatrix`
            train_ds_iter, eval_ds_iter = (
                ray.train.get_dataset_shard("train"),
                ray.train.get_dataset_shard("validation"),
            )
            train_ds, eval_ds = train_ds_iter.materialize(), eval_ds_iter.materialize()

            train_df, eval_df = train_ds.to_pandas(), eval_ds.to_pandas()
            train_X, train_y = train_df.drop("y", axis=1), train_df["y"]
            eval_X, eval_y = eval_df.drop("y", axis=1), eval_df["y"]

            dtrain = xgboost.DMatrix(train_X, label=train_y)
            deval = xgboost.DMatrix(eval_X, label=eval_y)

            params = {
                "tree_method": "approx",
                "objective": "reg:squarederror",
                "eta": 1e-4,
                "subsample": 0.5,
                "max_depth": 2,
            }

            # 2. Do distributed data-parallel training.
            # Ray Train sets up the necessary coordinator processes and
            # environment variables for your workers to communicate with each other.
            bst = xgboost.train(
                params,
                dtrain=dtrain,
                evals=[(deval, "validation")],
                num_boost_round=10,
                callbacks=[RayTrainReportCallback()],
            )

        train_ds = ray.data.from_items([{"x": x, "y": x + 1} for x in range(32)])
        eval_ds = ray.data.from_items([{"x": x, "y": x + 1} for x in range(16)])
        trainer = XGBoostTrainer(
            train_fn_per_worker,
            datasets={"train": train_ds, "validation": eval_ds},
            scaling_config=ray.train.ScalingConfig(num_workers=4),
        )
        result = trainer.fit()
        booster = RayTrainReportCallback.get_model(result.checkpoint)
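
        # The returned `booster` is a plain `xgboost.Booster`. As a sketch of
        # downstream usage, run inference on the held-out data:
        eval_X = eval_ds.to_pandas().drop("y", axis=1)
        predictions = booster.predict(xgboost.DMatrix(eval_X))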

    Args:
        train_loop_per_worker: The training function to execute on each worker.
            This function can either take zero arguments or a single ``Dict``
            argument, which is set by defining ``train_loop_config``.
            Within this function you can use any of the
            :ref:`Ray Train Loop utilities <train-loop-api>`.
        train_loop_config: A configuration ``Dict`` to pass in as an argument to
            ``train_loop_per_worker``.
            This is typically used for specifying hyperparameters.
        xgboost_config: The configuration for setting up the distributed xgboost
            backend. Defaults to using the "rabit" backend.
            See :class:`~ray.train.xgboost.XGBoostConfig` for more info.
        datasets: The Ray Datasets to use for training and validation.
        dataset_config: The configuration for ingesting the input ``datasets``.
            By default, all the Ray Datasets are split equally across workers.
            See :class:`~ray.train.DataConfig` for more details.
        scaling_config: The configuration for how to scale data parallel training.
            ``num_workers`` determines how many Python processes are used for training,
            and ``use_gpu`` determines whether each worker process should use a GPU.
            See :class:`~ray.train.ScalingConfig` for more info.
        run_config: The configuration for the execution of the training run.
            See :class:`~ray.train.RunConfig` for more info.
        resume_from_checkpoint: A checkpoint to resume training from.
            This checkpoint can be accessed from within ``train_loop_per_worker``
            by calling ``ray.train.get_checkpoint()``. See the sketch below this
            argument list for an example of seeding a run with a checkpoint.
        metadata: Dict that should be made available via
            ``ray.train.get_context().get_metadata()`` and in
            ``checkpoint.get_metadata()`` for checkpoints saved from this
            Trainer. Must be JSON-serializable.
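
    As referenced above, a new run can be seeded with the state of a previous
    one via ``resume_from_checkpoint``. A minimal sketch, assuming ``result``
    comes from an earlier ``trainer.fit()`` call like the one in the example:

    .. testcode::
        :skipif: True

        trainer = XGBoostTrainer(
            train_fn_per_worker,
            datasets={"train": train_ds, "validation": eval_ds},
            scaling_config=ray.train.ScalingConfig(num_workers=4),
            resume_from_checkpoint=result.checkpoint,
        )
        result = trainer.fit()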
    """

    def __init__(
        self,
        train_loop_per_worker: Union[Callable[[], None], Callable[[Dict], None]],
        *,
        train_loop_config: Optional[Dict] = None,
        xgboost_config: Optional[XGBoostConfig] = None,
        scaling_config: Optional[ray.train.ScalingConfig] = None,
        run_config: Optional[ray.train.RunConfig] = None,
        datasets: Optional[Dict[str, GenDataset]] = None,
        dataset_config: Optional[ray.train.DataConfig] = None,
        metadata: Optional[Dict[str, Any]] = None,
        resume_from_checkpoint: Optional[Checkpoint] = None,
    ):
        super(XGBoostTrainer, self).__init__(
            train_loop_per_worker=train_loop_per_worker,
            train_loop_config=train_loop_config,
            # Fall back to the default ("rabit") backend configuration.
            backend_config=xgboost_config or XGBoostConfig(),
            scaling_config=scaling_config,
            run_config=run_config,
            datasets=datasets,
            dataset_config=dataset_config,
            metadata=metadata,
            resume_from_checkpoint=resume_from_checkpoint,
        )