
import logging
from typing import Any, Callable, Dict, Optional, Union

import ray.train
from ray.train import Checkpoint
from ray.train.data_parallel_trainer import DataParallelTrainer
from ray.train.lightgbm.config import LightGBMConfig, get_network_params
from ray.train.trainer import GenDataset

logger = logging.getLogger(__name__)


class LightGBMTrainer(DataParallelTrainer):
    """A Trainer for distributed data-parallel LightGBM training.

    Example
    -------

    .. testcode::
        :skipif: True

        import lightgbm as lgb

        import ray.data
        import ray.train
        from ray.train.lightgbm import RayTrainReportCallback, LightGBMTrainer


        def train_fn_per_worker(config: dict):
            # (Optional) Add logic to resume training state from a checkpoint
            # via `ray.train.get_checkpoint()` (see the resume sketch below).

            # 1. Get the dataset shard for the worker and convert to a `lgb.Dataset`
            train_ds_iter, eval_ds_iter = (
                ray.train.get_dataset_shard("train"),
                ray.train.get_dataset_shard("validation"),
            )
            train_ds, eval_ds = train_ds_iter.materialize(), eval_ds_iter.materialize()
            train_df, eval_df = train_ds.to_pandas(), eval_ds.to_pandas()
            train_X, train_y = train_df.drop("y", axis=1), train_df["y"]
            eval_X, eval_y = eval_df.drop("y", axis=1), eval_df["y"]

            train_set = lgb.Dataset(train_X, label=train_y)
            eval_set = lgb.Dataset(eval_X, label=eval_y)

            # 2. Run distributed data-parallel training.
            # `get_network_params` provides the network configuration LightGBM needs
            # to set up the data-parallel training worker group on your Ray cluster.
            params = {
                "objective": "regression",
                # Adding the line below is the only change needed
                # for your `lgb.train` call!
                **ray.train.lightgbm.get_network_params(),
            }
            lgb.train(
                params,
                train_set,
                valid_sets=[eval_set],
                valid_names=["eval"],
                callbacks=[RayTrainReportCallback()],
            )

        train_ds = ray.data.from_items([{"x": x, "y": x + 1} for x in range(32)])
        eval_ds = ray.data.from_items(
            [{"x": x, "y": x + 1} for x in range(32, 32 + 16)]
        )
        trainer = LightGBMTrainer(
            train_fn_per_worker,
            datasets={"train": train_ds, "validation": eval_ds},
            scaling_config=ray.train.ScalingConfig(num_workers=4),
        )
        result = trainer.fit()
        booster = RayTrainReportCallback.get_model(result.checkpoint)
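
    As a minimal sketch of the optional resume step noted at the top of
    ``train_fn_per_worker`` above (assuming any checkpoint was written by
    ``RayTrainReportCallback``, e.g. one passed to the trainer via
    ``resume_from_checkpoint``), the worker can restore the previous booster
    and continue boosting from it through LightGBM's ``init_model`` argument:

    .. testcode::
        :skipif: True

        import lightgbm as lgb

        import ray.train
        from ray.train.lightgbm import RayTrainReportCallback


        def train_fn_per_worker(config: dict):
            checkpoint = ray.train.get_checkpoint()
            # Restore the booster saved by `RayTrainReportCallback`, if any.
            init_model = (
                RayTrainReportCallback.get_model(checkpoint) if checkpoint else None
            )

            train_df = ray.train.get_dataset_shard("train").materialize().to_pandas()
            train_set = lgb.Dataset(train_df.drop("y", axis=1), label=train_df["y"])

            lgb.train(
                {
                    "objective": "regression",
                    **ray.train.lightgbm.get_network_params(),
                },
                train_set,
                callbacks=[RayTrainReportCallback()],
                # Continue boosting from the restored model instead of from scratch.
                init_model=init_model,
            )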

    Args:
        train_loop_per_worker: The training function to execute on each worker.
            This function can either take in zero arguments or a single ``Dict``
            argument which is set by defining ``train_loop_config``.
            Within this function you can use any of the
            :ref:`Ray Train Loop utilities <train-loop-api>`.
        train_loop_config: A configuration ``Dict`` to pass in as an argument to
            ``train_loop_per_worker``.
            This is typically used for specifying hyperparameters
            (see the brief sketch after this argument list).
        lightgbm_config: The configuration for setting up the distributed lightgbm
            backend. See :class:`~ray.train.lightgbm.LightGBMConfig` for more info.
        datasets: The Ray Datasets to use for training and validation.
        dataset_config: The configuration for ingesting the input ``datasets``.
            By default, all the Ray Datasets are split equally across workers.
            See :class:`~ray.train.DataConfig` for more details.
        scaling_config: The configuration for how to scale data parallel training.
            ``num_workers`` determines how many Python processes are used for training,
            and ``use_gpu`` determines whether or not each process should use GPUs.
            See :class:`~ray.train.ScalingConfig` for more info.
        run_config: The configuration for the execution of the training run.
            See :class:`~ray.train.RunConfig` for more info.
        resume_from_checkpoint: A checkpoint to resume training from.
            This checkpoint can be accessed from within ``train_loop_per_worker``
            by calling ``ray.train.get_checkpoint()``.
        metadata: Dict that should be made available via
            ``ray.train.get_context().get_metadata()`` and in
            ``checkpoint.get_metadata()`` for checkpoints saved from this
            Trainer. Must be JSON-serializable.
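
    As a brief, illustrative sketch of ``train_loop_config`` and ``metadata``
    (the key names below are made up for the example):

    .. testcode::
        :skipif: True

        import ray.data
        import ray.train
        from ray.train.lightgbm import LightGBMTrainer


        def train_fn_per_worker(config: dict):
            # `config` is the `train_loop_config` passed to the trainer below.
            num_boost_round = config["num_boost_round"]
            # `metadata` passed to the trainer is available on the train context.
            metadata = ray.train.get_context().get_metadata()
            # ... run `lgb.train(...)` here as in the example above ...


        train_ds = ray.data.from_items([{"x": x, "y": x + 1} for x in range(32)])
        trainer = LightGBMTrainer(
            train_fn_per_worker,
            train_loop_config={"num_boost_round": 10},
            metadata={"preprocessing": "none"},
            datasets={"train": train_ds},
            scaling_config=ray.train.ScalingConfig(num_workers=2),
        )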
    N)train_loop_configlightgbm_configscaling_config
run_configdatasetsdataset_configmetadataresume_from_checkpointtrain_loop_per_workerr   r   r   r   r   r   r   r   c                    t          t          |                               |||pt                      |||||	|	  	         d S )N)	r   r   backend_configr   r   r   r   r   r   )superr   __init__r
   )selfr   r   r   r   r   r   r   r   r   	__class__s             i/home/jaya/work/projects/VOICE-AGENT/VIET/agent-env/lib/python3.11/site-packages/ray/train/lightgbm/v2.pyr   zLightGBMTrainer.__init__h   s\     	ot$$--"7/*>n.>.>))!#9 	. 
	
 
	
 
	
 
	
 
	
    )__name__
__module____qualname____doc__r   r   r   r   r
   raytrainScalingConfig	RunConfigstrr   
DataConfigr   r   r   __classcell__)r   s   @r   r   r      s+       X X| -148<@48489=-17;
 
 
$Xb$h%74&$,9O%OP
 $D>	

 ".1
 !!89
 SY01
 4Z01
 !!56
 4S>*
 !) 4
 
 
 
 
 
 
 
 
 
r   r   )loggingtypingr   r   r   r   r   	ray.trainr$   r   ray.train.data_parallel_trainerr	   ray.train.lightgbm.configr
   r   ray.train.trainerr   	getLoggerr    loggerr    r   r   <module>r4      s     7 7 7 7 7 7 7 7 7 7 7 7 7 7                 ? ? ? ? ? ? H H H H H H H H ( ( ( ( ( (		8	$	$r
 r
 r
 r
 r
) r
 r
 r
 r
 r
r   