6 changes: 6 additions & 0 deletions docs/examples/config.rst
@@ -17,6 +17,8 @@ Data
tokenizer: null
train_files: ~/data/rlhf/gsm8k/train.parquet
val_files: ~/data/rlhf/gsm8k/test.parquet
train_max_samples: -1 # set to -1 to use full dataset
val_max_samples: -1 # set to -1 to use full dataset
prompt_key: prompt
max_prompt_length: 512
max_response_length: 512
@@ -42,6 +44,10 @@ Data
HDFS path to local path.
- ``data.val_files``: Validation parquet. Can be a list or a single
file.
- ``data.train_max_samples``: Maximum number of samples to use from the
training dataset. Set to -1 to use the full dataset.
- ``data.val_max_samples``: Maximum number of samples to use from the
validation dataset. Set to -1 to use the full dataset.
- ``data.prompt_key``: The field in the dataset where the prompt is
located. Default is 'prompt'.
- ``data.max_prompt_length``: Maximum prompt length. All prompts will be
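Note: the hunks in this PR only show the config plumbing and call sites; the dataset-side handling of `train_max_samples` / `val_max_samples` is not visible in this diff. As a rough sketch of the documented behavior (keep everything when the value is -1, otherwise draw a random subset of that size), the selection could look like the snippet below. The helper name, the seeding, and the use of `random.sample` are illustrative assumptions, not the actual verl implementation.

```python
import random

def maybe_subsample(num_rows: int, max_samples: int, seed: int = 42) -> list[int]:
    """Pick the row indices to keep.

    max_samples <= 0 (the default -1) keeps the full dataset; a fixed seed
    keeps the chosen subset stable across workers and restarts.
    """
    indices = list(range(num_rows))
    if max_samples <= 0 or max_samples >= num_rows:
        return indices
    return sorted(random.Random(seed).sample(indices, max_samples))

# Toy usage: cap a 100-row dataset at 5 samples.
print(maybe_subsample(100, 5))        # 5 reproducible random indices
print(len(maybe_subsample(100, -1)))  # 100 -> full dataset kept
```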
2 changes: 2 additions & 0 deletions examples/split_placement/config/ppo_trainer_split.yaml
@@ -12,6 +12,8 @@ data:
tokenizer: null
train_files: ~/data/rlhf/gsm8k/train.parquet
val_files: ~/data/rlhf/gsm8k/test.parquet
train_max_samples: -1 # set to -1 to use full dataset
val_max_samples: -1 # set to -1 to use full dataset
prompt_key: prompt
max_prompt_length: 512
max_response_length: 512
15 changes: 12 additions & 3 deletions recipe/entropy/main_entropy.py
@@ -162,8 +162,16 @@ def run(self, config):

from verl.utils.dataset.rl_dataset import collate_fn

train_dataset = create_rl_dataset(config.data.train_files, config.data, tokenizer, processor)
val_dataset = create_rl_dataset(config.data.val_files, config.data, tokenizer, processor)
train_dataset = create_rl_dataset(
config.data.train_files,
config.data,
tokenizer,
processor,
max_samples=config.data.get("train_max_samples", -1),
)
val_dataset = create_rl_dataset(
config.data.val_files, config.data, tokenizer, processor, max_samples=config.data.get("val_max_samples", -1)
)
train_sampler = create_rl_sampler(config.data, train_dataset)
trainer = RayEntropyTrainer(
config=config,
@@ -183,7 +191,7 @@ def run(self, config):
trainer.fit()


def create_rl_dataset(data_paths, data_config, tokenizer, processor):
def create_rl_dataset(data_paths, data_config, tokenizer, processor, max_samples: int = -1):
"""Create a dataset.
Arguments:
@@ -216,6 +224,7 @@ def create_rl_dataset(data_paths, data_config, tokenizer, processor):
tokenizer=tokenizer,
processor=processor,
config=data_config,
max_samples=max_samples,
)

return dataset
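A small aside on the `config.data.get("train_max_samples", -1)` lookups used at every call site: reading the new keys through `.get` with a `-1` default means configs written before this change keep working without edits. A minimal OmegaConf illustration (the config contents here are made up for the example):

```python
from omegaconf import OmegaConf

# An "old" data config that predates the new keys.
old_cfg = OmegaConf.create({"train_files": "~/data/rlhf/gsm8k/train.parquet"})
print(old_cfg.get("train_max_samples", -1))  # -1 -> full dataset, no KeyError

# A config that opts in to subsampling.
new_cfg = OmegaConf.create({"train_max_samples": 128})
print(new_cfg.get("train_max_samples", -1))  # 128
```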
12 changes: 10 additions & 2 deletions recipe/one_step_off_policy/main_ppo.py
@@ -212,8 +212,16 @@ def run(self, config):
from verl.utils.dataset.rl_dataset import collate_fn

# Create training and validation datasets.
train_dataset = create_rl_dataset(config.data.train_files, config.data, tokenizer, processor)
val_dataset = create_rl_dataset(config.data.val_files, config.data, tokenizer, processor)
train_dataset = create_rl_dataset(
config.data.train_files,
config.data,
tokenizer,
processor,
max_samples=config.data.get("train_max_samples", -1),
)
val_dataset = create_rl_dataset(
config.data.val_files, config.data, tokenizer, processor, max_samples=config.data.get("val_max_samples", -1)
)
train_sampler = create_rl_sampler(config.data, train_dataset)

# Initialize the PPO trainer.
12 changes: 10 additions & 2 deletions recipe/spin/spin_trainer.py
@@ -393,11 +393,19 @@ def _create_dataloader(self, train_dataset, val_dataset, collate_fn, train_sampl

if train_dataset is None:
train_dataset = create_rl_dataset(
self.config.data.train_files, self.config.data, self.tokenizer, self.processor
self.config.data.train_files,
self.config.data,
self.tokenizer,
self.processor,
max_samples=self.config.data.get("train_max_samples", -1),
)
if val_dataset is None:
val_dataset = create_rl_dataset(
self.config.data.val_files, self.config.data, self.tokenizer, self.processor
self.config.data.val_files,
self.config.data,
self.tokenizer,
self.processor,
max_samples=self.config.data.get("val_max_samples", -1),
)
self.train_dataset, self.val_dataset = train_dataset, val_dataset

8 changes: 6 additions & 2 deletions tests/special_e2e/sft/test_sp_loss_match.py
@@ -112,8 +112,12 @@ def create_trainer(config):

local_model_path = copy_to_local(src=config.model.partial_pretrain, verbose=True)
tokenizer = hf_tokenizer(local_model_path, trust_remote_code=config.model.trust_remote_code)
train_dataset = create_sft_dataset(config.data.train_files, config.data, tokenizer)
val_dataset = create_sft_dataset(config.data.val_files, config.data, tokenizer)
train_dataset = create_sft_dataset(
config.data.train_files, config.data, tokenizer, max_samples=config.data.get("train_max_samples", -1)
)
val_dataset = create_sft_dataset(
config.data.val_files, config.data, tokenizer, max_samples=config.data.get("val_max_samples", -1)
)

return FSDPSFTTrainer(
config=config,
2 changes: 2 additions & 0 deletions tests/trainer/config/legacy_ppo_megatron_trainer.yaml
@@ -2,6 +2,8 @@ data:
tokenizer: null
train_files: ~/data/rlhf/gsm8k/train.parquet
val_files: ~/data/rlhf/gsm8k/test.parquet
train_max_samples: -1 # set to -1 to use full dataset
val_max_samples: -1 # set to -1 to use full dataset
prompt_key: prompt
reward_fn_key: data_source
max_prompt_length: 512
10 changes: 10 additions & 0 deletions tests/trainer/config/legacy_ppo_trainer.yaml
@@ -22,6 +22,16 @@ data:
# Validation parquet. Can be a list or a single file.
val_files: ~/data/rlhf/gsm8k/test.parquet

# Maximum number of samples to use from the train dataset.
# Set to -1 to use the full dataset; otherwise, randomly
# select the specified number of samples.
train_max_samples: -1

# Maximum number of samples to use from the val dataset.
# Set to -1 to use the full dataset; otherwise, randomly
# select the specified number of samples.
val_max_samples: -1

# The field in the dataset where the prompt is located. Default is 'prompt'.
prompt_key: prompt

19 changes: 19 additions & 0 deletions tests/utils/dataset/test_rl_dataset_on_cpu.py
@@ -66,6 +66,25 @@ def test_rl_dataset():
print(f"\n\noutput: {output}")


def test_rl_dataset_with_max_samples():
from verl.utils import hf_tokenizer
from verl.utils.dataset.rl_dataset import RLHFDataset

tokenizer = hf_tokenizer("deepseek-ai/deepseek-coder-1.3b-instruct")
local_path = get_gsm8k_data()
config = OmegaConf.create(
{
"prompt_key": "prompt",
"max_prompt_length": 256,
"filter_overlong_prompts": True,
"filter_overlong_prompts_workers": 2,
"max_samples": 5,
}
)
dataset = RLHFDataset(data_files=local_path, tokenizer=tokenizer, config=config, max_samples=5)
assert len(dataset) == 5


def test_image_rl_data():
from verl.utils import hf_processor, hf_tokenizer
from verl.utils.dataset.rl_dataset import RLHFDataset, collate_fn
23 changes: 23 additions & 0 deletions tests/utils/dataset/test_sft_dataset_on_cpu.py
@@ -72,3 +72,26 @@ def test_sft_dataset():
output = tokenizer.batch_decode([data])[0]
assert len(output) > 1
assert isinstance(output, str)


def test_sft_dataset_with_max_samples():
tokenizer = hf_tokenizer("deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct")
local_path = get_gsm8k_data()
from omegaconf import OmegaConf

dataset = SFTDataset(
parquet_files=local_path,
tokenizer=tokenizer,
config=OmegaConf.create(
{
"prompt_key": "extra_info",
"prompt_dict_keys": ["question"],
"response_key": "extra_info",
"response_dict_keys": ["answer"],
"max_length": 512,
}
),
max_samples=5,
)

assert len(dataset) == 5
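To exercise just the two new subsampling tests locally, something like the following should work (assuming the GSM8K parquet fixture used by `get_gsm8k_data()` can be downloaded in your environment):

```python
import pytest

# Node IDs taken from the two test files in this PR.
pytest.main([
    "-q",
    "tests/utils/dataset/test_rl_dataset_on_cpu.py::test_rl_dataset_with_max_samples",
    "tests/utils/dataset/test_sft_dataset_on_cpu.py::test_sft_dataset_with_max_samples",
])
```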
2 changes: 2 additions & 0 deletions verl/trainer/config/_generated_ppo_megatron_trainer.yaml
@@ -276,6 +276,8 @@ data:
use_shm: false
train_files: ~/data/rlhf/gsm8k/train.parquet
val_files: ~/data/rlhf/gsm8k/test.parquet
train_max_samples: -1
val_max_samples: -1
prompt_key: prompt
reward_fn_key: data_source
max_prompt_length: 512
2 changes: 2 additions & 0 deletions verl/trainer/config/_generated_ppo_trainer.yaml
@@ -272,6 +272,8 @@ data:
use_shm: false
train_files: ~/data/rlhf/gsm8k/train.parquet
val_files: ~/data/rlhf/gsm8k/test.parquet
train_max_samples: -1
val_max_samples: -1
prompt_key: prompt
reward_fn_key: data_source
max_prompt_length: 512
10 changes: 10 additions & 0 deletions verl/trainer/config/data/legacy_data.yaml
@@ -13,6 +13,16 @@ train_files: ~/data/rlhf/gsm8k/train.parquet
# Validation parquet. Can be a list or a single file.
val_files: ~/data/rlhf/gsm8k/test.parquet

# Maximum number of samples to use from the train dataset.
# Set to -1 to use the full dataset; otherwise, randomly
# select the specified number of samples.
train_max_samples: -1

# Maximum number of samples to use from the val dataset.
# Set to -1 to use the full dataset; otherwise, randomly
# select the specified number of samples.
val_max_samples: -1

# The field in the dataset where the prompt is located. Default is 'prompt'.
prompt_key: prompt

2 changes: 2 additions & 0 deletions verl/trainer/config/sft_trainer.yaml
@@ -4,6 +4,8 @@ data:
micro_batch_size_per_gpu: 4 # this is also val batch size
train_files: ~/data/gsm8k/train.parquet
val_files: ~/data/gsm8k/test.parquet
train_max_samples: -1 # set to -1 to use full dataset
val_max_samples: -1 # set to -1 to use full dataset
# Single-turn settings
prompt_key: question
response_key: answer
2 changes: 2 additions & 0 deletions verl/trainer/config/sft_trainer_engine.yaml
@@ -19,6 +19,8 @@ data:
use_dynamic_bsz: True
train_files: ~/data/gsm8k/train.parquet
val_files: null
train_max_samples: -1 # set to -1 to use full dataset
val_max_samples: -1 # set to -1 to use full dataset
# Multi-turn settings
messages_key: messages # Key for messages list in multi-turn mode
tools_key: tools # Key for tools list in multi-turn mode
12 changes: 8 additions & 4 deletions verl/trainer/fsdp_sft_trainer.py
@@ -800,8 +800,12 @@ def run_sft(config):

local_model_path = copy_to_local(src=config.model.partial_pretrain, verbose=True)
tokenizer = hf_tokenizer(local_model_path, trust_remote_code=config.model.trust_remote_code)
train_dataset = create_sft_dataset(config.data.train_files, config.data, tokenizer)
val_dataset = create_sft_dataset(config.data.val_files, config.data, tokenizer)
train_dataset = create_sft_dataset(
config.data.train_files, config.data, tokenizer, max_samples=config.data.get("train_max_samples", -1)
)
val_dataset = create_sft_dataset(
config.data.val_files, config.data, tokenizer, max_samples=config.data.get("val_max_samples", -1)
)

trainer = FSDPSFTTrainer(
config=config,
@@ -822,7 +826,7 @@ def main(config):
run_sft(config)


def create_sft_dataset(data_paths, data_config, tokenizer):
def create_sft_dataset(data_paths, data_config, tokenizer, max_samples=-1):
"""Create a dataset."""
# build dataset
# First check if a custom dataset class is specified
@@ -838,7 +842,7 @@ def create_sft_dataset(data_paths, data_config, tokenizer):
dataset_cls = SFTDataset

# Create datasets based on the selected class
dataset = dataset_cls(parquet_files=data_paths, tokenizer=tokenizer, config=data_config)
dataset = dataset_cls(parquet_files=data_paths, tokenizer=tokenizer, config=data_config, max_samples=max_samples)
return dataset


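One implication of the change to `create_sft_dataset` worth flagging: `max_samples` is now forwarded to the dataset class unconditionally, so a user-supplied class configured via `data.custom_cls` also has to accept the keyword. A minimal sketch of a compatible custom dataset follows; the class name, the pandas-based loading, and the seeding are illustrative assumptions, and it only handles a single parquet path.

```python
import pandas as pd
from torch.utils.data import Dataset

class MyCustomSFTDataset(Dataset):
    """Toy custom SFT dataset that tolerates the new max_samples keyword."""

    def __init__(self, parquet_files, tokenizer, config, max_samples: int = -1):
        self.tokenizer = tokenizer
        self.config = config
        self.frame = pd.read_parquet(parquet_files)
        if 0 < max_samples < len(self.frame):
            # Keep a reproducible random subset of the requested size.
            self.frame = self.frame.sample(n=max_samples, random_state=42).reset_index(drop=True)

    def __len__(self):
        return len(self.frame)

    def __getitem__(self, idx):
        # A real implementation would tokenize here; returning the raw row keeps the sketch short.
        return self.frame.iloc[idx].to_dict()
```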
21 changes: 18 additions & 3 deletions verl/trainer/main_ppo.py
@@ -295,8 +295,22 @@ def run(self, config):
from verl.utils.dataset.rl_dataset import collate_fn

# Create training and validation datasets.
train_dataset = create_rl_dataset(config.data.train_files, config.data, tokenizer, processor, is_train=True)
val_dataset = create_rl_dataset(config.data.val_files, config.data, tokenizer, processor, is_train=False)
train_dataset = create_rl_dataset(
config.data.train_files,
config.data,
tokenizer,
processor,
is_train=True,
max_samples=config.data.get("train_max_samples", -1),
)
val_dataset = create_rl_dataset(
config.data.val_files,
config.data,
tokenizer,
processor,
is_train=False,
max_samples=config.data.get("val_max_samples", -1),
)
train_sampler = create_rl_sampler(config.data, train_dataset)

# Initialize the PPO trainer.
@@ -321,7 +335,7 @@ def run(self, config):
trainer.fit()


def create_rl_dataset(data_paths, data_config, tokenizer, processor, is_train=True):
def create_rl_dataset(data_paths, data_config, tokenizer, processor, is_train=True, max_samples: int = -1):
"""Create a dataset.

Arguments:
@@ -365,6 +379,7 @@ def create_rl_dataset(data_paths, data_config, tokenizer, processor, is_train=Tr
tokenizer=tokenizer,
processor=processor,
config=data_config,
max_samples=max_samples,
)

return dataset
12 changes: 10 additions & 2 deletions verl/trainer/ppo/ray_trainer.py
@@ -350,11 +350,19 @@ def _create_dataloader(self, train_dataset, val_dataset, collate_fn, train_sampl

if train_dataset is None:
train_dataset = create_rl_dataset(
self.config.data.train_files, self.config.data, self.tokenizer, self.processor
self.config.data.train_files,
self.config.data,
self.tokenizer,
self.processor,
max_samples=self.config.data.get("train_max_samples", -1),
)
if val_dataset is None:
val_dataset = create_rl_dataset(
self.config.data.val_files, self.config.data, self.tokenizer, self.processor
self.config.data.val_files,
self.config.data,
self.tokenizer,
self.processor,
max_samples=self.config.data.get("val_max_samples", -1),
)
self.train_dataset, self.val_dataset = train_dataset, val_dataset

12 changes: 8 additions & 4 deletions verl/trainer/sft_trainer.py
@@ -145,9 +145,13 @@ def _init_engine(self):
def _build_dataset(self):
config = self.config
tokenizer = self.model_config.tokenizer
train_dataset = create_sft_dataset(config.data.train_files, config.data, tokenizer)
train_dataset = create_sft_dataset(
config.data.train_files, config.data, tokenizer, max_samples=config.data.get("train_max_samples", -1)
)
if config.data.val_files:
val_dataset = create_sft_dataset(config.data.val_files, config.data, tokenizer)
val_dataset = create_sft_dataset(
config.data.val_files, config.data, tokenizer, max_samples=config.data.get("val_max_samples", -1)
)
else:
val_dataset = None

@@ -372,7 +376,7 @@ def main(config):
run_sft(config)


def create_sft_dataset(data_paths, data_config, tokenizer):
def create_sft_dataset(data_paths, data_config, tokenizer, max_samples=-1):
"""Create a dataset."""
# build dataset
# First check if a custom dataset class is specified
@@ -385,7 +389,7 @@ def create_sft_dataset(data_paths, data_config, tokenizer):
dataset_cls = MultiTurnSFTDataset

# Create datasets based on the selected class
dataset = dataset_cls(parquet_files=data_paths, tokenizer=tokenizer, config=data_config)
dataset = dataset_cls(parquet_files=data_paths, tokenizer=tokenizer, config=data_config, max_samples=max_samples)
return dataset

