2 changes: 1 addition & 1 deletion README.md
@@ -408,7 +408,7 @@ training:
   learning_rate: 2.0e-5
   effective_batch_size: 32 # per_device * grad_accum * world_size
   per_device_train_batch_size: 8
-  warmup_ratio: 0.03
+  warmup_steps: 0.03
   lr_scheduler_type: "cosine_with_min_lr"
   lr_scheduler_kwargs:
     min_lr_rate: 0.1
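The renamed setting keeps the old ratio semantics for small values: anything in [0, 1) is read as a fraction of total optimizer steps, while 1 or more is read as an absolute step count. A minimal sketch of that interpretation, using a hypothetical resolve_warmup_steps helper that is not part of this PR:

def resolve_warmup_steps(warmup_steps: float, total_steps: int) -> int:
    """Interpret the configured value: [0, 1) is a ratio of total steps, >= 1 is absolute."""
    if warmup_steps <= 0:
        return 0  # warmup disabled
    if warmup_steps < 1:
        return round(warmup_steps * total_steps)
    return int(warmup_steps)

# With the README default of warmup_steps: 0.03 over a 10,000-step run:
assert resolve_warmup_steps(0.03, 10_000) == 300   # ratio form
assert resolve_warmup_steps(500, 10_000) == 500    # absolute form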
2 changes: 1 addition & 1 deletion configs/trl/dpo.yaml
@@ -35,7 +35,7 @@ training:
   learning_rate: 5.0e-7
   effective_batch_size: 4
   per_device_train_batch_size: 1
-  warmup_ratio: 0.1
+  warmup_steps: 0.1
   lr_scheduler_type: "cosine_with_min_lr"
   lr_scheduler_kwargs:
     min_lr_rate: 0.1
2 changes: 1 addition & 1 deletion configs/trl/sft.yaml
@@ -45,7 +45,7 @@ training:
   learning_rate: 2.0e-5
   effective_batch_size: 32 # per_device * grad_accum * world_size
   per_device_train_batch_size: 8
-  warmup_ratio: 0.03
+  warmup_steps: 0.03
   lr_scheduler_type: "cosine_with_min_lr"
   lr_scheduler_kwargs:
     min_lr_rate: 0.1
27 changes: 26 additions & 1 deletion src/post_training/config.py
@@ -6,13 +6,16 @@
 
 from __future__ import annotations
 
+import logging
 from dataclasses import dataclass, field
 from pathlib import Path
 from typing import Any
 
 import yaml
 from omegaconf import MISSING, DictConfig, OmegaConf
 
+logger = logging.getLogger(__name__)
+
 # ---------------------------------------------------------------------------
 # Sub-configs
 # ---------------------------------------------------------------------------
@@ -61,7 +64,16 @@ class TrainingConfig:
     learning_rate: float = 2.0e-5
     effective_batch_size: int = 512
     per_device_train_batch_size: int = 4
-    warmup_ratio: float = 0.03
+    warmup_steps: float = field(
+        default=0.0,
+        metadata={
+            "help": (
+                "Linear warmup duration. Values in [0, 1) are interpreted as a "
+                "ratio of total training steps; values >= 1 are interpreted as an "
+                "absolute number of steps; 0 disables warmup."
+            )
+        },
+    )
     lr_scheduler_type: str = "cosine_with_min_lr"
     lr_scheduler_kwargs: LRSchedulerKwargs = field(default_factory=LRSchedulerKwargs)
     adam_beta1: float = 0.9
@@ -268,6 +280,19 @@ def load(
         """
         schema = OmegaConf.structured(cls)
        file_cfg = OmegaConf.load(yaml_path)
+
+        # Migrate deprecated training.warmup_ratio -> training.warmup_steps
+        file_dict = OmegaConf.to_container(file_cfg, resolve=False)
+        if isinstance(file_dict, dict):
+            training_dict = file_dict.get("training", {})
+            if isinstance(training_dict, dict) and "warmup_ratio" in training_dict:
+                logger.warning(
+                    "training.warmup_ratio is deprecated; use training.warmup_steps "
+                    "(values < 1 are interpreted as a ratio). Auto-migrating."
+                )
+                training_dict.setdefault("warmup_steps", training_dict.pop("warmup_ratio"))
+                file_cfg = OmegaConf.create(file_dict)
+
         merged: DictConfig = OmegaConf.merge(schema, file_cfg)
 
         if cli_overrides:
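To make the migration concrete, here is a minimal standalone sketch (not part of the PR) of the key rename that load() now performs when a legacy YAML still sets training.warmup_ratio; it uses only the OmegaConf calls that appear in the hunk above:

from omegaconf import OmegaConf

legacy = OmegaConf.create({"training": {"warmup_ratio": 0.03}})  # assumed legacy config shape

file_dict = OmegaConf.to_container(legacy, resolve=False)
training_dict = file_dict.get("training", {})
if isinstance(training_dict, dict) and "warmup_ratio" in training_dict:
    # Same rename as the loader, minus the deprecation warning.
    training_dict.setdefault("warmup_steps", training_dict.pop("warmup_ratio"))
migrated = OmegaConf.create(file_dict)

assert "warmup_ratio" not in migrated.training
assert migrated.training.warmup_steps == 0.03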
2 changes: 1 addition & 1 deletion src/post_training/methods/common.py
@@ -102,7 +102,7 @@ def build_common_training_kwargs(
         weight_decay=t.weight_decay,
         adam_epsilon=t.adam_epsilon,
         gradient_accumulation_steps=grad_accum,
-        warmup_steps=t.warmup_ratio,
+        warmup_steps=t.warmup_steps,
         lr_scheduler_type=t.lr_scheduler_type,
         lr_scheduler_kwargs={
             k: v for k, v in dataclasses.asdict(t.lr_scheduler_kwargs).items() if v is not None
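This hunk fixes a real mismatch rather than just renaming: the old line forwarded the ratio field under the warmup_steps keyword, and once the dataclass field is renamed the old t.warmup_ratio access would raise AttributeError anyway. A hedged sanity check of that invariant, assuming the package is importable as post_training from the src/ layout and that TrainingConfig can be instantiated with defaults:

import dataclasses

from post_training.config import TrainingConfig  # import path assumed from the src/ layout

cfg = TrainingConfig()
names = {f.name for f in dataclasses.fields(cfg)}
assert "warmup_ratio" not in names   # old field is gone
assert cfg.warmup_steps == 0.0       # new default: warmup disabled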
2 changes: 1 addition & 1 deletion src/post_training/utils/guardrails.py
@@ -207,7 +207,7 @@ def run_guardrails(config: PostTrainingConfig, run_dir: Path, tokenize_only: boo
     min_lr = config.training.lr_scheduler_kwargs.min_lr_rate
     lr_sched_str = lr_sched if min_lr is None else f"{lr_sched} (min_lr_rate={min_lr})"
     _row("LR scheduler", lr_sched_str)
-    _row("Warmup ratio", str(config.training.warmup_ratio))
+    _row("Warmup steps", str(config.training.warmup_steps))
     batch_line, _ = _batch_summary(config, total_gpus)
     _row("Batch sizes", batch_line)
     _row("Grad checkpoint", str(config.training.gradient_checkpointing))