refactor(khaosz/trainer): use TrainContext instead of kwargs to pass the training context
parent f9b6331ad7
commit c1bf22b6ec
@@ -1,16 +1,13 @@
 import os
-import torch.optim as optim
 
 from tqdm import tqdm
 from torch.nn.utils import clip_grad_norm_
 from torch.optim.lr_scheduler import LambdaLR
-from typing import Optional, cast, TYPE_CHECKING
-from khaosz.core.parameter import Checkpoint
-from khaosz.trainer.data_util import RandomSampler
+from typing import Optional, TYPE_CHECKING
 from khaosz.trainer.strategy import ScheduleConfig, SchedulerFactory
 
 if TYPE_CHECKING:
     from khaosz.trainer.trainer import Trainer
+    from khaosz.trainer.train_context import TrainContext
 
-
 class TrainCallback:
@@ -19,37 +16,37 @@ class TrainCallback:
     and we use '_' to ignore unused parameters.
     """
 
-    def on_train_begin(self, trainer: 'Trainer', **kwargs):
+    def on_train_begin(self, trainer: 'Trainer', context: 'TrainContext'):
         """ Called at the beginning of training. """
-        _ = trainer, kwargs
+        _ = trainer, context
 
-    def on_train_end(self, trainer: 'Trainer', **kwargs):
+    def on_train_end(self, trainer: 'Trainer', context: 'TrainContext'):
         """ Called at the end of training. """
-        _ = trainer, kwargs
+        _ = trainer, context
 
-    def on_epoch_begin(self, trainer: 'Trainer', **kwargs):
+    def on_epoch_begin(self, trainer: 'Trainer', context: 'TrainContext'):
         """ Called at the beginning of each epoch. """
-        _ = trainer, kwargs
+        _ = trainer, context
 
-    def on_epoch_end(self, trainer: 'Trainer', **kwargs):
+    def on_epoch_end(self, trainer: 'Trainer', context: 'TrainContext'):
         """ Called at the end of each epoch. """
-        _ = trainer, kwargs
+        _ = trainer, context
 
-    def on_batch_begin(self, trainer: 'Trainer', **kwargs):
+    def on_batch_begin(self, trainer: 'Trainer', context: 'TrainContext'):
         """ Called at the beginning of each batch. """
-        _ = trainer, kwargs
+        _ = trainer, context
 
-    def on_batch_end(self, trainer: 'Trainer', **kwargs):
+    def on_batch_end(self, trainer: 'Trainer', context: 'TrainContext'):
         """ Called at the end of each batch. """
-        _ = trainer, kwargs
+        _ = trainer, context
 
-    def on_step_begin(self, trainer: 'Trainer', **kwargs):
+    def on_step_begin(self, trainer: 'Trainer', context: 'TrainContext'):
         """ Called at the beginning of each step. """
-        _ = trainer, kwargs
+        _ = trainer, context
 
-    def on_step_end(self, trainer: 'Trainer', **kwargs):
+    def on_step_end(self, trainer: 'Trainer', context: 'TrainContext'):
         """ Called at the end of each step. """
-        _ = trainer, kwargs
+        _ = trainer, context
 
 
 class ProgressBarCallback(TrainCallback):
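The `TrainContext` class itself is not shown in this commit. Inferred only from the attributes the hooks in this diff read (`epoch`, `dataloader`, `loss`, `optimizer`, `sampler`, `checkpoint`, `current_iter`), a minimal sketch of the object every callback now receives could look like this; field names come from usage below, the types are assumptions, and the real definition lives in `khaosz/trainer/train_context.py`:

```python
# Hypothetical sketch of TrainContext, inferred from the attributes the
# callbacks in this diff access; the real class may differ.
from dataclasses import dataclass
from typing import Any, Optional

@dataclass
class TrainContext:
    epoch: int = 0                    # read by ProgressBarCallback.on_epoch_begin
    current_iter: int = 0             # read by CheckpointCallback and SchedulerCallback
    loss: float = 0.0                 # last batch loss, read in on_batch_end hooks
    dataloader: Any = None            # wrapped by tqdm in on_epoch_begin
    optimizer: Any = None             # queried for lr and state_dict()
    sampler: Any = None               # RandomSampler, checkpointed via state_dict()
    checkpoint: Optional[Any] = None  # Checkpoint holding loss_list and saved states
```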
@@ -59,27 +56,23 @@ class ProgressBarCallback(TrainCallback):
     def __init__(self):
         self.progress_bar: tqdm = None
 
-    def on_epoch_begin(self, trainer: 'Trainer', **kwargs):
-        epoch = kwargs.get('epoch')
-        dataloader = kwargs.get('dataloader')
+    def on_epoch_begin(self, trainer: 'Trainer', context: 'TrainContext'):
         self.progress_bar = tqdm(
-            dataloader,
-            desc=f"Epoch {epoch+1}/{trainer.train_config.n_epoch}",
+            context.dataloader,
+            desc=f"Epoch {context.epoch+1}/{trainer.train_config.n_epoch}",
             dynamic_ncols=True
         )
 
-    def on_batch_end(self, trainer: 'Trainer', **kwargs):
+    def on_batch_end(self, trainer: 'Trainer', context: 'TrainContext'):
         _ = trainer
-        loss = kwargs.get('loss')
-        optimizer = cast(optim.Optimizer, kwargs.get('optimizer'))
         self.progress_bar.set_postfix({
-            "loss": f"{loss:.4f}",
-            "lr": f"{optimizer.param_groups[-1]['lr']:.2e}"
+            "loss": f"{context.loss:.4f}",
+            "lr": f"{context.optimizer.param_groups[-1]['lr']:.2e}"
         })
         self.progress_bar.update(1)
 
-    def on_epoch_end(self, trainer: 'Trainer', **kwargs):
-        _ = trainer, kwargs
+    def on_epoch_end(self, trainer: 'Trainer', context: 'TrainContext'):
+        _ = trainer, context
         if self.progress_bar:
             self.progress_bar.close()
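For readers unfamiliar with how the tqdm calls split across these three hooks fit together, the pattern reduces to the following self-contained toy loop (placeholder values, not project code): the bar is created over the dataloader, advanced manually per batch, and closed at epoch end.

```python
# Toy demonstration of the tqdm pattern used by ProgressBarCallback.
from tqdm import tqdm

fake_dataloader = range(100)                       # stands in for context.dataloader
bar = tqdm(fake_dataloader, desc="Epoch 1/3", dynamic_ncols=True)  # on_epoch_begin
for step in fake_dataloader:                       # the training loop iterates the loader
    loss = 1.0 / (step + 1)                        # placeholder for context.loss
    bar.set_postfix({"loss": f"{loss:.4f}", "lr": f"{1e-3:.2e}"})  # on_batch_end
    bar.update(1)                                  # advance manually, as on_batch_end does
bar.close()                                        # on_epoch_end
```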
@@ -92,46 +85,31 @@ class CheckpointCallback(TrainCallback):
         self.checkpoint_interval = checkpoint_interval
         self.last_ckpt_iter = 0
 
-    @staticmethod
-    def _save_checkpoint(trainer: 'Trainer', **kwargs):
-        current_iter = kwargs.get('current_iter')
-        random_sampler = cast(RandomSampler, kwargs.get('sampler'))
-        optimizer = cast(optim.Optimizer, kwargs.get('optimizer'))
-        checkpoint = cast(Checkpoint, kwargs.get('checkpoint'))
-
-        save_path = os.path.join(trainer.train_config.checkpoint_dir, f"iter_{current_iter}")
-        checkpoint.sampler_state = random_sampler.state_dict()
-        checkpoint.optim_state = optimizer.state_dict()
-
-        checkpoint.save(save_path)
+    def _save_checkpoint(self, trainer: 'Trainer', context: 'TrainContext'):
+        save_path = os.path.join(trainer.train_config.checkpoint_dir, f"iter_{context.current_iter}")
+        context.checkpoint.sampler_state = context.sampler.state_dict()
+        context.checkpoint.optimizer_state = context.optimizer.state_dict()
+        context.checkpoint.save(save_path)
+        self.last_ckpt_iter = context.current_iter
 
-    def on_batch_end(self, trainer: 'Trainer', **kwargs):
-        current_iter = kwargs.get('current_iter')
-        checkpoint = cast(Checkpoint, kwargs.get('checkpoint'))
-        loss = kwargs.get('loss')
-        checkpoint.loss_list.append(loss)
+    def on_batch_end(self, trainer: 'Trainer', context: 'TrainContext'):
+        context.checkpoint.loss_list.append(context.loss)
 
-        if current_iter - self.last_ckpt_iter >= self.checkpoint_interval:
-            CheckpointCallback._save_checkpoint(trainer, **kwargs)
-            self.last_ckpt_iter = current_iter
+        if context.current_iter - self.last_ckpt_iter >= self.checkpoint_interval:
+            self._save_checkpoint(trainer, context)
 
-    def on_train_end(self, trainer: 'Trainer', **kwargs):
-        current_iter = kwargs.get('current_iter')
-        if current_iter != self.last_ckpt_iter:
-            CheckpointCallback._save_checkpoint(trainer, **kwargs)
-            self.last_ckpt_iter = current_iter
+    def on_train_end(self, trainer: 'Trainer', context: 'TrainContext'):
+        if context.current_iter != self.last_ckpt_iter:
+            self._save_checkpoint(trainer, context)
 
 
 class GradientClippingCallback(TrainCallback):
     """
     Gradient clipping callback for trainer.
     """
-    def on_step_begin(self, trainer: 'Trainer', **kwargs):
-        _ = kwargs
-        clip_grad_norm_(
-            trainer.parameter.model.parameters(),
-            trainer.train_config.max_grad_norm
-        )
+    def on_step_begin(self, trainer: 'Trainer', context: 'TrainContext'):
+        _ = context
+        clip_grad_norm_(trainer.parameter.model.parameters(), trainer.train_config.max_grad_norm)
 
 
 class SchedulerCallback(TrainCallback):
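The interval bookkeeping deserves a sanity check: `on_batch_end` saves whenever at least `checkpoint_interval` iterations have passed since the last save (and `_save_checkpoint` now records `last_ckpt_iter` itself), while `on_train_end` writes a final checkpoint only if the last iteration is not already covered, so nothing is saved twice. A standalone simulation of that arithmetic:

```python
# Simulation of CheckpointCallback's save schedule; `saved.append` is a
# stand-in for _save_checkpoint, which also records last_ckpt_iter.
def simulate(total_iters: int, checkpoint_interval: int) -> list:
    saved = []
    last_ckpt_iter = 0
    for current_iter in range(1, total_iters + 1):      # on_batch_end
        if current_iter - last_ckpt_iter >= checkpoint_interval:
            saved.append(current_iter)
            last_ckpt_iter = current_iter
    if current_iter != last_ckpt_iter:                  # on_train_end
        saved.append(current_iter)
    return saved

print(simulate(10, 4))  # [4, 8, 10]: two interval saves plus one final save
```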
@@ -141,10 +119,8 @@ class SchedulerCallback(TrainCallback):
     def __init__(self, schedule_config: ScheduleConfig):
         self.schedule_config = schedule_config
         self.scheduler: Optional[LambdaLR] = None
-        self.current_iter = 0
 
-    def on_train_begin(self, trainer: 'Trainer', **kwargs):
-        self.current_iter = kwargs.get('current_iter')
+    def on_train_begin(self, trainer: 'Trainer', context: 'TrainContext'):
 
         for group in trainer.train_config.optimizer.param_groups:
             if "initial_lr" not in group:
@@ -158,12 +134,10 @@
         self.scheduler = LambdaLR(
             trainer.train_config.optimizer,
             lambda_scheduler_fn,
-            last_epoch=self.current_iter - 1
+            last_epoch=context.current_iter - 1
         )
 
-    def on_batch_end(self, trainer: 'Trainer', **kwargs):
-        _ = trainer, kwargs
+    def on_batch_end(self, trainer: 'Trainer', context: 'TrainContext'):
+        _ = trainer, context
 
         if self.scheduler:
             self.scheduler.step()
-            self.current_iter += 1
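Passing `last_epoch=context.current_iter - 1` is what lets `LambdaLR` resume mid-schedule instead of restarting at step 0; PyTorch then requires every param group to carry `initial_lr`, which is why `on_train_begin` backfills it above. A minimal standalone sketch of the resume behavior, with a toy model and a made-up warmup schedule standing in for `lambda_scheduler_fn`:

```python
import torch
import torch.optim as optim
from torch.optim.lr_scheduler import LambdaLR

model = torch.nn.Linear(4, 4)                      # toy model
optimizer = optim.SGD(model.parameters(), lr=0.1)

current_iter = 100                                 # pretend training resumed at step 100
for group in optimizer.param_groups:               # same backfill as on_train_begin above
    if "initial_lr" not in group:
        group["initial_lr"] = group["lr"]

# Made-up linear warmup over 1000 steps, standing in for lambda_scheduler_fn.
scheduler = LambdaLR(optimizer, lambda step: min(1.0, step / 1000),
                     last_epoch=current_iter - 1)
print(optimizer.param_groups[0]["lr"])             # ~0.01: the lr at step 100, not step 0
```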
@@ -44,7 +44,7 @@ class TrainContextBuilder:
             tokenizer=self.trainer.parameter.tokenizer,
             config=self.trainer.parameter.config,
             sampler_state=None,
-            optim_state=None,
+            optimizer_state=None,
             loss_list=[]
         )
         self._context.checkpoint = checkpoint
@@ -72,13 +72,13 @@ class TrainContextBuilder:
     def with_optimizer(self) -> Self:
         optimizer = self.trainer.train_config.optimizer
 
-        if self._context.checkpoint and self._context.checkpoint.optim_state:
-            optimizer.load_state_dict(self._context.checkpoint.optim_state)
+        if self._context.checkpoint and self._context.checkpoint.optimizer_state:
+            optimizer.load_state_dict(self._context.checkpoint.optimizer_state)
 
         self._context.optimizer = optimizer
 
         if self._context.checkpoint:
-            self._context.checkpoint.optim_state = optimizer.state_dict()
+            self._context.checkpoint.optimizer_state = optimizer.state_dict()
 
         return self
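The builder hunks above read and write the renamed `optimizer_state` field on `Checkpoint`. The `Checkpoint` class is not part of this diff; judging from the fields touched here and in `CheckpointCallback` (`sampler_state`, `optimizer_state`, `loss_list`, `save`), a hypothetical minimal shape might be the following — the real `khaosz.core.parameter.Checkpoint` also carries tokenizer and config per the builder call above, and its save format is not shown:

```python
# Hypothetical minimal shape of Checkpoint after the optim_state ->
# optimizer_state rename; the real class holds more and saves differently.
import os
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional

import torch

@dataclass
class Checkpoint:
    sampler_state: Optional[Dict[str, Any]] = None    # RandomSampler.state_dict()
    optimizer_state: Optional[Dict[str, Any]] = None  # renamed from optim_state
    loss_list: List[float] = field(default_factory=list)

    def save(self, save_path: str) -> None:
        os.makedirs(save_path, exist_ok=True)
        torch.save(
            {"sampler_state": self.sampler_state,
             "optimizer_state": self.optimizer_state,
             "loss_list": self.loss_list},
            os.path.join(save_path, "checkpoint.pt"),
        )
```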
@@ -45,11 +45,10 @@ class Trainer:
             .build())
 
     def _call_callbacks(self, method_name: str, context: TrainContext):
-        kwargs = context.asdict()
         for callback in self.callbacks:
             method = getattr(callback, method_name, None)
             if method:
-                method(self, **kwargs)
+                method(self, context)
 
     def train(self, checkpoint: Optional[Checkpoint] = None) -> Checkpoint:
         context = self._build_train_context(checkpoint)
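With the kwargs expansion gone, `_call_callbacks` hands every hook one shared `TrainContext`, so callbacks can read and mutate the same object rather than receiving disposable copies of its fields. The dispatch reduces to this pattern (standalone sketch; a dict stands in for `TrainContext` and `None` for the trainer):

```python
# Standalone sketch of the dispatch in Trainer._call_callbacks.
class DummyCallback:
    def on_batch_end(self, trainer, context):
        context["calls"] = context.get("calls", 0) + 1   # mutate the shared context

callbacks = [DummyCallback()]

def call_callbacks(method_name, context):
    for callback in callbacks:
        method = getattr(callback, method_name, None)    # hooks are optional
        if method:
            method(None, context)                        # real code passes the Trainer

context = {}
call_callbacks("on_batch_end", context)
call_callbacks("on_step_end", context)                   # no such hook: silently skipped
print(context)                                           # {'calls': 1}
```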
@@ -28,13 +28,13 @@ def test_callback_integration(base_test_env, random_dataset):
     callback_calls = []
 
     class TrackingCallback(TrainCallback):
-        def on_train_begin(self, trainer, **kwargs):
+        def on_train_begin(self, trainer, context):
             callback_calls.append('on_train_begin')
 
-        def on_batch_end(self, trainer, **kwargs):
+        def on_batch_end(self, trainer, context):
             callback_calls.append('on_batch_end')
 
-        def on_epoch_end(self, trainer, **kwargs):
+        def on_epoch_end(self, trainer, context):
             callback_calls.append('on_epoch_end')
 
     train_config.strategy = StrategyFactory.load(base_test_env["model"], "seq", base_test_env["device"])
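A hypothetical check of the sequence this test records (the actual assertions are outside this hunk, and the exact counts depend on the dataset and epoch settings):

```python
# Hypothetical assertions over the recorded call order; the sample list
# mirrors one epoch of two batches.
callback_calls = ['on_train_begin', 'on_batch_end', 'on_batch_end', 'on_epoch_end']
assert callback_calls[0] == 'on_train_begin'
assert callback_calls[-1] == 'on_epoch_end'
assert callback_calls.count('on_batch_end') == 2
```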