From 92999fa9f660e9cc9c6ca19fbb611d8a7c89966a Mon Sep 17 00:00:00 2001
From: ViperEkura <3081035982@qq.com>
Date: Sun, 28 Sep 2025 22:20:25 +0800
Subject: [PATCH] fix(trainer): fix incorrect config references in the trainer
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 khaosz/trainer/trainer.py | 50 +++++++++++++++++++--------------------
 1 file changed, 24 insertions(+), 26 deletions(-)

diff --git a/khaosz/trainer/trainer.py b/khaosz/trainer/trainer.py
index 7750f54..2392da0 100644
--- a/khaosz/trainer/trainer.py
+++ b/khaosz/trainer/trainer.py
@@ -29,91 +29,89 @@ class Trainer:
     def save_checkpoint(
         self,
         loss_list: list,
-        train_config: TrainConfig
     ):
         current_iter = len(loss_list)
-        save_path = os.path.join(train_config.checkpoint_dir, f"iter_{current_iter}")
+        save_path = os.path.join(self.train_config.checkpoint_dir, f"iter_{current_iter}")
 
         self.checkpoint.loss_list = loss_list
-        self.checkpoint.optim_state = train_config.optimizer.state_dict()
+        self.checkpoint.optim_state = self.train_config.optimizer.state_dict()
         self.checkpoint.save(save_path)
 
     def train(
         self,
         train_checkpoint: Optional[Checkpoint] = None
     ) -> Checkpoint:
-        train_config = self.train_config
-        schedule_config = self.schedule_config
-        assert schedule_config.schedule_type in ["cosine", "sgdr"]
+        assert self.schedule_config.schedule_type in ["cosine", "sgdr"]
 
         if train_checkpoint:
             self.checkpoint = train_checkpoint
-            train_config.optimizer.load_state_dict(train_checkpoint.optim_state)
+            self.train_config.optimizer.load_state_dict(train_checkpoint.optim_state)
 
-        self.checkpoint.optim_state = train_config.optimizer.state_dict()
+        self.checkpoint.optim_state = self.train_config.optimizer.state_dict()
         loss_list = self.checkpoint.loss_list
         current_iter = len(self.checkpoint.loss_list)
         last_ckpt_iter = current_iter
 
-        for group in train_config.optimizer.param_groups:
+        for group in self.train_config.optimizer.param_groups:
             if "initial_lr" not in group:
                 group["initial_lr"] = group["lr"]
 
         lambda_scheduler_fn = SchedulerFactory.load_schedule_fn(
-            **schedule_config.get_kwargs()
+            **self.schedule_config.get_kwargs()
         )
         scheduler = LambdaLR(
-            train_config.optimizer,
+            self.train_config.optimizer,
             lambda_scheduler_fn,
             last_epoch=current_iter - 1 if train_checkpoint else -1
         )
 
-        seed = train_config.random_seed
+        seed = self.train_config.random_seed
         generator = torch.Generator().manual_seed(seed)
-        sampler = RandomSampler(train_config.dataset, generator=generator)
-        remaining_epochs = train_config.n_epoch - current_iter // (len(train_config.dataset) // train_config.batch_size)
+        sampler = RandomSampler(self.train_config.dataset, generator=generator)
+        remaining_epochs = self.train_config.n_epoch - current_iter // (
+            len(self.train_config.dataset) // self.train_config.batch_size)
 
         for epoch in range(remaining_epochs):
             self.checkpoint.model.train()
             dataloader = DataLoader(
-                train_config.dataset,
-                batch_size=train_config.batch_size,
+                self.train_config.dataset,
+                batch_size=self.train_config.batch_size,
                 sampler=sampler
             )
             progress_bar = tqdm(
                 dataloader,
-                desc=f"Epoch {epoch+1}/{train_config.n_epoch}",
+                desc=f"Epoch {epoch+1}/{self.train_config.n_epoch}",
                 dynamic_ncols=True
             )
 
             for batch in progress_bar:
                 #forward
-                loss = train_config.strategy(batch)
+                loss = self.train_config.strategy(batch)
                 loss_list.append(loss.item())
                 #backward
                 loss.backward()
                 #step
-                if current_iter % train_config.accumulation_steps == 0:
+                if current_iter % self.train_config.accumulation_steps == 0:
                     clip_grad_norm_(
                         self.checkpoint.model.parameters(),
-                        train_config.max_grad_norm
+                        self.train_config.max_grad_norm
                     )
-                    train_config.optimizer.step()
-                    train_config.optimizer.zero_grad()
+                    self.train_config.optimizer.step()
+                    self.train_config.optimizer.zero_grad()
 
                 current_iter += 1
                 scheduler.step()
 
                 progress_bar.set_postfix({
                     "loss": f"{loss.item():.4f}",
-                    "lr": f"{train_config.optimizer.param_groups[0]['lr']:.2e}"
+                    "lr": f"{self.train_config.optimizer.param_groups[0]['lr']:.2e}"
                 })
                 #save checkpotint
-                if current_iter - last_ckpt_iter >= train_config.checkpoint_interval:
-                    self.save_checkpoint(loss_list, train_config)
+                if current_iter - last_ckpt_iter >= self.train_config.checkpoint_interval:
+                    self.save_checkpoint(loss_list)
                     last_ckpt_iter = current_iter
 
         if current_iter != last_ckpt_iter:
-            self.save_checkpoint(loss_list, train_config)
+            self.save_checkpoint(loss_list)
             last_ckpt_iter = current_iter
 
         return self.checkpoint
\ No newline at end of file