Fix post_init bug for run_pretrain_auto (#7604)
From00 committed Dec 8, 2023
1 parent 8aa96d2 commit a74916d
Showing 1 changed file with 3 additions and 13 deletions.
16 changes: 3 additions & 13 deletions llm/llama/run_pretrain_auto.py
@@ -25,7 +25,6 @@
 import numpy as np
 import paddle
 import paddle.distributed.auto_parallel as auto
-from paddle.distributed import fleet

 from paddlenlp.trainer import (
     PdArgumentParser,
@@ -88,20 +87,11 @@ class PreTrainingArguments(TrainingArguments):

     def __post_init__(self):
         super().__post_init__()
-        assert self.use_auto_parallel
-        strategy = fleet.auto.Strategy(self.strategy._config_dict)
-
-        fused_passes = strategy.fused_passes
-        fused_passes_list = fused_passes.fused_passes_list
-
         if self.fused_linear_param_grad_add:
-            fused_passes_list.append("fused_linear_param_grad_add_pass")
-
-        fused_passes.enable = len(fused_passes_list) > 0
-        fused_passes.fused_passes_list = fused_passes_list
-
-        self.strategy = strategy
+            fused_passes = self.strategy.fused_passes
+            fused_passes.enable = True
+            fused_passes.fused_passes_list.append("fused_linear_param_grad_add_pass")
         logger.info(self.strategy)
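
Taken together, the patch drops the detour through fleet.auto.Strategy(self.strategy._config_dict) (and the unconditional overwrite of self.strategy) and instead mutates the existing strategy in place, and only when fused_linear_param_grad_add is requested. Below is a minimal sketch of how the method reads after the change; the import locations, the field declaration, and the reliance on the parent __post_init__ creating self.strategy are assumptions based on the visible diff context and common PaddleNLP conventions, not the verbatim file.

from dataclasses import dataclass, field

from paddlenlp.trainer import TrainingArguments
from paddlenlp.utils.log import logger  # assumption: typical PaddleNLP logger import


@dataclass
class PreTrainingArguments(TrainingArguments):
    # Assumed field name and default, mirroring the flag referenced in the diff.
    fused_linear_param_grad_add: bool = field(default=False)

    def __post_init__(self):
        super().__post_init__()
        # Relies on the parent __post_init__ having created self.strategy for
        # auto-parallel runs. Mutate that strategy in place rather than
        # rebuilding it from self.strategy._config_dict as the removed code did,
        # and only touch the fused-passes config when the flag is set.
        if self.fused_linear_param_grad_add:
            fused_passes = self.strategy.fused_passes
            fused_passes.enable = True
            fused_passes.fused_passes_list.append("fused_linear_param_grad_add_pass")
        logger.info(self.strategy)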


