Commit d58c1f6
Revert "Support BF16 training for sharding (PaddlePaddle#46846)"
This reverts commit 6adbed6.
GhostScreaming committed Oct 21, 2022
1 parent 4345cb2 commit d58c1f6
Showing 10 changed files with 0 additions and 23 deletions.
8 changes: 0 additions & 8 deletions paddle/fluid/distributed/collective/reducer.cc
@@ -254,10 +254,6 @@ static void ConcatTensorsWithType(
       ConcatTensorsForAllReduce<DeviceContext, double>()(
           context, dense_tensors_, p_dense_contents);
       break;
-    case phi::DataType::BFLOAT16:
-      ConcatTensorsForAllReduce<DeviceContext, platform::bfloat16>()(
-          context, dense_tensors_, p_dense_contents);
-      break;
     default:
       PADDLE_THROW(platform::errors::Unimplemented(
           "Data type (%s) is not supported when it concats tensors for "

@@ -285,10 +281,6 @@ static void SplitTensorsWithType(const DeviceContext &context,
       SplitTensorsForAllReduce<DeviceContext, double>()(
           context, p_dense_contents, p_dense_tensors);
       break;
-    case phi::DataType::BFLOAT16:
-      SplitTensorsForAllReduce<DeviceContext, platform::bfloat16>()(
-          context, p_dense_contents, p_dense_tensors);
-      break;
     default:
       PADDLE_THROW(platform::errors::Unimplemented(
           "Data type (%s) is not supported when it splits tensors for "
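For context on the removed C++ cases: ConcatTensorsWithType and SplitTensorsWithType switch on the runtime dtype and call a templated functor, so deleting the BFLOAT16 case makes bf16 gradients fall through to the Unimplemented error in the default branch. A minimal Python sketch of the same dispatch pattern (hypothetical names, not Paddle's actual C++ API):

import paddle

def concat_tensors_for_allreduce(tensors):
    # Stand-in for the templated ConcatTensorsForAllReduce functor.
    return paddle.concat([t.flatten() for t in tensors])

# Dispatch table standing in for the C++ switch; bf16 is no longer listed.
_CONCAT_DISPATCH = {
    paddle.float32: concat_tensors_for_allreduce,
    paddle.float64: concat_tensors_for_allreduce,
    paddle.float16: concat_tensors_for_allreduce,
}

def concat_with_type(tensors):
    handler = _CONCAT_DISPATCH.get(tensors[0].dtype)
    if handler is None:  # mirrors PADDLE_THROW(Unimplemented(...))
        raise NotImplementedError(
            f"Data type ({tensors[0].dtype}) is not supported")
    return handler(tensors)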
1 change: 0 additions & 1 deletion paddle/phi/kernels/cpu/fill_grad_kernel.cc
@@ -26,5 +26,4 @@ PD_REGISTER_KERNEL(fill_grad,
                    int64_t,
                    int,
                    paddle::platform::float16,
-                   paddle::platform::bfloat16,
                    bool) {}
1 change: 0 additions & 1 deletion paddle/phi/kernels/cpu/fill_kernel.cc
@@ -26,5 +26,4 @@ PD_REGISTER_KERNEL(fill,
                    int64_t,
                    int,
                    paddle::platform::float16,
-                   paddle::platform::bfloat16,
                    bool) {}
1 change: 0 additions & 1 deletion paddle/phi/kernels/gpu/fill_grad_kernel.cu
@@ -27,5 +27,4 @@ PD_REGISTER_KERNEL(fill_grad,
                    int64_t,
                    int,
                    paddle::platform::float16,
-                   paddle::platform::bfloat16,
                    bool) {}
1 change: 0 additions & 1 deletion paddle/phi/kernels/gpu/fill_kernel.cu
@@ -27,5 +27,4 @@ PD_REGISTER_KERNEL(fill,
                    int64_t,
                    int,
                    paddle::platform::float16,
-                   paddle::platform::bfloat16,
                    bool) {}
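These four registrations are the CPU and GPU dtype lists for the fill and fill_grad phi kernels; dropping paddle::platform::bfloat16 means a bf16 tensor can no longer dispatch to them. A hedged probe of the user-visible effect (assumes a build where bf16 tensor creation itself still works; the exact exception type and message may differ):

import paddle

x = paddle.ones([4, 4], dtype=paddle.float16)
x.fill_(0.5)  # float16 is still in the registration list, so this works

y = paddle.ones([4, 4], dtype=paddle.bfloat16)
try:
    y.fill_(0.5)  # fill kernel is no longer registered for bf16 after the revert
except Exception as err:
    print("bf16 fill rejected:", err)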
@@ -43,7 +43,6 @@
 alignment = {"gpu": 256, "cpu": 4096}
 align = {
     Type.fp16.value: 2,
-    Type.bf16.value: 2,
     Type.fp32.value: 4,
 }
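The align table maps each parameter dtype to its element size in bytes, while alignment gives the allocator's required alignment per device; together they determine how buffer element counts are padded. A simplified, hypothetical helper showing how such tables combine (not the actual Paddle function):

alignment = {"gpu": 256, "cpu": 4096}  # required buffer alignment in bytes
align = {"float16": 2, "float32": 4}   # element size in bytes

def aligned_numel(numel, dtype, device):
    # Round an element count up so numel * element_size is a
    # multiple of the device alignment.
    elems_per_block = alignment[device] // align[dtype]
    remainder = numel % elems_per_block
    return numel if remainder == 0 else numel + elems_per_block - remainder

print(aligned_numel(1000, "float16", "gpu"))  # 1024: 128 fp16 elements per 256-byte block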
@@ -531,12 +531,6 @@ def _rank_buffer_size(self, buffer_max_size, model_size):
                 "====== FP16 GradStorage size: {:.2f}M parameters, Model size {:.2f}M parameters ======"
                 .format(rank_buffer_size[Type.fp16.value] / 2**19,
                         model_size / 2**19))
-        if Type.bf16.value in rank_buffer_size.keys():
-            # BF16 GradStorage and model size
-            logger_.info(
-                "====== BF16 GradStorage size: {:.2f}M parameters, Model size {:.2f}M parameters ======"
-                .format(rank_buffer_size[Type.bf16.value] / 2**19,
-                        model_size / 2**19))
         if Type.fp32.value in rank_buffer_size.keys():
             # FP32 GradStorage and model size
             logger_.info(
@@ -53,8 +53,6 @@ def __init__(self, size, dtype, device, convert_cpu=False):
                 dtype=np.float16) if Type.fp16.value == dtype else np.zeros(
                     size, dtype=np.float32)
             self.buffer = core.eager.Tensor(value=value, place=core.CPUPlace())
-            if dtype == Type.bf16.value:
-                self.buffer = paddle.cast(self.buffer, dtype=paddle.bfloat16)
         else:
             self.buffer = paddle.zeros(size, dtype=dtype)
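The deleted branch worked around NumPy's lack of a native bfloat16 dtype: the CPU-side buffer was first allocated as float16 zeros and then cast to bf16 on the Paddle side. A standalone sketch of that workaround (using paddle.to_tensor in place of the internal core.eager.Tensor constructor):

import numpy as np
import paddle

size = 1024
value = np.zeros(size, dtype=np.float16)             # NumPy cannot allocate bf16 directly
buffer = paddle.to_tensor(value, place=paddle.CPUPlace())
buffer = paddle.cast(buffer, dtype=paddle.bfloat16)  # convert the storage to bf16
print(buffer.dtype)  # paddle.bfloat16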
@@ -41,7 +41,6 @@ class Type(Enum):
     Type of trainable parameters
     """
     fp16 = paddle.float16
-    bf16 = paddle.bfloat16
     fp32 = paddle.float32
@@ -45,7 +45,6 @@ class Type(Enum):
     Type of trainable parameters
     """
     fp16 = paddle.float16
-    bf16 = paddle.bfloat16
     fp32 = paddle.float32
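Both sharding utility modules define the same Type enum, and the surrounding sharding code keys its bookkeeping dicts by the enum's value (a paddle dtype), which is why removing the bf16 member is enough to drop bf16 from every code path above. A small illustration of that usage pattern (hypothetical, mirroring the dict keys seen in the diff):

from enum import Enum
import paddle

class Type(Enum):
    """Type of trainable parameters (bf16 removed by this revert)."""
    fp16 = paddle.float16
    fp32 = paddle.float32

rank_buffer_size = {Type.fp16.value: 0, Type.fp32.value: 0}
p = paddle.zeros([8], dtype=paddle.float16)
rank_buffer_size[p.dtype] += p.size  # p.dtype equals Type.fp16.value
print(rank_buffer_size)              # per-dtype element counts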
