
update 2.0 public api in all left files (#33313)
* update 2.0 public api in all left files

* reverse device.py __all__ list;
  fix some flake8 errors
zhiboniu committed Jun 11, 2021
1 parent 2de737e commit 022198c
Showing 32 changed files with 186 additions and 175 deletions.
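
The change is mechanical across files: each submodule re-exports its public symbols with `# noqa: F401` (so flake8 does not flag the apparently unused imports), empties its own module-level `__all__`, and a curated package-level `__all__` alone decides what star-imports expose. A minimal sketch of the pattern, with hypothetical module and function names:

    # pkg/util.py -- symbols are defined here but not star-exported from here
    __all__ = []

    def helper():
        return 42

    # pkg/__init__.py -- re-export for attribute access, curate the public list
    from .util import helper  # noqa: F401

    __all__ = [  # noqa
        'helper',
    ]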
24 changes: 6 additions & 18 deletions python/paddle/__init__.py
@@ -21,8 +21,7 @@
     import paddle from the source directory; please install paddlepaddle*.whl firstly.'''
     )
 
-import paddle.batch
-batch = batch.batch
+from .batch import batch  # noqa: F401
 from .fluid import monkey_patch_variable
 from .fluid.dygraph import monkey_patch_math_varbase
 monkey_patch_variable()
@@ -136,7 +135,6 @@
 from .tensor.manipulation import squeeze_  # noqa: F401
 from .tensor.manipulation import stack  # noqa: F401
 from .tensor.manipulation import strided_slice  # noqa: F401
-from .tensor.manipulation import transpose  # noqa: F401
 from .tensor.manipulation import unique  # noqa: F401
 from .tensor.manipulation import unsqueeze  # noqa: F401
 from .tensor.manipulation import unsqueeze_  # noqa: F401
@@ -192,7 +190,6 @@
 from .tensor.math import multiply  # noqa: F401
 from .tensor.math import add  # noqa: F401
 from .tensor.math import subtract  # noqa: F401
-from .tensor.math import atan  # noqa: F401
 from .tensor.math import logsumexp  # noqa: F401
 from .tensor.math import inverse  # noqa: F401
 from .tensor.math import log1p  # noqa: F401
@@ -247,9 +244,8 @@
 from .framework import load  # noqa: F401
 from .framework import DataParallel  # noqa: F401
 
-from .framework import set_default_dtype  #DEFINE_ALIAS
-from .framework import get_default_dtype  #DEFINE_ALIAS
-from .framework import set_grad_enabled  #DEFINE_ALIAS
+from .framework import set_default_dtype  # noqa: F401
+from .framework import get_default_dtype  # noqa: F401
 
 from .tensor.search import index_sample  # noqa: F401
 from .tensor.stat import mean  # noqa: F401
@@ -284,7 +280,7 @@
 from .tensor.random import check_shape  # noqa: F401
 disable_static()
 
-__all__ = [  #noqa
+__all__ = [  # noqa
     'dtype',
     'uint8',
     'int8',
@@ -327,7 +323,6 @@
     'cos',
     'tan',
     'mean',
-    'XPUPlace',
     'mv',
     'in_dynamic_mode',
     'min',
@@ -364,7 +359,6 @@
     'to_tensor',
     'gather_nd',
     'isinf',
-    'set_device',
     'uniform',
     'floor_divide',
     'remainder',
@@ -388,8 +382,6 @@
     'rand',
     'less_equal',
     'triu',
-    'is_compiled_with_cuda',
-    'is_compiled_with_rocm',
     'sin',
     'dist',
     'unbind',
@@ -418,8 +410,6 @@
     'bernoulli',
     'summary',
     'sinh',
-    'is_compiled_with_xpu',
-    'is_compiled_with_npu',
     'round',
     'DataParallel',
     'argmin',
@@ -443,7 +433,6 @@
     'not_equal',
     'sum',
     'tile',
-    'get_device',
     'greater_equal',
     'isfinite',
     'create_parameter',
@@ -476,7 +465,6 @@
     'scatter_nd',
     'set_default_dtype',
     'expand_as',
-    'get_cudnn_version',
     'stack',
     'sqrt',
     'cholesky',
@@ -490,7 +478,6 @@
     'logical_not',
     'add_n',
     'minimum',
-    'ComplexTensor',
     'scatter',
     'scatter_',
     'floor',
@@ -499,5 +486,6 @@
     'log2',
     'log10',
     'concat',
-    'check_shape'
+    'check_shape',
+    'standard_normal'
 ]
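
Dropping a name from the package-level `__all__` only narrows star-imports; attribute access still works because the submodule imports remain. A quick check of that semantics (a sketch, not part of the commit):

    import paddle

    paddle.set_device  # still resolvable as an attribute of the package

    ns = {}
    exec('from paddle import *', ns)
    assert 'set_device' not in ns  # no longer star-exported from the root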
4 changes: 2 additions & 2 deletions python/paddle/amp/__init__.py
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from .auto_cast import auto_cast
-from .grad_scaler import GradScaler
+from .auto_cast import auto_cast  # noqa: F401
+from .grad_scaler import GradScaler  # noqa: F401
 
 __all__ = ['auto_cast', 'GradScaler']
2 changes: 1 addition & 1 deletion python/paddle/amp/auto_cast.py
@@ -14,7 +14,7 @@
 
 from paddle.fluid.dygraph.amp import amp_guard
 
-__all__ = ['auto_cast']
+__all__ = []
 
 
 def auto_cast(enable=True, custom_white_list=None, custom_black_list=None):
2 changes: 1 addition & 1 deletion python/paddle/amp/grad_scaler.py
@@ -14,7 +14,7 @@
 
 from paddle.fluid.dygraph.amp import AmpScaler
 
-__all__ = ['GradScaler']
+__all__ = []
 
 
 class GradScaler(AmpScaler):
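
Both symbols stay importable from `paddle.amp`; for reference, a minimal mixed-precision step following their documented scale/minimize pattern (a sketch assuming a CUDA build of Paddle):

    import paddle

    model = paddle.nn.Linear(10, 10)
    opt = paddle.optimizer.SGD(learning_rate=0.01, parameters=model.parameters())
    scaler = paddle.amp.GradScaler(init_loss_scaling=1024)

    data = paddle.rand([4, 10])
    with paddle.amp.auto_cast():   # ops run in fp16 where it is safe
        loss = model(data).mean()
    scaled = scaler.scale(loss)    # scale the loss to avoid fp16 underflow
    scaled.backward()
    scaler.minimize(opt, scaled)   # unscale grads, step, update the scale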
9 changes: 4 additions & 5 deletions python/paddle/autograd/__init__.py
@@ -12,10 +12,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from ..fluid.dygraph.base import grad  #DEFINE_ALIAS
-
-from . import backward_mode
-from .backward_mode import backward
-from .py_layer import PyLayer, PyLayerContext
+from ..fluid.dygraph.base import grad  # noqa: F401
+from . import backward_mode  # noqa: F401
+from .backward_mode import backward  # noqa: F401
+from .py_layer import PyLayer, PyLayerContext  # noqa: F401
 
 __all__ = ['grad', 'backward', 'PyLayer', 'PyLayerContext']
2 changes: 1 addition & 1 deletion python/paddle/autograd/backward_mode.py
@@ -15,7 +15,7 @@
 from paddle.fluid import core
 from paddle.fluid import framework
 import paddle
-__all__ = ['backward']
+__all__ = []
 
 
 @framework.dygraph_only
2 changes: 1 addition & 1 deletion python/paddle/autograd/py_layer.py
@@ -15,7 +15,7 @@
 import paddle
 from paddle.fluid.framework import dygraph_only
 from paddle.fluid import core
-__all__ = ['PyLayer', 'PyLayerContext']
+__all__ = []
 
 
 class PyLayerContext(object):
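
`PyLayer` and `PyLayerContext` remain reachable as `paddle.autograd.PyLayer` and `paddle.autograd.PyLayerContext`; a custom-gradient sketch along the lines of the class's documented usage:

    import paddle
    from paddle.autograd import PyLayer

    class Tanh(PyLayer):
        @staticmethod
        def forward(ctx, x):
            y = paddle.tanh(x)
            ctx.save_for_backward(y)   # stash tensors for the backward pass
            return y

        @staticmethod
        def backward(ctx, dy):
            y, = ctx.saved_tensor()
            return dy * (1 - paddle.square(y))

    x = paddle.randn([3], dtype='float32')
    x.stop_gradient = False
    Tanh.apply(x).sum().backward()
    print(x.grad)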
8 changes: 4 additions & 4 deletions python/paddle/batch.py
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-__all__ = ['batch']
+__all__ = []
 
 
 def batch(reader, batch_size, drop_last=False):
@@ -35,11 +35,11 @@ def batch(reader, batch_size, drop_last=False):
     Examples:
         .. code-block:: python
 
-            import paddle.fluid as fluid
+            import paddle
 
             def reader():
                 for i in range(10):
                     yield i
-            batch_reader = fluid.io.batch(reader, batch_size=2)
+            batch_reader = paddle.batch(reader, batch_size=2)
             for data in batch_reader():
                 print(data)
@@ -60,7 +60,7 @@ def batch_reader():
             if len(b) == batch_size:
                 yield b
                 b = []
-        if drop_last == False and len(b) != 0:
+        if drop_last is False and len(b) != 0:
            yield b
 
     # Batch size check
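
The `drop_last is False` comparison preserves the documented default: a short final batch is yielded unless the caller opts out. A quick demonstration using only the function shown above:

    import paddle

    def reader():
        for i in range(5):
            yield i

    print(list(paddle.batch(reader, batch_size=2)()))
    # [[0, 1], [2, 3], [4]]  -- trailing short batch kept by default
    print(list(paddle.batch(reader, batch_size=2, drop_last=True)()))
    # [[0, 1], [2, 3]]       -- trailing short batch dropped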
11 changes: 2 additions & 9 deletions python/paddle/compat.py
@@ -15,18 +15,11 @@
 import six
 import math
 
-__all__ = [
-    'long_type',
-    'to_text',
-    'to_bytes',
-    'round',
-    'floor_division',
-    'get_exception_message',
-]
+__all__ = []
 
 if six.PY2:
     int_type = int
-    long_type = long
+    long_type = long  # noqa: F821
 else:
     int_type = int
     long_type = int
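
For context (not part of the commit): the `# noqa: F821` suppresses flake8's undefined-name warning, since the builtin `long` exists only on Python 2 while flake8 typically runs under Python 3; the `six.PY2` guard keeps the line from ever executing there. A small sketch of the guarded alias:

    import six

    if six.PY2:
        long_type = long  # noqa: F821  -- only evaluated on Python 2
    else:
        long_type = int

    # On Python 3, arbitrarily large integers are plain ints:
    assert isinstance(10**30, long_type)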
34 changes: 14 additions & 20 deletions python/paddle/device.py
@@ -18,21 +18,16 @@
 from paddle.fluid import core
 from paddle.fluid import framework
 from paddle.fluid.dygraph.parallel import ParallelEnv
-from paddle.fluid.framework import is_compiled_with_cuda  #DEFINE_ALIAS
-from paddle.fluid.framework import is_compiled_with_rocm  #DEFINE_ALIAS
+from paddle.fluid.framework import is_compiled_with_cuda  # noqa: F401
+from paddle.fluid.framework import is_compiled_with_rocm  # noqa: F401
 
-__all__ = [
+
+__all__ = [  # noqa
     'get_cudnn_version',
     'set_device',
     'get_device',
     'XPUPlace',
     'is_compiled_with_xpu',
-    # 'cpu_places',
-    # 'CPUPlace',
-    # 'cuda_pinned_places',
-    # 'cuda_places',
-    # 'CUDAPinnedPlace',
-    # 'CUDAPlace',
     'is_compiled_with_cuda',
     'is_compiled_with_rocm',
     'is_compiled_with_npu'
@@ -68,7 +63,7 @@ def is_compiled_with_xpu():
         .. code-block:: python
 
             import paddle
-            support_xpu = paddle.device.is_compiled_with_xpu()
+            support_xpu = paddle.is_compiled_with_xpu()
     """
     return core.is_compiled_with_xpu()
@@ -82,9 +77,10 @@ def XPUPlace(dev_id):
     Examples:
         .. code-block:: python
 
+            # required: xpu
            import paddle
-            place = paddle.device.XPUPlace(0)
+            place = paddle.XPUPlace(0)
     """
     return core.XPUPlace(dev_id)
@@ -127,15 +123,13 @@ def _convert_to_place(device):
         place = core.CPUPlace()
     elif lower_device == 'gpu':
         if not core.is_compiled_with_cuda():
-            raise ValueError(
-                "The device should not be 'gpu', " \
-                "since PaddlePaddle is not compiled with CUDA")
+            raise ValueError("The device should not be 'gpu', "
+                             "since PaddlePaddle is not compiled with CUDA")
         place = core.CUDAPlace(ParallelEnv().dev_id)
     elif lower_device == 'xpu':
         if not core.is_compiled_with_xpu():
-            raise ValueError(
-                "The device should not be 'xpu', " \
-                "since PaddlePaddle is not compiled with XPU")
+            raise ValueError("The device should not be 'xpu', "
+                             "since PaddlePaddle is not compiled with XPU")
         selected_xpus = os.getenv("FLAGS_selected_xpus", "0").split(",")
         device_id = int(selected_xpus[0])
         place = core.XPUPlace(device_id)
@@ -149,7 +143,7 @@ def _convert_to_place(device):
         if avaliable_gpu_device:
             if not core.is_compiled_with_cuda():
                 raise ValueError(
-                    "The device should not be {}, since PaddlePaddle is " \
+                    "The device should not be {}, since PaddlePaddle is "
                     "not compiled with CUDA".format(avaliable_gpu_device))
             device_info_list = device.split(':', 1)
             device_id = device_info_list[1]
@@ -158,7 +152,7 @@ def _convert_to_place(device):
         if avaliable_xpu_device:
             if not core.is_compiled_with_xpu():
                 raise ValueError(
-                    "The device should not be {}, since PaddlePaddle is " \
+                    "The device should not be {}, since PaddlePaddle is "
                     "not compiled with XPU".format(avaliable_xpu_device))
             device_info_list = device.split(':', 1)
             device_id = device_info_list[1]
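
The helpers dropped from the package-level `__all__` remain usable as attributes of `paddle`; a device-selection sketch that guards the optional backends:

    import paddle

    paddle.set_device('cpu')             # always available
    print(paddle.get_device())           # -> 'cpu'

    if paddle.is_compiled_with_cuda():   # only meaningful on CUDA builds
        paddle.set_device('gpu:0')
        print(paddle.get_cudnn_version())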
5 changes: 1 addition & 4 deletions python/paddle/distributed/parallel.py
@@ -29,9 +29,7 @@
 from paddle.fluid.dygraph.parallel import ParallelEnv
 from paddle.distributed.fleet.base.private_helper_function import wait_server_ready  # noqa: F401
 
-__all__ = [  #noqa
-    "init_parallel_env"
-]
+__all__ = []
 
 ParallelStrategy = core.ParallelStrategy
@@ -152,7 +150,6 @@ def _check_var_exists(var_name):
     init_gloo = int(os.getenv("PADDLE_WITH_GLOO", "0"))
     if init_gloo:
         ep_rank_0 = parallel_env.trainer_endpoints[0].split(":")
-        ep_rank = parallel_env.trainer_endpoints[parallel_env.rank].split(":")
         manager = Manager()
         # global dict to store status
         http_server_d = manager.dict()
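
`init_parallel_env` itself is unchanged apart from the removal of the unused `ep_rank` variable; typical usage follows the documented spawn pattern (a sketch assuming a multi-GPU CUDA build):

    import paddle
    import paddle.distributed as dist

    def train():
        dist.init_parallel_env()          # reads launcher-set PADDLE_* env vars
        layer = paddle.nn.Linear(4, 2)
        dp_layer = paddle.DataParallel(layer)
        loss = dp_layer(paddle.rand([8, 4])).mean()
        loss.backward()

    if __name__ == '__main__':
        dist.spawn(train, nprocs=2)       # one worker process per GPU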
13 changes: 7 additions & 6 deletions python/paddle/incubate/__init__.py
@@ -12,10 +12,11 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from . import optimizer
-from . import checkpoint
-from ..fluid.layer_helper import LayerHelper
+from .optimizer import LookAhead  # noqa: F401
+from .optimizer import ModelAverage  # noqa: F401
+from .checkpoint import auto_checkpoint  # noqa: F401
+from ..fluid.layer_helper import LayerHelper  # noqa: F401
 
-__all__ = []
-__all__ += optimizer.__all__
-__all__ += checkpoint.__all__
+__all__ = [  # noqa
+    'LookAhead', 'ModelAverage'
+]
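
After this change the two optimizers are named explicitly as `paddle.incubate.LookAhead` and `paddle.incubate.ModelAverage` instead of being aggregated from `optimizer.__all__`; a small sketch of the new spelling:

    import paddle

    linear = paddle.nn.Linear(10, 1)
    sgd = paddle.optimizer.SGD(learning_rate=0.1, parameters=linear.parameters())

    # wrap the inner optimizer; alpha/k follow the LookAhead defaults
    lookahead = paddle.incubate.LookAhead(sgd, alpha=0.5, k=5)

    loss = linear(paddle.rand([4, 10])).mean()
    loss.backward()
    lookahead.step()
    lookahead.clear_grad()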
4 changes: 2 additions & 2 deletions python/paddle/incubate/checkpoint/__init__.py
@@ -12,6 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from ...fluid.incubate.checkpoint import auto_checkpoint
+from ...fluid.incubate.checkpoint import auto_checkpoint  # noqa: F401
 
-__all__ = ["auto_checkpoint"]
+__all__ = []
6 changes: 3 additions & 3 deletions python/paddle/incubate/optimizer/__init__.py
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from .lookahead import LookAhead
-from .modelaverage import ModelAverage
+from .lookahead import LookAhead  # noqa: F401
+from .modelaverage import ModelAverage  # noqa: F401
 
-__all__ = ['LookAhead', 'ModelAverage']
+__all__ = []
(The remaining 18 changed files are not shown.)
