move fluid.layers.create_global_var to static.create_global_var
cyber-pioneer committed Dec 8, 2022
1 parent 7368889 commit 71d20d2
Showing 38 changed files with 179 additions and 184 deletions.
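
In short, in-tree call sites switch from the removed fluid.layers alias to the public static-graph API. A minimal before/after sketch (the variable name below is illustrative, not taken from the diff):

import paddle

paddle.enable_static()

# Old spelling, removed by this commit:
#     from paddle.fluid import layers
#     counter = layers.create_global_var(
#         shape=[1], value=0.0, dtype='float32', persistable=True)

# New spelling:
counter = paddle.static.create_global_var(
    shape=[1],
    value=0.0,
    dtype='float32',
    persistable=True,
    force_cpu=False,
    name='counter',
)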
10 changes: 5 additions & 5 deletions python/paddle/distributed/fleet/meta_optimizers/dgc_optimizer.py
@@ -23,9 +23,9 @@
from paddle.common_ops_import import LayerHelper
from paddle.fluid.clip import GradientClipByNorm, append_gradient_clip_ops
from paddle.fluid.dygraph import base as imperative_base
from paddle.fluid.layers import tensor
from paddle.fluid.optimizer import Momentum, Optimizer
from paddle.framework import core
from paddle.static import create_global_var


class DGCMomentumOptimizer(Optimizer):
@@ -217,7 +217,7 @@ def _append_dgc_ops(self, param_and_grads):
)

# rampup begin step var for all_reduce_op_handle
self._rampup_begin_step_var = tensor.create_global_var(
self._rampup_begin_step_var = create_global_var(
shape=[1],
dtype=core.VarDesc.VarType.FP32,
persistable=True,
@@ -237,7 +237,7 @@ def _append_dgc_ops(self, param_and_grads):

v_var = self._add_accumulator(self._v_velocity_acc_str, param_var)

k_var = tensor.create_global_var(
k_var = create_global_var(
shape=[1],
dtype=param_var.dtype,
persistable=True,
@@ -246,7 +246,7 @@ def _append_dgc_ops(self, param_and_grads):
force_cpu=True,
)

encoded_var = tensor.create_global_var(
encoded_var = create_global_var(
shape=[1],
dtype=param_var.dtype,
persistable=True,
@@ -255,7 +255,7 @@ def _append_dgc_ops(self, param_and_grads):
force_cpu=False,
)

gather_var = tensor.create_global_var(
gather_var = create_global_var(
shape=[1],
dtype=param_var.dtype,
persistable=True,
14 changes: 7 additions & 7 deletions python/paddle/distributed/fleet/metrics/metric.py
@@ -40,7 +40,7 @@ def sum(input, scope=None, util=None):
# in model.py
input = fluid.layers.cast(some_input, dtype='float32')
cnt = paddle.sum(input)
global_cnt = fluid.layers.create_global_var(persistable=True, dtype='float32', shape=[1], value=0)
global_cnt = paddle.static.create_global_var(persistable=True, dtype='float32', shape=[1], value=0)
tmp = fluid.layers.elementwise_add(cnt, global_cnt)
fluid.layers.assign(tmp, global_cnt)
@@ -80,7 +80,7 @@ def max(input, scope=None, util=None):
# in model.py
input = fluid.layers.cast(some_input, dtype='float32')
cnt = paddle.sum(input)
global_cnt = fluid.layers.create_global_var(persistable=True, dtype='float32', shape=[1], value=0)
global_cnt = paddle.static.create_global_var(persistable=True, dtype='float32', shape=[1], value=0)
tmp = paddle.maximum(cnt, global_cnt)
fluid.layers.assign(tmp, global_cnt)
@@ -120,7 +120,7 @@ def min(input, scope=None, util=None):
# in model.py
input = fluid.layers.cast(some_input, dtype='float32')
cnt = paddle.sum(input)
global_cnt = fluid.layers.create_global_var(persistable=True, dtype='float32', shape=[1], value=0)
global_cnt = paddle.static.create_global_var(persistable=True, dtype='float32', shape=[1], value=0)
tmp = fluid.layers.elementwise_min(cnt, global_cnt)
fluid.layers.assign(tmp, global_cnt)
@@ -391,15 +391,15 @@ def acc(correct, total, scope=None, util=None):
.. code-block:: python
# in model.py
correct = fluid.layers.create_global_var(dtype='float32', shape=[1], value=0)
total = fluid.layers.create_global_var(dtype='float32', shape=[1], value=0)
correct = paddle.static.create_global_var(dtype='float32', shape=[1], value=0)
total = paddle.static.create_global_var(dtype='float32', shape=[1], value=0)
acc = fluid.layers.acc(predict, label, k=1, correct=correct, total=total)
global_correct = fluid.layers.create_global_var(persistable=True, dtype='float32', shape=[1], value=0)
global_correct = paddle.static.create_global_var(persistable=True, dtype='float32', shape=[1], value=0)
tmp1 = fluid.layers.elementwise_min(correct, global_correct)
fluid.layers.assign(tmp1, global_correct)
global_total = fluid.layers.create_global_var(persistable=True, dtype='float32', shape=[1], value=0)
global_total = paddle.static.create_global_var(persistable=True, dtype='float32', shape=[1], value=0)
tmp2 = fluid.layers.elementwise_min(total, global_total)
fluid.layers.assign(tmp2, global_total)
@@ -64,7 +64,7 @@ def _remove_and_get_optimizer_op(main_program, dist_context):
def _get_gm_cond_var(main_program, k_steps, dist_context):
main_block = main_program.global_block()
# Add const var
k_step_var = layers.create_global_var(
k_step_var = paddle.static.create_global_var(
name="gradient_merge_k",
shape=[1],
value=int(k_steps),
@@ -74,7 +74,7 @@ def _get_gm_cond_var(main_program, k_steps, dist_context):
)
set_var_dist_attr(dist_context, k_step_var, [-1], world_process_group.ranks)

zero_var = layers.create_global_var(
zero_var = paddle.static.create_global_var(
name="gradient_merge_zero",
shape=[1],
value=int(0),
@@ -85,7 +85,7 @@ def _get_gm_cond_var(main_program, k_steps, dist_context):
set_var_dist_attr(dist_context, zero_var, [-1], world_process_group.ranks)

# Add step var & cond var
step_var = layers.create_global_var(
step_var = paddle.static.create_global_var(
name="gradient_merge_step",
shape=[1],
value=int(0),
@@ -12,6 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.

import paddle
from paddle.fluid import (
core,
default_main_program,
@@ -68,7 +69,7 @@ def _init_amp_var(self):
if isinstance(self._optimizer._learning_rate, float):
self._optimizer._learning_rate_map[
default_main_program()
] = layers.create_global_var(
] = paddle.static.create_global_var(
name=unique_name.generate("learning_rate"),
shape=[1],
value=float(self._optimizer._learning_rate),
8 changes: 4 additions & 4 deletions python/paddle/fluid/contrib/mixed_precision/decorator.py
@@ -122,7 +122,7 @@ def _supports_check_nan_inf(self):
return getattr(self._optimizer, "_supports_check_nan_inf", False)

def _init_amp_var(self):
self._loss_scaling = layers.create_global_var(
self._loss_scaling = paddle.static.create_global_var(
name=unique_name.generate("loss_scaling"),
shape=[1],
value=self._init_loss_scaling,
@@ -131,14 +131,14 @@ def _init_amp_var(self):
)

if self._use_dynamic_loss_scaling:
self._num_good_steps = layers.create_global_var(
self._num_good_steps = paddle.static.create_global_var(
name=unique_name.generate("num_good_steps"),
shape=[1],
value=0,
dtype='int32',
persistable=True,
)
self._num_bad_steps = layers.create_global_var(
self._num_bad_steps = paddle.static.create_global_var(
name=unique_name.generate("num_bad_steps"),
shape=[1],
value=0,
@@ -151,7 +151,7 @@ def _init_amp_var(self):
if isinstance(self._optimizer._learning_rate, float):
self._optimizer._learning_rate_map[
default_main_program()
] = layers.create_global_var(
] = paddle.static.create_global_var(
name=unique_name.generate("learning_rate"),
shape=[1],
value=float(self._optimizer._learning_rate),
2 changes: 1 addition & 1 deletion python/paddle/fluid/contrib/optimizer.py
@@ -143,7 +143,7 @@ def _create_master_weight(self, param):

var_name = param.name + "_fp32_master"
var_name = unique_name.generate(var_name)
var = layers.create_global_var(
var = paddle.static.create_global_var(
name=var_name,
shape=param.shape,
value=0,
2 changes: 1 addition & 1 deletion python/paddle/fluid/dygraph/learning_rate_scheduler.py
@@ -68,7 +68,7 @@ def create_lr_var(self, lr):
"""
from .. import layers

lr = layers.create_global_var(
lr = paddle.static.create_global_var(
name=unique_name.generate("learning_rate"),
shape=[1],
value=float(lr),
3 changes: 2 additions & 1 deletion python/paddle/fluid/layers/control_flow.py
@@ -2368,9 +2368,10 @@ class Switch:
Examples:
.. code-block:: python
import paddle
import paddle.fluid as fluid
lr = fluid.layers.create_global_var(
lr = paddle.static.create_global_var(
shape=[1],
value=0.0,
dtype='float32',
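
The Switch docstring example above is cut off in this view after the create_global_var call. For orientation, here is a hedged sketch of how a complete Switch-based learning-rate assignment can look with the new API; the step variable and branch values below are illustrative and are not taken from the actual file:

import paddle
import paddle.fluid as fluid

paddle.enable_static()

# Global learning-rate variable, now created via paddle.static.
lr = paddle.static.create_global_var(
    shape=[1], value=0.0, dtype='float32', persistable=True, name="learning_rate")

# Illustrative constants; a real scheduler would use a step counter instead.
zero_var = fluid.layers.fill_constant(shape=[1], dtype='float32', value=0.0)
one_var = fluid.layers.fill_constant(shape=[1], dtype='float32', value=1.0)
two_var = fluid.layers.fill_constant(shape=[1], dtype='float32', value=2.0)
global_step = fluid.layers.fill_constant(shape=[1], dtype='float32', value=0.0)

with fluid.layers.Switch() as switch:
    with switch.case(global_step == zero_var):
        fluid.layers.assign(input=one_var, output=lr)  # lr = 1.0 when step == 0
    with switch.default():
        fluid.layers.assign(input=two_var, output=lr)  # lr = 2.0 otherwise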
4 changes: 2 additions & 2 deletions python/paddle/fluid/layers/learning_rate_scheduler.py
@@ -420,7 +420,7 @@ def piecewise_decay(boundaries, values):
else:
global_step = _decay_step_counter()

lr = tensor.create_global_var(
lr = paddle.static.create_global_var(
shape=[1],
value=0.0,
dtype='float32',
@@ -575,7 +575,7 @@ def linear_lr_warmup(learning_rate, warmup_steps, start_lr, end_lr):
)
return lr
else:
lr = tensor.create_global_var(
lr = paddle.static.create_global_var(
shape=[1],
value=0.0,
dtype=dtype,
87 changes: 0 additions & 87 deletions python/paddle/fluid/layers/tensor.py
@@ -12,25 +12,19 @@
# See the License for the specific language governing permissions and
# limitations under the License.

import math
import numpy
import warnings

from ..layer_helper import LayerHelper
from ..param_attr import ParamAttr
from ..initializer import Initializer
from ..framework import (
_current_expected_place,
convert_np_dtype_to_dtype_,
_non_static_mode,
_varbase_creator,
device_guard,
_in_legacy_dygraph,
in_dygraph_mode,
_get_paddle_place,
)
from ..framework import Variable
from ..initializer import Constant
from ..core import VarDesc
from .. import core
from .layer_function_generator import templatedoc
@@ -47,7 +41,6 @@
from paddle import _C_ops, _legacy_C_ops

__all__ = [
'create_global_var',
'cast',
'tensor_array_to_tensor',
'concat',
@@ -61,86 +54,6 @@
]


def create_global_var(
shape, value, dtype, persistable=False, force_cpu=False, name=None
):
"""
This function creates a new tensor variable with value in the global block (block 0).
Parameters:
shape (list[int]|tuple[int]): Shape of the variable
value (float): The value of the variable. The newly created
variable will be filled with it.
dtype (str): Data type of the variable
persistable (bool, optional): If this variable is persistable.
Default: False
force_cpu (bool, optional): Force this variable to be on CPU.
Default: False
name (str, optional): For detailed information, please refer to
:ref:`api_guide_Name`. Usually the name does not need to be set; it is None by default.
Returns:
Variable: The created Variable
Examples:
.. code-block:: python
import paddle
paddle.enable_static()
var = paddle.static.create_global_var(shape=[2,3], value=1.0, dtype='float32',
persistable=True, force_cpu=True, name='new_var')
"""
check_type(
shape, 'shape', (list, tuple, numpy.ndarray), 'create_global_var'
)
for item in shape:
check_type(
item,
'item of shape',
(
int,
numpy.uint8,
numpy.int8,
numpy.int16,
numpy.int32,
numpy.int64,
),
'create_global_var',
)

check_dtype(
dtype,
'dtype',
[
'bool',
'float16',
'float32',
'float64',
'int8',
'int16',
'int32',
'int64',
'uint8',
'uint16',
],
'create_global_var',
)

helper = LayerHelper("global_var", **locals())
var = helper.create_global_variable(
dtype=dtype,
shape=shape,
persistable=persistable,
name=name,
stop_gradient=True,
)
helper.set_variable_initializer(
var, initializer=Constant(value=float(value), force_cpu=force_cpu)
)

return var


def cast(x, dtype):
"""
(Diffs for the remaining changed files are not shown here.)
