[Zero-Dim] change Tensor.numpy to other equivalent usage, avoid hack #52197

Merged 1 commit on Mar 29, 2023
7 changes: 4 additions & 3 deletions paddle/fluid/pybind/eager_method.cc
@@ -141,10 +141,11 @@ static PyObject* tensor_method_numpy(TensorObject* self,
             "order to avoid this problem, "
             "0D Tensor will be changed to 1D numpy currently, but it's not "
             "correct and will be "
-            "removed in future. Please modify "
-            " 'Tensor.numpy()[0]' to 'float(Tensor)' as soon as "
+            "removed in future. For Tensor contain only one element, Please "
+            "modify "
+            " 'Tensor.numpy()[0]' to 'Tensor.item()' as soon as "
             "possible, "
-            "otherwise 'Tensor.numpy()[0]' will raise error";
+            "otherwise 'Tensor.numpy()[0]' will raise error in future.";
        py_rank = 1;
        py_dims[0] = 1;
        py_strides[0] = sizeof_dtype * numel;
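The rewritten warning points users at Tensor.item() instead of the old float(Tensor) suggestion. A minimal sketch of the recommended migration, assuming a Paddle 2.x dygraph session:

    import paddle

    t = paddle.to_tensor(3.14)  # scalar tensor (0-D once zero-dim support lands)

    # Deprecated: relies on the temporary hack that returns 0-D tensors as 1-D numpy.
    # value = t.numpy()[0]

    # Recommended: item() extracts the Python scalar from any one-element tensor.
    value = t.item()
    print(value)  # 3.14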
@@ -83,7 +83,7 @@ def _recv_shape_dtype(self, group):
         # recv stop_gradient
         stop_grad = paddle.to_tensor([0])
         paddle.distributed.recv(stop_grad, src=src_rank, group=group)
-        return shape.numpy().tolist(), dtype.item(), stop_grad.item()
+        return shape.tolist(), dtype.item(), stop_grad.item()

     def recv_meta(self, group):
         tensor_type = paddle.to_tensor([0])
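The shape meta tensor holds several elements, so the fix here is Tensor.tolist(), which subsumes the old .numpy().tolist() round-trip. A sketch with a stand-in tensor (the real one arrives via paddle.distributed.recv):

    import paddle

    shape = paddle.to_tensor([2, 3, 4], dtype='int64')  # stand-in for the received meta
    assert shape.tolist() == shape.numpy().tolist() == [2, 3, 4]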
2 changes: 1 addition & 1 deletion python/paddle/fluid/dataloader/dataset.py
@@ -514,7 +514,7 @@ def random_split(dataset, lengths, generator=None):
         )
     # TODO(@Joejiong): support Variable or Tensor type with .tolist class member function.
     # For example var.item() and var.tolist()
-    indices = paddle.randperm(sum(lengths)).numpy().tolist()
+    indices = paddle.randperm(sum(lengths)).tolist()
     return [
         Subset(dataset, indices[offset - length : offset])
         for offset, length in zip(_accumulate(lengths), lengths)
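This resolves the TODO above it: paddle.randperm already returns a Tensor whose tolist() yields plain Python ints. An illustrative sketch, substituting the stdlib accumulate for the module-private _accumulate:

    import paddle
    from itertools import accumulate

    lengths = [3, 2]
    indices = paddle.randperm(sum(lengths)).tolist()  # e.g. [4, 0, 3, 1, 2]
    subsets = [indices[offset - length:offset]
               for offset, length in zip(accumulate(lengths), lengths)]
    print(subsets)  # two disjoint index lists of sizes 3 and 2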
8 changes: 4 additions & 4 deletions python/paddle/fluid/dygraph/math_op_patch.py
@@ -140,21 +140,21 @@ def _float_(var):
         ), "only one element variable can be converted to float."
         tensor = var.value().get_tensor()
         assert tensor._is_initialized(), "variable's tensor is not initialized"
-        return float(var.numpy().flatten()[0])
+        return float(var.item())

     def _long_(var):
         numel = np.prod(var.shape)
         assert numel == 1, "only one element variable can be converted to long."
         tensor = var.value().get_tensor()
         assert tensor._is_initialized(), "variable's tensor is not initialized"
-        return int(var.numpy().flatten()[0])
+        return int(var.item())

     def _int_(var):
         numel = np.prod(var.shape)
         assert numel == 1, "only one element variable can be converted to int."
         tensor = var.value().get_tensor()
         assert tensor._is_initialized(), "variable's tensor is not initialized"
-        return int(var.numpy().flatten()[0])
+        return int(var.item())

     def _len_(var):
         assert var.ndim > 0, "len() of a 0D tensor is wrong"
@@ -172,7 +172,7 @@ def _index_(var):
         ), "only one element variable can be converted to python index."
         tensor = var.value().get_tensor()
         assert tensor._is_initialized(), "variable's tensor is not initialized"
-        return int(var.numpy().flatten()[0])
+        return int(var.item())

     @property
     def _ndim_(var):
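All four patched magic methods now funnel through Tensor.item(), which both enforces the one-element requirement and returns a native Python scalar. A behavioral sketch:

    import paddle

    v = paddle.to_tensor([7], dtype='int64')
    print(float(v), int(v))  # 7.0 7 -- both route through v.item() after this change

    # item() rejects multi-element tensors, preserving the intent of the asserts:
    try:
        paddle.to_tensor([1, 2]).item()
    except Exception as e:
        print(type(e).__name__)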
8 changes: 4 additions & 4 deletions python/paddle/fluid/dygraph/varbase_patch_methods.py
@@ -379,8 +379,8 @@ def gradient(self):
             if self.grad is None:
                 return None
             if self.grad.is_selected_rows():
-                return (np.array(self.grad.numpy()), np.array(self.grad.rows()))
-            return self.grad.numpy()
+                return (np.array(self.grad), np.array(self.grad.rows()))
+            return np.array(self.grad)
         else:
             if self._grad_ivar() is None:
                 return None
@@ -735,11 +735,11 @@ def __nonzero__(self):
         ), "When Variable is used as the condition of if/while , Variable can only contain one element."
         if framework.global_var._in_eager_mode_:
             assert self._is_initialized(), "tensor not initialized"
-            return bool(np.all(self.numpy() > 0))
+            return bool(self.item() > 0)
         else:
             tensor = self.value().get_tensor()
             assert tensor._is_initialized(), "tensor not initialized"
-            return bool(np.all(tensor.__array__() > 0))
+            return bool(self.item() > 0)

     def __bool__(self):
         return self.__nonzero__()
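In __nonzero__, the surrounding assertion already guarantees a single element, so bool(self.item() > 0) is equivalent to the old np.all reduction without a numpy copy. Sketch:

    import paddle

    flag = paddle.to_tensor([1.0])
    if flag:  # dispatches to __bool__/__nonzero__, i.e. bool(flag.item() > 0)
        print("truthy")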
4 changes: 2 additions & 2 deletions python/paddle/fluid/layers/control_flow.py
@@ -1150,7 +1150,7 @@ def body(i, ten):
         )

     if in_dygraph_mode():
-        now_cond = pre_cond.numpy().item()
+        now_cond = pre_cond.item()
         while now_cond:
             output_vars = body(*loop_vars)
             if not isinstance(output_vars, (list, tuple)):
@@ -1160,7 +1160,7 @@ def body(i, ten):
                     "body in while_loop should return the same arity "
                     "(length and structure) and types as loop_vars"
                 )
-            now_cond = cond(*output_vars).numpy().item()
+            now_cond = cond(*output_vars).item()
             map_structure(assign_skip_lod_tensor_array, output_vars, loop_vars)
         return loop_vars
     else:
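In dygraph mode while_loop is just an eager Python loop, so the condition tensor can be read with .item() each iteration. A minimal sketch of the equivalent pattern:

    import paddle

    i = paddle.to_tensor([0])
    now_cond = (i < 3).item()  # .item() replaces .numpy().item()
    while now_cond:
        i = i + 1
        now_cond = (i < 3).item()
    print(i.item())  # 3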
8 changes: 4 additions & 4 deletions python/paddle/framework/io.py
@@ -63,7 +63,7 @@ def _build_saved_state_dict(state_dict):
                 raise ValueError(
                     "The saved tensor is not initialized. If you used group sharded, please use save_group_sharded_model."
                 )
-            save_dict[key] = value.numpy()
+            save_dict[key] = np.array(value)
             name_table[key] = value.name
         else:
             save_dict[key] = value
@@ -92,7 +92,7 @@ def _load_state_dict_from_save_inference_model(model_path, config):
     # 3. construct state_dict
     load_param_dict = {}
     for var_name in persistable_var_dict:
-        load_param_dict[var_name] = persistable_var_dict[var_name].numpy()
+        load_param_dict[var_name] = np.array(persistable_var_dict[var_name])

     # if *.info exists, we can recover structured_name
     var_info_filename = str(config.params_filename) + ".info"
@@ -146,7 +146,7 @@ def _load_state_dict_from_save_params(model_path):
     # 3. construct state_dict
     load_param_dict = {}
     for var in load_var_list:
-        load_param_dict[var.name] = var.numpy()
+        load_param_dict[var.name] = np.array(var)

     return load_param_dict

@@ -291,7 +291,7 @@ def _pickle_save(obj, f, protocol):
         )

     def reduce_varbase(self):
-        data = self.numpy()
+        data = np.array(self)
         name = self.name

         return (tuple, ((name, data),))
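Throughout the save path, np.array(tensor) replaces tensor.numpy(); numpy's array protocol copies the buffer regardless of rank, so 0-D tensors no longer trigger the compatibility warning. Sketch:

    import numpy as np
    import paddle

    value = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0]])
    save_dict = {'w': np.array(value)}  # rank-agnostic equivalent of value.numpy()
    assert isinstance(save_dict['w'], np.ndarray) and save_dict['w'].shape == (2, 2)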
4 changes: 1 addition & 3 deletions python/paddle/framework/io_utils.py
@@ -180,9 +180,7 @@ def _load_program_scope(main=None, startup=None, scope=None):
 @static_only
 def _legacy_static_save(param_dict, model_path, protocol=2):
     def get_tensor(var):
-        if isinstance(var, (core.VarBase, core.eager.Tensor)):
-            return var.numpy()
-        elif isinstance(var, core.LoDTensor):
+        if isinstance(var, (core.VarBase, core.eager.Tensor, core.LoDTensor)):
             return np.array(var)
         return var
2 changes: 1 addition & 1 deletion python/paddle/hapi/model.py
@@ -61,7 +61,7 @@ def to_numpy(var):
         var, (Variable, fluid.core.VarBase, fluid.core.eager.Tensor)
     ), "not a variable"
     if isinstance(var, (fluid.core.VarBase, fluid.core.eager.Tensor)):
-        return var.numpy()
+        return np.array(var)
     t = global_scope().find_var(var.name).get_tensor()
     return np.array(t)
14 changes: 7 additions & 7 deletions python/paddle/metric/metrics.py
@@ -292,7 +292,7 @@ def update(self, correct, *args):
             Tensor: the accuracy of current step.
         """
         if isinstance(correct, (paddle.Tensor, paddle.fluid.core.eager.Tensor)):
-            correct = correct.numpy()
+            correct = np.array(correct)
         num_samples = np.prod(np.array(correct.shape[:-1]))
         accs = []
         for i, k in enumerate(self.topk):
@@ -420,12 +420,12 @@ def update(self, preds, labels):
                 The data type is 'int32' or 'int64'.
         """
         if isinstance(preds, (paddle.Tensor, paddle.fluid.core.eager.Tensor)):
-            preds = preds.numpy()
+            preds = np.array(preds)
         elif not _is_numpy_(preds):
             raise ValueError("The 'preds' must be a numpy ndarray or Tensor.")

         if isinstance(labels, (paddle.Tensor, paddle.fluid.core.eager.Tensor)):
-            labels = labels.numpy()
+            labels = np.array(labels)
         elif not _is_numpy_(labels):
             raise ValueError("The 'labels' must be a numpy ndarray or Tensor.")

@@ -553,12 +553,12 @@ def update(self, preds, labels):
                 Shape: [batch_size, 1], Dtype: 'int32' or 'int64'.
         """
         if isinstance(preds, (paddle.Tensor, paddle.fluid.core.eager.Tensor)):
-            preds = preds.numpy()
+            preds = np.array(preds)
         elif not _is_numpy_(preds):
             raise ValueError("The 'preds' must be a numpy ndarray or Tensor.")

         if isinstance(labels, (paddle.Tensor, paddle.fluid.core.eager.Tensor)):
-            labels = labels.numpy()
+            labels = np.array(labels)
         elif not _is_numpy_(labels):
             raise ValueError("The 'labels' must be a numpy ndarray or Tensor.")

@@ -705,12 +705,12 @@ def update(self, preds, labels):
                 representing the label of the instance i.
         """
         if isinstance(labels, (paddle.Tensor, paddle.fluid.core.eager.Tensor)):
-            labels = labels.numpy()
+            labels = np.array(labels)
         elif not _is_numpy_(labels):
             raise ValueError("The 'labels' must be a numpy ndarray or Tensor.")

         if isinstance(preds, (paddle.Tensor, paddle.fluid.core.eager.Tensor)):
-            preds = preds.numpy()
+            preds = np.array(preds)
         elif not _is_numpy_(preds):
             raise ValueError("The 'preds' must be a numpy ndarray or Tensor.")
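Each metric's update() now coerces incoming Tensors with np.array(...) before the numpy-side bookkeeping. A usage sketch against the public Accuracy metric:

    import paddle

    m = paddle.metric.Accuracy()
    correct = m.compute(paddle.to_tensor([[0.1, 0.9]]),  # pred probabilities
                        paddle.to_tensor([[1]]))         # label
    m.update(correct)  # update() converts the Tensor via np.array(correct)
    print(m.accumulate())  # 1.0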
2 changes: 1 addition & 1 deletion python/paddle/nn/decode.py
@@ -712,7 +712,7 @@ def _maybe_copy(state, new_state, step_mask):

         step_idx = 0
         step_idx_tensor = paddle.full(shape=[1], fill_value=step_idx, dtype="int64")
-        while cond.numpy():
+        while cond.item():
             (step_outputs, next_states, next_inputs, next_finished) = decoder.step(
                 step_idx_tensor, inputs, states, **kwargs
             )
2 changes: 1 addition & 1 deletion python/paddle/nn/functional/common.py
@@ -496,7 +496,7 @@ def _is_list_or_turple_(data):
     else:
         if in_dynamic_mode():
             if isinstance(out_shape, Variable):
-                out_shape = list(out_shape.numpy())
+                out_shape = list(out_shape.numpy(False))
             else:
                 out_shape = list(out_shape)
4 changes: 2 additions & 2 deletions python/paddle/nn/functional/pooling.py
@@ -706,7 +706,7 @@ def _unpool_output_size(x, kernel_size, stride, padding, output_size):
     else:
         for i, var in enumerate(output_size):
             if isinstance(var, Variable):
-                output_size[i] = var.numpy().item()
+                output_size[i] = var.item()

     if len(output_size) == len(kernel_size) + 2:
         output_size = output_size[2:]
@@ -1609,7 +1609,7 @@ def adaptive_avg_pool2d(x, output_size, data_format='NCHW', name=None):

     if in_dygraph_mode():
         output_size = [
-            item.numpy().item(0) if isinstance(item, Variable) else item
+            item.item(0) if isinstance(item, Variable) else item
             for item in output_size
         ]
         # output_size support Variable in static graph mode
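The pooling helpers accept sizes that may arrive as one-element Tensors; item() (or the indexed item(0)) pulls out the Python int directly. Sketch of the normalization:

    import paddle

    output_size = [paddle.to_tensor([7], dtype='int64'), 9]
    # normalize mixed Tensor/int entries, as _unpool_output_size now does:
    output_size = [s.item() if isinstance(s, paddle.Tensor) else s
                   for s in output_size]
    print(output_size)  # [7, 9]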
4 changes: 2 additions & 2 deletions python/paddle/optimizer/optimizer.py
@@ -382,9 +382,9 @@ def set_state_dict(self, state_dict):
                 load_para = state_dict[var_tmp.name]

                 if isinstance(load_para, Variable):
-                    load_para_np = load_para.numpy()
+                    load_para_np = np.array(load_para)
                 elif isinstance(load_para, core.VarBase):
-                    load_para_np = load_para.numpy()
+                    load_para_np = np.array(load_para)
                 elif isinstance(load_para, np.ndarray):
                     load_para_np = load_para
                 else:
8 changes: 4 additions & 4 deletions python/paddle/quantization/imperative/ptq_quantizer.py
@@ -54,13 +54,13 @@ def combine_abs_max_and_hist(
         return origin_max, origin_hist
     elif origin_max == 0.0:
         new_hist, _ = np.histogram(
-            paddle.abs(tensor).numpy(), range=(0, new_max), bins=bins
+            paddle.abs(tensor).numpy(False), range=(0, new_max), bins=bins
         )
         new_hist = new_hist.astype(np.float32)
         return new_max, new_hist
     elif new_max <= origin_max:
         new_hist, _ = np.histogram(
-            paddle.abs(tensor).numpy(), range=(0, origin_max), bins=bins
+            paddle.abs(tensor).numpy(False), range=(0, origin_max), bins=bins
         )
         new_hist = new_hist.astype(np.float32)
         new_hist += origin_hist
@@ -84,7 +84,7 @@ def combine_abs_max_and_hist(
         sampled_hist = sampled_hist.astype(np.float32)

         new_hist, _ = np.histogram(
-            paddle.abs(tensor).numpy(), range=(0, new_max), bins=bins
+            paddle.abs(tensor).numpy(False), range=(0, new_max), bins=bins
         )
         new_hist = new_hist.astype(np.float32)
         new_hist += sampled_hist
@@ -189,7 +189,7 @@ def sample_data(self, layer, tensors):
                 self.hists.append(None)
             else:
                 hist, _ = np.histogram(
-                    paddle.abs(tensor).numpy(),
+                    paddle.abs(tensor).numpy(False),
                     range=(0.0, abs_max_vals[idx]),
                     bins=self.bins,
                 )
2 changes: 1 addition & 1 deletion python/paddle/static/nn/metric.py
@@ -76,7 +76,7 @@ def accuracy(input, label, k=1, correct=None, total=None):
         if total is None:
             total = _varbase_creator(dtype="int32")

-        _k = k.numpy().item(0) if isinstance(k, Variable) else k
+        _k = k.item(0) if isinstance(k, Variable) else k
         topk_out, topk_indices = _legacy_C_ops.top_k_v2(
             input, 'k', _k, 'sorted', False
         )
4 changes: 2 additions & 2 deletions python/paddle/tensor/array.py
@@ -119,7 +119,7 @@ def array_read(array, i):
         assert i.shape == [
             1
         ], "The shape of index 'i' should be [1] in dygraph mode"
-        i = i.numpy().item(0)
+        i = i.item(0)
         return array[i]
     else:
         check_variable_and_dtype(i, 'i', ['int64'], 'array_read')
@@ -179,7 +179,7 @@ def array_write(x, i, array=None):
         assert i.shape == [
             1
         ], "The shape of index 'i' should be [1] in dygraph mode"
-        i = i.numpy().item(0)
+        i = i.item(0)
         if array is None:
             array = create_array(x.dtype)
         assert isinstance(
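array_read and array_write accept a one-element int64 Tensor index in dygraph, now unwrapped with i.item(0). A usage sketch, assuming the paddle.tensor.array_* entry points:

    import paddle

    arr = paddle.tensor.create_array(dtype='float32')
    i = paddle.zeros(shape=[1], dtype='int64')  # Tensor index, as the asserts require
    x = paddle.full(shape=[2], fill_value=5.0, dtype='float32')
    arr = paddle.tensor.array_write(x, i, array=arr)  # internally: i = i.item(0)
    print(paddle.tensor.array_read(arr, i))  # Tensor([5., 5.])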
4 changes: 2 additions & 2 deletions python/paddle/tensor/manipulation.py
@@ -334,7 +334,7 @@ def slice(input, axes, starts, ends):
                 for item in starts
             ]
         elif isinstance(starts, tmp_tensor_type):
-            tensor_t = starts.numpy()
+            tensor_t = starts.numpy(False)
             starts = [ele for ele in tensor_t]
             infer_flags = [-1 for i in range(len(axes))]

@@ -344,7 +344,7 @@ def slice(input, axes, starts, ends):
                 for item in ends
             ]
         elif isinstance(ends, tmp_tensor_type):
-            tensor_t = ends.numpy()
+            tensor_t = ends.numpy(False)
             ends = [ele for ele in tensor_t]
             infer_flags = [-1 for i in range(len(axes))]
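slice unpacks Tensor-typed starts/ends into Python lists; numpy(False) here appears to be an internal variant that skips the 0-D compatibility warning, so this sketch sticks to the public API:

    import paddle

    x = paddle.arange(12, dtype='float32').reshape([3, 4])
    starts = paddle.to_tensor([0, 1], dtype='int64')  # Tensor-typed, as in the diff
    ends = paddle.to_tensor([2, 3], dtype='int64')
    out = paddle.slice(x, axes=[0, 1], starts=starts, ends=ends)
    print(out.shape)  # [2, 2]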
4 changes: 2 additions & 2 deletions python/paddle/utils/layers_utils.py
@@ -456,12 +456,12 @@ def convert_shape_to_list(shape):
     if isinstance(shape, (list, tuple)):
         shape = list(
             map(
-                lambda x: x.numpy().flat[0] if isinstance(x, Variable) else x,
+                lambda x: x.item(0) if isinstance(x, Variable) else x,
                 shape,
             )
         )
     else:
-        shape = shape.numpy().astype(int).tolist()
+        shape = shape.astype(int).tolist()
     return shape
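convert_shape_to_list handles both a list mixing ints with one-element Tensors and a single shape Tensor; both branches now stay on the Tensor API. A sketch of the two paths, mirroring the merged code:

    import paddle

    # branch 1: list mixing Python ints and one-element Tensors
    shape = [2, paddle.to_tensor([3], dtype='int64')]
    print([x.item(0) if isinstance(x, paddle.Tensor) else x for x in shape])  # [2, 3]

    # branch 2: a single shape Tensor
    shape_t = paddle.to_tensor([4, 5], dtype='int64')
    print(shape_t.astype(int).tolist())  # [4, 5]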