Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[PHI]Standardise some C++ API (Part4) #47702

Merged
merged 7 commits into from
Nov 10, 2022
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion paddle/fluid/operators/controlflow/compare_op.cc
Original file line number Diff line number Diff line change
Expand Up @@ -100,7 +100,7 @@ class CompareOp : public framework::OperatorWithKernel {
char _##op_type##Comment::equation[]{_equation}; \
DECLARE_INFER_SHAPE_FUNCTOR(op_type, \
op_type##_InferShapeFunctor, \
PD_INFER_META(phi::CompareInferMeta)); \
PD_INFER_META(phi::CompareRawInferMeta)); \
REGISTER_OPERATOR( \
op_type, \
::paddle::operators::CompareOp<_##op_type##Comment>, \
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -400,7 +400,6 @@ class {{name | to_pascal_case}}OpMaker : public framework::SingleGradOpMaker<T>
grad_op->SetInput("{{attr_name | to_pascal_case}}Tensor", this->Input("{{attr_name | to_pascal_case}}Tensor"));
{% endif %}
{% else %}{# maybe something wrong: backward op has more attrs than the forward one#}
grad_op->AddAttr<{{attr["typename"] | to_op_attr_type}}>({{attr_name}}, "({{attr["typename"] | to_op_attr_type}}), exceptional attr {{attr_name}}");
grad_op->SetAttr("{{attr_name}}", {{process_default_value(attr)}});
{% endif %}
{% endfor %}
Expand Down
12 changes: 6 additions & 6 deletions paddle/fluid/pybind/eager_math_op_patch.cc
Original file line number Diff line number Diff line change
Expand Up @@ -841,7 +841,7 @@ static PyObject* tensor__gt__method(TensorObject* self,
VLOG(6) << "Calling greater_than_ad_func in tensor__gt__method";
{
eager_gil_scoped_release guard;
ret = greater_than_ad_func(self_tensor, other_tensor, -1);
ret = greater_than_ad_func(self_tensor, other_tensor);
}

return ToPyObject(ret);
Expand Down Expand Up @@ -927,7 +927,7 @@ static PyObject* tensor__ge__method(TensorObject* self,
VLOG(6) << "Calling greater_equal_ad_func in tensor__ge__method";
{
eager_gil_scoped_release guard;
ret = greater_equal_ad_func(self_tensor, other_tensor, -1);
ret = greater_equal_ad_func(self_tensor, other_tensor);
}

return ToPyObject(ret);
Expand Down Expand Up @@ -1204,7 +1204,7 @@ static PyObject* tensor__lt__method(TensorObject* self,
VLOG(6) << "Calling less_than_ad_func in tensor__lt__method";
{
eager_gil_scoped_release guard;
ret = less_than_ad_func(self_tensor, other_tensor, -1);
ret = less_than_ad_func(self_tensor, other_tensor);
}

return ToPyObject(ret);
Expand Down Expand Up @@ -1290,7 +1290,7 @@ static PyObject* tensor__le__method(TensorObject* self,
VLOG(6) << "Calling less_equal_ad_func in tensor__le__method";
{
eager_gil_scoped_release guard;
ret = less_equal_ad_func(self_tensor, other_tensor, -1);
ret = less_equal_ad_func(self_tensor, other_tensor);
}

return ToPyObject(ret);
Expand Down Expand Up @@ -1636,7 +1636,7 @@ static PyObject* tensor__ne__method(TensorObject* self,
VLOG(6) << "Calling not_equal_ad_func in tensor__ne__method";
{
eager_gil_scoped_release guard;
ret = not_equal_ad_func(self_tensor, other_tensor, -1);
ret = not_equal_ad_func(self_tensor, other_tensor);
}

return ToPyObject(ret);
Expand Down Expand Up @@ -1722,7 +1722,7 @@ static PyObject* tensor__eq__method(TensorObject* self,
VLOG(6) << "Calling equal_ad_func in tensor__eq__method";
{
eager_gil_scoped_release guard;
ret = equal_ad_func(self_tensor, other_tensor, -1);
ret = equal_ad_func(self_tensor, other_tensor);
}

return ToPyObject(ret);
Expand Down
20 changes: 10 additions & 10 deletions paddle/phi/api/yaml/legacy_backward.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -67,7 +67,7 @@
func : addmm_grad

- backward_op : affine_grid_grad
forward : affine_grid (Tensor input, IntArray outputShape, bool use_cudnn=true, bool align_corners=true) -> Tensor(output)
forward : affine_grid (Tensor input, IntArray outputShape, bool align_corners=true, bool use_cudnn=true) -> Tensor(output)
args : (Tensor output_grad, IntArray outputShape, bool use_cudnn=true, bool align_corners=true)
output : Tensor(input_grad)
infer_meta :
Expand Down Expand Up @@ -577,8 +577,8 @@
inplace : (out_grad -> x_grad)

- backward_op : fmax_grad
forward : fmax(Tensor x, Tensor y, int axis) -> Tensor(out)
args : (Tensor x, Tensor y, Tensor out_grad, int axis)
forward : fmax(Tensor x, Tensor y) -> Tensor(out)
args : (Tensor x, Tensor y, Tensor out_grad, int axis = -1)
output : Tensor(x_grad), Tensor(y_grad)
infer_meta :
func : GeneralBinaryGradInferMeta
Expand All @@ -587,8 +587,8 @@
func : fmax_grad

- backward_op : fmin_grad
forward : fmin(Tensor x, Tensor y, int axis) -> Tensor(out)
args : (Tensor x, Tensor y, Tensor out_grad, int axis)
forward : fmin(Tensor x, Tensor y) -> Tensor(out)
args : (Tensor x, Tensor y, Tensor out_grad, int axis = -1)
output : Tensor(x_grad), Tensor(y_grad)
infer_meta :
func : GeneralBinaryGradInferMeta
Expand Down Expand Up @@ -684,8 +684,8 @@
func : gumbel_softmax_grad

- backward_op : hardswish_grad
forward : hardswish (Tensor x, float threshold = 6.0, float scale = 6.0, float offset = 3.0) -> Tensor(out)
args : (Tensor x, Tensor out_grad, float threshold, float scale, float offset)
forward : hardswish (Tensor x) -> Tensor(out)
args : (Tensor x, Tensor out_grad, float threshold = 6.0, float scale = 6.0, float offset = 3.0)
output : Tensor(x_grad)
infer_meta :
func : UnchangedInferMeta
Expand Down Expand Up @@ -1407,8 +1407,8 @@
invoke : real_grad_impl(out_grad, x_grad)

- backward_op : relu6_grad
forward : relu6 (Tensor x, float threshold) -> Tensor(out)
args : (Tensor out, Tensor out_grad, float threshold)
forward : relu6 (Tensor x) -> Tensor(out)
args : (Tensor out, Tensor out_grad, float threshold = 6)
output : Tensor(x_grad)
infer_meta :
func : UnchangedInferMeta
Expand Down Expand Up @@ -1799,7 +1799,7 @@
optional: u_grad, vh_grad, s_grad

- backward_op : swish_grad
forward : swish (Tensor x, float beta=1.0) -> Tensor(out)
forward : swish (Tensor x) -> Tensor(out)
args : (Tensor x, Tensor out_grad, float bete=1.0)
output : Tensor(x_grad)
infer_meta :
Expand Down
25 changes: 12 additions & 13 deletions paddle/phi/api/yaml/legacy_ops.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -97,7 +97,7 @@
backward : addmm_grad

- op : affine_grid
args : (Tensor input, IntArray outputShape, bool use_cudnn=true, bool align_corners=true)
args : (Tensor input, IntArray outputShape, bool align_corners=true, bool use_cudnn=true)
output : Tensor
infer_meta :
func : AffineGridInferMeta
Expand Down Expand Up @@ -649,7 +649,7 @@
backend : place > x

- op : equal
args : (Tensor x, Tensor y, int axis = -1)
args : (Tensor x, Tensor y)
output : Tensor(out)
infer_meta :
func : CompareInferMeta
Expand Down Expand Up @@ -751,7 +751,7 @@
func : floor_divide

- op : fmax
args : (Tensor x, Tensor y, int axis)
args : (Tensor x, Tensor y)
output : Tensor(out)
infer_meta :
param: [x, y]
Expand All @@ -761,7 +761,7 @@
backward : fmax_grad

- op : fmin
args : (Tensor x, Tensor y, int axis)
args : (Tensor x, Tensor y)
output : Tensor(out)
infer_meta :
param: [x, y]
Expand Down Expand Up @@ -898,15 +898,15 @@
func : generate_proposals_v2

- op : greater_equal
args : (Tensor x, Tensor y, int axis = -1)
args : (Tensor x, Tensor y)
output : Tensor(out)
infer_meta :
func : CompareInferMeta
kernel :
func : greater_equal

- op : greater_than
args : (Tensor x, Tensor y, int axis = -1)
args : (Tensor x, Tensor y)
output : Tensor(out)
infer_meta :
func : CompareInferMeta
Expand Down Expand Up @@ -945,7 +945,7 @@
backward : gumbel_softmax_grad

- op : hardswish
args : (Tensor x, float threshold = 6.0, float scale = 6.0, float offset = 3.0)
args : (Tensor x)
output : Tensor
infer_meta :
func : UnchangedInferMeta
Expand Down Expand Up @@ -1180,15 +1180,15 @@
backward : lerp_grad

- op : less_equal
args : (Tensor x, Tensor y, int axis = -1)
args : (Tensor x, Tensor y)
output : Tensor(out)
infer_meta :
func : CompareInferMeta
kernel :
func : less_equal

- op : less_than
args : (Tensor x, Tensor y, int axis = -1)
args : (Tensor x, Tensor y)
output : Tensor(out)
infer_meta :
func : CompareInferMeta
Expand Down Expand Up @@ -1623,7 +1623,7 @@
backward : norm_grad

- op : not_equal
args : (Tensor x, Tensor y, int axis = -1)
args : (Tensor x, Tensor y)
output : Tensor(out)
infer_meta :
func : CompareInferMeta
Expand Down Expand Up @@ -1820,7 +1820,7 @@
backward : real_grad

- op : relu6
args : (Tensor x, float threshold)
args : (Tensor x)
output : Tensor
infer_meta :
func : UnchangedInferMeta
Expand Down Expand Up @@ -2192,9 +2192,8 @@
func : svd
backward : svd_grad

# The python API paddle.nn.functional.swish has no `bete` argument, it may be removed later
- op : swish
args : (Tensor x, float beta=1.0)
args : (Tensor x)
output : Tensor(out)
infer_meta :
func : UnchangedInferMeta
Expand Down
4 changes: 2 additions & 2 deletions paddle/phi/api/yaml/sparse_backward.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -251,8 +251,8 @@
pow_csr_grad {sparse_csr, sparse_csr -> sparse_csr}

- backward_op : relu6_grad
forward : relu6(Tensor x, float threshold) -> Tensor(out)
args : (Tensor out, Tensor out_grad, float threshold)
forward : relu6(Tensor x) -> Tensor(out)
args : (Tensor out, Tensor out_grad, float threshold = 6)
output : Tensor(x_grad)
infer_meta :
func : UnchangedInferMeta
Expand Down
2 changes: 1 addition & 1 deletion paddle/phi/api/yaml/sparse_ops.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -213,7 +213,7 @@
backward : relu_grad

- op : relu6
args : (Tensor x, float threshold)
args : (Tensor x)
output : Tensor(out)
infer_meta :
func : UnchangedInferMeta
Expand Down
14 changes: 10 additions & 4 deletions paddle/phi/infermeta/binary.cc
Original file line number Diff line number Diff line change
Expand Up @@ -328,10 +328,10 @@ void CholeskySolveInferMeta(const MetaTensor& x,
out->share_lod(x);
}

void CompareInferMeta(const MetaTensor& x,
const MetaTensor& y,
int axis,
MetaTensor* out) {
void CompareRawInferMeta(const MetaTensor& x,
const MetaTensor& y,
int axis,
MetaTensor* out) {
auto dim_x = x.dims();
auto dim_y = y.dims();

Expand All @@ -358,6 +358,12 @@ void CompareInferMeta(const MetaTensor& x,
out->set_dtype(DataType::BOOL);
}

// Shape/dtype inference for the element-wise compare ops (equal, not_equal,
// less_than, less_equal, greater_than, greater_equal) that take no axis
// attribute. Convenience overload: delegates to CompareRawInferMeta with the
// default axis of -1, matching the axis the eager-mode callers previously
// passed explicitly before it was dropped from their signatures.
void CompareInferMeta(const MetaTensor& x,
                      const MetaTensor& y,
                      MetaTensor* out) {
  CompareRawInferMeta(x, y, -1, out);
}

void CompareAllInferMeta(const MetaTensor& x,
const MetaTensor& y,
MetaTensor* out) {
Expand Down
6 changes: 5 additions & 1 deletion paddle/phi/infermeta/binary.h
Original file line number Diff line number Diff line change
Expand Up @@ -69,9 +69,13 @@ void CompareAllInferMeta(const MetaTensor& x,

// Axis-free shape/dtype inference for binary compare ops; forwards to
// CompareRawInferMeta with axis = -1 (see binary.cc).
void CompareInferMeta(const MetaTensor& x,
                      const MetaTensor& y,
                      MetaTensor* out);

// Raw variant taking an explicit broadcast axis; retained for the legacy
// fluid CompareOp registration (see DECLARE_INFER_SHAPE_FUNCTOR use in
// paddle/fluid/operators/controlflow/compare_op.cc).
void CompareRawInferMeta(const MetaTensor& x,
                         const MetaTensor& y,
                         int axis,
                         MetaTensor* out);

void ComplexInferMeta(const MetaTensor& x,
const MetaTensor& y,
MetaTensor* out);
Expand Down
Loading