【Zero_dim】support zero_dim for some prim ops part2 #54907

Merged
test/legacy_test/test_assign_op.py: 10 additions, 1 deletion
@@ -32,10 +32,14 @@ def setUp(self):
self.public_python_api = paddle.assign
self.op_type = "assign"
self.prim_op_type = "prim"
x = np.random.random(size=(100, 10)).astype('float64')
self.init_input_configs()
x = np.random.random(size=self.shape).astype('float64')
self.inputs = {'X': x}
self.outputs = {'Out': x}

def init_input_configs(self):
self.shape = (100, 10)

def test_forward(self):
paddle.enable_static()
self.check_output()
@@ -47,6 +51,11 @@ def test_backward(self):
paddle.disable_static()


class TestAssignOp_ZeroDim(TestAssignOp):
def init_input_configs(self):
self.shape = ()


@unittest.skipIf(
not paddle.is_compiled_with_cuda(), "FP16 test runs only on GPU"
)
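
Note: TestAssignOp_ZeroDim only switches the input shape to (), so paddle.assign is exercised on a zero-dimensional (0-d) tensor; the expected Out is X itself, so the output shape must also be (). A minimal NumPy-only sketch of what shape () means here (illustration, not part of the change):

import numpy as np

x = np.random.random(size=())            # 0-d ndarray: one value, no axes
assert x.shape == () and x.ndim == 0 and x.size == 1
out = x.copy()                           # the reference Out is just X
assert out.shape == ()
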
test/legacy_test/test_erf_op.py: 9 additions, 1 deletion
@@ -30,12 +30,15 @@ def setUp(self):
self.public_python_api = paddle.erf
self.python_api = paddle.erf
self.dtype = self._init_dtype()
self.x_shape = [11, 17]
self.init_shape()
x = np.random.uniform(-1, 1, size=self.x_shape).astype(self.dtype)
y_ref = erf(x).astype(self.dtype)
self.inputs = {'X': x}
self.outputs = {'Out': y_ref}

def init_shape(self):
self.x_shape = [11, 17]

def _init_dtype(self):
return "float64"

@@ -46,6 +49,11 @@ def test_check_grad(self):
self.check_grad(['X'], 'Out', check_prim=True)


class TestErfOp_ZeroDim(TestErfOp):
def init_shape(self):
self.x_shape = []


class TestErfLayer(unittest.TestCase):
def _test_case(self, place):
x = np.random.uniform(-1, 1, size=(11, 17)).astype(np.float64)
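
Note: TestErfOp_ZeroDim only changes x_shape to [], so the scipy reference value is computed on a 0-d input; erf is elementwise, so the result keeps shape (). A standalone sketch of that reference computation (illustration only, assuming scipy is available as in the test file):

import numpy as np
from scipy.special import erf

x = np.random.uniform(-1, 1, size=[]).astype("float64")  # 0-d input
y_ref = erf(x)                                            # elementwise, shape preserved
assert np.asarray(y_ref).shape == ()
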
test/legacy_test/test_expand_as_v2_op.py: 25 additions, 0 deletions
@@ -54,6 +54,31 @@ def test_check_grad(self):
self.check_grad(['X'], 'Out', check_prim=True)


class TestExpandAs_ZeroDim1(TestExpandAsBasic):
def init_inputs_and_outputs(self):
x = np.random.random(()).astype(self.dtype)
target_tensor = np.random.random(1).astype(self.dtype)
self.inputs = {'X': x, "Y": target_tensor}
self.attrs = {'target_shape': target_tensor.shape}
bcast_dims = [1]
output = np.tile(self.inputs['X'], bcast_dims)
self.outputs = {'Out': output}


class TestExpandAs_ZeroDim2(TestExpandAsBasic):
def init_inputs_and_outputs(self):
x = np.random.random(()).astype(self.dtype)
target_tensor = np.random.random(()).astype(self.dtype)
self.inputs = {'X': x, "Y": target_tensor}
self.attrs = {'target_shape': target_tensor.shape}
bcast_dims = []
output = np.tile(self.inputs['X'], bcast_dims)
self.outputs = {'Out': output}

def if_enable_cinn(self):
self.enable_cinn = False


@unittest.skipIf(
not core.is_compiled_with_cuda()
or not core.is_bfloat16_supported(core.CUDAPlace(0)),
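
Note: both new expand_as cases build the expected output with np.tile on a 0-d input. With bcast_dims [1] (ZeroDim1) the result is a one-element vector; with empty bcast_dims (ZeroDim2) the shape stays (). A quick NumPy check of those two shapes (illustration only):

import numpy as np

x = np.random.random(())                  # 0-d input
assert np.tile(x, [1]).shape == (1,)      # ZeroDim1: target shape (1,)
assert np.tile(x, []).shape == ()         # ZeroDim2: target is also 0-d
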
test/legacy_test/test_expand_v2_op.py: 25 additions, 2 deletions
@@ -36,20 +36,43 @@ def setUp(self):
self.attrs = {'shape': self.shape}
output = np.tile(self.inputs['X'], self.expand_times)
self.outputs = {'Out': output}
self.enable_cinn = True
self.if_enable_cinn()

def init_data(self):
self.ori_shape = [100]
self.shape = [100]
self.expand_times = [1]

def if_enable_cinn(self):
pass

def test_check_output(self):
self.check_output(check_cinn=self.enable_cinn)
self.check_output(check_cinn=True)

def test_check_grad(self):
self.check_grad(['X'], 'Out', check_prim=True)


class TestExpandV2OpRank1_ZeroDim1(TestExpandV2OpRank1):
def init_data(self):
self.ori_shape = []
self.shape = [10]
self.expand_times = [10]

def if_enable_cinn(self):
self.enable_cinn = False


class TestExpandV2OpRank1_ZeroDim2(TestExpandV2OpRank1):
def init_data(self):
self.ori_shape = []
self.shape = []
self.expand_times = []

def if_enable_cinn(self):
pass


class TestExpandV2OpRank2_DimExpanding(TestExpandV2OpRank1):
def init_data(self):
self.ori_shape = [120]
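
Note: TestExpandV2OpRank1_ZeroDim1 expands a 0-d input to shape [10] (expand_times [10]), while _ZeroDim2 keeps both input and output 0-d. The expected outputs again come from np.tile; a short sketch of the two resulting shapes (illustration only):

import numpy as np

x = np.random.random([])                  # ori_shape []
assert np.tile(x, [10]).shape == (10,)    # ZeroDim1: expanded to shape [10]
assert np.tile(x, []).shape == ()         # ZeroDim2: shape stays ()
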
test/legacy_test/test_flatten2_op.py: 7 additions, 0 deletions
@@ -44,6 +44,13 @@ def init_attrs(self):
self.attrs = {"axis": self.axis}


class TestFlattenOp_ZeroDim(TestFlattenOp):
def init_test_case(self):
self.in_shape = ()
self.axis = 0
self.new_shape = 1


class TestFlattenOp1(TestFlattenOp):
def init_test_case(self):
self.in_shape = (3, 2, 5, 4)
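
Note: TestFlattenOp_ZeroDim flattens a 0-d input with axis 0, and new_shape = 1 indicates the flattened result is expected to be 1-d with a single element. A NumPy view of that reshape (illustration only; the actual expected output is built by the existing base test):

import numpy as np

x = np.random.random(())     # in_shape ()
out = x.reshape(1)           # new_shape 1 -> 1-d result
assert out.shape == (1,)
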
test/legacy_test/test_full_like_op.py: 7 additions, 0 deletions
@@ -142,6 +142,13 @@ def if_enable_cinn(self):
pass


class TestFullLikeOp1_ZeroDim(TestFullLikeOp1):
def init_data(self):
self.fill_value = 5
self.shape = []
self.dtype = np.float32


class TestFullLikeOp2(TestFullLikeOp1):
def init_data(self):
self.fill_value = 1000
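
Note: TestFullLikeOp1_ZeroDim only sets the reference shape to [], so full_like has to produce a 0-d output filled with 5. The equivalent NumPy behavior (illustration only):

import numpy as np

x = np.zeros([], dtype=np.float32)    # 0-d reference tensor
out = np.full_like(x, 5)
assert out.shape == () and out.dtype == np.float32 and float(out) == 5.0
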
test/legacy_test/test_gather_nd_op.py: 27 additions, 0 deletions
@@ -122,6 +122,33 @@ def test_check_grad(self):
self.check_grad(['X'], 'Out', check_prim=True)


class TestGatherNdOpWithIndex1_ZeroDim(TestGatherNdOpWithIndex1):
def setUp(self):
self.op_type = "gather_nd"
self.prim_op_type = "prim"
self.python_api = paddle.gather_nd
self.public_python_api = paddle.gather_nd
self.config_dtype()
self.if_enable_cinn()
if self.dtype == np.float64:
target_dtype = "float64"
elif self.dtype == np.float16:
target_dtype = "float16"
else:
target_dtype = "float32"
xnp = np.random.random((100,)).astype(target_dtype)
index = np.array([1]).astype("int32")
output = xnp[index[-1]]
if self.dtype == np.uint16:
xnp = convert_float_to_uint16(xnp)
output = convert_float_to_uint16(output)
self.inputs = {'X': xnp, 'Index': index}
self.outputs = {'Out': output}

def if_enable_cinn(self):
self.enable_cinn = False


class TestGatherNdOpWithIndex1FP16(TestGatherNdOpWithIndex1):
def config_dtype(self):
self.dtype = np.float16
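
Note: in TestGatherNdOpWithIndex1_ZeroDim the index has one entry per axis of X (a single int for the 1-d input), so gather_nd picks out exactly one element and the expected output is 0-d; the test builds it with plain scalar indexing. A small sketch of that expected value (illustration only):

import numpy as np

xnp = np.random.random((100,)).astype("float32")
index = np.array([1]).astype("int32")
out = xnp[index[-1]]                      # scalar index -> single element
assert np.asarray(out).shape == ()
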
test/legacy_test/test_reduce_op.py: 20 additions, 3 deletions
@@ -278,15 +278,18 @@ def setUp(self):
self.python_api = paddle.max
self.public_python_api = paddle.max
self.if_enable_cinn()
self.init_inputs_and_outputs()

def if_enable_cinn(self):
self.enable_cinn = False

def init_inputs_and_outputs(self):
self.inputs = {'X': np.random.random([]).astype("float64")}
self.attrs = {'dim': []}
self.outputs = {
'Out': self.inputs['X'].max(axis=tuple(self.attrs['dim']))
}

def if_enable_cinn(self):
self.enable_cinn = False

def test_check_output(self):
self.check_output()

@@ -300,6 +303,20 @@ def test_check_grad(self):
)


class TestMaxOp_ZeroDim1(TestMaxOp_ZeroDim):
def init_inputs_and_outputs(self):
self.inputs = {'X': np.random.random([5]).astype("float64")}
self.attrs = {'dim': [0]}
self.outputs = {'Out': self.inputs['X'].max(axis=(0,))}


class TestMaxOp_ZeroDim2(TestMaxOp_ZeroDim1):
def init_inputs_and_outputs(self):
self.inputs = {'X': np.random.random([5, 20]).astype("float64")}
self.attrs = {'dim': [0, 1]}
self.outputs = {'Out': self.inputs['X'].max(axis=(0, 1))}


class TestMaxFP32Op(OpTest):
"""Remove Max with subgradient from gradient check to confirm the success of CI."""

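
Note: the three TestMaxOp_ZeroDim cases all leave no remaining axes after the reduction (a 0-d input with dim [], a 1-d input with dim [0], and a 2-d input with dim [0, 1]), so each expected output has shape (). NumPy's max produces the same shapes (illustration only):

import numpy as np

assert np.asarray(np.random.random([]).max(axis=())).shape == ()
assert np.asarray(np.random.random([5]).max(axis=(0,))).shape == ()
assert np.asarray(np.random.random([5, 20]).max(axis=(0, 1))).shape == ()
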