
Commit 9667d25

Add einsum tests
co63oc committed May 17, 2023
1 parent f023d42 commit 9667d25
Showing 1 changed file with 60 additions and 4 deletions.
python/paddle/fluid/tests/unittests/test_einsum_op.py (60 additions & 4 deletions)
@@ -15,15 +15,18 @@
 import unittest
 
 import numpy as np
-from eager_op_test import OpTest
+from eager_op_test import OpTest, convert_float_to_uint16
 
 import paddle
+from paddle.fluid import core
 
 
 def einsum_wrapper(a, b):
     if not isinstance(a, list):
         a = [a]
-    return paddle._C_ops.einsum(a, b)
+    ret = paddle._C_ops.einsum(a, b)
+    # einsum returns a list [Tensor (not initialized)]; unwrap it and return the tensor
+    return ret[0]
 
 
 class TestEinsumBinary(OpTest):
@@ -33,10 +36,14 @@ def setUp(self):
         self.python_api = einsum_wrapper
         self.python_out_sig = ['Out']
         self.disable = False
+        self.init_dtype()
         self.set_mandatory()
         self.init_input()
         np.random.seed(123)
         out = np.einsum(self.equation, *self.inputs)
+        # for bfloat16, swap in the uint16-converted inputs prepared by init_input
+        if self.dtype == np.uint16:
+            self.inputs = self.bf16_inputs
         self.operands = []
         for idx, inp in enumerate(self.inputs):
             self.operands.append(("x" + str(idx), inp))
@@ -53,16 +60,28 @@ def setUp(self):
                 for i in range(len(self.operands))
             ],
         }
+        if self.dtype == np.uint16:
+            self.place = core.CUDAPlace(0)
+            self.outputs["Out"] = convert_float_to_uint16(self.outputs["Out"])
+
+    def init_dtype(self):
+        self.dtype = np.float64
 
     def init_input(self):
         self.inputs = []
+        self.bf16_inputs = []
         for t, s in zip(self.types, self.shapes):
-            self.inputs.append(np.random.random(s).astype(t))
+            input_data = np.random.random(s).astype(t)
+            self.inputs.append(input_data)
+            if self.dtype == np.uint16:
+                self.bf16_inputs.append(convert_float_to_uint16(input_data))
 
     def set_mandatory(self):
         self.shapes = [(10, 10, 20), (20, 6)]
-        self.types = [np.float64, np.float64]
+        self.types = [self.dtype, self.dtype]
         self.equation = "mij,jk->ki"
+        if self.dtype == np.uint16:
+            self.types = [self.np_dtype, self.np_dtype]
 
     def test_check_output(self):
         if not self.disable:
@@ -213,5 +232,42 @@ def set_mandatory(self):
         self.equation = "ijki,jkjk->"
 
 
+class TestEinsumFP16Op(TestEinsumBinary):
+    def init_dtype(self):
+        self.dtype = np.float16
+
+
+@unittest.skipIf(
+    not core.is_compiled_with_cuda()
+    or not core.is_bfloat16_supported(core.CUDAPlace(0)),
+    "core is not compiled with CUDA or does not support bfloat16",
+)
+class TestEinsumBF16Op(TestEinsumBinary):
+    def init_dtype(self):
+        self.dtype = np.uint16
+        self.np_dtype = np.float32
+
+    # use a simple equation; more complex contractions give large numeric differences in bfloat16
+    def set_mandatory(self):
+        self.shapes = [(10, 3, 10)]
+        self.types = [self.np_dtype]
+        self.equation = "iji->j"
+
+    def test_check_output(self):
+        if not self.disable:
+            self.check_output_with_place(
+                self.place, no_check_set=["InnerCache", "XShape"]
+            )
+
+    def test_grad(self):
+        if not self.disable:
+            self.check_grad_with_place(
+                self.place,
+                [op[0] for op in self.operands],
+                ["Out"],
+                numeric_grad_delta=0.05,
+            )
+
+
 if __name__ == "__main__":
     unittest.main()
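
For reference (not part of the commit), a minimal NumPy-only sketch of what the new TestEinsumBF16Op case exercises: the equation "iji->j" picks the diagonal entries x[i, j, i] and sums them over i for each j. This assumes nothing beyond NumPy and illustrates the contraction only, not Paddle's einsum kernel or the bfloat16 conversion done by convert_float_to_uint16.

import numpy as np

# shape matches the BF16 test case: (10, 3, 10), with the repeated index "i" on axes 0 and 2
x = np.random.random((10, 3, 10)).astype("float32")

# "iji->j": take the diagonal over axes 0 and 2, then sum over i for each j
ref = np.einsum("iji->j", x)
manual = np.array([sum(x[i, j, i] for i in range(10)) for j in range(3)])
assert np.allclose(ref, manual)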
