27 | 27 |
28 | 28 | paddle.enable_static() |
29 | 29 |
| 30 | +from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper |
| 31 | + |
30 | 32 |
31 | | -@unittest.skipIf(not paddle.is_compiled_with_xpu(), |
32 | | - "core is not compiled with XPU") |
33 | 33 | class TestMulOpError(unittest.TestCase): |
34 | 34 | def test_errors(self): |
35 | 35 | with program_guard(Program(), Program()): |
36 | 36 | # The input type of mul_op must be Variable. |
37 | 37 | x1 = fluid.create_lod_tensor( |
38 | | - np.array([[-1]]), [[1]], fluid.CPUPlace()) |
| 38 | + np.array([[-1]]), [[1]], fluid.XPUPlace(0)) |
39 | 39 | x2 = fluid.create_lod_tensor( |
40 | | - np.array([[-1]]), [[1]], fluid.CPUPlace()) |
| 40 | + np.array([[-1]]), [[1]], fluid.XPUPlace(0)) |
41 | 41 | self.assertRaises(TypeError, fluid.layers.mul, x1, x2) |
42 | | - # The input dtype of mul_op must be float32 or float64. |
| 42 | + # The input dtype of mul_op must be float32. |
43 | 43 | x3 = fluid.layers.data(name='x3', shape=[4], dtype="int32") |
44 | 44 | x4 = fluid.layers.data(name='x4', shape=[4], dtype="int32") |
45 | 45 | self.assertRaises(TypeError, fluid.layers.mul, x3, x4) |
46 | 46 |
47 | 47 |
48 | | -@unittest.skipIf(not paddle.is_compiled_with_xpu(), |
49 | | - "core is not compiled with XPU") |
50 | | -class TestXPUMulOp1(XPUOpTest): |
51 | | - def setUp(self): |
52 | | - self.op_type = "mul" |
53 | | - self.dtype = np.float32 |
54 | | - self.use_xpu = True |
55 | | - self.init_dtype_type() |
56 | | - self.inputs = { |
57 | | - 'X': np.random.random((3, 4, 2, 9)).astype(self.dtype), |
58 | | - 'Y': np.random.random((3, 6, 1, 2, 3)).astype(self.dtype) |
59 | | - } |
60 | | - self.attrs = { |
61 | | - 'x_num_col_dims': 2, |
62 | | - 'y_num_col_dims': 2, |
63 | | - } |
64 | | - result = np.dot(self.inputs['X'].reshape(3 * 4, 2 * 9), |
65 | | - self.inputs['Y'].reshape(3 * 6, 1 * 2 * 3)) |
66 | | - result = result.reshape(3, 4, 1, 2, 3) |
67 | | - self.outputs = {'Out': result} |
68 | | - |
69 | | - def init_dtype_type(self): |
70 | | - pass |
71 | | - |
72 | | - def test_check_output(self): |
73 | | - place = paddle.XPUPlace(0) |
74 | | - self.check_output_with_place(place, atol=0.01) |
75 | | - |
76 | | - def test_check_grad_normal(self): |
77 | | - place = paddle.XPUPlace(0) |
78 | | - self.check_grad_with_place( |
79 | | - place, ['X', 'Y'], 'Out', max_relative_error=0.1) |
80 | | - |
81 | | - def test_check_grad_ingore_x(self): |
82 | | - place = paddle.XPUPlace(0) |
83 | | - self.check_grad_with_place( |
84 | | - place, ['Y'], 'Out', max_relative_error=0.1, no_grad_set=set("X")) |
85 | | - |
86 | | - def test_check_grad_ignore_y(self): |
87 | | - place = paddle.XPUPlace(0) |
88 | | - self.check_grad_with_place( |
89 | | - place, ['X'], 'Out', max_relative_error=0.1, no_grad_set=set('Y')) |
90 | | - |
91 | | - |
92 | | -@unittest.skipIf(not paddle.is_compiled_with_xpu(), |
93 | | - "core is not compiled with XPU") |
94 | | -class TestXPUMulOp2(XPUOpTest): |
95 | | - def setUp(self): |
96 | | - self.op_type = "mul" |
97 | | - self.use_xpu = True |
98 | | - self.dtype = np.float32 |
99 | | - self.init_dtype_type() |
100 | | - self.inputs = { |
101 | | - 'X': np.random.random((20, 5)).astype(self.dtype), |
102 | | - 'Y': np.random.random((5, 21)).astype(self.dtype) |
103 | | - } |
104 | | - self.outputs = {'Out': np.dot(self.inputs['X'], self.inputs['Y'])} |
105 | | - |
106 | | - def init_dtype_type(self): |
107 | | - self.dtype = np.float32 |
108 | | - |
109 | | - def test_check_output(self): |
110 | | - place = paddle.XPUPlace(0) |
111 | | - self.check_output_with_place(place, atol=0.01) |
112 | | - |
113 | | - def test_check_grad_normal(self): |
114 | | - place = paddle.XPUPlace(0) |
115 | | - self.check_grad_with_place( |
116 | | - place, ['X', 'Y'], 'Out', max_relative_error=0.1) |
117 | | - |
118 | | - def test_check_grad_ingore_x(self): |
119 | | - place = paddle.XPUPlace(0) |
120 | | - self.check_grad_with_place( |
121 | | - place, ['Y'], 'Out', max_relative_error=0.1, no_grad_set=set("X")) |
122 | | - |
123 | | - def test_check_grad_ingore_y(self): |
124 | | - place = paddle.XPUPlace(0) |
125 | | - self.check_grad_with_place( |
126 | | - place, ['X'], 'Out', max_relative_error=0.1, no_grad_set=set('Y')) |
127 | | - |
| 48 | +class XPUTestMulOp(XPUOpTestWrapper): |
| 49 | + def __init__(self): |
| 50 | + self.op_name = 'mul' |
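|  | +        # use_dynamic_create_class=False: create_test_class statically stamps out one subclass per supported dtype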
| 51 | + self.use_dynamic_create_class = False |
| 52 | + |
| 53 | + class TestXPUMulOp1(XPUOpTest): |
| 54 | + def setUp(self): |
| 55 | + self.op_type = "mul" |
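|  | +            # in_type / in_type_str are injected per-dtype by create_test_class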
| 56 | + self.dtype = self.in_type |
| 57 | + self.inputs = { |
| 58 | + 'X': np.random.random((3, 4, 2, 9)).astype(self.in_type_str), |
| 59 | + 'Y': np.random.random((3, 6, 1, 2, 3)).astype(self.in_type_str) |
| 60 | + } |
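|  | +            # x_num_col_dims=2 flattens X(3,4,2,9) to (12,18); y_num_col_dims=2 flattens Y(3,6,1,2,3) to (18,6)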
| 61 | + self.attrs = { |
| 62 | + 'x_num_col_dims': 2, |
| 63 | + 'y_num_col_dims': 2, |
| 64 | + } |
| 65 | + result = np.dot(self.inputs['X'].reshape(3 * 4, 2 * 9), |
| 66 | + self.inputs['Y'].reshape(3 * 6, 1 * 2 * 3)) |
| 67 | + result = result.reshape(3, 4, 1, 2, 3) |
| 68 | + self.outputs = {'Out': result} |
| 69 | + |
| 70 | + def test_check_output(self): |
| 71 | + paddle.enable_static() |
| 72 | + place = paddle.XPUPlace(0) |
| 73 | + self.check_output_with_place(place, atol=0.01) |
| 74 | + |
| 75 | + def test_check_grad_normal(self): |
| 76 | + place = paddle.XPUPlace(0) |
| 77 | + paddle.enable_static() |
| 78 | + self.check_grad_with_place( |
| 79 | + place, ['X', 'Y'], 'Out', max_relative_error=0.1) |
| 80 | + |
| 81 | +        def test_check_grad_ignore_x(self):
| 82 | + place = paddle.XPUPlace(0) |
| 83 | + paddle.enable_static() |
| 84 | + self.check_grad_with_place( |
| 85 | + place, ['Y'], |
| 86 | + 'Out', |
| 87 | + max_relative_error=0.1, |
| 88 | + no_grad_set=set("X")) |
| 89 | + |
| 90 | + def test_check_grad_ignore_y(self): |
| 91 | + place = paddle.XPUPlace(0) |
| 92 | + paddle.enable_static() |
| 93 | + self.check_grad_with_place( |
| 94 | + place, ['X'], |
| 95 | + 'Out', |
| 96 | + max_relative_error=0.1, |
| 97 | + no_grad_set=set('Y')) |
| 98 | + |
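|  | +    # Plain 2-D case: Out is the ordinary matrix product of X and Y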
| 99 | + class TestXPUMulOp2(XPUOpTest): |
| 100 | + def setUp(self): |
| 101 | + self.op_type = "mul" |
| 102 | + self.use_xpu = True |
| 103 | + self.dtype = self.in_type |
| 104 | + self.inputs = { |
| 105 | + 'X': np.random.random((20, 5)).astype(self.in_type_str), |
| 106 | + 'Y': np.random.random((5, 21)).astype(self.in_type_str) |
| 107 | + } |
| 108 | + self.outputs = {'Out': np.dot(self.inputs['X'], self.inputs['Y'])} |
| 109 | + |
| 110 | + def test_check_output(self): |
| 111 | + place = paddle.XPUPlace(0) |
| 112 | + paddle.enable_static() |
| 113 | + self.check_output_with_place(place, atol=0.01) |
| 114 | + |
| 115 | + def test_check_grad_normal(self): |
| 116 | + place = paddle.XPUPlace(0) |
| 117 | + paddle.enable_static() |
| 118 | + self.check_grad_with_place( |
| 119 | + place, ['X', 'Y'], 'Out', max_relative_error=0.1) |
| 120 | + |
| 121 | +        def test_check_grad_ignore_x(self):
| 122 | + place = paddle.XPUPlace(0) |
| 123 | + paddle.enable_static() |
| 124 | + self.check_grad_with_place( |
| 125 | + place, ['Y'], |
| 126 | + 'Out', |
| 127 | + max_relative_error=0.1, |
| 128 | + no_grad_set=set("X")) |
| 129 | + |
| 130 | +        def test_check_grad_ignore_y(self):
| 131 | + place = paddle.XPUPlace(0) |
| 132 | + paddle.enable_static() |
| 133 | + self.check_grad_with_place( |
| 134 | + place, ['X'], |
| 135 | + 'Out', |
| 136 | + max_relative_error=0.1, |
| 137 | + no_grad_set=set('Y')) |
| 138 | + |
| 139 | + |
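|  | +# Register one concrete test class per dtype the XPU mul kernel reports as supported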
| 140 | +support_types = get_xpu_op_support_types('mul') |
| 141 | +for stype in support_types: |
| 142 | + create_test_class(globals(), XPUTestMulOp, stype) |
128 | 143 |
129 | 144 | if __name__ == "__main__": |
| 145 | + paddle.enable_static() |
130 | 146 | unittest.main() |