Merged

21 commits
9f80f2a  mv accuracy and auc (sljlp, Nov 30, 2022)
ba88249  Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into… (sljlp, Dec 1, 2022)
1d96dca  update for double_buffer (sljlp, Dec 1, 2022)
60d0b75  mv accuracy and auc (sljlp, Nov 30, 2022)
500e6b2  fix (sljlp, Dec 1, 2022)
fd3bf33  Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into… (sljlp, Dec 1, 2022)
a08f447  Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into… (sljlp, Dec 1, 2022)
47a0b10  Merge branch 'rm-fluid-io' into rm-fluid-dy (sljlp, Dec 1, 2022)
1b79176  update (sljlp, Dec 1, 2022)
30e263b  Merge branch 'rm-fluid-io' into rm-fluid-dy (sljlp, Dec 1, 2022)
aa14584  Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into… (sljlp, Dec 2, 2022)
b207f29  Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into… (sljlp, Dec 2, 2022)
d74f91a  Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into… (sljlp, Dec 2, 2022)
c35c58a  update (sljlp, Dec 2, 2022)
1b2fcfb  Merge branch 'develop' into rm-fluid-dy (sljlp, Dec 2, 2022)
e42a1a2  update for api (sljlp, Dec 5, 2022)
c29432a  update (sljlp, Dec 5, 2022)
df8d2fa  Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into… (sljlp, Dec 5, 2022)
3058e50  Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into… (sljlp, Dec 6, 2022)
08d5134  update (sljlp, Dec 7, 2022)
52e437e  Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into… (sljlp, Dec 7, 2022)
422 changes: 0 additions & 422 deletions python/paddle/fluid/dygraph/nn.py

Large diffs are not rendered by default.

6 changes: 3 additions & 3 deletions python/paddle/fluid/reader.py
@@ -1351,9 +1351,9 @@ def __init__(
self._use_double_buffer = use_double_buffer
self._capacity = capacity
if not self._iterable:
# Because layers.io.double_buffer is not supported anymore, and it was only used when
# iterable and use_double_buffer were both True, use_double_buffer is forcibly set to
# False here when iterable is False, to avoid using layers.io.double_buffer.
# Because layers.io.double_buffer is not supported anymore, and iterable=False combined
# with use_double_buffer=True is not supported either, use_double_buffer is forcibly set
# to False here when iterable is False, to avoid unexpected errors.
# TODO: keep use_double_buffer
self._use_double_buffer = False
self._init_non_iterable()
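For context, here is a minimal sketch of the fallback the new comment describes. The attribute names (_iterable, _use_double_buffer, _capacity) come from the hunk above; the wrapper class itself is a hypothetical stand-in, not the real reader class:

class _ReaderSketch:
    # Hypothetical simplified stand-in for the reader in fluid/reader.py.
    def __init__(self, iterable=True, use_double_buffer=True, capacity=4):
        self._iterable = iterable
        self._use_double_buffer = use_double_buffer
        self._capacity = capacity
        if not self._iterable:
            # layers.io.double_buffer is gone, and double buffering was only
            # ever active when iterable was also True, so the flag is dropped
            # here instead of failing later.
            self._use_double_buffer = False

reader = _ReaderSketch(iterable=False, use_double_buffer=True)
assert reader._use_double_buffer is False  # silently forced off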
32 changes: 18 additions & 14 deletions python/paddle/fluid/tests/unittests/test_group_norm_op.py
@@ -293,21 +293,25 @@ def attr_data_format():

class TestGroupNormEager(unittest.TestCase):
def test_dygraph_api(self):
self.dtype = np.float64

# float64 is not supported; only float32 is
self.dtype = np.float32

self.shape = (8, 32, 32)
input = np.random.random(self.shape).astype(self.dtype)

with fluid.dygraph.guard():
tensor_1 = fluid.dygraph.to_variable(input)
tensor_1.stop_gradient = False
groupNorm = fluid.dygraph.nn.GroupNorm(channels=32, groups=4)
groupNorm = paddle.nn.GroupNorm(num_channels=32, num_groups=4)
ret1 = groupNorm(tensor_1)
ret1.backward()
with _test_eager_guard():
tensor_eager_1 = fluid.dygraph.to_variable(input)
tensor_eager_1.stop_gradient = False
groupNorm_eager = fluid.dygraph.nn.GroupNorm(
channels=32, groups=4
groupNorm_eager = paddle.nn.GroupNorm(
num_channels=32, num_groups=4
)
ret2 = groupNorm_eager(tensor_eager_1)
ret2.backward()
@@ -328,16 +332,14 @@ def test_dygraph_api(self):
with fluid.dygraph.guard():
tensor_1 = fluid.dygraph.to_variable(input)
tensor_1.stop_gradient = False
groupNorm = fluid.dygraph.nn.GroupNorm(
channels=32, groups=4, dtype='float32'
)
groupNorm = paddle.nn.GroupNorm(num_channels=32, num_groups=4)
ret1 = groupNorm(tensor_1)
ret1.backward()
with _test_eager_guard():
tensor_eager_1 = fluid.dygraph.to_variable(input)
tensor_eager_1.stop_gradient = False
groupNorm_eager = fluid.dygraph.nn.GroupNorm(
channels=32, groups=4
groupNorm_eager = paddle.nn.GroupNorm(
num_channels=32, num_groups=4
)
ret2 = groupNorm_eager(tensor_eager_1)
ret2.backward()
@@ -351,23 +353,25 @@ def test_dygraph_api(self):

class TestGroupNormEager_fp16(unittest.TestCase):
def test_dygraph_api(self):

# float16 is not supported; only float32 is
self.dtype = np.float32

self.shape = (8, 32, 32)
input = np.random.random(self.shape).astype(self.dtype)

with fluid.dygraph.guard():
tensor_1 = fluid.dygraph.to_variable(input)
tensor_1.stop_gradient = False
groupNorm = fluid.dygraph.nn.GroupNorm(
channels=32, groups=4, dtype='float16'
)
groupNorm = paddle.nn.GroupNorm(num_channels=32, num_groups=4)
ret1 = groupNorm(tensor_1)
ret1.backward()
with _test_eager_guard():
tensor_eager_1 = fluid.dygraph.to_variable(input)
tensor_eager_1.stop_gradient = False
groupNorm_eager = fluid.dygraph.nn.GroupNorm(
channels=32, groups=4
groupNorm_eager = paddle.nn.GroupNorm(
num_channels=32, num_groups=4
)
ret2 = groupNorm_eager(tensor_eager_1)
ret2.backward()
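The substitution applied throughout this test file, shown as a small standalone sketch: it uses paddle.to_tensor in place of the test's fluid.dygraph.to_variable, and the 32-channel, 4-group sizes mirror the test's own:

import numpy as np
import paddle

x = paddle.to_tensor(np.random.random((8, 32, 32)).astype("float32"))
x.stop_gradient = False

# before (removed): fluid.dygraph.nn.GroupNorm(channels=32, groups=4)
# after: the public API, with renamed keyword arguments
group_norm = paddle.nn.GroupNorm(num_channels=32, num_groups=4)
out = group_norm(x)
out.backward()  # gradients flow through the new layer as before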
101 changes: 0 additions & 101 deletions python/paddle/fluid/tests/unittests/test_group_norm_op_v2.py
@@ -19,7 +19,6 @@
import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid import Program, program_guard
from paddle.fluid.framework import _test_eager_guard


@@ -39,106 +38,6 @@ def group_norm_naive_for_general_dimension(x, scale, bias, epsilon, groups):
return output


class TestDygraphGroupNormv2(unittest.TestCase):
def test_dygraph(self):
places = [fluid.CPUPlace()]
if core.is_compiled_with_cuda() and core.op_support_gpu("group_norm"):
places.append(fluid.CUDAPlace(0))
shapes = [
[2, 2, 2, 2],
[2, 2, 4],
[4, 2],
[4, 2, 6, 6, 2],
[2, 2, 2, 2, 2, 2],
]
for p in places:

def compute_v1(x):
with fluid.dygraph.guard(p):
gn = fluid.dygraph.GroupNorm(channels=2, groups=2)
y = gn(fluid.dygraph.to_variable(x))
return y.numpy()

def compute_v2(x):
with fluid.dygraph.guard(p):
gn = paddle.nn.GroupNorm(num_channels=2, num_groups=2)
y = gn(fluid.dygraph.to_variable(x))
return y.numpy()

def test_weight_bias_false():
with fluid.dygraph.guard(p):
gn = paddle.nn.GroupNorm(
num_channels=2,
num_groups=2,
weight_attr=False,
bias_attr=False,
)

def test_nn_exception():
with fluid.dygraph.guard(p):

def attr_data_format():
out = paddle.nn.GroupNorm(
num_groups=2, num_channels=2, data_format="CNHW"
)

self.assertRaises(ValueError, attr_data_format)

for shape in shapes:
x = np.random.randn(*shape).astype("float32")
y1 = compute_v1(x)
y2 = compute_v2(x)
result = np.allclose(y1, y2, atol=1e-5)
if not result:
print("y1:", y1, "\ty2:", y2)
self.assertTrue(result)
test_weight_bias_false()
test_nn_exception()

def test_static(self):
paddle.enable_static()
places = [fluid.CPUPlace()]
if core.is_compiled_with_cuda() and core.op_support_gpu("group_norm"):
places.append(fluid.CUDAPlace(0))
shapes = [
[2, 6, 2, 2],
[2, 6, 4],
[4, 6],
[4, 6, 6, 6, 2],
[4, 6, 2, 2, 2, 2],
]
for p in places:
exe = fluid.Executor(p)

def compute_v1(x_np):
with program_guard(Program(), Program()):
gn = fluid.dygraph.GroupNorm(channels=6, groups=2)
x = fluid.data(name='x', shape=x_np.shape, dtype=x_np.dtype)
y = gn(x)
exe.run(fluid.default_startup_program())
r = exe.run(feed={'x': x_np}, fetch_list=[y])[0]
return r

def compute_v2(x_np):
with program_guard(Program(), Program()):
gn = paddle.nn.GroupNorm(num_channels=6, num_groups=2)
x = fluid.data(name='x', shape=x_np.shape, dtype=x_np.dtype)
y = gn(x)
exe.run(fluid.default_startup_program())
r = exe.run(feed={'x': x_np}, fetch_list=[y])[0]
return r

for shape in shapes:
x = np.random.randn(*shape).astype("float32")
y1 = compute_v1(x)
y2 = compute_v2(x)
np.testing.assert_allclose(y1, y2, rtol=1e-05, atol=1e-05)

def test_eager_api(self):
with _test_eager_guard():
self.test_dygraph()


class TestGroupNormAPIV2_With_General_Dimensions(unittest.TestCase):
def test_numerical_accuracy(self):
paddle.disable_static()
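For readers of the kept helper at the top of this file, group_norm_naive_for_general_dimension, here is an illustrative NumPy sketch consistent with its signature (the body below is an assumption about the reference computation, not copied from the file):

import numpy as np

def group_norm_naive_for_general_dimension(x, scale, bias, epsilon, groups):
    # x has shape (N, C, ...) with C divisible by `groups`;
    # scale and bias have shape (C,).
    N, C = x.shape[0], x.shape[1]
    g = x.reshape(N, groups, -1)              # pool each group's values together
    mean = g.mean(axis=2, keepdims=True)
    var = g.var(axis=2, keepdims=True)
    g = (g - mean) / np.sqrt(var + epsilon)   # normalize within each group
    out = g.reshape(x.shape)
    param_shape = (1, C) + (1,) * (x.ndim - 2)  # broadcast over trailing dims
    return out * scale.reshape(param_shape) + bias.reshape(param_shape)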
Original file line number Diff line number Diff line change
@@ -21,7 +21,7 @@
import paddle
import paddle.fluid as fluid
import paddle.fluid.framework as framework
from paddle.fluid.dygraph.nn import BatchNorm, Embedding, GroupNorm
from paddle.fluid.dygraph.nn import BatchNorm, Embedding
from paddle.nn import Linear


@@ -122,10 +122,10 @@ def testLoadStaticModel(self):
name='groupnorm_in', shape=[None, 8, 32, 32], dtype='float32'
)
groupnorm_out1 = paddle.static.nn.group_norm(
input=groupnorm_in, groups=4
input=groupnorm_in, groups=4, param_attr=True, bias_attr=True
)
groupnorm_out2 = paddle.static.nn.group_norm(
input=groupnorm_in, groups=4
input=groupnorm_in, groups=4, param_attr=True, bias_attr=True
)
'''
spec_norm = fluid.data(name='spec_norm', shape=[2, 8, 32, 32], dtype='float32')
@@ -212,8 +212,8 @@ def __init__(self):
self.layer_norm_1 = paddle.nn.LayerNorm([10])
self.layer_norm_2 = paddle.nn.LayerNorm(10)

self.group_norm1 = GroupNorm(8, 4)
self.gourp_norm2 = GroupNorm(8, 4)
self.group_norm1 = paddle.nn.GroupNorm(4, 8)
self.gourp_norm2 = paddle.nn.GroupNorm(4, 8)

self.w_1 = self.create_parameter(
[100, 100], dtype='float32', attr="weight_test_1"
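Note the argument-order flip in the last hunk: the removed fluid layer was constructed as GroupNorm(8, 4), i.e. (channels, groups), while paddle.nn.GroupNorm takes (num_groups, num_channels) positionally, so the equivalent call becomes paddle.nn.GroupNorm(4, 8): the same 8-channel, 4-group layer with the positional order reversed.

# removed: fluid.dygraph.nn.GroupNorm(8, 4)   # (channels=8, groups=4)
# added:   paddle.nn.GroupNorm(4, 8)          # (num_groups=4, num_channels=8)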