-
Notifications
You must be signed in to change notification settings - Fork 5.9k
【Fix PIR Unittest】fix some public API and inference UT in pir #66481
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account-related emails.
Already on GitHub? Sign in to your account
Changes from all commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -350,133 +350,147 @@ def create_fake_model(program_config): | |
| program_config = copy.deepcopy(program_config) | ||
| program_config._cast() | ||
| paddle.enable_static() | ||
| main_program_desc = core.ProgramDesc() | ||
| util_program = base.Program() | ||
| main_block_desc = main_program_desc.block(0) | ||
|
|
||
| var_desc = main_block_desc.var(b"feed") | ||
| var_desc.set_type(core.VarDesc.VarType.FEED_MINIBATCH) | ||
| var_desc.set_persistable(True) | ||
|
|
||
| index = 0 | ||
| for name, tensor_config in program_config.inputs.items(): | ||
| var_desc = main_block_desc.var(name.encode()) | ||
| var_desc.set_type(core.VarDesc.VarType.LOD_TENSOR) | ||
| var_desc.set_dtype(convert_np_dtype_to_proto_type(tensor_config.dtype)) | ||
| var_desc.set_shape(tensor_config.shape) | ||
| var_desc.set_need_check_feed(True) | ||
| if tensor_config.lod is not None: | ||
| var_desc.set_lod_level(len(tensor_config.lod)) | ||
| op_desc = main_block_desc._prepend_op() | ||
| op_desc.set_type("feed") | ||
| op_desc.set_input('X', ["feed"]) | ||
| op_desc.set_output('Out', [name]) | ||
| op_desc._set_attr("col", index) | ||
| index = index + 1 | ||
|
|
||
| save_var_map = {} | ||
| for name, tensor_config in program_config.weights.items(): | ||
| var_desc = main_block_desc.var(name.encode()) | ||
| var_desc.set_type(core.VarDesc.VarType.LOD_TENSOR) | ||
| var_desc.set_dtype(convert_np_dtype_to_proto_type(tensor_config.dtype)) | ||
| var_desc.set_shape(tensor_config.shape) | ||
| with paddle.pir_utils.OldIrGuard(): | ||
|
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. 我们需要修复PIR模式下的问题单测，但使用OldIrGuard的方式相当于切换回了旧IR运行，自然不会报错，未来OldIr会下线，那这个时候这个单测该怎么处理呢？所以我觉得需要搞清楚问题单测是否真正需要在PIR模式下运行，如果测试内容不需要的话，可以将单测迁移至deprecated目录下后续废弃掉 There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. 目前讨论的方式是对于未来新加入的pir API及pass建议使用新的inference 单测API和开发方式，目前针对于推全中用于旧IR的pass和API暂时以OldIrGuard的方式支持，后续可以考虑再迁移到新的API；至于是否需要废弃可能需要进一步进行讨论分类 There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more.
这些单测目前不需要在PIR下运行,但仍暂时保留这些单测 |
||
| main_program_desc = core.ProgramDesc() | ||
| # util_program = base.Program() | ||
|
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. 无效注释可以删掉 There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. done |
||
| util_program = paddle.static.Program() | ||
| main_block_desc = main_program_desc.block(0) | ||
|
|
||
| var_desc = main_block_desc.var(b"feed") | ||
| var_desc.set_type(core.VarDesc.VarType.FEED_MINIBATCH) | ||
| var_desc.set_persistable(True) | ||
|
|
||
| save_var_map[name] = util_program.global_block().create_parameter( | ||
| dtype=tensor_config.dtype, | ||
| shape=tensor_config.shape, | ||
| type=core.VarDesc.VarType.LOD_TENSOR, | ||
| name=name, | ||
| initializer=paddle.nn.initializer.Assign(tensor_config.data), | ||
| ) | ||
| in_vars = [] | ||
| for name in sorted(save_var_map.keys()): | ||
| in_vars.append(save_var_map[name]) | ||
|
|
||
| out_var = util_program.global_block().create_var( | ||
| type=core.VarDesc.VarType.RAW, name="out_var_0" | ||
| ) | ||
| out_var.desc.set_persistable(True) | ||
| util_program.global_block().append_op( | ||
| type='save_combine', | ||
| inputs={'X': in_vars}, | ||
| outputs={'Y': out_var}, | ||
| attrs={'file_path': '', 'save_to_memory': True}, | ||
| ) | ||
| for op_config in program_config.ops: | ||
| op_desc = main_block_desc.append_op() | ||
| op_desc.set_type(op_config.type) | ||
| # canonicalize scalar attrs | ||
| if OpProtoHolder.instance().has_op_proto(op_config.type): | ||
| proto = OpProtoHolder.instance().get_op_proto(op_config.type) | ||
| canonicalized_attrs = framework.canonicalize_attrs( | ||
| op_config.attrs, proto | ||
| index = 0 | ||
| for name, tensor_config in program_config.inputs.items(): | ||
| var_desc = main_block_desc.var(name.encode()) | ||
| var_desc.set_type(core.VarDesc.VarType.LOD_TENSOR) | ||
| var_desc.set_dtype( | ||
| convert_np_dtype_to_proto_type(tensor_config.dtype) | ||
| ) | ||
| else: | ||
| canonicalized_attrs = op_config.attrs | ||
|
|
||
| for name, values in op_config.inputs.items(): | ||
| op_desc.set_input(name, values) | ||
| for name, values in canonicalized_attrs.items(): | ||
| if name == 'sub_block': | ||
| sub_block_desc = main_program_desc.append_block(main_block_desc) | ||
| values.fill_block_desc(sub_block_desc) | ||
| op_desc._set_attr(name, sub_block_desc) | ||
| var_desc.set_shape(tensor_config.shape) | ||
| var_desc.set_need_check_feed(True) | ||
| if tensor_config.lod is not None: | ||
| var_desc.set_lod_level(len(tensor_config.lod)) | ||
| op_desc = main_block_desc._prepend_op() | ||
| op_desc.set_type("feed") | ||
| op_desc.set_input('X', ["feed"]) | ||
| op_desc.set_output('Out', [name]) | ||
| op_desc._set_attr("col", index) | ||
| index = index + 1 | ||
|
|
||
| save_var_map = {} | ||
| for name, tensor_config in program_config.weights.items(): | ||
| var_desc = main_block_desc.var(name.encode()) | ||
| var_desc.set_type(core.VarDesc.VarType.LOD_TENSOR) | ||
| var_desc.set_dtype( | ||
| convert_np_dtype_to_proto_type(tensor_config.dtype) | ||
| ) | ||
| var_desc.set_shape(tensor_config.shape) | ||
| var_desc.set_persistable(True) | ||
|
|
||
| save_var_map[name] = util_program.global_block().create_parameter( | ||
| dtype=tensor_config.dtype, | ||
| shape=tensor_config.shape, | ||
| type=core.VarDesc.VarType.LOD_TENSOR, | ||
| name=name, | ||
| initializer=paddle.nn.initializer.Assign(tensor_config.data), | ||
| ) | ||
| in_vars = [] | ||
| for name in sorted(save_var_map.keys()): | ||
| in_vars.append(save_var_map[name]) | ||
|
|
||
| out_var = util_program.global_block().create_var( | ||
| type=core.VarDesc.VarType.RAW, name="out_var_0" | ||
| ) | ||
| out_var.desc.set_persistable(True) | ||
| util_program.global_block().append_op( | ||
| type='save_combine', | ||
| inputs={'X': in_vars}, | ||
| outputs={'Y': out_var}, | ||
| attrs={'file_path': '', 'save_to_memory': True}, | ||
| ) | ||
| for op_config in program_config.ops: | ||
| op_desc = main_block_desc.append_op() | ||
| op_desc.set_type(op_config.type) | ||
| # canonicalize scalar attrs | ||
| if OpProtoHolder.instance().has_op_proto(op_config.type): | ||
| proto = OpProtoHolder.instance().get_op_proto(op_config.type) | ||
| canonicalized_attrs = framework.canonicalize_attrs( | ||
| op_config.attrs, proto | ||
| ) | ||
| else: | ||
| op_desc._set_attr(name, values) | ||
| for name, values in op_config.outputs.items(): | ||
| op_desc.set_output(name, values) | ||
| for v in values: | ||
| if main_block_desc.has_var_recursive(v.encode()): | ||
| continue | ||
| var_desc = main_block_desc.var(v.encode()) | ||
| var_desc.set_type(core.VarDesc.VarType.LOD_TENSOR) | ||
| if ( | ||
| op_config.outputs_var_type is not None | ||
| and v in op_config.outputs_var_type.keys() | ||
| ): | ||
| canonicalized_attrs = op_config.attrs | ||
|
|
||
| for name, values in op_config.inputs.items(): | ||
| op_desc.set_input(name, values) | ||
| for name, values in canonicalized_attrs.items(): | ||
| if name == 'sub_block': | ||
| sub_block_desc = main_program_desc.append_block( | ||
| main_block_desc | ||
| ) | ||
| values.fill_block_desc(sub_block_desc) | ||
| op_desc._set_attr(name, sub_block_desc) | ||
| else: | ||
| op_desc._set_attr(name, values) | ||
| for name, values in op_config.outputs.items(): | ||
| op_desc.set_output(name, values) | ||
| for v in values: | ||
| if main_block_desc.has_var_recursive(v.encode()): | ||
| continue | ||
| var_desc = main_block_desc.var(v.encode()) | ||
| var_desc.set_type(core.VarDesc.VarType.LOD_TENSOR) | ||
| if ( | ||
| op_config.outputs_var_type[v] | ||
| == VarType.LOD_TENSOR_ARRAY | ||
| op_config.outputs_var_type is not None | ||
| and v in op_config.outputs_var_type.keys() | ||
| ): | ||
| var_desc.set_type(core.VarDesc.VarType.LOD_TENSOR_ARRAY) | ||
| elif op_config.outputs_var_type[v] == VarType.STEP_SCOPES: | ||
| var_desc.set_type(core.VarDesc.VarType.STEP_SCOPES) | ||
| continue | ||
| var_desc.set_dtype(convert_np_dtype_to_proto_type(np.float32)) | ||
| if ( | ||
| op_config.outputs_dtype is not None | ||
| and v in op_config.outputs_dtype.keys() | ||
| ): | ||
| if ( | ||
| op_config.outputs_var_type[v] | ||
| == VarType.LOD_TENSOR_ARRAY | ||
| ): | ||
| var_desc.set_type( | ||
| core.VarDesc.VarType.LOD_TENSOR_ARRAY | ||
| ) | ||
| elif ( | ||
| op_config.outputs_var_type[v] == VarType.STEP_SCOPES | ||
| ): | ||
| var_desc.set_type(core.VarDesc.VarType.STEP_SCOPES) | ||
| continue | ||
| var_desc.set_dtype( | ||
| convert_np_dtype_to_proto_type( | ||
| op_config.outputs_dtype[v] | ||
| ) | ||
| convert_np_dtype_to_proto_type(np.float32) | ||
| ) | ||
| if op_config.type not in _OP_WITHOUT_KERNEL_SET: | ||
| op_desc.infer_var_type(main_block_desc) | ||
| op_desc.infer_shape(main_block_desc) | ||
| op_desc.check_attrs() | ||
|
|
||
| for index, name in enumerate(program_config.outputs): | ||
| var_desc = main_block_desc.var(b"fetch") | ||
| var_desc.set_type(core.VarDesc.VarType.FETCH_LIST) | ||
| var_desc.set_need_check_feed(True) | ||
| op_desc = main_block_desc.append_op() | ||
| op_desc.set_type("fetch") | ||
| op_desc.set_input('X', [name]) | ||
| op_desc.set_output('Out', ["fetch"]) | ||
| op_desc._set_attr("col", index) | ||
|
|
||
| model = main_program_desc.serialize_to_string() | ||
|
|
||
| util_program._sync_with_cpp() | ||
| place = base.CPUPlace() | ||
| executor = base.Executor(place) | ||
| scope = base.Scope() | ||
| with base.scope_guard(scope): | ||
| executor.run(util_program) | ||
| params = scope.find_var("out_var_0").get_bytes() | ||
| if ( | ||
| op_config.outputs_dtype is not None | ||
| and v in op_config.outputs_dtype.keys() | ||
| ): | ||
| var_desc.set_dtype( | ||
| convert_np_dtype_to_proto_type( | ||
| op_config.outputs_dtype[v] | ||
| ) | ||
| ) | ||
| if op_config.type not in _OP_WITHOUT_KERNEL_SET: | ||
| op_desc.infer_var_type(main_block_desc) | ||
| op_desc.infer_shape(main_block_desc) | ||
| op_desc.check_attrs() | ||
|
|
||
| for index, name in enumerate(program_config.outputs): | ||
| var_desc = main_block_desc.var(b"fetch") | ||
| var_desc.set_type(core.VarDesc.VarType.FETCH_LIST) | ||
| var_desc.set_need_check_feed(True) | ||
| op_desc = main_block_desc.append_op() | ||
| op_desc.set_type("fetch") | ||
| op_desc.set_input('X', [name]) | ||
| op_desc.set_output('Out', ["fetch"]) | ||
| op_desc._set_attr("col", index) | ||
|
|
||
| model = main_program_desc.serialize_to_string() | ||
|
|
||
| util_program._sync_with_cpp() | ||
| place = base.CPUPlace() | ||
| executor = base.Executor(place) | ||
| scope = base.Scope() | ||
| with base.scope_guard(scope): | ||
| executor.run(util_program) | ||
| params = scope.find_var("out_var_0").get_bytes() | ||
|
|
||
| return model, params | ||
|
|
||
|
|
||
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
同下