
Commit 0712bff

update
1 parent 826394a · commit 0712bff

2 files changed: 5 additions & 15 deletions


test/ir/pir/cinn/symbolic/simple_llama.config

Lines changed: 5 additions & 5 deletions
@@ -94,11 +94,11 @@
 (%93) = "pd_op.matmul" (%91, %3) {is_persistable:[false],stop_gradient:[false],transpose_x:false,transpose_y:false} : (builtin.tensor<-1x-1x4096xf16>, builtin.tensor<4096x4096xf16>) -> builtin.tensor<-1x-1x4096xf16>
 (%94) = "pd_op.matmul" (%91, %4) {is_persistable:[false],stop_gradient:[false],transpose_x:false,transpose_y:false} : (builtin.tensor<-1x-1x4096xf16>, builtin.tensor<4096x4096xf16>) -> builtin.tensor<-1x-1x4096xf16>
 (%95) = "pd_op.full_int_array" () {dtype:(pd_op.DataType)int64,place:(pd_op.Place)Place(cpu),stop_gradient:[true],value:[(Int64)0,(Int64)0,(Int64)32,(Int64)128]} : () -> builtin.tensor<4xi64>
-(%96, %97) = "pd_op.reshape" (%92, %95) {is_persistable:[false,false],stop_gradient:[false,false]} : (builtin.tensor<-1x-1x4096xf16>, builtin.tensor<4xi64>) -> builtin.tensor<-1x-1x32x128xf16>, builtin.tensor<0x-1x-1x4096xf16>
+(%96) = "pd_op.reshape" (%92, %95) {is_persistable:[false,false],stop_gradient:[false,false]} : (builtin.tensor<-1x-1x4096xf16>, builtin.tensor<4xi64>) -> builtin.tensor<-1x-1x32x128xf16>
 (%98) = "pd_op.full_int_array" () {dtype:(pd_op.DataType)int64,place:(pd_op.Place)Place(cpu),stop_gradient:[true],value:[(Int64)0,(Int64)0,(Int64)32,(Int64)128]} : () -> builtin.tensor<4xi64>
-(%99, %100) = "pd_op.reshape" (%93, %98) {is_persistable:[false,false],stop_gradient:[false,false]} : (builtin.tensor<-1x-1x4096xf16>, builtin.tensor<4xi64>) -> builtin.tensor<-1x-1x32x128xf16>, builtin.tensor<0x-1x-1x4096xf16>
+(%99) = "pd_op.reshape" (%93, %98) {is_persistable:[false,false],stop_gradient:[false,false]} : (builtin.tensor<-1x-1x4096xf16>, builtin.tensor<4xi64>) -> builtin.tensor<-1x-1x32x128xf16>
 (%101) = "pd_op.full_int_array" () {dtype:(pd_op.DataType)int64,place:(pd_op.Place)Place(cpu),stop_gradient:[true],value:[(Int64)0,(Int64)0,(Int64)32,(Int64)128]} : () -> builtin.tensor<4xi64>
-(%102, %103) = "pd_op.reshape" (%94, %101) {is_persistable:[false,false],stop_gradient:[false,false]} : (builtin.tensor<-1x-1x4096xf16>, builtin.tensor<4xi64>) -> builtin.tensor<-1x-1x32x128xf16>, builtin.tensor<0x-1x-1x4096xf16>
+(%102) = "pd_op.reshape" (%94, %101) {is_persistable:[false,false],stop_gradient:[false,false]} : (builtin.tensor<-1x-1x4096xf16>, builtin.tensor<4xi64>) -> builtin.tensor<-1x-1x32x128xf16>
 (%104) = "pd_op.shape" (%99) {is_persistable:[false],stop_gradient:[false]} : (builtin.tensor<-1x-1x32x128xf16>) -> builtin.tensor<4xi32>
 (%105) = "pd_op.full_int_array" () {dtype:(pd_op.DataType)int64,place:(pd_op.Place)Place(cpu),stop_gradient:[true],value:[(Int64)1]} : () -> builtin.tensor<1xi64>
 (%106) = "pd_op.full_int_array" () {dtype:(pd_op.DataType)int64,place:(pd_op.Place)Place(cpu),stop_gradient:[true],value:[(Int64)2]} : () -> builtin.tensor<1xi64>
@@ -173,7 +173,7 @@
 (%181) = "pd_op.matmul" (%179, %180) {is_persistable:[false],stop_gradient:[false],transpose_x:false,transpose_y:false} : (builtin.tensor<-1x32x-1x128xf16>, builtin.tensor<-1x32x128x-1xf16>) -> builtin.tensor<-1x32x-1x-1xf16>
 (%182) = "pd_op.full" () {dtype:(pd_op.DataType)int32,is_persistable:[false],place:(pd_op.Place)Place(cpu),shape:(pd_op.IntArray)[1],stop_gradient:[false],value:(Float)1} : () -> builtin.tensor<1xi32>
 (%183) = "builtin.combine" (%167, %182, %170, %174) {} : (builtin.tensor<i32>, builtin.tensor<1xi32>, builtin.tensor<i32>, builtin.tensor<i32>) -> vec[builtin.tensor<i32>,builtin.tensor<1xi32>,builtin.tensor<i32>,builtin.tensor<i32>]
-(%184, %185) = "pd_op.reshape" (%82, %183) {is_persistable:[false,false],stop_gradient:[false,false]} : (builtin.tensor<-1x1x-1x-1xf16>, vec[builtin.tensor<i32>,builtin.tensor<1xi32>,builtin.tensor<i32>,builtin.tensor<i32>]) -> builtin.tensor<-1x1x-1x-1xf16>, builtin.tensor<0x-1x1x-1x-1xf16>
+(%184) = "pd_op.reshape" (%82, %183) {is_persistable:[false,false],stop_gradient:[false,false]} : (builtin.tensor<-1x1x-1x-1xf16>, vec[builtin.tensor<i32>,builtin.tensor<1xi32>,builtin.tensor<i32>,builtin.tensor<i32>]) -> builtin.tensor<-1x1x-1x-1xf16>
 (%186) = "pd_op.add" (%181, %184) {is_persistable:[false],stop_gradient:[false]} : (builtin.tensor<-1x32x-1x-1xf16>, builtin.tensor<-1x1x-1x-1xf16>) -> builtin.tensor<-1x32x-1x-1xf16>
 (%187) = "pd_op.cast" (%186) {dtype:(pd_op.DataType)float32,is_persistable:[false],stop_gradient:[false]} : (builtin.tensor<-1x32x-1x-1xf16>) -> builtin.tensor<-1x32x-1x-1xf32>
 (%188) = "pd_op.softmax" (%187) {axis:(Int32)-1,is_persistable:[false],stop_gradient:[false]} : (builtin.tensor<-1x32x-1x-1xf32>) -> builtin.tensor<-1x32x-1x-1xf32>
@@ -182,7 +182,7 @@
 (%191) = "pd_op.transpose" (%190) {is_persistable:[false],perm:[(Int32)0,(Int32)2,(Int32)1,(Int32)3],stop_gradient:[false]} : (builtin.tensor<-1x32x-1x128xf16>) -> builtin.tensor<-1x-1x32x128xf16>
 (%192) = "pd_op.full" () {dtype:(pd_op.DataType)int32,is_persistable:[false],place:(pd_op.Place)Place(cpu),shape:(pd_op.IntArray)[1],stop_gradient:[false],value:(Float)4096} : () -> builtin.tensor<1xi32>
 (%193) = "builtin.combine" (%167, %170, %192) {} : (builtin.tensor<i32>, builtin.tensor<i32>, builtin.tensor<1xi32>) -> vec[builtin.tensor<i32>,builtin.tensor<i32>,builtin.tensor<1xi32>]
-(%194, %195) = "pd_op.reshape" (%191, %193) {is_persistable:[false,false],stop_gradient:[false,false]} : (builtin.tensor<-1x-1x32x128xf16>, vec[builtin.tensor<i32>,builtin.tensor<i32>,builtin.tensor<1xi32>]) -> builtin.tensor<-1x-1x4096xf16>, builtin.tensor<0x-1x-1x32x128xf16>
+(%194) = "pd_op.reshape" (%191, %193) {is_persistable:[false,false],stop_gradient:[false,false]} : (builtin.tensor<-1x-1x32x128xf16>, vec[builtin.tensor<i32>,builtin.tensor<i32>,builtin.tensor<1xi32>]) -> builtin.tensor<-1x-1x4096xf16>
 (%196) = "pd_op.matmul" (%194, %7) {is_persistable:[false],stop_gradient:[false],transpose_x:false,transpose_y:false} : (builtin.tensor<-1x-1x4096xf16>, builtin.tensor<4096x4096xf16>) -> builtin.tensor<-1x-1x4096xf16>
 (%197) = "pd_op.add" (%49, %196) {is_persistable:[false],stop_gradient:[false]} : (builtin.tensor<-1x-1x4096xf16>, builtin.tensor<-1x-1x4096xf16>) -> builtin.tensor<-1x-1x4096xf16>
 (%198) = "pd_op.cast" (%197) {dtype:(pd_op.DataType)float32,is_persistable:[false],stop_gradient:[false]} : (builtin.tensor<-1x-1x4096xf16>) -> builtin.tensor<-1x-1x4096xf32>
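
Note (an illustration, not part of the commit): each removed second result is the reshape op's internal xshape tensor, recognizable by its 0x... leading dimension, which records the input shape for the backward pass. The user-facing behavior is unchanged: paddle.reshape returns a single tensor, and a 0 in the target shape copies the corresponding input dimension, matching the [0, 0, 32, 128] full_int_array constants above. A minimal sketch:

import paddle

# A 0 in the target shape copies the matching input dimension, mirroring
# the [(Int64)0,(Int64)0,(Int64)32,(Int64)128] constants fed to each reshape.
x = paddle.randn([2, 8, 4096])
y = paddle.reshape(x, [0, 0, 32, 128])
print(y.shape)  # [2, 8, 32, 128]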

test/ir/pir/cinn/symbolic/test_simple_llama_dy.py

Lines changed: 0 additions & 10 deletions
@@ -57,16 +57,6 @@ def __init__(self):
     def run(self, file):
         program = self.load_from(file)
         for op in program.global_block().ops:
-            if op.name() == "pd_op.reshape":
-                if (
-                    op.result(1).initialized()
-                    and not op.result(1).use_empty()
-                    and op.result(1).first_use().owner().name() == "pd_op.fetch"
-                ):
-                    program.global_block().remove_op(
-                        op.result(1).first_use().owner()
-                    )
-
             if op.name() == "pd_op.squeeze":
                 if (
                     op.result(1).initialized()
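
The deleted branch detached any pd_op.fetch consumer of reshape's second (xshape) result; with the single-result reshape form in simple_llama.config above, there is no such result left to fetch, so only ops like squeeze still need this cleanup. A minimal sketch of the surviving pattern (a hypothetical helper, assuming the kept squeeze branch mirrors the deleted reshape one, and using only the APIs visible in this test):

def drop_fetch_of_second_result(program, op_name):
    # Remove the fetch attached to an op's second result so the auxiliary
    # output is not materialized, as the kept pd_op.squeeze branch still does.
    for op in program.global_block().ops:
        if op.name() != op_name:
            continue
        res = op.result(1)
        if (
            res.initialized()
            and not res.use_empty()
            and res.first_use().owner().name() == "pd_op.fetch"
        ):
            program.global_block().remove_op(res.first_use().owner())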
