
【Paddle Tensor】Fix bugs in converting old IR-TRT unit tests to PIR-TRT #71083

Open · wants to merge 13 commits into base: develop
2 changes: 2 additions & 0 deletions paddle/fluid/pir/transforms/tensorrt/trt_op_marker_pass.cc
@@ -80,6 +80,7 @@ DEFINE_GENERAL_PATTERN(Hardswish, paddle::dialect::HardswishOp)
DEFINE_GENERAL_PATTERN(Assign, paddle::dialect::AssignOp)
DEFINE_GENERAL_PATTERN(Tile, paddle::dialect::TileOp)
DEFINE_GENERAL_PATTERN(Share_Data, paddle::dialect::ShareDataOp)
DEFINE_GENERAL_PATTERN(Share_Data_, paddle::dialect::ShareData_Op)
DEFINE_GENERAL_PATTERN(AssignOut, paddle::dialect::AssignOut_Op)
DEFINE_GENERAL_PATTERN(Swish, paddle::dialect::SwishOp)
DEFINE_GENERAL_PATTERN(Log, paddle::dialect::LogOp)
@@ -2485,6 +2486,7 @@ class TrtOpMarkerPass : public pir::PatternRewritePass {
ADD_PATTERN(Assign)
ADD_PATTERN(Tile)
ADD_PATTERN(Share_Data)
ADD_PATTERN(Share_Data_)
ADD_PATTERN(Swish)
ADD_PATTERN(Log)
ADD_PATTERN(Floor)
3 changes: 1 addition & 2 deletions python/paddle/tensorrt/impls/others.py
@@ -269,11 +269,10 @@ def set_value_converter(network, paddle_op, inputs):


@converter_registry.register("pd_op.share_data", trt_version="8.x")
@converter_registry.register("pd_op.share_data_", trt_version="8.x")
def share_data_converter(network, paddle_op, inputs):
x = inputs[0]

identity_layer = network.add_identity(x)

return identity_layer.get_output(0)


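Note: together with the trt_op_marker_pass.cc change above, this registers the in-place variant pd_op.share_data_ alongside pd_op.share_data. Since share_data only aliases its input tensor, the converter lowers it to a single TensorRT identity layer. Below is a minimal standalone sketch of that lowering against the TensorRT 8.x Python API; the tensor name and shape are illustrative, not taken from this PR.

import tensorrt as trt

logger = trt.Logger(trt.Logger.WARNING)
builder = trt.Builder(logger)
# Explicit-batch network, as required for dynamic shapes on TRT 8.x.
network = builder.create_network(
    1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
)
x = network.add_input("input_data", trt.float32, (-1, 3, 64, 64))
# share_data / share_data_ are pure aliases, so an identity layer suffices.
identity_layer = network.add_identity(x)
network.mark_output(identity_layer.get_output(0))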
3 changes: 3 additions & 0 deletions test/ir/inference/CMakeLists.txt
@@ -170,6 +170,9 @@ if(WITH_GPU AND TENSORRT_FOUND)

set_tests_properties(test_trt_ops_fp32_mix_precision PROPERTIES TIMEOUT 300)
set_tests_properties(test_trt_convert_unary PROPERTIES TIMEOUT 600)
set_tests_properties(test_trt_convert_index_select PROPERTIES TIMEOUT 2000)
set_tests_properties(test_trt_convert_anchor_generator PROPERTIES TIMEOUT
2000)

if(NOT WIN32)

2 changes: 1 addition & 1 deletion test/ir/inference/auto_scan_test.py
@@ -923,7 +923,7 @@ def random_to_skip():
prog_config.ops[i].attrs
for i in range(len(prog_config.ops))
]
dynamic_shape = self.generate_dynamic_shape()
dynamic_shape = self.generate_dynamic_shape(attrs)

main_program_desc, util_program = create_fake_model(
prog_config,
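Note: the harness now calls generate_dynamic_shape(attrs) as a method of the test class rather than a zero-argument helper, which is why the converted tests below hoist their nested generate_dynamic_shape closures into methods that accept attrs and return self.dynamic_shape. A minimal sketch of the contract this appears to assume; the class name, import path, and shape values are illustrative.

from auto_scan_test import TrtLayerAutoScanTest  # assumed import path

class ExampleTrtTest(TrtLayerAutoScanTest):
    def generate_dynamic_shape(self, attrs):
        # attrs carries the per-op attribute dicts collected by the harness.
        self.dynamic_shape.min_input_shape = {"input_data": [1, 3, 64, 64]}
        self.dynamic_shape.max_input_shape = {"input_data": [4, 3, 64, 64]}
        self.dynamic_shape.opt_input_shape = {"input_data": [2, 3, 64, 64]}
        return self.dynamic_shape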
79 changes: 41 additions & 38 deletions test/ir/inference/test_trt_convert_affine_channel.py
@@ -44,7 +44,7 @@ def generate_weight1(dims, attrs: list[dict[str, Any]]):
else:
return np.random.random([3]).astype(np.float32)

for dims in [2, 4]:
for dims in [4]:
for batch in [1, 2, 4]:
for data_layout in ["NCHW", "NHWC"]:
self.dims = dims
@@ -86,35 +86,37 @@ def generate_weight1(dims, attrs: list[dict[str, Any]]):

yield program_config

def generate_dynamic_shape(self, attrs):
if self.dims == 2:
self.dynamic_shape.min_input_shape = {"input_data": [1, 64]}
self.dynamic_shape.max_input_shape = {"input_data": [4, 64]}
self.dynamic_shape.opt_input_shape = {"input_data": [2, 64]}
else:
if attrs[0]['data_layout'] == "NCHW":
self.dynamic_shape.min_input_shape = {
"input_data": [1, 3, 64, 64]
}
self.dynamic_shape.max_input_shape = {
"input_data": [4, 3, 64, 64]
}
self.dynamic_shape.opt_input_shape = {
"input_data": [1, 3, 64, 64]
}
else:
self.dynamic_shape.min_input_shape = {
"input_data": [1, 64, 64, 3]
}
self.dynamic_shape.max_input_shape = {
"input_data": [4, 64, 64, 3]
}
self.dynamic_shape.opt_input_shape = {
"input_data": [1, 64, 64, 3]
}
return self.dynamic_shape

def sample_predictor_configs(
self, program_config
self, program_config, run_pir=False
) -> tuple[paddle_infer.Config, list[int], float]:
def generate_dynamic_shape(attrs):
if self.dims == 2:
self.dynamic_shape.min_input_shape = {"input_data": [1, 32]}
self.dynamic_shape.max_input_shape = {"input_data": [4, 64]}
self.dynamic_shape.opt_input_shape = {"input_data": [2, 64]}
else:
if attrs[0]['data_layout'] == "NCHW":
self.dynamic_shape.min_input_shape = {
"input_data": [1, 3, 32, 32]
}
self.dynamic_shape.max_input_shape = {
"input_data": [4, 3, 64, 64]
}
self.dynamic_shape.opt_input_shape = {
"input_data": [1, 3, 64, 64]
}
else:
self.dynamic_shape.min_input_shape = {
"input_data": [1, 32, 32, 3]
}
self.dynamic_shape.max_input_shape = {
"input_data": [4, 64, 64, 3]
}
self.dynamic_shape.opt_input_shape = {
"input_data": [1, 64, 64, 3]
}

def clear_dynamic_shape():
self.dynamic_shape.min_input_shape = {}
@@ -133,17 +135,18 @@ def generate_trt_nodes_num(attrs, dynamic_shape):

# for static_shape
clear_dynamic_shape()
self.trt_param.precision = paddle_infer.PrecisionType.Float32
yield self.create_inference_config(), generate_trt_nodes_num(
attrs, False
), 1e-5
self.trt_param.precision = paddle_infer.PrecisionType.Half
yield self.create_inference_config(), generate_trt_nodes_num(
attrs, False
), (1e-3, 1e-3)
if not run_pir:
self.trt_param.precision = paddle_infer.PrecisionType.Float32
yield self.create_inference_config(), generate_trt_nodes_num(
attrs, False
), 1e-5
self.trt_param.precision = paddle_infer.PrecisionType.Half
yield self.create_inference_config(), generate_trt_nodes_num(
attrs, False
), (1e-3, 1e-3)

# for dynamic_shape
generate_dynamic_shape(attrs)
self.generate_dynamic_shape(attrs)
self.trt_param.precision = paddle_infer.PrecisionType.Float32
yield self.create_inference_config(), generate_trt_nodes_num(
attrs, True
Expand All @@ -154,7 +157,7 @@ def generate_trt_nodes_num(attrs, dynamic_shape):
), (1e-3, 1e-3)

def test(self):
self.run_test()
self.run_test(run_pir=True)


if __name__ == "__main__":
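Note: the pattern in this file recurs in the tests below: sample_predictor_configs gains a run_pir flag, the static-shape configs are emitted only when run_pir is false (the PIR-TRT path appears to exercise dynamic shapes only), and test() switches to run_test(run_pir=True). A condensed sketch of the resulting control flow; the TRT node counts and tolerances are placeholders.

def sample_predictor_configs(self, program_config, run_pir=False):
    attrs = [op.attrs for op in program_config.ops]
    if not run_pir:
        # Static-shape engines: only the legacy (old-IR) TRT path runs these.
        self.trt_param.precision = paddle_infer.PrecisionType.Float32
        yield self.create_inference_config(), (1, 2), 1e-5
    # Dynamic-shape engines: exercised by both the legacy and PIR-TRT paths.
    self.generate_dynamic_shape(attrs)
    self.trt_param.precision = paddle_infer.PrecisionType.Float32
    yield self.create_inference_config(), (1, 2), 1e-5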
43 changes: 22 additions & 21 deletions test/ir/inference/test_trt_convert_anchor_generator.py
@@ -82,13 +82,15 @@ def generate_input1(batch, attrs: list[dict[str, Any]]):

yield program_config

def generate_dynamic_shape(self, attrs):
self.dynamic_shape.min_input_shape = {"input_data": [1, 3, 64, 64]}
self.dynamic_shape.max_input_shape = {"input_data": [4, 3, 64, 64]}
self.dynamic_shape.opt_input_shape = {"input_data": [2, 3, 64, 64]}
return self.dynamic_shape

def sample_predictor_configs(
self, program_config
self, program_config, run_pir=False
) -> tuple[paddle_infer.Config, list[int], float]:
def generate_dynamic_shape(attrs):
self.dynamic_shape.min_input_shape = {"input_data": [1, 3, 32, 32]}
self.dynamic_shape.max_input_shape = {"input_data": [4, 3, 64, 64]}
self.dynamic_shape.opt_input_shape = {"input_data": [1, 3, 64, 64]}

def clear_dynamic_shape():
self.dynamic_shape.min_input_shape = {}
@@ -107,34 +109,33 @@ def generate_trt_nodes_num(attrs, dynamic_shape):

# for static_shape
clear_dynamic_shape()
self.trt_param.precision = paddle_infer.PrecisionType.Float32
program_config.set_input_type(np.float32)
yield self.create_inference_config(), generate_trt_nodes_num(
attrs, False
), 1e-5
self.trt_param.precision = paddle_infer.PrecisionType.Half
# NOTE(tizheng): This config will fall back to paddle native OP,
# which only supports FP32 input.
program_config.set_input_type(np.float32)
yield self.create_inference_config(), generate_trt_nodes_num(
attrs, False
), 1e-3
if not run_pir:
self.trt_param.precision = paddle_infer.PrecisionType.Float32
program_config.set_input_type(np.float32)
yield self.create_inference_config(), generate_trt_nodes_num(
attrs, False
), 1e-5
self.trt_param.precision = paddle_infer.PrecisionType.Half
# NOTE(tizheng): This config will fall back to paddle native OP,
# which only supports FP32 input.
program_config.set_input_type(np.float32)
yield self.create_inference_config(), generate_trt_nodes_num(
attrs, False
), 1e-3

# for dynamic_shape
generate_dynamic_shape(attrs)
self.generate_dynamic_shape(attrs)
self.trt_param.precision = paddle_infer.PrecisionType.Float32
program_config.set_input_type(np.float32)
yield self.create_inference_config(), generate_trt_nodes_num(
attrs, True
), 1e-5
self.trt_param.precision = paddle_infer.PrecisionType.Half
program_config.set_input_type(np.float16)
yield self.create_inference_config(), generate_trt_nodes_num(
attrs, True
), 1e-3

def test(self):
self.run_test()
self.run_test(run_pir=True)


if __name__ == "__main__":
6 changes: 3 additions & 3 deletions test/ir/inference/test_trt_convert_clip.py
@@ -48,7 +48,7 @@ def generate_weight1(attrs: list[dict[str, Any]]):
def generate_weight2(attrs: list[dict[str, Any]]):
return np.array([np.random.uniform(10, 20)]).astype("float32")

for dims in [0, 1, 2, 3, 4]:
for dims in [1, 2, 3, 4]:
for batch in [1, 4]:
for dtype in [np.float32, np.int32]:
for op_inputs in [
@@ -99,7 +99,7 @@ def generate_weight2(attrs: list[dict[str, Any]]):

yield program_config

def generate_dynamic_shape(self):
def generate_dynamic_shape(self, attrs):
if self.dims == 0:
self.dynamic_shape.min_input_shape = {"input_data": []}
self.dynamic_shape.max_input_shape = {"input_data": []}
@@ -151,7 +151,7 @@ def generate_trt_nodes_num(attrs, dynamic_shape):
), (1e-3, 1e-3)

# for dynamic_shape
self.generate_dynamic_shape()
self.generate_dynamic_shape(attrs)
self.trt_param.precision = paddle_infer.PrecisionType.Float32
yield self.create_inference_config(), generate_trt_nodes_num(
attrs, True
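Note: dims == 0 is dropped from the sampled cases here (and other tests in this PR similarly narrow their dims lists), presumably because 0-D inputs are not yet covered on the PIR-TRT path; generate_dynamic_shape still keeps its dims == 0 branch. For reference, a 0-D (scalar) input's dynamic-shape entry is just an empty shape list, as in that branch:

self.dynamic_shape.min_input_shape = {"input_data": []}  # rank-0 scalar input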
70 changes: 36 additions & 34 deletions test/ir/inference/test_trt_convert_grid_sampler.py
@@ -61,7 +61,7 @@ def generate_input2():
}
)

for dims in [4, 5]:
for dims in [4]:
for desc in descs:
self.dims = dims
ops_config = [
@@ -81,48 +81,50 @@
ops=ops,
weights={},
inputs={
"input_data": TensorConfig(
data_gen=partial(generate_input1)
),
"grid_data": TensorConfig(
data_gen=partial(generate_input2)
),
"input_data": TensorConfig(
data_gen=partial(generate_input1)
),
},
outputs=["output_data"],
)

yield program_config

def generate_dynamic_shape(self, attrs):
if self.dims == 4:
self.dynamic_shape.min_input_shape = {
"input_data": [1, 3, 32, 32],
"grid_data": [1, 3, 3, 2],
}
self.dynamic_shape.max_input_shape = {
"input_data": [1, 3, 64, 64],
"grid_data": [1, 3, 6, 2],
}
self.dynamic_shape.opt_input_shape = {
"input_data": [1, 3, 32, 32],
"grid_data": [1, 3, 3, 2],
}
elif self.dims == 5:
self.dynamic_shape.min_input_shape = {
"input_data": [1, 3, 32, 32, 64],
"grid_data": [1, 3, 3, 2, 3],
}
self.dynamic_shape.max_input_shape = {
"input_data": [1, 3, 64, 64, 128],
"grid_data": [1, 3, 3, 6, 3],
}
self.dynamic_shape.opt_input_shape = {
"input_data": [1, 3, 32, 32, 64],
"grid_data": [1, 3, 3, 2, 3],
}
return self.dynamic_shape

def sample_predictor_configs(
self, program_config
self, program_config, run_pir=False
) -> tuple[paddle_infer.Config, list[int], float]:
def generate_dynamic_shape():
if self.dims == 4:
self.dynamic_shape.min_input_shape = {
"input_data": [1, 3, 32, 32],
"grid_data": [1, 3, 3, 2],
}
self.dynamic_shape.max_input_shape = {
"input_data": [1, 3, 64, 64],
"grid_data": [1, 3, 6, 2],
}
self.dynamic_shape.opt_input_shape = {
"input_data": [1, 3, 32, 32],
"grid_data": [1, 3, 3, 2],
}
elif self.dims == 5:
self.dynamic_shape.min_input_shape = {
"input_data": [1, 3, 32, 32, 64],
"grid_data": [1, 3, 3, 2, 3],
}
self.dynamic_shape.max_input_shape = {
"input_data": [1, 3, 64, 64, 128],
"grid_data": [1, 3, 3, 6, 3],
}
self.dynamic_shape.opt_input_shape = {
"input_data": [1, 3, 32, 32, 64],
"grid_data": [1, 3, 3, 2, 3],
}

def clear_dynamic_shape():
self.dynamic_shape.max_input_shape = {}
@@ -137,14 +139,14 @@ def clear_dynamic_shape():
clear_dynamic_shape()

# for dynamic_shape
generate_dynamic_shape()
self.generate_dynamic_shape(attrs)
self.trt_param.precision = paddle_infer.PrecisionType.Float32
yield self.create_inference_config(), (1, 3), 1e-5
self.trt_param.precision = paddle_infer.PrecisionType.Half
yield self.create_inference_config(), (1, 3), 1e-3

def test(self):
self.run_test()
self.run_test(run_pir=True)


if __name__ == "__main__":