From abf411fd0d205bf8f9f644d7972bd058a9d27f30 Mon Sep 17 00:00:00 2001
From: zhupengyang
Date: Tue, 18 Jan 2022 09:46:17 +0000
Subject: [PATCH] remove -1 device in host kernels; test=develop

---
 lite/kernels/host/compare_compute.cc          | 285 +++++++++++-------
 lite/kernels/host/fetch_compute.cc            |  10 +-
 lite/kernels/host/flatten_compute.cc          |  15 +-
 lite/kernels/host/shape_compute.cc            |  10 +-
 lite/kernels/host/squeeze_compute.cc          |  25 +-
 lite/kernels/host/stack_compute.cc            |  30 +-
 .../host/unique_with_counts_compute.cc        |  20 +-
 lite/kernels/host/unsqueeze_compute.cc        |  45 +--
 lite/kernels/host/unstack_compute.cc          |  20 +-
 9 files changed, 276 insertions(+), 184 deletions(-)

diff --git a/lite/kernels/host/compare_compute.cc b/lite/kernels/host/compare_compute.cc
index 6d65b1fb306..f17731ca657 100644
--- a/lite/kernels/host/compare_compute.cc
+++ b/lite/kernels/host/compare_compute.cc
@@ -238,14 +238,17 @@ using equal_float = paddle::lite::kernels::host::CompareCompute<
 paddle::lite::kernels::host::_EqualFunctor>;
 REGISTER_LITE_KERNEL(equal, kHost, kFloat, kAny, equal_float, def)
     .BindInput("X",
-               {LiteType::GetTensorTy(
-                   TARGET(kHost), PRECISION(kFloat), DATALAYOUT(kAny), -1)})
+               {LiteType::GetTensorTy(TARGET(kHost),
+                                      PRECISION(kFloat),
+                                      DATALAYOUT(kAny))})
     .BindInput("Y",
-               {LiteType::GetTensorTy(
-                   TARGET(kHost), PRECISION(kFloat), DATALAYOUT(kAny), -1)})
+               {LiteType::GetTensorTy(TARGET(kHost),
+                                      PRECISION(kFloat),
+                                      DATALAYOUT(kAny))})
     .BindOutput("Out",
-               {LiteType::GetTensorTy(
-                   TARGET(kHost), PRECISION(kBool), DATALAYOUT(kAny), -1)})
+               {LiteType::GetTensorTy(TARGET(kHost),
+                                      PRECISION(kBool),
+                                      DATALAYOUT(kAny))})
     .BindPaddleOpVersion("equal", 1)
     .Finalize();
@@ -254,14 +257,17 @@ using equal_int64 = paddle::lite::kernels::host::CompareCompute<
 paddle::lite::kernels::host::_EqualFunctor>;
 REGISTER_LITE_KERNEL(equal, kHost, kInt64, kAny, equal_int64, def)
     .BindInput("X",
-               {LiteType::GetTensorTy(
-                   TARGET(kHost), PRECISION(kInt64), DATALAYOUT(kAny), -1)})
+               {LiteType::GetTensorTy(TARGET(kHost),
+                                      PRECISION(kInt64),
+                                      DATALAYOUT(kAny))})
     .BindInput("Y",
-               {LiteType::GetTensorTy(
-                   TARGET(kHost), PRECISION(kInt64), DATALAYOUT(kAny), -1)})
+               {LiteType::GetTensorTy(TARGET(kHost),
+                                      PRECISION(kInt64),
+                                      DATALAYOUT(kAny))})
     .BindOutput("Out",
-               {LiteType::GetTensorTy(
-                   TARGET(kHost), PRECISION(kBool), DATALAYOUT(kAny), -1)})
+               {LiteType::GetTensorTy(TARGET(kHost),
+                                      PRECISION(kBool),
+                                      DATALAYOUT(kAny))})
     .BindPaddleOpVersion("equal", 1)
     .Finalize();
@@ -272,14 +278,17 @@ using equal_int64_f = paddle::lite::kernels::host::CompareCompute<
 paddle::lite::kernels::host::_EqualFunctor>;
 REGISTER_LITE_KERNEL(equal, kHost, kFloat, kAny, equal_int64_f, int64)
     .BindInput("X",
-               {LiteType::GetTensorTy(
-                   TARGET(kHost), PRECISION(kInt64), DATALAYOUT(kAny), -1)})
+               {LiteType::GetTensorTy(TARGET(kHost),
+                                      PRECISION(kInt64),
+                                      DATALAYOUT(kAny))})
     .BindInput("Y",
-               {LiteType::GetTensorTy(
-                   TARGET(kHost), PRECISION(kInt64), DATALAYOUT(kAny), -1)})
+               {LiteType::GetTensorTy(TARGET(kHost),
+                                      PRECISION(kInt64),
+                                      DATALAYOUT(kAny))})
     .BindOutput("Out",
-               {LiteType::GetTensorTy(
-                   TARGET(kHost), PRECISION(kBool), DATALAYOUT(kAny), -1)})
+               {LiteType::GetTensorTy(TARGET(kHost),
+                                      PRECISION(kBool),
+                                      DATALAYOUT(kAny))})
     .BindPaddleOpVersion("equal", 1)
     .Finalize();
@@ -288,14 +297,17 @@ using equal_int32 = paddle::lite::kernels::host::CompareCompute<
 paddle::lite::kernels::host::_EqualFunctor>;
 REGISTER_LITE_KERNEL(equal, kHost, kInt32, kAny, equal_int32, def)
     .BindInput("X",
-               {LiteType::GetTensorTy(
-                   TARGET(kHost), PRECISION(kInt32), DATALAYOUT(kAny), -1)})
+               {LiteType::GetTensorTy(TARGET(kHost),
+                                      PRECISION(kInt32),
+                                      DATALAYOUT(kAny))})
     .BindInput("Y",
-               {LiteType::GetTensorTy(
-                   TARGET(kHost), PRECISION(kInt32), DATALAYOUT(kAny), -1)})
+               {LiteType::GetTensorTy(TARGET(kHost),
+                                      PRECISION(kInt32),
+                                      DATALAYOUT(kAny))})
     .BindOutput("Out",
-               {LiteType::GetTensorTy(
-                   TARGET(kHost), PRECISION(kBool), DATALAYOUT(kAny), -1)})
+               {LiteType::GetTensorTy(TARGET(kHost),
+                                      PRECISION(kBool),
+                                      DATALAYOUT(kAny))})
     .BindPaddleOpVersion("equal", 1)
     .Finalize();
@@ -305,14 +317,17 @@ using equal_int32_f = paddle::lite::kernels::host::CompareCompute<
 paddle::lite::kernels::host::_EqualFunctor>;
 REGISTER_LITE_KERNEL(equal, kHost, kFloat, kAny, equal_int32_f, int32)
     .BindInput("X",
-               {LiteType::GetTensorTy(
-                   TARGET(kHost), PRECISION(kInt32), DATALAYOUT(kAny), -1)})
+               {LiteType::GetTensorTy(TARGET(kHost),
+                                      PRECISION(kInt32),
+                                      DATALAYOUT(kAny))})
     .BindInput("Y",
-               {LiteType::GetTensorTy(
-                   TARGET(kHost), PRECISION(kInt32), DATALAYOUT(kAny), -1)})
+               {LiteType::GetTensorTy(TARGET(kHost),
+                                      PRECISION(kInt32),
+                                      DATALAYOUT(kAny))})
     .BindOutput("Out",
-               {LiteType::GetTensorTy(
-                   TARGET(kHost), PRECISION(kBool), DATALAYOUT(kAny), -1)})
+               {LiteType::GetTensorTy(TARGET(kHost),
+                                      PRECISION(kBool),
+                                      DATALAYOUT(kAny))})
     .BindPaddleOpVersion("equal", 1)
     .Finalize();
@@ -321,14 +336,17 @@ using not_equal_float = paddle::lite::kernels::host::CompareCompute<
 paddle::lite::kernels::host::_NotEqualFunctor>;
 REGISTER_LITE_KERNEL(not_equal, kHost, kFloat, kAny, not_equal_float, def)
     .BindInput("X",
-               {LiteType::GetTensorTy(
-                   TARGET(kHost), PRECISION(kFloat), DATALAYOUT(kAny), -1)})
+               {LiteType::GetTensorTy(TARGET(kHost),
+                                      PRECISION(kFloat),
+                                      DATALAYOUT(kAny))})
     .BindInput("Y",
-               {LiteType::GetTensorTy(
-                   TARGET(kHost), PRECISION(kFloat), DATALAYOUT(kAny), -1)})
+               {LiteType::GetTensorTy(TARGET(kHost),
+                                      PRECISION(kFloat),
+                                      DATALAYOUT(kAny))})
     .BindOutput("Out",
-               {LiteType::GetTensorTy(
-                   TARGET(kHost), PRECISION(kBool), DATALAYOUT(kAny), -1)})
+               {LiteType::GetTensorTy(TARGET(kHost),
+                                      PRECISION(kBool),
+                                      DATALAYOUT(kAny))})
     .BindPaddleOpVersion("not_equal", 1)
     .Finalize();
@@ -337,14 +355,17 @@ using not_equal_int32 = paddle::lite::kernels::host::CompareCompute<
 paddle::lite::kernels::host::_NotEqualFunctor>;
 REGISTER_LITE_KERNEL(not_equal, kHost, kFloat, kAny, not_equal_int32, int32)
     .BindInput("X",
-               {LiteType::GetTensorTy(
-                   TARGET(kHost), PRECISION(kInt32), DATALAYOUT(kAny), -1)})
+               {LiteType::GetTensorTy(TARGET(kHost),
+                                      PRECISION(kInt32),
+                                      DATALAYOUT(kAny))})
     .BindInput("Y",
-               {LiteType::GetTensorTy(
-                   TARGET(kHost), PRECISION(kInt32), DATALAYOUT(kAny), -1)})
+               {LiteType::GetTensorTy(TARGET(kHost),
+                                      PRECISION(kInt32),
+                                      DATALAYOUT(kAny))})
     .BindOutput("Out",
-               {LiteType::GetTensorTy(
-                   TARGET(kHost), PRECISION(kBool), DATALAYOUT(kAny), -1)})
+               {LiteType::GetTensorTy(TARGET(kHost),
+                                      PRECISION(kBool),
+                                      DATALAYOUT(kAny))})
     .BindPaddleOpVersion("not_equal", 1)
     .Finalize();
@@ -353,14 +374,17 @@ using not_equal_int64 = paddle::lite::kernels::host::CompareCompute<
 paddle::lite::kernels::host::_NotEqualFunctor>;
 REGISTER_LITE_KERNEL(not_equal, kHost, kFloat, kAny, not_equal_int64, int64)
     .BindInput("X",
-               {LiteType::GetTensorTy(
-                   TARGET(kHost), PRECISION(kInt64), DATALAYOUT(kAny), -1)})
+               {LiteType::GetTensorTy(TARGET(kHost),
+                                      PRECISION(kInt64),
+                                      DATALAYOUT(kAny))})
     .BindInput("Y",
-               {LiteType::GetTensorTy(
-                   TARGET(kHost), PRECISION(kInt64), DATALAYOUT(kAny), -1)})
+               {LiteType::GetTensorTy(TARGET(kHost),
+                                      PRECISION(kInt64),
+                                      DATALAYOUT(kAny))})
     .BindOutput("Out",
-               {LiteType::GetTensorTy(
-                   TARGET(kHost), PRECISION(kBool), DATALAYOUT(kAny), -1)})
+               {LiteType::GetTensorTy(TARGET(kHost),
+                                      PRECISION(kBool),
+                                      DATALAYOUT(kAny))})
     .BindPaddleOpVersion("not_equal", 1)
     .Finalize();
@@ -369,14 +393,17 @@ using less_than_float = paddle::lite::kernels::host::CompareCompute<
 paddle::lite::kernels::host::_LessThanFunctor>;
 REGISTER_LITE_KERNEL(less_than, kHost, kFloat, kAny, less_than_float, def)
     .BindInput("X",
-               {LiteType::GetTensorTy(
-                   TARGET(kHost), PRECISION(kFloat), DATALAYOUT(kAny), -1)})
+               {LiteType::GetTensorTy(TARGET(kHost),
+                                      PRECISION(kFloat),
+                                      DATALAYOUT(kAny))})
     .BindInput("Y",
-               {LiteType::GetTensorTy(
-                   TARGET(kHost), PRECISION(kFloat), DATALAYOUT(kAny), -1)})
+               {LiteType::GetTensorTy(TARGET(kHost),
+                                      PRECISION(kFloat),
+                                      DATALAYOUT(kAny))})
     .BindOutput("Out",
-               {LiteType::GetTensorTy(
-                   TARGET(kHost), PRECISION(kBool), DATALAYOUT(kAny), -1)})
+               {LiteType::GetTensorTy(TARGET(kHost),
+                                      PRECISION(kBool),
+                                      DATALAYOUT(kAny))})
     .BindPaddleOpVersion("less_than", 1)
     .Finalize();
@@ -385,14 +412,17 @@ using less_than_int32 = paddle::lite::kernels::host::CompareCompute<
 paddle::lite::kernels::host::_LessThanFunctor>;
 REGISTER_LITE_KERNEL(less_than, kHost, kInt32, kAny, less_than_int32, def)
     .BindInput("X",
-               {LiteType::GetTensorTy(
-                   TARGET(kHost), PRECISION(kInt32), DATALAYOUT(kAny), -1)})
+               {LiteType::GetTensorTy(TARGET(kHost),
+                                      PRECISION(kInt32),
+                                      DATALAYOUT(kAny))})
     .BindInput("Y",
-               {LiteType::GetTensorTy(
-                   TARGET(kHost), PRECISION(kInt32), DATALAYOUT(kAny), -1)})
+               {LiteType::GetTensorTy(TARGET(kHost),
+                                      PRECISION(kInt32),
+                                      DATALAYOUT(kAny))})
     .BindOutput("Out",
-               {LiteType::GetTensorTy(
-                   TARGET(kHost), PRECISION(kBool), DATALAYOUT(kAny), -1)})
+               {LiteType::GetTensorTy(TARGET(kHost),
+                                      PRECISION(kBool),
+                                      DATALAYOUT(kAny))})
     .BindPaddleOpVersion("less_than", 1)
     .Finalize();
@@ -421,14 +451,17 @@ using less_than_int64 = paddle::lite::kernels::host::CompareCompute<
 paddle::lite::kernels::host::_LessThanFunctor>;
 REGISTER_LITE_KERNEL(less_than, kHost, kInt64, kAny, less_than_int64, def)
     .BindInput("X",
-               {LiteType::GetTensorTy(
-                   TARGET(kHost), PRECISION(kInt64), DATALAYOUT(kAny), -1)})
+               {LiteType::GetTensorTy(TARGET(kHost),
+                                      PRECISION(kInt64),
+                                      DATALAYOUT(kAny))})
     .BindInput("Y",
-               {LiteType::GetTensorTy(
-                   TARGET(kHost), PRECISION(kInt64), DATALAYOUT(kAny), -1)})
+               {LiteType::GetTensorTy(TARGET(kHost),
+                                      PRECISION(kInt64),
+                                      DATALAYOUT(kAny))})
     .BindOutput("Out",
-               {LiteType::GetTensorTy(
-                   TARGET(kHost), PRECISION(kBool), DATALAYOUT(kAny), -1)})
+               {LiteType::GetTensorTy(TARGET(kHost),
+                                      PRECISION(kBool),
+                                      DATALAYOUT(kAny))})
     .BindPaddleOpVersion("less_than", 1)
     .Finalize();
@@ -457,14 +490,17 @@ using less_equal_float = paddle::lite::kernels::host::CompareCompute<
 paddle::lite::kernels::host::_LessEqualFunctor>;
 REGISTER_LITE_KERNEL(less_equal, kHost, kFloat, kAny, less_equal_float, def)
     .BindInput("X",
-               {LiteType::GetTensorTy(
-                   TARGET(kHost), PRECISION(kFloat), DATALAYOUT(kAny), -1)})
+               {LiteType::GetTensorTy(TARGET(kHost),
+                                      PRECISION(kFloat),
+                                      DATALAYOUT(kAny))})
     .BindInput("Y",
-               {LiteType::GetTensorTy(
-                   TARGET(kHost), PRECISION(kFloat), DATALAYOUT(kAny), -1)})
+               {LiteType::GetTensorTy(TARGET(kHost),
+                                      PRECISION(kFloat),
+                                      DATALAYOUT(kAny))})
     .BindOutput("Out",
-               {LiteType::GetTensorTy(
-                   TARGET(kHost), PRECISION(kBool), DATALAYOUT(kAny), -1)})
+               {LiteType::GetTensorTy(TARGET(kHost),
+                                      PRECISION(kBool),
+                                      DATALAYOUT(kAny))})
     .BindPaddleOpVersion("less_equal", 1)
     .Finalize();
@@ -473,14 +509,17 @@ using less_equal_int64 = paddle::lite::kernels::host::CompareCompute<
 paddle::lite::kernels::host::_LessEqualFunctor>;
 REGISTER_LITE_KERNEL(less_equal, kHost, kInt64, kAny, less_equal_int64, def)
     .BindInput("X",
-               {LiteType::GetTensorTy(
-                   TARGET(kHost), PRECISION(kInt64), DATALAYOUT(kAny), -1)})
+               {LiteType::GetTensorTy(TARGET(kHost),
+                                      PRECISION(kInt64),
+                                      DATALAYOUT(kAny))})
     .BindInput("Y",
-               {LiteType::GetTensorTy(
-                   TARGET(kHost), PRECISION(kInt64), DATALAYOUT(kAny), -1)})
+               {LiteType::GetTensorTy(TARGET(kHost),
+                                      PRECISION(kInt64),
+                                      DATALAYOUT(kAny))})
     .BindOutput("Out",
-               {LiteType::GetTensorTy(
-                   TARGET(kHost), PRECISION(kBool), DATALAYOUT(kAny), -1)})
+               {LiteType::GetTensorTy(TARGET(kHost),
+                                      PRECISION(kBool),
+                                      DATALAYOUT(kAny))})
     .BindPaddleOpVersion("less_equal", 1)
     .Finalize();
@@ -529,14 +568,17 @@ using greater_than_float = paddle::lite::kernels::host::CompareCompute<
 paddle::lite::kernels::host::_GreaterThanFunctor>;
 REGISTER_LITE_KERNEL(greater_than, kHost, kFloat, kAny, greater_than_float, def)
     .BindInput("X",
-               {LiteType::GetTensorTy(
-                   TARGET(kHost), PRECISION(kFloat), DATALAYOUT(kAny), -1)})
+               {LiteType::GetTensorTy(TARGET(kHost),
+                                      PRECISION(kFloat),
+                                      DATALAYOUT(kAny))})
     .BindInput("Y",
-               {LiteType::GetTensorTy(
-                   TARGET(kHost), PRECISION(kFloat), DATALAYOUT(kAny), -1)})
+               {LiteType::GetTensorTy(TARGET(kHost),
+                                      PRECISION(kFloat),
+                                      DATALAYOUT(kAny))})
     .BindOutput("Out",
-               {LiteType::GetTensorTy(
-                   TARGET(kHost), PRECISION(kBool), DATALAYOUT(kAny), -1)})
+               {LiteType::GetTensorTy(TARGET(kHost),
+                                      PRECISION(kBool),
+                                      DATALAYOUT(kAny))})
     .BindPaddleOpVersion("greater_than", 1)
     .Finalize();
@@ -545,14 +587,17 @@ using greater_than_bool = paddle::lite::kernels::host::CompareCompute<
 paddle::lite::kernels::host::_GreaterThanFunctor>;
 REGISTER_LITE_KERNEL(greater_than, kHost, kFloat, kAny, greater_than_bool, bool)
     .BindInput("X",
-               {LiteType::GetTensorTy(
-                   TARGET(kHost), PRECISION(kBool), DATALAYOUT(kAny), -1)})
+               {LiteType::GetTensorTy(TARGET(kHost),
+                                      PRECISION(kBool),
+                                      DATALAYOUT(kAny))})
     .BindInput("Y",
-               {LiteType::GetTensorTy(
-                   TARGET(kHost), PRECISION(kBool), DATALAYOUT(kAny), -1)})
+               {LiteType::GetTensorTy(TARGET(kHost),
+                                      PRECISION(kBool),
+                                      DATALAYOUT(kAny))})
     .BindOutput("Out",
-               {LiteType::GetTensorTy(
-                   TARGET(kHost), PRECISION(kBool), DATALAYOUT(kAny), -1)})
+               {LiteType::GetTensorTy(TARGET(kHost),
+                                      PRECISION(kBool),
+                                      DATALAYOUT(kAny))})
     .BindPaddleOpVersion("greater_than", 1)
     .Finalize();
@@ -561,14 +606,17 @@ using greater_than_int32 = paddle::lite::kernels::host::CompareCompute<
 paddle::lite::kernels::host::_GreaterThanFunctor>;
 REGISTER_LITE_KERNEL(greater_than, kHost, kInt32, kAny, greater_than_int32, def)
     .BindInput("X",
-               {LiteType::GetTensorTy(
-                   TARGET(kHost), PRECISION(kInt32), DATALAYOUT(kAny), -1)})
+               {LiteType::GetTensorTy(TARGET(kHost),
+                                      PRECISION(kInt32),
+                                      DATALAYOUT(kAny))})
     .BindInput("Y",
-               {LiteType::GetTensorTy(
-                   TARGET(kHost), PRECISION(kInt32), DATALAYOUT(kAny), -1)})
+               {LiteType::GetTensorTy(TARGET(kHost),
+                                      PRECISION(kInt32),
+                                      DATALAYOUT(kAny))})
     .BindOutput("Out",
-               {LiteType::GetTensorTy(
-                   TARGET(kHost), PRECISION(kBool), DATALAYOUT(kAny), -1)})
+               {LiteType::GetTensorTy(TARGET(kHost),
+                                      PRECISION(kBool),
+                                      DATALAYOUT(kAny))})
     .BindPaddleOpVersion("greater_than", 1)
     .Finalize();
@@ -577,14 +625,17 @@ using greater_than_int64 = paddle::lite::kernels::host::CompareCompute<
 paddle::lite::kernels::host::_GreaterThanFunctor>;
 REGISTER_LITE_KERNEL(greater_than, kHost, kInt64, kAny, greater_than_int64, def)
     .BindInput("X",
-               {LiteType::GetTensorTy(
-                   TARGET(kHost), PRECISION(kInt64), DATALAYOUT(kAny), -1)})
+               {LiteType::GetTensorTy(TARGET(kHost),
+                                      PRECISION(kInt64),
+                                      DATALAYOUT(kAny))})
     .BindInput("Y",
-               {LiteType::GetTensorTy(
-                   TARGET(kHost), PRECISION(kInt64), DATALAYOUT(kAny), -1)})
+               {LiteType::GetTensorTy(TARGET(kHost),
+                                      PRECISION(kInt64),
+                                      DATALAYOUT(kAny))})
     .BindOutput("Out",
-               {LiteType::GetTensorTy(
-                   TARGET(kHost), PRECISION(kBool), DATALAYOUT(kAny), -1)})
+               {LiteType::GetTensorTy(TARGET(kHost),
+                                      PRECISION(kBool),
+                                      DATALAYOUT(kAny))})
     .BindPaddleOpVersion("greater_than", 1)
     .Finalize();
@@ -615,14 +666,17 @@ using greater_equal_float = paddle::lite::kernels::host::CompareCompute<
 REGISTER_LITE_KERNEL(
     greater_equal, kHost, kFloat, kAny, greater_equal_float, def)
     .BindInput("X",
-               {LiteType::GetTensorTy(
-                   TARGET(kHost), PRECISION(kFloat), DATALAYOUT(kAny), -1)})
+               {LiteType::GetTensorTy(TARGET(kHost),
+                                      PRECISION(kFloat),
+                                      DATALAYOUT(kAny))})
     .BindInput("Y",
-               {LiteType::GetTensorTy(
-                   TARGET(kHost), PRECISION(kFloat), DATALAYOUT(kAny), -1)})
+               {LiteType::GetTensorTy(TARGET(kHost),
+                                      PRECISION(kFloat),
+                                      DATALAYOUT(kAny))})
     .BindOutput("Out",
-               {LiteType::GetTensorTy(
-                   TARGET(kHost), PRECISION(kBool), DATALAYOUT(kAny), -1)})
+               {LiteType::GetTensorTy(TARGET(kHost),
+                                      PRECISION(kBool),
+                                      DATALAYOUT(kAny))})
     .BindPaddleOpVersion("greater_equal", 1)
     .Finalize();
@@ -632,13 +686,16 @@ using greater_equal_int64 = paddle::lite::kernels::host::CompareCompute<
 REGISTER_LITE_KERNEL(
     greater_equal, kHost, kFloat, kAny, greater_equal_int64, def_int64)
     .BindInput("X",
-               {LiteType::GetTensorTy(
-                   TARGET(kHost), PRECISION(kInt64), DATALAYOUT(kAny), -1)})
+               {LiteType::GetTensorTy(TARGET(kHost),
+                                      PRECISION(kInt64),
+                                      DATALAYOUT(kAny))})
     .BindInput("Y",
-               {LiteType::GetTensorTy(
-                   TARGET(kHost), PRECISION(kInt64), DATALAYOUT(kAny), -1)})
+               {LiteType::GetTensorTy(TARGET(kHost),
+                                      PRECISION(kInt64),
+                                      DATALAYOUT(kAny))})
     .BindOutput("Out",
-               {LiteType::GetTensorTy(
-                   TARGET(kHost), PRECISION(kBool), DATALAYOUT(kAny), -1)})
+               {LiteType::GetTensorTy(TARGET(kHost),
+                                      PRECISION(kBool),
+                                      DATALAYOUT(kAny))})
     .BindPaddleOpVersion("greater_equal", 1)
     .Finalize();
diff --git a/lite/kernels/host/fetch_compute.cc b/lite/kernels/host/fetch_compute.cc
index c53b987b8f8..8ce15519b7c 100644
--- a/lite/kernels/host/fetch_compute.cc
+++ b/lite/kernels/host/fetch_compute.cc
@@ -45,9 +45,11 @@ class FetchCompute
 REGISTER_LITE_KERNEL(
     fetch, kHost, kAny, kAny, paddle::lite::kernels::host::FetchCompute, def)
     .BindInput("X",
-               {LiteType::GetTensorTy(
-                   TARGET(kHost), PRECISION(kAny), DATALAYOUT(kAny), -1)})
+               {LiteType::GetTensorTy(TARGET(kHost),
+                                      PRECISION(kAny),
+                                      DATALAYOUT(kAny))})
     .BindOutput("Out",
-               {LiteType::GetTensorTy(
-                   TARGET(kHost), PRECISION(kAny), DATALAYOUT(kAny), -1)})
+               {LiteType::GetTensorTy(TARGET(kHost),
+                                      PRECISION(kAny),
+                                      DATALAYOUT(kAny))})
     .Finalize();
diff --git a/lite/kernels/host/flatten_compute.cc b/lite/kernels/host/flatten_compute.cc
index c0df41c536a..feca3deda65 100644
--- a/lite/kernels/host/flatten_compute.cc
+++ b/lite/kernels/host/flatten_compute.cc
@@ -43,12 +43,15 @@ REGISTER_LITE_KERNEL(flatten_contiguous_range,
                      paddle::lite::kernels::host::FlattenContiguousRangeCompute,
                      def)
     .BindInput("X",
-               {LiteType::GetTensorTy(
-                   TARGET(kHost), PRECISION(kAny), DATALAYOUT(kAny), -1)})
+               {LiteType::GetTensorTy(TARGET(kHost),
+                                      PRECISION(kAny),
+                                      DATALAYOUT(kAny))})
     .BindOutput("Out",
-               {LiteType::GetTensorTy(
-                   TARGET(kHost), PRECISION(kAny), DATALAYOUT(kAny), -1)})
+               {LiteType::GetTensorTy(TARGET(kHost),
+                                      PRECISION(kAny),
+                                      DATALAYOUT(kAny))})
     .BindOutput("XShape",
-               {LiteType::GetTensorTy(
-                   TARGET(kHost), PRECISION(kAny), DATALAYOUT(kAny), -1)})
+               {LiteType::GetTensorTy(TARGET(kHost),
+                                      PRECISION(kAny),
+                                      DATALAYOUT(kAny))})
     .Finalize();
diff --git a/lite/kernels/host/shape_compute.cc b/lite/kernels/host/shape_compute.cc
index 03f5e608edf..8b202bf233e 100644
--- a/lite/kernels/host/shape_compute.cc
+++ b/lite/kernels/host/shape_compute.cc
@@ -36,9 +36,11 @@ void ShapeCompute::Run() {
 REGISTER_LITE_KERNEL(
     shape, kHost, kAny, kAny, paddle::lite::kernels::host::ShapeCompute, def)
     .BindInput("Input",
-               {LiteType::GetTensorTy(
-                   TARGET(kHost), PRECISION(kAny), DATALAYOUT(kAny), -1)})
+               {LiteType::GetTensorTy(TARGET(kHost),
+                                      PRECISION(kAny),
+                                      DATALAYOUT(kAny))})
     .BindOutput("Out",
-               {LiteType::GetTensorTy(
-                   TARGET(kHost), PRECISION(kInt32), DATALAYOUT(kAny), -1)})
+               {LiteType::GetTensorTy(TARGET(kHost),
+                                      PRECISION(kInt32),
+                                      DATALAYOUT(kAny))})
     .Finalize();
diff --git a/lite/kernels/host/squeeze_compute.cc b/lite/kernels/host/squeeze_compute.cc
index 205e47c7100..63c4a34e65e 100644
--- a/lite/kernels/host/squeeze_compute.cc
+++ b/lite/kernels/host/squeeze_compute.cc
@@ -45,11 +45,13 @@ REGISTER_LITE_KERNEL(squeeze,
                      paddle::lite::kernels::host::SqueezeCompute,
                      def)
     .BindInput("X",
-               {LiteType::GetTensorTy(
-                   TARGET(kHost), PRECISION(kAny), DATALAYOUT(kAny), -1)})
+               {LiteType::GetTensorTy(TARGET(kHost),
+                                      PRECISION(kAny),
+                                      DATALAYOUT(kAny))})
     .BindOutput("Out",
-               {LiteType::GetTensorTy(
-                   TARGET(kHost), PRECISION(kAny), DATALAYOUT(kAny), -1)})
+               {LiteType::GetTensorTy(TARGET(kHost),
+                                      PRECISION(kAny),
+                                      DATALAYOUT(kAny))})
     .Finalize();
 
 REGISTER_LITE_KERNEL(squeeze2,
@@ -59,12 +61,15 @@ REGISTER_LITE_KERNEL(squeeze2,
                      paddle::lite::kernels::host::SqueezeCompute,
                      def)
     .BindInput("X",
-               {LiteType::GetTensorTy(
-                   TARGET(kHost), PRECISION(kAny), DATALAYOUT(kAny), -1)})
+               {LiteType::GetTensorTy(TARGET(kHost),
+                                      PRECISION(kAny),
+                                      DATALAYOUT(kAny))})
     .BindOutput("Out",
-               {LiteType::GetTensorTy(
-                   TARGET(kHost), PRECISION(kAny), DATALAYOUT(kAny), -1)})
+               {LiteType::GetTensorTy(TARGET(kHost),
+                                      PRECISION(kAny),
+                                      DATALAYOUT(kAny))})
     .BindOutput("XShape",
-               {LiteType::GetTensorTy(
-                   TARGET(kHost), PRECISION(kAny), DATALAYOUT(kAny), -1)})
+               {LiteType::GetTensorTy(TARGET(kHost),
+                                      PRECISION(kAny),
+                                      DATALAYOUT(kAny))})
     .Finalize();
diff --git a/lite/kernels/host/stack_compute.cc b/lite/kernels/host/stack_compute.cc
index a825700469b..7178d912ee8 100644
--- a/lite/kernels/host/stack_compute.cc
+++ b/lite/kernels/host/stack_compute.cc
@@ -61,31 +61,37 @@ using stack_float = paddle::lite::kernels::host::StackCompute;
 REGISTER_LITE_KERNEL(stack, kHost, kFloat, kAny, stack_float, def)
     .BindInput("X",
-               {LiteType::GetTensorTy(
-                   TARGET(kHost), PRECISION(kFloat), DATALAYOUT(kAny), -1)})
+               {LiteType::GetTensorTy(TARGET(kHost),
+                                      PRECISION(kFloat),
+                                      DATALAYOUT(kAny))})
     .BindOutput("Y",
-               {LiteType::GetTensorTy(
-                   TARGET(kHost), PRECISION(kFloat), DATALAYOUT(kAny), -1)})
+               {LiteType::GetTensorTy(TARGET(kHost),
+                                      PRECISION(kFloat),
+                                      DATALAYOUT(kAny))})
     .Finalize();
 
 using stack_int32 = paddle::lite::kernels::host::StackCompute;
 REGISTER_LITE_KERNEL(stack, kHost, kFloat, kAny, stack_int32, int32_def)
     .BindInput("X",
-               {LiteType::GetTensorTy(
-                   TARGET(kHost), PRECISION(kInt32), DATALAYOUT(kAny), -1)})
+               {LiteType::GetTensorTy(TARGET(kHost),
+                                      PRECISION(kInt32),
+                                      DATALAYOUT(kAny))})
     .BindOutput("Y",
-               {LiteType::GetTensorTy(
-                   TARGET(kHost), PRECISION(kInt32), DATALAYOUT(kAny), -1)})
+               {LiteType::GetTensorTy(TARGET(kHost),
+                                      PRECISION(kInt32),
+                                      DATALAYOUT(kAny))})
     .Finalize();
 
 using stack_int64 = paddle::lite::kernels::host::StackCompute;
 REGISTER_LITE_KERNEL(stack, kHost, kFloat, kAny, stack_int64, int64_def)
     .BindInput("X",
-               {LiteType::GetTensorTy(
-                   TARGET(kHost), PRECISION(kInt64), DATALAYOUT(kAny), -1)})
+               {LiteType::GetTensorTy(TARGET(kHost),
+                                      PRECISION(kInt64),
+                                      DATALAYOUT(kAny))})
     .BindOutput("Y",
-               {LiteType::GetTensorTy(
-                   TARGET(kHost), PRECISION(kInt64), DATALAYOUT(kAny), -1)})
+               {LiteType::GetTensorTy(TARGET(kHost),
+                                      PRECISION(kInt64),
+                                      DATALAYOUT(kAny))})
     .Finalize();
diff --git a/lite/kernels/host/unique_with_counts_compute.cc b/lite/kernels/host/unique_with_counts_compute.cc
index ef2fa6d6109..cae027197f0 100644
--- a/lite/kernels/host/unique_with_counts_compute.cc
+++ b/lite/kernels/host/unique_with_counts_compute.cc
@@ -162,15 +162,19 @@ REGISTER_LITE_KERNEL(unique_with_counts,
                      paddle::lite::kernels::host::UniqueWithCountsCompute,
                      def)
     .BindInput("X",
-               {LiteType::GetTensorTy(
-                   TARGET(kHost), PRECISION(kAny), DATALAYOUT(kAny), -1)})
+               {LiteType::GetTensorTy(TARGET(kHost),
+                                      PRECISION(kAny),
+                                      DATALAYOUT(kAny))})
     .BindOutput("Out",
-               {LiteType::GetTensorTy(
-                   TARGET(kHost), PRECISION(kAny), DATALAYOUT(kAny), -1)})
+               {LiteType::GetTensorTy(TARGET(kHost),
+                                      PRECISION(kAny),
+                                      DATALAYOUT(kAny))})
     .BindOutput("Index",
-               {LiteType::GetTensorTy(
-                   TARGET(kHost), PRECISION(kInt32), DATALAYOUT(kAny), -1)})
+               {LiteType::GetTensorTy(TARGET(kHost),
+                                      PRECISION(kInt32),
+                                      DATALAYOUT(kAny))})
     .BindOutput("Count",
-               {LiteType::GetTensorTy(
-                   TARGET(kHost), PRECISION(kInt32), DATALAYOUT(kAny), -1)})
+               {LiteType::GetTensorTy(TARGET(kHost),
+                                      PRECISION(kInt32),
+                                      DATALAYOUT(kAny))})
     .Finalize();
diff --git a/lite/kernels/host/unsqueeze_compute.cc b/lite/kernels/host/unsqueeze_compute.cc
index 65c9d9af41f..42b2345c4ae 100644
--- a/lite/kernels/host/unsqueeze_compute.cc
+++ b/lite/kernels/host/unsqueeze_compute.cc
@@ -46,17 +46,21 @@ REGISTER_LITE_KERNEL(unsqueeze,
                      paddle::lite::kernels::host::UnsqueezeCompute,
                      def)
     .BindInput("X",
-               {LiteType::GetTensorTy(
-                   TARGET(kHost), PRECISION(kAny), DATALAYOUT(kAny), -1)})
+               {LiteType::GetTensorTy(TARGET(kHost),
+                                      PRECISION(kAny),
+                                      DATALAYOUT(kAny))})
     .BindInput("AxesTensor",
-               {LiteType::GetTensorTy(
-                   TARGET(kHost), PRECISION(kInt32), DATALAYOUT(kAny), -1)})
+               {LiteType::GetTensorTy(TARGET(kHost),
+                                      PRECISION(kInt32),
+                                      DATALAYOUT(kAny))})
     .BindInput("AxesTensorList",
-               {LiteType::GetTensorTy(
-                   TARGET(kHost), PRECISION(kInt32), DATALAYOUT(kAny), -1)})
+               {LiteType::GetTensorTy(TARGET(kHost),
+                                      PRECISION(kInt32),
+                                      DATALAYOUT(kAny))})
     .BindOutput("Out",
-               {LiteType::GetTensorTy(
-                   TARGET(kHost), PRECISION(kAny), DATALAYOUT(kAny), -1)})
+               {LiteType::GetTensorTy(TARGET(kHost),
+                                      PRECISION(kAny),
+                                      DATALAYOUT(kAny))})
     .Finalize();
 
 REGISTER_LITE_KERNEL(unsqueeze2,
@@ -66,18 +70,23 @@ REGISTER_LITE_KERNEL(unsqueeze2,
                      paddle::lite::kernels::host::UnsqueezeCompute,
                      def)
     .BindInput("X",
-               {LiteType::GetTensorTy(
-                   TARGET(kHost), PRECISION(kAny), DATALAYOUT(kAny), -1)})
+               {LiteType::GetTensorTy(TARGET(kHost),
+                                      PRECISION(kAny),
+                                      DATALAYOUT(kAny))})
     .BindInput("AxesTensor",
-               {LiteType::GetTensorTy(
-                   TARGET(kHost), PRECISION(kInt32), DATALAYOUT(kAny), -1)})
+               {LiteType::GetTensorTy(TARGET(kHost),
+                                      PRECISION(kInt32),
+                                      DATALAYOUT(kAny))})
     .BindInput("AxesTensorList",
-               {LiteType::GetTensorTy(
-                   TARGET(kHost), PRECISION(kInt32), DATALAYOUT(kAny), -1)})
+               {LiteType::GetTensorTy(TARGET(kHost),
+                                      PRECISION(kInt32),
+                                      DATALAYOUT(kAny))})
     .BindOutput("Out",
-               {LiteType::GetTensorTy(
-                   TARGET(kHost), PRECISION(kAny), DATALAYOUT(kAny), -1)})
+               {LiteType::GetTensorTy(TARGET(kHost),
+                                      PRECISION(kAny),
+                                      DATALAYOUT(kAny))})
     .BindOutput("XShape",
-               {LiteType::GetTensorTy(
-                   TARGET(kHost), PRECISION(kAny), DATALAYOUT(kAny), -1)})
+               {LiteType::GetTensorTy(TARGET(kHost),
+                                      PRECISION(kAny),
+                                      DATALAYOUT(kAny))})
     .Finalize();
diff --git a/lite/kernels/host/unstack_compute.cc b/lite/kernels/host/unstack_compute.cc
index cbcb828f9a9..0cb2d7e0b60 100644
--- a/lite/kernels/host/unstack_compute.cc
+++ b/lite/kernels/host/unstack_compute.cc
@@ -60,20 +60,24 @@ using unstack_float = paddle::lite::kernels::host::UnstackCompute;
 REGISTER_LITE_KERNEL(unstack, kHost, kFloat, kAny, unstack_float, def)
     .BindInput("X",
-               {LiteType::GetTensorTy(
-                   TARGET(kHost), PRECISION(kFloat), DATALAYOUT(kAny), -1)})
+               {LiteType::GetTensorTy(TARGET(kHost),
+                                      PRECISION(kFloat),
+                                      DATALAYOUT(kAny))})
     .BindOutput("Y",
-               {LiteType::GetTensorTy(
-                   TARGET(kHost), PRECISION(kFloat), DATALAYOUT(kAny), -1)})
+               {LiteType::GetTensorTy(TARGET(kHost),
+                                      PRECISION(kFloat),
+                                      DATALAYOUT(kAny))})
     .Finalize();
 
 using unstack_int32 = paddle::lite::kernels::host::UnstackCompute;
 REGISTER_LITE_KERNEL(unstack, kHost, kFloat, kAny, unstack_int32, unstack_int32)
     .BindInput("X",
-               {LiteType::GetTensorTy(
-                   TARGET(kHost), PRECISION(kInt32), DATALAYOUT(kAny), -1)})
+               {LiteType::GetTensorTy(TARGET(kHost),
+                                      PRECISION(kInt32),
+                                      DATALAYOUT(kAny))})
     .BindOutput("Y",
-               {LiteType::GetTensorTy(
-                   TARGET(kHost), PRECISION(kInt32), DATALAYOUT(kAny), -1)})
+               {LiteType::GetTensorTy(TARGET(kHost),
+                                      PRECISION(kInt32),
+                                      DATALAYOUT(kAny))})
    .Finalize();
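Note: every hunk above is the same mechanical rewrite. The trailing -1 device-id argument is dropped from LiteType::GetTensorTy() in each kernel registration, presumably letting the registration fall back to GetTensorTy()'s default device id, and the remaining arguments are reflowed one per line. A minimal before/after sketch of the pattern, copied from one instance in the patch (the kFloat "X" binding); it is illustrative only and implies no change beyond the hunks above:

    // before: device id passed explicitly as -1
    .BindInput("X",
               {LiteType::GetTensorTy(
                   TARGET(kHost), PRECISION(kFloat), DATALAYOUT(kAny), -1)})

    // after: device id omitted (default assumed)
    .BindInput("X",
               {LiteType::GetTensorTy(TARGET(kHost),
                                      PRECISION(kFloat),
                                      DATALAYOUT(kAny))})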