diff --git a/csharp/test/Microsoft.ML.OnnxRuntime.Tests.NetCoreApp/InferenceTest.netcore.cs b/csharp/test/Microsoft.ML.OnnxRuntime.Tests.NetCoreApp/InferenceTest.netcore.cs
index 393b6dd1d11f7..f538daf8dcef3 100644
--- a/csharp/test/Microsoft.ML.OnnxRuntime.Tests.NetCoreApp/InferenceTest.netcore.cs
+++ b/csharp/test/Microsoft.ML.OnnxRuntime.Tests.NetCoreApp/InferenceTest.netcore.cs
@@ -229,6 +229,7 @@ private static Dictionary<string, string> GetSkippedModels(DirectoryInfo modelsD
                 { "tf_resnet_v1_50", "result mismatch when Conv BN Fusion is applied" },
                 { "tf_resnet_v1_101", "result mismatch when Conv BN Fusion is applied" },
                 { "tf_resnet_v1_152", "result mismatch when Conv BN Fusion is applied" },
+                { "cntk_simple_seg", "Bad onnx test output caused by wrong SAME_UPPER/SAME_LOWER for ConvTranspose" },
                 { "coreml_Imputer-LogisticRegression_sklearn_load_breast_cancer", "Can't determine model file name" },
                 { "mask_rcnn_keras", "Model should be edited to remove the extra outputs" },
                 { "test_strnormalizer_export_monday_casesensintive_lower", "ElementType not currently supported"},
@@ -304,7 +305,6 @@ private static Dictionary<string, string> GetSkippedModels(DirectoryInfo modelsD
                 { "test_min_uint16", "node test error"},
                 { "test_adam_multiple", "node test error"},
                 { "test_loop13_seq", "node test error"},
-                { "test_convtranspose_autopad_same", "node test error"},
                 { "test_training_dropout_default_mask", "node test error"},
                 { "test_min_int8", "node test error"},
                 { "test_identity_sequence", "data type not supported"},
diff --git a/onnxruntime/core/providers/cpu/nn/conv_transpose.h b/onnxruntime/core/providers/cpu/nn/conv_transpose.h
index 53025e3b13013..49c174969b38b 100644
--- a/onnxruntime/core/providers/cpu/nn/conv_transpose.h
+++ b/onnxruntime/core/providers/cpu/nn/conv_transpose.h
@@ -25,15 +25,7 @@ namespace onnxruntime {
 template <typename T>
 class ConvTranspose : public OpKernel {
  public:
-  ConvTranspose(const OpKernelInfo& info) : OpKernel(info), conv_transpose_attrs_(info) {
-    if (conv_transpose_attrs_.auto_pad == AutoPadType::SAME_UPPER ||
-        conv_transpose_attrs_.auto_pad == AutoPadType::SAME_LOWER) {
-      // TODO(jcwchen): #9740 ORT 1.13 will correct the logic by switching them to meet ONNX spec
-      LOGS_DEFAULT(WARNING) << "The existing bug in the padding distribution for auto_pad type"
-                            << " SAME_UPPER/SAME_LOWER will be fixed in next ORT 1.13 release and hence the"
-                            << " results of ConvTranspose operator using the above auto_pad type(s) will be different.";
-    }
-  }
+  ConvTranspose(const OpKernelInfo& info) : OpKernel(info), conv_transpose_attrs_(info) {}
 
   Status PrePack(const Tensor& tensor, int input_idx, AllocatorPtr alloc,
                  /*out*/ bool& is_packed,
diff --git a/onnxruntime/core/providers/cpu/nn/conv_transpose_attributes.h b/onnxruntime/core/providers/cpu/nn/conv_transpose_attributes.h
index ee74877da8d86..7d0bc287081c9 100644
--- a/onnxruntime/core/providers/cpu/nn/conv_transpose_attributes.h
+++ b/onnxruntime/core/providers/cpu/nn/conv_transpose_attributes.h
@@ -176,7 +176,7 @@ struct ConvTransposeAttributes : public ConvAttributes {
 
   void DistributePadding(AutoPadType pad_type, const int64_t& total_pad,
                          int64_t& pad_head, int64_t& pad_tail) const {
-    if (pad_type == AutoPadType::SAME_UPPER) {  // pad more on head when total_pad is odd.
+    if (pad_type == AutoPadType::SAME_LOWER) {  // pad more on head when total_pad is odd.
       pad_head = total_pad - total_pad / 2;
       pad_tail = total_pad / 2;
     } else {
diff --git a/onnxruntime/core/providers/dml/OperatorAuthorHelper/OperatorHelper.cpp b/onnxruntime/core/providers/dml/OperatorAuthorHelper/OperatorHelper.cpp
index 7ac27f659883a..a6df72565c89c 100644
--- a/onnxruntime/core/providers/dml/OperatorAuthorHelper/OperatorHelper.cpp
+++ b/onnxruntime/core/providers/dml/OperatorAuthorHelper/OperatorHelper.cpp
@@ -721,7 +721,7 @@ namespace OperatorHelper
             int paddings = gsl::narrow_cast<int>((inputDimensions[i + dimOffset] - 1) * stride + windowSize - m_outputShapes[0].GetShape()[i + dimOffset]);
             paddings = std::max(0, paddings);
-            m_kernel.startPadding[i] = m_kernel.autoPadSameUpper ? (paddings + 1) / 2 : paddings / 2;
+            m_kernel.startPadding[i] = m_kernel.autoPadSameUpper ? paddings / 2 : (paddings + 1) / 2;
             m_kernel.endPadding[i] = paddings - m_kernel.startPadding[i];
         }
     }
 
diff --git a/onnxruntime/test/onnx/main.cc b/onnxruntime/test/onnx/main.cc
index 671d90b11505b..58dd7bec5df81 100644
--- a/onnxruntime/test/onnx/main.cc
+++ b/onnxruntime/test/onnx/main.cc
@@ -627,12 +627,13 @@ select from 'TF8', 'TF16', 'UINT8', 'FLOAT', 'ITENSOR'. \n)");
       {"BERT_Squad", "test data bug"},
       {"constantofshape_float_ones", "test data bug", {"onnx141", "onnx150"}},
       {"constantofshape_int_zeros", "test data bug", {"onnx141", "onnx150"}},
-      {"convtranspose_autopad_same", "Implementation need to be adjusted for ONNX changes"},
+      {"convtranspose_autopad_same", "Test data has been corrected in ONNX 1.10.", {"onnx180", "onnx181", "onnx190"}},
       {"cast_STRING_to_FLOAT", "Linux CI has old ONNX python package with bad test data", {"onnx141"}},
       // Numpy float to string has unexpected rounding for some results given numpy default precision is meant to be 8.
       // "e.g. 0.296140194 -> '0.2961402' not '0.29614019'. ORT produces the latter with precision set to 8,
       // which doesn't match the expected output that was generated with numpy.
       {"cast_FLOAT_to_STRING", "Numpy float to string has unexpected rounding for some results."},
+      {"cntk_simple_seg", "Bad onnx test output caused by wrong SAME_UPPER/SAME_LOWER for ConvTranspose", {}},
       {"tf_nasnet_large", "disable temporarily"},
       {"tf_nasnet_mobile", "disable temporarily"},
       {"tf_pnasnet_large", "disable temporarily"},
diff --git a/onnxruntime/test/providers/cpu/model_tests.cc b/onnxruntime/test/providers/cpu/model_tests.cc
index 4961718e64ca1..01526789e2e09 100644
--- a/onnxruntime/test/providers/cpu/model_tests.cc
+++ b/onnxruntime/test/providers/cpu/model_tests.cc
@@ -166,6 +166,7 @@ TEST_P(ModelTest, Run) {
       {"bitshift_left_uint16", "BitShift(11) uint16 support not enabled currently"},
       {"maxunpool_export_with_output_shape",
        "Invalid output in ONNX test. See https://github.com/onnx/onnx/issues/2398"},
+      {"cntk_simple_seg", "Bad onnx test output caused by wrong SAME_UPPER/SAME_LOWER for ConvTranspose"},
       {"training_dropout", "result differs", {}},               // Temporary, subsequent PR will remove this.
       {"training_dropout_default", "result differs", {}},       // Temporary, subsequent PR will remove this.
       {"training_dropout_default_mask", "result differs", {}},  // Temporary, subsequent PR will remove this.
@@ -184,7 +185,7 @@ TEST_P(ModelTest, Run) {
       {"castlike_FLOAT_to_BFLOAT16_expanded", "type error", {}},
       {"castlike_FLOAT_to_STRING", "type error", {}},
       {"castlike_FLOAT_to_STRING_expanded", "type error", {}},
-      {"convtranspose_autopad_same", "type error", {}},
+      {"convtranspose_autopad_same", "Test data has been corrected in ONNX 1.10.", {"onnx180", "onnx181", "onnx190"}},
       {"gru_batchwise", "type error", {}},
       {"lstm_batchwise", "type error", {}},
       {"optional_get_element", "type error", {}},
diff --git a/onnxruntime/test/providers/cpu/nn/conv_transpose_op_test.cc b/onnxruntime/test/providers/cpu/nn/conv_transpose_op_test.cc
index 19ca6cf22793f..fae2bb3c6272d 100644
--- a/onnxruntime/test/providers/cpu/nn/conv_transpose_op_test.cc
+++ b/onnxruntime/test/providers/cpu/nn/conv_transpose_op_test.cc
@@ -1045,7 +1045,7 @@ TEST(ConvTransposeTest, ConvTranspose_1D_AutoPad_SameUpper) {
   vector<float> W = {1.0f, 1.0f, 1.0f, 1.0f};
   vector<int64_t> W_shape = {1, 2, 2};
   vector<int64_t> Y_shape = {1, 2, 4};
-  auto expected_vals = {3.0f, 5.0f, 7.0f, 4.0f, 3.0f, 5.0f, 7.0f, 4.0f};
+  auto expected_vals = {1.0f, 3.0f, 5.0f, 7.0f, 1.0f, 3.0f, 5.0f, 7.0f};
   TestConvTransposeOp(attrs, {X, W}, {X_shape, W_shape}, expected_vals, Y_shape,
                       OpTester::ExpectResult::kExpectSuccess, "",
                       {kTensorrtExecutionProvider, kOpenVINOExecutionProvider});  //Accuracy Mismatch on OpenVINO-EP
@@ -1068,7 +1068,7 @@ TEST(ConvTransposeTest, ConvTranspose_1D_AutoPad_SameLower) {
   vector<float> W = {1.0f, 1.0f, 1.0f, 1.0f};
   vector<int64_t> W_shape = {1, 2, 2};
   vector<int64_t> Y_shape = {1, 2, 4};
-  auto expected_vals = {1.0f, 3.0f, 5.0f, 7.0f, 1.0f, 3.0f, 5.0f, 7.0f};
+  auto expected_vals = {3.0f, 5.0f, 7.0f, 4.0f, 3.0f, 5.0f, 7.0f, 4.0f};
   TestConvTransposeOp(attrs, {X, W}, {X_shape, W_shape}, expected_vals, Y_shape,
                       OpTester::ExpectResult::kExpectSuccess, "",
                       {kTensorrtExecutionProvider, kOpenVINOExecutionProvider});  //Accuracy Mismatch on OpenVINO-EP
@@ -1083,7 +1083,7 @@ TEST(ConvTransposeTest, ConvTranspose_AutoPad_with_non_default_strides) {
       vector<int64_t>{2, 2},  // strides
       vector<int64_t>{1, 1},  // dilations
       1,                      // group
-      "SAME_LOWER"            // auto_pad
+      "SAME_UPPER"            // auto_pad
   };
 
   vector<float> X = {0.0f, 1.0f, 2.0f,
diff --git a/onnxruntime/test/testdata/onnx_backend_test_series_filters.jsonc b/onnxruntime/test/testdata/onnx_backend_test_series_filters.jsonc
index e6ed4e8346830..ce9bab78ba2d8 100644
--- a/onnxruntime/test/testdata/onnx_backend_test_series_filters.jsonc
+++ b/onnxruntime/test/testdata/onnx_backend_test_series_filters.jsonc
@@ -35,12 +35,7 @@
     "^test_batchnorm_epsilon_training_mode",
     "^test_batchnorm_example_old",
     "^test_batchnorm_example_training_mode",
-    "^convtranspose_autopad_same",
-    "^test_convtranspose_autopad_same",
-    "^test_convtranspose_autopad_same_cuda", // Due to changes in ONNX 1.9
     "^test_gathernd_example_int32_batch_dim1",
-    "^test_convtranspose_autopad_same", // ONNX 1.9 changes
-    "^test_convtranspose_autopad_same_cuda", // ONNX 1.9 changes
    "^test_max_int16",
    "^test_max_int8",
    "^test_max_uint16",
diff --git a/winml/test/model/model_tests.cpp b/winml/test/model/model_tests.cpp
index 8a9e937f7d89f..57833cf149f0a 100644
--- a/winml/test/model/model_tests.cpp
+++ b/winml/test/model/model_tests.cpp
@@ -227,6 +227,8 @@ static std::vector GetAllTestCases() {
   };
   allDisabledTests.insert(std::begin(x86DisabledTests), std::end(x86DisabledTests));
 #endif
+// Bad onnx test output caused by previously wrong SAME_UPPER/SAME_LOWER for ConvTranspose
+allDisabledTests.insert(ORT_TSTR("cntk_simple_seg"));
 
   WINML_EXPECT_NO_THROW(LoadTests(dataDirs, whitelistedTestCases,
                                   TestTolerances(1e-3, 1e-3, {}, {}),
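
For reference, the corrected split can be exercised in isolation. The sketch below mirrors the new DistributePadding logic from conv_transpose_attributes.h; the standalone AutoPadType enum and the main() driver are illustrative stand-ins, not ORT's actual declarations. For ConvTranspose's auto_pad, the ONNX spec places the extra element of an odd total padding at the end for SAME_UPPER and at the beginning for SAME_LOWER, which is what the patched code now does.

// Standalone illustration of the corrected ConvTranspose auto_pad split.
#include <cstdint>
#include <iostream>

enum class AutoPadType { NOTSET, SAME_UPPER, SAME_LOWER, VALID };

// Mirrors ConvTransposeAttributes::DistributePadding after this change.
void DistributePadding(AutoPadType pad_type, int64_t total_pad,
                       int64_t& pad_head, int64_t& pad_tail) {
  if (pad_type == AutoPadType::SAME_LOWER) {  // pad more on head when total_pad is odd
    pad_head = total_pad - total_pad / 2;
    pad_tail = total_pad / 2;
  } else {  // NOTSET, SAME_UPPER, VALID: pad more on tail when total_pad is odd
    pad_head = total_pad / 2;
    pad_tail = total_pad - total_pad / 2;
  }
}

int main() {
  int64_t head = 0, tail = 0;
  DistributePadding(AutoPadType::SAME_UPPER, /*total_pad=*/3, head, tail);
  std::cout << "SAME_UPPER: head=" << head << " tail=" << tail << "\n";  // head=1 tail=2
  DistributePadding(AutoPadType::SAME_LOWER, /*total_pad=*/3, head, tail);
  std::cout << "SAME_LOWER: head=" << head << " tail=" << tail << "\n";  // head=2 tail=1
  return 0;
}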