inline variable, add double
adityagoel4512 committed Jan 11, 2024
1 parent c630d5d commit 8905dbd
Showing 5 changed files with 156 additions and 3 deletions.
2 changes: 1 addition & 1 deletion docs/OperatorKernels.md
@@ -420,7 +420,7 @@ Do not modify directly.*
|DictVectorizer|*in* X:**T1**<br> *out* Y:**T2**|1+|**T1** = map(int64,tensor(double)), map(int64,tensor(float)), map(int64,tensor(string)), map(string,tensor(double)), map(string,tensor(float)), map(string,tensor(int64))<br/> **T2** = tensor(double), tensor(float), tensor(int64), tensor(string)|
|FeatureVectorizer|*in* X:**T1**<br> *out* Y:**tensor(float)**|1+|**T1** = tensor(double), tensor(float), tensor(int32), tensor(int64)|
|Imputer|*in* X:**T**<br> *out* Y:**T**|1+|**T** = tensor(float), tensor(int64)|
|LabelEncoder|*in* X:**T1**<br> *out* Y:**T2**|4+|**T1** = tensor(float), tensor(int64), tensor(string)<br/> **T2** = tensor(float), tensor(int16), tensor(int64), tensor(string)|
|LabelEncoder|*in* X:**T1**<br> *out* Y:**T2**|4+|**T1** = tensor(double), tensor(float), tensor(int64), tensor(string)<br/> **T2** = tensor(double), tensor(float), tensor(int16), tensor(int64), tensor(string)|
|||[2, 3]|**T1** = tensor(float), tensor(int64), tensor(string)<br/> **T2** = tensor(float), tensor(int64), tensor(string)|
|||1|**T1** = tensor(int64), tensor(string)<br/> **T2** = tensor(int64), tensor(string)|
|LinearClassifier|*in* X:**T1**<br> *out* Y:**T2**<br> *out* Z:**tensor(float)**|1+|**T1** = tensor(double), tensor(float), tensor(int32), tensor(int64)<br/> **T2** = tensor(int64), tensor(string)|
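The two updated opset-4 rows above add tensor(double) to both T1 and T2 for LabelEncoder. As a rough illustration of what that enables, here is a minimal OpTester sketch for one of the newly supported combinations, double keys to int64 values. It mirrors the opset-4 tests added to label_encoder_test.cc later in this commit, but it is not part of the change itself: the test name and data are made up, and it assumes the same includes and namespaces as that test file.

TEST(LabelEncoder, DoubleToInt64Opset4Sketch) {
  // Hypothetical test, not part of this commit: map double keys to int64
  // values via the opset-4 tensor attributes; unmapped inputs fall back to
  // the default value.
  std::vector<std::int64_t> dims{1, 4};

  std::vector<double> input{3.14, 2.718, 1.0, 9.81};
  std::vector<std::int64_t> output{0, 1, -1, 2};  // 1.0 is unmapped -> default -1
  std::vector<double> key_data{3.14, 2.718, 9.81};
  std::vector<std::int64_t> value_data{0, 1, 2};

  OpTester test("LabelEncoder", 4, onnxruntime::kMLDomain);

  ONNX_NAMESPACE::TensorProto keys_proto;
  keys_proto.set_name("keys_tensor");
  keys_proto.set_data_type(ONNX_NAMESPACE::TensorProto_DataType_DOUBLE);
  keys_proto.add_dims(key_data.size());
  for (const auto key : key_data) {
    keys_proto.add_double_data(key);
  }
  test.AddAttribute("keys_tensor", keys_proto);

  ONNX_NAMESPACE::TensorProto values_proto;
  values_proto.set_name("values_tensor");
  values_proto.set_data_type(ONNX_NAMESPACE::TensorProto_DataType_INT64);
  values_proto.add_dims(value_data.size());
  for (const auto value : value_data) {
    values_proto.add_int64_data(value);
  }
  test.AddAttribute("values_tensor", values_proto);

  ONNX_NAMESPACE::TensorProto default_proto;
  default_proto.set_name("default_tensor");
  default_proto.set_data_type(ONNX_NAMESPACE::TensorProto_DataType_INT64);
  default_proto.add_dims(1);
  default_proto.add_int64_data(-1);
  test.AddAttribute("default_tensor", default_proto);

  test.AddInput<double>("X", dims, input);
  test.AddOutput<std::int64_t>("Y", dims, output);

  test.Run();
}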
15 changes: 15 additions & 0 deletions onnxruntime/core/providers/cpu/cpu_execution_provider.cc
@@ -2532,6 +2532,11 @@ class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kMLDomain, 4,
class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kMLDomain, 4, string_string, LabelEncoder);
class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kMLDomain, 4, float_float, LabelEncoder);
class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kMLDomain, 4, string_int16, LabelEncoder);
class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kMLDomain, 4, double_string, LabelEncoder);
class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kMLDomain, 4, string_double, LabelEncoder);
class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kMLDomain, 4, int64_double, LabelEncoder);
class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kMLDomain, 4, double_int64, LabelEncoder);
class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kMLDomain, 4, double_double, LabelEncoder);

template <>
KernelCreateInfo BuildKernelCreateInfo<void>() {
@@ -2656,6 +2661,16 @@ Status RegisterOnnxMLOperatorKernels(KernelRegistry& kernel_registry) {
LabelEncoder)>,
BuildKernelCreateInfo<ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kMLDomain, 4, string_int16,
LabelEncoder)>,
BuildKernelCreateInfo<ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kMLDomain, 4, double_string,
LabelEncoder)>,
BuildKernelCreateInfo<ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kMLDomain, 4, string_double,
LabelEncoder)>,
BuildKernelCreateInfo<ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kMLDomain, 4, int64_double,
LabelEncoder)>,
BuildKernelCreateInfo<ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kMLDomain, 4, double_int64,
LabelEncoder)>,
BuildKernelCreateInfo<ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kMLDomain, 4, double_double,
LabelEncoder)>,
};

for (auto& function_table_entry : function_table) {
65 changes: 65 additions & 0 deletions onnxruntime/core/providers/cpu/ml/label_encoder.cc
@@ -328,5 +328,70 @@ void LabelEncoder_4<std::string, std::int16_t>::InitializeAttrFields(const OpKernelInfo& kernel_info) {
default_value_ = static_cast<std::int16_t>(GetDefault(kernel_info, "", static_cast<std::int16_t>(-1)));
};

ONNX_CPU_OPERATOR_TYPED_ML_KERNEL(LabelEncoder, 4, double_double,
KernelDefBuilder()
.TypeConstraint("T1",
std::vector<MLDataType>{DataTypeImpl::GetTensorType<double>()})
.TypeConstraint("T2",
std::vector<MLDataType>{DataTypeImpl::GetTensorType<double>()}),
LabelEncoder_4<double, double>)

template <>
void LabelEncoder_4<double, double>::InitializeAttrFields(const OpKernelInfo& kernel_info) {
default_value_ = GetDefault(kernel_info, "default_float", -0.);
};

ONNX_CPU_OPERATOR_TYPED_ML_KERNEL(
LabelEncoder, 4, double_string,
KernelDefBuilder()
.TypeConstraint("T1", std::vector<MLDataType>{DataTypeImpl::GetTensorType<double>()})
.TypeConstraint("T2", std::vector<MLDataType>{DataTypeImpl::GetTensorType<std::string>()}),
LabelEncoder_4<double, std::string>)

template <>
void LabelEncoder_4<double, std::string>::InitializeAttrFields(const OpKernelInfo& kernel_info) {
value_field_name_ = "values_strings";
default_value_ = GetDefault(kernel_info, "default_string", std::string("_Unused"));
};

ONNX_CPU_OPERATOR_TYPED_ML_KERNEL(
LabelEncoder, 4, string_double,
KernelDefBuilder()
.TypeConstraint("T1", std::vector<MLDataType>{DataTypeImpl::GetTensorType<std::string>()})
.TypeConstraint("T2", std::vector<MLDataType>{DataTypeImpl::GetTensorType<double>()}),
LabelEncoder_4<std::string, double>)

template <>
void LabelEncoder_4<std::string, double>::InitializeAttrFields(const OpKernelInfo& kernel_info) {
key_field_name_ = "keys_strings";
default_value_ = GetDefault(kernel_info, "default_float", -0.);
};

ONNX_CPU_OPERATOR_TYPED_ML_KERNEL(
LabelEncoder, 4, double_int64,
KernelDefBuilder()
.TypeConstraint("T1", std::vector<MLDataType>{DataTypeImpl::GetTensorType<double>()})
.TypeConstraint("T2", std::vector<MLDataType>{DataTypeImpl::GetTensorType<std::int64_t>()}),
LabelEncoder_4<double, std::int64_t>)

template <>
void LabelEncoder_4<double, std::int64_t>::InitializeAttrFields(const OpKernelInfo& kernel_info) {
value_field_name_ = "values_int64s";
default_value_ = GetDefault(kernel_info, "default_int64", static_cast<int64_t>(-1));
};

ONNX_CPU_OPERATOR_TYPED_ML_KERNEL(
LabelEncoder, 4, int64_double,
KernelDefBuilder()
.TypeConstraint("T1", std::vector<MLDataType>{DataTypeImpl::GetTensorType<std::int64_t>()})
.TypeConstraint("T2", std::vector<MLDataType>{DataTypeImpl::GetTensorType<double>()}),
LabelEncoder_4<std::int64_t, double>)

template <>
void LabelEncoder_4<std::int64_t, double>::InitializeAttrFields(const OpKernelInfo& kernel_info) {
key_field_name_ = "keys_int64s";
default_value_ = GetDefault(kernel_info, "default_float", -0.);
};

} // namespace ml
} // namespace onnxruntime
3 changes: 1 addition & 2 deletions onnxruntime/core/providers/cpu/ml/label_encoder.h
@@ -105,8 +105,7 @@ template <typename T>
std::vector<T> GetAttribute(const OpKernelInfo& info, const std::string& name, const std::string& tensor_name) {
if constexpr (std::is_same_v<T, std::string> || std::is_same_v<T, float> || std::is_same_v<T, int64_t>) {
std::vector<T> attrs;
auto result = info.GetAttrs<T>(name, attrs);
if (result.IsOK()) {
if (info.GetAttrs<T>(name, attrs).IsOK()) {
return attrs;
}
}
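For context on the inlined Status check above: given only the signature shown, a call site might look like the line below. This is an illustration, not code from this commit; the attribute names are borrowed from the kernel specializations in label_encoder.cc, and the fallback behavior implied by the tensor_name parameter is an assumption, since the rest of the helper is elided in this view.

// Hypothetical call (not part of this diff): prefer the legacy "keys_strings"
// list attribute; "keys_tensor" presumably names the opset-4 tensor attribute
// the helper falls back to when the list attribute is absent.
std::vector<std::string> keys = GetAttribute<std::string>(info, "keys_strings", "keys_tensor");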
74 changes: 74 additions & 0 deletions onnxruntime/test/providers/cpu/ml/label_encoder_test.cc
@@ -372,6 +372,46 @@ TEST(LabelEncoder, StringToFloatOpset4) {
test.Run();
}

TEST(LabelEncoder, StringToDoubleOpset4) {
std::vector<std::int64_t> dims{1, 5};

std::vector<std::string> input{"Hello", "world", "Random", "onnxruntime", "!"};
std::vector<double> output{0.1, 1.1231e30, -0.0, 2.718, 5.0};
std::vector<std::string> key_data{"Hello", "world", "onnxruntime", "!"};
std::vector<double> value_data{0.1, 1.1231e30, 2.718, 5.0};

OpTester test("LabelEncoder", 4, onnxruntime::kMLDomain);

ONNX_NAMESPACE::TensorProto keys_proto;
keys_proto.set_name("keys_tensor");
keys_proto.set_data_type(ONNX_NAMESPACE::TensorProto_DataType_STRING);
keys_proto.add_dims(key_data.size());
for (const auto& key : key_data) {
keys_proto.add_string_data(key);
}
test.AddAttribute("keys_tensor", keys_proto);

ONNX_NAMESPACE::TensorProto values_proto;
values_proto.set_name("values_tensor");
values_proto.set_data_type(ONNX_NAMESPACE::TensorProto_DataType_DOUBLE);
values_proto.add_dims(value_data.size());
for (const auto& value : value_data) {
values_proto.add_double_data(value);
}
test.AddAttribute("values_tensor", values_proto);

ONNX_NAMESPACE::TensorProto default_proto;
default_proto.set_name("default_tensor");
default_proto.set_data_type(ONNX_NAMESPACE::TensorProto_DataType_DOUBLE);
default_proto.add_dims(1);
default_proto.add_double_data(-0.0);
test.AddAttribute("default_tensor", default_proto);
test.AddInput<std::string>("X", dims, input);
test.AddOutput<double>("Y", dims, output);

test.Run();
}

TEST(LabelEncoder, TensorBasedAttributesOpset4) {
std::vector<std::int64_t> dims{1, 5};

@@ -437,5 +477,39 @@ TEST(LabelEncoder, NaNsMappedTogetherOpset4) {

test.Run();
}

TEST(LabelEncoder, DoubleNaNsMappedTogetherOpset4) {
std::vector<std::int64_t> dims{1, 6};
std::vector<double> input{3.14, std::nan("1"), 2.718, std::nan("2"), 5.0, -1};
std::vector<std::string> output{"a", "ONNX", "b", "ONNX", "c", "onnxruntime"};
std::vector<double> key_data{3.14, 2.718, 5.0, std::nan("3")};
std::vector<std::string> value_data{"a", "b", "c", "ONNX"};

OpTester test("LabelEncoder", 4, onnxruntime::kMLDomain);

ONNX_NAMESPACE::TensorProto keys_proto;
keys_proto.set_name("keys_tensor");
keys_proto.set_data_type(ONNX_NAMESPACE::TensorProto_DataType_DOUBLE);
keys_proto.add_dims(key_data.size());
for (const auto key : key_data) {
keys_proto.add_double_data(key);
}
test.AddAttribute("keys_tensor", keys_proto);

test.AddAttribute("values_strings", value_data);

ONNX_NAMESPACE::TensorProto default_proto;
default_proto.set_name("default_tensor");
default_proto.set_data_type(ONNX_NAMESPACE::TensorProto_DataType_STRING);
default_proto.add_dims(1);
default_proto.add_string_data("onnxruntime");
test.AddAttribute("default_tensor", default_proto);

test.AddInput<double>("X", dims, input);
test.AddOutput<std::string>("Y", dims, output);

test.Run();
}

} // namespace test
} // namespace onnxruntime
