7.0 Release (#1977)
TobyRoseman authored Sep 18, 2023
1 parent d7a1479 commit e4b0d63
Showing 51 changed files with 1,237 additions and 453 deletions.
4 changes: 2 additions & 2 deletions cmake/coreml-utils.cmake
@@ -37,7 +37,7 @@ function(coreml_add_build_proto proto_fn target_suffix)
             ${CMAKE_CURRENT_BINARY_DIR}/format/${proto_fn}_enum.h
         COMMENT "Generating c++ enums from ${proto_fn}.proto into ${CMAKE_CURRENT_BINARY_DIR}/format/"
         COMMAND ${CMAKE_BINARY_DIR}/deps/protobuf/cmake/protoc
-            --plugin=protoc-gen-enum=mlmodel${target_suffix}/enumgen
+            --plugin=protoc-gen-enum=mlmodel/enumgen
            --enum_out=${CMAKE_CURRENT_BINARY_DIR}/format/
            -I${CMAKE_CURRENT_SOURCE_DIR}/format/
            ${CMAKE_CURRENT_SOURCE_DIR}/format/${proto_fn}.proto
@@ -77,7 +77,7 @@ function(coreml_add_build_proto proto_fn target_suffix)
     add_custom_target(tgt_${proto_fn}_enums ALL
         COMMENT "Generating c++ enums from ${proto_fn}.proto into ${CMAKE_CURRENT_SOURCE_DIR}/build/format/"
         COMMAND ${CMAKE_BINARY_DIR}/deps/protobuf/cmake/protoc
-            --plugin=protoc-gen-enum=mlmodel${target_suffix}/enumgen
+            --plugin=protoc-gen-enum=mlmodel/enumgen
            --enum_out=${CMAKE_CURRENT_SOURCE_DIR}/build/format/
            -I${CMAKE_CURRENT_SOURCE_DIR}/format/
            ${CMAKE_CURRENT_SOURCE_DIR}/format/${proto_fn}.proto
2 changes: 0 additions & 2 deletions coremltools/converters/mil/_deployment_compatibility.py
@@ -23,8 +23,6 @@ class AvailableTarget(IntEnum):
     iOS17 = _SPECIFICATION_VERSION_IOS_17

     # macOS versions (aliases of iOS versions)
-    macOS15 = _SPECIFICATION_VERSION_IOS_13
-    macOS16 = _SPECIFICATION_VERSION_IOS_14
     macOS10_15 = _SPECIFICATION_VERSION_IOS_13
     macOS10_16 = _SPECIFICATION_VERSION_IOS_14
     macOS11 = _SPECIFICATION_VERSION_IOS_14
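
Note on this removal: the deleted names were misleading, since macOS15 and macOS16 actually aliased macOS 10.15 and 10.16 (the iOS 13 and iOS 14 spec versions), not future macOS 15/16 releases. The explicit names resolve to the same enum members, which can be verified directly (a small sketch, assuming coremltools is importable as ct):

    import coremltools as ct

    # The explicit names map to the same spec versions the removed aliases used,
    # so IntEnum aliasing makes these the same members.
    assert ct.target.macOS10_15 == ct.target.iOS13
    assert ct.target.macOS10_16 == ct.target.iOS14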
19 changes: 14 additions & 5 deletions coremltools/converters/mil/backend/mil/helper.py
@@ -19,6 +19,9 @@
 from coremltools.converters.mil.mil.types.type_mapping import np_val_to_py_type
 from coremltools.models.utils import _WEIGHTS_DIR_NAME, _WEIGHTS_FILE_NAME

+# For immediate values, those types are stored in bytes (MIL parser reads those types from bytes).
+IMMEDIATE_VALUE_TYPES_IN_BYTES = (types.fp16, types.int8, types.uint8, types.uint32)
+

 def create_valuetype_scalar(data_type):
     """
@@ -105,7 +108,7 @@ def _tensor_field_by_type(tensor_val, builtin_type):
     elif types.is_int(builtin_type):
         if builtin_type == types.int64 or builtin_type == types.uint64:
             return tensor_val.longInts.values
-        if builtin_type in (types.int8, types.uint8, types.uint32):
+        if builtin_type in IMMEDIATE_VALUE_TYPES_IN_BYTES:
             return tensor_val.bytes.values
         if builtin_type == types.int16 or builtin_type == types.uint16:
             # TODO (rdar://111797203): Serialize to byte after MIL changes to read from byte field.
@@ -132,7 +135,7 @@ def _set_empty_tensor_field_by_type(tensor_val, builtin_type):
     elif types.is_int(builtin_type):
         if (builtin_type == types.int64 or builtin_type == types.uint64):
             tensor_val.longInts.SetInParent()
-        elif builtin_type in (types.int8, types.uint8, types.uint32):
+        elif builtin_type in IMMEDIATE_VALUE_TYPES_IN_BYTES:
             tensor_val.bytes.SetInParent()
         else:
             tensor_val.ints.SetInParent()
@@ -167,7 +170,7 @@ def create_tensor_value(np_tensor):
     if builtin_type == types.str:
         for x in np.nditer(np_tensor):
             t_field.append(x.encode("utf-8"))
-    elif builtin_type in (types.fp16, types.int8, types.uint8, types.uint32):
+    elif builtin_type in IMMEDIATE_VALUE_TYPES_IN_BYTES:
         val.immediateValue.tensor.bytes.values = np_val_to_py_type(np_tensor)
     else:
         for x in np_tensor.flatten():
@@ -189,7 +192,7 @@ def create_scalar_value(py_scalar):

     # Set the tensor value
     t_field = _tensor_field_by_type(t_val, builtin_type)
-    if builtin_type in (types.fp16, types.int8, types.uint8, types.uint32):
+    if builtin_type in IMMEDIATE_VALUE_TYPES_IN_BYTES:
         # Serialize to bytes because MIL read them from the "bytes" field in TensorValue.
         val.immediateValue.tensor.bytes.values = np_val_to_py_type(py_scalar)
     else:
@@ -295,7 +298,7 @@ def types_to_proto(valuetype):
     return create_valuetype_scalar(types_to_proto_primitive(valuetype))


-def create_file_value(output_var, blob_writer):
+def _get_offset_by_writing_data(output_var, blob_writer):
     if output_var.val.dtype.kind == 'f' and output_var.val.dtype.itemsize == 4:
         offset = blob_writer.write_float_data(np.ascontiguousarray(output_var.val.flatten()))
     elif output_var.val.dtype.kind == "f" and output_var.val.dtype.itemsize == 2:
@@ -316,6 +319,12 @@ def create_file_value(output_var, blob_writer):
     else:
         raise TypeError("Unsupported type, {}, for net buffer serialization.".format(output_var.val.dtype))

+    return offset
+
+
+def create_file_value(output_var, blob_writer):
+    offset = _get_offset_by_writing_data(output_var, blob_writer)
+
     return create_file_value_tensor(
         file_name=os.path.join(os.path.join('@model_path', _WEIGHTS_DIR_NAME), _WEIGHTS_FILE_NAME),
         offset=offset,
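
For context, the new IMMEDIATE_VALUE_TYPES_IN_BYTES constant centralizes the dtypes whose immediate values go into the proto's raw "bytes" field rather than a typed repeated field. A minimal sketch of the byte round-trip these call sites rely on (plain numpy, no coremltools required):

    import numpy as np

    # fp16 is one of the types stored as raw bytes; the reader side
    # (see _load_value in milproto/load.py below) recovers it with np.frombuffer.
    arr = np.arange(6, dtype=np.float16).reshape(2, 3)
    raw = arr.tobytes()                                            # what the "bytes" field holds
    restored = np.frombuffer(raw, dtype=np.float16).reshape(2, 3)  # decode, then reshape
    assert np.array_equal(arr, restored)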
3 changes: 2 additions & 1 deletion coremltools/converters/mil/backend/mil/load.py
@@ -42,7 +42,8 @@

 try:
     from coremltools.libmilstoragepython import _BlobStorageWriter as BlobWriter
-except:
+except Exception as e:
+    logger.warning(f"Fail to import BlobWriter from libmilstoragepython. {e}")
     BlobWriter = None
6 changes: 3 additions & 3 deletions coremltools/converters/mil/backend/mil/passes/test_passes.py
@@ -1088,13 +1088,13 @@ def program(x):
                 x = mb.pow(x=x, y=2.0)
                 x = mb.sqrt(x=x)
                 x = mb.reduce_argmax(x=x)
-                x = mb.reshape(x=x, shape=[*x_shape])
+                x = mb.reshape(x=x, shape=[*x_shape[:-1]])
             else:
                 x = mb.mul(x=x, y=x)
                 x = mb.sqrt(x=x)
                 x = mb.pow(x=x, y=2.0)
                 x = mb.reduce_argmax(x=x)
-                x = mb.reshape(x=x, shape=[*x_shape])
+                x = mb.reshape(x=x, shape=[*x_shape[:-1]])
             return x

         prev_prog, _, block = apply_pass_and_basic_check(
@@ -1108,5 +1108,5 @@ def program(x):
             program=program,
             inputs={"x": x_shape},
             backend=("mlprogram", "fp32"),
-            expected_output_shapes={block.outputs[0].name: tuple(x_shape)},
+            expected_output_shapes={block.outputs[0].name: tuple(x_shape[:-1])},
         )
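
The corrected shapes account for reduce_argmax dropping the reduced axis (the last one here), so the result has one fewer dimension than its input. A quick numpy analogue of the expectation:

    import numpy as np

    x_shape = (3, 4, 5)
    x = np.random.rand(*x_shape)
    out = np.argmax(x, axis=-1)       # argmax over the last axis removes that axis
    assert out.shape == x_shape[:-1]  # (3, 4), matching tuple(x_shape[:-1])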
2 changes: 1 addition & 1 deletion coremltools/converters/mil/converter.py
@@ -288,7 +288,7 @@ def mil_convert_to_proto(

     PassPipelineManager.apply_pipeline(prog, main_pipeline)

-    prog._check_invalid_tensor_rank()
+    prog._check_invalid_program()

     if convert_to == 'milinternal':
         return None, prog
2 changes: 1 addition & 1 deletion coremltools/converters/mil/frontend/milproto/__init__.py
@@ -3,4 +3,4 @@
 # Use of this source code is governed by a BSD-3-clause license that can be
 # found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause

-from .load import load
+from . import load
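
One consequence of this one-line change: the package attribute milproto.load is now the submodule rather than the load function, so callers reach the function one level deeper. A small check of that binding (standard Python import semantics; assumes coremltools is installed):

    import types as pytypes

    from coremltools.converters.mil.frontend import milproto

    # After this commit, milproto.load is a module; the function is milproto.load.load.
    assert isinstance(milproto.load, pytypes.ModuleType)
    assert callable(milproto.load.load)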
48 changes: 33 additions & 15 deletions coremltools/converters/mil/frontend/milproto/load.py
@@ -8,24 +8,31 @@
 import numpy as np

 from coremltools import _logger as logger
-from coremltools.converters.mil._deployment_compatibility import \
-    AvailableTarget as _target
+from coremltools.converters.mil._deployment_compatibility import AvailableTarget as _target
 from coremltools.converters.mil.backend.mil import helper
 from coremltools.converters.mil.mil import Block
 from coremltools.converters.mil.mil import Builder as mb
-from coremltools.converters.mil.mil import (Function, ListVar, Placeholder,
-                                            Program, TupleInputType, Var,
-                                            mil_list, types)
+from coremltools.converters.mil.mil import (
+    Function,
+    ListVar,
+    Placeholder,
+    Program,
+    TupleInputType,
+    Var,
+    mil_list,
+    types,
+)
 from coremltools.converters.mil.mil.block import curr_block
-from coremltools.converters.mil.mil.ops.registry import \
-    SSAOpRegistry as _SSAOpRegistry
+from coremltools.converters.mil.mil.ops.registry import SSAOpRegistry as _SSAOpRegistry
 from coremltools.proto import MIL_pb2 as pm
 from coremltools.proto import Model_pb2 as ml

 from .helper import proto_to_types

 try:
     from coremltools.libmilstoragepython import _BlobStorageReader as BlobReader
-except:
+except Exception as e:
+    logger.warning(f"Fail to import BlobReader from libmilstoragepython. {e}")
     BlobReader = None

@@ -145,7 +152,7 @@ def _load_value(context, value_spec):
     else:
         value = _load_file_value(context, value_spec.blobFileValue, dtype)

-    if dtype in (types.fp16, types.int8, types.uint8, types.uint32):
+    if dtype in helper.IMMEDIATE_VALUE_TYPES_IN_BYTES:
         value = np.frombuffer(value, types.nptype_from_builtin(dtype)).reshape(
             shape
         )
@@ -246,20 +253,23 @@ def _dummy_false_fn(*loop_vars):
         inputs["_false_fn"] = _dummy_false_fn


+def _load_const_op(context, op_spec):
+    inputs = {k: _load_value(context, v) for k, v in op_spec.attributes.items()}
+    pymil_var = getattr(mb, op_spec.type)(**inputs)
+    context.register_var_with_name(op_spec.outputs[0].name, pymil_var)
+
+
 def _load_operation(context, op_spec):
     if not isinstance(op_spec, pm.Operation):
         raise TypeError("Invalid Operation spec object")

     op_type = op_spec.type
-    if op_type == "const" or op_type.startswith("constexpr_"):
+    if op_type == "const" or "constexpr_" in op_type:
         if op_spec.blocks:
             raise ValueError("const / constexpr operation can't have any block")
         if op_spec.inputs:
             raise ValueError("const / constexpr operation can't have any input")

-        inputs = {k: _load_value(context, v) for k, v in op_spec.attributes.items()}
-        pymil_var = getattr(mb, op_type)(**inputs)
-        context.register_var_with_name(op_spec.outputs[0].name, pymil_var)
+        _load_const_op(context, op_spec)

     else:
         if op_type == "custom_layer":
@@ -402,11 +412,19 @@ def _load_function(context, func_spec, spec_version):


 def load(model_spec, specification_version, file_weights_dir="", **kwargs):
+    """
+    Load MILProto to Pymil.
+    Set force_spec_version to force override the spec version.
+    """
     if not isinstance(model_spec, ml.Model):
         raise TypeError("Invalid Model sepc object")

     if specification_version < model_spec.specificationVersion:
-        raise ValueError("specification_version must be greater or equal to the input model spec version")
+        if not kwargs.get("force_spec_version", False):
+            raise ValueError(
+                "specification_version must be greater or equal to the input model spec version"
+            )

     if model_spec.WhichOneof("Type") != "mlProgram":
         raise ValueError("Only MIL proto based mlmodels can be loaded")
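
The new force_spec_version keyword is an escape hatch around the version guard. A hypothetical invocation, with the signature taken from the diff above and all argument values as placeholders:

    from coremltools.converters.mil.frontend.milproto.load import load as milproto_load

    # Load a pymil Program from a Model proto even when model_spec.specificationVersion
    # is newer than the requested specification_version.
    prog = milproto_load(
        model_spec,                    # an ml.Model with an mlProgram payload (placeholder)
        specification_version=7,       # placeholder value
        file_weights_dir=weights_dir,  # placeholder path to the model's weights directory
        force_spec_version=True,       # skip the spec-version guard added above
    )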
(changed file name not captured in this view)
@@ -158,7 +158,7 @@ def build_model(x):

     @pytest.mark.parametrize(
         "target",
-        [ct.target.iOS13, ct.target.macOS15, ct.target.watchOS6, ct.target.tvOS13],
+        [ct.target.iOS13, ct.target.macOS10_15, ct.target.watchOS6, ct.target.tvOS13],
     )
     def test_invalid_deployment_target_cumsum(self, target):
         x_shape = (3, 4, 5)
@@ -179,7 +179,7 @@ def build_model(x):

     @pytest.mark.parametrize(
         "target",
-        [ct.target.iOS14, ct.target.macOS16, ct.target.watchOS7, ct.target.tvOS14],
+        [ct.target.iOS14, ct.target.macOS10_16, ct.target.watchOS7, ct.target.tvOS14],
     )
     def test_valid_deployment_target_cumsum(self, target):
         x_shape = (3, 4, 5)
11 changes: 7 additions & 4 deletions coremltools/converters/mil/frontend/torch/converter.py
@@ -19,6 +19,7 @@
 from .._utils import get_output_names
 from .internal_graph import InternalTorchIRGraph, InternalTorchIRNode
 from .ops import convert_nodes
+from .quantization_ops import _dequantized_weight
 from .torch_op_registry import _TORCH_OPS_REGISTRY
 from .torchir_passes import (
     flatten_graph_input_values,
@@ -147,8 +148,6 @@ def get_dequantized_var(self, name: str, dequantized_name: str = None):
             # the MIL op.
             if dequantized_name is not None:
                 self._context.add(original_var, dequantized_name)
-            if self._quant_dtype is None:
-                raise AssertionError("Trying to dequantize without quantization info")
             return original_var, self._quant_dtype

         quant_params = self.get_quantization_info(name)
@@ -429,6 +428,10 @@ def convert_const(self):
             if isinstance(val, torch._C.ScriptObject):
                 logger.info(f"Encountered constant {name} of type _torch._C.ScriptObject")
                 continue
+            elif isinstance(val, torch.Tensor) and val.is_quantized:
+                const = _dequantized_weight(val.cpu(), name)
+                self.context.add(const)
+                continue
             elif not isinstance(val, np.ndarray):
                 raise ValueError(f"unsupported class for {name} in PyTorch graph: {type(val)}")
             # TODO (rdar://107718371): support uint8 quantization
@@ -623,10 +626,10 @@ def _lower_graph_block(graph):
         if is_tensor or is_quantized_tensor:
             if is_tensor and prefix in state_dict:
                 assert torch.equal(
-                    module, state_dict[prefix]
+                    module.cpu(), state_dict[prefix].cpu()
                 ), "tensor value not consistent between torch ir and state_dict"
             if prefix in params_dict:
-                assert torch.equal(module, params_dict[prefix])
+                assert torch.equal(module.cpu(), params_dict[prefix].cpu())
             replace_input[_output] = first_node_with_prefix[prefix]
         else:
             params_dict[prefix] = module
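
For reference on the new val.is_quantized branch: a quantized torch tensor cannot be treated as a plain float array until it is dequantized, which is what the _dequantized_weight helper is applied for here. A standalone torch illustration, independent of coremltools:

    import torch

    # Build a small per-tensor-quantized weight.
    w = torch.quantize_per_tensor(
        torch.randn(4, 4), scale=0.1, zero_point=0, dtype=torch.qint8
    )
    assert w.is_quantized            # the condition convert_const now checks
    w_fp32 = w.dequantize().numpy()  # back to a float32 array before further conversion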
5 changes: 4 additions & 1 deletion coremltools/converters/mil/frontend/torch/internal_graph.py
@@ -269,7 +269,10 @@
         # Add params
         for name, param in params_dict.items():
             if isinstance(param, torch.Tensor):
-                value = param.detach().cpu().numpy()
+                if param.is_quantized:
+                    value = param
+                else:
+                    value = param.detach().cpu().numpy()
             else:
                 value = param
             self.params[name] = value
[Diff truncated here: the remaining changed files were not rendered in this view.]