Skip to content

Commit

Permalink
6.0 Release (#1600)
Browse files Browse the repository at this point in the history
* 6.0 Release

* Fix error message

* Update torch in warning message

* Skip iOS16 target unit test on macOS 12 or lower

* Fix unit test

* Skip a test on macOS 12
  • Loading branch information
TobyRoseman authored Sep 19, 2022
1 parent 1f625b3 commit 20b8352
Show file tree
Hide file tree
Showing 135 changed files with 4,249 additions and 2,456 deletions.
4 changes: 2 additions & 2 deletions BUILDING.md
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,7 @@ Follow these steps:
1. Fork and clone the GitHub [coremltools repository](https://github.com/apple/coremltools).

2. Run the [build.sh](scripts/build.sh) script to build `coremltools`.
* By default this script uses Python 3.7, but you can include `--python=3.6` (or `3.7`, `3.8`, `3.9`) as an argument to change the Python version.
* By default this script uses Python 3.7, but you can include `--python=3.6` (or `3.7`, `3.8`, `3.9`, `3.10`) as an argument to change the Python version.
* The script creates a new `build` folder with the coremltools distribution, and a `dist` folder with Python wheel files.

3. Run the [test.sh](scripts/test.sh) script to test the build.
Expand All @@ -45,7 +45,7 @@ The following build targets help you configure the development environment. If y
* `test_slow` | Run all non-fast tests.
* `wheel` | Build wheels in release mode.

The script uses Python 3.7, but you can include `--python=3.6` (or `3.7`, `3.8`, `3.9`) as an argument to change the Python version.
The script uses Python 3.7, but you can include `--python=3.6` (or `3.7`, `3.8`, `3.9`, `3.10`) as an argument to change the Python version.

## Resources

Expand Down
6 changes: 3 additions & 3 deletions coremltools/_deps/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -46,7 +46,7 @@ def _warn_if_above_max_supported_version(package_name, package_version, max_supp
_HAS_SKLEARN = True
_SKLEARN_VERSION = None
_SKLEARN_MIN_VERSION = "0.17"
_SKLEARN_MAX_VERSION = "0.19.2"
_SKLEARN_MAX_VERSION = "1.1.2"


def __get_sklearn_version(version):
Expand Down Expand Up @@ -98,7 +98,7 @@ def __get_sklearn_version(version):
_HAS_TF_1 = False
_HAS_TF_2 = False
_TF_1_MIN_VERSION = "1.12.0"
_TF_1_MAX_VERSION = "1.15.0"
_TF_1_MAX_VERSION = "1.15.4"
_TF_2_MIN_VERSION = "2.1.0"
_TF_2_MAX_VERSION = "2.8.0"

Expand Down Expand Up @@ -145,7 +145,7 @@ def __get_sklearn_version(version):

# ---------------------------------------------------------------------------------------
_HAS_TORCH = True
_TORCH_MAX_VERSION = "1.11.0"
_TORCH_MAX_VERSION = "1.12.1"
try:
import torch
_warn_if_above_max_supported_version("Torch", torch.__version__, _TORCH_MAX_VERSION)
Expand Down
2 changes: 1 addition & 1 deletion coremltools/converters/_converters_entry.py
Original file line number Diff line number Diff line change
Expand Up @@ -484,7 +484,7 @@ def _set_default_specification_version(target):
return _LOWEST_ALLOWED_SPECIFICATION_VERSION_FOR_NEURALNETWORK
elif target == "mlprogram":
return _LOWEST_ALLOWED_SPECIFICATION_VERSION_FOR_MILPROGRAM
elif target == "milinternal":
elif target in ("milinternal", "milpython"):
return None
else:
raise NotImplementedError("Backend converter {} not implemented".format(target))
Expand Down
26 changes: 0 additions & 26 deletions coremltools/converters/mil/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,57 +3,31 @@
# Use of this source code is governed by a BSD-3-clause license that can be
# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause

# This import should be pruned rdar://84519338
from .mil import (
block,
Block,
BoolInputType,
BoolTensorInputType,
builder,
Builder,
curr_block,
DefaultInputs,
FloatInputType,
FloatTensorInputType,
Function,
get_existing_symbol,
get_new_symbol,
get_new_variadic_symbol,
input_type,
InputSpec,
IntInputType,
IntOrFloatInputType,
IntOrFloatOrBoolInputType,
IntTensorInputType,
InternalInputType,
InternalScalarOrTensorInputType,
InternalStringInputType,
InternalVar,
ListInputType,
ListOrScalarOrTensorInputType,
ListVar,
mil_list,
operation,
Operation,
ops,
Placeholder,
precondition,
program,
Program,
PyFunctionInputType,
register_op,
SPACES,
SUPPORT_FLOAT_TYPES,
SUPPORT_INT_TYPES,
ScalarOrTensorInputType,
StringInputType,
Symbol,
TensorInputType,
TupleInputType,
types,
var,
Var,
visitors
)

from .frontend.torch import register_torch_op
Expand Down
6 changes: 6 additions & 0 deletions coremltools/converters/mil/backend/mil/load.py
Original file line number Diff line number Diff line change
Expand Up @@ -197,6 +197,12 @@ def feeds_to_only_constexprs(op):
# Classify's "classes" param should be serialized as a value literal bound
# directly to the param, rather than as a const-generated variable.
proto_ops.append(translate_generic_op(op, parameters, blob_writer, ["classes"]))
elif op_cls_name == "reshape_like":
# The reshape_like op should also be able to take a value from a const op.
# This is a workaround solution.
# rdar://98689808 (Reshape_like should also accept const value from non literal input)
literal_params = ["begins", "ends", "end_masks"]
proto_ops.append(translate_generic_op(op, parameters, blob_writer, literal_params))
else:
proto_ops.append(translate_generic_op(op, parameters, blob_writer))

Expand Down
1 change: 0 additions & 1 deletion coremltools/converters/mil/backend/mil/passes/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,6 @@
from . import (
adjust_io_to_supported_types,
fuse_activation_silu,
homogenize_input_dtypes,
insert_image_preprocessing_op,
sanitize_name_strings
)
Original file line number Diff line number Diff line change
Expand Up @@ -105,8 +105,8 @@ def _adjust_main_inputs(func):
# This is some other dtype. Change the type to fp32 and add a cast.
# This is only a limitation of main--other functions do not represent CoreML model inputs
# and do not have the same limitation on input types.
supported_dtypes = "{int32, fp32, fp64}" if func.opset_version < target.iOS16 else \
"{int32, fp16, fp32, fp64}"
supported_dtypes = "{int32, fp32}" if func.opset_version < target.iOS16 else \
"{int32, fp16, fp32}"
msg = "\nInput '{}' is of dtype {}. The " +\
"CoreML runtime does not support inputs with this dtype " +\
"(supported dtypes are: {}). This input will be assigned a dtype of " +\
Expand Down Expand Up @@ -135,8 +135,8 @@ def _adjust_main_outputs(func):
and (func.opset_version < target.iOS16 or output_var.dtype != types.fp16):
# since fp16 is a valid output type for coreml from ios16 spec onwards, no need to cast
output_dtype_str = types.builtin_to_string(output_var.dtype)
supported_dtypes = "{int32, fp32, fp64}" if func.opset_version < target.iOS16 else \
"{int32, fp16, fp32, fp64}"
supported_dtypes = "{int32, fp32}" if func.opset_version < target.iOS16 else \
"{int32, fp16, fp32}"
msg = "\nOutput '{}' is of dtype {}. The " +\
"CoreML runtime does not support outputs with this dtype " +\
"(supported dtypes are: {}). This output will be assigned a dtype " +\
Expand Down Expand Up @@ -192,74 +192,12 @@ def _adjust_func_inputs(func):
for input_name, input_var in func.inputs.items():
_adjust_var(input_var)

def _adjust_block_inputs(block):
for input_var in block.inputs:
_adjust_var(input_var)

@block_context_manager
def _adjust_ops(block):
len_block = len(block.operations)
i = 0
while i < len_block:
op = block.operations[i]

# Classifier is a special exception to this rule. It can output 64 bit integer labels.
# Classifier should be inserted after running this pass.
if op.op_type == "classify":
raise ValueError("ML Program backend pass adjust_to_supported_types does not support programs" +\
" that have already added a classify op.")

for subblock in op.blocks:
_adjust_block_inputs(subblock)
_adjust_ops(subblock)

for var in op.outputs:
_adjust_var(var)

# Cast ops have a param (dtype) that should match the output dtype.
# If the output dtype or input dtype was previously adjusted,
# the cast op must change or be removed in kind.
if op.op_type == "cast":
output_type_str = types.builtin_to_string(op.outputs[0].dtype)
if op.outputs[0].dtype == op.x.dtype:
# The type of the input or output of this cast op was changed per the rules
# defined in the top level comment for adjust_io_to_supported_types.
#
# That changed output type is the same type as the input to the cast
# op. Therefore, regardless of whether the user created this cast or
# not, it is now redundant (noop), and should be removed.
#
# The removal isn't covered by the main cast
# optimization pass since that pass runs before this pass.
block.replace_uses_of_var_after_op(
anchor_op=op, old_var=op.outputs[0], new_var=op.x
)
block.remove_ops([op])
len_block = len(block.operations)
i -= 1
elif output_type_str != op.dtype.val:
# The type of the output of this cast op was changed per the rules
# defined in the top level comment for adjust_io_to_supported_types.
#
# This cast is meaningful, and the "dtype" param now differs from the output
# type. Replace the dtype cast with a new cast op with a matching dtype param.
new_cast_out = mb.cast(x=op.x, dtype=output_type_str, before_op=op)
block.replace_uses_of_var_after_op(
anchor_op=op, old_var=op.outputs[0], new_var=new_cast_out
)
block.remove_ops([op])
len_block = len(block.operations)
i = i + 1
return block

#####
# The Pass
#####
def _adjust_io_to_supported_types(func, is_main):
if is_main:
_adjust_main_inputs(func)
_adjust_ops(func)
_adjust_main_outputs(func)
else:
_adjust_func_inputs(func)
_adjust_ops(func)

This file was deleted.

8 changes: 1 addition & 7 deletions coremltools/converters/mil/backend/mil/passes/mil_passes.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,13 +17,7 @@ def mil_backend_passes(prog):
"mil_backend::insert_image_preprocessing_ops",
"mil_backend::fuse_activation_silu",
"common::const_elimination", # rank0_expand_dims_swap might introduce some new const tensor
# TODO: Right now, "const elimination" pass CANNOT be done after the "homogenize_input_dtypes" pass.
# Remove this requirement in rdar://76032946.
# Right now due to a bug in the PYMIL const op, which is that it can only produce FP32 and INT32 types tensors (e.g. it can't produce int64),
# therefore invoking const elimination after the var type promotion that happens in "homogenize_input_dtypes" will lead to issues if a
# const var (on const propagation through cast op) has to be promoted to int64 dtype.
"mil_backend::homogenize_input_dtypes",
"common::cast_optimization", # Need to run after homogenize_input_dtypes
"common::cast_optimization",
"common::dead_code_elimination",
"mil_backend::sanitize_name_strings",
"common::dedup_op_and_var_names",
Expand Down
Loading

0 comments on commit 20b8352

Please sign in to comment.