Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Support tuple inputs in extract_submodel #2267

Open
wants to merge 3 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
11 changes: 9 additions & 2 deletions coremltools/converters/mil/debugging_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -77,7 +77,14 @@ def validate_inputs(func, input_vars):
reachable_vars.add(op.outputs[0])

for op in func.operations:
if all([x in reachable_vars for x in op.inputs.values()]):
input_values = []
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Thank you so much @smpanaro for putting up this PR!
In order to get this PR merged,
could you also add a unit test for this change?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

No problem. Just added one (and also fixed one that was failing because of the opset_version change).

for v in op.inputs.values():
if isinstance(v, (list, tuple)):
input_values.extend(v)
else:
input_values.append(v)

if all([x in reachable_vars for x in input_values]):
reachable_vars.update(op.outputs)

for out in func.outputs:
Expand Down Expand Up @@ -170,6 +177,6 @@ def replace_inputs(func, input_vars):
PASS_REGISTRY["common::dead_code_elimination"](prog)

prog.skip_all_passes = True
submodel = ct.convert(prog, convert_to=backend, compute_units=model.compute_unit)
submodel = ct.convert(prog, convert_to=backend, compute_units=model.compute_unit, minimum_deployment_target=func.opset_version)
Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Not directly related but fixes an error I get with my example conversion script:

RuntimeWarning: You will not be able to run predict() on this Core ML model. Underlying exception message was: Error compiling model: "compiler error: Error reading protobuf spec. validator error: Description of multiarray feature 'x_cast_fp16' has FLOAT16 dataType, which is only valid in specification version >= 7. This model has version 6".

Happy to put this up as a separate PR if that's preferred.

Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This is a correct fix :)


return submodel
34 changes: 30 additions & 4 deletions coremltools/converters/mil/mil/tests/test_debug.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,12 +13,12 @@
import coremltools as ct
from coremltools.converters.mil import Builder as mb
from coremltools.converters.mil.debugging_utils import extract_submodel
from coremltools.converters.mil.mil import get_new_symbol
from coremltools.converters.mil.mil import get_new_symbol, types
from coremltools.converters.mil.mil.types.symbolic import is_symbolic
from coremltools.converters.mil.testing_utils import get_op_types_in_program

def get_simple_program():
@mb.program(input_specs=[mb.TensorSpec(shape=(1, 2, 3, 4)),])
def get_simple_program(opset_version=None):
@mb.program(input_specs=[mb.TensorSpec(shape=(1, 2, 3, 4)),], opset_version=opset_version)
def prog(x):
x = mb.add(x=x, y=1.2, name="add")
x = mb.transpose(x=x, perm=[0, 2, 3, 1])
Expand Down Expand Up @@ -227,6 +227,32 @@ def prog(x, y):
with pytest.raises(ValueError, match="output sin not reachable from inputs"):
submodel = extract_submodel(model, outputs=["sin"], inputs=["mul"])

def test_extract_submodel_tuple_input_ops(self):
    """
    Extraction must traverse ops whose inputs are tuples/lists of vars
    (here: concat), not just single-var inputs.

    Input graph:
    x -> relu ---> sin ---
          |             |
          v             v
         cos -> concat -> tanh -> output_1
    """
    @mb.program(input_specs=[mb.TensorSpec(shape=(1, 2), dtype=types.fp16)], opset_version=ct.target.iOS16)
    def prog(x):
        relu_out = mb.relu(x=x, name="relu")
        sin_out = mb.sin(x=relu_out, name="sin")
        cos_out = mb.cos(x=relu_out, name="cos")
        concat_out = mb.concat(values=[sin_out, cos_out], axis=1, name="concat")
        return mb.tanh(x=concat_out, name="tanh")

    model = ct.convert(prog, convert_to="mlprogram")
    submodel = extract_submodel(model, outputs=["tanh"], inputs=["relu"])

    # Everything downstream of relu (and nothing else) must survive extraction.
    assert get_op_types_in_program(submodel._mil_program) == ["sin", "cos", "concat", "tanh"]

    main_outputs = [out for out in submodel._mil_program.functions["main"].outputs]
    assert len(main_outputs) == 1
    assert main_outputs[0].name == "tanh"

@pytest.mark.parametrize(
"compute_unit",
[
Expand Down Expand Up @@ -267,7 +293,7 @@ def test_extract_submodel_neuralnetwork(self, compute_unit):
)
)
def test_extract_submodel_mlprogram(self, compute_unit, store_to_disk):
prog = get_simple_program()
prog = get_simple_program(ct.target.iOS16)
model = ct.convert(
prog,
convert_to="mlprogram",
Expand Down