From 6eb6adff2f1a2fe988e79c53e622b18f9ff7ab26 Mon Sep 17 00:00:00 2001 From: Xavier Dupre Date: Fri, 2 Feb 2024 17:22:54 +0100 Subject: [PATCH 1/6] update requirements --- azure-pipelines.yml | 4 ++-- requirements-dev.txt | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 907bb9f..61587f4 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -4,8 +4,8 @@ jobs: vmImage: 'ubuntu-latest' strategy: matrix: - Python311-Linux: - python.version: '3.11' + Python312-Linux: + python.version: '3.12' maxParallel: 3 steps: diff --git a/requirements-dev.txt b/requirements-dev.txt index 5804529..5e262e3 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -11,7 +11,7 @@ lightgbm matplotlib ml-dtypes git+https://github.com/onnx/onnxmltools.git -onnxruntime>=1.16.1 +onnxruntime>=1.17.0 openpyxl packaging pandas From 271c29d604a9bffdebc5890c27f7952e41bf2c8e Mon Sep 17 00:00:00 2001 From: Xavier Dupre Date: Wed, 21 Feb 2024 12:08:46 +0100 Subject: [PATCH 2/6] Add a mode to compare model without execution --- .../ut_reference/test_evaluator_yield.py | 66 ++++++- onnx_array_api/_command_lines_parser.py | 15 +- onnx_array_api/reference/evaluator_yield.py | 169 +++++++++++++++--- 3 files changed, 219 insertions(+), 31 deletions(-) diff --git a/_unittests/ut_reference/test_evaluator_yield.py b/_unittests/ut_reference/test_evaluator_yield.py index e7f6817..5d128ed 100644 --- a/_unittests/ut_reference/test_evaluator_yield.py +++ b/_unittests/ut_reference/test_evaluator_yield.py @@ -1,6 +1,7 @@ import unittest import numpy as np from onnx import TensorProto +from onnx.checker import check_model from onnx.helper import ( make_function, make_graph, @@ -9,6 +10,7 @@ make_opsetid, make_tensor_value_info, ) +from onnx.numpy_helper import from_array from onnx.parser import parse_model from onnx_array_api.ext_test_case import ExtTestCase from onnx_array_api.reference import ( @@ -426,7 +428,7 @@ def test_distance_sequence_str(self): 002=|INPUTfloat322x2ABCDB|INPUTfloat322x2ABCDB 003~|INPUTfloat322x3ABCDX|INPUTfloat322x2ABCDX 004-|RESULTfloat322x2CEIOExpH| - 005=|RESULTfloat322x2CEIOLinearRegrY1|RESULTfloat322x2CEIOLinearRegrY1 + 005=|RESULTfloat322x2CEIOLinearRegresY1|RESULTfloat322x2CEIOLinearRegresY1 006~|RESULTfloat322x2CEIOAbsY|RESULTfloat322x3CEIPAbsZ 007~|OUTPUTfloat322x2CEIOY|OUTPUTfloat322x2CEIPY """.replace( @@ -460,6 +462,68 @@ def test_compare_execution(self): self.assertIn("CAAA Constant", text) self.assertEqual(len(align), 5) + def test_no_execution(self): + model = make_model( + make_graph( + [ + make_node("Unsqueeze", ["X", "zero"], ["xu1"]), + make_node("Unsqueeze", ["xu1", "un"], ["xu2"]), + make_node("Reshape", ["xu2", "shape1"], ["xm1"]), + make_node("Reshape", ["Y", "shape2"], ["xm2c"]), + make_node("Cast", ["xm2c"], ["xm2"], to=1), + make_node("MatMul", ["xm1", "xm2"], ["xm"]), + make_node("Reshape", ["xm", "shape3"], ["Z"]), + ], + "dummy", + [ + make_tensor_value_info("X", TensorProto.FLOAT, [32, 128]), + make_tensor_value_info("Y", TensorProto.FLOAT, [3, 5, 128, 64]), + ], + [make_tensor_value_info("Z", TensorProto.FLOAT, [3, 5, 32, "N"])], + [ + from_array(np.array([0], dtype=np.int64), name="zero"), + from_array(np.array([1], dtype=np.int64), name="un"), + from_array(np.array([1, 32, 128], dtype=np.int64), name="shape1"), + from_array(np.array([15, 128, 64], dtype=np.int64), name="shape2"), + from_array(np.array([3, 5, 32, 64], dtype=np.int64), name="shape3"), + ], + ) + ) + check_model(model) + res1, res2, align, 
dc = compare_onnx_execution(model, model, mode="nodes") + text = dc.to_str(res1, res2, align) + self.assertIn("012 = | NODE", text) + + model2 = make_model( + make_graph( + [ + make_node("Unsqueeze", ["X", "zero"], ["xu1"]), + make_node("Unsqueeze", ["xu1", "un"], ["xu2"]), + make_node("Reshape", ["xu2", "shape1"], ["xm1"]), + make_node("Reshape", ["Y", "shape2"], ["xm2c"]), + make_node("MatMul", ["xm1", "xm2c"], ["xm"]), + make_node("Reshape", ["xm", "shape3"], ["Z"]), + ], + "dummy", + [ + make_tensor_value_info("X", TensorProto.FLOAT, [32, 128]), + make_tensor_value_info("Y", TensorProto.FLOAT, [3, 5, 128, 64]), + ], + [make_tensor_value_info("Z", TensorProto.FLOAT, [3, 5, 32, "N"])], + [ + from_array(np.array([0], dtype=np.int64), name="zero"), + from_array(np.array([1], dtype=np.int64), name="un"), + from_array(np.array([1, 32, 128], dtype=np.int64), name="shape1"), + from_array(np.array([15, 128, 64], dtype=np.int64), name="shape2"), + from_array(np.array([3, 5, 32, 64], dtype=np.int64), name="shape3"), + ], + ) + ) + check_model(model2) + res1, res2, align, dc = compare_onnx_execution(model, model2, mode="nodes") + text = dc.to_str(res1, res2, align) + self.assertIn("012 = | NODE", text) + if __name__ == "__main__": unittest.main(verbosity=2) diff --git a/onnx_array_api/_command_lines_parser.py b/onnx_array_api/_command_lines_parser.py index a180deb..5ac48db 100644 --- a/onnx_array_api/_command_lines_parser.py +++ b/onnx_array_api/_command_lines_parser.py @@ -20,7 +20,7 @@ def get_main_parser() -> ArgumentParser: Selects a command. 'translate' exports an onnx graph into a piece of code replicating it, - 'compares' compares the execution of two onnx models + 'compare' compares the execution of two onnx models """ ), ) @@ -90,6 +90,13 @@ def get_parser_compare() -> ArgumentParser: required=True, help="second onnx model", ) + parser.add_argument( + "-m", + "--mode", + choices=["execute", "nodes"], + default="execute", + help="compare the execution ('execute') or the nodes only ('nodes')", + ) parser.add_argument( "-v", "--verbose", @@ -112,7 +119,9 @@ def _cmd_compare(argv: List[Any]): args = parser.parse_args(argv[1:]) onx1 = onnx.load(args.model1) onx2 = onnx.load(args.model2) - res1, res2, align, dc = compare_onnx_execution(onx1, onx2, verbose=args.verbose) + res1, res2, align, dc = compare_onnx_execution( + onx1, onx2, verbose=args.verbose, mode=args.mode + ) text = dc.to_str(res1, res2, align, column_size=args.column_size) print(text) @@ -127,7 +136,7 @@ def main(argv: Optional[List[Any]] = None): parser = get_main_parser() parser.parse_args(argv) else: - parsers = dict(translate=get_parser_translate) + parsers = dict(translate=get_parser_translate, compare=get_parser_compare) cmd = argv[0] if cmd not in parsers: raise ValueError( diff --git a/onnx_array_api/reference/evaluator_yield.py b/onnx_array_api/reference/evaluator_yield.py index df171a6..1beef3c 100644 --- a/onnx_array_api/reference/evaluator_yield.py +++ b/onnx_array_api/reference/evaluator_yield.py @@ -2,7 +2,9 @@ from typing import Any, Dict, List, Iterator, Optional, Tuple, Union from enum import IntEnum import numpy as np -from onnx import ModelProto, TensorProto, ValueInfoProto +from onnx import ModelProto, TensorProto, ValueInfoProto, load +from onnx.helper import tensor_dtype_to_np_dtype +from onnx.shape_inference import infer_shapes from .evaluator import ExtendedReferenceEvaluator @@ -20,6 +22,7 @@ class ResultType(IntEnum): SPARSE_INITIALIZER = 4 INPUT = 8 OUTPUT = 16 + NODE = 32 def __repr__(self): return 
f"{self.__class__.__name__}.{self._name_}" @@ -57,12 +60,13 @@ def __getitem__(self, i: int) -> Any: raise IndexError(f"i={i} out of boundary") def __str__(self): + dtype = self.dtype if self.dtype != 0 else "" els = [ _align(self.kind._name_, 6), - _align(str(self.dtype).replace("dtype(", "").replace(")", ""), 8), - _align("x".join(map(str, self.shape)), 15), + _align(str(dtype).replace("dtype(", "").replace(")", ""), 8), + _align("x".join("" if self.shape is None else map(str, self.shape)), 15), self.summary, - _align(self.op_type or "", 10), + _align(self.op_type or "", 12), self.name or "", ] return " ".join(els) @@ -270,6 +274,22 @@ def _cost_type(self, t1: "np.dtype", t2: "np.dtype") -> float: return 1 def _cost_shape(self, s1: Tuple[int, ...], s2: Tuple[int, ...]) -> float: + if s1 is None or s2 is None: + return self.rank_cost + if any(map(lambda s: isinstance(s, str), s1)) or any( + map(lambda s: isinstance(s, str), s2) + ): + # dynamic shapes + if len(s1) != len(s2): + return self.rank_cost + d = 0 + for i, j in zip(s1, s2): + if isinstance(i, int) and isinstance(j, int): + d += abs(i - j) + elif i != j: + d += self.rank_cost / 2 + return d + d = abs(np.prod(s1) - np.prod(s2)) if len(s1) != len(s2): return self.rank_cost + d @@ -424,12 +444,85 @@ def generate_inputs(model: ModelProto) -> List[np.ndarray]: return inputs +def _update_shape_types_with_proto( + proto: ModelProto, +) -> Dict[str, Tuple[int, Tuple[Union[int, str], ...]]]: + """ + Retrieves the shapes and types for a model. + """ + assert isinstance(proto, ModelProto), f"Unexpected type {type(proto)} for proto" + res = {} + + for val in proto.graph.input: + itype = val.type.tensor_type.elem_type + shape = tuple( + d.dim_param if d.dim_param else d.dim_value + for d in val.type.tensor_type.shape.dim + ) + res[val.name] = [itype, shape] + + for val in proto.graph.output: + itype = val.type.tensor_type.elem_type + shape = tuple( + d.dim_param if d.dim_param else d.dim_value + for d in val.type.tensor_type.shape.dim + ) + res[val.name] = [itype, shape] + + for val in proto.graph.initializer: + itype = val.data_type + shape = tuple(d for d in val.dims) + res[val.name] = [itype, shape] + + new_proto = infer_shapes(proto) + for val in new_proto.graph.value_info: + itype = val.type.tensor_type.elem_type + shape = tuple( + d.dim_param if d.dim_param else d.dim_value + for d in val.type.tensor_type.shape.dim + ) + res[val.name] = [itype, shape] + + return res + + +def _enumerate_result_no_execution(model: ModelProto) -> Iterator[ResultType]: + """ + Produces a list of results based on a model in order to + trigger the edit distance comparison. 
+ """ + type_shape = _update_shape_types_with_proto(model) + for i in model.graph.initializer: + itype, shape = type_shape.get(i.name, (0, None)) + dtype = tensor_dtype_to_np_dtype(itype) + yield ResultExecution( + ResultType.INITIALIZER, dtype, shape, "????", "INIT", i.name + ) + for i in model.graph.input: + itype, shape = type_shape.get(i.name, (0, None)) + dtype = tensor_dtype_to_np_dtype(itype) + yield ResultExecution(ResultType.INPUT, dtype, shape, "????", "INPUT", i.name) + for node in model.graph.node: + yield ResultExecution(ResultType.NODE, 0, None, "????", node.op_type, node.name) + for o in node.output: + itype, shape = type_shape.get(o, (0, None)) + dtype = tensor_dtype_to_np_dtype(itype) + yield ResultExecution( + ResultType.RESULT, dtype, shape, "????", node.op_type, o + ) + for i in model.graph.output: + itype, shape = type_shape.get(i.name, (0, None)) + dtype = tensor_dtype_to_np_dtype(itype) + yield ResultExecution(ResultType.OUTPUT, dtype, shape, "????", "OUTPUT", i.name) + + def compare_onnx_execution( model1: ModelProto, model2: ModelProto, inputs: Optional[Union[List[Any], Tuple[Dict[str, Any]]]] = None, verbose: int = 0, raise_exc: bool = True, + mode: str = "execute", ) -> Tuple[List[ResultExecution], List[ResultExecution], List[Tuple[int, int]]]: """ Compares the execution of two onnx models. @@ -443,33 +536,55 @@ def compare_onnx_execution( the same number of inputs or two dictionaries, one for each model :param verbose: verbosity :param raise_exc: raise exception if the execution fails or stop at the error + :param mode: the model should be executed but the function can be executed + but the comparison may append on nodes only :return: four results, a sequence of results for the first model and the second model, the alignment between the two, DistanceExecution """ - if verbose: - print("[compare_onnx_execution] generate inputs") - if inputs is None: - inputs = generate_inputs(model1) - if isinstance(inputs, tuple): - assert len(inputs) == 2, f"Unexpected number {len(inputs)} of inputs." - feeds1, feeds2 = inputs + assert mode in {"execute", "nodes"}, f"Unexpected value for mode={mode!r}." + + if mode == "execute": + if inputs is None: + if verbose: + print("[compare_onnx_execution] generate inputs") + inputs = generate_inputs(model1) + if isinstance(inputs, tuple): + assert len(inputs) == 2, f"Unexpected number {len(inputs)} of inputs." + feeds1, feeds2 = inputs + else: + feeds1 = {i.name: v for i, v in zip(model1.graph.input, inputs)} + feeds2 = {i.name: v for i, v in zip(model2.graph.input, inputs)} + assert isinstance(feeds1, dict), f"Unexpected type {type(feeds1)} for inputs" + assert isinstance(feeds2, dict), f"Unexpected type {type(feeds2)} for inputs" + if verbose: + print(f"[compare_onnx_execution] execute with {len(inputs)} inputs") + print("[compare_onnx_execution] execute first model") + res1 = list( + YieldEvaluator(model1).enumerate_summarized( + None, feeds1, raise_exc=raise_exc + ) + ) + if verbose: + print(f"[compare_onnx_execution] got {len(res1)} results") + print("[compare_onnx_execution] execute second model") + res2 = list( + YieldEvaluator(model2).enumerate_summarized( + None, feeds2, raise_exc=raise_exc + ) + ) + elif mode == "nodes": + # No execution. 
+ if verbose: + print("[compare_onnx_execution] loading first model") + proto1 = load(model1) if isinstance(model1, str) else model2 + if verbose: + print("[compare_onnx_execution] loading first model") + proto2 = load(model2) if isinstance(model2, str) else model1 + res1 = list(_enumerate_result_no_execution(proto1)) + res2 = list(_enumerate_result_no_execution(proto2)) else: - feeds1 = {i.name: v for i, v in zip(model1.graph.input, inputs)} - feeds2 = {i.name: v for i, v in zip(model2.graph.input, inputs)} - assert isinstance(feeds1, dict), f"Unexpected type {type(feeds1)} for inputs" - assert isinstance(feeds2, dict), f"Unexpected type {type(feeds2)} for inputs" - if verbose: - print(f"[compare_onnx_execution] got {len(inputs)} inputs") - print("[compare_onnx_execution] execute first model") - res1 = list( - YieldEvaluator(model1).enumerate_summarized(None, feeds1, raise_exc=raise_exc) - ) - if verbose: - print(f"[compare_onnx_execution] got {len(res1)} results") - print("[compare_onnx_execution] execute second model") - res2 = list( - YieldEvaluator(model2).enumerate_summarized(None, feeds2, raise_exc=raise_exc) - ) + return + if verbose: print(f"[compare_onnx_execution] got {len(res2)} results") print("[compare_onnx_execution] compute edit distance") From 918e0dd74896426555f6e668371680ff25a886c4 Mon Sep 17 00:00:00 2001 From: Xavier Dupre Date: Wed, 21 Feb 2024 12:10:12 +0100 Subject: [PATCH 3/6] changelogs --- CHANGELOGS.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOGS.rst b/CHANGELOGS.rst index e139c0a..13c81ab 100644 --- a/CHANGELOGS.rst +++ b/CHANGELOGS.rst @@ -4,6 +4,7 @@ Change Logs 0.2.0 +++++ +* :pr:`76`: add a mode to compare models without execution * :pr:`75`: add QuickGelu to ExtendedReferenceEvaluator * :pr:`71`: adds tools to compare two onnx graphs * :pr:`61`: adds function to plot onnx model as graphs From 3b10ea8547428075035edc0e3a4c76b588b6e2ac Mon Sep 17 00:00:00 2001 From: Xavier Dupre Date: Wed, 21 Feb 2024 12:24:19 +0100 Subject: [PATCH 4/6] improve initializer --- _unittests/ut_reference/test_evaluator_yield.py | 2 +- onnx_array_api/_command_lines_parser.py | 2 +- onnx_array_api/reference/evaluator_yield.py | 12 +++++++++--- 3 files changed, 11 insertions(+), 5 deletions(-) diff --git a/_unittests/ut_reference/test_evaluator_yield.py b/_unittests/ut_reference/test_evaluator_yield.py index 5d128ed..8aceec2 100644 --- a/_unittests/ut_reference/test_evaluator_yield.py +++ b/_unittests/ut_reference/test_evaluator_yield.py @@ -428,7 +428,7 @@ def test_distance_sequence_str(self): 002=|INPUTfloat322x2ABCDB|INPUTfloat322x2ABCDB 003~|INPUTfloat322x3ABCDX|INPUTfloat322x2ABCDX 004-|RESULTfloat322x2CEIOExpH| - 005=|RESULTfloat322x2CEIOLinearRegresY1|RESULTfloat322x2CEIOLinearRegresY1 + 005=|RESULTfloat322x2CEIOLinearRegressioY1|RESULTfloat322x2CEIOLinearRegressioY1 006~|RESULTfloat322x2CEIOAbsY|RESULTfloat322x3CEIPAbsZ 007~|OUTPUTfloat322x2CEIOY|OUTPUTfloat322x2CEIPY """.replace( diff --git a/onnx_array_api/_command_lines_parser.py b/onnx_array_api/_command_lines_parser.py index 5ac48db..0450977 100644 --- a/onnx_array_api/_command_lines_parser.py +++ b/onnx_array_api/_command_lines_parser.py @@ -122,7 +122,7 @@ def _cmd_compare(argv: List[Any]): res1, res2, align, dc = compare_onnx_execution( onx1, onx2, verbose=args.verbose, mode=args.mode ) - text = dc.to_str(res1, res2, align, column_size=args.column_size) + text = dc.to_str(res1, res2, align, column_size=int(args.column_size)) print(text) diff --git a/onnx_array_api/reference/evaluator_yield.py 
b/onnx_array_api/reference/evaluator_yield.py index 1beef3c..e70fc63 100644 --- a/onnx_array_api/reference/evaluator_yield.py +++ b/onnx_array_api/reference/evaluator_yield.py @@ -5,6 +5,7 @@ from onnx import ModelProto, TensorProto, ValueInfoProto, load from onnx.helper import tensor_dtype_to_np_dtype from onnx.shape_inference import infer_shapes +from . import to_array_extended from .evaluator import ExtendedReferenceEvaluator @@ -66,7 +67,7 @@ def __str__(self): _align(str(dtype).replace("dtype(", "").replace(")", ""), 8), _align("x".join("" if self.shape is None else map(str, self.shape)), 15), self.summary, - _align(self.op_type or "", 12), + _align(self.op_type or "", 15), self.name or "", ] return " ".join(els) @@ -496,7 +497,12 @@ def _enumerate_result_no_execution(model: ModelProto) -> Iterator[ResultType]: itype, shape = type_shape.get(i.name, (0, None)) dtype = tensor_dtype_to_np_dtype(itype) yield ResultExecution( - ResultType.INITIALIZER, dtype, shape, "????", "INIT", i.name + ResultType.INITIALIZER, + dtype, + shape, + make_summary(to_array_extended(i)), + "INIT", + i.name, ) for i in model.graph.input: itype, shape = type_shape.get(i.name, (0, None)) @@ -506,7 +512,7 @@ def _enumerate_result_no_execution(model: ModelProto) -> Iterator[ResultType]: yield ResultExecution(ResultType.NODE, 0, None, "????", node.op_type, node.name) for o in node.output: itype, shape = type_shape.get(o, (0, None)) - dtype = tensor_dtype_to_np_dtype(itype) + dtype = 0 if itype == 0 else tensor_dtype_to_np_dtype(itype) yield ResultExecution( ResultType.RESULT, dtype, shape, "????", node.op_type, o ) From 463bc86b1f10c1c5e2cd1b5ee8ad3b9701f96f7a Mon Sep 17 00:00:00 2001 From: Xavier Dupre Date: Wed, 21 Feb 2024 12:39:52 +0100 Subject: [PATCH 5/6] fix display --- .../ut_reference/test_evaluator_yield.py | 14 +++++------ onnx_array_api/reference/evaluator_yield.py | 24 ++++++++++++++++++- 2 files changed, 30 insertions(+), 8 deletions(-) diff --git a/_unittests/ut_reference/test_evaluator_yield.py b/_unittests/ut_reference/test_evaluator_yield.py index 8aceec2..83fabe2 100644 --- a/_unittests/ut_reference/test_evaluator_yield.py +++ b/_unittests/ut_reference/test_evaluator_yield.py @@ -424,13 +424,13 @@ def test_distance_sequence_str(self): text = dc.to_str(s1, s2, align) self.assertIn("OUTPUT", text) expected = """ - 001=|INPUTfloat322x2ABCDA|INPUTfloat322x2ABCDA - 002=|INPUTfloat322x2ABCDB|INPUTfloat322x2ABCDB - 003~|INPUTfloat322x3ABCDX|INPUTfloat322x2ABCDX - 004-|RESULTfloat322x2CEIOExpH| - 005=|RESULTfloat322x2CEIOLinearRegressioY1|RESULTfloat322x2CEIOLinearRegressioY1 - 006~|RESULTfloat322x2CEIOAbsY|RESULTfloat322x3CEIPAbsZ - 007~|OUTPUTfloat322x2CEIOY|OUTPUTfloat322x2CEIPY + 001=|INPUTfloat322:2x2ABCDA|INPUTfloat322:2x2ABCDA + 002=|INPUTfloat322:2x2ABCDB|INPUTfloat322:2x2ABCDB + 003~|INPUTfloat322:2x3ABCDX|INPUTfloat322:2x2ABCDX + 004-|RESULTfloat322:2x2CEIOExpH| + 005=|RESULTfloat322:2x2CEIOLinearRegressioY1|RESULTfloat322:2x2CEIOLinearRegressioY1 + 006~|RESULTfloat322:2x2CEIOAbsY|RESULTfloat322:2x3CEIPAbsZ + 007~|OUTPUTfloat322:2x2CEIOY|OUTPUTfloat322:2x2CEIPY """.replace( " ", "" ).strip( diff --git a/onnx_array_api/reference/evaluator_yield.py b/onnx_array_api/reference/evaluator_yield.py index e70fc63..f1b30a3 100644 --- a/onnx_array_api/reference/evaluator_yield.py +++ b/onnx_array_api/reference/evaluator_yield.py @@ -29,6 +29,22 @@ def __repr__(self): return f"{self.__class__.__name__}.{self._name_}" +def _dimension_to_str(d): + if isinstance(d, int): + return str(d) + try: + int(d) + 
except ValueError:
+        return d
+    return f"{d!r}"
+
+
+def _rank_to_str(shape):
+    if shape:
+        return f"{len(shape)}:"
+    return "  "
+
+
 @dataclass
 class ResultExecution:
     """
@@ -65,7 +81,13 @@ def __str__(self):
         els = [
             _align(self.kind._name_, 6),
             _align(str(dtype).replace("dtype(", "").replace(")", ""), 8),
-            _align("x".join("" if self.shape is None else map(str, self.shape)), 15),
+            _rank_to_str(self.shape)
+            + _align(
+                "x".join(
+                    "" if self.shape is None else map(_dimension_to_str, self.shape)
+                ),
+                18,
+            ),
             self.summary,
             _align(self.op_type or "", 15),
             self.name or "",

From b627f818eca9965893e353a33828934a44ad25b6 Mon Sep 17 00:00:00 2001
From: Xavier Dupre
Date: Wed, 21 Feb 2024 12:49:51 +0100
Subject: [PATCH 6/6] fix side

---
 onnx_array_api/reference/evaluator_yield.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/onnx_array_api/reference/evaluator_yield.py b/onnx_array_api/reference/evaluator_yield.py
index f1b30a3..7942d8f 100644
--- a/onnx_array_api/reference/evaluator_yield.py
+++ b/onnx_array_api/reference/evaluator_yield.py
@@ -604,10 +604,10 @@ def compare_onnx_execution(
         # No execution.
         if verbose:
             print("[compare_onnx_execution] loading first model")
-        proto1 = load(model1) if isinstance(model1, str) else model2
+        proto1 = load(model1) if isinstance(model1, str) else model1
         if verbose:
             print("[compare_onnx_execution] loading first model")
-        proto2 = load(model2) if isinstance(model2, str) else model1
+        proto2 = load(model2) if isinstance(model2, str) else model2
         res1 = list(_enumerate_result_no_execution(proto1))
         res2 = list(_enumerate_result_no_execution(proto2))
     else:
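
Usage note: the snippet below is a minimal, hypothetical sketch of the comparison mode introduced in PATCH 2/6, based only on the API exercised in test_evaluator_yield.py; the model file names are placeholders. The same comparison is reachable from the command line through the new `compare` sub-command (`-m1`, `-m2`, `--mode {execute,nodes}`, `-v`) registered in onnx_array_api/_command_lines_parser.py.

import onnx
from onnx_array_api.reference import compare_onnx_execution

# Placeholder paths: any two ONNX models with comparable structure will do.
proto1 = onnx.load("model1.onnx")
proto2 = onnx.load("model2.onnx")

# mode="nodes" aligns initializers, inputs, nodes and outputs without running
# either model; mode="execute" (the default) runs both models and compares
# intermediate results as well.
res1, res2, align, dc = compare_onnx_execution(proto1, proto2, mode="nodes", verbose=1)

# dc is a DistanceExecution; to_str renders the alignment as a side-by-side report.
print(dc.to_str(res1, res2, align))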
