From 8bd1a66d08d174ecaeb7a20409ddd80094cf4fd9 Mon Sep 17 00:00:00 2001 From: Xavier Dupre Date: Fri, 10 Nov 2023 11:38:26 +0100 Subject: [PATCH 01/12] Extend to use inner API --- _doc/api/light_api.rst | 6 + .../_data/stft_inlined_batch_1.onnx | Bin 0 -> 9865 bytes .../ut_light_api/test_translate_classic.py | 236 ++++++++++++++++++ onnx_array_api/light_api/__init__.py | 39 ++- onnx_array_api/light_api/inner_emitter.py | 108 ++++++++ onnx_array_api/light_api/translate.py | 46 +++- 6 files changed, 420 insertions(+), 15 deletions(-) create mode 100644 _unittests/ut_light_api/_data/stft_inlined_batch_1.onnx create mode 100644 _unittests/ut_light_api/test_translate_classic.py create mode 100644 onnx_array_api/light_api/inner_emitter.py diff --git a/_doc/api/light_api.rst b/_doc/api/light_api.rst index a50f050..b68b923 100644 --- a/_doc/api/light_api.rst +++ b/_doc/api/light_api.rst @@ -60,6 +60,12 @@ EventType .. autoclass:: onnx_array_api.light_api.translate.EventType :members: +InnerEmitter +++++++++++++ + +.. 
autoclass:: onnx_array_api.light_api.inner_emitter.Emitter + :members: + Translater ++++++++++ diff --git a/_unittests/ut_light_api/_data/stft_inlined_batch_1.onnx b/_unittests/ut_light_api/_data/stft_inlined_batch_1.onnx new file mode 100644 index 0000000000000000000000000000000000000000..172de97f5f6dfce375190552708da148ec2699ee GIT binary patch literal 9865 zcma)C3vgW3dEV7yA4ih4I!LyWZQ`s+O=NJi4{0S?dyj1SfgQUTY#4AzFDvbpy|%O~ z@2+fNY%H9#8Nf+!%P@hashcS-)1<}INqJ=4lqsFWcu2yKz9vaS@#NtIN*Ev+Ak_5x z&%Ni~b5|?3p7E@7@Bjbje|-OU{&QB52oF!+T`iTz_jNZX*CyBY4t;Gq+s;Ggq1>dI z=w_p(Vx^iZR?XgcME%^;!9J&0%OE?MZCkYi!g1O4}Pb9?Kj#%ZAIbNCpj-=I$w7P3%3|5Dk|21=~ z!P_-GCqDzEsX^cqJS>Hz*+F{gC9lI*yMSG?ex?*yt$7|HQx$SCR7_9;;-O8m)45_E zZf;b;Ggpdt$i^|JbNPIsxK}Ak2!zlq3QhT!Fjm`oHsC6*J&nNf`_eQUfGkdXte9_sXDG{7L=NoKG!;LFXi*5HEB&* zXc8w337qC#lZA1D+b6uL>b^v!FBBDJTq4Qw<`3$6qxzJ2iuL@7S%<7*twR!Xu0y+6 zt#p=qs&YB^L< z=-fy>j_>n0K4p_>svgJpvs$-2x=-;C(Y@Rw;Al$kc18FDnzQ{^aP}_S$AJ2*CH%DQ zqaN>-34aaiFsoLvQYwSa?47aKVSlm(f7A2XKdsoGM9|)8mo9borac&>V}GgcgNMid^4&ai{QuZL?J+KpXtTexCtOceU&dM+N3V8YNc;B%pjYu1?Qhr@z_<3!fY!8khJJ_G z`p{cgvrpsMt69{YikGUf<3kL^G~-~Nb_Ph&L_g@7SEg4S733t{=Z=wmJe)5~VW=Qm zXpTO;{a5q0ignN|Ta}qfOqL{q9_fp>?XnKeSjBN`)6{hJZn$(kZz|aDfqC7Xx+DS0 zs^SP@RZXDYRs)`OwU{2iWrvM9)~ChflALP~?fsnG1lY-*&d5@n)e+}ml*;h~Fz$;t zZMP~FM1|Z`%QKekA&{wixI`TsT4K?jU~oedN!q7|Gf@^`O4Ig|`SZv_1(R0d?OJ+i z0eB8rlLw{JbkC0>tZA&2zdQN;2utuVkp; zX$nj5$dTSlR6+?epD<_?iKgk393Y@za&XQqLvdOPG*0+aebafup5~(@n~x)#_eMY|0o6x(;C{t5=LeQ z6NfWqZ)UYZe2Gs>j-Dw>54@lfn*+Z2%qh)nu;VoriL-_5_%K4rg<*nl{<-;1~f#Y1%4742Zy&+KX z7_8w~tu;YZp49uyL?X^d<;%_G(v(78Pp5PK_H9xbsqnRIoC z%NpC~l==Y^to@|Vw{FV6?oCP@Tcj=C-yPbxdAFIsbc0X#TBZ?n-?NC}TJ4Zku2}M# zhR{(eT9rcH$}c5!=z(PPK=cjw%7Ng(<@lj~$SBWnmffY&*B=i%KV!}7lxT{^oXH7O zUVEBm#)bjWzH`lpV~{;0(JRzEdtICtFNxoGr*VBF^@)}1wb;C4R3zP@K~`_p2|%Yj^wI^QqdLzE9yz} zvAPqWdse3e!!oE>SMlmkH?_4JSBA}7Ro&f^IW^*uUF^yz?WTh>Il6QGw>IXCe;FPz zCni9B$#F=54KwkEtrI<`npg|!Q8dTEO5^*Imm67{vx!pqaIP%N79q~a5b_t;wHyM> 
zGEIwfWg%C=-cXu^3GCP8tC_(3M+tSV?W1=6f6>6|=L+8lNC!P^!^~j^s@lM+`ACZu|e0dgy)z%5y?ucgit0_Mw7h`N%K}(*>$d+ z>C(=&=nEh*=E+cs?GXMv>JQ;$lnrz4hdnihN@h}@)$3v3o!fj~*XQwiUsM>Z&g!ZG z$fh&SP#Y?gBvW1&d@bxuA0)tA9Zb>GL*f*;nM_MiJ=^F9)tN@t&s!Cksb}4wb@uiJ z?KH%C=F{Vyo|XAck?gP66!$l{~q;56k0Y&UepA3%*!}($5U_iZ{yqio8p+GrxQ(|5uR?r+u-3)mQT%sPkGXDIu8i7w{&00qSi21D>AT zL>w+lVH(tv=@C?_Z!E@KxJ&-PZq(c}<4@E$TkkHhFjcvBoFI`#OMmHkP} z5_xL;l+k^2$)`prp$k#JI&fg4%-zm-w5lfR?pAlWHWLH-sNLa=+5wE(!4b@D;befb zsa&CG?jZ)<;n{Kf#nwl1G~qE0^tiob1$q|7l@Cq3w-9W zbTw5ojbDcX3Xr3KRB4(_t1&#onJheo%`7%*_TfJMRxd4zMegM z>FI3ispqr34}L%U(|`R@_MOtD?6tT5Ci}?dpm8x2F>ZW)k7yUw^KG-^Ea{x;*Kwq3@TjZYcBe{$U5zdc}d-cvP>b{{jI{pAVcR1xOn25#?yEH zqw&V@KO1b-3&yEM-!qy&eA#&I?SD7^>GdBQ{Fkp9cU=0pvFul`8UOL$ZyNmV-x%B8 zd!N3~7k7T9LCl7lMc4gd@it@PUmk1~vn}o7^!**8CDbYIom(uPd3UKeT3jX`cy)z% z{N|P7p>M@S+jSokuY7*BxbW+bi&HzU7w?@}E57}^Uh%C@ro`iq^oh|QXT+zM4~cWP z4U3OGXo#_AN5t@}8^p1OP2!E^Tg0B;t>WbHC&ex6cZgZzCh=nGX0hU=AElyi?qG`O{+0-fjRj}zteMiPjVzrdZbTwOxPp4WPdBxq4k=v?f}*&Kggdp`1K_G zBR`|?m;9cC{}jhth^G^AQG68Vmk=+-P4QD6Mv;#o^71?6hw`)q`J%kdB7a@THwrygz{a-+?;3hI$-DU4~E}s#6^G`X=hO2ld;4I(`-PJcqhIkNUog zI)4H6K99On{ab(o!oyzz7le-^z{#_~%Q4`l75Mox;OH>$^fGWo_~O9XVc_i!;EwRO z2{`Nk9)AE_b_1WgfzuA)^&f!SOTh2t%P$*W0G^wG>(_wqv%vW)!23nuem(GCLLdAm z`k@1TQAU4!4t??s^b7UPM)VK$(KpafN6=Rc{S`x>ZAQOQ-;_K| zUkrdZh(DeIj}V`H3A_>kzpNyl0pGmIFG~Iy0S^%$JqKR83;c8!cuMjWcuVrviN|a{ zBVG$#7?M0kduR%8$Ct}uQ4LyY}nuF{+$wI5!`t-!MjHHX$sz| zr=DzI-KPV-r(J4qPHK;X`{U|y%9nQAr8!zWxZhR9k str: +def translate(proto: ModelProto, single_line: bool = False, api: str = "light") -> str: """ Translates an ONNX proto into a code using :ref:`l-light-api` to describe the ONNX graph. :param proto: model to translate :param single_line: as a single line or not + :param api: API to export into, + default is `"light"` and this is handle by class + :class:`onnx_array_api.light_api.translate.Emitter`, + another value is `"onnx"` which is the inner API implemented + in onnx package. :return: code .. 
runpython:: @@ -75,9 +81,30 @@ def translate(proto: ModelProto, single_line=False) -> str: ) code = translate(onx) print(code) + + The inner API from onnx packahe is also available. + + .. runpython:: + :showcode: + + from onnx_array_api.light_api import start, translate + + onx = ( + start() + .vin("X") + .reshape((-1, 1)) + .Transpose(perm=[1, 0]) + .rename("Y") + .vout() + .to_onnx() + ) + code = translate(onx, api="onnx") + print(code) """ - tr = Translater(proto) - rows = tr.export() - if single_line: - return ".".join(rows) - return "".join(["(\n ", "\n .".join(rows), "\n)"]) + if api == "light": + tr = Translater(proto) + elif api == "onnx": + tr = Translater(proto, emitter=InnerEmitter()) + else: + raise ValueError(f"Unexpected value {api!r} for api.") + return tr.export(single_line=single_line, as_str=True) diff --git a/onnx_array_api/light_api/inner_emitter.py b/onnx_array_api/light_api/inner_emitter.py new file mode 100644 index 0000000..4510715 --- /dev/null +++ b/onnx_array_api/light_api/inner_emitter.py @@ -0,0 +1,108 @@ +from typing import Any, Dict, List +from .annotations import ELEMENT_TYPE_NAME +from .translate import Emitter, EventType + + +class InnerEmitter(Emitter): + """ + Converts event into proper code. + """ + + def join(self, rows: List[str], single_line: bool = False) -> str: + "Returns the separators. `single_line` is unused." + return "\n".join(rows) + + def __call__(self, event: EventType, **kwargs: Dict[str, Any]) -> List[str]: + """ + Converts an event into an instruction. 
+ + :param event: event kind + :param kwargs: event parameters + :return: list of instructions + """ + if event == EventType.START: + lines = ["opset_imports = ["] + opsets = kwargs.get("opsets", {}) + for k, v in opsets.items(): + lines.append(f" make_opsetid({k!r}, {v!r}),") + lines.append("]") + return lines + + if event == EventType.TO_ONNX: + lines = [ + "model = make_model(", + " graph,", + " functions=functions,", + " opset_imports=opset_imports", + ")", + ] + return lines + + if event == EventType.BEGIN_GRAPH: + lines = [ + "inputs = []", + "outputs = []", + "nodes = []", + "initializers = []", + "sparse_initializers = []", + "functions = []", + ] + return lines + + if event == EventType.END_GRAPH: + lines = [ + "graph = make_graph(", + " nodes,", + " 'noname',", + " inputs,", + " outputs,", + " initializers,", + " sparse_initializer=sparse_initializers,", + ")", + ] + return lines + + if event in (EventType.INPUT, EventType.OUTPUT): + container = "inputs" if event == EventType.INPUT else "outputs" + name = kwargs["name"] + elem_type = kwargs.get("elem_type", None) + shape = kwargs.get("shape", None) + if elem_type and shape: + return [ + f"{container}.append(make_tensor_value_info({name!r}, TensorProto.{ELEMENT_TYPE_NAME[elem_type]}, shape={shape!r}))" + ] + if elem_type: + return [ + f"{container}.append(make_tensor_value_info({name!r}, TensorProto.{ELEMENT_TYPE_NAME[elem_type]}, shape=[]))" + ] + raise RuntimeError( + f"Element type must be known. Invalid syntax for event={event} and kwargs={kwargs}." 
+ ) + + if event == EventType.NODE: + op_type = kwargs["op_type"] + inputs = kwargs["inputs"] + outputs = kwargs["outputs"] + if kwargs.get("domain", "") != "": + domain = kwargs["domain"] + raise NotImplementedError(f"domain={domain!r} not supported yet.") + + lines = [ + "nodes.append(", + " make_node(", + f" {op_type!r},", + f" {inputs},", + f" {outputs}", + ] + domain = kwargs.get("domain", "") + if domain: + lines.append(f" domain={domain!r},") + atts = kwargs.get("atts", {}) + if len(atts) > 0: + lines[-1] += "," + for k, v in atts.items(): + lines.append(f" {k}={self.render_attribute_value(v)}") + lines.extend([" )", ")"]) + return lines + + raise ValueError(f"Unexpected EventType {event}.") diff --git a/onnx_array_api/light_api/translate.py b/onnx_array_api/light_api/translate.py index db574df..0fc7671 100644 --- a/onnx_array_api/light_api/translate.py +++ b/onnx_array_api/light_api/translate.py @@ -12,6 +12,10 @@ class EventType(IntEnum): OUTPUT = 2 NODE = 3 TO_ONNX = 4 + BEGIN_GRAPH = 5 + END_GRAPH = 6 + BEGIN_FUNCTION = 5 + END_FUNCTION = 6 class Emitter: @@ -19,6 +23,12 @@ class Emitter: Converts event into proper code. """ + def join(self, rows: List[str], single_line: bool = False) -> str: + "Join the rows" + if single_line: + return ".".join(rows) + return "".join(["(\n ", "\n .".join(rows), "\n)"]) + def __call__(self, event: EventType, **kwargs: Dict[str, Any]) -> List[str]: """ Converts an event into an instruction. @@ -124,21 +134,23 @@ def __init__( emitter: Optional[Emitter] = None, ): self.proto_ = proto - self.emit = emitter or Emitter() + self.emitter = emitter or Emitter() def __repr__(self) -> str: return f"{self.__class__.__name__}(<{type(self.proto_)})" - def export(self) -> List[str]: + def export(self, as_str, single_line: bool = False) -> Union[str, List[str]]: """ Exports into a code. 
+ :param as_str: as a single string or by rows + :param single_line: tries to compress the output into a single line :return: list of instructions """ rows = [] if isinstance(self.proto_, ModelProto): opsets = {d.domain: d.version for d in self.proto_.opset_import} - rows.extend(self.emit(EventType.START, opsets=opsets)) + rows.extend(self.emitter(EventType.START, opsets=opsets)) inputs = self.proto_.graph.input outputs = self.proto_.graph.output nodes = self.proto_.graph.node @@ -149,12 +161,19 @@ def export(self) -> List[str]: else: raise ValueError(f"Unexpected type {type(self.proto_)} for proto.") + rows.extend( + self.emitter( + EventType.BEGIN_FUNCTION + if isinstance(self.proto_, FunctionProto) + else EventType.BEGIN_GRAPH + ) + ) for i in inputs: if isinstance(i, str): - rows.extend(self.emit(EventType.INPUT, name=i)) + rows.extend(self.emitter(EventType.INPUT, name=i)) else: rows.extend( - self.emit( + self.emitter( EventType.INPUT, name=i.name, elem_type=i.type.tensor_type.elem_type, @@ -168,7 +187,7 @@ def export(self) -> List[str]: for node in nodes: atts = self.extract_attributes(node) rows.extend( - self.emit( + self.emitter( EventType.NODE, op_type=node.op_type, inputs=node.input, @@ -180,10 +199,10 @@ def export(self) -> List[str]: for o in outputs: if isinstance(i, str): - rows.extend(self.emit(EventType.INPUT, name=o)) + rows.extend(self.emitter(EventType.INPUT, name=o)) else: rows.extend( - self.emit( + self.emitter( EventType.OUTPUT, name=o.name, elem_type=o.type.tensor_type.elem_type, @@ -193,11 +212,20 @@ def export(self) -> List[str]: ), ) ) + rows.extend( + self.emitter( + EventType.END_FUNCTION + if isinstance(self.proto_, FunctionProto) + else EventType.END_GRAPH + ) + ) if isinstance(self.proto_, ModelProto) and len(self.proto_.functions) > 0: raise NotImplementedError("Local functions are not yet implemented.") - rows.extend(self.emit(EventType.TO_ONNX)) + rows.extend(self.emitter(EventType.TO_ONNX)) + if as_str: + return 
self.emitter.join(rows, single_line=single_line) return rows def extract_attributes( From 21c47943a1c7fd15a06ff52b2e3966e505b71da5 Mon Sep 17 00:00:00 2001 From: Xavier Dupre Date: Fri, 10 Nov 2023 12:11:32 +0100 Subject: [PATCH 02/12] export subgraphs --- CHANGELOGS.rst | 1 + .../ut_light_api/test_translate_classic.py | 22 ++++++++-- onnx_array_api/light_api/__init__.py | 8 ++-- onnx_array_api/light_api/inner_emitter.py | 43 ++++++++++++++----- onnx_array_api/light_api/translate.py | 28 ++++++++---- 5 files changed, 76 insertions(+), 26 deletions(-) diff --git a/CHANGELOGS.rst b/CHANGELOGS.rst index 1c385ca..055a05e 100644 --- a/CHANGELOGS.rst +++ b/CHANGELOGS.rst @@ -4,6 +4,7 @@ Change Logs 0.1.3 +++++ +* :pr:`47`: extends export onnx to code to support inner API * :pr:`46`: adds an export to convert an onnx graph into light API code * :pr:`45`: fixes light API for operators with two outputs diff --git a/_unittests/ut_light_api/test_translate_classic.py b/_unittests/ut_light_api/test_translate_classic.py index 6d0c152..6aad3c9 100644 --- a/_unittests/ut_light_api/test_translate_classic.py +++ b/_unittests/ut_light_api/test_translate_classic.py @@ -1,7 +1,8 @@ import unittest +import os from textwrap import dedent import numpy as np -from onnx import ModelProto, TensorProto +from onnx import ModelProto, TensorProto, load from onnx.defs import onnx_opset_version from onnx.reference import ReferenceEvaluator from onnx.helper import ( @@ -207,8 +208,8 @@ def test_topk_reverse(self): 'TopK', ['X', 'K'], ['Values', 'Indices'], - axis=-1 - largest=0 + axis=-1, + largest=0, sorted=1 ) ) @@ -228,8 +229,23 @@ def test_topk_reverse(self): opset_imports=opset_imports )""" ).strip("\n") + self.maxDiff = None self.assertEqual(expected, code) + def test_fft(self): + data = os.path.join( + os.path.dirname(__file__), "_data", "stft_inlined_batch_1.onnx" + ) + onx = load(data) + code = translate(onx, api="onnx") + try: + compile(code, "", mode="exec") + except Exception as e: + 
new_code = "\n".join( + [f"{i+1:04} {line}" for i, line in enumerate(code.split("\n"))] + ) + raise AssertionError(f"ERROR {e}\n{new_code}") + if __name__ == "__main__": # TestLightApi().test_topk() diff --git a/onnx_array_api/light_api/__init__.py b/onnx_array_api/light_api/__init__.py index 0a20415..079f443 100644 --- a/onnx_array_api/light_api/__init__.py +++ b/onnx_array_api/light_api/__init__.py @@ -103,8 +103,8 @@ def translate(proto: ModelProto, single_line: bool = False, api: str = "light") """ if api == "light": tr = Translater(proto) - elif api == "onnx": + return tr.export(single_line=single_line, as_str=True) + if api == "onnx": tr = Translater(proto, emitter=InnerEmitter()) - else: - raise ValueError(f"Unexpected value {api!r} for api.") - return tr.export(single_line=single_line, as_str=True) + return tr.export(as_str=True) + raise ValueError(f"Unexpected value {api!r} for api.") diff --git a/onnx_array_api/light_api/inner_emitter.py b/onnx_array_api/light_api/inner_emitter.py index 4510715..6bef563 100644 --- a/onnx_array_api/light_api/inner_emitter.py +++ b/onnx_array_api/light_api/inner_emitter.py @@ -1,6 +1,7 @@ -from typing import Any, Dict, List +from typing import Any, Dict, List, Tuple +from onnx import AttributeProto from .annotations import ELEMENT_TYPE_NAME -from .translate import Emitter, EventType +from .translate import Emitter, EventType, Translater class InnerEmitter(Emitter): @@ -75,10 +76,7 @@ def __call__(self, event: EventType, **kwargs: Dict[str, Any]) -> List[str]: return [ f"{container}.append(make_tensor_value_info({name!r}, TensorProto.{ELEMENT_TYPE_NAME[elem_type]}, shape=[]))" ] - raise RuntimeError( - f"Element type must be known. Invalid syntax for event={event} and kwargs={kwargs}." 
- ) - + return [f"{container}.append(make_tensor_value_info({name!r}))"] if event == EventType.NODE: op_type = kwargs["op_type"] inputs = kwargs["inputs"] @@ -87,22 +85,45 @@ def __call__(self, event: EventType, **kwargs: Dict[str, Any]) -> List[str]: domain = kwargs["domain"] raise NotImplementedError(f"domain={domain!r} not supported yet.") + before_lines = [] lines = [ "nodes.append(", " make_node(", f" {op_type!r},", f" {inputs},", - f" {outputs}", + f" {outputs},", ] domain = kwargs.get("domain", "") if domain: lines.append(f" domain={domain!r},") atts = kwargs.get("atts", {}) - if len(atts) > 0: - lines[-1] += "," for k, v in atts.items(): - lines.append(f" {k}={self.render_attribute_value(v)}") + before, value = self.render_attribute_value(v) + before_lines.extend(before) + lines.append(f" {k}={value},") + lines[-1] = lines[-1][:-1] lines.extend([" )", ")"]) - return lines + return before_lines + lines raise ValueError(f"Unexpected EventType {event}.") + + def render_attribute_value(self, value: Any) -> Tuple[List[str], str]: + """ + Renders an attribute value into a string. 
+ + :param value: value to converter + :return: rows to append before, actual value + """ + if value[0].type == AttributeProto.GRAPH: + tr = Translater(value[0].g, emitter=self) + rows = tr.export(as_str=False, single_line=False) + new_rows = [f"def _make_local_graph_{value[0].name}():"] + for line in rows: + if "make_model" in line: + break + new_rows.append(" " + line) + new_rows.append(" return graph") + new_rows.append(f"{value[0].name} = _make_local_graph_{value[0].name}()") + return new_rows, value[0].name + + return super().render_attribute_value(value) diff --git a/onnx_array_api/light_api/translate.py b/onnx_array_api/light_api/translate.py index 0fc7671..51a8c57 100644 --- a/onnx_array_api/light_api/translate.py +++ b/onnx_array_api/light_api/translate.py @@ -14,8 +14,8 @@ class EventType(IntEnum): TO_ONNX = 4 BEGIN_GRAPH = 5 END_GRAPH = 6 - BEGIN_FUNCTION = 5 - END_FUNCTION = 6 + BEGIN_FUNCTION = 7 + END_FUNCTION = 8 class Emitter: @@ -52,6 +52,12 @@ def __call__(self, event: EventType, **kwargs: Dict[str, Any]) -> List[str]: if event == EventType.TO_ONNX: return ["to_onnx()"] + if event == EventType.BEGIN_GRAPH: + return [] + + if event == EventType.END_GRAPH: + return [] + if event == EventType.INPUT: name = kwargs["name"] elem_type = kwargs.get("elem_type", None) @@ -95,7 +101,10 @@ def __call__(self, event: EventType, **kwargs: Dict[str, Any]) -> List[str]: atts = kwargs.get("atts", {}) args = [] for k, v in atts.items(): - args.append(f"{k}={self.render_attribute_value(v)}") + before, vatt = self.render_attribute_value(v) + if before: + raise NotImplementedError("Graph attribute not supported yet.") + args.append(f"{k}={vatt}") str_inputs = ", ".join([f"{i!r}" for i in inputs]) inst = [f"bring({str_inputs})", f"{op_type}({', '.join(args)})"] @@ -108,18 +117,21 @@ def __call__(self, event: EventType, **kwargs: Dict[str, Any]) -> List[str]: raise ValueError(f"Unexpected EventType {event}.") - def render_attribute_value(self, value: Any) -> str: + def 
render_attribute_value(self, value: Any) -> Tuple[List[str], str]: """ Renders an attribute value into a string. + + :param value: value to converter + :return: rows to append before, actual value """ v = value[-1] if isinstance(v, (int, float, list)): - return str(v) + return [], str(v) if isinstance(v, np.ndarray): if len(v.shape) == 0: - return str(v) + return [], str(v) if len(v.shape) == 1: - return str(v.tolist()) + return [], str(v.tolist()) raise ValueError(f"Unable to render an attribute {value}.") @@ -198,7 +210,7 @@ def export(self, as_str, single_line: bool = False) -> Union[str, List[str]]: ) for o in outputs: - if isinstance(i, str): + if isinstance(o, str): rows.extend(self.emitter(EventType.INPUT, name=o)) else: rows.extend( From b6db1d326d668188b29a62fff4d252a5e132d9b9 Mon Sep 17 00:00:00 2001 From: Xavier Dupre Date: Fri, 10 Nov 2023 12:57:22 +0100 Subject: [PATCH 03/12] update code --- _doc/api/light_api.rst | 2 +- .../ut_light_api/test_translate_classic.py | 6 + onnx_array_api/light_api/emitter.py | 134 ++++++++++++++++ onnx_array_api/light_api/inner_emitter.py | 18 ++- onnx_array_api/light_api/translate.py | 151 +++--------------- 5 files changed, 178 insertions(+), 133 deletions(-) create mode 100644 onnx_array_api/light_api/emitter.py diff --git a/_doc/api/light_api.rst b/_doc/api/light_api.rst index b68b923..54fd821 100644 --- a/_doc/api/light_api.rst +++ b/_doc/api/light_api.rst @@ -51,7 +51,7 @@ Classes for the Translater Emitter +++++++ -.. autoclass:: onnx_array_api.light_api.translate.Emitter +.. 
autoclass:: onnx_array_api.light_api.emitter.Emitter :members: EventType diff --git a/_unittests/ut_light_api/test_translate_classic.py b/_unittests/ut_light_api/test_translate_classic.py index 6aad3c9..4150892 100644 --- a/_unittests/ut_light_api/test_translate_classic.py +++ b/_unittests/ut_light_api/test_translate_classic.py @@ -136,6 +136,12 @@ def test_transpose(self): initializers = [] sparse_initializers = [] functions = [] + initializers.append( + from_array( + np.array([-1, 1]).astype(np.int64), + name='r' + ) + ) inputs.append(make_tensor_value_info('X', TensorProto.FLOAT, shape=[])) nodes.append( make_node( diff --git a/onnx_array_api/light_api/emitter.py b/onnx_array_api/light_api/emitter.py new file mode 100644 index 0000000..eea502b --- /dev/null +++ b/onnx_array_api/light_api/emitter.py @@ -0,0 +1,134 @@ +from typing import Any, Dict, List, Tuple +from enum import IntEnum +import numpy as np +from .annotations import ELEMENT_TYPE_NAME + + +class EventType(IntEnum): + START = 0 + INPUT = 1 + OUTPUT = 2 + NODE = 3 + TO_ONNX = 4 + BEGIN_GRAPH = 5 + END_GRAPH = 6 + BEGIN_FUNCTION = 7 + END_FUNCTION = 8 + INITIALIZER = 9 + + +class Emitter: + """ + Converts event into proper code. + """ + + def join(self, rows: List[str], single_line: bool = False) -> str: + "Join the rows" + if single_line: + return ".".join(rows) + return "".join(["(\n ", "\n .".join(rows), "\n)"]) + + def __call__(self, event: EventType, **kwargs: Dict[str, Any]) -> List[str]: + """ + Converts an event into an instruction. 
+ + :param event: event kind + :param kwargs: event parameters + :return: list of instructions + """ + if event == EventType.START: + opsets = kwargs.get("opsets", {}) + opset = opsets.get("", None) + if opset is not None: + del opsets[""] + args = [] + if opset: + args.append(f"opset={opset}") + if opsets: + args.append(f"opsets={opsets}") + return [f"start({', '.join(args)})"] + + if event == EventType.TO_ONNX: + return ["to_onnx()"] + + if event == EventType.BEGIN_GRAPH: + return [] + + if event == EventType.END_GRAPH: + return [] + + if event == EventType.INPUT: + name = kwargs["name"] + elem_type = kwargs.get("elem_type", None) + shape = kwargs.get("shape", None) + if elem_type and shape: + return [ + f"vin({name!r}, elem_type=TensorProto.{ELEMENT_TYPE_NAME[elem_type]}, shape={shape!r})" + ] + if elem_type: + return [ + f"vin({name!r}, elem_type=TensorProto.{ELEMENT_TYPE_NAME[elem_type]})" + ] + return [f"vin({name!r})"] + + if event == EventType.OUTPUT: + inst = [] + if "name" in kwargs: + name = kwargs["name"] + inst.append(f"bring({name!r})") + elem_type = kwargs.get("elem_type", None) + shape = kwargs.get("shape", None) + if elem_type and shape: + inst.append( + f"vout(elem_type=TensorProto.{ELEMENT_TYPE_NAME[elem_type]}, shape={shape!r})" + ) + elif elem_type: + inst.append( + f"vout(elem_type=TensorProto.{ELEMENT_TYPE_NAME[elem_type]})" + ) + else: + inst.append("vout()") + return inst + + if event == EventType.NODE: + op_type = kwargs["op_type"] + inputs = kwargs["inputs"] + outputs = kwargs["outputs"] + if kwargs.get("domain", "") != "": + domain = kwargs["domain"] + raise NotImplementedError(f"domain={domain!r} not supported yet.") + atts = kwargs.get("atts", {}) + args = [] + for k, v in atts.items(): + before, vatt = self.render_attribute_value(v) + if before: + raise NotImplementedError("Graph attribute not supported yet.") + args.append(f"{k}={vatt}") + + str_inputs = ", ".join([f"{i!r}" for i in inputs]) + inst = [f"bring({str_inputs})", 
f"{op_type}({', '.join(args)})"] + if len(outputs) == 1: + inst.append(f"rename({outputs[0]!r})") + else: + str_outputs = ", ".join([f"{o!r}" for o in outputs]) + inst.append(f"rename({str_outputs})") + return inst + + raise ValueError(f"Unexpected EventType {event}.") + + def render_attribute_value(self, value: Any) -> Tuple[List[str], str]: + """ + Renders an attribute value into a string. + + :param value: value to converter + :return: rows to append before, actual value + """ + v = value[-1] + if isinstance(v, (int, float, list)): + return [], str(v) + if isinstance(v, np.ndarray): + if len(v.shape) == 0: + return [], str(v) + if len(v.shape) == 1: + return [], str(v.tolist()) + raise ValueError(f"Unable to render an attribute {value}.") diff --git a/onnx_array_api/light_api/inner_emitter.py b/onnx_array_api/light_api/inner_emitter.py index 6bef563..8f2abbe 100644 --- a/onnx_array_api/light_api/inner_emitter.py +++ b/onnx_array_api/light_api/inner_emitter.py @@ -1,5 +1,5 @@ from typing import Any, Dict, List, Tuple -from onnx import AttributeProto +from onnx import AttributeProto, TensorProto from .annotations import ELEMENT_TYPE_NAME from .translate import Emitter, EventType, Translater @@ -63,6 +63,22 @@ def __call__(self, event: EventType, **kwargs: Dict[str, Any]) -> List[str]: ] return lines + if event == EventType.INITIALIZER: + name = kwargs["name"] + value = kwargs["value"] + dtype = { + TensorProto.FLOAT: "float32", + TensorProto.INT64: "int64", + }[kwargs["init"].data_type] + return [ + "initializers.append(", + " from_array(", + f" np.{repr(value).strip()}.astype(np.{dtype}),", + f" name={name!r}", + " )", + ")", + ] + if event in (EventType.INPUT, EventType.OUTPUT): container = "inputs" if event == EventType.INPUT else "outputs" name = kwargs["name"] diff --git a/onnx_array_api/light_api/translate.py b/onnx_array_api/light_api/translate.py index 51a8c57..b42dfc5 100644 --- a/onnx_array_api/light_api/translate.py +++ 
b/onnx_array_api/light_api/translate.py @@ -1,138 +1,8 @@ from typing import Any, Dict, List, Optional, Tuple, Union -from enum import IntEnum import numpy as np from onnx import AttributeProto, FunctionProto, GraphProto, ModelProto, NodeProto from onnx.numpy_helper import to_array -from .annotations import ELEMENT_TYPE_NAME - - -class EventType(IntEnum): - START = 0 - INPUT = 1 - OUTPUT = 2 - NODE = 3 - TO_ONNX = 4 - BEGIN_GRAPH = 5 - END_GRAPH = 6 - BEGIN_FUNCTION = 7 - END_FUNCTION = 8 - - -class Emitter: - """ - Converts event into proper code. - """ - - def join(self, rows: List[str], single_line: bool = False) -> str: - "Join the rows" - if single_line: - return ".".join(rows) - return "".join(["(\n ", "\n .".join(rows), "\n)"]) - - def __call__(self, event: EventType, **kwargs: Dict[str, Any]) -> List[str]: - """ - Converts an event into an instruction. - - :param event: event kind - :param kwargs: event parameters - :return: list of instructions - """ - if event == EventType.START: - opsets = kwargs.get("opsets", {}) - opset = opsets.get("", None) - if opset is not None: - del opsets[""] - args = [] - if opset: - args.append(f"opset={opset}") - if opsets: - args.append(f"opsets={opsets}") - return [f"start({', '.join(args)})"] - - if event == EventType.TO_ONNX: - return ["to_onnx()"] - - if event == EventType.BEGIN_GRAPH: - return [] - - if event == EventType.END_GRAPH: - return [] - - if event == EventType.INPUT: - name = kwargs["name"] - elem_type = kwargs.get("elem_type", None) - shape = kwargs.get("shape", None) - if elem_type and shape: - return [ - f"vin({name!r}, elem_type=TensorProto.{ELEMENT_TYPE_NAME[elem_type]}, shape={shape!r})" - ] - if elem_type: - return [ - f"vin({name!r}, elem_type=TensorProto.{ELEMENT_TYPE_NAME[elem_type]})" - ] - return [f"vin({name!r})"] - - if event == EventType.OUTPUT: - inst = [] - if "name" in kwargs: - name = kwargs["name"] - inst.append(f"bring({name!r})") - elem_type = kwargs.get("elem_type", None) - shape = 
kwargs.get("shape", None) - if elem_type and shape: - inst.append( - f"vout(elem_type=TensorProto.{ELEMENT_TYPE_NAME[elem_type]}, shape={shape!r})" - ) - elif elem_type: - inst.append( - f"vout(elem_type=TensorProto.{ELEMENT_TYPE_NAME[elem_type]})" - ) - else: - inst.append("vout()") - return inst - - if event == EventType.NODE: - op_type = kwargs["op_type"] - inputs = kwargs["inputs"] - outputs = kwargs["outputs"] - if kwargs.get("domain", "") != "": - domain = kwargs["domain"] - raise NotImplementedError(f"domain={domain!r} not supported yet.") - atts = kwargs.get("atts", {}) - args = [] - for k, v in atts.items(): - before, vatt = self.render_attribute_value(v) - if before: - raise NotImplementedError("Graph attribute not supported yet.") - args.append(f"{k}={vatt}") - - str_inputs = ", ".join([f"{i!r}" for i in inputs]) - inst = [f"bring({str_inputs})", f"{op_type}({', '.join(args)})"] - if len(outputs) == 1: - inst.append(f"rename({outputs[0]!r})") - else: - str_outputs = ", ".join([f"{o!r}" for o in outputs]) - inst.append(f"rename({str_outputs})") - return inst - - raise ValueError(f"Unexpected EventType {event}.") - - def render_attribute_value(self, value: Any) -> Tuple[List[str], str]: - """ - Renders an attribute value into a string. 
- - :param value: value to converter - :return: rows to append before, actual value - """ - v = value[-1] - if isinstance(v, (int, float, list)): - return [], str(v) - if isinstance(v, np.ndarray): - if len(v.shape) == 0: - return [], str(v) - if len(v.shape) == 1: - return [], str(v.tolist()) - raise ValueError(f"Unable to render an attribute {value}.") +from .emitter import EventType, Emitter class Translater: @@ -166,13 +36,24 @@ def export(self, as_str, single_line: bool = False) -> Union[str, List[str]]: inputs = self.proto_.graph.input outputs = self.proto_.graph.output nodes = self.proto_.graph.node + initializers = self.proto_.graph.initializer + sparse_initializers = self.proto_.graph.sparse_initializer elif isinstance(self.proto_, (FunctionProto, GraphProto)): inputs = self.proto_.input outputs = self.proto_.output nodes = self.proto_.node + if isinstance(self.proto_, GraphProto): + initializers = self.proto_.initializer + sparse_initializers = self.proto_.sparse_initializer + else: + initializers = [] + sparse_initializers = [] else: raise ValueError(f"Unexpected type {type(self.proto_)} for proto.") + if len(sparse_initializers) != 0: + raise NotImplementedError("Sparse initializer not supported yet.") + rows.extend( self.emitter( EventType.BEGIN_FUNCTION @@ -180,6 +61,14 @@ def export(self, as_str, single_line: bool = False) -> Union[str, List[str]]: else EventType.BEGIN_GRAPH ) ) + + for i in initializers: + rows.extend( + self.emitter( + EventType.INITIALIZER, name=i.name, init=i, value=to_array(i) + ) + ) + for i in inputs: if isinstance(i, str): rows.extend(self.emitter(EventType.INPUT, name=i)) From 4c67be765cb537d8a651dee97480a89a3154593d Mon Sep 17 00:00:00 2001 From: Xavier Dupre Date: Sat, 11 Nov 2023 11:48:27 +0100 Subject: [PATCH 04/12] refactoring --- _doc/api/light_api.rst | 6 + _unittests/ut_light_api/test_translate.py | 8 + onnx_array_api/light_api/emitter.py | 261 +++++++++++++++------- onnx_array_api/light_api/inner_emitter.py | 232 
+++++++++---------- 4 files changed, 310 insertions(+), 197 deletions(-) diff --git a/_doc/api/light_api.rst b/_doc/api/light_api.rst index 54fd821..d60c467 100644 --- a/_doc/api/light_api.rst +++ b/_doc/api/light_api.rst @@ -48,6 +48,12 @@ Vars Classes for the Translater ========================== +BaseEmitter ++++++++++++ + +.. autoclass:: onnx_array_api.light_api.emitter.BaseEmitter + :members: + Emitter +++++++ diff --git a/_unittests/ut_light_api/test_translate.py b/_unittests/ut_light_api/test_translate.py index c1f63f9..f5c1f64 100644 --- a/_unittests/ut_light_api/test_translate.py +++ b/_unittests/ut_light_api/test_translate.py @@ -6,11 +6,17 @@ from onnx.reference import ReferenceEvaluator from onnx_array_api.ext_test_case import ExtTestCase from onnx_array_api.light_api import start, translate +from onnx_array_api.light_api.emitter import EventType OPSET_API = min(19, onnx_opset_version() - 1) class TestTranslate(ExtTestCase): + def test_event_type(self): + self.assertEqual( + EventType.to_str(EventType.INITIALIZER), "EventType.INITIALIZER" + ) + def test_exp(self): onx = start(opset=19).vin("X").Exp().rename("Y").vout().to_onnx() self.assertIsInstance(onx, ModelProto) @@ -73,6 +79,8 @@ def test_transpose(self): """ ( start(opset=19) + .cst(np.array([-1, 1]).astype(int64)) + .rename('r') .vin('X', elem_type=TensorProto.FLOAT) .bring('X', 'r') .Reshape() diff --git a/onnx_array_api/light_api/emitter.py b/onnx_array_api/light_api/emitter.py index eea502b..c76ede9 100644 --- a/onnx_array_api/light_api/emitter.py +++ b/onnx_array_api/light_api/emitter.py @@ -1,6 +1,8 @@ +import inspect from typing import Any, Dict, List, Tuple from enum import IntEnum import numpy as np +from onnx import TensorProto from .annotations import ELEMENT_TYPE_NAME @@ -15,19 +17,16 @@ class EventType(IntEnum): BEGIN_FUNCTION = 7 END_FUNCTION = 8 INITIALIZER = 9 + SPARSE_INITIALIZER = 10 + @classmethod + def to_str(cls, self) -> str: + for k, v in EventType.__dict__.items(): + if 
self == v: + return f"{cls.__name__}.{k}" -class Emitter: - """ - Converts event into proper code. - """ - - def join(self, rows: List[str], single_line: bool = False) -> str: - "Join the rows" - if single_line: - return ".".join(rows) - return "".join(["(\n ", "\n .".join(rows), "\n)"]) +class BaseEmitter: def __call__(self, event: EventType, **kwargs: Dict[str, Any]) -> List[str]: """ Converts an event into an instruction. @@ -36,85 +35,35 @@ def __call__(self, event: EventType, **kwargs: Dict[str, Any]) -> List[str]: :param kwargs: event parameters :return: list of instructions """ - if event == EventType.START: - opsets = kwargs.get("opsets", {}) - opset = opsets.get("", None) - if opset is not None: - del opsets[""] - args = [] - if opset: - args.append(f"opset={opset}") - if opsets: - args.append(f"opsets={opsets}") - return [f"start({', '.join(args)})"] - if event == EventType.TO_ONNX: - return ["to_onnx()"] + if event == EventType.NODE: + return self._emit_node(**kwargs) - if event == EventType.BEGIN_GRAPH: - return [] + if event == EventType.INITIALIZER: + return self._emit_initializer(**kwargs) - if event == EventType.END_GRAPH: - return [] + if event == EventType.SPARSE_INITIALIZER: + return self._emit_sparse_initializer(**kwargs) if event == EventType.INPUT: - name = kwargs["name"] - elem_type = kwargs.get("elem_type", None) - shape = kwargs.get("shape", None) - if elem_type and shape: - return [ - f"vin({name!r}, elem_type=TensorProto.{ELEMENT_TYPE_NAME[elem_type]}, shape={shape!r})" - ] - if elem_type: - return [ - f"vin({name!r}, elem_type=TensorProto.{ELEMENT_TYPE_NAME[elem_type]})" - ] - return [f"vin({name!r})"] + return self._emit_input(**kwargs) if event == EventType.OUTPUT: - inst = [] - if "name" in kwargs: - name = kwargs["name"] - inst.append(f"bring({name!r})") - elem_type = kwargs.get("elem_type", None) - shape = kwargs.get("shape", None) - if elem_type and shape: - inst.append( - 
f"vout(elem_type=TensorProto.{ELEMENT_TYPE_NAME[elem_type]}, shape={shape!r})" - ) - elif elem_type: - inst.append( - f"vout(elem_type=TensorProto.{ELEMENT_TYPE_NAME[elem_type]})" - ) - else: - inst.append("vout()") - return inst + return self._emit_output(**kwargs) - if event == EventType.NODE: - op_type = kwargs["op_type"] - inputs = kwargs["inputs"] - outputs = kwargs["outputs"] - if kwargs.get("domain", "") != "": - domain = kwargs["domain"] - raise NotImplementedError(f"domain={domain!r} not supported yet.") - atts = kwargs.get("atts", {}) - args = [] - for k, v in atts.items(): - before, vatt = self.render_attribute_value(v) - if before: - raise NotImplementedError("Graph attribute not supported yet.") - args.append(f"{k}={vatt}") - - str_inputs = ", ".join([f"{i!r}" for i in inputs]) - inst = [f"bring({str_inputs})", f"{op_type}({', '.join(args)})"] - if len(outputs) == 1: - inst.append(f"rename({outputs[0]!r})") - else: - str_outputs = ", ".join([f"{o!r}" for o in outputs]) - inst.append(f"rename({str_outputs})") - return inst - - raise ValueError(f"Unexpected EventType {event}.") + if event == EventType.START: + return self._emit_start(**kwargs) + + if event == EventType.TO_ONNX: + return self._emit_to_onnx(**kwargs) + + if event == EventType.BEGIN_GRAPH: + return self._emit_begin_graph(**kwargs) + + if event == EventType.END_GRAPH: + return self._emit_end_graph(**kwargs) + + raise ValueError(f"Unexpected event {EventType.to_str(event)}.") def render_attribute_value(self, value: Any) -> Tuple[List[str], str]: """ @@ -132,3 +81,153 @@ def render_attribute_value(self, value: Any) -> Tuple[List[str], str]: if len(v.shape) == 1: return [], str(v.tolist()) raise ValueError(f"Unable to render an attribute {value}.") + + def join(self, rows: List[str], single_line: bool = False) -> str: + raise NotImplementedError( + f"Method {inspect.currentframe().f_code.co_name!r} was not overloaded." 
+ ) + + def _emit_start(self, **kwargs: Dict[str, Any]) -> List[str]: + raise NotImplementedError( + f"Method {inspect.currentframe().f_code.co_name!r} was not overloaded." + ) + + def _emit_to_onnx(self, **kwargs: Dict[str, Any]) -> List[str]: + raise NotImplementedError( + f"Method {inspect.currentframe().f_code.co_name!r} was not overloaded." + ) + + def _emit_begin_graph(self, **kwargs: Dict[str, Any]) -> List[str]: + raise NotImplementedError( + f"Method {inspect.currentframe().f_code.co_name!r} was not overloaded." + ) + + def _emit_end_graph(self, **kwargs: Dict[str, Any]) -> List[str]: + raise NotImplementedError( + f"Method {inspect.currentframe().f_code.co_name!r} was not overloaded." + ) + + def _emit_initializer(self, **kwargs: Dict[str, Any]) -> List[str]: + raise NotImplementedError( + f"Method {inspect.currentframe().f_code.co_name!r} was not overloaded." + ) + + def _emit_input(self, **kwargs: Dict[str, Any]) -> List[str]: + raise NotImplementedError( + f"Method {inspect.currentframe().f_code.co_name!r} was not overloaded." + ) + + def _emit_output(self, **kwargs: Dict[str, Any]) -> List[str]: + raise NotImplementedError( + f"Method {inspect.currentframe().f_code.co_name!r} was not overloaded." + ) + + def _emit_node(self, **kwargs: Dict[str, Any]) -> List[str]: + raise NotImplementedError( + f"Method {inspect.currentframe().f_code.co_name!r} was not overloaded." + ) + + def _emit_sparse_initializer(self, **kwargs: Dict[str, Any]) -> List[str]: + raise NotImplementedError( + f"Method {inspect.currentframe().f_code.co_name!r} was not overloaded." + ) + + +class Emitter(BaseEmitter): + """ + Converts event into proper code. 
+ """ + + def join(self, rows: List[str], single_line: bool = False) -> str: + "Join the rows" + if single_line: + return ".".join(rows) + return "".join(["(\n ", "\n .".join(rows), "\n)"]) + + def _emit_start(self, **kwargs: Dict[str, Any]) -> List[str]: + opsets = kwargs.get("opsets", {}) + opset = opsets.get("", None) + if opset is not None: + del opsets[""] + args = [] + if opset: + args.append(f"opset={opset}") + if opsets: + args.append(f"opsets={opsets}") + return [f"start({', '.join(args)})"] + + def _emit_to_onnx(self, **kwargs: Dict[str, Any]) -> List[str]: + return ["to_onnx()"] + + def _emit_begin_graph(self, **kwargs: Dict[str, Any]) -> List[str]: + return [] + + def _emit_end_graph(self, **kwargs: Dict[str, Any]) -> List[str]: + return [] + + def _emit_initializer(self, **kwargs: Dict[str, Any]) -> List[str]: + name = kwargs["name"] + value = kwargs["value"] + svalue = repr(value).strip() + if "dtype" not in svalue: + dtype = { + TensorProto.FLOAT: "float32", + TensorProto.INT64: "int64", + }[kwargs["init"].data_type] + svalue += f".astype(np.{dtype})" + return [f"cst(np.{svalue})", f"rename({name!r})"] + + def _emit_input(self, **kwargs: Dict[str, Any]) -> List[str]: + name = kwargs["name"] + elem_type = kwargs.get("elem_type", None) + shape = kwargs.get("shape", None) + if elem_type and shape: + return [ + f"vin({name!r}, elem_type=TensorProto.{ELEMENT_TYPE_NAME[elem_type]}, shape={shape!r})" + ] + if elem_type: + return [ + f"vin({name!r}, elem_type=TensorProto.{ELEMENT_TYPE_NAME[elem_type]})" + ] + return [f"vin({name!r})"] + + def _emit_output(self, **kwargs: Dict[str, Any]) -> List[str]: + inst = [] + if "name" in kwargs: + name = kwargs["name"] + inst.append(f"bring({name!r})") + elem_type = kwargs.get("elem_type", None) + shape = kwargs.get("shape", None) + if elem_type and shape: + inst.append( + f"vout(elem_type=TensorProto.{ELEMENT_TYPE_NAME[elem_type]}, shape={shape!r})" + ) + elif elem_type: + 
inst.append(f"vout(elem_type=TensorProto.{ELEMENT_TYPE_NAME[elem_type]})") + else: + inst.append("vout()") + return inst + + def _emit_node(self, **kwargs: Dict[str, Any]) -> List[str]: + op_type = kwargs["op_type"] + inputs = kwargs["inputs"] + outputs = kwargs["outputs"] + if kwargs.get("domain", "") != "": + domain = kwargs["domain"] + raise NotImplementedError(f"domain={domain!r} not supported yet.") + atts = kwargs.get("atts", {}) + args = [] + for k, v in atts.items(): + before, vatt = self.render_attribute_value(v) + if before: + raise NotImplementedError("Graph attribute not supported yet.") + args.append(f"{k}={vatt}") + + str_inputs = ", ".join([f"{i!r}" for i in inputs]) + inst = [f"bring({str_inputs})", f"{op_type}({', '.join(args)})"] + if len(outputs) == 1: + inst.append(f"rename({outputs[0]!r})") + else: + str_outputs = ", ".join([f"{o!r}" for o in outputs]) + inst.append(f"rename({str_outputs})") + return inst diff --git a/onnx_array_api/light_api/inner_emitter.py b/onnx_array_api/light_api/inner_emitter.py index 8f2abbe..eb686d8 100644 --- a/onnx_array_api/light_api/inner_emitter.py +++ b/onnx_array_api/light_api/inner_emitter.py @@ -1,128 +1,15 @@ from typing import Any, Dict, List, Tuple from onnx import AttributeProto, TensorProto from .annotations import ELEMENT_TYPE_NAME -from .translate import Emitter, EventType, Translater +from .emitter import BaseEmitter +from .translate import EventType, Translater -class InnerEmitter(Emitter): +class InnerEmitter(BaseEmitter): """ Converts event into proper code. """ - def join(self, rows: List[str], single_line: bool = False) -> str: - "Returns the separators. `single_line` is unused." - return "\n".join(rows) - - def __call__(self, event: EventType, **kwargs: Dict[str, Any]) -> List[str]: - """ - Converts an event into an instruction. 
- - :param event: event kind - :param kwargs: event parameters - :return: list of instructions - """ - if event == EventType.START: - lines = ["opset_imports = ["] - opsets = kwargs.get("opsets", {}) - for k, v in opsets.items(): - lines.append(f" make_opsetid({k!r}, {v!r}),") - lines.append("]") - return lines - - if event == EventType.TO_ONNX: - lines = [ - "model = make_model(", - " graph,", - " functions=functions,", - " opset_imports=opset_imports", - ")", - ] - return lines - - if event == EventType.BEGIN_GRAPH: - lines = [ - "inputs = []", - "outputs = []", - "nodes = []", - "initializers = []", - "sparse_initializers = []", - "functions = []", - ] - return lines - - if event == EventType.END_GRAPH: - lines = [ - "graph = make_graph(", - " nodes,", - " 'noname',", - " inputs,", - " outputs,", - " initializers,", - " sparse_initializer=sparse_initializers,", - ")", - ] - return lines - - if event == EventType.INITIALIZER: - name = kwargs["name"] - value = kwargs["value"] - dtype = { - TensorProto.FLOAT: "float32", - TensorProto.INT64: "int64", - }[kwargs["init"].data_type] - return [ - "initializers.append(", - " from_array(", - f" np.{repr(value).strip()}.astype(np.{dtype}),", - f" name={name!r}", - " )", - ")", - ] - - if event in (EventType.INPUT, EventType.OUTPUT): - container = "inputs" if event == EventType.INPUT else "outputs" - name = kwargs["name"] - elem_type = kwargs.get("elem_type", None) - shape = kwargs.get("shape", None) - if elem_type and shape: - return [ - f"{container}.append(make_tensor_value_info({name!r}, TensorProto.{ELEMENT_TYPE_NAME[elem_type]}, shape={shape!r}))" - ] - if elem_type: - return [ - f"{container}.append(make_tensor_value_info({name!r}, TensorProto.{ELEMENT_TYPE_NAME[elem_type]}, shape=[]))" - ] - return [f"{container}.append(make_tensor_value_info({name!r}))"] - if event == EventType.NODE: - op_type = kwargs["op_type"] - inputs = kwargs["inputs"] - outputs = kwargs["outputs"] - if kwargs.get("domain", "") != "": - domain 
= kwargs["domain"] - raise NotImplementedError(f"domain={domain!r} not supported yet.") - - before_lines = [] - lines = [ - "nodes.append(", - " make_node(", - f" {op_type!r},", - f" {inputs},", - f" {outputs},", - ] - domain = kwargs.get("domain", "") - if domain: - lines.append(f" domain={domain!r},") - atts = kwargs.get("atts", {}) - for k, v in atts.items(): - before, value = self.render_attribute_value(v) - before_lines.extend(before) - lines.append(f" {k}={value},") - lines[-1] = lines[-1][:-1] - lines.extend([" )", ")"]) - return before_lines + lines - - raise ValueError(f"Unexpected EventType {event}.") - def render_attribute_value(self, value: Any) -> Tuple[List[str], str]: """ Renders an attribute value into a string. @@ -143,3 +30,116 @@ def render_attribute_value(self, value: Any) -> Tuple[List[str], str]: return new_rows, value[0].name return super().render_attribute_value(value) + + def join(self, rows: List[str], single_line: bool = False) -> str: + "Returns the separators. `single_line` is unused." 
+ return "\n".join(rows) + + def _emit_start(self, **kwargs: Dict[str, Any]) -> List[str]: + lines = ["opset_imports = ["] + opsets = kwargs.get("opsets", {}) + for k, v in opsets.items(): + lines.append(f" make_opsetid({k!r}, {v!r}),") + lines.append("]") + return lines + + def _emit_to_onnx(self, **kwargs: Dict[str, Any]) -> List[str]: + lines = [ + "model = make_model(", + " graph,", + " functions=functions,", + " opset_imports=opset_imports", + ")", + ] + return lines + + def _emit_begin_graph(self, **kwargs: Dict[str, Any]) -> List[str]: + lines = [ + "inputs = []", + "outputs = []", + "nodes = []", + "initializers = []", + "sparse_initializers = []", + "functions = []", + ] + return lines + + def _emit_end_graph(self, **kwargs: Dict[str, Any]) -> List[str]: + lines = [ + "graph = make_graph(", + " nodes,", + " 'noname',", + " inputs,", + " outputs,", + " initializers,", + " sparse_initializer=sparse_initializers,", + ")", + ] + return lines + + def _emit_initializer(self, **kwargs: Dict[str, Any]) -> List[str]: + name = kwargs["name"] + value = kwargs["value"] + svalue = repr(value).strip() + if "dtype" not in svalue: + dtype = { + TensorProto.FLOAT: "float32", + TensorProto.INT64: "int64", + }[kwargs["init"].data_type] + svalue += f".astype(np.{dtype})" + return [ + "initializers.append(", + " from_array(", + f" np.{svalue},", + f" name={name!r}", + " )", + ")", + ] + + def _emit_io(self, container: str, **kwargs: Dict[str, Any]) -> List[str]: + name = kwargs["name"] + elem_type = kwargs.get("elem_type", None) + shape = kwargs.get("shape", None) + if elem_type and shape: + return [ + f"{container}.append(make_tensor_value_info({name!r}, TensorProto.{ELEMENT_TYPE_NAME[elem_type]}, shape={shape!r}))" + ] + if elem_type: + return [ + f"{container}.append(make_tensor_value_info({name!r}, TensorProto.{ELEMENT_TYPE_NAME[elem_type]}, shape=[]))" + ] + return [f"{container}.append(make_tensor_value_info({name!r}))"] + + def _emit_input(self, **kwargs: Dict[str, 
Any]) -> List[str]: + return self._emit_io("inputs", **kwargs) + + def _emit_output(self, **kwargs: Dict[str, Any]) -> List[str]: + return self._emit_io("outputs", **kwargs) + + def _emit_node(self, **kwargs: Dict[str, Any]) -> List[str]: + op_type = kwargs["op_type"] + inputs = kwargs["inputs"] + outputs = kwargs["outputs"] + if kwargs.get("domain", "") != "": + domain = kwargs["domain"] + raise NotImplementedError(f"domain={domain!r} not supported yet.") + + before_lines = [] + lines = [ + "nodes.append(", + " make_node(", + f" {op_type!r},", + f" {inputs},", + f" {outputs},", + ] + domain = kwargs.get("domain", "") + if domain: + lines.append(f" domain={domain!r},") + atts = kwargs.get("atts", {}) + for k, v in atts.items(): + before, value = self.render_attribute_value(v) + before_lines.extend(before) + lines.append(f" {k}={value},") + lines[-1] = lines[-1][:-1] + lines.extend([" )", ")"]) + return before_lines + lines From f89451c0f6b00cae10ebf4787acd33f5f7278063 Mon Sep 17 00:00:00 2001 From: Xavier Dupre Date: Sat, 11 Nov 2023 13:08:16 +0100 Subject: [PATCH 05/12] add more tests --- .../ut_light_api/test_backend_export.py | 260 ++++++++++++++++++ ...st_backend_extended_reference_evaluator.py | 2 - onnx_array_api/light_api/emitter.py | 13 +- onnx_array_api/light_api/inner_emitter.py | 6 +- onnx_array_api/plotting/_helper.py | 2 + 5 files changed, 276 insertions(+), 7 deletions(-) create mode 100644 _unittests/ut_light_api/test_backend_export.py diff --git a/_unittests/ut_light_api/test_backend_export.py b/_unittests/ut_light_api/test_backend_export.py new file mode 100644 index 0000000..459cc23 --- /dev/null +++ b/_unittests/ut_light_api/test_backend_export.py @@ -0,0 +1,260 @@ +import unittest +from typing import Any, Dict, List, Optional +from difflib import unified_diff +import numpy +from numpy.testing import assert_allclose +import onnx.backend.base +import onnx.backend.test +import onnx.shape_inference +import onnx.version_converter +from onnx import 
ModelProto, TensorProto +from onnx.helper import ( + make_function, + make_graph, + make_model, + make_node, + make_opsetid, + make_tensor_value_info, +) +from onnx.numpy_helper import from_array, to_array +from onnx.backend.base import Device, DeviceType +from onnx_array_api.reference import ExtendedReferenceEvaluator +from onnx_array_api.light_api import translate +from onnx_array_api.plotting.text_plot import onnx_simple_text_plot + + +class ReferenceImplementationError(RuntimeError): + "Fails, export cannot be compared." + pass + + +class ExportWrapper: + apis = ["onnx", "light"] + + def __init__(self, model): + self.model = model + self.expected_sess = ExtendedReferenceEvaluator(self.model) + + @property + def input_names(self): + return self.expected_sess.input_names + + @property + def output_names(self): + return self.expected_sess.output_names + + def run( + self, names: Optional[List[str]], feeds: Optional[Dict[str, Any]] = None + ) -> List[Any]: + try: + expected = self.expected_sess.run(names, feeds) + except RuntimeError as e: + raise ReferenceImplementationError( + f"ReferenceImplementation fails with {self.model}" + ) from e + + for api in self.apis: + try: + code = translate(self.model, api=api) + except NotImplementedError: + continue + except ValueError as e: + raise AssertionError( + f"Unable to run the export model for api {api!r}, " + f"\n--BASE--\n{onnx_simple_text_plot(self.model)}" + f"\n--EXPECTED--\n{expected}" + ) from e + try: + code_compiled = compile(code, "", mode="exec") + except Exception as e: + new_code = "\n".join( + [f"{i+1:04} {line}" for i, line in enumerate(code.split("\n"))] + ) + raise AssertionError(f"ERROR {e}\n{new_code}") + + locs = { + "np": numpy, + "to_array": to_array, + "from_array": from_array, + "TensorProto": TensorProto, + "make_function": make_function, + "make_opsetid": make_opsetid, + "make_model": make_model, + "make_graph": make_graph, + "make_node": make_node, + "make_tensor_value_info": 
make_tensor_value_info, + } + globs = locs.copy() + try: + exec(code_compiled, globs, locs) + except (TypeError, NameError) as e: + new_code = "\n".join( + [f"{i+1:04} {line}" for i, line in enumerate(code.split("\n"))] + ) + raise AssertionError( + f"Unable to executed code for api {api!r}\n{new_code}" + ) from e + export_model = locs["model"] + ref = ExtendedReferenceEvaluator(export_model) + try: + got = ref.run(names, feeds) + except (TypeError, AttributeError) as e: + diff = "\n".join( + unified_diff( + str(self.model).split("\n"), + str(export_model).split("\n"), + fromfile="before", + tofile="after", + ) + ) + raise AssertionError( + f"Unable to run the exported model for api {api!r}, " + f"\n--BASE--\n{onnx_simple_text_plot(self.model)}" + f"\n--EXP[{api}]--\n{onnx_simple_text_plot(export_model)}" + f"\n--CODE--\n{code}" + f"\n--FEEDS--\n{feeds}" + f"\n--EXPECTED--\n{expected}" + f"\n--DIFF--\n{diff}" + ) from e + if len(expected) != len(got): + raise AssertionError( + f"Unexpected number of outputs for api {api!r}, " + f"{len(expected)} != {len(got)}." + f"\n--BASE--\n{onnx_simple_text_plot(self.model)}" + f"\n--EXP[{api}]--\n{onnx_simple_text_plot(export_model)}" + ) + for a, b in zip(expected, got): + if not isinstance(a, numpy.ndarray): + continue + if a.shape != b.shape or a.dtype != b.dtype: + raise AssertionError( + f"Shape or type discrepancies for api {api!r}." + f"\n--BASE--\n{onnx_simple_text_plot(self.model)}" + f"\n--EXP[{api}]--\n{onnx_simple_text_plot(export_model)}" + ) + try: + assert_allclose(a, b, atol=1e-3) + except AssertionError as e: + raise AssertionError( + f"Discrepancies for api {api!r}." 
+ f"\n--BASE--\n{onnx_simple_text_plot(self.model)}" + f"\n--EXP[{api}]--\n{onnx_simple_text_plot(export_model)}" + ) from e + + return expected + + +class ExportBackendRep(onnx.backend.base.BackendRep): + def __init__(self, session): + self._session = session + + def run(self, inputs, **kwargs): + if isinstance(inputs, numpy.ndarray): + inputs = [inputs] + if isinstance(inputs, list): + if len(inputs) == len(self._session.input_names): + feeds = dict(zip(self._session.input_names, inputs)) + else: + feeds = {} + pos_inputs = 0 + for inp, tshape in zip( + self._session.input_names, self._session.input_types + ): + shape = tuple(d.dim_value for d in tshape.tensor_type.shape.dim) + if shape == inputs[pos_inputs].shape: + feeds[inp] = inputs[pos_inputs] + pos_inputs += 1 + if pos_inputs >= len(inputs): + break + elif isinstance(inputs, dict): + feeds = inputs + else: + raise TypeError(f"Unexpected input type {type(inputs)!r}.") + outs = self._session.run(None, feeds) + return outs + + +class ExportBackend(onnx.backend.base.Backend): + @classmethod + def is_opset_supported(cls, model): # pylint: disable=unused-argument + return True, "" + + @classmethod + def supports_device(cls, device: str) -> bool: + d = Device(device) + return d.type == DeviceType.CPU # type: ignore[no-any-return] + + @classmethod + def create_inference_session(cls, model): + return ExportWrapper(model) + + @classmethod + def prepare( + cls, model: Any, device: str = "CPU", **kwargs: Any + ) -> ExportBackendRep: + if isinstance(model, ExportWrapper): + return ExportBackendRep(model) + if isinstance(model, (str, bytes, ModelProto)): + inf = cls.create_inference_session(model) + return cls.prepare(inf, device, **kwargs) + raise TypeError(f"Unexpected type {type(model)} for model.") + + @classmethod + def run_model(cls, model, inputs, device=None, **kwargs): + rep = cls.prepare(model, device, **kwargs) + return rep.run(inputs, **kwargs) + + @classmethod + def run_node(cls, node, inputs, device=None, 
outputs_info=None, **kwargs): + raise NotImplementedError("Unable to run the model node by node.") + + +backend_test = onnx.backend.test.BackendTest(ExportBackend, __name__) + +# The following tests are too slow with the reference implementation (Conv). +backend_test.exclude( + "(FLOAT8|BFLOAT16|_opt_|_3d_|_momentum_" + "|test_adagrad" + "|test_adam" + "|test_ai_onnx_ml_" + "|test_cast_FLOAT16" + "|test_cast_FLOAT_to_STRING" + "|test_castlike_FLOAT16" + "|test_castlike_FLOAT_to_STRING" + "|test_bernoulli" + "|test_bvlc_alexnet" + "|test_conv" # too long + "|test_densenet121" + "|test_inception_v1" + "|test_inception_v2" + "|test_loop11_" + "|test_loop16_seq_none" + "|test_quantizelinear_e" + "|test_resnet50" + "|test_sequence_model" + "|test_shufflenet" + "|test_squeezenet" + "|test_vgg19" + "|test_zfnet512" + ")" +) + +# The following tests cannot pass because they consists in generating random number. +backend_test.exclude("(test_bernoulli)") + +# import all test cases at global scope to make them visible to python.unittest +globals().update(backend_test.test_cases) + +if __name__ == "__main__": + res = unittest.main(verbosity=2, exit=False) + tests_run = res.result.testsRun + errors = len(res.result.errors) + skipped = len(res.result.skipped) + unexpected_successes = len(res.result.unexpectedSuccesses) + expected_failures = len(res.result.expectedFailures) + print("---------------------------------") + print( + f"tests_run={tests_run} errors={errors} skipped={skipped} " + f"unexpected_successes={unexpected_successes} " + f"expected_failures={expected_failures}" + ) diff --git a/_unittests/ut_reference/test_backend_extended_reference_evaluator.py b/_unittests/ut_reference/test_backend_extended_reference_evaluator.py index 4bc0927..b35fb3c 100644 --- a/_unittests/ut_reference/test_backend_extended_reference_evaluator.py +++ b/_unittests/ut_reference/test_backend_extended_reference_evaluator.py @@ -61,8 +61,6 @@ def create_inference_session(cls, model): def 
prepare( cls, model: Any, device: str = "CPU", **kwargs: Any ) -> ExtendedReferenceEvaluatorBackendRep: - # if isinstance(model, ExtendedReferenceEvaluatorBackendRep): - # return model if isinstance(model, ExtendedReferenceEvaluator): return ExtendedReferenceEvaluatorBackendRep(model) if isinstance(model, (str, bytes, ModelProto)): diff --git a/onnx_array_api/light_api/emitter.py b/onnx_array_api/light_api/emitter.py index c76ede9..04c5b4d 100644 --- a/onnx_array_api/light_api/emitter.py +++ b/onnx_array_api/light_api/emitter.py @@ -2,7 +2,7 @@ from typing import Any, Dict, List, Tuple from enum import IntEnum import numpy as np -from onnx import TensorProto +from onnx import AttributeProto, TensorProto from .annotations import ELEMENT_TYPE_NAME @@ -73,14 +73,21 @@ def render_attribute_value(self, value: Any) -> Tuple[List[str], str]: :return: rows to append before, actual value """ v = value[-1] + if value[0].type == AttributeProto.TENSOR: + r = f"{v!r}".replace("dtype=", "dtype=np.") + return [], f"from_array(np.{r}, name={value[0].name!r})" if isinstance(v, (int, float, list)): return [], str(v) + if isinstance(v, str): + return [], f"{v!r}" if isinstance(v, np.ndarray): if len(v.shape) == 0: return [], str(v) if len(v.shape) == 1: - return [], str(v.tolist()) - raise ValueError(f"Unable to render an attribute {value}.") + if value[0].type in (AttributeProto.INTS, AttributeProto.FLOATS): + return [], str(v.tolist()) + + raise ValueError(f"Unable to render an attribute {type(v)}, {value}.") def join(self, rows: List[str], single_line: bool = False) -> str: raise NotImplementedError( diff --git a/onnx_array_api/light_api/inner_emitter.py b/onnx_array_api/light_api/inner_emitter.py index eb686d8..ad907aa 100644 --- a/onnx_array_api/light_api/inner_emitter.py +++ b/onnx_array_api/light_api/inner_emitter.py @@ -2,7 +2,7 @@ from onnx import AttributeProto, TensorProto from .annotations import ELEMENT_TYPE_NAME from .emitter import BaseEmitter -from .translate import 
EventType, Translater +from .translate import Translater class InnerEmitter(BaseEmitter): @@ -108,7 +108,9 @@ def _emit_io(self, container: str, **kwargs: Dict[str, Any]) -> List[str]: return [ f"{container}.append(make_tensor_value_info({name!r}, TensorProto.{ELEMENT_TYPE_NAME[elem_type]}, shape=[]))" ] - return [f"{container}.append(make_tensor_value_info({name!r}))"] + return [ + f"{container}.append(make_tensor_value_info({name!r}, TensorProto.UNDEFINED, []))" + ] def _emit_input(self, **kwargs: Dict[str, Any]) -> List[str]: return self._emit_io("inputs", **kwargs) diff --git a/onnx_array_api/plotting/_helper.py b/onnx_array_api/plotting/_helper.py index ddca631..21179ab 100644 --- a/onnx_array_api/plotting/_helper.py +++ b/onnx_array_api/plotting/_helper.py @@ -160,6 +160,8 @@ def _get_type(obj0): if hasattr(obj, "tensor_type"): obj = obj.tensor_type if hasattr(obj, "elem_type"): + if obj.elem_type == 0: + return "NOTENSOR" return tensor_dtype_to_np_dtype(obj.elem_type) raise RuntimeError(f"Unable to guess type from {obj0!r}.") # pragma: no cover From b218f30a567870626d41fe4ae11ee8ebdece4b92 Mon Sep 17 00:00:00 2001 From: Xavier Dupre Date: Sat, 11 Nov 2023 15:23:03 +0100 Subject: [PATCH 06/12] fix conversion --- .../ut_light_api/test_backend_export.py | 40 +++++++++++++++---- _unittests/ut_light_api/test_translate.py | 2 +- onnx_array_api/light_api/emitter.py | 37 +++++++++++------ onnx_array_api/light_api/inner_emitter.py | 13 ++---- 4 files changed, 62 insertions(+), 30 deletions(-) diff --git a/_unittests/ut_light_api/test_backend_export.py b/_unittests/ut_light_api/test_backend_export.py index 459cc23..52057b6 100644 --- a/_unittests/ut_light_api/test_backend_export.py +++ b/_unittests/ut_light_api/test_backend_export.py @@ -39,18 +39,27 @@ def __init__(self, model): def input_names(self): return self.expected_sess.input_names + @property + def input_types(self): + return self.expected_sess.input_types + @property def output_names(self): return 
self.expected_sess.output_names + @property + def output_types(self): + return self.expected_sess.output_types + def run( self, names: Optional[List[str]], feeds: Optional[Dict[str, Any]] = None ) -> List[Any]: try: expected = self.expected_sess.run(names, feeds) - except RuntimeError as e: + except (RuntimeError, AssertionError, TypeError, KeyError) as e: raise ReferenceImplementationError( - f"ReferenceImplementation fails with {self.model}" + f"ReferenceImplementation fails with {onnx_simple_text_plot(self.model)}" + f"\n--RAW--\n{self.model}" ) from e for api in self.apis: @@ -60,7 +69,7 @@ def run( continue except ValueError as e: raise AssertionError( - f"Unable to run the export model for api {api!r}, " + f"Unable to translate model for api {api!r}, " f"\n--BASE--\n{onnx_simple_text_plot(self.model)}" f"\n--EXPECTED--\n{expected}" ) from e @@ -87,7 +96,7 @@ def run( globs = locs.copy() try: exec(code_compiled, globs, locs) - except (TypeError, NameError) as e: + except (TypeError, NameError, ValueError) as e: new_code = "\n".join( [f"{i+1:04} {line}" for i, line in enumerate(code.split("\n"))] ) @@ -132,11 +141,23 @@ def run( f"\n--BASE--\n{onnx_simple_text_plot(self.model)}" f"\n--EXP[{api}]--\n{onnx_simple_text_plot(export_model)}" ) + if a.dtype in (numpy.str_, object, numpy.object_) or isinstance( + a.dtype, numpy.dtypes.StrDType + ): + if a.tolist() != b.tolist(): + raise AssertionError( + f"Text discrepancies for api {api!r} with a.dtype={a.dtype} " + f"and b.dtype={b.dtype}" + f"\n--BASE--\n{onnx_simple_text_plot(self.model)}" + f"\n--EXP[{api}]--\n{onnx_simple_text_plot(export_model)}" + ) + continue try: assert_allclose(a, b, atol=1e-3) - except AssertionError as e: + except (AssertionError, TypeError) as e: raise AssertionError( - f"Discrepancies for api {api!r}." 
+ f"Discrepancies for api {api!r} with a.dtype={a.dtype} " + f"and b.dtype={b.dtype} (type-dtype={type(a.dtype)})" f"\n--BASE--\n{onnx_simple_text_plot(self.model)}" f"\n--EXP[{api}]--\n{onnx_simple_text_plot(export_model)}" ) from e @@ -213,7 +234,7 @@ def run_node(cls, node, inputs, device=None, outputs_info=None, **kwargs): # The following tests are too slow with the reference implementation (Conv). backend_test.exclude( - "(FLOAT8|BFLOAT16|_opt_|_3d_|_momentum_" + "(FLOAT8|BFLOAT16|_opt_|_3d_|_momentum_|_4d_" "|test_adagrad" "|test_adam" "|test_ai_onnx_ml_" @@ -224,14 +245,19 @@ def run_node(cls, node, inputs, device=None, outputs_info=None, **kwargs): "|test_bernoulli" "|test_bvlc_alexnet" "|test_conv" # too long + "|test_gradient_" "|test_densenet121" "|test_inception_v1" "|test_inception_v2" "|test_loop11_" "|test_loop16_seq_none" + "|test_MaxPool2d" "|test_quantizelinear_e" "|test_resnet50" "|test_sequence_model" + "|test_scan_sum" + "|test_scatter_with_axis" + "|test_scatter_without_axis" "|test_shufflenet" "|test_squeezenet" "|test_vgg19" diff --git a/_unittests/ut_light_api/test_translate.py b/_unittests/ut_light_api/test_translate.py index f5c1f64..cad762e 100644 --- a/_unittests/ut_light_api/test_translate.py +++ b/_unittests/ut_light_api/test_translate.py @@ -79,7 +79,7 @@ def test_transpose(self): """ ( start(opset=19) - .cst(np.array([-1, 1]).astype(int64)) + .cst(np.array([-1, 1]).astype(np.int64)) .rename('r') .vin('X', elem_type=TensorProto.FLOAT) .bring('X', 'r') diff --git a/onnx_array_api/light_api/emitter.py b/onnx_array_api/light_api/emitter.py index 04c5b4d..52d1033 100644 --- a/onnx_array_api/light_api/emitter.py +++ b/onnx_array_api/light_api/emitter.py @@ -2,7 +2,7 @@ from typing import Any, Dict, List, Tuple from enum import IntEnum import numpy as np -from onnx import AttributeProto, TensorProto +from onnx import AttributeProto from .annotations import ELEMENT_TYPE_NAME @@ -74,8 +74,12 @@ def render_attribute_value(self, value: Any) -> 
Tuple[List[str], str]: """ v = value[-1] if value[0].type == AttributeProto.TENSOR: - r = f"{v!r}".replace("dtype=", "dtype=np.") - return [], f"from_array(np.{r}, name={value[0].name!r})" + repl = {"bool": "bool_", "object": "object_", "str": "str_"} + sdtype = repl.get(str(v.dtype), str(str(v.dtype))) + return [], ( + f"from_array(np.array({v.tolist()}, dtype=np.{sdtype}), " + f"name={value[0].name!r})" + ) if isinstance(v, (int, float, list)): return [], str(v) if isinstance(v, str): @@ -84,10 +88,19 @@ def render_attribute_value(self, value: Any) -> Tuple[List[str], str]: if len(v.shape) == 0: return [], str(v) if len(v.shape) == 1: - if value[0].type in (AttributeProto.INTS, AttributeProto.FLOATS): + if value[0].type in ( + AttributeProto.INTS, + AttributeProto.FLOATS, + AttributeProto.STRINGS, + ): return [], str(v.tolist()) - raise ValueError(f"Unable to render an attribute {type(v)}, {value}.") + raise ValueError( + f"Unable to render an attribute {type(v)}, " + f"attribute type={value[0].type}, " + f"dtype={getattr(v, 'dtype', '-')}, " + f"shape={getattr(v, 'shape', '-')}, {value}." 
+ ) def join(self, rows: List[str], single_line: bool = False) -> str: raise NotImplementedError( @@ -175,14 +188,12 @@ def _emit_end_graph(self, **kwargs: Dict[str, Any]) -> List[str]: def _emit_initializer(self, **kwargs: Dict[str, Any]) -> List[str]: name = kwargs["name"] value = kwargs["value"] - svalue = repr(value).strip() - if "dtype" not in svalue: - dtype = { - TensorProto.FLOAT: "float32", - TensorProto.INT64: "int64", - }[kwargs["init"].data_type] - svalue += f".astype(np.{dtype})" - return [f"cst(np.{svalue})", f"rename({name!r})"] + repl = {"bool": "bool_", "object": "object_", "str": "str_"} + sdtype = repl.get(str(value.dtype), str(str(value.dtype))) + return [ + f"cst(np.array({value.tolist()}, dtype=np.{sdtype}))", + f"rename({name!r})", + ] def _emit_input(self, **kwargs: Dict[str, Any]) -> List[str]: name = kwargs["name"] diff --git a/onnx_array_api/light_api/inner_emitter.py b/onnx_array_api/light_api/inner_emitter.py index ad907aa..6b70246 100644 --- a/onnx_array_api/light_api/inner_emitter.py +++ b/onnx_array_api/light_api/inner_emitter.py @@ -1,5 +1,5 @@ from typing import Any, Dict, List, Tuple -from onnx import AttributeProto, TensorProto +from onnx import AttributeProto from .annotations import ELEMENT_TYPE_NAME from .emitter import BaseEmitter from .translate import Translater @@ -80,17 +80,12 @@ def _emit_end_graph(self, **kwargs: Dict[str, Any]) -> List[str]: def _emit_initializer(self, **kwargs: Dict[str, Any]) -> List[str]: name = kwargs["name"] value = kwargs["value"] - svalue = repr(value).strip() - if "dtype" not in svalue: - dtype = { - TensorProto.FLOAT: "float32", - TensorProto.INT64: "int64", - }[kwargs["init"].data_type] - svalue += f".astype(np.{dtype})" + repl = {"bool": "bool_", "object": "object_", "str": "str_"} + sdtype = repl.get(str(value.dtype), str(str(value.dtype))) return [ "initializers.append(", " from_array(", - f" np.{svalue},", + f" np.array({value.tolist()}, dtype=np.{sdtype}),", f" name={name!r}", " )", ")", 
From 11130a24c7d03b887c63e09a8feb49146e05e2cd Mon Sep 17 00:00:00 2001 From: Xavier Dupre Date: Sun, 12 Nov 2023 01:38:38 +0100 Subject: [PATCH 07/12] fix ut --- _unittests/ut_light_api/test_backend_export.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/_unittests/ut_light_api/test_backend_export.py b/_unittests/ut_light_api/test_backend_export.py index 52057b6..b0c1cbc 100644 --- a/_unittests/ut_light_api/test_backend_export.py +++ b/_unittests/ut_light_api/test_backend_export.py @@ -1,13 +1,14 @@ import unittest from typing import Any, Dict, List, Optional from difflib import unified_diff +import packaging.version as pv import numpy from numpy.testing import assert_allclose import onnx.backend.base import onnx.backend.test import onnx.shape_inference import onnx.version_converter -from onnx import ModelProto, TensorProto +from onnx import ModelProto, TensorProto, __version__ as onnx_version from onnx.helper import ( make_function, make_graph, @@ -142,7 +143,7 @@ def run( f"\n--EXP[{api}]--\n{onnx_simple_text_plot(export_model)}" ) if a.dtype in (numpy.str_, object, numpy.object_) or isinstance( - a.dtype, numpy.dtypes.StrDType + a.dtype, getattr(getattr(numpy, "dtypes", None), "StrDType", type) ): if a.tolist() != b.tolist(): raise AssertionError( @@ -265,6 +266,9 @@ def run_node(cls, node, inputs, device=None, outputs_info=None, **kwargs): ")" ) +if pv.Version(onnx_version) < pv.Version("1.16.0"): + backend_test.exclude("(test_strnorm|test_range_)") + # The following tests cannot pass because they consists in generating random number. 
backend_test.exclude("(test_bernoulli)") From ec49a87e773bedcf41d69c46e4e2841a12b73ac0 Mon Sep 17 00:00:00 2001 From: Xavier Dupre Date: Sun, 12 Nov 2023 01:41:15 +0100 Subject: [PATCH 08/12] fix ut --- _unittests/ut_light_api/test_translate.py | 2 +- _unittests/ut_light_api/test_translate_classic.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/_unittests/ut_light_api/test_translate.py b/_unittests/ut_light_api/test_translate.py index cad762e..8af161c 100644 --- a/_unittests/ut_light_api/test_translate.py +++ b/_unittests/ut_light_api/test_translate.py @@ -79,7 +79,7 @@ def test_transpose(self): """ ( start(opset=19) - .cst(np.array([-1, 1]).astype(np.int64)) + .cst(np.array([-1, 1], dtype=np.int64)) .rename('r') .vin('X', elem_type=TensorProto.FLOAT) .bring('X', 'r') diff --git a/_unittests/ut_light_api/test_translate_classic.py b/_unittests/ut_light_api/test_translate_classic.py index 4150892..ed51ce3 100644 --- a/_unittests/ut_light_api/test_translate_classic.py +++ b/_unittests/ut_light_api/test_translate_classic.py @@ -138,7 +138,7 @@ def test_transpose(self): functions = [] initializers.append( from_array( - np.array([-1, 1]).astype(np.int64), + np.array([-1, 1], dtype=np.int64), name='r' ) ) From acefa802e51a9cf0ac954f7542094a9abdc91453 Mon Sep 17 00:00:00 2001 From: Xavier Dupre Date: Sun, 12 Nov 2023 01:49:54 +0100 Subject: [PATCH 09/12] fix doc --- onnx_array_api/light_api/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/onnx_array_api/light_api/__init__.py b/onnx_array_api/light_api/__init__.py index 079f443..8969648 100644 --- a/onnx_array_api/light_api/__init__.py +++ b/onnx_array_api/light_api/__init__.py @@ -60,7 +60,7 @@ def translate(proto: ModelProto, single_line: bool = False, api: str = "light") :param single_line: as a single line or not :param api: API to export into, default is `"light"` and this is handle by class - :class:`onnx_array_api.light_api.translate.Emitter`, + 
:class:`onnx_array_api.light_api.emitter.Emitter`, another value is `"onnx"` which is the inner API implemented in onnx package. :return: code From 993454563bc0efed2b8f7833155a2231a2e2e85c Mon Sep 17 00:00:00 2001 From: Xavier Dupre Date: Sun, 12 Nov 2023 02:01:22 +0100 Subject: [PATCH 10/12] doc --- _doc/api/light_api.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/_doc/api/light_api.rst b/_doc/api/light_api.rst index d60c467..28dc70d 100644 --- a/_doc/api/light_api.rst +++ b/_doc/api/light_api.rst @@ -69,7 +69,7 @@ EventType InnerEmitter ++++++++++++ -.. autoclass:: onnx_array_api.light_api.inner_emitter.Emitter +.. autoclass:: onnx_array_api.light_api.inner_emitter.InnerEmitter :members: Translater From 2fea5e2056bea8a3ebbf701d3534517e1be202f4 Mon Sep 17 00:00:00 2001 From: Xavier Dupre Date: Sun, 12 Nov 2023 02:21:34 +0100 Subject: [PATCH 11/12] verbostiy --- azure-pipelines.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 89a4ed9..907bb9f 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -214,7 +214,7 @@ jobs: - script: pip install onnxmltools --no-deps displayName: 'Install onnxmltools' - script: | - python -m pytest + python -m pytest -v displayName: 'Runs Unit Tests' - script: | python -u setup.py bdist_wheel From 045f9cca773e69777760da49e12b7e4c89f05200 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Xavier=20Dupr=C3=A9?= Date: Sun, 12 Nov 2023 14:47:57 +0100 Subject: [PATCH 12/12] disable unstable test --- _unittests/ut_light_api/test_light_api.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/_unittests/ut_light_api/test_light_api.py b/_unittests/ut_light_api/test_light_api.py index f99a4b5..88c54f8 100644 --- a/_unittests/ut_light_api/test_light_api.py +++ b/_unittests/ut_light_api/test_light_api.py @@ -1,4 +1,5 @@ import unittest +import sys from typing import Callable, Optional import numpy as np from onnx import ModelProto @@ -144,6 +145,7 @@ def 
list_ops_missing(self, n_inputs):
                 f"{new_missing}\n{text}"
             )
 
+    @unittest.skipIf(sys.platform == "win32", reason="unstable test on Windows")
     def test_list_ops_missing(self):
         self.list_ops_missing(1)
         self.list_ops_missing(2)
