Skip to content

Supports function full for the Array API #21

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 5 commits into from
Jun 18, 2023
Merged
Show file tree
Hide file tree
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Next Next commit
Supports function full for the Array API
  • Loading branch information
xadupre committed Jun 18, 2023
commit 791eb37fa9fc018abb9a89c4464bec65f5e9516d
2 changes: 1 addition & 1 deletion _unittests/onnx-numpy-skips.txt
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@ array_api_tests/test_creation_functions.py::test_asarray_arrays
array_api_tests/test_creation_functions.py::test_empty
array_api_tests/test_creation_functions.py::test_empty_like
array_api_tests/test_creation_functions.py::test_eye
array_api_tests/test_creation_functions.py::test_full
# array_api_tests/test_creation_functions.py::test_full
array_api_tests/test_creation_functions.py::test_full_like
array_api_tests/test_creation_functions.py::test_linspace
array_api_tests/test_creation_functions.py::test_meshgrid
Expand Down
2 changes: 1 addition & 1 deletion _unittests/test_array_api.sh
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
export ARRAY_API_TESTS_MODULE=onnx_array_api.array_api.onnx_numpy
# pytest ../array-api-tests/array_api_tests/test_creation_functions.py::test_asarray_arrays || exit 1
pytest ../array-api-tests/array_api_tests/test_creation_functions.py::test_full || exit 1
# pytest ../array-api-tests/array_api_tests/test_creation_functions.py --help
pytest ../array-api-tests/array_api_tests/test_creation_functions.py --hypothesis-explain --skips-file=_unittests/onnx-numpy-skips.txt || exit 1
10 changes: 10 additions & 0 deletions _unittests/ut_array_api/test_onnx_numpy.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,16 @@ def test_zeros(self):
a = xp.absolute(mat)
self.assertEqualArray(np.absolute(mat.numpy()), a.numpy())

def test_full(self):
    """Builds a (4, 5) tensor via the Array API ``full`` and checks ``absolute`` on it."""
    shape_tensor = EagerTensor(np.array([4, 5], dtype=np.int64))
    filled = xp.full(shape_tensor, fill_value=5, dtype=xp.int64)
    filled_np = filled.numpy()
    self.assertEqual(filled_np.shape, (4, 5))
    self.assertNotEmpty(filled_np[0, 0])
    abs_tensor = xp.absolute(filled)
    self.assertEqualArray(np.absolute(filled.numpy()), abs_tensor.numpy())


if __name__ == "__main__":
TestOnnxNumpy().test_full()
unittest.main(verbosity=2)
30 changes: 29 additions & 1 deletion onnx_array_api/array_api/onnx_numpy.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,10 +16,11 @@
reshape,
take,
)
from ..npx.npx_functions import full as generic_full
from ..npx.npx_functions import ones as generic_ones
from ..npx.npx_functions import zeros as generic_zeros
from ..npx.npx_numpy_tensors import EagerNumpyTensor
from ..npx.npx_types import DType, ElemType, TensorType, OptParType
from ..npx.npx_types import DType, ElemType, TensorType, OptParType, ParType, Scalar
from ._onnx_common import template_asarray
from . import _finalize_array_api

Expand All @@ -31,6 +32,7 @@
"astype",
"empty",
"equal",
"full",
"isdtype",
"isfinite",
"isnan",
Expand Down Expand Up @@ -103,6 +105,32 @@ def zeros(
return generic_zeros(shape, dtype=dtype, order=order)


def full(
    shape: TensorType[ElemType.int64, "I", (None,)],
    fill_value: ParType[Scalar] = None,
    dtype: OptParType[DType] = DType(TensorProto.FLOAT),
    order: OptParType[str] = "C",
) -> TensorType[ElemType.numerics, "T"]:
    """
    Array API ``full``: returns a tensor of *shape* filled with *fill_value*.

    :param shape: target shape as a 1-D int64 tensor, a python tuple or a
        single int (tuple/int are normalized to a tensor below)
    :param fill_value: scalar to fill the tensor with, mandatory
    :param dtype: element type of the result, defaults to float32
    :param order: memory layout hint, forwarded to the generic implementation
    :return: filled tensor
    :raises AttributeError: if *fill_value* is None (kept as AttributeError
        for backward compatibility; TypeError would be more conventional)
    """
    if fill_value is None:
        raise AttributeError("fill_value cannot be None")
    # Normalize python shapes into the 1-D int64 tensor expected by
    # generic_full, then issue a single call instead of three duplicated ones.
    if isinstance(shape, tuple):
        shape = EagerNumpyTensor(np.array(shape, dtype=np.int64))
    elif isinstance(shape, int):
        shape = EagerNumpyTensor(np.array([shape], dtype=np.int64))
    return generic_full(shape, fill_value=fill_value, dtype=dtype, order=order)


def _finalize():
"""
Adds common attributes to Array API defined in this modules
Expand Down
30 changes: 29 additions & 1 deletion onnx_array_api/npx/npx_functions.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,14 +15,15 @@
SequenceType,
TensorType,
TupleType,
Scalar,
)
from .npx_var import Var


def _cstv(x):
    """Returns *x* unchanged when it is already a Var, otherwise wraps a supported python/numpy constant."""
    if isinstance(x, Var):
        return x
    if not isinstance(x, (int, float, bool, np.ndarray)):
        raise TypeError(f"Unexpected constant type {type(x)}.")
    return cst(x)

Expand Down Expand Up @@ -376,6 +377,33 @@ def expit(x: TensorType[ElemType.numerics, "T"]) -> TensorType[ElemType.numerics
return var(x, op="Sigmoid")


@npxapi_inline
def full(
    shape: TensorType[ElemType.int64, "I", (None,)],
    dtype: OptParType[DType] = DType(TensorProto.FLOAT),
    fill_value: ParType[Scalar] = None,
    order: OptParType[str] = "C",
) -> TensorType[ElemType.numerics, "T"]:
    """
    Implements :func:`numpy.full`.

    :param shape: 1-D int64 tensor holding the output shape
    :param dtype: element type of the result, float32 when None
    :param fill_value: python scalar (bool, int or float) to fill with,
        mandatory
    :param order: only "C" is supported
    :return: output of a ConstantOfShape node
    :raises RuntimeError: if *order* is not "C"
    :raises AttributeError: if *fill_value* is None
    :raises NotImplementedError: if *fill_value* is not a python scalar
    """
    if order != "C":
        raise RuntimeError(f"order={order!r} != 'C' not supported.")
    if fill_value is None:
        raise AttributeError("fill_value cannot be None.")
    if dtype is None:
        dtype = DType(TensorProto.FLOAT)
    if isinstance(fill_value, (float, int, bool)):
        # ConstantOfShape carries its fill value as a one-element tensor
        # attribute; the scalar is cast to *dtype* here.
        value = make_tensor(
            name="cst", data_type=dtype.code, dims=[1], vals=[fill_value]
        )
    else:
        # fixed unbalanced parenthesis in the original message
        raise NotImplementedError(
            f"Unexpected type ({type(fill_value)}) for fill_value={fill_value!r}."
        )
    return var(shape, value=value, op="ConstantOfShape")


@npxapi_inline
def floor(x: TensorType[ElemType.numerics, "T"]) -> TensorType[ElemType.numerics, "T"]:
"See :func:`numpy.floor`."
Expand Down
2 changes: 1 addition & 1 deletion onnx_array_api/npx/npx_graph_builder.py
Original file line number Diff line number Diff line change
Expand Up @@ -784,7 +784,7 @@ def to_onnx(
node_inputs.append(input_name)
continue

if isinstance(i, (int, float)):
if isinstance(i, (int, float, bool)):
ni = np.array(i)
c = Cst(ni)
input_name = self._unique(var._prefix)
Expand Down
14 changes: 8 additions & 6 deletions onnx_array_api/npx/npx_jit_eager.py
Original file line number Diff line number Diff line change
Expand Up @@ -131,7 +131,7 @@ def make_key(*values, **kwargs):
for iv, v in enumerate(values):
if isinstance(v, (Var, EagerTensor, JitTensor)):
res.append(v.key)
elif isinstance(v, (int, float, DType)):
elif isinstance(v, (int, float, bool, DType)):
res.append(v)
elif isinstance(v, slice):
res.append(("slice", v.start, v.stop, v.step))
Expand All @@ -153,7 +153,7 @@ def make_key(*values, **kwargs):
)
if kwargs:
for k, v in sorted(kwargs.items()):
if isinstance(v, (int, float, str, type, DType)):
if isinstance(v, (int, float, str, type, bool, DType)):
res.append(k)
res.append(v)
elif isinstance(v, tuple):
Expand Down Expand Up @@ -543,12 +543,12 @@ def _preprocess_constants(self, *args):
elif isinstance(n, np.ndarray):
new_args.append(self.tensor_class(n))
modified = True
elif isinstance(n, (int, float)):
elif isinstance(n, (int, float, bool)):
new_args.append(self.tensor_class(np.array(n)))
modified = True
elif isinstance(n, DType):
new_args.append(n)
elif n in (int, float):
elif n in (int, float, bool):
# usually used to cast
new_args.append(n)
elif n is None:
Expand Down Expand Up @@ -586,6 +586,7 @@ def __call__(self, *args, already_eager=False, **kwargs):
EagerTensor,
Cst,
int,
bool,
float,
tuple,
slice,
Expand Down Expand Up @@ -616,12 +617,13 @@ def __call__(self, *args, already_eager=False, **kwargs):
else:
# tries to call the version
try:
res = self.f(*values)
res = self.f(*values, **kwargs)
except (AttributeError, TypeError) as e:
inp1 = ", ".join(map(str, map(type, args)))
inp2 = ", ".join(map(str, map(type, values)))
raise TypeError(
f"Unexpected types, input types are {inp1} " f"and {inp2}."
f"Unexpected types, input types are {inp1} "
f"and {inp2}, kwargs={kwargs}."
) from e

if isinstance(res, EagerTensor) or (
Expand Down
2 changes: 2 additions & 0 deletions onnx_array_api/npx/npx_numpy_tensors_ops.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,8 @@ def _process(value):
cst = np.int64(cst)
elif isinstance(cst, float):
cst = np.float64(cst)
elif isinstance(cst, bool):
cst = np.bool_(cst)
elif cst is None:
cst = np.float32(0)
if not isinstance(
Expand Down
2 changes: 1 addition & 1 deletion onnx_array_api/npx/npx_tensors.py
Original file line number Diff line number Diff line change
Expand Up @@ -133,7 +133,7 @@ def _generic_method_operator(self, method_name, *args: Any, **kwargs: Any) -> An
for a in args:
if isinstance(a, np.ndarray):
new_args.append(self.__class__(a.astype(self.dtype.np_dtype)))
elif isinstance(a, (int, float)):
elif isinstance(a, (int, float, bool)):
new_args.append(
self.__class__(np.array([a]).astype(self.dtype.np_dtype))
)
Expand Down
28 changes: 25 additions & 3 deletions onnx_array_api/npx/npx_types.py
Original file line number Diff line number Diff line change
Expand Up @@ -292,6 +292,19 @@ def get_set_name(cls, dtypes):
return None


class Scalar:
    """Lightweight wrapper marking a python scalar (bool, int or float) parameter."""

    def __init__(self, value: Union[float, int, bool]):
        # stored as given, no coercion
        self.value = value

    def __repr__(self) -> str:
        "usual"
        return f"Scalar({self.value!r})"


class ParType(WrapperType):
"""
Defines a parameter type.
Expand All @@ -300,11 +313,18 @@ class ParType(WrapperType):
:param optional: is optional or not
"""

map_names = {int: "int", float: "float", str: "str", DType: "DType"}
map_names = {
int: "int",
float: "float",
str: "str",
DType: "DType",
bool: "bool",
Scalar: "Scalar",
}

@classmethod
def __class_getitem__(cls, dtype):
if isinstance(dtype, (int, float)):
if isinstance(dtype, (int, float, bool)):
msg = str(dtype)
else:
msg = getattr(dtype, "__name__", str(dtype))
Expand All @@ -331,6 +351,8 @@ def onnx_type(cls):
return AttributeProto.INT
if cls.dtype == float:
return AttributeProto.FLOAT
if cls.dtype == bool:
return AttributeProto.BOOL
if cls.dtype == str:
return AttributeProto.STRING
raise RuntimeError(
Expand All @@ -347,7 +369,7 @@ class OptParType(ParType):

@classmethod
def __class_getitem__(cls, dtype):
if isinstance(dtype, (int, float)):
if isinstance(dtype, (int, float, bool)):
msg = str(dtype)
else:
msg = dtype.__name__
Expand Down
16 changes: 10 additions & 6 deletions onnx_array_api/npx/npx_var.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,7 @@ class Par:
Defines a named parameter.

:param name: parameter name
:param dtype: parameter type (int, str, float)
:param dtype: parameter type (bool, int, str, float)
:param value: value of the parameter if known
:param parent_op: node type it belongs to
"""
Expand Down Expand Up @@ -233,7 +233,7 @@ def __call__(self, new_values):

def _setitem1_where(self, index, new_values):
cst, var = Var.get_cst_var()
if isinstance(new_values, (int, float)):
if isinstance(new_values, (int, float, bool)):
new_values = np.array(new_values)
if isinstance(new_values, np.ndarray):
value = var(cst(new_values), self.parent, op="CastLike")
Expand Down Expand Up @@ -446,7 +446,7 @@ def _get_vars(self):
cst = Var.get_cst_var()[0]
replacement_cst[id(i)] = cst(i)
continue
if isinstance(i, (int, float)):
if isinstance(i, (int, float, bool)):
cst = Var.get_cst_var()[0]
replacement_cst[id(i)] = cst(np.array(i))
continue
Expand Down Expand Up @@ -595,13 +595,13 @@ def __iter__(self):

def _binary_op(self, ov: "Var", op_name: str, **kwargs) -> "Var":
var = Var.get_cst_var()[1]
if isinstance(ov, (int, float, np.ndarray, Cst)):
if isinstance(ov, (int, float, bool, np.ndarray, Cst)):
return var(self.self_var, var(ov, self.self_var, op="CastLike"), op=op_name)
return var(self.self_var, ov, op=op_name, **kwargs)

def _binary_op_right(self, ov: "Var", op_name: str, **kwargs) -> "Var":
var = Var.get_cst_var()[1]
if isinstance(ov, (int, float, np.ndarray, Cst)):
if isinstance(ov, (int, float, bool, np.ndarray, Cst)):
return var(var(ov, self.self_var, op="CastLike"), self.self_var, op=op_name)
return var(ov, self.self_var, op=op_name, **kwargs)

Expand Down Expand Up @@ -1112,10 +1112,14 @@ def __init__(self, cst: Any):
Var.__init__(self, np.array(cst, dtype=np.int64), op="Identity")
elif isinstance(cst, float):
Var.__init__(self, np.array(cst, dtype=np.float32), op="Identity")
elif isinstance(cst, bool):
Var.__init__(self, np.array(cst, dtype=np.bool_), op="Identity")
elif isinstance(cst, list):
if all(map(lambda t: isinstance(t, int), cst)):
Var.__init__(self, np.array(cst, dtype=np.int64), op="Identity")
elif all(map(lambda t: isinstance(t, (float, int)), cst)):
elif all(map(lambda t: isinstance(t, bool), cst)):
Var.__init__(self, np.array(cst, dtype=np.bool_), op="Identity")
elif all(map(lambda t: isinstance(t, (float, int, bool)), cst)):
Var.__init__(self, np.array(cst, dtype=np.float64), op="Identity")
else:
raise ValueError(
Expand Down
pFad - Phonifier reborn

Pfad - The Proxy pFad of © 2024 Garber Painting. All rights reserved.

Note: This service is not intended for secure transactions such as banking, social media, email, or purchasing. Use at your own risk. We assume no liability whatsoever for broken pages.


Alternative Proxies:

Alternative Proxy

pFad Proxy

pFad v3 Proxy

pFad v4 Proxy