diff --git a/quaddtype/meson.build b/quaddtype/meson.build index c000675..e50f693 100644 --- a/quaddtype/meson.build +++ b/quaddtype/meson.build @@ -60,6 +60,8 @@ srcs = [ 'numpy_quaddtype/src/umath/binary_ops.cpp', 'numpy_quaddtype/src/umath/unary_ops.h', 'numpy_quaddtype/src/umath/unary_ops.cpp', + 'numpy_quaddtype/src/umath/unary_props.h', + 'numpy_quaddtype/src/umath/unary_props.cpp', 'numpy_quaddtype/src/umath/comparison_ops.h', 'numpy_quaddtype/src/umath/comparison_ops.cpp', 'numpy_quaddtype/src/umath/promoters.hpp', diff --git a/quaddtype/numpy_quaddtype/src/ops.hpp b/quaddtype/numpy_quaddtype/src/ops.hpp index f27f14b..50262bf 100644 --- a/quaddtype/numpy_quaddtype/src/ops.hpp +++ b/quaddtype/numpy_quaddtype/src/ops.hpp @@ -3,304 +3,347 @@ #include // Unary Quad Operations -typedef Sleef_quad (*unary_op_quad_def)(Sleef_quad *); +typedef Sleef_quad (*unary_op_quad_def)(const Sleef_quad *); static Sleef_quad -quad_negative(Sleef_quad *op) +quad_negative(const Sleef_quad *op) { return Sleef_negq1(*op); } static Sleef_quad -quad_positive(Sleef_quad *op) +quad_positive(const Sleef_quad *op) { return *op; } static inline Sleef_quad -quad_absolute(Sleef_quad *op) +quad_sign(const Sleef_quad *op) +{ + Sleef_quad zero = Sleef_cast_from_doubleq1(0.0); + int32_t sign = Sleef_icmpq1(*op, zero); + // sign(x=NaN) = x; otherwise sign(x) in { -1.0; 0.0; +1.0 } + return Sleef_iunordq1(*op, *op) ? 
*op : Sleef_cast_from_int64q1(sign); +} + +static inline Sleef_quad +quad_absolute(const Sleef_quad *op) { return Sleef_fabsq1(*op); } static inline Sleef_quad -quad_rint(Sleef_quad *op) +quad_rint(const Sleef_quad *op) { return Sleef_rintq1(*op); } static inline Sleef_quad -quad_trunc(Sleef_quad *op) +quad_trunc(const Sleef_quad *op) { return Sleef_truncq1(*op); } static inline Sleef_quad -quad_floor(Sleef_quad *op) +quad_floor(const Sleef_quad *op) { return Sleef_floorq1(*op); } static inline Sleef_quad -quad_ceil(Sleef_quad *op) +quad_ceil(const Sleef_quad *op) { return Sleef_ceilq1(*op); } static inline Sleef_quad -quad_sqrt(Sleef_quad *op) +quad_sqrt(const Sleef_quad *op) { return Sleef_sqrtq1_u05(*op); } static inline Sleef_quad -quad_square(Sleef_quad *op) +quad_square(const Sleef_quad *op) { return Sleef_mulq1_u05(*op, *op); } static inline Sleef_quad -quad_log(Sleef_quad *op) +quad_log(const Sleef_quad *op) { return Sleef_logq1_u10(*op); } static inline Sleef_quad -quad_log2(Sleef_quad *op) +quad_log2(const Sleef_quad *op) { return Sleef_log2q1_u10(*op); } static inline Sleef_quad -quad_log10(Sleef_quad *op) +quad_log10(const Sleef_quad *op) { return Sleef_log10q1_u10(*op); } static inline Sleef_quad -quad_log1p(Sleef_quad *op) +quad_log1p(const Sleef_quad *op) { return Sleef_log1pq1_u10(*op); } static inline Sleef_quad -quad_exp(Sleef_quad *op) +quad_exp(const Sleef_quad *op) { return Sleef_expq1_u10(*op); } static inline Sleef_quad -quad_exp2(Sleef_quad *op) +quad_exp2(const Sleef_quad *op) { return Sleef_exp2q1_u10(*op); } static inline Sleef_quad -quad_sin(Sleef_quad *op) +quad_sin(const Sleef_quad *op) { return Sleef_sinq1_u10(*op); } static inline Sleef_quad -quad_cos(Sleef_quad *op) +quad_cos(const Sleef_quad *op) { return Sleef_cosq1_u10(*op); } static inline Sleef_quad -quad_tan(Sleef_quad *op) +quad_tan(const Sleef_quad *op) { return Sleef_tanq1_u10(*op); } static inline Sleef_quad -quad_asin(Sleef_quad *op) +quad_asin(const Sleef_quad *op) { 
return Sleef_asinq1_u10(*op); } static inline Sleef_quad -quad_acos(Sleef_quad *op) +quad_acos(const Sleef_quad *op) { return Sleef_acosq1_u10(*op); } static inline Sleef_quad -quad_atan(Sleef_quad *op) +quad_atan(const Sleef_quad *op) { return Sleef_atanq1_u10(*op); } // Unary long double operations -typedef long double (*unary_op_longdouble_def)(long double *); +typedef long double (*unary_op_longdouble_def)(const long double *); static inline long double -ld_negative(long double *op) +ld_negative(const long double *op) { return -(*op); } static inline long double -ld_positive(long double *op) +ld_positive(const long double *op) { return *op; } static inline long double -ld_absolute(long double *op) +ld_absolute(const long double *op) { return fabsl(*op); } static inline long double -ld_rint(long double *op) +ld_sign(const long double *op) +{ + if (*op < 0.0) return -1.0; + if (*op == 0.0) return 0.0; + if (*op > 0.0) return 1.0; + // sign(x=NaN) = x + return *op; +} + +static inline long double +ld_rint(const long double *op) { return rintl(*op); } static inline long double -ld_trunc(long double *op) +ld_trunc(const long double *op) { return truncl(*op); } static inline long double -ld_floor(long double *op) +ld_floor(const long double *op) { return floorl(*op); } static inline long double -ld_ceil(long double *op) +ld_ceil(const long double *op) { return ceill(*op); } static inline long double -ld_sqrt(long double *op) +ld_sqrt(const long double *op) { return sqrtl(*op); } static inline long double -ld_square(long double *op) +ld_square(const long double *op) { return (*op) * (*op); } static inline long double -ld_log(long double *op) +ld_log(const long double *op) { return logl(*op); } static inline long double -ld_log2(long double *op) +ld_log2(const long double *op) { return log2l(*op); } static inline long double -ld_log10(long double *op) +ld_log10(const long double *op) { return log10l(*op); } static inline long double -ld_log1p(long double *op) 
+ld_log1p(const long double *op) { return log1pl(*op); } static inline long double -ld_exp(long double *op) +ld_exp(const long double *op) { return expl(*op); } static inline long double -ld_exp2(long double *op) +ld_exp2(const long double *op) { return exp2l(*op); } static inline long double -ld_sin(long double *op) +ld_sin(const long double *op) { return sinl(*op); } static inline long double -ld_cos(long double *op) +ld_cos(const long double *op) { return cosl(*op); } static inline long double -ld_tan(long double *op) +ld_tan(const long double *op) { return tanl(*op); } static inline long double -ld_asin(long double *op) +ld_asin(const long double *op) { return asinl(*op); } static inline long double -ld_acos(long double *op) +ld_acos(const long double *op) { return acosl(*op); } static inline long double -ld_atan(long double *op) +ld_atan(const long double *op) { return atanl(*op); } +// Unary Quad properties +typedef npy_bool (*unary_prop_quad_def)(const Sleef_quad *); + +static inline npy_bool +quad_signbit(const Sleef_quad *op) +{ + // FIXME @juntyr or @SwayamInSync: replace with binary implementation + // once we test big and little endian in CI + Sleef_quad zero = Sleef_cast_from_doubleq1(0.0); + Sleef_quad one = Sleef_cast_from_doubleq1(1.0); + Sleef_quad one_signed = Sleef_copysignq1(one, *op); + // signbit(x) = 1 iff copysign(1, x) == -1 + return Sleef_icmpltq1(one_signed, zero); +} + +// Unary long double properties +typedef npy_bool (*unary_prop_longdouble_def)(const long double *); + +static inline npy_bool +ld_signbit(const long double *op) +{ + return signbit(*op); +} + // Binary Quad operations -typedef Sleef_quad (*binary_op_quad_def)(Sleef_quad *, Sleef_quad *); +typedef Sleef_quad (*binary_op_quad_def)(const Sleef_quad *, const Sleef_quad *); static inline Sleef_quad -quad_add(Sleef_quad *in1, Sleef_quad *in2) +quad_add(const Sleef_quad *in1, const Sleef_quad *in2) { return Sleef_addq1_u05(*in1, *in2); } static inline Sleef_quad -quad_sub(Sleef_quad
*in1, Sleef_quad *in2) +quad_sub(const Sleef_quad *in1, const Sleef_quad *in2) { return Sleef_subq1_u05(*in1, *in2); } static inline Sleef_quad -quad_mul(Sleef_quad *a, Sleef_quad *b) +quad_mul(const Sleef_quad *a, const Sleef_quad *b) { return Sleef_mulq1_u05(*a, *b); } static inline Sleef_quad -quad_div(Sleef_quad *a, Sleef_quad *b) +quad_div(const Sleef_quad *a, const Sleef_quad *b) { return Sleef_divq1_u05(*a, *b); } static inline Sleef_quad -quad_pow(Sleef_quad *a, Sleef_quad *b) +quad_pow(const Sleef_quad *a, const Sleef_quad *b) { return Sleef_powq1_u10(*a, *b); } static inline Sleef_quad -quad_mod(Sleef_quad *a, Sleef_quad *b) +quad_mod(const Sleef_quad *a, const Sleef_quad *b) { return Sleef_fmodq1(*a, *b); } static inline Sleef_quad -quad_minimum(Sleef_quad *in1, Sleef_quad *in2) +quad_minimum(const Sleef_quad *in1, const Sleef_quad *in2) { return Sleef_iunordq1(*in1, *in2) ? ( Sleef_iunordq1(*in1, *in1) ? *in1 : *in2 @@ -308,7 +351,7 @@ quad_minimum(Sleef_quad *in1, Sleef_quad *in2) } static inline Sleef_quad -quad_maximum(Sleef_quad *in1, Sleef_quad *in2) +quad_maximum(const Sleef_quad *in1, const Sleef_quad *in2) { return Sleef_iunordq1(*in1, *in2) ? ( Sleef_iunordq1(*in1, *in1) ? 
*in1 : *in2 @@ -316,68 +359,80 @@ quad_maximum(Sleef_quad *in1, Sleef_quad *in2) } static inline Sleef_quad -quad_atan2(Sleef_quad *in1, Sleef_quad *in2) +quad_atan2(const Sleef_quad *in1, const Sleef_quad *in2) { return Sleef_atan2q1_u10(*in1, *in2); } +static inline Sleef_quad +quad_copysign(const Sleef_quad *in1, const Sleef_quad *in2) +{ + return Sleef_copysignq1(*in1, *in2); +} + // Binary long double operations -typedef long double (*binary_op_longdouble_def)(long double *, long double *); +typedef long double (*binary_op_longdouble_def)(const long double *, const long double *); static inline long double -ld_add(long double *in1, long double *in2) +ld_add(const long double *in1, const long double *in2) { return (*in1) + (*in2); } static inline long double -ld_sub(long double *in1, long double *in2) +ld_sub(const long double *in1, const long double *in2) { return (*in1) - (*in2); } static inline long double -ld_mul(long double *a, long double *b) +ld_mul(const long double *a, const long double *b) { return (*a) * (*b); } static inline long double -ld_div(long double *a, long double *b) +ld_div(const long double *a, const long double *b) { return (*a) / (*b); } static inline long double -ld_pow(long double *a, long double *b) +ld_pow(const long double *a, const long double *b) { return powl(*a, *b); } static inline long double -ld_mod(long double *a, long double *b) +ld_mod(const long double *a, const long double *b) { return fmodl(*a, *b); } static inline long double -ld_minimum(long double *in1, long double *in2) +ld_minimum(const long double *in1, const long double *in2) { return (*in1 < *in2) ? *in1 : *in2; } static inline long double -ld_maximum(long double *in1, long double *in2) +ld_maximum(const long double *in1, const long double *in2) { return (*in1 > *in2) ? 
*in1 : *in2; } static inline long double -ld_atan2(long double *in1, long double *in2) +ld_atan2(const long double *in1, const long double *in2) { return atan2l(*in1, *in2); } +static inline long double +ld_copysign(const long double *in1, const long double *in2) +{ + return copysignl(*in1, *in2); +} + // comparison quad functions typedef npy_bool (*cmp_quad_def)(const Sleef_quad *, const Sleef_quad *); @@ -454,4 +509,4 @@ static inline npy_bool ld_greaterequal(const long double *a, const long double *b) { return *a >= *b; -} \ No newline at end of file +} diff --git a/quaddtype/numpy_quaddtype/src/umath/binary_ops.cpp b/quaddtype/numpy_quaddtype/src/umath/binary_ops.cpp index aa6d19c..e6fc9fa 100644 --- a/quaddtype/numpy_quaddtype/src/umath/binary_ops.cpp +++ b/quaddtype/numpy_quaddtype/src/umath/binary_ops.cpp @@ -231,5 +231,8 @@ init_quad_binary_ops(PyObject *numpy) if (create_quad_binary_ufunc(numpy, "arctan2") < 0) { return -1; } + if (create_quad_binary_ufunc(numpy, "copysign") < 0) { + return -1; + } return 0; } \ No newline at end of file diff --git a/quaddtype/numpy_quaddtype/src/umath/umath.cpp b/quaddtype/numpy_quaddtype/src/umath/umath.cpp index 50f9562..0c72d32 100644 --- a/quaddtype/numpy_quaddtype/src/umath/umath.cpp +++ b/quaddtype/numpy_quaddtype/src/umath/umath.cpp @@ -20,6 +20,7 @@ extern "C" { #include "umath.h" #include "../ops.hpp" #include "unary_ops.h" +#include "unary_props.h" #include "binary_ops.h" #include "comparison_ops.h" #include "matmul.h" @@ -92,6 +93,11 @@ init_quad_umath(void) goto err; } + if (init_quad_unary_props(numpy) < 0) { + PyErr_SetString(PyExc_RuntimeError, "Failed to initialize quad unary properties"); + goto err; + } + if (init_quad_binary_ops(numpy) < 0) { PyErr_SetString(PyExc_RuntimeError, "Failed to initialize quad binary operations"); goto err; diff --git a/quaddtype/numpy_quaddtype/src/umath/unary_ops.cpp b/quaddtype/numpy_quaddtype/src/umath/unary_ops.cpp index 4c8f31f..c0a0be8 100644 --- 
a/quaddtype/numpy_quaddtype/src/umath/unary_ops.cpp +++ b/quaddtype/numpy_quaddtype/src/umath/unary_ops.cpp @@ -156,6 +156,9 @@ init_quad_unary_ops(PyObject *numpy) if (create_quad_unary_ufunc(numpy, "absolute") < 0) { return -1; } + if (create_quad_unary_ufunc(numpy, "sign") < 0) { + return -1; + } if (create_quad_unary_ufunc(numpy, "rint") < 0) { return -1; } diff --git a/quaddtype/numpy_quaddtype/src/umath/unary_props.cpp b/quaddtype/numpy_quaddtype/src/umath/unary_props.cpp new file mode 100644 index 0000000..a37983b --- /dev/null +++ b/quaddtype/numpy_quaddtype/src/umath/unary_props.cpp @@ -0,0 +1,143 @@ +#define PY_ARRAY_UNIQUE_SYMBOL QuadPrecType_ARRAY_API +#define PY_UFUNC_UNIQUE_SYMBOL QuadPrecType_UFUNC_API +#define NPY_NO_DEPRECATED_API NPY_2_0_API_VERSION +#define NPY_TARGET_VERSION NPY_2_0_API_VERSION +#define NO_IMPORT_ARRAY +#define NO_IMPORT_UFUNC + +extern "C" { +#include +#include + +#include "numpy/arrayobject.h" +#include "numpy/ndarraytypes.h" +#include "numpy/ufuncobject.h" + +#include "numpy/dtype_api.h" +} +#include "../quad_common.h" +#include "../scalar.h" +#include "../dtype.h" +#include "../ops.hpp" + +static NPY_CASTING +quad_unary_prop_resolve_descriptors(PyObject *self, PyArray_DTypeMeta *const dtypes[], + PyArray_Descr *const given_descrs[], PyArray_Descr *loop_descrs[], + npy_intp *NPY_UNUSED(view_offset)) +{ + Py_INCREF(given_descrs[0]); + loop_descrs[0] = given_descrs[0]; + + loop_descrs[1] = PyArray_DescrFromType(NPY_BOOL); + + return NPY_NO_CASTING; +} + +template +int +quad_generic_unary_prop_strided_loop_unaligned(PyArrayMethod_Context *context, char *const data[], + npy_intp const dimensions[], npy_intp const strides[], + NpyAuxData *auxdata) +{ + npy_intp N = dimensions[0]; + char *in_ptr = data[0]; + char *out_ptr = data[1]; + npy_intp in_stride = strides[0]; + npy_intp out_stride = strides[1]; + + QuadPrecDTypeObject *descr = (QuadPrecDTypeObject *)context->descriptors[0]; + QuadBackendType backend = descr->backend; + 
size_t elem_size = (backend == BACKEND_SLEEF) ? sizeof(Sleef_quad) : sizeof(long double); + + quad_value in; + while (N--) { + memcpy(&in, in_ptr, elem_size); + npy_bool result; + if (backend == BACKEND_SLEEF) { + result = sleef_op(&in.sleef_value); + } + else { + result = longdouble_op(&in.longdouble_value); + } + memcpy(out_ptr, &result, sizeof(npy_bool)); + + in_ptr += in_stride; + out_ptr += out_stride; + } + return 0; +} + +template +int +quad_generic_unary_prop_strided_loop_aligned(PyArrayMethod_Context *context, char *const data[], + npy_intp const dimensions[], npy_intp const strides[], + NpyAuxData *auxdata) +{ + npy_intp N = dimensions[0]; + char *in_ptr = data[0]; + char *out_ptr = data[1]; + npy_intp in_stride = strides[0]; + npy_intp out_stride = strides[1]; + + QuadPrecDTypeObject *descr = (QuadPrecDTypeObject *)context->descriptors[0]; + QuadBackendType backend = descr->backend; + + while (N--) { + npy_bool result; + if (backend == BACKEND_SLEEF) { + result = sleef_op((Sleef_quad *)in_ptr); + } + else { + result = longdouble_op((long double *)in_ptr); + } + *(npy_bool *)out_ptr = result; + in_ptr += in_stride; + out_ptr += out_stride; + } + return 0; +} + +template +int +create_quad_unary_prop_ufunc(PyObject *numpy, const char *ufunc_name) +{ + PyObject *ufunc = PyObject_GetAttrString(numpy, ufunc_name); + if (ufunc == NULL) { + return -1; + } + + PyArray_DTypeMeta *dtypes[2] = {&QuadPrecDType, &PyArray_BoolDType}; + + PyType_Slot slots[] = { + {NPY_METH_resolve_descriptors, (void *)&quad_unary_prop_resolve_descriptors}, + {NPY_METH_strided_loop, + (void *)&quad_generic_unary_prop_strided_loop_aligned}, + {NPY_METH_unaligned_strided_loop, + (void *)&quad_generic_unary_prop_strided_loop_unaligned}, + {0, NULL}}; + + PyArrayMethod_Spec Spec = { + .name = "quad_unary_prop", + .nin = 1, + .nout = 1, + .casting = NPY_NO_CASTING, + .flags = NPY_METH_SUPPORTS_UNALIGNED, + .dtypes = dtypes, + .slots = slots, + }; + + if (PyUFunc_AddLoopFromSpec(ufunc, &Spec) 
< 0) { + return -1; + } + + return 0; +} + +int +init_quad_unary_props(PyObject *numpy) +{ + if (create_quad_unary_prop_ufunc(numpy, "signbit") < 0) { + return -1; + } + return 0; +} diff --git a/quaddtype/numpy_quaddtype/src/umath/unary_props.h b/quaddtype/numpy_quaddtype/src/umath/unary_props.h new file mode 100644 index 0000000..f319552 --- /dev/null +++ b/quaddtype/numpy_quaddtype/src/umath/unary_props.h @@ -0,0 +1,9 @@ +#ifndef _QUADDTYPE_UNARY_PROPS_H +#define _QUADDTYPE_UNARY_PROPS_H + +#include + +int +init_quad_unary_props(PyObject *numpy); + +#endif diff --git a/quaddtype/release_tracker.md b/quaddtype/release_tracker.md index fe398e6..83022a7 100644 --- a/quaddtype/release_tracker.md +++ b/quaddtype/release_tracker.md @@ -22,7 +22,7 @@ | absolute | ✅ | ✅ | | fabs | | | | rint | ✅ | ❌ _Need: basic tests + edge cases (NaN/inf/±0.0/halfway cases)_ | -| sign | | | +| sign | #122 | ✅ | | heaviside | | | | conj | | | | conjugate | | | @@ -81,8 +81,8 @@ | isinf | | | | isnan | | | | isnat | | | -| signbit | | | -| copysign | | | +| signbit | #122 | ✅ | +| copysign | #122 | ✅ | | nextafter | | | | spacing | | | | modf | | | diff --git a/quaddtype/tests/test_quaddtype.py b/quaddtype/tests/test_quaddtype.py index a8e271f..af8ca56 100644 --- a/quaddtype/tests/test_quaddtype.py +++ b/quaddtype/tests/test_quaddtype.py @@ -29,13 +29,13 @@ def test_basic_equality(): "12.0") == QuadPrecision("12.00") -@pytest.mark.parametrize("op", ["add", "sub", "mul", "truediv", "pow"]) +@pytest.mark.parametrize("op", ["add", "sub", "mul", "truediv", "pow", "copysign"]) @pytest.mark.parametrize("other", ["3.0", "12.5", "100.0", "0.0", "-0.0", "inf", "-inf", "nan", "-nan"]) def test_binary_ops(op, other): if op == "truediv" and float(other) == 0: pytest.xfail("float division by zero") - op_func = getattr(operator, op) + op_func = getattr(operator, op, None) or getattr(np, op) quad_a = QuadPrecision("12.5") quad_b = QuadPrecision(other) float_a = 12.5 @@ -44,6 +44,7 @@ def 
test_binary_ops(op, other): quad_result = op_func(quad_a, quad_b) float_result = op_func(float_a, float_b) + # FIXME: @juntyr: replace with array_equal once isnan is supported with np.errstate(invalid="ignore"): assert ( (np.float64(quad_result) == float_result) or @@ -118,31 +119,31 @@ def test_array_aminmax(op, a, b): assert np.all((quad_res == float_res) | ((quad_res != quad_res) & (float_res != float_res))) -@pytest.mark.parametrize("op, val, expected", [ - ("neg", "3.0", "-3.0"), - ("neg", "-3.0", "3.0"), - ("pos", "3.0", "3.0"), - ("pos", "-3.0", "-3.0"), - ("abs", "3.0", "3.0"), - ("abs", "-3.0", "3.0"), - ("neg", "12.5", "-12.5"), - ("pos", "100.0", "100.0"), - ("abs", "-25.5", "25.5"), -]) -def test_unary_ops(op, val, expected): +@pytest.mark.parametrize("op", ["negative", "positive", "absolute", "sign", "signbit"]) +@pytest.mark.parametrize("val", ["3.0", "-3.0", "12.5", "100.0", "0.0", "-0.0", "inf", "-inf", "nan", "-nan"]) +def test_unary_ops(op, val): + op_func = dict(negative=operator.neg, positive=operator.pos, absolute=operator.abs).get(op, None) + nop_func = getattr(np, op) + quad_val = QuadPrecision(val) - expected_val = QuadPrecision(expected) - - if op == "neg": - result = -quad_val - elif op == "pos": - result = +quad_val - elif op == "abs": - result = abs(quad_val) - else: - raise ValueError(f"Unsupported operation: {op}") - - assert result == expected_val, f"{op}({val}) should be {expected}, but got {result}" + float_val = float(val) + + for of in [op_func, nop_func]: + if of is None: + continue + + quad_result = of(quad_val) + float_result = of(float_val) + + # FIXME: @juntyr: replace with array_equal once isnan is supported + with np.errstate(invalid="ignore"): + assert ( + (np.float64(quad_result) == float_result) or + ((float_result != float_result) and (quad_result != quad_result)) + ) and ( + np.signbit(float_result) == np.signbit(quad_result) + ), f"{op}({val}) should be {float_result}, but got {quad_result}" + def test_inf(): assert 
QuadPrecision("inf") > QuadPrecision("1e1000")