"split_to_sequence_fp16x16_2d_variable_parts"
make_test(
[_x], _y, "input_0.split_to_sequence(1, 1, Option::Some(TensorTrait::<u32>::new(shape: array![2].span(), data: array![2, 4].span(),)))", name)
def split_to_sequence_zero_size():
            x = to_fp(np.array([]).astype(np.int64), FixedImpl.FP16x16)
y = [
np.array([]).astype(np.int64),
np.array([]).astype(np.int64),
np.array([]).astype(np.int64),
]
_x = Tensor(Dtype.FP16x16, x.shape, x.flatten())
_y = [
Tensor(Dtype.FP16x16, y[0].shape, y[0].flatten()),
Tensor(Dtype.FP16x16, y[1].shape, y[1].flatten()),
Tensor(Dtype.FP16x16, y[2].shape, y[2].flatten()),
]
name = "split_to_sequence_fp16x16_zero_size"
make_test(
[_x], _y, "input_0.split_to_sequence(0, 1, Option::Some(TensorTrait::<u32>::new(shape: array![3].span(), data: array![0, 0, 0].span(),)))", name)
def split_to_sequence_1d_uneven():
x = to_fp(np.random.randint(-127, 127, 7
).astype(np.int64), FixedImpl.FP16x16)
y = [
np.array(x[0:2]).astype(np.int64),
np.array(x[2:4]).astype(np.int64),
np.array(x[4:6]).astype(np.int64),
np.array(x[6:7]).astype(np.int64),
]
_x = Tensor(Dtype.FP16x16, x.shape, x.flatten())
_y = [
Tensor(Dtype.FP16x16, y[0].shape, y[0].flatten()),
Tensor(Dtype.FP16x16, y[1].shape, y[1].flatten()),
Tensor(Dtype.FP16x16, y[2].shape, y[2].flatten()),
Tensor(Dtype.FP16x16, y[3].shape, y[3].flatten()),
]
name = "split_to_sequence_fp16x16_1d_uneven"
make_test(
                [_x], _y, "input_0.split_to_sequence(0, 1, Option::Some(TensorTrait::<u32>::new(shape: array![1].span(), data: array![4].span())))", name)
def split_to_sequence_2d_uneven():
x = to_fp(np.random.randint(-127, 127, (2, 8)
).astype(np.int64), FixedImpl.FP16x16)
y = [
np.array(x[0:2, 0:3]).astype(np.int64),
np.array(x[0:2, 3:6]).astype(np.int64),
np.array(x[0:2, 6:8]).astype(np.int64)
]
_x = Tensor(Dtype.FP16x16, x.shape, x.flatten())
_y = [
Tensor(Dtype.FP16x16, y[0].shape, y[0].flatten()),
Tensor(Dtype.FP16x16, y[1].shape, y[1].flatten()),
Tensor(Dtype.FP16x16, y[2].shape, y[2].flatten()),
]
name = "split_to_sequence_fp16x16_2d_uneven"
make_test(
[_x], _y, "input_0.split_to_sequence(1, 1, Option::Some(TensorTrait::<u32>::new(shape: array![1].span(), data: array![3].span(),)))", name)
split_to_sequence_1D()
split_to_sequence_2D()
split_to_sequence_zero_size()
split_to_sequence_1d_uneven()
        split_to_sequence_2d_uneven()
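# Aside: the expected outputs above imply that a scalar `split` value n is
# treated by these tests as "split into n chunks" (sizes via ceiling
# division), not as ONNX's "chunks of size n". A minimal sketch of that
# assumed semantics:
def _chunk_sizes_sketch(length, n_chunks):
    size = -(-length // n_chunks)  # ceil(length / n_chunks)
    sizes = []
    while length > 0:
        sizes.append(min(size, length))
        length -= size
    return sizes

assert _chunk_sizes_sketch(7, 4) == [2, 2, 2, 1]  # split_to_sequence_1d_uneven
assert _chunk_sizes_sketch(8, 3) == [3, 3, 2]     # split_to_sequence_2d_uneven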
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl
class Sqrt(RunAll):
@staticmethod
def sqrt_fp8x23():
x = np.random.uniform(0, 6, (2, 2)).astype(np.float64)
y = np.sqrt(x)
x = Tensor(Dtype.FP8x23, x.shape, to_fp(
x.flatten(), FixedImpl.FP8x23))
y = Tensor(Dtype.FP8x23, y.shape, to_fp(
y.flatten(), FixedImpl.FP8x23))
name = "sqrt_fp8x23"
make_test([x], y, "input_0.sqrt()", name)
@staticmethod
def sqrt_fp16x16():
x = np.random.uniform(0, 6, (2, 2)).astype(np.float64)
y = np.sqrt(x)
x = Tensor(Dtype.FP16x16, x.shape, to_fp(
x.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(
y.flatten(), FixedImpl.FP16x16))
name = "sqrt_fp16x16"
make_test([x], y, "input_0.sqrt()", name)
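# Aside: a minimal sketch of what the `to_fp` helper is assumed to do here:
# scale floats by 2**16 (FP16x16) or 2**23 (FP8x23) and keep the integer
# part, so 1.0 -> 65536 and 1.0 -> 8388608 respectively. The real conversion
# lives in ..helpers; this sketch is illustration only.
def _to_fp_sketch(arr, frac_bits):
    return (np.asarray(arr, dtype=np.float64) * 2 ** frac_bits).astype(np.int64)

assert _to_fp_sketch([1.0], 16)[0] == 65536
assert _to_fp_sketch([1.0], 23)[0] == 8388608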
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl
class Squeeze(RunAll):
@staticmethod
def squeeze_i8():
def squeeze():
x = np.ones((1, 2, 1, 2, 1), dtype=np.int8)
y = np.ones((2, 2, 1), dtype=np.int8)
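            # squeezing axes 0 and 2 of shape (1, 2, 1, 2, 1) leaves (2, 2, 1); the size-1 axis 4 is kept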
x = Tensor(Dtype.I8, x.shape, x.flatten())
y = Tensor(Dtype.I8, y.shape, y.flatten())
name = "squeeze_i8"
make_test(
[x], y, "input_0.squeeze(Option::Some(array![0, 2].span()))", name)
squeeze()
@staticmethod
    def squeeze_i32():
def squeeze():
x = np.ones((1, 2, 1, 2, 1), dtype=np.int32)
y = np.ones((2, 2, 1), dtype=np.int32)
x = Tensor(Dtype.I32, x.shape, x.flatten())
y = Tensor(Dtype.I32, y.shape, y.flatten())
name = "squeeze"
make_test(
[x], y, "input_0.squeeze(Option::Some(array![0, 2].span()))", name)
squeeze()
@staticmethod
def squeeze_u32():
def squeeze():
x = np.ones((1, 2, 1, 2, 1), dtype=np.uint32)
y = np.ones((2, 2, 1), dtype=np.uint32)
x = Tensor(Dtype.U32, x.shape, x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
name = "squeeze_u32"
make_test(
[x], y, "input_0.squeeze(Option::Some(array![0, 2].span()))", name)
squeeze()
@staticmethod
    def squeeze_fp16x16():
def squeeze():
            x = to_fp(np.random.randint(0, 255, (1, 2, 1, 2, 1)
                                        ).astype(np.int64), FixedImpl.FP16x16)
            # expected output: the same data with axes 0 and 2 squeezed away
            y = x.reshape(2, 2, 1)
x = Tensor(Dtype.FP16x16, x.shape, x.flatten())
y = Tensor(Dtype.FP16x16, y.shape, y.flatten())
name = "squeeze_fP16x16"
make_test(
[x], y, "input_0.squeeze(Option::Some(array![0, 2].span()))", name)
squeeze()
@staticmethod
    def squeeze_fp8x23():
        def squeeze():
            x = to_fp(np.random.randint(0, 255, (1, 2, 1, 2, 1)
                                        ).astype(np.int64), FixedImpl.FP8x23)
            # expected output: the same data with axes 0 and 2 squeezed away
            y = x.reshape(2, 2, 1)
x = Tensor(Dtype.FP8x23, x.shape, x.flatten())
y = Tensor(Dtype.FP8x23, y.shape, y.flatten())
name = "squeeze_fP8x23"
make_test(
[x], y, "input_0.squeeze(Option::Some(array![0, 2].span()))", name)
        squeeze()
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl
class Sub(RunAll):
@staticmethod
def sub_u32():
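        # x is drawn from [3, 6) and y from [0, 3) so x - y never underflows u32 (here and in broadcast() below)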
def default():
x = np.random.randint(3, 6, (3, 3, 3)).astype(np.uint32)
y = np.random.randint(0, 3, (3, 3, 3)).astype(np.uint32)
z = x - y
x = Tensor(Dtype.U32, x.shape, x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
z = Tensor(Dtype.U32, z.shape, z.flatten())
name = "sub_u32"
make_test([x, y], z, "input_0 - input_1", name)
def broadcast():
x = np.random.randint(3, 6, (3, 3, 3)).astype(np.uint32)
y = np.random.randint(0, 3, (1, 3, 1)).astype(np.uint32)
z = x - y
x = Tensor(Dtype.U32, x.shape, x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
z = Tensor(Dtype.U32, z.shape, z.flatten())
name = "sub_u32_broadcast"
make_test([x, y], z, "input_0 - input_1", name)
default()
broadcast()
@staticmethod
def sub_i32():
def default():
x = np.random.randint(-3, 3, (3, 3, 3)).astype(np.int32)
y = np.random.randint(-3, 3, (3, 3, 3)).astype(np.int32)
z = x - y
x = Tensor(Dtype.I32, x.shape, x.flatten())
y = Tensor(Dtype.I32, y.shape, y.flatten())
z = Tensor(Dtype.I32, z.shape, z.flatten())
name = "sub_i32"
make_test([x, y], z, "input_0 - input_1", name)
def broadcast():
x = np.random.randint(-3, 3, (3, 3, 3)).astype(np.int32)
y = np.random.randint(-3, 3, (1, 3, 1)).astype(np.int32)
z = x - y
x = Tensor(Dtype.I32, x.shape, x.flatten())
y = Tensor(Dtype.I32, y.shape, y.flatten())
z = Tensor(Dtype.I32, z.shape, z.flatten())
name = "sub_i32_broadcast"
make_test([x, y], z, "input_0 - input_1", name)
default()
broadcast()
@staticmethod
def sub_i8():
def default():
            x = np.random.randint(-3, 3, (3, 3, 3)).astype(np.int8)
y = np.random.randint(-3, 3, (3, 3, 3)).astype(np.int8)
z = x - y
x = Tensor(Dtype.I8, x.shape, x.flatten())
y = Tensor(Dtype.I8, y.shape, y.flatten())
z = Tensor(Dtype.I8, z.shape, z.flatten())
name = "sub_i8"
make_test([x, y], z, "input_0 - input_1", name)
def broadcast():
x = np.random.randint(-3, 3, (3, 3, 3)).astype(np.int8)
y = np.random.randint(-3, 3, (1, 3, 1)).astype(np.int8)
z = x - y
x = Tensor(Dtype.I8, x.shape, x.flatten())
y = Tensor(Dtype.I8, y.shape, y.flatten())
z = Tensor(Dtype.I8, z.shape, z.flatten())
name = "sub_i8_broadcast"
make_test([x, y], z, "input_0 - input_1", name)
default()
broadcast()
@staticmethod
def sub_fp8x23():
def default():
x = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
y = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
z = x - y
x = Tensor(Dtype.FP8x23, x.shape, to_fp(
x.flatten(), FixedImpl.FP8x23))
y = Tensor(Dtype.FP8x23, y.shape, to_fp(
y.flatten(), FixedImpl.FP8x23))
z = Tensor(Dtype.FP8x23, z.shape, to_fp(
z.flatten(), FixedImpl.FP8x23))
name = "sub_fp8x23"
make_test([x, y], z, "input_0 - input_1", name)
def broadcast():
x = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
y = np.random.randint(-3, 3, (1, 3, 1)).astype(np.float64)
z = x - y
x = Tensor(Dtype.FP8x23, x.shape, to_fp(
x.flatten(), FixedImpl.FP8x23))
y = Tensor(Dtype.FP8x23, y.shape, to_fp(
y.flatten(), FixedImpl.FP8x23))
z = Tensor(Dtype.FP8x23, z.shape, to_fp(
z.flatten(), FixedImpl.FP8x23))
name = "sub_fp8x23_broadcast" |
make_test([x, y], z, "input_0 - input_1", name)
default()
broadcast()
@staticmethod
def sub_fp16x16():
def default():
x = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
y = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
z = x - y
x = Tensor(Dtype.FP16x16, x.shape, to_fp(
x.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(
y.flatten(), FixedImpl.FP16x16))
z = Tensor(Dtype.FP16x16, z.shape, to_fp(
z.flatten(), FixedImpl.FP16x16))
name = "sub_fp16x16"
make_test([x, y], z, "input_0 - input_1", name)
def broadcast():
x = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
y = np.random.randint(-3, 3, (1, 3, 1)).astype(np.float64)
z = x - y
x = Tensor(Dtype.FP16x16, x.shape, to_fp(
x.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(
y.flatten(), FixedImpl.FP16x16))
z = Tensor(Dtype.FP16x16, z.shape, to_fp(
z.flatten(), FixedImpl.FP16x16))
name = "sub_fp16x16_broadcast"
make_test([x, y], z, "input_0 - input_1", name)
default()
        broadcast()
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl
class Tanh(RunAll):
@staticmethod
def tanh_fp8x23():
x = np.random.uniform(-3, 3, (2, 2)).astype(np.float64)
y = np.tanh(x)
x = Tensor(Dtype.FP8x23, x.shape, to_fp(
x.flatten(), FixedImpl.FP8x23))
y = Tensor(Dtype.FP8x23, y.shape, to_fp(
y.flatten(), FixedImpl.FP8x23))
name = "tanh_fp8x23"
make_test([x], y, "input_0.tanh()", name)
@staticmethod
def tanh_fp16x16():
x = np.random.uniform(-3, 3, (2, 2)).astype(np.float64)
y = np.tanh(x)
x = Tensor(Dtype.FP16x16, x.shape, to_fp(
x.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(
y.flatten(), FixedImpl.FP16x16))
name = "tanh_fp16x16"
make_test([x], y, "input_0.tanh()", name)
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl, Trait
class Thresholded_relu(RunAll):
@staticmethod
def thresholded_relu_fp8x23():
alpha = 1.0
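        # alpha = 1.0 in FP8x23 fixed point is 1.0 * 2**23 = 8388608, the magnitude passed to FixedTrait::new below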
x = np.random.uniform(-5, 7, (2, 2)).astype(np.float64)
y = np.clip(x, alpha, np.inf)
y[y == alpha] = 0
x = Tensor(Dtype.FP8x23, x.shape, to_fp(
x.flatten(), FixedImpl.FP8x23))
y = Tensor(Dtype.FP8x23, y.shape, to_fp(
y.flatten(), FixedImpl.FP8x23))
name = "thresholded_relu_fp8x23"
make_test([x], y, "NNTrait::thresholded_relu(@input_0, @FixedTrait::new(256, false))",
name, Trait.NN)
@staticmethod
def thresholded_relu_fp16x16():
alpha = 1.0
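        # alpha = 1.0 in FP16x16 fixed point is 1.0 * 2**16 = 65536, the magnitude passed to FixedTrait::new below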
x = np.random.uniform(-5, 7, (2, 2)).astype(np.float64)
y = np.clip(x, alpha, np.inf)
y[y == alpha] = 0
x = Tensor(Dtype.FP16x16, x.shape, to_fp(
x.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(
y.flatten(), FixedImpl.FP16x16))
name = "thresholded_relu_fp16x16"
make_test([x], y, "NNTrait::thresholded_relu(@input_0, @FixedTrait::new(65536, false))",
name, Trait.NN)
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl
class Transpose(RunAll):
@staticmethod
def transpose_u32():
def transpose_2D():
x = np.random.randint(0, 255, (2, 2)).astype(np.uint32)
y = np.transpose(x, [1, 0])
x = Tensor(Dtype.U32, x.shape, x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
name = "transpose_u32_2d"
make_test(
[x], y, "input_0.transpose(array![1, 0].span())", name)
def transpose_3D():
x = np.random.randint(0, 255, (2, 2, 2)).astype(np.uint32)
y = np.transpose(x, [1, 2, 0])
x = Tensor(Dtype.U32, x.shape, x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
name = "transpose_u32_3d"
make_test(
[x], y, "input_0.transpose(array![1, 2, 0].span())", name)
transpose_2D()
transpose_3D()
@staticmethod
def transpose_i32():
def transpose_2D():
x = np.random.randint(-127, 127, (2, 2)).astype(np.int32)
y = np.transpose(x, [1, 0])
x = Tensor(Dtype.I32, x.shape, x.flatten())
y = Tensor(Dtype.I32, y.shape, y.flatten())
name = "transpose_i32_2d"
make_test(
[x], y, "input_0.transpose(array![1, 0].span())", name)
def transpose_3D():
x = np.random.randint(-127, 127, (2, 2, 2)).astype(np.int32)
y = np.transpose(x, [1, 2, 0])
x = Tensor(Dtype.I32, x.shape, x.flatten())
y = Tensor(Dtype.I32, y.shape, y.flatten())
name = "transpose_i32_3d"
make_test(
[x], y, "input_0.transpose(array![1, 2, 0].span())", name)
transpose_2D()
transpose_3D()
@staticmethod
def transpose_i8():
def transpose_2D():
x = np.random.randint(-127, 127, (2, 2)).astype(np.int8)
y = np.transpose(x, [1, 0])
x = Tensor(Dtype.I8, x.shape, x.flatten())
            y = Tensor(Dtype.I8, y.shape, y.flatten())
name = "transpose_i8_2d"
make_test(
[x], y, "input_0.transpose(array![1, 0].span())", name)
def transpose_3D():
x = np.random.randint(-127, 127, (2, 2, 2)).astype(np.int8)
y = np.transpose(x, [1, 2, 0])
x = Tensor(Dtype.I8, x.shape, x.flatten())
y = Tensor(Dtype.I8, y.shape, y.flatten())
name = "transpose_i8_3d"
make_test(
[x], y, "input_0.transpose(array![1, 2, 0].span())", name)
transpose_2D()
transpose_3D()
@staticmethod
def transpose_fp8x23():
def transpose_2D():
x = to_fp(np.random.randint(-127, 127, (2, 2)
).astype(np.int64), FixedImpl.FP8x23)
y = np.transpose(x, [1, 0])
x = Tensor(Dtype.FP8x23, x.shape, x.flatten())
y = Tensor(Dtype.FP8x23, y.shape, y.flatten())
name = "transpose_fp8x23_2d"
make_test(
[x], y, "input_0.transpose(array![1, 0].span())", name)
def transpose_3D():
x = to_fp(np.random.randint(-127, 127, (2, 2, 2)
).astype(np.int64), FixedImpl.FP8x23)
y = np.transpose(x, [1, 2, 0])
x = Tensor(Dtype.FP8x23, x.shape, x.flatten())
y = Tensor(Dtype.FP8x23, y.shape, y.flatten())
name = "transpose_fp8x23_3d"
make_test(
[x], y, "input_0.transpose(array![1, 2, 0].span())", name)
transpose_2D()
transpose_3D()
@staticmethod
def transpose_fp16x16():
def transpose_2D():
x = to_fp(np.random.randint(-127, 127, (2, 2)
).astype(np.int64), FixedImpl.FP16x16)
y = np.transpose(x, [1, 0])
x = Tensor(Dtype.FP16x16, x.shape, x.flatten())
y = Tensor(Dtype.FP16x16, y.shape, y.flatten())
name = "transpose_fp16x16_2d" |
make_test(
[x], y, "input_0.transpose(array![1, 0].span())", name)
def transpose_3D():
x = to_fp(np.random.randint(-127, 127, (2, 2, 2)
).astype(np.int64), FixedImpl.FP16x16)
y = np.transpose(x, [1, 2, 0])
x = Tensor(Dtype.FP16x16, x.shape, x.flatten())
y = Tensor(Dtype.FP16x16, y.shape, y.flatten())
name = "transpose_fp16x16_3d"
make_test(
[x], y, "input_0.transpose(array![1, 2, 0].span())", name)
transpose_2D()
        transpose_3D()
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl
class Trilu(RunAll):
@staticmethod
def trilu_u32():
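        # trilu(upper, k): upper = false mirrors np.tril and upper = true mirrors np.triu,
        # with k as the diagonal offset (inferred from the np pairings below)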
def tril():
x = np.random.randint(0, 255, (4, 5)).astype(np.uint32)
y = np.tril(x)
x = Tensor(Dtype.U32, x.shape, x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
name = "tril_u32"
make_test(
[x], y, "input_0.trilu(false, 0)", name)
def tril_neg():
x = np.random.randint(0, 255, (4, 5)).astype(np.uint32)
y = np.tril(x, k=-1)
x = Tensor(Dtype.U32, x.shape, x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
name = "tril_u32_neg"
make_test(
[x], y, "input_0.trilu(false, -1)", name)
def tril_one_row():
x = np.random.randint(0, 255, (3, 1, 5)).astype(np.uint32)
y = np.tril(x)
x = Tensor(Dtype.U32, x.shape, x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
name = "tril_u32_one_row"
make_test(
[x], y, "input_0.trilu(false, 0)", name)
def tril_out_neg():
x = np.random.randint(0, 255, (4, 5)).astype(np.uint32)
y = np.tril(x, k=-7)
x = Tensor(Dtype.U32, x.shape, x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
name = "tril_u32_out_neg"
make_test(
[x], y, "input_0.trilu(false, -7)", name)
def tril_out_pos():
x = np.random.randint(0, 255, (4, 5)).astype(np.uint32)
y = np.tril(x, k=6)
x = Tensor(Dtype.U32, x.shape, x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
name = "tril_u32_out_pos"
make_test(
[x], y, "input_0.trilu(false, 6)", name)
def tril_pos():
x = np.random.randint(0, 255, (4, 5)).astype(np.uint32)
y = np.tril(x, k=2)
            x = Tensor(Dtype.U32, x.shape, x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
name = "tril_u32_pos"
make_test(
[x], y, "input_0.trilu(false, 2)", name)
def tril_square():
x = np.random.randint(0, 255, (2, 3, 3)).astype(np.uint32)
y = np.tril(x, k=0)
x = Tensor(Dtype.U32, x.shape, x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
name = "tril_u32_square"
make_test(
[x], y, "input_0.trilu(false, 0)", name)
def tril_square_neg():
x = np.random.randint(0, 255, (2, 3, 3)).astype(np.uint32)
y = np.tril(x, k=-1)
x = Tensor(Dtype.U32, x.shape, x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
name = "tril_u32_square_neg"
make_test(
[x], y, "input_0.trilu(false, -1)", name)
def tril_zero():
x = np.random.randint(0, 255, (3, 0, 5)).astype(np.uint32)
y = np.tril(x, k=6)
x = Tensor(Dtype.U32, x.shape, x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
name = "tril_u32_zero"
make_test(
[x], y, "input_0.trilu(false, 6)", name)
def triu():
x = np.random.randint(0, 255, (4, 5)).astype(np.uint32)
y = np.triu(x)
x = Tensor(Dtype.U32, x.shape, x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
name = "triu_u32"
make_test(
[x], y, "input_0.trilu(true, 0)", name)
def triu_neg():
x = np.random.randint(0, 255, (4, 5)).astype(np.uint32)
y = np.triu(x, k=-1)
x = Tensor(Dtype.U32, x.shape, x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
name = "triu_u32_neg"
make_test(
[x], y, "inpu |
t_0.trilu(true, -1)", name)
def triu_one_row():
x = np.random.randint(0, 255, (3, 1, 5)).astype(np.uint32)
y = np.triu(x)
x = Tensor(Dtype.U32, x.shape, x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
name = "triu_u32_one_row"
make_test(
[x], y, "input_0.trilu(true, 0)", name)
def triu_out_neg():
x = np.random.randint(0, 255, (4, 5)).astype(np.uint32)
y = np.triu(x, k=-7)
x = Tensor(Dtype.U32, x.shape, x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
name = "triu_u32_out_neg"
make_test(
[x], y, "input_0.trilu(true, -7)", name)
def triu_out_pos():
x = np.random.randint(0, 255, (4, 5)).astype(np.uint32)
y = np.triu(x, k=6)
x = Tensor(Dtype.U32, x.shape, x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
name = "triu_u32_out_pos"
make_test(
[x], y, "input_0.trilu(true, 6)", name)
def triu_pos():
x = np.random.randint(0, 255, (4, 5)).astype(np.uint32)
y = np.triu(x, k=2)
x = Tensor(Dtype.U32, x.shape, x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
name = "triu_u32_pos"
make_test(
[x], y, "input_0.trilu(true, 2)", name)
def triu_square():
x = np.random.randint(0, 255, (2, 3, 3)).astype(np.uint32)
y = np.triu(x, k=0)
x = Tensor(Dtype.U32, x.shape, x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
name = "triu_u32_square"
make_test(
[x], y, "input_0.trilu(true, 0)", name)
def triu_square_neg():
x = np.random.randint(0, 255, (2, 3, 3)).astype(np.uint32)
y = np.triu(x, k=-1)
            x = Tensor(Dtype.U32, x.shape, x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
name = "triu_u32_square_neg"
make_test(
[x], y, "input_0.trilu(true, -1)", name)
def triu_zero():
x = np.random.randint(0, 255, (3, 0, 5)).astype(np.uint32)
y = np.triu(x, k=6)
x = Tensor(Dtype.U32, x.shape, x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
name = "triu_u32_zero"
make_test(
[x], y, "input_0.trilu(true, 6)", name)
tril()
tril_neg()
tril_one_row()
tril_out_neg()
tril_out_pos()
tril_pos()
tril_square()
tril_square_neg()
tril_zero()
triu()
triu_neg()
triu_one_row()
triu_out_neg()
triu_out_pos()
triu_pos()
triu_square()
triu_square_neg()
triu_zero()
@staticmethod
def trilu_i32():
def tril():
x = np.random.randint(-127, 127, (4, 5)).astype(np.int32)
y = np.tril(x)
x = Tensor(Dtype.I32, x.shape, x.flatten())
y = Tensor(Dtype.I32, y.shape, y.flatten())
name = "tril_i32"
make_test(
[x], y, "input_0.trilu(false, 0)", name)
def tril_neg():
x = np.random.randint(-127, 127, (4, 5)).astype(np.int32)
y = np.tril(x, k=-1)
x = Tensor(Dtype.I32, x.shape, x.flatten())
y = Tensor(Dtype.I32, y.shape, y.flatten())
name = "tril_neg_i32"
make_test(
[x], y, "input_0.trilu(false, -1)", name)
def tril_one_row():
x = np.random.randint(-127, 127, (3, 1, 5)).astype(np.int32)
y = np.tril(x)
x = Tensor(Dtype.I32, x.shape, x.flatten())
y = Tensor(Dtype.I32, y.shape, y.flatten())
name = "tril_i32_one_row" |
make_test(
[x], y, "input_0.trilu(false, 0)", name)
def tril_out_neg():
x = np.random.randint(-127, 127, (4, 5)).astype(np.int32)
y = np.tril(x, k=-7)
x = Tensor(Dtype.I32, x.shape, x.flatten())
y = Tensor(Dtype.I32, y.shape, y.flatten())
name = "tril_i32_out_neg"
make_test(
[x], y, "input_0.trilu(false, -7)", name)
def tril_out_pos():
x = np.random.randint(-127, 127, (4, 5)).astype(np.int32)
y = np.tril(x, k=6)
x = Tensor(Dtype.I32, x.shape, x.flatten())
y = Tensor(Dtype.I32, y.shape, y.flatten())
name = "tril_i32_out_pos"
make_test(
[x], y, "input_0.trilu(false, 6)", name)
def tril_pos():
x = np.random.randint(-127, 127, (4, 5)).astype(np.int32)
y = np.tril(x, k=2)
x = Tensor(Dtype.I32, x.shape, x.flatten())
y = Tensor(Dtype.I32, y.shape, y.flatten())
name = "tril_i32_pos"
make_test(
[x], y, "input_0.trilu(false, 2)", name)
def tril_square():
x = np.random.randint(-127, 127, (2, 3, 3)).astype(np.int32)
y = np.tril(x, k=0)
x = Tensor(Dtype.I32, x.shape, x.flatten())
y = Tensor(Dtype.I32, y.shape, y.flatten())
name = "tril_i32_square"
make_test(
[x], y, "input_0.trilu(false, 0)", name)
def tril_square_neg():
x = np.random.randint(-127, 127, (2, 3, 3)).astype(np.int32)
y = np.tril(x, k=-1)
x = Tensor(Dtype.I32, x.shape, x.flatten())
y = Tensor(Dtype.I32, y.shape, y.flatten())
name = "tril_i32_square_neg"
make_test(
[x], y, "input_0.trilu(false, -1)", name)
def tril_zero():
            x = np.random.randint(-127, 127, (3, 0, 5)).astype(np.int32)
y = np.tril(x, k=6)
x = Tensor(Dtype.I32, x.shape, x.flatten())
y = Tensor(Dtype.I32, y.shape, y.flatten())
name = "tril_i32_zero"
make_test(
[x], y, "input_0.trilu(false, 6)", name)
def triu():
x = np.random.randint(-127, 127, (4, 5)).astype(np.int32)
y = np.triu(x)
x = Tensor(Dtype.I32, x.shape, x.flatten())
y = Tensor(Dtype.I32, y.shape, y.flatten())
name = "triu_i32"
make_test(
[x], y, "input_0.trilu(true, 0)", name)
def triu_neg():
x = np.random.randint(-127, 127, (4, 5)).astype(np.int32)
y = np.triu(x, k=-1)
x = Tensor(Dtype.I32, x.shape, x.flatten())
y = Tensor(Dtype.I32, y.shape, y.flatten())
name = "triu_i32_neg"
make_test(
[x], y, "input_0.trilu(true, -1)", name)
def triu_one_row():
x = np.random.randint(-127, 127, (3, 1, 5)).astype(np.int32)
y = np.triu(x)
x = Tensor(Dtype.I32, x.shape, x.flatten())
y = Tensor(Dtype.I32, y.shape, y.flatten())
name = "triu_i32_one_row"
make_test(
[x], y, "input_0.trilu(true, 0)", name)
def triu_out_neg():
x = np.random.randint(-127, 127, (4, 5)).astype(np.int32)
y = np.triu(x, k=-7)
x = Tensor(Dtype.I32, x.shape, x.flatten())
y = Tensor(Dtype.I32, y.shape, y.flatten())
name = "triu_i32_out_neg"
make_test(
[x], y, "input_0.trilu(true, -7)", name)
def triu_out_pos():
x = np.random.randint(-127, 127, (4, 5)).astype(np.int32)
y = np.triu(x, k=6)
x = Tensor(Dtype.I32, x.shape, x.flatten())
y = Tensor(Dtype.I32, y.shape, y.flatten())
name = "triu_i32_out_pos" |
make_test(
[x], y, "input_0.trilu(true, 6)", name)
def triu_pos():
x = np.random.randint(-127, 127, (4, 5)).astype(np.int32)
y = np.triu(x, k=2)
x = Tensor(Dtype.I32, x.shape, x.flatten())
y = Tensor(Dtype.I32, y.shape, y.flatten())
name = "triu_i32_pos"
make_test(
[x], y, "input_0.trilu(true, 2)", name)
def triu_square():
x = np.random.randint(-127, 127, (2, 3, 3)).astype(np.int32)
y = np.triu(x, k=0)
x = Tensor(Dtype.I32, x.shape, x.flatten())
y = Tensor(Dtype.I32, y.shape, y.flatten())
name = "triu_i32_square"
make_test(
[x], y, "input_0.trilu(true, 0)", name)
def triu_square_neg():
x = np.random.randint(-127, 127, (2, 3, 3)).astype(np.int32)
y = np.triu(x, k=-1)
x = Tensor(Dtype.I32, x.shape, x.flatten())
y = Tensor(Dtype.I32, y.shape, y.flatten())
name = "triu_i32_square_neg"
make_test(
[x], y, "input_0.trilu(true, -1)", name)
def triu_zero():
x = np.random.randint(-127, 127, (3, 0, 5)).astype(np.int32)
y = np.triu(x, k=6)
x = Tensor(Dtype.I32, x.shape, x.flatten())
y = Tensor(Dtype.I32, y.shape, y.flatten())
name = "triu_i32_zero"
make_test(
[x], y, "input_0.trilu(true, 6)", name)
tril()
tril_neg()
tril_one_row()
tril_out_neg()
tril_out_pos()
tril_pos()
tril_square()
tril_square_neg()
tril_zero()
triu()
triu_neg()
triu_one_row()
triu_out_neg()
triu_out_pos()
triu_pos()
triu_square()
triu_square_neg()
triu_zero()
@staticmethod
def trilu_i8():
def tril():
            x = np.random.randint(-127, 127, (4, 5)).astype(np.int8)
y = np.tril(x)
x = Tensor(Dtype.I8, x.shape, x.flatten())
y = Tensor(Dtype.I8, y.shape, y.flatten())
name = "tril_i8"
make_test(
[x], y, "input_0.trilu(false, 0)", name)
def tril_neg():
x = np.random.randint(-127, 127, (4, 5)).astype(np.int8)
y = np.tril(x, k=-1)
x = Tensor(Dtype.I8, x.shape, x.flatten())
y = Tensor(Dtype.I8, y.shape, y.flatten())
name = "tril_i8_neg"
make_test(
[x], y, "input_0.trilu(false, -1)", name)
def tril_one_row():
x = np.random.randint(-127, 127, (3, 1, 5)).astype(np.int8)
y = np.tril(x)
x = Tensor(Dtype.I8, x.shape, x.flatten())
y = Tensor(Dtype.I8, y.shape, y.flatten())
name = "tril_i8_one_row"
make_test(
[x], y, "input_0.trilu(false, 0)", name)
def tril_out_neg():
x = np.random.randint(-127, 127, (4, 5)).astype(np.int8)
y = np.tril(x, k=-7)
x = Tensor(Dtype.I8, x.shape, x.flatten())
y = Tensor(Dtype.I8, y.shape, y.flatten())
name = "tril_i8_out_neg"
make_test(
[x], y, "input_0.trilu(false, -7)", name)
def tril_out_pos():
x = np.random.randint(-127, 127, (4, 5)).astype(np.int8)
y = np.tril(x, k=6)
x = Tensor(Dtype.I8, x.shape, x.flatten())
y = Tensor(Dtype.I8, y.shape, y.flatten())
name = "tril_i8_out_pos"
make_test(
[x], y, "input_0.trilu(false, 6)", name)
def tril_pos():
x = np.random.randint(-127, 127, (4, 5)).astype(np.int8)
y = np.tril(x, k=2)
x = Tensor(Dtype.I8, x.shape, x.flatten())
            y = Tensor(Dtype.I8, y.shape, y.flatten())
name = "tril_i8_pos"
make_test(
[x], y, "input_0.trilu(false, 2)", name)
def tril_square():
x = np.random.randint(-127, 127, (2, 3, 3)).astype(np.int8)
y = np.tril(x, k=0)
x = Tensor(Dtype.I8, x.shape, x.flatten())
y = Tensor(Dtype.I8, y.shape, y.flatten())
name = "tril_i8_square"
make_test(
[x], y, "input_0.trilu(false, 0)", name)
def tril_square_neg():
x = np.random.randint(-127, 127, (2, 3, 3)).astype(np.int8)
y = np.tril(x, k=-1)
x = Tensor(Dtype.I8, x.shape, x.flatten())
y = Tensor(Dtype.I8, y.shape, y.flatten())
name = "tril_i8_square_neg"
make_test(
[x], y, "input_0.trilu(false, -1)", name)
def tril_zero():
x = np.random.randint(-127, 127, (3, 0, 5)).astype(np.int8)
y = np.tril(x, k=6)
x = Tensor(Dtype.I8, x.shape, x.flatten())
y = Tensor(Dtype.I8, y.shape, y.flatten())
name = "tril_i8_zero"
make_test(
[x], y, "input_0.trilu(false, 6)", name)
def triu():
x = np.random.randint(-127, 127, (4, 5)).astype(np.int8)
y = np.triu(x)
x = Tensor(Dtype.I8, x.shape, x.flatten())
y = Tensor(Dtype.I8, y.shape, y.flatten())
name = "triu_i8"
make_test(
[x], y, "input_0.trilu(true, 0)", name)
def triu_neg():
x = np.random.randint(-127, 127, (4, 5)).astype(np.int8)
y = np.triu(x, k=-1)
x = Tensor(Dtype.I8, x.shape, x.flatten())
y = Tensor(Dtype.I8, y.shape, y.flatten())
name = "triu_i8_neg"
make_test(
[x], y, "input_0.trilu(true, -1)", name)
def triu_one_row():
            x = np.random.randint(-127, 127, (3, 1, 5)).astype(np.int8)
y = np.triu(x)
x = Tensor(Dtype.I8, x.shape, x.flatten())
y = Tensor(Dtype.I8, y.shape, y.flatten())
name = "triu_i8_one_row"
make_test(
[x], y, "input_0.trilu(true, 0)", name)
def triu_out_neg():
x = np.random.randint(-127, 127, (4, 5)).astype(np.int8)
y = np.triu(x, k=-7)
x = Tensor(Dtype.I8, x.shape, x.flatten())
y = Tensor(Dtype.I8, y.shape, y.flatten())
name = "triu_i8_out_neg"
make_test(
[x], y, "input_0.trilu(true, -7)", name)
def triu_out_pos():
x = np.random.randint(-127, 127, (4, 5)).astype(np.int8)
y = np.triu(x, k=6)
x = Tensor(Dtype.I8, x.shape, x.flatten())
y = Tensor(Dtype.I8, y.shape, y.flatten())
name = "triu_i8_out_pos"
make_test(
[x], y, "input_0.trilu(true, 6)", name)
def triu_pos():
x = np.random.randint(-127, 127, (4, 5)).astype(np.int8)
y = np.triu(x, k=2)
x = Tensor(Dtype.I8, x.shape, x.flatten())
y = Tensor(Dtype.I8, y.shape, y.flatten())
name = "triu_i8_pos"
make_test(
[x], y, "input_0.trilu(true, 2)", name)
def triu_square():
x = np.random.randint(-127, 127, (2, 3, 3)).astype(np.int8)
y = np.triu(x, k=0)
x = Tensor(Dtype.I8, x.shape, x.flatten())
y = Tensor(Dtype.I8, y.shape, y.flatten())
name = "triu_i8_square"
make_test(
[x], y, "input_0.trilu(true, 0)", name)
def triu_square_neg():
x = np.random.randint(-127, 127, (2, 3, 3)).astype(np.int8)
y = np.triu(x, k=-1)
x = Tensor(Dtype.I8, x.shape, x.flatten())
y = Tensor(Dtype.I8, y.shape, y.flatten())
name = "triu_i8_square_n |
eg"
make_test(
[x], y, "input_0.trilu(true, -1)", name)
def triu_zero():
x = np.random.randint(-127, 127, (3, 0, 5)).astype(np.int8)
y = np.triu(x, k=6)
x = Tensor(Dtype.I8, x.shape, x.flatten())
y = Tensor(Dtype.I8, y.shape, y.flatten())
name = "triu_i8_zero"
make_test(
[x], y, "input_0.trilu(true, 6)", name)
tril()
tril_neg()
tril_one_row()
tril_out_neg()
tril_out_pos()
tril_pos()
tril_square()
tril_square_neg()
tril_zero()
triu()
triu_neg()
triu_one_row()
triu_out_neg()
triu_out_pos()
triu_pos()
triu_square()
triu_square_neg()
triu_zero()
@staticmethod
def trilu_fp8x23():
def tril():
x = to_fp(np.random.randint(-127, 127, (4, 5)).astype(np.int64), FixedImpl.FP8x23)
y = np.tril(x)
x = Tensor(Dtype.FP8x23, x.shape, x.flatten())
y = Tensor(Dtype.FP8x23, y.shape, y.flatten())
name = "tril_fp8x23"
make_test(
[x], y, "input_0.trilu(false, 0)", name)
def tril_neg():
x = to_fp(np.random.randint(-127, 127, (4, 5)).astype(np.int64), FixedImpl.FP8x23)
y = np.tril(x, k=-1)
x = Tensor(Dtype.FP8x23, x.shape, x.flatten())
y = Tensor(Dtype.FP8x23, y.shape, y.flatten())
name = "tril_fp8x23_neg"
make_test(
[x], y, "input_0.trilu(false, -1)", name)
def tril_one_row():
x = to_fp(np.random.randint(-127, 127, (3, 1, 5)).astype(np.int64), FixedImpl.FP8x23)
y = np.tril(x)
x = Tensor(Dtype.FP8x23, x.shape, x.flatten())
y = Tensor(Dtype.FP8x23, y.shape, y.flatten())
name = "tril_fp8x23_one_row"
            make_test(
[x], y, "input_0.trilu(false, 0)", name)
def tril_out_neg():
x = to_fp(np.random.randint(-127, 127, (4, 5)).astype(np.int64), FixedImpl.FP8x23)
y = np.tril(x, k=-7)
x = Tensor(Dtype.FP8x23, x.shape, x.flatten())
y = Tensor(Dtype.FP8x23, y.shape, y.flatten())
name = "tril_fp8x23_out_neg"
make_test(
[x], y, "input_0.trilu(false, -7)", name)
def tril_out_pos():
x = to_fp(np.random.randint(-127, 127, (4, 5)).astype(np.int64), FixedImpl.FP8x23)
y = np.tril(x, k=6)
x = Tensor(Dtype.FP8x23, x.shape, x.flatten())
y = Tensor(Dtype.FP8x23, y.shape, y.flatten())
name = "tril_fp8x23_out_pos"
make_test(
[x], y, "input_0.trilu(false, 6)", name)
def tril_pos():
x = to_fp(np.random.randint(-127, 127, (4, 5)).astype(np.int64), FixedImpl.FP8x23)
y = np.tril(x, k=2)
x = Tensor(Dtype.FP8x23, x.shape, x.flatten())
y = Tensor(Dtype.FP8x23, y.shape, y.flatten())
name = "tril_fp8x23_pos"
make_test(
[x], y, "input_0.trilu(false, 2)", name)
def tril_square():
x = to_fp(np.random.randint(-127, 127, (2, 3, 3)).astype(np.int64), FixedImpl.FP8x23)
y = np.tril(x, k=0)
x = Tensor(Dtype.FP8x23, x.shape, x.flatten())
y = Tensor(Dtype.FP8x23, y.shape, y.flatten())
name = "tril_fp8x23_square"
make_test(
[x], y, "input_0.trilu(false, 0)", name)
def tril_square_neg():
x = to_fp(np.random.randint(-127, 127, (2, 3, 3)).astype(np.int64), FixedImpl.FP8x23)
y = np.tril(x, k=-1)
x = Tensor(Dtype.FP8x23, x.shape, x.flatten())
y = Tensor(Dtype.FP8x23, y.shape, y.flatten())
name = "tril_fp8x23_square_neg"
            make_test(
[x], y, "input_0.trilu(false, -1)", name)
def tril_zero():
x = to_fp(np.random.randint(-127, 127, (3, 0, 5)).astype(np.int64), FixedImpl.FP8x23)
y = np.tril(x, k=6)
x = Tensor(Dtype.FP8x23, x.shape, x.flatten())
y = Tensor(Dtype.FP8x23, y.shape, y.flatten())
name = "tril_fp8x23_zero"
make_test(
[x], y, "input_0.trilu(false, 6)", name)
def triu():
x = to_fp(np.random.randint(-127, 127, (4, 5)).astype(np.int64), FixedImpl.FP8x23)
y = np.triu(x)
x = Tensor(Dtype.FP8x23, x.shape, x.flatten())
y = Tensor(Dtype.FP8x23, y.shape, y.flatten())
name = "triu_fp8x23"
make_test(
[x], y, "input_0.trilu(true, 0)", name)
def triu_neg():
x = to_fp(np.random.randint(-127, 127, (4, 5)).astype(np.int64), FixedImpl.FP8x23)
y = np.triu(x, k=-1)
x = Tensor(Dtype.FP8x23, x.shape, x.flatten())
y = Tensor(Dtype.FP8x23, y.shape, y.flatten())
name = "triu_fp8x23_neg"
make_test(
[x], y, "input_0.trilu(true, -1)", name)
def triu_one_row():
x = to_fp(np.random.randint(-127, 127, (3, 1, 5)).astype(np.int64), FixedImpl.FP8x23)
y = np.triu(x)
x = Tensor(Dtype.FP8x23, x.shape, x.flatten())
y = Tensor(Dtype.FP8x23, y.shape, y.flatten())
name = "triu_fp8x23_one_row"
make_test(
[x], y, "input_0.trilu(true, 0)", name)
def triu_out_neg():
x = to_fp(np.random.randint(-127, 127, (4, 5)).astype(np.int64), FixedImpl.FP8x23)
y = np.triu(x, k=-7)
x = Tensor(Dtype.FP8x23, x.shape, x.flatten())
y = Tensor(Dtype.FP8x23, y.shape, y.flatten())
name = "triu_fp8x23_out_neg"
make_test(
[x], y, "input_0 |
.trilu(true, -7)", name)
def triu_out_pos():
x = to_fp(np.random.randint(-127, 127, (4, 5)).astype(np.int64), FixedImpl.FP8x23)
y = np.triu(x, k=6)
x = Tensor(Dtype.FP8x23, x.shape, x.flatten())
y = Tensor(Dtype.FP8x23, y.shape, y.flatten())
name = "triu_fp8x23_out_pos"
make_test(
[x], y, "input_0.trilu(true, 6)", name)
def triu_pos():
x = to_fp(np.random.randint(-127, 127, (4, 5)).astype(np.int64), FixedImpl.FP8x23)
y = np.triu(x, k=2)
x = Tensor(Dtype.FP8x23, x.shape, x.flatten())
y = Tensor(Dtype.FP8x23, y.shape, y.flatten())
name = "triu_fp8x23_pos"
make_test(
[x], y, "input_0.trilu(true, 2)", name)
def triu_square():
x = to_fp(np.random.randint(-127, 127, (2, 3, 3)).astype(np.int64), FixedImpl.FP8x23)
y = np.triu(x, k=0)
x = Tensor(Dtype.FP8x23, x.shape, x.flatten())
y = Tensor(Dtype.FP8x23, y.shape, y.flatten())
name = "triu_fp8x23_square"
make_test(
[x], y, "input_0.trilu(true, 0)", name)
def triu_square_neg():
x = to_fp(np.random.randint(-127, 127, (2, 3, 3)).astype(np.int64), FixedImpl.FP8x23)
y = np.triu(x, k=-1)
x = Tensor(Dtype.FP8x23, x.shape, x.flatten())
y = Tensor(Dtype.FP8x23, y.shape, y.flatten())
name = "triu_fp8x23_square_neg"
make_test(
[x], y, "input_0.trilu(true, -1)", name)
def triu_zero():
x = to_fp(np.random.randint(-127, 127, (3, 0, 5)).astype(np.int64), FixedImpl.FP8x23)
y = np.triu(x, k=6)
x = Tensor(Dtype.FP8x23, x.shape, x.flatten())
y = Tensor(Dtype.FP8x23, y.shape, y.flatten())
name = "triu_fp8x23_zero"
make_test(
[x], y, "inp |
ut_0.trilu(true, 6)", name)
tril()
tril_neg()
tril_one_row()
tril_out_neg()
tril_out_pos()
tril_pos()
tril_square()
tril_square_neg()
tril_zero()
triu()
triu_neg()
triu_one_row()
triu_out_neg()
triu_out_pos()
triu_pos()
triu_square()
triu_square_neg()
triu_zero()
@staticmethod
def trilu_fp16x16():
def tril():
x = to_fp(np.random.randint(-127, 127, (4, 5)).astype(np.int64), FixedImpl.FP16x16)
y = np.tril(x)
x = Tensor(Dtype.FP16x16, x.shape, x.flatten())
y = Tensor(Dtype.FP16x16, y.shape, y.flatten())
name = "tril_fp16x16"
make_test(
[x], y, "input_0.trilu(false, 0)", name)
def tril_neg():
x = to_fp(np.random.randint(-127, 127, (4, 5)).astype(np.int64), FixedImpl.FP16x16)
y = np.tril(x, k=-1)
x = Tensor(Dtype.FP16x16, x.shape, x.flatten())
y = Tensor(Dtype.FP16x16, y.shape, y.flatten())
name = "tril_fp16x16_neg"
make_test(
[x], y, "input_0.trilu(false, -1)", name)
def tril_one_row():
x = to_fp(np.random.randint(-127, 127, (3, 1, 5)).astype(np.int64), FixedImpl.FP16x16)
y = np.tril(x)
x = Tensor(Dtype.FP16x16, x.shape, x.flatten())
y = Tensor(Dtype.FP16x16, y.shape, y.flatten())
name = "tril_fp16x16_one_row"
make_test(
[x], y, "input_0.trilu(false, 0)", name)
def tril_out_neg():
x = to_fp(np.random.randint(-127, 127, (4, 5)).astype(np.int64), FixedImpl.FP16x16)
y = np.tril(x, k=-7)
x = Tensor(Dtype.FP16x16, x.shape, x.flatten())
y = Tensor(Dtype.FP16x16, y.shape, y.flatten())
name = "tril_fp16x16_out_neg"
            make_test(
[x], y, "input_0.trilu(false, -7)", name)
def tril_out_pos():
x = to_fp(np.random.randint(-127, 127, (4, 5)).astype(np.int64), FixedImpl.FP16x16)
y = np.tril(x, k=6)
x = Tensor(Dtype.FP16x16, x.shape, x.flatten())
y = Tensor(Dtype.FP16x16, y.shape, y.flatten())
name = "tril_fp16x16_out_pos"
make_test(
[x], y, "input_0.trilu(false, 6)", name)
def tril_pos():
x = to_fp(np.random.randint(-127, 127, (4, 5)).astype(np.int64), FixedImpl.FP16x16)
y = np.tril(x, k=2)
x = Tensor(Dtype.FP16x16, x.shape, x.flatten())
y = Tensor(Dtype.FP16x16, y.shape, y.flatten())
name = "tril_fp16x16_pos"
make_test(
[x], y, "input_0.trilu(false, 2)", name)
def tril_square():
x = to_fp(np.random.randint(-127, 127, (2, 3, 3)).astype(np.int64), FixedImpl.FP16x16)
y = np.tril(x, k=0)
x = Tensor(Dtype.FP16x16, x.shape, x.flatten())
y = Tensor(Dtype.FP16x16, y.shape, y.flatten())
name = "tril_fp16x16_square"
make_test(
[x], y, "input_0.trilu(false, 0)", name)
def tril_square_neg():
x = to_fp(np.random.randint(-127, 127, (2, 3, 3)).astype(np.int64), FixedImpl.FP16x16)
y = np.tril(x, k=-1)
x = Tensor(Dtype.FP16x16, x.shape, x.flatten())
y = Tensor(Dtype.FP16x16, y.shape, y.flatten())
name = "tril_fp16x16_square_neg"
make_test(
[x], y, "input_0.trilu(false, -1)", name)
def tril_zero():
x = to_fp(np.random.randint(-127, 127, (3, 0, 5)).astype(np.int64), FixedImpl.FP16x16)
y = np.tril(x, k=6)
x = Tensor(Dtype.FP16x16, x.shape, x.flatten())
y = Tensor(Dtype.FP16x16, y.shape, y.flatten())
name = "tril_fp16x16_zero" |
make_test(
[x], y, "input_0.trilu(false, 6)", name)
def triu():
x = to_fp(np.random.randint(-127, 127, (4, 5)).astype(np.int64), FixedImpl.FP16x16)
y = np.triu(x)
x = Tensor(Dtype.FP16x16, x.shape, x.flatten())
y = Tensor(Dtype.FP16x16, y.shape, y.flatten())
name = "triu_fp16x16"
make_test(
[x], y, "input_0.trilu(true, 0)", name)
def triu_neg():
x = to_fp(np.random.randint(-127, 127, (4, 5)).astype(np.int64), FixedImpl.FP16x16)
y = np.triu(x, k=-1)
x = Tensor(Dtype.FP16x16, x.shape, x.flatten())
y = Tensor(Dtype.FP16x16, y.shape, y.flatten())
name = "triu_fp16x16_neg"
make_test(
[x], y, "input_0.trilu(true, -1)", name)
def triu_one_row():
x = to_fp(np.random.randint(-127, 127, (3, 1, 5)).astype(np.int64), FixedImpl.FP16x16)
y = np.triu(x)
x = Tensor(Dtype.FP16x16, x.shape, x.flatten())
y = Tensor(Dtype.FP16x16, y.shape, y.flatten())
name = "triu_fp16x16_one_row"
make_test(
[x], y, "input_0.trilu(true, 0)", name)
def triu_out_neg():
x = to_fp(np.random.randint(-127, 127, (4, 5)).astype(np.int64), FixedImpl.FP16x16)
y = np.triu(x, k=-7)
x = Tensor(Dtype.FP16x16, x.shape, x.flatten())
y = Tensor(Dtype.FP16x16, y.shape, y.flatten())
name = "triu_fp16x16_out_neg"
make_test(
[x], y, "input_0.trilu(true, -7)", name)
def triu_out_pos():
x = to_fp(np.random.randint(-127, 127, (4, 5)).astype(np.int64), FixedImpl.FP16x16)
y = np.triu(x, k=6)
x = Tensor(Dtype.FP16x16, x.shape, x.flatten())
y = Tensor(Dtype.FP16x16, y.shape, y.flatten())
name = "triu_fp16x16_out_pos"
            make_test(
[x], y, "input_0.trilu(true, 6)", name)
def triu_pos():
x = to_fp(np.random.randint(-127, 127, (4, 5)).astype(np.int64), FixedImpl.FP16x16)
y = np.triu(x, k=2)
x = Tensor(Dtype.FP16x16, x.shape, x.flatten())
y = Tensor(Dtype.FP16x16, y.shape, y.flatten())
name = "triu_fp16x16_pos"
make_test(
[x], y, "input_0.trilu(true, 2)", name)
def triu_square():
x = to_fp(np.random.randint(-127, 127, (2, 3, 3)).astype(np.int64), FixedImpl.FP16x16)
y = np.triu(x, k=0)
x = Tensor(Dtype.FP16x16, x.shape, x.flatten())
y = Tensor(Dtype.FP16x16, y.shape, y.flatten())
name = "triu_fp16x16_square"
make_test(
[x], y, "input_0.trilu(true, 0)", name)
def triu_square_neg():
x = to_fp(np.random.randint(-127, 127, (2, 3, 3)).astype(np.int64), FixedImpl.FP16x16)
y = np.triu(x, k=-1)
x = Tensor(Dtype.FP16x16, x.shape, x.flatten())
y = Tensor(Dtype.FP16x16, y.shape, y.flatten())
name = "triu_fp16x16_square_neg"
make_test(
[x], y, "input_0.trilu(true, -1)", name)
def triu_zero():
x = to_fp(np.random.randint(-127, 127, (3, 0, 5)).astype(np.int64), FixedImpl.FP16x16)
y = np.triu(x, k=6)
x = Tensor(Dtype.FP16x16, x.shape, x.flatten())
y = Tensor(Dtype.FP16x16, y.shape, y.flatten())
name = "triu_fp16x16_zero"
make_test(
[x], y, "input_0.trilu(true, 6)", name)
tril()
tril_neg()
tril_one_row()
tril_out_neg()
tril_out_pos()
tril_pos()
tril_square()
tril_square_neg()
tril_zero()
triu()
triu_neg()
triu_one_row()
triu_out_neg()
triu_out_pos()
triu_pos()
        triu_square()
triu_square_neg()
        triu_zero()
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl
from typing import Optional
def _unsort_outputs(
x: np.ndarray,
axis: Optional[int],
unique_values: np.ndarray,
indices: np.ndarray,
inverse_indices: np.ndarray,
counts: np.ndarray,
) -> (np.ndarray, np.ndarray, np.ndarray, np.ndarray):
"""Unsort the result of np.unique().
This is done because numpy unique does not retain original order (it sorts
the output unique values).
(see: https:
Code taken from onnx:
https:
"""
argsorted_indices = np.argsort(indices)
inverse_indices_map = dict(
zip(argsorted_indices, np.arange(len(argsorted_indices)))
)
indices = indices[argsorted_indices]
unique_values = np.take(x, indices, axis=axis)
inverse_indices = np.asarray(
[inverse_indices_map[i] for i in inverse_indices], dtype=np.int32
)
counts = counts[argsorted_indices]
    return (unique_values, indices, inverse_indices, counts)
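# Quick illustration of the unsort step (values worked by hand): np.unique
# sorts its outputs, losing first-occurrence order. For x = [2, 0, 2, 1]:
#   np.unique       -> values [0, 1, 2], first indices [1, 3, 0], counts [1, 1, 2]
#   _unsort_outputs -> values [2, 0, 1], indices [0, 1, 3], counts [2, 1, 1]
# i.e. unique values come back in the order they first appear in x.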
class Unique(RunAll):
@staticmethod
def unique_u32():
def without_axis_sorted():
x = np.random.randint(0, 5, (3, 3, 3)).astype(np.uint32)
axis = None
unique_values, indices, inverse_indices, counts = np.unique(
x, axis=axis, return_index=True, return_inverse=True, return_counts=True
)
x = Tensor(Dtype.U32, x.shape, x.flatten())
unique_values = Tensor(
Dtype.U32, unique_values.shape, unique_values.flatten()
)
indices = Tensor(Dtype.I32, indices.shape, indices.flatten())
inverse_indices = Tensor(
Dtype.I32, inverse_indices.shape, inverse_indices.flatten()
)
counts = Tensor(Dtype.I32, counts.shape, counts.flatten())
name = "unique_u32_without_axis_sorted"
make_test(
[x],
(unique_values, indices, inverse_indices, counts),
"input_0.unique(Option::None(()), Option::Some(true))",
name,
)
def without_axis_not_sorted():
x = np.random.randint(0, 5, (3, 3, 3)).astype(np.uint32)
axis = None
unique_values, indices, inverse_indices, counts = np.unique(
x, axis=axis, return_index=True, return_inverse=True, return_counts=True
)
unique_values, indices, inverse_indices, counts = _unsort_outputs(
x, axis, unique_values, indices, inverse_indices, counts
)
x = Tensor(Dtype.U32, x.shape, x.flatten())
unique_values = Tensor(
Dtype.U32, unique_values.shape, unique_values.flatten()
)
indices = Tensor(Dtype.I32, indices.shape, indices.flatten())
inverse_indices = Tensor(
Dtype.I32, inverse_indices.shape, inverse_indices.flatten()
)
counts = Tensor(Dtype.I32, counts.shape, counts.flatten())
name = "unique_u |
32_without_axis_not_sorted"
make_test(
[x],
(unique_values, indices, inverse_indices, counts),
"input_0.unique(Option::None(()), Option::Some(false))",
name,
)
def with_axis_zero_sorted():
x = np.random.randint(0, 5, (3, 3, 3)).astype(np.uint32)
axis = 0
unique_values, indices, inverse_indices, counts = np.unique(
x, axis=axis, return_index=True, return_inverse=True, return_counts=True
)
x = Tensor(Dtype.U32, x.shape, x.flatten())
unique_values = Tensor(
Dtype.U32, unique_values.shape, unique_values.flatten()
)
indices = Tensor(Dtype.I32, indices.shape, indices.flatten())
inverse_indices = Tensor(
Dtype.I32, inverse_indices.shape, inverse_indices.flatten()
)
counts = Tensor(Dtype.I32, counts.shape, counts.flatten())
name = "unique_u32_with_axis_zero_sorted"
make_test(
[x],
(unique_values, indices, inverse_indices, counts),
"input_0.unique(Option::Some(0), Option::Some(true))",
name,
)
def with_axis_zero_not_sorted():
x = np.random.randint(0, 5, (3, 3, 3)).astype(np.uint32)
axis = 0
unique_values, indices, inverse_indices, counts = np.unique(
x, axis=axis, return_index=True, return_inverse=True, return_counts=True
)
unique_values, indices, inverse_indices, counts = _unsort_outputs(
x, axis, unique_values, indices, inverse_indices, counts
)
x = Tensor(Dtype.U32, x.shape, x.flatten())
unique_values = Tensor(
Dtype.U32, unique_values.shape, unique_values.flatten()
)
indices = Tensor(Dtype.I32, indices.shape, indices.flatten())
            inverse_indices = Tensor(
Dtype.I32, inverse_indices.shape, inverse_indices.flatten()
)
counts = Tensor(Dtype.I32, counts.shape, counts.flatten())
name = "unique_u32_with_axis_zero_not_sorted"
make_test(
[x],
(unique_values, indices, inverse_indices, counts),
"input_0.unique(Option::Some(0), Option::Some(false))",
name,
)
def with_axis_one_sorted():
x = np.random.randint(0, 5, (3, 3, 3)).astype(np.uint32)
axis = 1
unique_values, indices, inverse_indices, counts = np.unique(
x, axis=axis, return_index=True, return_inverse=True, return_counts=True
)
x = Tensor(Dtype.U32, x.shape, x.flatten())
unique_values = Tensor(
Dtype.U32, unique_values.shape, unique_values.flatten()
)
indices = Tensor(Dtype.I32, indices.shape, indices.flatten())
inverse_indices = Tensor(
Dtype.I32, inverse_indices.shape, inverse_indices.flatten()
)
counts = Tensor(Dtype.I32, counts.shape, counts.flatten())
name = "unique_u32_with_axis_one_sorted"
make_test(
[x],
(unique_values, indices, inverse_indices, counts),
"input_0.unique(Option::Some(1), Option::Some(true))",
name,
)
def with_axis_one_not_sorted():
x = np.random.randint(0, 5, (3, 3, 3)).astype(np.uint32)
axis = 1
unique_values, indices, inverse_indices, counts = np.unique(
x, axis=axis, return_index=True, return_inverse=True, return_counts=True
)
unique_values, indices, inverse_indices, counts = _unsort_outputs(
x, axis, unique_values, indices, inverse_indices, counts
)
x = Tensor(Dtype.U32, x.shape, x.flatten())
unique_values = Tensor(
                Dtype.U32, unique_values.shape, unique_values.flatten()
)
indices = Tensor(Dtype.I32, indices.shape, indices.flatten())
inverse_indices = Tensor(
Dtype.I32, inverse_indices.shape, inverse_indices.flatten()
)
counts = Tensor(Dtype.I32, counts.shape, counts.flatten())
name = "unique_u32_with_axis_one_not_sorted"
make_test(
[x],
(unique_values, indices, inverse_indices, counts),
"input_0.unique(Option::Some(1), Option::Some(false))",
name,
)
without_axis_sorted()
without_axis_not_sorted()
with_axis_zero_sorted()
with_axis_zero_not_sorted()
with_axis_one_sorted()
with_axis_one_not_sorted()
@staticmethod
def unique_fp16x16():
def without_axis_sorted():
x = np.random.uniform(0, 3, (3, 3, 3)).astype(np.float64)
axis = None
unique_values, indices, inverse_indices, counts = np.unique(
x, axis=axis, return_index=True, return_inverse=True, return_counts=True
)
unique_values = unique_values.astype(np.float16)
x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16))
unique_values = Tensor(
Dtype.FP16x16,
unique_values.shape,
to_fp(unique_values.flatten(), FixedImpl.FP16x16),
)
indices = Tensor(Dtype.I32, indices.shape, indices.flatten())
inverse_indices = Tensor(
Dtype.I32, inverse_indices.shape, inverse_indices.flatten()
)
counts = Tensor(Dtype.I32, counts.shape, counts.flatten())
name = "unique_fp16x16_without_axis_sorted"
make_test(
[x],
(unique_values, indices, inverse_indices, counts),
"input_0.unique(Option::None(()), Option::Some(true))",
                name,
)
def with_axis_zero_sorted():
x = np.random.uniform(-3, 3, (3, 3, 3)).astype(np.float64)
axis = 0
unique_values, indices, inverse_indices, counts = np.unique(
x, axis=axis, return_index=True, return_inverse=True, return_counts=True
)
unique_values = unique_values.astype(np.float16)
x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16))
unique_values = Tensor(
Dtype.FP16x16,
unique_values.shape,
to_fp(unique_values.flatten(), FixedImpl.FP16x16),
)
indices = Tensor(Dtype.I32, indices.shape, indices.flatten())
inverse_indices = Tensor(
Dtype.I32, inverse_indices.shape, inverse_indices.flatten()
)
counts = Tensor(Dtype.I32, counts.shape, counts.flatten())
name = "unique_fp16x16_with_axis_zero_sorted"
make_test(
[x],
(unique_values, indices, inverse_indices, counts),
"input_0.unique(Option::Some(0), Option::Some(true))",
name,
)
without_axis_sorted()
        with_axis_zero_sorted()
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl
class Unsqueeze(RunAll):
@staticmethod
def unsqueeze_u32():
def unsqueeze_2D():
x = np.random.randint(0, 255, (2, 4)).astype(np.uint32)
y = np.expand_dims(x, axis=0)
y = np.expand_dims(y, axis=1)
y = np.expand_dims(y, axis=4)
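            # axes 0, 1 and 4 are inserted, giving shape (1, 1, 2, 4, 1); the Cairo call passes the same axis set as array![1, 4, 0]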
x = Tensor(Dtype.U32, x.shape, x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
name = "unsqueeze_u32_2d"
make_test(
[x], y, "input_0.unsqueeze(array![1, 4, 0].span())", name)
def unsqueeze_3D():
x = np.random.randint(0, 255, (20, 10, 5)).astype(np.uint32)
y = np.expand_dims(x, axis=2)
y = np.expand_dims(y, axis=4)
y = np.expand_dims(y, axis=5)
x = Tensor(Dtype.U32, x.shape, x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
name = "unsqueeze_u32_3d"
make_test(
[x], y, "input_0.unsqueeze(array![5, 4, 2].span())", name)
unsqueeze_2D()
unsqueeze_3D()
@staticmethod
def unsqueeze_i32():
def unsqueeze_2D():
x = np.random.randint(-127, 127, (2, 4)).astype(np.int32)
y = np.expand_dims(x, axis=0)
y = np.expand_dims(y, axis=1)
y = np.expand_dims(y, axis=4)
x = Tensor(Dtype.I32, x.shape, x.flatten())
y = Tensor(Dtype.I32, y.shape, y.flatten())
name = "unsqueeze_i32_2d"
make_test(
[x], y, "input_0.unsqueeze(array![1, 4, 0].span())", name)
def unsqueeze_3D():
x = np.random.randint(-127, 127, (20, 10, 5)).astype(np.int32)
y = np.expand_dims(x, axis=2)
y = np.expand_dims(y, axis=4)
y = np.expand_dims(y, axis=5)
x = Tensor(Dtype.I32, x.shape, x.flatten())
y = Tensor(Dtype.I32, y.shape, y.flatten())
name = "unsqueeze_i32_3d"
make_test(
[x], y, "input_0.unsqueeze(array![5, 4, 2].s |
pan())", name)
unsqueeze_2D()
unsqueeze_3D()
@staticmethod
def unsqueeze_i8():
def unsqueeze_2D():
x = np.random.randint(-127, 127, (2, 4)).astype(np.int8)
y = np.expand_dims(x, axis=0)
y = np.expand_dims(y, axis=1)
y = np.expand_dims(y, axis=4)
x = Tensor(Dtype.I8, x.shape, x.flatten())
y = Tensor(Dtype.I8, y.shape, y.flatten())
name = "unsqueeze_i8_2d"
make_test(
[x], y, "input_0.unsqueeze(array![1, 4, 0].span())", name)
def unsqueeze_3D():
x = np.random.randint(-127, 127, (20, 10, 5)).astype(np.int8)
y = np.expand_dims(x, axis=2)
y = np.expand_dims(y, axis=4)
y = np.expand_dims(y, axis=5)
x = Tensor(Dtype.I8, x.shape, x.flatten())
y = Tensor(Dtype.I8, y.shape, y.flatten())
name = "unsqueeze_i8_3d"
make_test(
[x], y, "input_0.unsqueeze(array![5, 4, 2].span())", name)
unsqueeze_2D()
unsqueeze_3D()
@staticmethod
def unsqueeze_fp8x23():
def unsqueeze_2D():
x = to_fp(np.random.randint(-127, 127, (2, 4)
).astype(np.int64), FixedImpl.FP8x23)
y = np.expand_dims(x, axis=0)
y = np.expand_dims(y, axis=1)
y = np.expand_dims(y, axis=4)
x = Tensor(Dtype.FP8x23, x.shape, x.flatten())
y = Tensor(Dtype.FP8x23, y.shape, y.flatten())
name = "unsqueeze_fp8x23_2d"
make_test(
[x], y, "input_0.unsqueeze(array![1, 4, 0].span())", name)
def unsqueeze_3D():
x = to_fp(np.random.randint(-127, 127, (20, 10, 5)
).astype(np.int64), FixedImpl.FP8x23)
y = np.expand_dims(x, axis=2)
y = np.expand_dims(y, axis=4)
y = np.expand_dims(y, axis=5)
            x = Tensor(Dtype.FP8x23, x.shape, x.flatten())
y = Tensor(Dtype.FP8x23, y.shape, y.flatten())
name = "unsqueeze_fp8x23_3d"
make_test(
[x], y, "input_0.unsqueeze(array![5, 4, 2].span())", name)
unsqueeze_2D()
unsqueeze_3D()
@staticmethod
def unsqueeze_fp16x16():
def unsqueeze_2D():
x = to_fp(np.random.randint(-127, 127, (2, 4)
).astype(np.int64), FixedImpl.FP16x16)
y = np.expand_dims(x, axis=0)
y = np.expand_dims(y, axis=1)
y = np.expand_dims(y, axis=4)
x = Tensor(Dtype.FP16x16, x.shape, x.flatten())
y = Tensor(Dtype.FP16x16, y.shape, y.flatten())
name = "unsqueeze_fp16x16_2d"
make_test(
[x], y, "input_0.unsqueeze(array![1, 4, 0].span())", name)
def unsqueeze_3D():
x = to_fp(np.random.randint(-127, 127, (20, 10, 5)
).astype(np.int64), FixedImpl.FP16x16)
y = np.expand_dims(x, axis=2)
y = np.expand_dims(y, axis=4)
y = np.expand_dims(y, axis=5)
x = Tensor(Dtype.FP16x16, x.shape, x.flatten())
y = Tensor(Dtype.FP16x16, y.shape, y.flatten())
name = "unsqueeze_fp16x16_3d"
make_test(
[x], y, "input_0.unsqueeze(array![5, 4, 2].span())", name)
unsqueeze_2D()
        unsqueeze_3D()
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl
class Where(RunAll):
@staticmethod
def where_u32():
def default():
cond = np.random.choice([1, 0], (3, 3, 3)).astype(np.uint32)
x = np.random.randint(0, 6, (3, 3, 3)).astype(np.uint32)
y = np.random.randint(0, 6, (3, 3, 3)).astype(np.uint32)
z = np.where(cond, x, y).astype(x.dtype)
cond = Tensor(Dtype.U32, cond.shape, cond.flatten())
x = Tensor(Dtype.U32, x.shape, x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
z = Tensor(Dtype.U32, z.shape, z.flatten())
name = "where_u32"
make_test([cond, x, y], z, "input_0.where(@input_1,@input_2)", name)
def broadcast():
cond = np.random.choice([1, 0], (1, 1)).astype(np.uint32)
x = np.random.randint(0, 6, (2, 2)).astype(np.uint32)
y = np.random.randint(0, 6, (1, 2)).astype(np.uint32)
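            # cond (1, 1), x (2, 2) and y (1, 2) broadcast together to a (2, 2) result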
z = np.where(cond, x, y).astype(x.dtype)
cond = Tensor(Dtype.U32, cond.shape, cond.flatten())
x = Tensor(Dtype.U32, x.shape, x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
z = Tensor(Dtype.U32, z.shape, z.flatten())
name = "where_u32_broadcast"
make_test([cond, x, y], z, "input_0.where(@input_1,@input_2)", name)
default()
broadcast()
@staticmethod
def where_i32():
def default():
cond = np.random.choice([1, 0], (3, 3, 3)).astype(np.int32)
x = np.random.randint(0, 6, (3, 3, 3)).astype(np.int32)
y = np.random.randint(0, 6, (3, 3, 3)).astype(np.int32)
z = np.where(cond, x, y).astype(x.dtype)
cond = Tensor(Dtype.I32, cond.shape, cond.flatten())
x = Tensor(Dtype.I32, x.shape, x.flatten())
y = Tensor(Dtype.I32, y.shape, y.flatten())
z = Tensor(Dtype.I32, z.shape, z.flatten())
name = "where_i32"
make_test([cond, x, y], z, "input_0 |
.where(@input_1,@input_2)", name)
def broadcast():
cond = np.random.choice([1, 0], (1, 1)).astype(np.int32)
x = np.random.randint(0, 6, (2, 2)).astype(np.int32)
y = np.random.randint(0, 6, (1, 2)).astype(np.int32)
z = np.where(cond, x, y).astype(x.dtype)
cond = Tensor(Dtype.I32, cond.shape, cond.flatten())
x = Tensor(Dtype.I32, x.shape, x.flatten())
y = Tensor(Dtype.I32, y.shape, y.flatten())
z = Tensor(Dtype.I32, z.shape, z.flatten())
name = "where_i32_broadcast"
make_test([cond, x, y], z, "input_0.where(@input_1,@input_2)", name)
default()
broadcast()
@staticmethod
def where_i8():
def default():
cond = np.random.choice([1, 0], (3, 3, 3)).astype(np.int8)
x = np.random.randint(0, 6, (3, 3, 3)).astype(np.int8)
y = np.random.randint(0, 6, (3, 3, 3)).astype(np.int8)
z = np.where(cond, x, y).astype(x.dtype)
cond = Tensor(Dtype.I8, cond.shape, cond.flatten())
x = Tensor(Dtype.I8, x.shape, x.flatten())
y = Tensor(Dtype.I8, y.shape, y.flatten())
z = Tensor(Dtype.I8, z.shape, z.flatten())
name = "where_i8"
make_test([cond, x, y], z, "input_0.where(@input_1,@input_2)", name)
def broadcast():
cond = np.random.choice([1, 0], (1, 1)).astype(np.int8)
x = np.random.randint(0, 6, (2, 2)).astype(np.int8)
y = np.random.randint(0, 6, (1, 2)).astype(np.int8)
z = np.where(cond, x, y).astype(x.dtype)
cond = Tensor(Dtype.I8, cond.shape, cond.flatten())
x = Tensor(Dtype.I8, x.shape, x.flatten())
y = Tensor(Dtype.I8, y.shape, y.flatten())
z = Tensor(Dtype.I8, z.shape, z.flatten())
name = "where_i8_broadcast"
make_test([cond, x, y], z, "input_0.where(@input_1,@input_2)", name)
default()
        broadcast()
@staticmethod
def where_fp8x23():
def default():
cond = np.random.choice([1, 0], (3, 3, 3)).astype(np.float64)
x = np.random.randint(0, 6, (3, 3, 3)).astype(np.float64)
y = np.random.randint(0, 6, (3, 3, 3)).astype(np.float64)
z = np.where(cond, x, y).astype(x.dtype)
cond = Tensor(Dtype.FP8x23, cond.shape, to_fp(
cond.flatten(), FixedImpl.FP8x23))
x = Tensor(Dtype.FP8x23, x.shape, to_fp(
x.flatten(), FixedImpl.FP8x23))
y = Tensor(Dtype.FP8x23, y.shape, to_fp(
y.flatten(), FixedImpl.FP8x23))
z = Tensor(Dtype.FP8x23, z.shape, to_fp(
z.flatten(), FixedImpl.FP8x23))
name = "where_fp8x23"
make_test([cond, x, y], z, "input_0.where(@input_1,@input_2)", name)
def broadcast():
cond = np.random.choice([1, 0], (1, 1)).astype(np.float64)
x = np.random.randint(0, 6, (2, 2)).astype(np.float64)
y = np.random.randint(0, 6, (1, 2)).astype(np.float64)
z = np.where(cond, x, y).astype(x.dtype)
cond = Tensor(Dtype.FP8x23, cond.shape, to_fp(
cond.flatten(), FixedImpl.FP8x23))
x = Tensor(Dtype.FP8x23, x.shape, to_fp(
x.flatten(), FixedImpl.FP8x23))
y = Tensor(Dtype.FP8x23, y.shape, to_fp(
y.flatten(), FixedImpl.FP8x23))
z = Tensor(Dtype.FP8x23, z.shape, to_fp(
z.flatten(), FixedImpl.FP8x23))
name = "where_fp8x23_broadcast"
make_test([cond, x, y], z, "input_0.where(@input_1,@input_2)", name)
default()
broadcast()
@staticmethod
def where_fp16x16():
def default():
cond = np.random.choice([1, 0], (3, 3, 3)).astype(np.float64)
x = np.random.randint(0, 6, (3, 3, 3)).astype(np.float64)
y = np.random.randint(0, 6, (3, 3, 3)).astype(np.float64) |
z = np.where(cond, x, y).astype(x.dtype)
cond = Tensor(Dtype.FP16x16, cond.shape, to_fp(
cond.flatten(), FixedImpl.FP16x16))
x = Tensor(Dtype.FP16x16, x.shape, to_fp(
x.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(
y.flatten(), FixedImpl.FP16x16))
z = Tensor(Dtype.FP16x16, z.shape, to_fp(
z.flatten(), FixedImpl.FP16x16))
name = "where_fp16x16"
make_test([cond, x, y], z, "input_0.where(@input_1,@input_2)", name)
def broadcast():
cond = np.random.choice([1, 0], (1, 1)).astype(np.float64)
x = np.random.randint(0, 6, (2, 2)).astype(np.float64)
y = np.random.randint(0, 6, (1, 2)).astype(np.float64)
z = np.where(cond, x, y).astype(x.dtype)
cond = Tensor(Dtype.FP16x16, cond.shape, to_fp(
cond.flatten(), FixedImpl.FP16x16))
x = Tensor(Dtype.FP16x16, x.shape, to_fp(
x.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(
y.flatten(), FixedImpl.FP16x16))
z = Tensor(Dtype.FP16x16, z.shape, to_fp(
z.flatten(), FixedImpl.FP16x16))
name = "where_fp16x16_broadcast"
make_test([cond, x, y], z, "input_0.where(@input_1,@input_2)", name)
default()
broadcast() |
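# Illustrative note (not part of the generator): in the broadcast cases above,
# np.where broadcasts the (1, 1) condition against x and y, e.g.
#   np.where(np.array([[0]]), np.array([[1, 2], [3, 4]]), np.array([[9, 8]]))
# returns array([[9, 8], [9, 8]]): with a falsy condition every element comes
# from y, broadcast up to x's shape.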
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl
class Xor(RunAll):
@staticmethod
def xor_u32():
def default():
x = np.random.randint(0, 6, (3, 3, 3)).astype(np.uint32)
y = np.random.randint(0, 6, (3, 3, 3)).astype(np.uint32)
z = np.logical_xor(x, y)
x = Tensor(Dtype.U32, x.shape, x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
z = Tensor(Dtype.U32, z.shape, z.flatten())
name = "xor_u32"
make_test([x, y], z, "input_0.xor(@input_1)", name)
def broadcast():
x = np.random.randint(0, 6, (2, 2)).astype(np.uint32)
y = np.random.randint(0, 6, (1, 2)).astype(np.uint32)
z = np.logical_xor(x, y)
x = Tensor(Dtype.U32, x.shape, x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
z = Tensor(Dtype.U32, z.shape, z.flatten())
name = "xor_u32_broadcast"
make_test([x, y], z, "input_0.xor(@input_1)", name)
default()
broadcast()
@staticmethod
def xor_i32():
def default():
x = np.random.randint(-3, 3, (3, 3, 3)).astype(np.int32)
y = np.random.randint(-3, 3, (3, 3, 3)).astype(np.int32)
z = np.logical_xor(x, y)
x = Tensor(Dtype.I32, x.shape, x.flatten())
y = Tensor(Dtype.I32, y.shape, y.flatten())
z = Tensor(Dtype.U32, z.shape, z.flatten())
name = "xor_i32"
make_test([x, y], z, "input_0.xor(@input_1)", name)
def broadcast():
x = np.random.randint(-3, 3, (2, 2)).astype(np.int32)
y = np.random.randint(-3, 3, (1, 2)).astype(np.int32)
z = np.logical_xor(x, y)
x = Tensor(Dtype.I32, x.shape, x.flatten())
y = Tensor(Dtype.I32, y.shape, y.flatten())
z = Tensor(Dtype.U32, z.shape, z.flatten())
name = "xor_i32_broadcast"
make_test([x, y], z, "input_0.xor(@input_1)", name)
default()
broadcast()
    @staticmethod
def xor_i8():
def default():
x = np.random.randint(-3, 3, (3, 3, 3)).astype(np.int8)
y = np.random.randint(-3, 3, (3, 3, 3)).astype(np.int8)
z = np.logical_xor(x, y)
x = Tensor(Dtype.I8, x.shape, x.flatten())
y = Tensor(Dtype.I8, y.shape, y.flatten())
z = Tensor(Dtype.U32, z.shape, z.flatten())
name = "xor_i8"
make_test([x, y], z, "input_0.xor(@input_1)", name)
def broadcast():
x = np.random.randint(-3, 3, (2, 2)).astype(np.int8)
y = np.random.randint(-3, 3, (1, 2)).astype(np.int8)
z = np.logical_xor(x, y)
x = Tensor(Dtype.I8, x.shape, x.flatten())
y = Tensor(Dtype.I8, y.shape, y.flatten())
z = Tensor(Dtype.U32, z.shape, z.flatten())
name = "xor_i8_broadcast"
make_test([x, y], z, "input_0.xor(@input_1)", name)
default()
broadcast()
@staticmethod
def xor_fp8x23():
def default():
x = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
y = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
z = np.logical_xor(x, y)
x = Tensor(Dtype.FP8x23, x.shape, to_fp(
x.flatten(), FixedImpl.FP8x23))
y = Tensor(Dtype.FP8x23, y.shape, to_fp(
y.flatten(), FixedImpl.FP8x23))
z = Tensor(Dtype.U32, z.shape, z.flatten())
name = "xor_fp8x23"
make_test([x, y], z, "input_0.xor(@input_1)", name)
def broadcast():
x = np.random.randint(-3, 3, (2, 2)).astype(np.float64)
y = np.random.randint(-3, 3, (1, 2)).astype(np.float64)
z = np.logical_xor(x, y)
x = Tensor(Dtype.FP8x23, x.shape, to_fp(
x.flatten(), FixedImpl.FP8x23))
y = Tensor(Dtype.FP8x23, y.shape, to_fp(
y.flatten(), FixedImpl.FP8x23))
z = Tensor(Dtype.U32, z.shape, z.flatten()) |
name = "xor_fp8x23_broadcast"
make_test([x, y], z, "input_0.xor(@input_1)", name)
default()
broadcast()
@staticmethod
def xor_fp16x16():
def default():
            x = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
            y = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
z = np.logical_xor(x, y)
x = Tensor(Dtype.FP16x16, x.shape, to_fp(
x.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(
y.flatten(), FixedImpl.FP16x16))
z = Tensor(Dtype.U32, z.shape, z.flatten())
name = "xor_fp16x16"
make_test([x, y], z, "input_0.xor(@input_1)", name)
def broadcast():
x = np.random.randint(-3, 3, (2, 2)).astype(np.float64)
y = np.random.randint(-3, 3, (1, 2)).astype(np.float64)
z = np.logical_xor(x, y)
x = Tensor(Dtype.FP16x16, x.shape, to_fp(
x.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(
y.flatten(), FixedImpl.FP16x16))
z = Tensor(Dtype.U32, z.shape, z.flatten())
name = "xor_fp16x16_broadcast"
make_test([x, y], z, "input_0.xor(@input_1)", name)
default()
broadcast() |
import os
import glob
import subprocess
# Directory path where Python files/modules are located
directory_path = 'nodegen/node/'
# Get all files in the directory
all_files = os.listdir(directory_path)
# Keep only Python files, stripping the '.py' suffix to get module names
python_files = [file[:-3] for file in all_files if file.endswith('.py')]
# Report the interpreter version before any generators are run
command = 'python --version'
os.system(command) |
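# Minimal sketch (assumption, not part of the captured script): `python_files`
# is collected above but never used, which suggests the script was meant to go
# on and drive the generator modules. One plausible continuation:
import importlib
for module_name in python_files:
    # Generator modules whose make_test calls sit at module top level run
    # as a side effect of the import itself.
    importlib.import_module(f'nodegen.node.{module_name}')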
mod operators;
mod numbers;
mod utils;
mod test_helper;
|
mod fixed_point;
mod complex_number;
use orion::numbers::fixed_point::core::FixedTrait;
use orion::numbers::fixed_point::implementations::fp8x23::core::{ONE as ONE_fp8x23};
use orion::numbers::fixed_point::implementations::fp16x16::core::{ONE as ONE_fp16x16};
use orion::numbers::fixed_point::implementations::fp64x64::core::{ONE as ONE_fp64x64};
use orion::numbers::fixed_point::implementations::fp32x32::core::{ONE as ONE_fp32x32};
trait NumberTrait<T, MAG> {
fn new(mag: MAG, sign: bool) -> T;
fn new_unscaled(mag: MAG, sign: bool) -> T;
fn from_felt(val: felt252) -> T;
fn abs(self: T) -> T;
fn neg(self: T) -> T;
fn ceil(self: T) -> T;
fn exp(self: T) -> T;
fn exp2(self: T) -> T;
fn floor(self: T) -> T;
fn ln(self: T) -> T;
fn log2(self: T) -> T;
fn log10(self: T) -> T;
fn pow(self: T, b: T) -> T;
fn round(self: T) -> T;
fn sqrt(self: T) -> T;
fn acos(self: T) -> T;
fn asin(self: T) -> T;
fn atan(self: T) -> T;
fn cos(self: T) -> T;
fn sin(self: T) -> T;
fn tan(self: T) -> T;
fn acosh(self: T) -> T;
fn asinh(self: T) -> T;
fn atanh(self: T) -> T;
fn cosh(self: T) -> T;
fn sinh(self: T) -> T;
fn tanh(self: T) -> T;
fn zero() -> T;
fn is_zero(self: T) -> bool;
fn half() -> T;
fn one() -> T;
fn is_one(self: T) -> bool;
fn neg_one() -> T;
fn min_value() -> T;
fn max_value() -> T;
fn min(self: T, other: T) -> T;
fn max(self: T, other: T) -> T;
fn mag(self: T) -> MAG;
fn is_neg(self: T) -> bool;
fn xor(lhs: T, rhs: T) -> bool;
fn or(lhs: T, rhs: T) -> bool;
fn sign(self: T) -> T;
fn and(lhs: T, rhs: T) -> bool;
fn where(self: T, x: T, y: T) -> T;
fn NaN() -> T;
fn is_nan(self: T) -> bool;
fn INF() -> T;
fn is_inf(self: T) -> bool;
fn is_pos_inf(self: T) -> bool;
fn is_neg_inf(self: T) -> bool;
fn bitwise_and(lhs: T, rhs: T) -> T;
fn bitwise_xor(lhs: T, rhs: T) -> T;
fn bitwise_or(lhs: T, rhs: T) -> T;
    fn add(lhs: T, rhs: T) -> T;
fn sub(lhs: T, rhs: T) -> T;
}
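// Illustrative sketch (not part of the crate): a helper written purely against
// NumberTrait; `saturate_to_unit` is a hypothetical name used only here.
fn saturate_to_unit<T, MAG, impl TNumber: NumberTrait<T, MAG>>(x: T) -> T {
    // Clamp x into [-1, 1] via the trait's comparison and constant helpers.
    TNumber::min(TNumber::max(x, TNumber::neg_one()), TNumber::one())
}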
use orion::numbers::fixed_point::implementations::fp8x23::core::{
FP8x23Impl, FP8x23, FP8x23Add, FP8x23Sub
};
use orion::numbers::fixed_point::implementations::fp8x23::math::core as core_fp8x23;
use orion::numbers::fixed_point::implementations::fp8x23::math::comp as comp_fp8x23;
impl FP8x23Number of NumberTrait<FP8x23, u32> {
fn new(mag: u32, sign: bool) -> FP8x23 {
FP8x23Impl::new(mag, sign)
}
fn new_unscaled(mag: u32, sign: bool) -> FP8x23 {
FP8x23Impl::new_unscaled(mag, sign)
}
fn from_felt(val: felt252) -> FP8x23 {
FP8x23Impl::from_felt(val)
}
fn ceil(self: FP8x23) -> FP8x23 {
FP8x23Impl::ceil(self)
}
fn exp(self: FP8x23) -> FP8x23 {
FP8x23Impl::exp(self)
}
fn exp2(self: FP8x23) -> FP8x23 {
FP8x23Impl::exp2(self)
}
fn floor(self: FP8x23) -> FP8x23 {
FP8x23Impl::floor(self)
}
fn ln(self: FP8x23) -> FP8x23 {
FP8x23Impl::ln(self)
}
fn log2(self: FP8x23) -> FP8x23 {
FP8x23Impl::log2(self)
}
fn log10(self: FP8x23) -> FP8x23 {
FP8x23Impl::log10(self)
}
fn pow(self: FP8x23, b: FP8x23) -> FP8x23 {
FP8x23Impl::pow(self, b)
}
fn round(self: FP8x23) -> FP8x23 {
FP8x23Impl::round(self)
}
fn sqrt(self: FP8x23) -> FP8x23 {
FP8x23Impl::sqrt(self)
}
fn acos(self: FP8x23) -> FP8x23 {
FP8x23Impl::acos(self)
}
fn asin(self: FP8x23) -> FP8x23 {
FP8x23Impl::asin(self)
}
fn atan(self: FP8x23) -> FP8x23 {
FP8x23Impl::atan(self)
}
fn cos(self: FP8x23) -> FP8x23 {
FP8x23Impl::cos(self)
}
fn sin(self: FP8x23) -> FP8x23 {
FP8x23Impl::sin(self)
}
fn tan(self: FP8x23) -> FP8x23 {
FP8x23Impl::tan(self)
}
fn acosh(self: FP8x23) -> FP8x23 {
FP8x23Impl::acosh(self)
}
fn asinh(self: FP8x23) -> FP8x23 {
FP8x23Impl::asinh(self)
} |
fn atanh(self: FP8x23) -> FP8x23 {
FP8x23Impl::atanh(self)
}
fn cosh(self: FP8x23) -> FP8x23 {
FP8x23Impl::cosh(self)
}
fn sinh(self: FP8x23) -> FP8x23 {
FP8x23Impl::sinh(self)
}
fn tanh(self: FP8x23) -> FP8x23 {
FP8x23Impl::tanh(self)
}
fn zero() -> FP8x23 {
FP8x23Impl::ZERO()
}
fn is_zero(self: FP8x23) -> bool {
core_fp8x23::eq(@self, @FP8x23Impl::ZERO())
}
fn half() -> FP8x23 {
FP8x23Impl::HALF()
}
fn one() -> FP8x23 {
FP8x23Impl::ONE()
}
fn neg_one() -> FP8x23 {
FP8x23 { mag: core_fp8x23::ONE, sign: true }
}
fn is_one(self: FP8x23) -> bool {
core_fp8x23::eq(@self, @FP8x23Impl::ONE())
}
fn abs(self: FP8x23) -> FP8x23 {
core_fp8x23::abs(self)
}
fn neg(self: FP8x23) -> FP8x23 {
core_fp8x23::neg(self)
}
fn min_value() -> FP8x23 {
FP8x23 { mag: core_fp8x23::MAX, sign: true }
}
fn max_value() -> FP8x23 {
FP8x23 { mag: core_fp8x23::MAX, sign: false }
}
fn min(self: FP8x23, other: FP8x23) -> FP8x23 {
comp_fp8x23::min(self, other)
}
fn max(self: FP8x23, other: FP8x23) -> FP8x23 {
comp_fp8x23::max(self, other)
}
fn mag(self: FP8x23) -> u32 {
self.mag
}
fn is_neg(self: FP8x23) -> bool {
self.sign
}
fn xor(lhs: FP8x23, rhs: FP8x23) -> bool {
comp_fp8x23::xor(lhs, rhs)
}
fn or(lhs: FP8x23, rhs: FP8x23) -> bool {
comp_fp8x23::or(lhs, rhs)
}
fn sign(self: FP8x23) -> FP8x23 {
core_fp8x23::sign(self)
}
fn and(lhs: FP8x23, rhs: FP8x23) -> bool {
comp_fp8x23::and(lhs, rhs)
}
fn where(self: FP8x23, x: FP8x23, y: FP8x23) -> FP8x23 {
comp_fp8x23::where(self, x, y)
}
fn NaN() -> FP8x23 {
FP8x23Impl::NaN()
}
fn is_nan(self: FP8x23) -> bool {
FP8x23Impl::is_nan(self)
}
fn INF() -> FP8x23 {
        FP8x23Impl::INF()
}
fn is_inf(self: FP8x23) -> bool {
FP8x23Impl::is_inf(self)
}
fn is_pos_inf(self: FP8x23) -> bool {
FP8x23Impl::is_pos_inf(self)
}
fn is_neg_inf(self: FP8x23) -> bool {
FP8x23Impl::is_neg_inf(self)
}
fn bitwise_and(lhs: FP8x23, rhs: FP8x23) -> FP8x23 {
comp_fp8x23::bitwise_and(lhs, rhs)
}
fn bitwise_xor(lhs: FP8x23, rhs: FP8x23) -> FP8x23 {
comp_fp8x23::bitwise_xor(lhs, rhs)
}
fn bitwise_or(lhs: FP8x23, rhs: FP8x23) -> FP8x23 {
comp_fp8x23::bitwise_or(lhs, rhs)
}
fn add(lhs: FP8x23, rhs: FP8x23) -> FP8x23 {
FP8x23Add::add(lhs, rhs)
}
fn sub(lhs: FP8x23, rhs: FP8x23) -> FP8x23 {
FP8x23Sub::sub(lhs, rhs)
}
}
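// Example (illustrative): FP8x23Number::new_unscaled(2_u32, true) scales the
// magnitude by ONE (2^23), producing FP8x23 { mag: 2 * 8388608, sign: true },
// i.e. -2.0; new(mag, sign) takes an already-scaled raw magnitude instead.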
use orion::numbers::fixed_point::implementations::fp8x23wide::core::{
FP8x23WImpl, FP8x23W, FP8x23WAdd, FP8x23WSub
};
use orion::numbers::fixed_point::implementations::fp8x23wide::math::core as core_fp8x23wide;
use orion::numbers::fixed_point::implementations::fp8x23wide::math::comp as comp_fp8x23wide;
impl FP8x23WNumber of NumberTrait<FP8x23W, u64> {
fn new(mag: u64, sign: bool) -> FP8x23W {
FP8x23WImpl::new(mag, sign)
}
fn new_unscaled(mag: u64, sign: bool) -> FP8x23W {
FP8x23WImpl::new_unscaled(mag, sign)
}
fn from_felt(val: felt252) -> FP8x23W {
FP8x23WImpl::from_felt(val)
}
fn ceil(self: FP8x23W) -> FP8x23W {
FP8x23WImpl::ceil(self)
}
fn exp(self: FP8x23W) -> FP8x23W {
FP8x23WImpl::exp(self)
}
fn exp2(self: FP8x23W) -> FP8x23W {
FP8x23WImpl::exp2(self)
}
fn floor(self: FP8x23W) -> FP8x23W {
FP8x23WImpl::floor(self)
}
fn ln(self: FP8x23W) -> FP8x23W {
FP8x23WImpl::ln(self)
}
fn log2(self: FP8x23W) -> FP8x23W {
FP8x23WImpl::log2(self)
}
fn log10(self: FP8x23W) -> FP8x23W {
FP8x23WImpl::log10(self)
}
fn pow(self: FP8x23W, b: FP8x23W) -> FP8x23W {
FP8x23WImpl::pow(self, b)
}
    fn round(self: FP8x23W) -> FP8x23W {
FP8x23WImpl::round(self)
}
fn sqrt(self: FP8x23W) -> FP8x23W {
FP8x23WImpl::sqrt(self)
}
fn acos(self: FP8x23W) -> FP8x23W {
FP8x23WImpl::acos(self)
}
fn asin(self: FP8x23W) -> FP8x23W {
FP8x23WImpl::asin(self)
}
fn atan(self: FP8x23W) -> FP8x23W {
FP8x23WImpl::atan(self)
}
fn cos(self: FP8x23W) -> FP8x23W {
FP8x23WImpl::cos(self)
}
fn sin(self: FP8x23W) -> FP8x23W {
FP8x23WImpl::sin(self)
}
fn tan(self: FP8x23W) -> FP8x23W {
FP8x23WImpl::tan(self)
}
fn acosh(self: FP8x23W) -> FP8x23W {
FP8x23WImpl::acosh(self)
}
fn asinh(self: FP8x23W) -> FP8x23W {
FP8x23WImpl::asinh(self)
}
fn atanh(self: FP8x23W) -> FP8x23W {
FP8x23WImpl::atanh(self)
}
fn cosh(self: FP8x23W) -> FP8x23W {
FP8x23WImpl::cosh(self)
}
fn sinh(self: FP8x23W) -> FP8x23W {
FP8x23WImpl::sinh(self)
}
fn tanh(self: FP8x23W) -> FP8x23W {
FP8x23WImpl::tanh(self)
}
fn zero() -> FP8x23W {
FP8x23WImpl::ZERO()
}
fn is_zero(self: FP8x23W) -> bool {
core_fp8x23wide::eq(@self, @FP8x23WImpl::ZERO())
}
fn half() -> FP8x23W {
FP8x23WImpl::HALF()
}
fn one() -> FP8x23W {
FP8x23WImpl::ONE()
}
fn neg_one() -> FP8x23W {
FP8x23W { mag: core_fp8x23wide::ONE, sign: true }
}
fn is_one(self: FP8x23W) -> bool {
core_fp8x23wide::eq(@self, @FP8x23WImpl::ONE())
}
fn abs(self: FP8x23W) -> FP8x23W {
core_fp8x23wide::abs(self)
}
fn neg(self: FP8x23W) -> FP8x23W {
core_fp8x23wide::neg(self)
}
fn min_value() -> FP8x23W {
FP8x23W { mag: core_fp8x23wide::MAX, sign: true }
}
fn max_value() -> FP8x23W {
FP8x23W { mag: core_fp8x23wide::MAX, sign: false }
}
fn min(self: FP8x23W, other: FP8x23W) -> FP8x23W {
comp_fp8x23wide::min(self, other)
} |
fn max(self: FP8x23W, other: FP8x23W) -> FP8x23W {
comp_fp8x23wide::max(self, other)
}
fn mag(self: FP8x23W) -> u64 {
self.mag
}
fn is_neg(self: FP8x23W) -> bool {
self.sign
}
fn xor(lhs: FP8x23W, rhs: FP8x23W) -> bool {
comp_fp8x23wide::xor(lhs, rhs)
}
fn or(lhs: FP8x23W, rhs: FP8x23W) -> bool {
comp_fp8x23wide::or(lhs, rhs)
}
fn sign(self: FP8x23W) -> FP8x23W {
core_fp8x23wide::sign(self)
}
fn and(lhs: FP8x23W, rhs: FP8x23W) -> bool {
comp_fp8x23wide::and(lhs, rhs)
}
fn where(self: FP8x23W, x: FP8x23W, y: FP8x23W) -> FP8x23W {
comp_fp8x23wide::where(self, x, y)
}
fn NaN() -> FP8x23W {
FP8x23WImpl::NaN()
}
fn is_nan(self: FP8x23W) -> bool {
FP8x23WImpl::is_nan(self)
}
fn INF() -> FP8x23W {
FP8x23WImpl::INF()
}
fn is_inf(self: FP8x23W) -> bool {
FP8x23WImpl::is_inf(self)
}
fn is_pos_inf(self: FP8x23W) -> bool {
FP8x23WImpl::is_pos_inf(self)
}
fn is_neg_inf(self: FP8x23W) -> bool {
FP8x23WImpl::is_neg_inf(self)
}
fn bitwise_and(lhs: FP8x23W, rhs: FP8x23W) -> FP8x23W {
comp_fp8x23wide::bitwise_and(lhs, rhs)
}
fn bitwise_xor(lhs: FP8x23W, rhs: FP8x23W) -> FP8x23W {
comp_fp8x23wide::bitwise_xor(lhs, rhs)
}
fn bitwise_or(lhs: FP8x23W, rhs: FP8x23W) -> FP8x23W {
comp_fp8x23wide::bitwise_or(lhs, rhs)
}
fn add(lhs: FP8x23W, rhs: FP8x23W) -> FP8x23W {
FP8x23WAdd::add(lhs, rhs)
}
fn sub(lhs: FP8x23W, rhs: FP8x23W) -> FP8x23W {
FP8x23WSub::sub(lhs, rhs)
}
}
use orion::numbers::fixed_point::implementations::fp16x16::core::{
FP16x16Impl, FP16x16, FP16x16Add, FP16x16Sub
};
use orion::numbers::fixed_point::implementations::fp16x16::math::core as core_fp16x16;
use orion::numbers::fixed_point::implementations::fp16x16::math::comp as comp_fp16x16;
impl FP16x16Number of NumberTrait<FP16x16, u32> {
    fn new(mag: u32, sign: bool) -> FP16x16 {
FP16x16Impl::new(mag, sign)
}
fn new_unscaled(mag: u32, sign: bool) -> FP16x16 {
FP16x16Impl::new_unscaled(mag, sign)
}
fn from_felt(val: felt252) -> FP16x16 {
FP16x16Impl::from_felt(val)
}
fn ceil(self: FP16x16) -> FP16x16 {
FP16x16Impl::ceil(self)
}
fn exp(self: FP16x16) -> FP16x16 {
FP16x16Impl::exp(self)
}
fn exp2(self: FP16x16) -> FP16x16 {
FP16x16Impl::exp2(self)
}
fn floor(self: FP16x16) -> FP16x16 {
FP16x16Impl::floor(self)
}
fn ln(self: FP16x16) -> FP16x16 {
FP16x16Impl::ln(self)
}
fn log2(self: FP16x16) -> FP16x16 {
FP16x16Impl::log2(self)
}
fn log10(self: FP16x16) -> FP16x16 {
FP16x16Impl::log10(self)
}
fn pow(self: FP16x16, b: FP16x16) -> FP16x16 {
FP16x16Impl::pow(self, b)
}
fn round(self: FP16x16) -> FP16x16 {
FP16x16Impl::round(self)
}
fn sqrt(self: FP16x16) -> FP16x16 {
FP16x16Impl::sqrt(self)
}
fn acos(self: FP16x16) -> FP16x16 {
FP16x16Impl::acos(self)
}
fn asin(self: FP16x16) -> FP16x16 {
FP16x16Impl::asin(self)
}
fn atan(self: FP16x16) -> FP16x16 {
FP16x16Impl::atan(self)
}
fn cos(self: FP16x16) -> FP16x16 {
FP16x16Impl::cos(self)
}
fn sin(self: FP16x16) -> FP16x16 {
FP16x16Impl::sin(self)
}
fn tan(self: FP16x16) -> FP16x16 {
FP16x16Impl::tan(self)
}
fn acosh(self: FP16x16) -> FP16x16 {
FP16x16Impl::acosh(self)
}
fn asinh(self: FP16x16) -> FP16x16 {
FP16x16Impl::asinh(self)
}
fn atanh(self: FP16x16) -> FP16x16 {
FP16x16Impl::atanh(self)
}
fn cosh(self: FP16x16) -> FP16x16 {
FP16x16Impl::cosh(self)
}
fn sinh(self: FP16x16) -> FP16x16 {
FP16x16Impl::sinh(self)
}
fn tanh(self: FP16x16) -> FP16x16 {
FP16x16Impl::tanh(self)
}
fn zero() -> FP16x16 { |
FP16x16Impl::ZERO()
}
fn is_zero(self: FP16x16) -> bool {
core_fp16x16::eq(@self, @FP16x16Impl::ZERO())
}
fn half() -> FP16x16 {
FP16x16Impl::HALF()
}
fn one() -> FP16x16 {
FP16x16Impl::ONE()
}
fn neg_one() -> FP16x16 {
FP16x16 { mag: core_fp16x16::ONE, sign: true }
}
fn is_one(self: FP16x16) -> bool {
core_fp16x16::eq(@self, @FP16x16Impl::ONE())
}
fn abs(self: FP16x16) -> FP16x16 {
core_fp16x16::abs(self)
}
fn neg(self: FP16x16) -> FP16x16 {
core_fp16x16::neg(self)
}
fn min_value() -> FP16x16 {
FP16x16 { mag: core_fp16x16::MAX, sign: true }
}
fn max_value() -> FP16x16 {
FP16x16 { mag: core_fp16x16::MAX, sign: false }
}
fn min(self: FP16x16, other: FP16x16) -> FP16x16 {
comp_fp16x16::min(self, other)
}
fn max(self: FP16x16, other: FP16x16) -> FP16x16 {
comp_fp16x16::max(self, other)
}
fn mag(self: FP16x16) -> u32 {
self.mag
}
fn is_neg(self: FP16x16) -> bool {
self.sign
}
fn xor(lhs: FP16x16, rhs: FP16x16) -> bool {
comp_fp16x16::xor(lhs, rhs)
}
fn or(lhs: FP16x16, rhs: FP16x16) -> bool {
comp_fp16x16::or(lhs, rhs)
}
fn sign(self: FP16x16) -> FP16x16 {
core_fp16x16::sign(self)
}
fn and(lhs: FP16x16, rhs: FP16x16) -> bool {
comp_fp16x16::and(lhs, rhs)
}
fn where(self: FP16x16, x: FP16x16, y: FP16x16) -> FP16x16 {
comp_fp16x16::where(self, x, y)
}
fn NaN() -> FP16x16 {
FP16x16Impl::NaN()
}
fn is_nan(self: FP16x16) -> bool {
FP16x16Impl::is_nan(self)
}
fn INF() -> FP16x16 {
FP16x16Impl::INF()
}
fn is_inf(self: FP16x16) -> bool {
FP16x16Impl::is_inf(self)
}
fn is_pos_inf(self: FP16x16) -> bool {
FP16x16Impl::is_pos_inf(self)
}
fn is_neg_inf(self: FP16x16) -> bool {
FP16x16Impl::is_neg_inf(self)
}
    fn bitwise_and(lhs: FP16x16, rhs: FP16x16) -> FP16x16 {
comp_fp16x16::bitwise_and(lhs, rhs)
}
fn bitwise_xor(lhs: FP16x16, rhs: FP16x16) -> FP16x16 {
comp_fp16x16::bitwise_xor(lhs, rhs)
}
fn bitwise_or(lhs: FP16x16, rhs: FP16x16) -> FP16x16 {
comp_fp16x16::bitwise_or(lhs, rhs)
}
fn add(lhs: FP16x16, rhs: FP16x16) -> FP16x16 {
FP16x16Add::add(lhs, rhs)
}
fn sub(lhs: FP16x16, rhs: FP16x16) -> FP16x16 {
FP16x16Sub::sub(lhs, rhs)
}
}
use orion::numbers::fixed_point::implementations::fp16x16wide::core::{
FP16x16WImpl, FP16x16W, FP16x16WAdd, FP16x16WSub
};
use orion::numbers::fixed_point::implementations::fp16x16wide::math::core as core_fp16x16wide;
use orion::numbers::fixed_point::implementations::fp16x16wide::math::comp as comp_fp16x16wide;
impl FP16x16WNumber of NumberTrait<FP16x16W, u64> {
fn new(mag: u64, sign: bool) -> FP16x16W {
FP16x16WImpl::new(mag, sign)
}
fn new_unscaled(mag: u64, sign: bool) -> FP16x16W {
FP16x16WImpl::new_unscaled(mag, sign)
}
fn from_felt(val: felt252) -> FP16x16W {
FP16x16WImpl::from_felt(val)
}
fn ceil(self: FP16x16W) -> FP16x16W {
FP16x16WImpl::ceil(self)
}
fn exp(self: FP16x16W) -> FP16x16W {
FP16x16WImpl::exp(self)
}
fn exp2(self: FP16x16W) -> FP16x16W {
FP16x16WImpl::exp2(self)
}
fn floor(self: FP16x16W) -> FP16x16W {
FP16x16WImpl::floor(self)
}
fn ln(self: FP16x16W) -> FP16x16W {
FP16x16WImpl::ln(self)
}
fn log2(self: FP16x16W) -> FP16x16W {
FP16x16WImpl::log2(self)
}
fn log10(self: FP16x16W) -> FP16x16W {
FP16x16WImpl::log10(self)
}
fn pow(self: FP16x16W, b: FP16x16W) -> FP16x16W {
FP16x16WImpl::pow(self, b)
}
fn round(self: FP16x16W) -> FP16x16W {
FP16x16WImpl::round(self)
}
fn sqrt(self: FP16x16W) -> FP16x16W {
FP16x16WImpl::sqrt(self)
}
fn acos(self: FP16x16W) -> FP16x16W { |
FP16x16WImpl::acos(self)
}
fn asin(self: FP16x16W) -> FP16x16W {
FP16x16WImpl::asin(self)
}
fn atan(self: FP16x16W) -> FP16x16W {
FP16x16WImpl::atan(self)
}
fn cos(self: FP16x16W) -> FP16x16W {
FP16x16WImpl::cos(self)
}
fn sin(self: FP16x16W) -> FP16x16W {
FP16x16WImpl::sin(self)
}
fn tan(self: FP16x16W) -> FP16x16W {
FP16x16WImpl::tan(self)
}
fn acosh(self: FP16x16W) -> FP16x16W {
FP16x16WImpl::acosh(self)
}
fn asinh(self: FP16x16W) -> FP16x16W {
FP16x16WImpl::asinh(self)
}
fn atanh(self: FP16x16W) -> FP16x16W {
FP16x16WImpl::atanh(self)
}
fn cosh(self: FP16x16W) -> FP16x16W {
FP16x16WImpl::cosh(self)
}
fn sinh(self: FP16x16W) -> FP16x16W {
FP16x16WImpl::sinh(self)
}
fn tanh(self: FP16x16W) -> FP16x16W {
FP16x16WImpl::tanh(self)
}
fn zero() -> FP16x16W {
FP16x16WImpl::ZERO()
}
fn is_zero(self: FP16x16W) -> bool {
core_fp16x16wide::eq(@self, @FP16x16WImpl::ZERO())
}
fn half() -> FP16x16W {
FP16x16WImpl::HALF()
}
fn one() -> FP16x16W {
FP16x16WImpl::ONE()
}
fn neg_one() -> FP16x16W {
FP16x16W { mag: core_fp16x16wide::ONE, sign: true }
}
fn is_one(self: FP16x16W) -> bool {
core_fp16x16wide::eq(@self, @FP16x16WImpl::ONE())
}
fn abs(self: FP16x16W) -> FP16x16W {
core_fp16x16wide::abs(self)
}
fn neg(self: FP16x16W) -> FP16x16W {
core_fp16x16wide::neg(self)
}
fn min_value() -> FP16x16W {
FP16x16W { mag: core_fp16x16wide::MAX, sign: true }
}
fn max_value() -> FP16x16W {
FP16x16W { mag: core_fp16x16wide::MAX, sign: false }
}
fn min(self: FP16x16W, other: FP16x16W) -> FP16x16W {
comp_fp16x16wide::min(self, other)
}
fn max(self: FP16x16W, other: FP16x16W) -> FP16x16W {
comp_fp16x16wide::max(self, other)
}
    fn mag(self: FP16x16W) -> u64 {
self.mag
}
fn is_neg(self: FP16x16W) -> bool {
self.sign
}
fn xor(lhs: FP16x16W, rhs: FP16x16W) -> bool {
comp_fp16x16wide::xor(lhs, rhs)
}
fn or(lhs: FP16x16W, rhs: FP16x16W) -> bool {
comp_fp16x16wide::or(lhs, rhs)
}
fn sign(self: FP16x16W) -> FP16x16W {
core_fp16x16wide::sign(self)
}
fn and(lhs: FP16x16W, rhs: FP16x16W) -> bool {
comp_fp16x16wide::and(lhs, rhs)
}
fn where(self: FP16x16W, x: FP16x16W, y: FP16x16W) -> FP16x16W {
comp_fp16x16wide::where(self, x, y)
}
fn NaN() -> FP16x16W {
FP16x16WImpl::NaN()
}
fn is_nan(self: FP16x16W) -> bool {
FP16x16WImpl::is_nan(self)
}
fn INF() -> FP16x16W {
FP16x16WImpl::INF()
}
fn is_inf(self: FP16x16W) -> bool {
FP16x16WImpl::is_inf(self)
}
fn is_pos_inf(self: FP16x16W) -> bool {
FP16x16WImpl::is_pos_inf(self)
}
fn is_neg_inf(self: FP16x16W) -> bool {
FP16x16WImpl::is_neg_inf(self)
}
fn bitwise_and(lhs: FP16x16W, rhs: FP16x16W) -> FP16x16W {
comp_fp16x16wide::bitwise_and(lhs, rhs)
}
fn bitwise_xor(lhs: FP16x16W, rhs: FP16x16W) -> FP16x16W {
comp_fp16x16wide::bitwise_xor(lhs, rhs)
}
fn bitwise_or(lhs: FP16x16W, rhs: FP16x16W) -> FP16x16W {
comp_fp16x16wide::bitwise_or(lhs, rhs)
}
fn add(lhs: FP16x16W, rhs: FP16x16W) -> FP16x16W {
FP16x16WAdd::add(lhs, rhs)
}
fn sub(lhs: FP16x16W, rhs: FP16x16W) -> FP16x16W {
FP16x16WSub::sub(lhs, rhs)
}
}
use orion::numbers::fixed_point::implementations::fp64x64::core::{
FP64x64Impl, FP64x64, FP64x64Add, FP64x64Sub
};
use orion::numbers::fixed_point::implementations::fp64x64::{core as core_fp64x64};
use orion::numbers::fixed_point::implementations::fp64x64::comp as comp_fp64x64;
use cubit::f128 as fp64x64;
impl FP64x64Number of NumberTrait<FP64x64, u128> {
fn new(mag: u128, sign: bool) -> FP64x64 {
        FP64x64Impl::new(mag, sign)
}
fn new_unscaled(mag: u128, sign: bool) -> FP64x64 {
FP64x64Impl::new_unscaled(mag, sign)
}
fn from_felt(val: felt252) -> FP64x64 {
FP64x64Impl::from_felt(val)
}
fn ceil(self: FP64x64) -> FP64x64 {
FP64x64Impl::ceil(self)
}
fn exp(self: FP64x64) -> FP64x64 {
FP64x64Impl::exp(self)
}
fn exp2(self: FP64x64) -> FP64x64 {
FP64x64Impl::exp2(self)
}
fn floor(self: FP64x64) -> FP64x64 {
FP64x64Impl::floor(self)
}
fn ln(self: FP64x64) -> FP64x64 {
FP64x64Impl::ln(self)
}
fn log2(self: FP64x64) -> FP64x64 {
FP64x64Impl::log2(self)
}
fn log10(self: FP64x64) -> FP64x64 {
FP64x64Impl::log10(self)
}
fn pow(self: FP64x64, b: FP64x64) -> FP64x64 {
FP64x64Impl::pow(self, b)
}
fn round(self: FP64x64) -> FP64x64 {
FP64x64Impl::round(self)
}
fn sqrt(self: FP64x64) -> FP64x64 {
FP64x64Impl::sqrt(self)
}
fn acos(self: FP64x64) -> FP64x64 {
FP64x64Impl::acos(self)
}
fn asin(self: FP64x64) -> FP64x64 {
FP64x64Impl::asin(self)
}
fn atan(self: FP64x64) -> FP64x64 {
FP64x64Impl::atan(self)
}
fn cos(self: FP64x64) -> FP64x64 {
FP64x64Impl::cos(self)
}
fn sin(self: FP64x64) -> FP64x64 {
FP64x64Impl::sin(self)
}
fn tan(self: FP64x64) -> FP64x64 {
FP64x64Impl::tan(self)
}
fn acosh(self: FP64x64) -> FP64x64 {
FP64x64Impl::acosh(self)
}
fn asinh(self: FP64x64) -> FP64x64 {
FP64x64Impl::asinh(self)
}
fn atanh(self: FP64x64) -> FP64x64 {
FP64x64Impl::atanh(self)
}
fn cosh(self: FP64x64) -> FP64x64 {
FP64x64Impl::cosh(self)
}
fn sinh(self: FP64x64) -> FP64x64 {
FP64x64Impl::sinh(self)
}
fn tanh(self: FP64x64) -> FP64x64 {
FP64x64Impl::tanh(self)
}
fn zero() -> FP64x64 {
FP64x64Impl::ZERO()
}
    fn is_zero(self: FP64x64) -> bool {
fp64x64::ops::eq(@self, @FP64x64Impl::ZERO())
}
fn half() -> FP64x64 {
FP64x64Impl::HALF()
}
fn one() -> FP64x64 {
FP64x64Impl::ONE()
}
fn neg_one() -> FP64x64 {
FP64x64 { mag: core_fp64x64::ONE, sign: true }
}
fn is_one(self: FP64x64) -> bool {
core_fp64x64::eq(@self, @FP64x64Impl::ONE())
}
fn abs(self: FP64x64) -> FP64x64 {
fp64x64::ops::abs(self)
}
fn neg(self: FP64x64) -> FP64x64 {
fp64x64::ops::neg(self)
}
fn min_value() -> FP64x64 {
FP64x64 { mag: core_fp64x64::MAX, sign: true }
}
fn max_value() -> FP64x64 {
FP64x64 { mag: core_fp64x64::MAX, sign: false }
}
fn min(self: FP64x64, other: FP64x64) -> FP64x64 {
fp64x64::comp::min(self, other)
}
fn max(self: FP64x64, other: FP64x64) -> FP64x64 {
fp64x64::comp::max(self, other)
}
fn mag(self: FP64x64) -> u128 {
self.mag
}
fn is_neg(self: FP64x64) -> bool {
self.sign
}
fn xor(lhs: FP64x64, rhs: FP64x64) -> bool {
comp_fp64x64::xor(lhs, rhs)
}
fn or(lhs: FP64x64, rhs: FP64x64) -> bool {
comp_fp64x64::or(lhs, rhs)
}
fn sign(self: FP64x64) -> FP64x64 {
FP64x64Impl::sign(self)
}
fn and(lhs: FP64x64, rhs: FP64x64) -> bool {
comp_fp64x64::and(lhs, rhs)
}
fn where(self: FP64x64, x: FP64x64, y: FP64x64) -> FP64x64 {
comp_fp64x64::where(self, x, y)
}
fn NaN() -> FP64x64 {
FP64x64Impl::NaN()
}
fn is_nan(self: FP64x64) -> bool {
FP64x64Impl::is_nan(self)
}
fn INF() -> FP64x64 {
FP64x64Impl::INF()
}
fn is_inf(self: FP64x64) -> bool {
FP64x64Impl::is_inf(self)
}
fn is_pos_inf(self: FP64x64) -> bool {
FP64x64Impl::is_pos_inf(self)
}
fn is_neg_inf(self: FP64x64) -> bool {
FP64x64Impl::is_neg_inf(self)
}
fn bitwise_and(lhs: FP64x64, rhs: FP64x64) -> FP64x64 { |
comp_fp64x64::bitwise_and(lhs, rhs)
}
fn bitwise_xor(lhs: FP64x64, rhs: FP64x64) -> FP64x64 {
comp_fp64x64::bitwise_xor(lhs, rhs)
}
fn bitwise_or(lhs: FP64x64, rhs: FP64x64) -> FP64x64 {
comp_fp64x64::bitwise_or(lhs, rhs)
}
fn add(lhs: FP64x64, rhs: FP64x64) -> FP64x64 {
FP64x64Add::add(lhs, rhs)
}
fn sub(lhs: FP64x64, rhs: FP64x64) -> FP64x64 {
FP64x64Sub::sub(lhs, rhs)
}
}
use orion::numbers::fixed_point::implementations::fp32x32::core::{
FP32x32Impl, FP32x32, FP32x32Add, FP32x32Sub
};
use orion::numbers::fixed_point::implementations::fp32x32::core as core_fp32x32;
use orion::numbers::fixed_point::implementations::fp32x32::comp as comp_fp32x32;
use cubit::f64 as fp32x32;
impl FP32x32Number of NumberTrait<FP32x32, u64> {
fn new(mag: u64, sign: bool) -> FP32x32 {
FP32x32Impl::new(mag, sign)
}
fn new_unscaled(mag: u64, sign: bool) -> FP32x32 {
FP32x32Impl::new_unscaled(mag, sign)
}
fn from_felt(val: felt252) -> FP32x32 {
FP32x32Impl::from_felt(val)
}
fn ceil(self: FP32x32) -> FP32x32 {
FP32x32Impl::ceil(self)
}
fn exp(self: FP32x32) -> FP32x32 {
FP32x32Impl::exp(self)
}
fn exp2(self: FP32x32) -> FP32x32 {
FP32x32Impl::exp2(self)
}
fn floor(self: FP32x32) -> FP32x32 {
FP32x32Impl::floor(self)
}
fn ln(self: FP32x32) -> FP32x32 {
FP32x32Impl::ln(self)
}
fn log2(self: FP32x32) -> FP32x32 {
FP32x32Impl::log2(self)
}
fn log10(self: FP32x32) -> FP32x32 {
FP32x32Impl::log10(self)
}
fn pow(self: FP32x32, b: FP32x32) -> FP32x32 {
FP32x32Impl::pow(self, b)
}
fn round(self: FP32x32) -> FP32x32 {
FP32x32Impl::round(self)
}
fn sqrt(self: FP32x32) -> FP32x32 {
FP32x32Impl::sqrt(self)
}
fn acos(self: FP32x32) -> FP32x32 {
FP32x32Impl::acos(self)
}
fn asin(self: FP32x32) -> FP32x32 {
        FP32x32Impl::asin(self)
}
fn atan(self: FP32x32) -> FP32x32 {
FP32x32Impl::atan(self)
}
fn cos(self: FP32x32) -> FP32x32 {
FP32x32Impl::cos(self)
}
fn sin(self: FP32x32) -> FP32x32 {
FP32x32Impl::sin(self)
}
fn tan(self: FP32x32) -> FP32x32 {
FP32x32Impl::tan(self)
}
fn acosh(self: FP32x32) -> FP32x32 {
FP32x32Impl::acosh(self)
}
fn asinh(self: FP32x32) -> FP32x32 {
FP32x32Impl::asinh(self)
}
fn atanh(self: FP32x32) -> FP32x32 {
FP32x32Impl::atanh(self)
}
fn cosh(self: FP32x32) -> FP32x32 {
FP32x32Impl::cosh(self)
}
fn sinh(self: FP32x32) -> FP32x32 {
FP32x32Impl::sinh(self)
}
fn tanh(self: FP32x32) -> FP32x32 {
FP32x32Impl::tanh(self)
}
fn zero() -> FP32x32 {
FP32x32Impl::ZERO()
}
fn is_zero(self: FP32x32) -> bool {
fp32x32::ops::eq(@self, @FP32x32Impl::ZERO())
}
fn half() -> FP32x32 {
FP32x32Impl::HALF()
}
fn one() -> FP32x32 {
FP32x32Impl::ONE()
}
fn neg_one() -> FP32x32 {
FP32x32 { mag: core_fp32x32::ONE, sign: true }
}
fn is_one(self: FP32x32) -> bool {
core_fp32x32::eq(@self, @FP32x32Impl::ONE())
}
fn abs(self: FP32x32) -> FP32x32 {
fp32x32::ops::abs(self)
}
fn neg(self: FP32x32) -> FP32x32 {
fp32x32::ops::neg(self)
}
fn min_value() -> FP32x32 {
FP32x32 { mag: core_fp32x32::MAX, sign: true }
}
fn max_value() -> FP32x32 {
FP32x32 { mag: core_fp32x32::MAX, sign: false }
}
fn min(self: FP32x32, other: FP32x32) -> FP32x32 {
fp32x32::comp::min(self, other)
}
fn max(self: FP32x32, other: FP32x32) -> FP32x32 {
fp32x32::comp::max(self, other)
}
fn mag(self: FP32x32) -> u64 {
self.mag
}
fn is_neg(self: FP32x32) -> bool {
self.sign
}
fn xor(lhs: FP32x32, rhs: FP32x32) -> bool {
comp_fp32x32::xor(lhs, rhs)
}
    fn or(lhs: FP32x32, rhs: FP32x32) -> bool {
comp_fp32x32::or(lhs, rhs)
}
fn sign(self: FP32x32) -> FP32x32 {
FP32x32Impl::sign(self)
}
fn and(lhs: FP32x32, rhs: FP32x32) -> bool {
comp_fp32x32::and(lhs, rhs)
}
fn where(self: FP32x32, x: FP32x32, y: FP32x32) -> FP32x32 {
comp_fp32x32::where(self, x, y)
}
fn NaN() -> FP32x32 {
FP32x32Impl::NaN()
}
fn is_nan(self: FP32x32) -> bool {
FP32x32Impl::is_nan(self)
}
fn INF() -> FP32x32 {
FP32x32Impl::INF()
}
fn is_inf(self: FP32x32) -> bool {
FP32x32Impl::is_inf(self)
}
fn is_pos_inf(self: FP32x32) -> bool {
FP32x32Impl::is_pos_inf(self)
}
fn is_neg_inf(self: FP32x32) -> bool {
FP32x32Impl::is_neg_inf(self)
}
fn bitwise_and(lhs: FP32x32, rhs: FP32x32) -> FP32x32 {
comp_fp32x32::bitwise_and(lhs, rhs)
}
fn bitwise_xor(lhs: FP32x32, rhs: FP32x32) -> FP32x32 {
comp_fp32x32::bitwise_xor(lhs, rhs)
}
fn bitwise_or(lhs: FP32x32, rhs: FP32x32) -> FP32x32 {
comp_fp32x32::bitwise_or(lhs, rhs)
}
fn add(lhs: FP32x32, rhs: FP32x32) -> FP32x32 {
FP32x32Add::add(lhs, rhs)
}
fn sub(lhs: FP32x32, rhs: FP32x32) -> FP32x32 {
FP32x32Sub::sub(lhs, rhs)
}
}
impl I8Number of NumberTrait<i8, i8> {
fn new(mag: i8, sign: bool) -> i8 {
if sign {
return -mag;
}
mag
}
fn new_unscaled(mag: i8, sign: bool) -> i8 {
mag
}
fn from_felt(val: felt252) -> i8 {
panic(array!['not supported!'])
}
fn ceil(self: i8) -> i8 {
panic(array!['not supported!'])
}
fn exp(self: i8) -> i8 {
panic(array!['not supported!'])
}
fn exp2(self: i8) -> i8 {
panic(array!['not supported!'])
}
fn floor(self: i8) -> i8 {
panic(array!['not supported!'])
}
fn ln(self: i8) -> i8 {
panic(array!['not supported!'])
}
    fn log2(self: i8) -> i8 {
panic(array!['not supported!'])
}
fn log10(self: i8) -> i8 {
panic(array!['not supported!'])
}
fn pow(self: i8, b: i8) -> i8 {
panic(array!['not supported!'])
}
fn round(self: i8) -> i8 {
panic(array!['not supported!'])
}
fn sqrt(self: i8) -> i8 {
panic(array!['not supported!'])
}
fn acos(self: i8) -> i8 {
panic(array!['not supported!'])
}
fn asin(self: i8) -> i8 {
panic(array!['not supported!'])
}
fn atan(self: i8) -> i8 {
panic(array!['not supported!'])
}
fn cos(self: i8) -> i8 {
panic(array!['not supported!'])
}
fn sin(self: i8) -> i8 {
panic(array!['not supported!'])
}
fn tan(self: i8) -> i8 {
panic(array!['not supported!'])
}
fn acosh(self: i8) -> i8 {
panic(array!['not supported!'])
}
fn asinh(self: i8) -> i8 {
panic(array!['not supported!'])
}
fn atanh(self: i8) -> i8 {
panic(array!['not supported!'])
}
fn cosh(self: i8) -> i8 {
panic(array!['not supported!'])
}
fn sinh(self: i8) -> i8 {
panic(array!['not supported!'])
}
fn tanh(self: i8) -> i8 {
panic(array!['not supported!'])
}
fn zero() -> i8 {
0
}
fn is_zero(self: i8) -> bool {
self == 0
}
fn half() -> i8 {
panic(array!['not supported!'])
}
fn one() -> i8 {
1
}
fn neg_one() -> i8 {
-1
}
fn is_one(self: i8) -> bool {
self == 1
}
fn abs(self: i8) -> i8 {
if self >= 0 {
self
} else {
self * -1_i8
}
}
fn neg(self: i8) -> i8 {
self * -1_i8
}
fn min_value() -> i8 {
-127
}
fn max_value() -> i8 {
127
}
fn min(self: i8, other: i8) -> i8 {
if self < other {
self
} else {
other
}
}
    fn max(self: i8, other: i8) -> i8 {
if self > other {
self
} else {
other
}
}
fn mag(self: i8) -> i8 {
self
}
fn is_neg(self: i8) -> bool {
self < 0
}
fn xor(lhs: i8, rhs: i8) -> bool {
if (lhs == 0 || rhs == 0) && lhs != rhs {
true
} else {
false
}
}
fn or(lhs: i8, rhs: i8) -> bool {
if lhs == 0 && rhs == 0 {
false
} else {
true
}
}
fn sign(self: i8) -> i8 {
if self == 0 {
0_i8
} else if self > 0 {
1_i8
} else {
-1_i8
}
}
fn and(lhs: i8, rhs: i8) -> bool {
if lhs == 0 || rhs == 0 {
false
} else {
true
}
}
fn where(self: i8, x: i8, y: i8) -> i8 {
if self == 0 {
y
} else {
x
}
}
fn NaN() -> i8 {
panic(array!['not supported!'])
}
fn is_nan(self: i8) -> bool {
panic(array!['not supported!'])
}
fn INF() -> i8 {
127
}
fn is_inf(self: i8) -> bool {
self == 127 || self == -127
}
fn is_pos_inf(self: i8) -> bool {
self == 127
}
fn is_neg_inf(self: i8) -> bool {
self == -127
}
fn bitwise_and(lhs: i8, rhs: i8) -> i8 {
panic(array!['not supported!'])
}
fn bitwise_xor(lhs: i8, rhs: i8) -> i8 {
panic(array!['not supported!'])
}
fn bitwise_or(lhs: i8, rhs: i8) -> i8 {
panic(array!['not supported!'])
}
fn add(lhs: i8, rhs: i8) -> i8 {
lhs + rhs
}
fn sub(lhs: i8, rhs: i8) -> i8 {
lhs - rhs
}
}
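// Worked examples (illustrative) for the logical connectives above, which
// treat any non-zero value as true:
//   I8Number::xor(0, 5) == true   (exactly one operand is truthy)
//   I8Number::xor(3, 5) == false  (both truthy)
//   I8Number::and(3, 0) == false
//   I8Number::or(3, 0) == true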
impl I8Div of Div<i8> {
fn div(lhs: i8, rhs: i8) -> i8 {
assert(rhs != 0, 'divisor cannot be 0');
let mut lhs_positive = lhs;
let mut rhs_positive = rhs;
if lhs < 0 {
lhs_positive = lhs * -1;
}
if rhs < 0 {
            rhs_positive = rhs * -1;
}
let lhs_felt: felt252 = lhs_positive.into();
let rhs_felt: felt252 = rhs_positive.into();
let lhs_u128: u128 = lhs_felt.try_into().unwrap();
let rhs_u128: u128 = rhs_felt.try_into().unwrap();
let mut result = lhs_u128 / rhs_u128;
let felt_result: felt252 = result.into();
let signed_int_result: i8 = felt_result.try_into().unwrap();
        // Compare the operand signs directly; forming lhs * rhs here could
        // overflow the i8 range and panic before the sign is applied.
        if (lhs < 0) != (rhs < 0) {
signed_int_result * -1
} else {
signed_int_result
}
}
}
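// Worked example (illustrative): Div::div(-7_i8, 2_i8) divides the magnitudes
// as u128 (7 / 2 == 3) and then restores the sign, giving -3; signed division
// here truncates toward zero.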
impl I8DivEq of DivEq<i8> { |
fn div_eq(ref self: i8, other: i8) {
self = Div::div(self, other);
}
}
impl I8IntoFP8x23 of Into<i8, FP8x23> {
fn into(self: i8) -> FP8x23 {
let number_sign: bool = self < 0;
let mut self_positive: i8 = self;
if number_sign {
self_positive = self_positive * -1_i8
}
let number_felt: felt252 = self_positive.into();
let number_u32: u32 = number_felt.try_into().unwrap();
FP8x23 { mag: number_u32 * ONE_fp8x23, sign: number_sign }
}
}
impl I8IntoFP16x16 of Into<i8, FP16x16> {
fn into(self: i8) -> FP16x16 {
let number_sign: bool = self < 0;
let mut self_positive: i8 = self;
if number_sign {
self_positive = self_positive * -1_i8
}
let number_felt: felt252 = self_positive.into();
let number_u32: u32 = number_felt.try_into().unwrap();
FP16x16 { mag: number_u32 * ONE_fp16x16, sign: number_sign }
}
}
impl I8IntoFP64x64 of Into<i8, FP64x64> {
fn into(self: i8) -> FP64x64 {
let number_sign: bool = self < 0;
let mut self_positive: i8 = self;
if number_sign {
self_positive = self_positive * -1_i8
}
let number_felt: felt252 = self_positive.into();
let number_u128: u128 = number_felt.try_into().unwrap();
FP64x64 { mag: number_u128 * ONE_fp64x64, sign: number_sign }
}
}
impl I8IntoFP32x32 of Into<i8, FP32x32> {
fn into(self: i8) -> FP32x32 {
let number_sign: bool = self < 0;
let mut self_positive: i8 = self;
if number_sign {
self_positive = self_positive * -1_i8
}
let number_felt: felt252 = self_positive.into();
        let number_u64: u64 = number_felt.try_into().unwrap();
        FP32x32 { mag: number_u64 * ONE_fp32x32, sign: number_sign }
}
}
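// Example (illustrative): (-3_i8).into() as an FP16x16 yields
// FP16x16 { mag: 3 * ONE_fp16x16, sign: true }, i.e. the fixed-point value -3.0.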
impl I16Number of NumberTrait<i16, i16> {
fn new(mag: i16, sign: bool) -> i16 {
if sign {
return -mag;
}
mag
}
    fn new_unscaled(mag: i16, sign: bool) -> i16 {
mag
}
fn from_felt(val: felt252) -> i16 {
panic(array!['not supported!'])
}
fn ceil(self: i16) -> i16 {
panic(array!['not supported!'])
}
fn exp(self: i16) -> i16 {
panic(array!['not supported!'])
}
fn exp2(self: i16) -> i16 {
panic(array!['not supported!'])
}
fn floor(self: i16) -> i16 {
panic(array!['not supported!'])
}
fn ln(self: i16) -> i16 {
panic(array!['not supported!'])
}
fn log2(self: i16) -> i16 {
panic(array!['not supported!'])
}
fn log10(self: i16) -> i16 {
panic(array!['not supported!'])
}
fn pow(self: i16, b: i16) -> i16 {
panic(array!['not supported!'])
}
fn round(self: i16) -> i16 {
panic(array!['not supported!'])
}
fn sqrt(self: i16) -> i16 {
panic(array!['not supported!'])
}
fn acos(self: i16) -> i16 {
panic(array!['not supported!'])
}
fn asin(self: i16) -> i16 {
panic(array!['not supported!'])
}
fn atan(self: i16) -> i16 {
panic(array!['not supported!'])
}
fn cos(self: i16) -> i16 {
panic(array!['not supported!'])
}
fn sin(self: i16) -> i16 {
panic(array!['not supported!'])
}
fn tan(self: i16) -> i16 {
panic(array!['not supported!'])
}
fn acosh(self: i16) -> i16 {
panic(array!['not supported!'])
}
fn asinh(self: i16) -> i16 {
panic(array!['not supported!'])
}
fn atanh(self: i16) -> i16 {
panic(array!['not supported!'])
}
fn cosh(self: i16) -> i16 {
panic(array!['not supported!'])
}
fn sinh(self: i16) -> i16 {
panic(array!['not supported!'])
}
fn tanh(self: i16) -> i16 {
panic(array!['not supported!'])
}
fn zero() -> i16 {
0
}
fn is_zero(self: i16) -> bool {
self == 0
}
fn half() -> i16 {
panic(array!['not supported!']) |
}
fn one() -> i16 {
1
}
fn neg_one() -> i16 {
-1
}
fn is_one(self: i16) -> bool {
self == 1
}
fn abs(self: i16) -> i16 {
if self >= 0 {
self
} else {
self * -1_i16
}
}
fn neg(self: i16) -> i16 {
self * -1_i16
}
fn min_value() -> i16 {
-32767
}
fn max_value() -> i16 {
32767
}
fn min(self: i16, other: i16) -> i16 {
if self < other {
self
} else {
other
}
}
fn max(self: i16, other: i16) -> i16 {
if self > other {
self
} else {
other
}
}
fn mag(self: i16) -> i16 {
self
}
fn is_neg(self: i16) -> bool {
self < 0
}
fn xor(lhs: i16, rhs: i16) -> bool {
if (lhs == 0 || rhs == 0) && lhs != rhs {
true
} else {
false
}
}
fn or(lhs: i16, rhs: i16) -> bool {
if lhs == 0 && rhs == 0 {
false
} else {
true
}
}
fn sign(self: i16) -> i16 {
if self == 0 {
0_i16
} else if self > 0 {
1_i16
} else {
-1_i16
}
}
fn and(lhs: i16, rhs: i16) -> bool {
if lhs == 0 || rhs == 0 {
false
} else {
true
}
}
fn where(self: i16, x: i16, y: i16) -> i16 {
if self == 0 {
y
} else {
x
}
}
fn NaN() -> i16 {
panic(array!['not supported!'])
}
fn is_nan(self: i16) -> bool {
panic(array!['not supported!'])
}
fn INF() -> i16 {
32767
}
fn is_inf(self: i16) -> bool {
self == 32767 || self == -32767
}
fn is_pos_inf(self: i16) -> bool {
self == 32767
}
fn is_neg_inf(self: i16) -> bool {
self == -32767
}
fn bitwise_and(lhs: i16, rhs: i16) -> i16 {
        panic(array!['not supported!'])
}
fn bitwise_xor(lhs: i16, rhs: i16) -> i16 {
panic(array!['not supported!'])
}
fn bitwise_or(lhs: i16, rhs: i16) -> i16 {
panic(array!['not supported!'])
}
fn add(lhs: i16, rhs: i16) -> i16 {
lhs + rhs
}
fn sub(lhs: i16, rhs: i16) -> i16 {
lhs - rhs
}
}
impl I16Div of Div<i16> {
fn div(lhs: i16, rhs: i16) -> i16 {
assert(rhs != 0, 'divisor cannot be 0');
let mut lhs_positive = lhs;
let mut rhs_positive = rhs;
if lhs < 0 {
lhs_positive = lhs * -1;
}
if rhs < 0 {
rhs_positive = rhs * -1;
}
let lhs_felt: felt252 = lhs_positive.into();
let rhs_felt: felt252 = rhs_positive.into();
let lhs_u128: u128 = lhs_felt.try_into().unwrap();
let rhs_u128: u128 = rhs_felt.try_into().unwrap();
let mut result = lhs_u128 / rhs_u128;
let felt_result: felt252 = result.into();
let signed_int_result: i16 = felt_result.try_into().unwrap();
        // Compare the operand signs directly; forming lhs * rhs here could
        // overflow the i16 range and panic before the sign is applied.
        if (lhs < 0) != (rhs < 0) {
signed_int_result * -1
} else {
signed_int_result
}
}
}
impl I16DivEq of DivEq<i16> { |
fn div_eq(ref self: i16, other: i16) {
self = Div::div(self, other);
}
}
impl I32Number of NumberTrait<i32, i32> {
fn new(mag: i32, sign: bool) -> i32 {
if sign {
return -mag;
}
mag
}
fn new_unscaled(mag: i32, sign: bool) -> i32 {
mag
}
fn from_felt(val: felt252) -> i32 {
panic(array!['not supported!'])
}
fn ceil(self: i32) -> i32 {
panic(array!['not supported!'])
}
fn exp(self: i32) -> i32 {
panic(array!['not supported!'])
}
fn exp2(self: i32) -> i32 {
panic(array!['not supported!'])
}
fn floor(self: i32) -> i32 {
panic(array!['not supported!'])
}
fn ln(self: i32) -> i32 {
panic(array!['not supported!'])
}
fn log2(self: i32) -> i32 {
panic(array!['not supported!'])
}
fn log10(self: i32) -> i32 {
panic(array!['not supported!'])
}
fn pow(self: i32, b: i32) -> i32 {
panic(array!['not supported!'])
}
fn round(self: i32) -> i32 {
panic(array!['not supported!'])
}
fn sqrt(self: i32) -> i32 {
panic(array!['not supported!'])
}
fn acos(self: i32) -> i32 {
panic(array!['not supported!'])
}
fn asin(self: i32) -> i32 {
panic(array!['not supported!'])
}
fn atan(self: i32) -> i32 {
panic(array!['not supported!'])
}
fn cos(self: i32) -> i32 {
panic(array!['not supported!'])
}
fn sin(self: i32) -> i32 {
panic(array!['not supported!'])
}
fn tan(self: i32) -> i32 {
panic(array!['not supported!'])
}
fn acosh(self: i32) -> i32 {
panic(array!['not supported!'])
}
fn asinh(self: i32) -> i32 {
panic(array!['not supported!'])
}
fn atanh(self: i32) -> i32 {
panic(array!['not supported!'])
}
fn cosh(self: i32) -> i32 {
panic(array!['not supported!'])
}
fn sinh(self: i32) -> i32 {
        panic(array!['not supported!'])
}
fn tanh(self: i32) -> i32 {
panic(array!['not supported!'])
}
fn zero() -> i32 {
0
}
fn is_zero(self: i32) -> bool {
self == 0
}
fn half() -> i32 {
panic(array!['not supported!'])
}
fn one() -> i32 {
1
}
fn neg_one() -> i32 {
-1
}
fn is_one(self: i32) -> bool {
self == 1
}
fn abs(self: i32) -> i32 {
if self >= 0 {
self
} else {
self * -1_i32
}
}
fn neg(self: i32) -> i32 {
self * -1_i32
}
fn min_value() -> i32 {
-2147483647
}
fn max_value() -> i32 {
2147483647
}
fn min(self: i32, other: i32) -> i32 {
if self < other {
self
} else {
other
}
}
fn max(self: i32, other: i32) -> i32 {
if self > other {
self
} else {
other
}
}
fn mag(self: i32) -> i32 {
self
}
fn is_neg(self: i32) -> bool {
self < 0
}
fn xor(lhs: i32, rhs: i32) -> bool {
if (lhs == 0 || rhs == 0) && lhs != rhs {
true
} else {
false
}
}
fn or(lhs: i32, rhs: i32) -> bool {
if lhs == 0 && rhs == 0 {
false
} else {
true
}
}
fn sign(self: i32) -> i32 {
if self == 0 {
0_i32
} else if self > 0 {
1_i32
} else {
-1_i32
}
}
fn and(lhs: i32, rhs: i32) -> bool {
if lhs == 0 || rhs == 0 {
false
} else {
true
}
}
fn where(self: i32, x: i32, y: i32) -> i32 {
if self == 0 {
y
} else {
x
}
}
fn NaN() -> i32 {
panic(array!['not supported!'])
}
fn is_nan(self: i32) -> bool {
panic(array!['not supported!'])
}
fn INF() -> i32 {
2147483647
} |
fn is_inf(self: i32) -> bool {
self == 2147483647 || self == -2147483647
}
fn is_pos_inf(self: i32) -> bool {
self == 2147483647
}
fn is_neg_inf(self: i32) -> bool {
self == -2147483647
}
fn bitwise_and(lhs: i32, rhs: i32) -> i32 {
panic(array!['not supported!'])
}
fn bitwise_xor(lhs: i32, rhs: i32) -> i32 {
panic(array!['not supported!'])
}
fn bitwise_or(lhs: i32, rhs: i32) -> i32 {
panic(array!['not supported!'])
}
fn add(lhs: i32, rhs: i32) -> i32 {
lhs + rhs
}
fn sub(lhs: i32, rhs: i32) -> i32 {
lhs - rhs
}
}
impl I32Div of Div<i32> {
fn div(lhs: i32, rhs: i32) -> i32 {
assert(rhs != 0, 'divisor cannot be 0');
let mut lhs_positive = lhs;
let mut rhs_positive = rhs;
if lhs < 0 {
lhs_positive = lhs * -1;
}
if rhs < 0 {
rhs_positive = rhs * -1;
}
let lhs_felt: felt252 = lhs_positive.into();
let rhs_felt: felt252 = rhs_positive.into();
let lhs_u128: u128 = lhs_felt.try_into().unwrap();
let rhs_u128: u128 = rhs_felt.try_into().unwrap();
let mut result = lhs_u128 / rhs_u128;
let felt_result: felt252 = result.into();
let signed_int_result: i32 = felt_result.try_into().unwrap();
        // Compare the operand signs directly; forming lhs * rhs here could
        // overflow the i32 range and panic before the sign is applied.
        if (lhs < 0) != (rhs < 0) {
signed_int_result * -1
} else {
signed_int_result
}
}
}
impl I32DivEq of DivEq<i32> { |