output = y, func_sig = "input_0.scatter(updates:input_1, indices:input_2, axis:Option::Some(1), reduction:Option::Some('none'))",
name= name)
def axis1_max():
x1 = np.zeros((3, 3)).astype(np.int8)
x2 = np.arange(1, 10).reshape((3, 3)).astype(np.int8)
x3 = np.array(
[[0,1,2],
[2,0,1],
[1,0,1]],
)
y = scatter_elements(x1, x3, x2, 1, 'max')
x1 = Tensor(Dtype.I8, x1.shape, x1.flatten())
x2 = Tensor(Dtype.I8, x2.shape, x2.flatten())
x3 = Tensor(Dtype.U32, x3.shape, x3.flatten())
y = Tensor(Dtype.I8, y.shape, y.flatten())
name = "scatter_i8_axis1_max"
make_test(
inputs = [x1, x2, x3], output = y, func_sig = "input_0.scatter(updates:input_1, indices:input_2, axis:Option::Some(1), reduction:Option::Some('max'))",
name= name)
default()
axis1()
axis1_max()
scatter_3D()
@staticmethod
def scatter_i32():
def scatter_3D():
def default():
x1 = np.zeros((3, 3)).astype(np.int32)
x2 = np.arange(1, 10).reshape((3, 3)).astype(np.int32)
x3 = np.array(
[[0,1,2],
[2,0,1],
[1,0,1]],
)
y = scatter_elements(x1, x3, x2, 0, 'none')
x1 = Tensor(Dtype.I32, x1.shape, x1.flatten())
x2 = Tensor(Dtype.I32, x2.shape, x2.flatten())
x3 = Tensor(Dtype.U32, x3.shape, x3.flatten())
y = Tensor(Dtype.I32, y.shape, y.flatten())
name = "scatter_i8_default"
make_test(
inputs = [x1, x2, x3], output = y, func_sig = "input_0.scatter(updates:input_1, indices:input_2, axis:Option::Some(0), reduction:Option::Some('none'))",
name= name)
def axis1():
x1 = np.zeros((3, 3)).astype(np.int32)
x2 = np.arange(1, 10).reshape((3, 3)).astype(np.int32)
x3 = np.array(
[[0,1,2],
[2,0,1],
[1,0,1]],
)
y = scatter_elements(x1, x3, x2, 1, 'none')
x1 = Tensor(Dtype.I32, x1.shape, x1.flatten())
x2 = Tensor(Dtype.I32, x2.shape, x2.flatten())
x3 = Tensor(Dtype.U32, x3.shape, x3.flatten())
y = Tensor(Dtype.I32, y.shape, y.flatten())
name = "scatter_i8_axis1"
make_test(
inputs = [x1, x2, x3], output = y, func_sig = "input_0.scatter(updates:input_1, indices:input_2, axis:Option::Some(1), reduction:Option::Some('none'))",
name= name)
def axis_min():
x1 = np.zeros((3, 3)).astype(np.int32)
x2 = np.arange(1, 10).reshape((3, 3)).astype(np.int32)
x3 = np.array(
[[0,1,2],
[2,0,1],
[1,0,1]],
)
y = scatter_elements(x1, x3, x2, 1, 'min')
x1 = Tensor(Dtype.I32, x1.shape, x1.flatten())
x2 = Tensor(Dtype.I32, x2.shape, x2.flatten())
x3 = Tensor(Dtype.U32, x3.shape, x3.flatten())
y = Tensor(Dtype.I32, y.shape, y.flatten())
name = "scatter_i8_default"
make_test(
inputs = [x1, x2, x3], output = y, func_sig = "input_0.scatter(updates:input_1, indices:input_2, axis:Option::Some(1), reduction:Option::Some('min'))",
name= name)
default()
axis1()
axis_min()
scatter_3D()
@staticmethod
def scatter_u32():
def scatter_3D():
def default():
x1 = np.zeros((3, 3)).astype(np.uint32)
x2 = np.arange(1, 10).reshape((3, 3)).astype(np.uint32)
x3 = np.array(
[[0,1,2],
[2,0,1],
[1,0,1]],
)
y = scatter_elements(x1, x3, x2, 0, 'none')
x1 = Tensor(Dtype.U32, x1.shape, x1.flatten())
x2 = Tensor(Dtype.U32, x2.shape, x2.flatten())
x3 = Tensor(Dtype.U32, x3.shape, x3.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
name = "scatter_u32_default"
make_test(
inputs = [x1, x2, x3], output = y, func_sig = "input_0.scatter(updates:input_1, indices:input_2, axis:Option::Some(0), reduction:Option::Some('none'))",
name= name)
def axis1():
x1 = np.zeros((3, 3)).astype(np.uint32)
x2 = np.arange(1, 10).reshape((3, 3)).astype(np.uint32)
x3 = np.array(
[[0,1,2],
[2,0,1],
[1,0,1]],
)
y = scatter_elements(x1, x3, x2, 1, 'none')
x1 = Tensor(Dtype.U32, x1.shape, x1.flatten())
x2 = Tensor(Dtype.U32, x2.shape, x2.flatten())
x3 = Tensor(Dtype.U32, x3.shape, x3.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
name = "scatter_u32_axis1"
make_test(
inputs = [x1, x2, x3], output = y, func_sig = "input_0.scatter(updates:input_1, indices:input_2, axis:Option::Some(1), reduction:Option::Some('none'))",
name= name)
def axis_add():
x1 = np.zeros((3, 3)).astype(np.uint32)
x2 = np.arange(1, 10).reshape((3, 3)).astype(np.uint32)
x3 = np.array(
[[0,1,2],
[2,0,1],
[1,0,1]],
)
y = scatter_elements(x1, x3, x2, 0, 'add')
x1 = Tensor(Dtype.U32, x1.shape, x1.flatten())
x2 = Tensor(Dtype.U32, x2.shape, x2.flatten())
x3 = Tensor(Dtype.U32, x3.shape, x3.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
name = "scatter_u32_add"
make_test(
inputs = [x1, x2, x3], output = y, func_sig = "input_0.scatter(updates:input_1, indices:input_2, axis:Option::Some(0), reduction:Option::Some('add'))",
name= name)
default()
axis1()
axis_add()
scatter_3D() |
import numpy as np
from nodegen.node |
import RunAll
from ..helpers |
import make_test, to_fp, Tensor, Dtype, FixedImpl
def scatter_nd_impl(data, indices, updates, reduction="none"):
assert indices.shape[-1] <= len(data.shape)
assert updates.shape == indices.shape[:-1] + data.shape[indices.shape[-1] :]
output = np.copy(data)
for i in np.ndindex(indices.shape[:-1]):
if reduction == "add":
output[tuple(indices[i])] += updates[i]
elif reduction == "mul":
output[tuple(indices[i])] *= updates[i]
elif reduction == "max":
output[tuple(indices[i])] = np.maximum(output[indices[i]], updates[i])
elif reduction == "min":
output[tuple(indices[i])] = np.minimum(output[indices[i]], updates[i])
else:
output[tuple(indices[i])] = updates[i]
return output
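# Illustrative sketch (not part of the original generator): a tiny worked
# example of scatter_nd_impl. With indices of shape (2, 1), each index row
# selects a whole row of the data to replace with the matching updates slice.
_example_out = scatter_nd_impl(
np.zeros((3, 2), dtype=np.int64),
np.array([[0], [2]], dtype=np.int64),
np.array([[1, 1], [2, 2]], dtype=np.int64),
)
assert np.array_equal(_example_out, np.array([[1, 1], [0, 0], [2, 2]]))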
data = np.array(
[
[[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],
[[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],
[[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]],
[[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]],
],
dtype=np.float32,
)
indices = np.array([[0], [2]], dtype=np.int64)
updates = np.array(
[
[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
[[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]],
],
dtype=np.float32,
)
class Scatter_nd(RunAll):
@staticmethod
def scatter_nd_fp16x16():
def scatter_nd_3D():
def default():
x1 = data.astype(np.int64)
x2 = indices.astype(np.int64)
x3 = updates.astype(np.uint32)
y = scatter_nd_impl(x1, x2, x3, reduction='none')
x1 = Tensor(Dtype.FP16x16, x1.shape, to_fp(x1.flatten(), FixedImpl.FP16x16))
x2 = Tensor(Dtype.U32, x2.shape, x2.flatten())
x3 = Tensor(Dtype.FP16x16, x3.shape, to_fp(x3.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(
y.flatten(), FixedImpl.FP16x16))
name = "scatter_nd_fp16x16_3d_default"
make_test(
inputs = [x1, x3, x2], output = y, func_sig = "input_0.scatter_nd(updates:input_1, indices:input_2, reduction:Option::None(()))",
name= name)
def add():
x1 = data.astype(np.int64)
x2 = indices.astype(np.int64)
x3 = updates.astype(np.uint32)
y = scatter_nd_impl(x1, x2, x3, reduction='add')
x1 = Tensor(Dtype.FP16x16, x1.shape, to_fp(x1.flatten(), FixedImpl.FP16x16))
x2 = Tensor(Dtype.U32, x2.shape, x2.flatten())
x3 = Tensor(Dtype.FP16x16, x3.shape, to_fp(x3.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(
y.flatten(), FixedImpl.FP16x16))
name = "scatter_nd_fp16x16_3d_add"
make_test(
inputs = [x1, x3, x2], output = y, func_sig = "input_0.scatter_nd(updates:input_1, indices:input_2, reduction:Option::Some('add'))",
name= name)
def mul():
x1 = data.astype(np.int64)
x2 = indices.astype(np.int64)
x3 = updates.astype(np.uint32)
y = scatter_nd_impl(x1, x2, x3, reduction='mul')
x1 = Tensor(Dtype.FP16x16, x1.shape, to_fp(x1.flatten(), FixedImpl.FP16x16))
x2 = Tensor(Dtype.U32, x2.shape, x2.flatten())
x3 = Tensor(Dtype.FP16x16, x3.shape, to_fp(x3.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(
y.flatten(), FixedImpl.FP16x16))
name = "scatter_nd_fp16x16_3d_mul"
make_test(
inputs = [x1, x3, x2], output = y, func_sig = "input_0.scatter_nd(updates:input_1, indices:input_2, reduction:Option::Some('mul'))",
name= name)
def max():
x1 = data.astype(np.int64)
x2 = indices.astype(np.int64)
x3 = updates.astype(np.uint32)
y = scatter_nd_impl(x1, x2, x3, reduction='max')
x1 = Tensor(Dtype.FP16x16, x1.shape, to_fp(x1.flatten(), FixedImpl.FP16x16))
x2 = Tensor(Dtype.U32, x2.shape, x2.flatten())
x3 = Tensor(Dtype.FP16x16, x3.shape, to_fp(x3.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(
y.flatten(), FixedImpl.FP16x16))
name = "scatter_nd_fp16x16_3d_max"
make_test(
inputs = [x1, x3, x2], output = y, func_sig = "input_0.scatter_nd(updates:input_1, indices:input_2, reduction:Option::Some('max'))",
name= name)
def min():
x1 = data.astype(np.int64)
x2 = indices.astype(np.int64)
x3 = updates.astype(np.uint32)
y = scatter_nd_impl(x1, x2, x3, reduction='min')
x1 = Tensor(Dtype.FP16x16, x1.shape, to_fp(x1.flatten(), FixedImpl.FP16x16))
x2 = Tensor(Dtype.U32, x2.shape, x2.flatten())
x3 = Tensor(Dtype.FP16x16, x3.shape, to_fp(x3.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(
y.flatten(), FixedImpl.FP16x16))
name = "scatter_nd_fp16x16_3d_min"
make_test(
inputs = [x1, x3, x2], output = y, func_sig = "input_0.scatter_nd(updates:input_1, indices:input_2, reduction:Option::Some('min'))",
name= name)
default()
add()
mul()
max()
min()
scatter_nd_3D()
@staticmethod
def scatter_nd_fp8x23():
def scatter_nd_3D():
def default():
x1 = data.astype(np.int64)
x2 = indices.astype(np.int64)
x3 = updates.astype(np.uint32)
y = scatter_nd_impl(x1, x2, x3, reduction='none')
x1 = Tensor(Dtype.FP8x23, x1.shape, to_fp(x1.flatten(), FixedImpl.FP8x23))
x2 = Tensor(Dtype.U32, x2.shape, x2.flatten())
x3 = Tensor(Dtype.FP8x23, x3.shape, to_fp(x3.flatten(), FixedImpl.FP8x23))
y = Tensor(Dtype.FP8x23, y.shape, to_fp(
y.flatten(), FixedImpl.FP8x23))
name = "scatter_nd_fp8x23_3d_default"
make_test(
inputs = [x1, x3, x2], output = y, func_sig = "input_0.scatter_nd(updates:input_1, indices:input_2, reduction:Option::None(()))",
name= name)
def add():
x1 = data.astype(np.int64)
x2 = indices.astype(np.int64)
x3 = updates.astype(np.uint32)
y = scatter_nd_impl(x1, x2, x3, reduction='add')
x1 = Tensor(Dtype.FP8x23, x1.shape, to_fp(x1.flatten(), FixedImpl.FP8x23))
x2 = Tensor(Dtype.U32, x2.shape, x2.flatten())
x3 = Tensor(Dtype.FP8x23, x3.shape, to_fp(x3.flatten(), FixedImpl.FP8x23))
y = Tensor(Dtype.FP8x23, y.shape, to_fp(
y.flatten(), FixedImpl.FP8x23))
name = "scatter_nd_fp8x23_3d_add"
make_test(
inputs = [x1, x3, x2], output = y, func_sig = "input_0.scatter_nd(updates:input_1, indices:input_2, reduction:Option::Some('add'))",
name= name)
def mul():
x1 = data.astype(np.int64)
x2 = indices.astype(np.int64)
x3 = updates.astype(np.uint32)
y = scatter_nd_impl(x1, x2, x3, reduction='mul')
x1 = Tensor(Dtype.FP8x23, x1.shape, to_fp(x1.flatten(), FixedImpl.FP8x23))
x2 = Tensor(Dtype.U32, x2.shape, x2.flatten())
x3 = Tensor(Dtype.FP8x23, x3.shape, to_fp(x3.flatten(), FixedImpl.FP8x23))
y = Tensor(Dtype.FP8x23, y.shape, to_fp(
y.flatten(), FixedImpl.FP8x23))
name = "scatter_nd_fp8x23_3d_mul"
make_test(
inputs = [x1, x3, x2], output = y, func_sig = "input_0.scatter_nd(updates:input_1, indices:input_2, reduction:Option::Some('mul'))",
name= name)
def max():
x1 = data.astype(np.int64)
x2 = indices.astype(np.int64)
x3 = updates.astype(np.uint32)
y = scatter_nd_impl(x1, x2, x3, reduction='max')
x1 = Tensor(Dtype.FP8x23, x1.shape, to_fp(x1.flatten(), FixedImpl.FP8x23))
x2 = Tensor(Dtype.U32, x2.shape, x2.flatten())
x3 = Tensor(Dtype.FP8x23, x3.shape, to_fp(x3.flatten(), FixedImpl.FP8x23))
y = Tensor(Dtype.FP8x23, y.shape, to_fp(
y.flatten(), FixedImpl.FP8x23))
name = "scatter_nd_fp8x23_3d_max"
make_test(
inputs = [x1, x3, x2], output = y, func_sig = "input_0.scatter_nd(updates:input_1, indices:input_2, reduction:Option::Some('max'))",
name= name)
def min():
x1 = data.astype(np.int64)
x2 = indices.astype(np.int64)
x3 = updates.astype(np.uint32)
y = scatter_nd_impl(x1, x2, x3, reduction='min')
x1 = Tensor(Dtype.FP8x23, x1.shape, to_fp(x1.flatten(), FixedImpl.FP8x23))
x2 = Tensor(Dtype.U32, x2.shape, x2.flatten())
x3 = Tensor(Dtype.FP8x23, x3.shape, to_fp(x3.flatten(), FixedImpl.FP8x23))
y = Tensor(Dtype.FP8x23, y.shape, to_fp(
y.flatten(), FixedImpl.FP8x23))
name = "scatter_nd_fp8x23_3d_min"
make_test(
inputs = [x1, x3, x2], output = y, func_sig = "input_0.scatter_nd(updates:input_1, indices:input_2, reduction:Option::Some('min'))",
name= name)
default()
add()
mul()
max()
min()
scatter_nd_3D()
@staticmethod
def scatter_nd_u32():
def scatter_nd_3D():
def default():
x1 = np.arange(0,12).reshape((4,3)).astype(np.int32)
x2 = np.array([[0],[1]]).astype(np.uint32)
x3 = np.random.randint(low = 0,high=100, size=(2,3)).astype(np.uint32)
y = scatter_nd_impl(x1, x2, x3, reduction='none')
x1 = Tensor(Dtype.U32, x1.shape, x1.flatten())
x2 = Tensor(Dtype.U32, x2.shape, x2.flatten())
x3 = Tensor(Dtype.U32, x3.shape, x3.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
name = "scatter_nd_u32_default"
make_test(
inputs = [x1, x3, x2], output = y, func_sig = "input_0.scatter_nd(updates:input_1, indices:input_2, reduction:Option::None(()))",
name= name)
def add():
x1 = np.arange(0,12).reshape((4,3)).astype(np.int32)
x2 = np.array([[1],[0]]).astype(np.uint32)
x3 = np.random.randint(low = 0,high=100, size=(2,3)).astype(np.uint32)
y = scatter_nd_impl(x1, x2, x3, reduction='add')
x1 = Tensor(Dtype.U32, x1.shape, x1.flatten())
x2 = Tensor(Dtype.U32, x2.shape, x2.flatten())
x3 = Tensor(Dtype.U32, x3.shape, x3.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
name = "scatter_nd_u32_add"
make_test(
inputs = [x1, x3, x2], output = y, func_sig = "input_0.scatter_nd(updates:input_1, indices:input_2, reduction:Option::Some('add'))",
name= name)
def mul():
x1 = np.arange(0,12).reshape((4,3)).astype(np.int32)
x2 =np.array([[0],[1]]).astype(np.uint32)
x3 = np.random.randint(low = 0,high=100, size=(2,3)).astype(np.uint32)
y = scatter_nd_impl(x1, x2, x3, reduction='mul')
x1 = Tensor(Dtype.U32, x1.shape, x1.flatten())
x2 = Tensor(Dtype.U32, x2.shape, x2.flatten())
x3 = Tensor(Dtype.U32, x3.shape, x3.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
name = "scatter_nd_u32_mul"
make_test(
inputs = [x1, x3, x2], output = y, func_sig = "input_0.scatter_nd(updates:input_1, indices:input_2, reduction:Option::Some('mul'))",
name= name)
def max():
x1 = np.arange(0,12).reshape((4,3)).astype(np.int32)
x2 =np.array([[0],[1]]).astype(np.uint32)
x3 = np.random.randint(low = 0,high=100, size=(2,3)).astype(np.uint32)
y = scatter_nd_impl(x1, x2, x3, reduction='max')
x1 = Tensor(Dtype.U32, x1.shape, x1.flatten())
x2 = Tensor(Dtype.U32, x2.shape, x2.flatten())
x3 = Tensor(Dtype.U32, x3.shape, x3.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
name = "scatter_nd_u32_max"
make_test(
inputs = [x1, x3, x2], output = y, func_sig = "input_0.scatter_nd(updates:input_1, indices:input_2, reduction:Option::Some('max'))",
name= name)
def min():
x1 = np.arange(0,12).reshape((4,3)).astype(np.int32)
x2 = np.array([[0],[1]]).astype(np.uint32)
x3 = np.random.randint(low = 0,high=100, size=(2,3)).astype(np.uint32)
y = scatter_nd_impl(x1, x2, x3, reduction='min')
x1 = Tensor(Dtype.U32, x1.shape, x1.flatten())
x2 = Tensor(Dtype.U32, x2.shape, x2.flatten())
x3 = Tensor(Dtype.U32, x3.shape, x3.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
name = "scatter_nd_u32_min"
make_test(
inputs = [x1, x3, x2], output = y, func_sig = "input_0.scatter_nd(updates:input_1, indices:input_2, reduction:Option::Some('min'))",
name= name)
default()
add()
mul()
max()
min()
scatter_nd_3D()
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl, Trait
scalar = lambda x: Tensor(Dtype.I32, (), np.array([x]).astype(np.int32).flatten())
class Sequence_at(RunAll):
@staticmethod
def sequence_at_u32():
def positive_position():
sequence = []
shape = np.random.randint(1, 4, 2)
for _ in range(5):
values = np.random.randint(0, 6, shape).astype(np.uint32)
tensor = Tensor(Dtype.U32, values.shape, values.flatten())
sequence.append(tensor)
position = scalar(2)
name = "sequence_at_u32_positive"
make_test([sequence, position], sequence[2], "SequenceTrait::sequence_at(input_0, input_1)", name, Trait.SEQUENCE)
def negative_position():
sequence = []
shape = np.random.randint(1, 4, 2)
for _ in range(5):
values = np.random.randint(0, 6, shape).astype(np.uint32)
tensor = Tensor(Dtype.U32, values.shape, values.flatten())
sequence.append(tensor)
position = scalar(-2)
name = "sequence_at_u32_negative"
make_test([sequence, position], sequence[-2], "SequenceTrait::sequence_at(input_0, input_1)", name, Trait.SEQUENCE)
positive_position()
negative_position()
@staticmethod
def sequence_at_i32():
def positive_position():
sequence = []
shape = np.random.randint(1, 4, 2)
for _ in range(5):
values = np.random.randint(-6, 6, shape).astype(np.int32)
tensor = Tensor(Dtype.I32, values.shape, values.flatten())
sequence.append(tensor)
position = scalar(2)
name = "sequence_at_i32_positive"
make_test([sequence, position], sequence[2], "SequenceTrait::sequence_at(input_0, input_1)", name, Trait.SEQUENCE)
def negative_position():
sequence = []
shape = np.random.randint(1, 4, 2)
for _ in range(5):
values = np.random.randint(-6, 6, shape).astype(np.int32)
tensor = Tensor(Dtype.I32, values.shape, values.flatten())
sequence.append(tensor)
position = scalar(-2)
name = "sequence_at_i32_negative"
make_test([sequence, position], sequence[-2], "SequenceTrait::sequence_at(input_0, input_1)", name, Trait.SEQUENCE)
positive_position()
negative_position()
@staticmethod
def sequence_at_i8():
def positive_position():
sequence = []
shape = np.random.randint(1, 4, 2)
for _ in range(5):
values = np.random.randint(-6, 6, shape).astype(np.int8)
tensor = Tensor(Dtype.I8, values.shape, values.flatten())
sequence.append(tensor)
position = scalar(2)
name = "sequence_at_i8_positive"
make_test([sequence, position], sequence[2], "SequenceTrait::sequence_at(input_0, input_1)", name, Trait.SEQUENCE)
def negative_position():
sequence = []
shape = np.random.randint(1, 4, 2)
for _ in range(5):
values = np.random.randint(-6, 6, shape).astype(np.int8)
tensor = Tensor(Dtype.I8, values.shape, values.flatten())
sequence.append(tensor)
position = scalar(-2)
name = "sequence_at_i8_negative"
make_test([sequence, position], sequence[-2], "SequenceTrait::sequence_at(input_0, input_1)", name, Trait.SEQUENCE)
positive_position()
negative_position()
@staticmethod
def sequence_at_fp8x23():
def positive_position():
sequence = []
shape = np.random.randint(1, 4, 2)
for _ in range(5):
values = np.random.randint(-6, 6, shape).astype(np.float64)
tensor = Tensor(Dtype.FP8x23, values.shape, to_fp(values.flatten(), FixedImpl.FP8x23))
sequence.append(tensor)
position = scalar(2)
name = "sequence_at_fp8x23_positive"
make_test([sequence, position], sequence[2], "SequenceTrait::sequence_at(input_0, input_1)", name, Trait.SEQUENCE)
def negative_position():
sequence = []
shape = np.random.randint(1, 4, 2)
for _ in range(5):
values = np.random.randint(-6, 6, shape).astype(np.float64)
tensor = Tensor(Dtype.FP8x23, values.shape, to_fp(values.flatten(), FixedImpl.FP8x23))
sequence.append(tensor)
position = scalar(-2)
name = "sequence_at_fp8x23_negative"
make_test([sequence, position], sequence[-2], "SequenceTrait::sequence_at(input_0, input_1)", name, Trait.SEQUENCE)
positive_position()
negative_position()
@staticmethod
def sequence_at_fp16x16():
def positive_position():
sequence = []
shape = np.random.randint(1, 4, 2)
for _ in range(5):
values = np.random.randint(-6, 6, shape).astype(np.float64)
tensor = Tensor(Dtype.FP16x16, values.shape, to_fp(values.flatten(), FixedImpl.FP16x16))
sequence.append(tensor)
position = scalar(2)
name = "sequence_at_fp16x16_positive"
make_test([sequence, position], sequence[2], "SequenceTrait::sequence_at(input_0, input_1)", name, Trait.SEQUENCE)
def negative_position():
sequence = []
shape = np.random.randint(1, 4, 2)
for _ in range(5):
values = np.random.randint(-6, 6, shape).astype(np.float64)
tensor = Tensor(Dtype.FP16x16, values.shape, to_fp(values.flatten(), FixedImpl.FP16x16))
sequence.append(tensor)
position = scalar(-2)
name = "sequence_at_fp16x16_negative"
make_test([sequence, position], sequence[-2], "SequenceTrait::sequence_at(input_0, input_1)", name, Trait.SEQUENCE)
positive_position()
negative_position()
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl, Trait
class Sequence_construct(RunAll):
@staticmethod
def sequence_construct_u32():
sequence = []
tensor_cnt = np.random.randint(1, 10)
shape = np.random.randint(1, 4, 2)
for _ in range(tensor_cnt):
values = np.random.randint(0, 6, shape).astype(np.uint32)
tensor = Tensor(Dtype.U32, values.shape, values.flatten())
sequence.append(tensor)
name = "sequence_construct_u32"
make_test([sequence], sequence, "SequenceTrait::sequence_construct(input_0)", name, Trait.SEQUENCE)
@staticmethod
def sequence_construct_i32():
sequence = []
tensor_cnt = np.random.randint(1, 10)
shape = np.random.randint(1, 4, 2)
for _ in range(tensor_cnt):
values = np.random.randint(-6, 6, shape).astype(np.int32)
tensor = Tensor(Dtype.I32, values.shape, values.flatten())
sequence.append(tensor)
name = "sequence_construct_i32"
make_test([sequence], sequence, "SequenceTrait::sequence_construct(input_0)", name, Trait.SEQUENCE)
@staticmethod
def sequence_construct_i8():
sequence = []
tensor_cnt = np.random.randint(1, 10)
shape = np.random.randint(1, 4, 2)
for _ in range(tensor_cnt):
values = np.random.randint(-6, 6, shape).astype(np.int8)
tensor = Tensor(Dtype.I8, values.shape, values.flatten())
sequence.append(tensor)
name = "sequence_construct_i8"
make_test([sequence], sequence, "SequenceTrait::sequence_construct(input_0)", name, Trait.SEQUENCE)
@staticmethod
def sequence_construct_fp8x23():
sequence = []
tensor_cnt = np.random.randint(1, 10)
shape = np.random.randint(1, 4, 2)
for _ in range(tensor_cnt):
values = np.random.randint(-6, 6, shape).astype(np.float64)
tensor = Tensor(Dtype.FP8x23, values.shape, to_fp(values.flatten(), FixedImpl.FP8x23))
sequence.append(tensor)
name = "se |
quence_construct_fp8x23"
make_test([sequence], sequence, "SequenceTrait::sequence_construct(input_0)", name, Trait.SEQUENCE)
@staticmethod
def sequence_construct_fp16x16():
sequence = []
tensor_cnt = np.random.randint(1, 10)
shape = np.random.randint(1, 4, 2)
for _ in range(tensor_cnt):
values = np.random.randint(-6, 6, shape).astype(np.float64)
tensor = Tensor(Dtype.FP16x16, values.shape, to_fp(values.flatten(), FixedImpl.FP16x16))
sequence.append(tensor)
name = "sequence_construct_fp16x16"
make_test([sequence], sequence, "SequenceTrait::sequence_construct(input_0)", name, Trait.SEQUENCE)
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, Dtype, Tensor, Trait
class Sequence_empty(RunAll):
@staticmethod
def sequence_empty_u32():
def default():
shape=(0,)
x = np.zeros(shape, dtype=np.uint32)
t = Tensor(Dtype.U32, shape, x.flatten())
make_test(
inputs=[],
output=[t],
func_sig="SequenceTrait::sequence_empty()",
name="sequence_empty_u32",
trait=Trait.SEQUENCE
)
default()
@staticmethod
def sequence_empty_i32():
def default():
shape=(0,)
x = np.zeros(shape, dtype=np.int32)
t = Tensor(Dtype.I32, shape, x.flatten())
make_test(
inputs=[],
output=[t],
func_sig="SequenceTrait::sequence_empty()",
name="sequence_empty_i32",
trait=Trait.SEQUENCE
)
default()
@staticmethod
def sequence_empty_i8():
def default():
shape=(0,)
x = np.zeros(shape, dtype=np.int8)
t = Tensor(Dtype.I8, shape, x.flatten())
make_test(
inputs=[],
output=[t],
func_sig="SequenceTrait::sequence_empty()",
name="sequence_empty_i8",
trait=Trait.SEQUENCE
)
default()
@staticmethod
def sequence_empty_fp8x23():
def default():
shape=(0,)
x = np.zeros(shape, dtype=np.float64)
t = Tensor(Dtype.FP8x23, shape, x.flatten())
make_test(
inputs=[],
output=[t],
func_sig="SequenceTrait::sequence_empty()",
name="sequence_empty_fp8x23",
trait=Trait.SEQUENCE
)
default()
@staticmethod
def sequence_empty_fp16x16():
def default():
shape=(0,)
x = np.zeros(shape, dtype=np.float64)
t = Tensor(Dtype.FP16x16, shape, x.flatten())
make_test(
inputs=[],
output=[t],
func_sig="SequenceTrait::sequence_empty()",
name="sequence_empty_fp16x16",
trait=Trait.SEQUENCE
)
default()
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl, Trait
scalar = lambda x: Tensor(Dtype.I32, (), np.array([x]).astype(np.int32).flatten())
class Sequence_erase(RunAll):
@staticmethod
def sequence_erase_u32():
def positive_position():
sequence = []
shape = np.random.randint(1, 4, 2)
for _ in range(5):
values = np.random.randint(0, 6, shape).astype(np.uint32)
tensor = Tensor(Dtype.U32, values.shape, values.flatten())
sequence.append(tensor)
position = scalar(2)
output_sequence = sequence.copy()
output_sequence.pop(2)
name = "sequence_erase_u32_positive"
make_test([sequence, position], output_sequence, "SequenceTrait::sequence_erase(input_0, Option::Some(input_1))", name, Trait.SEQUENCE)
def negative_position():
sequence = []
shape = np.random.randint(1, 4, 2)
for _ in range(5):
values = np.random.randint(0, 6, shape).astype(np.uint32)
tensor = Tensor(Dtype.U32, values.shape, values.flatten())
sequence.append(tensor)
position = scalar(-2)
output_sequence = sequence.copy()
output_sequence.pop(-2)
name = "sequence_erase_u32_negative"
make_test([sequence, position], output_sequence, "SequenceTrait::sequence_erase(input_0, Option::Some(input_1))", name, Trait.SEQUENCE)
def empty_position():
sequence = []
shape = np.random.randint(1, 4, 2)
for _ in range(5):
values = np.random.randint(0, 6, shape).astype(np.uint32)
tensor = Tensor(Dtype.U32, values.shape, values.flatten())
sequence.append(tensor)
output_sequence = sequence.copy()
output_sequence.pop(-1)
name = "sequence_erase_u32_empty"
make_test([sequence], output_sequence, "SequenceTrait::sequence_erase(input_0, Option::None(()))", name, Trait.SEQUENCE)
positive_position()
negative_position()
empty_position()
@staticmethod
def sequence_erase_i32():
def positive_position():
sequence = []
shape = np.random.randint(1, 4, 2)
for _ in range(5):
values = np.random.randint(-6, 6, shape).astype(np.int32)
tensor = Tensor(Dtype.I32, values.shape, values.flatten())
sequence.append(tensor)
position = scalar(2)
output_sequence = sequence.copy()
output_sequence.pop(2)
name = "sequence_erase_i32_positive"
make_test([sequence, position], output_sequence, "SequenceTrait::sequence_erase(input_0, Option::Some(input_1))", name, Trait.SEQUENCE)
def negative_position():
sequence = []
shape = np.random.randint(1, 4, 2)
for _ in range(5):
values = np.random.randint(-6, 6, shape).astype(np.int32)
tensor = Tensor(Dtype.I32, values.shape, values.flatten())
sequence.append(tensor)
position = scalar(-2)
output_sequence = sequence.copy()
output_sequence.pop(-2)
name = "sequence_erase_i32_negative"
make_test([sequence, position], output_sequence, "SequenceTrait::sequence_erase(input_0, Option::Some(input_1))", name, Trait.SEQUENCE)
def empty_position():
sequence = []
shape = np.random.randint(1, 4, 2)
for _ in range(5):
values = np.random.randint(-6, 6, shape).astype(np.int32)
tensor = Tensor(Dtype.I32, values.shape, values.flatten())
sequence.append(tensor)
output_sequence = sequence.copy()
output_sequence.pop(-1)
name = "sequence_erase_i32_empty"
make_test([sequence], output_sequence, "SequenceTrait::sequence_erase(input_0, Option::None(()))", name, Trait.SEQUENCE)
positive_position()
negative_position()
empty_position()
@staticmethod
def sequence_erase_i8():
def positive_position():
sequence = []
shape = np.random.randint(1, 4, 2)
for _ in range(5):
values = np.random.randint(-6, 6, shape).astype(np.int8)
tensor = Tensor(Dtype.I8, values.shape, values.flatten())
sequence.append(tensor)
position = scalar(2)
output_sequence = sequence.copy()
output_sequence.pop(2)
name = "sequence_erase_i8_positive"
make_test([sequence, position], output_sequence, "SequenceTrait::sequence_erase(input_0, Option::Some(input_1))", name, Trait.SEQUENCE)
def negative_position():
sequence = []
shape = np.random.randint(1, 4, 2)
for _ in range(5):
values = np.random.randint(-6, 6, shape).astype(np.int8)
tensor = Tensor(Dtype.I8, values.shape, values.flatten())
sequence.append(tensor)
position = scalar(-2)
output_sequence = sequence.copy()
output_sequence.pop(-2)
name = "sequence_erase_i8_negative"
make_test([sequence, position], output_sequence, "SequenceTrait::sequence_erase(input_0, Option::Some(input_1))", name, Trait.SEQUENCE)
def empty_position():
sequence = []
shape = np.random.randint(1, 4, 2)
for _ in range(5):
values = np.random.randint(-6, 6, shape).astype(np.int8)
tensor = Tensor(Dtype.I8, values.shape, values.flatten())
sequence.append(tensor)
output_sequence = sequence.copy()
output_sequence.pop(-1)
name = "sequence_erase_i8_empty"
make_test([sequence], output_sequence, "SequenceTrait::sequence_erase(input_0, Option::None(()))", name, Trait.SEQUENCE)
positive_position()
negative_position()
empty_position()
@staticmethod
def sequence_erase_fp8x23():
def positive_position():
sequence = []
shape = np.random.randint(1, 4, 2)
for _ in range(5):
values = np.random.randint(-6, 6, shape).astype(np.float64)
tensor = Tensor(Dtype.FP8x23, values.shape, to_fp(values.flatten(), FixedImpl.FP8x23))
sequence.append(tensor)
position = scalar(2)
output_sequence = sequence.copy()
output_sequence.pop(2)
name = "sequence_erase_fp8x23_positive"
make_test([sequence, position], output_sequence, "SequenceTrait::sequence_erase(input_0, Option::Some(input_1))", name, Trait.SEQUENCE)
def negative_position():
sequence = []
shape = np.random.randint(1, 4, 2)
for _ in range(5):
values = np.random.randint(-6, 6, shape).astype(np.float64)
tensor = Tensor(Dtype.FP8x23, values.shape, to_fp(values.flatten(), FixedImpl.FP8x23))
sequence.append(tensor)
position = scalar(-2)
output_sequence = sequence.copy()
output_sequence.pop(-2)
name = "sequence_erase_fp8x23_negative"
make_test([sequence, position], output_sequence, "SequenceTrait::sequence_erase(input_0, Option::Some(input_1))", name, Trait.SEQUENCE)
def empty_position():
sequence = []
shape = np.random.randint(1, 4, 2)
for _ in range(5):
values = np.random.randint(-6, 6, shape).astype(np.float64)
tensor = Tensor(Dtype.FP8x23, values.shape, to_fp(values.flatten(), FixedImpl.FP8x23))
sequence.append(tensor)
output_sequence = sequence.copy()
output_sequence.pop(-1)
name = "sequence_erase_fp8x23_empty"
make_test([sequence], output_sequence, "SequenceTrait::sequence_erase(input_0, Option::None(()))", name, Trait.SEQUENCE)
positive_position()
negative_position()
empty_position()
@staticmethod
def sequence_erase_fp16x16():
def positive_position():
sequence = []
shape = np.random.randint(1, 4, 2)
for _ in range(5):
values = np.random.randint(-6, 6, shape).astype(np.float64)
tensor = Tensor(Dtype.FP16x16, values.shape, to_fp(values.flatten(), FixedImpl.FP16x16))
sequence.append(tensor)
position = scalar(2)
output_sequence = sequence.copy()
output_sequence.pop(2)
name = "sequence_erase_fp16x16_positive"
make_test([sequence, position], output_sequence, "SequenceTrait::sequence_erase(input_0, Option::Some(input_1))", name, Trait.SEQUENCE)
def negative_position():
sequence = []
shape = np.random.randint(1, 4, 2)
for _ in range(5):
values = np.random.randint(-6, 6, shape).astype(np.float64)
tensor = Tensor(Dtype.FP16x16, values.shape, to_fp(values.flatten(), FixedImpl.FP16x16))
sequence.append(tensor)
position = scalar(-2)
output_sequence = sequence.copy()
output_sequence.pop(-2)
name = "sequence_erase_fp16x16_negative"
make_test([sequence, position], output_sequence, "SequenceTrait::sequence_erase(input_0, Option::Some(input_1))", name, Trait.SEQUENCE)
def empty_position():
sequence = []
shape = np.random.randint(1, 4, 2)
for _ in range(5):
values = np.random.randint(-6, 6, shape).astype(np.float64)
tensor = Tensor(Dtype.FP16x16, values.shape, to_fp(values.flatten(), FixedImpl.FP16x16))
sequence.append(tensor)
output_sequence = sequence.copy()
output_sequence.pop(-1)
name = "sequence_erase_fp16x16_empty"
make_test([sequence], output_sequence, "SequenceTrait::sequence_erase(input_0, Option::None(()))", name, Trait.SEQUENCE)
positive_position()
negative_position()
empty_position()
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl, Trait
scalar = lambda x: Tensor(Dtype.I32, (), np.array([x]).astype(np.int32).flatten())
class Sequence_insert(RunAll):
@staticmethod
def sequence_insert_u32():
def default():
sequence = []
tensor_cnt = 3
shape = np.random.randint(1, 4, 2)
for _ in range(tensor_cnt):
val = np.random.randint(0, 6, shape).astype(np.uint32)
t = Tensor(Dtype.U32, val.shape, val.flatten())
sequence.append(t)
val = np.random.randint(0, 6, shape).astype(np.uint32)
tensor = Tensor(Dtype.U32, val.shape, val.flatten())
position = np.random.randint(-2, 2)
expected_sequence = sequence.copy()
expected_sequence.insert(position, tensor)
name = "sequence_insert_u32"
make_test([sequence, tensor, scalar(position)], expected_sequence, "input_0.sequence_insert(@input_1,Option::Some(input_2))", name, Trait.SEQUENCE)
default()
@staticmethod
def sequence_insert_i32():
def default():
sequence = []
tensor_cnt = 3
shape = np.random.randint(1, 4, 2)
for _ in range(tensor_cnt):
val = np.random.randint(0, 6, shape).astype(np.int32)
t = Tensor(Dtype.I32, val.shape, val.flatten())
sequence.append(t)
val = np.random.randint(0, 6, shape).astype(np.int32)
tensor = Tensor(Dtype.I32, val.shape, val.flatten())
position = np.random.randint(-2, 2)
expected_sequence = sequence.copy()
expected_sequence.insert(position, tensor)
name = "sequence_insert_i32"
make_test([sequence, tensor, scalar(position)], expected_sequence, "input_0.sequence_insert(@input_1,Option::Some(input_2))", name, Trait.SEQUENCE)
default()
@staticmethod
def sequence_insert_i8():
def default():
sequence = []
tensor_cnt = 3
shape = np.random.randint(1, 4, 2)
for _ in range(tensor_cnt):
val = np.random.randint(0, 6, shape).astype(np.int8)
t = Tensor(Dtype.I8, val.shape, val.flatten())
sequence.append(t)
val = np.random.randint(0, 6, shape).astype(np.int8)
tensor = Tensor(Dtype.I8, val.shape, val.flatten())
position = np.random.randint(-2, 2)
expected_sequence = sequence.copy()
expected_sequence.insert(position, tensor)
name = "sequence_insert_i8"
make_test([sequence, tensor, scalar(position)], expected_sequence, "input_0.sequence_insert(@input_1,Option::Some(input_2))", name, Trait.SEQUENCE)
default()
@staticmethod
def sequence_insert_fp8x23():
def default():
sequence = []
tensor_cnt = 3
shape = np.random.randint(1, 4, 2)
for _ in range(tensor_cnt):
val = np.random.randint(0, 6, shape).astype(np.float64)
t = Tensor(Dtype.FP8x23, val.shape, to_fp(
val.flatten(), FixedImpl.FP8x23))
sequence.append(t)
val = np.random.randint(0, 6, shape).astype(np.float64)
tensor = Tensor(Dtype.FP8x23, val.shape, to_fp(
val.flatten(), FixedImpl.FP8x23))
position = np.random.randint(-2, 2)
expected_sequence = sequence.copy()
expected_sequence.insert(position, tensor)
name = "sequence_insert_fp8x23"
make_test([sequence, tensor, scalar(position)], expected_sequence, "input_0.sequence_insert(@input_1,Option::Some(input_2))", name, Trait.SEQUENCE)
default()
@staticmethod
def sequence_insert_fp16x16():
def default():
sequence = []
tensor_cnt = 3
shape = np.random.randint(1, 4, 2)
for _ in range(tensor_cnt):
val = np.random.randint(0, 6, shape).astype(np.float64)
t = Tensor(Dtype.FP16x16, val.shape, to_fp(
val.flatten(), FixedImpl.FP16x16))
sequence.append(t)
val = np.random.randint(0, 6, shape).astype(np.float64)
tensor = Tensor(Dtype.FP16x16, val.shape, to_fp(
val.flatten(), FixedImpl.FP16x16))
position = np.random.randint(-2, 2)
expected_sequence = sequence.copy()
expected_sequence.insert(position, tensor)
name = "sequence_insert_fp16x16"
make_test([sequence, tensor, scalar(position)], expected_sequence, "input_0.sequence_insert(@input_1,Option::Some(input_2))", name, Trait.SEQUENCE)
default()
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl, Trait
scalar = lambda x: Tensor(Dtype.U32, (), np.array([x]).astype(np.uint32).flatten())
class Sequence_length(RunAll):
@staticmethod
def sequence_length_u32():
def default():
sequence = []
tensor_cnt = np.random.randint(1, 10)
shape = np.random.randint(1, 4, 2)
for _ in range(tensor_cnt):
values = np.random.randint(0, 6, shape).astype(np.uint32)
tensor = Tensor(Dtype.U32, values.shape, values.flatten())
sequence.append(tensor)
name = "sequence_length_u32"
make_test([sequence], scalar(len(sequence)), "input_0.sequence_length()", name, Trait.SEQUENCE)
def broadcast():
sequence = []
tensor_cnt = np.random.randint(1, 10)
for _ in range(tensor_cnt):
shape = np.random.randint(1, 4, 2)
values = np.random.randint(0, 6, shape).astype(np.uint32)
tensor = Tensor(Dtype.U32, values.shape, values.flatten())
sequence.append(tensor)
name = "sequence_length_u32_broadcast"
make_test([sequence], scalar(len(sequence)), "input_0.sequence_length()", name, Trait.SEQUENCE)
default()
broadcast()
@staticmethod
def sequence_length_i32():
def default():
sequence = []
tensor_cnt = np.random.randint(1, 10)
shape = np.random.randint(1, 4, 2)
for _ in range(tensor_cnt):
values = np.random.randint(-6, 6, shape).astype(np.int32)
tensor = Tensor(Dtype.I32, values.shape, values.flatten())
sequence.append(tensor)
name = "sequence_length_i32"
make_test([sequence], scalar(len(sequence)), "input_0.sequence_length()", name, Trait.SEQUENCE)
def broadcast():
sequence = []
tensor_cnt = np.random.randint(1, 10)
for _ in range(tensor_cnt):
shape = np.random.randint(1, 4, 2)
values = np.random.randint(-6, 6, shape).astype(np.int32)
tensor = Tensor(Dtype.I32, values.shape, values.flatten())
sequence.append(tensor)
name = "sequence_length_i32_broadcast"
make_test([sequence], scalar(len(sequence)), "input_0.sequence_length()", name, Trait.SEQUENCE)
default()
broadcast()
@staticmethod
def sequence_length_i8():
def default():
sequence = []
tensor_cnt = np.random.randint(1, 10)
shape = np.random.randint(1, 4, 2)
for _ in range(tensor_cnt):
values = np.random.randint(-6, 6, shape).astype(np.int8)
tensor = Tensor(Dtype.I8, values.shape, values.flatten())
sequence.append(tensor)
name = "sequence_length_i8"
make_test([sequence], scalar(len(sequence)), "input_0.sequence_length()", name, Trait.SEQUENCE)
def broadcast():
sequence = []
tensor_cnt = np.random.randint(1, 10)
for _ in range(tensor_cnt):
shape = np.random.randint(1, 4, 2)
values = np.random.randint(-6, 6, shape).astype(np.int8)
tensor = Tensor(Dtype.I8, values.shape, values.flatten())
sequence.append(tensor)
name = "sequence_length_i8_broadcast"
make_test([sequence], scalar(len(sequence)), "input_0.sequence_length()", name, Trait.SEQUENCE)
default()
broadcast()
@staticmethod
def sequence_length_fp8x23():
def default():
sequence = []
tensor_cnt = np.random.randint(1, 10)
shape = np.random.randint(1, 4, 2)
for _ in range(tensor_cnt):
values = np.random.randint(-6, 6, shape).astype(np.float64)
tensor = Tensor(Dtype.FP8x23, values.shape, to_fp(values.flatten(), FixedImpl.FP8x23))
sequence.append(tensor)
name = "sequence_length_fp8x23"
make_test([sequence], scalar(len(sequence)), "input_0.sequence_length()", name, Trait.SEQUENCE)
def broadcast():
sequence = []
tensor_cnt = np.random.randint(1, 10)
for _ in range(tensor_cnt):
shape = np.random.randint(1, 4, 2)
values = np.random.randint(-6, 6, shape).astype(np.float64)
tensor = Tensor(Dtype.FP8x23, values.shape, to_fp(values.flatten(), FixedImpl.FP8x23))
sequence.append(tensor)
name = "sequence_length_fp8x23_broadcast"
make_test([sequence], scalar(len(sequence)), "input_0.sequence_length()", name, Trait.SEQUENCE)
default()
broadcast()
@staticmethod
def sequence_length_fp16x16():
def default():
sequence = []
tensor_cnt = np.random.randint(1, 10)
shape = np.random.randint(1, 4, 2)
for _ in range(tensor_cnt):
values = np.random.randint(-6, 6, shape).astype(np.float64)
tensor = Tensor(Dtype.FP16x16, values.shape, to_fp(values.flatten(), FixedImpl.FP16x16))
sequence.append(tensor)
name = "sequence_length_fp16x16"
make_test([sequence], scalar(len(sequence)), "input_0.sequence_length()", name, Trait.SEQUENCE)
def broadcast():
sequence = []
tensor_cnt = np.random.randint(1, 10)
for _ in range(tensor_cnt):
shape = np.random.randint(1, 4, 2)
values = np.random.randint(-6, 6, shape).astype(np.float64)
tensor = Tensor(Dtype.FP16x16, values.shape, to_fp(values.flatten(), FixedImpl.FP16x16))
sequence.append(tensor)
name = "sequence_length_fp16x16_broadcast"
make_test([sequence], scalar(len(sequence)), "input_0.sequence_length()", name, Trait.SEQUENCE)
default()
broadcast()
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl
def shrink(input_array: np.ndarray, bias: float, lambd: float) -> np.ndarray:
output_array = np.where(input_array > lambd, input_array - bias,
np.where(input_array < -lambd, input_array + bias, 0))
return output_array
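# Illustrative sketch (not part of the original generator): with lambd=1 the
# shrink() helper zeroes values in [-lambd, lambd]; a non-zero bias also
# shifts the surviving values toward zero (soft shrinkage).
_shrink_example = np.array([-2.0, -0.5, 0.5, 2.0])
assert np.array_equal(shrink(_shrink_example, bias=0.0, lambd=1.0), np.array([-2.0, 0.0, 0.0, 2.0]))
assert np.array_equal(shrink(_shrink_example, bias=1.0, lambd=1.0), np.array([-1.0, 0.0, 0.0, 1.0]))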
class Shrink(RunAll):
@staticmethod
def shrink_fp8x23():
def shrink_hard():
x = np.random.uniform(-3, 3, (3, 3, 3)).astype(np.float64)
bias = np.float64(0)
lambd = np.float64(1)
y = shrink(x, bias, lambd)
x = Tensor(Dtype.FP8x23, x.shape, to_fp(
x.flatten(), FixedImpl.FP8x23))
y = Tensor(Dtype.FP8x23, y.shape, to_fp(
y.flatten(), FixedImpl.FP8x23))
name = "shrink_hard_fp8x23"
make_test([x], y, "TensorTrait::shrink(input_0, Option::None(()), Option::Some(FixedTrait::new(8388608, false)))", name)
def shrink_soft():
x = np.random.uniform(-3, 3, (3, 3, 3)).astype(np.float64)
bias = np.float64(1)
lambd = np.float64(1)
y = shrink(x, bias, lambd)
x = Tensor(Dtype.FP8x23, x.shape, to_fp(
x.flatten(), FixedImpl.FP8x23))
y = Tensor(Dtype.FP8x23, y.shape, to_fp(
y.flatten(), FixedImpl.FP8x23))
name = "shrink_soft_fp8x23"
make_test([x], y, "TensorTrait::shrink(input_0, Option::Some(FixedTrait::new(8388608, false)), Option::Some(FixedTrait::new(8388608, false)))", name)
shrink_hard()
shrink_soft()
@staticmethod
def shrink_fp16x16():
def shrink_hard():
x = np.random.uniform(-3, 3, (3, 3, 3)).astype(np.float64)
bias = np.float64(0)
lambd = np.float64(1)
y = shrink(x, bias, lambd)
x = Tensor(Dtype.FP16x16, x.shape, to_fp(
x.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(
y.flatten(), FixedImpl.FP16x16))
name = "shrink_hard_fp16x16"
make_test([x], y, "TensorTrait::shrink(input_0, Option::None(()), Option::Some(FixedTrait::new(65536, false)))", name)
def shrink_soft():
x = np.random.uniform(-3, 3, (3, 3, 3)).astype(np.float64)
bias = np.float64(1)
lambd = np.float64(1)
y = shrink(x, bias, lambd)
x = Tensor(Dtype.FP16x16, x.shape, to_fp(
x.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(
y.flatten(), FixedImpl.FP16x16))
name = "shrink_soft_fp16x16"
make_test([x], y, "TensorTrait::shrink(input_0, Option::Some(FixedTrait::new(65536, false)), Option::Some(FixedTrait::new(65536, false)))", name)
shrink_hard()
shrink_soft()
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl, Trait
import tensorflow as tf
class Sigmoid(RunAll):
@staticmethod
def fp8x23():
x = np.random.uniform(-3, 3, (2, 2)).astype(np.float32)
y = tf.keras.activations.sigmoid(x).numpy()
x = Tensor(Dtype.FP8x23, x.shape, to_fp(
x.flatten(), FixedImpl.FP8x23))
y = Tensor(Dtype.FP8x23, y.shape, to_fp(
y.flatten(), FixedImpl.FP8x23))
name = "sigmoid_fp8x23"
make_test([x], y, "NNTrait::sigmoid(@input_0)",
name, Trait.NN)
@staticmethod
def fp16x16():
x = np.random.uniform(-3, 3, (2, 2)).astype(np.float32)
y = tf.keras.activations.sigmoid(x).numpy()
x = Tensor(Dtype.FP16x16, x.shape, to_fp(
x.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(
y.flatten(), FixedImpl.FP16x16))
name = "sigmoid_fp16x16"
make_test([x], y, "NNTrait::sigmoid(@input_0)",
name, Trait.NN)
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl
class Sign(RunAll):
@staticmethod
def sign_i8():
def sign():
x = np.array(range(-5, 6)).astype(np.int8)
y = np.array([-1, -1, -1, -1, -1, 0, 1, 1, 1, 1, 1]).astype(np.int8)
x = Tensor(Dtype.I8, x.shape, x.flatten())
y = Tensor(Dtype.I8, y.shape, y.flatten())
name = "sign_i8"
make_test(
[x], y, "input_0.sign()", name)
sign()
@staticmethod
def sign_i32():
def sign():
x = np.array(range(-5, 6)).astype(np.int32)
y = np.array([-1, -1, -1, -1, -1, 0, 1, 1, 1, 1, 1]).astype(np.int32)
x = Tensor(Dtype.I32, x.shape, x.flatten())
y = Tensor(Dtype.I32, y.shape, y.flatten())
name = "sign_i32"
make_test(
[x], y, "input_0.sign()", name)
sign()
@staticmethod
def sign_fail():
def sign():
x = np.array(range(-5, 6)).astype(np.int32)
y = np.array([1, -1, -1, -1, -1, 0, 1, 1, 1, 1, -1]).astype(np.int32)
x = Tensor(Dtype.I32, x.shape, x.flatten())
y = Tensor(Dtype.I32, y.shape, y.flatten())
name = "sign_fail"
make_test(
[x], y, "input_0.sign()", name)
sign()
@staticmethod
def sign_fP16x16():
def sign():
x = to_fp (np.array(range(-5, 6)).astype(np.int64), FixedImpl.FP16x16)
y = to_fp (np.array([-1, -1, -1, -1, -1, 0, 1, 1, 1, 1, 1]).astype(np.int64), FixedImpl.FP16x16)
x = Tensor(Dtype.FP16x16, x.shape, x.flatten())
y = Tensor(Dtype.FP16x16, y.shape, y.flatten())
name = "sign_fP16x16"
make_test(
[x], y, "input_0.sign()", name)
sign()
@staticmethod
def sign_fP8x23():
def sign():
x = to_fp (np.array(range(-5, 6)).astype(np.int64), FixedImpl.FP8x23)
y = to_fp (np.array([-1, -1, -1, -1, -1, 0, 1, 1, 1, 1, 1]).astype(np.int64), FixedImpl.FP8x23)
x = Tensor(Dtype.FP8x23, x.shape, x.flatten())
y = Tensor(Dtype.FP8x23, y.shape, y.flatten())
name = "sign_fP8x23"
make_test(
[x], y, "input_0.sign()", name)
sign()
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl
class Sin(RunAll):
@staticmethod
def sin_fp8x23():
x = np.random.uniform(-3, 7, (2, 2)).astype(np.float64)
y = np.sin(x)
x = Tensor(Dtype.FP8x23, x.shape, to_fp(
x.flatten(), FixedImpl.FP8x23))
y = Tensor(Dtype.FP8x23, y.shape, to_fp(
y.flatten(), FixedImpl.FP8x23))
name = "sin_fp8x23"
make_test([x], y, "input_0.sin()", name)
@staticmethod
def sin_fp16x16():
x = np.random.uniform(-3, 7, (2, 2)).astype(np.float64)
y = np.sin(x)
x = Tensor(Dtype.FP16x16, x.shape, to_fp(
x.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(
y.flatten(), FixedImpl.FP16x16))
name = "sin_fp16x16"
make_test([x], y, "input_0.sin()", name)
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl
class Sinh(RunAll):
@staticmethod
def sinh_fp8x23():
x = np.random.uniform(-3, 3, (2, 2)).astype(np.float64)
y = np.sinh(x)
x = Tensor(Dtype.FP8x23, x.shape, to_fp(
x.flatten(), FixedImpl.FP8x23))
y = Tensor(Dtype.FP8x23, y.shape, to_fp(
y.flatten(), FixedImpl.FP8x23))
name = "sinh_fp8x23"
make_test([x], y, "input_0.sinh()", name)
@staticmethod
def sinh_fp16x16():
x = np.random.uniform(-3, 3, (2, 2)).astype(np.float64)
y = np.sinh(x)
x = Tensor(Dtype.FP16x16, x.shape, to_fp(
x.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(
y.flatten(), FixedImpl.FP16x16))
name = "sinh_fp16x16"
make_test([x], y, "input_0.sinh()", name)
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl
class Slice(RunAll):
@staticmethod
def slice_u32():
def slice_2D():
x = np.random.randint(0, 255, (2, 4)).astype(np.uint32)
y = x[0:2, 2:4]
x = Tensor(Dtype.U32, x.shape, x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
name = "slice_u32_2d"
make_test(
[x], y, "input_0.slice(array![0, 2].span(), array![2, 4].span(), Option::Some(array![0, 1].span()), Option::Some(array![1, 1].span()))", name)
def slice_3D():
x = np.random.randint(0, 255, (20, 10, 5)).astype(np.uint32)
y = x[0:3, 0:10:3]
x = Tensor(Dtype.U32, x.shape, x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
name = "slice_u32_3d"
make_test(
[x], y, "input_0.slice(array![0, 0].span(), array![3, 10].span(), Option::Some(array![0, 1].span()), Option::Some(array![1, 3].span()))", name)
slice_2D()
slice_3D()
@staticmethod
def slice_i32():
def slice_2D():
x = np.random.randint(-127, 127, (2, 4)).astype(np.int32)
y = x[0:2, 2:4]
x = Tensor(Dtype.I32, x.shape, x.flatten())
y = Tensor(Dtype.I32, y.shape, y.flatten())
name = "slice_i32_2d"
make_test(
[x], y, "input_0.slice(array![0, 2].span(), array![2, 4].span(), Option::Some(array![0, 1].span()), Option::Some(array![1, 1].span()))", name)
def slice_3D():
x = np.random.randint(-127, 127, (20, 10, 5)).astype(np.int32)
y = x[0:3, 0:10:3]
x = Tensor(Dtype.I32, x.shape, x.flatten())
y = Tensor(Dtype.I32, y.shape, y.flatten())
name = "slice_i32_3d"
make_test(
[x], y, "input_0.slice(array![0, 0].span(), array![3, 10].span(), Option::Some(array![0, 1].span()), Option::Some(array![1, 3].span()))", name)
slice_2D()
slice_3D()
@staticmethod
def slice_i8():
def slice_2D():
x = np.random.randint(-127, 127, (2, 4)).astype(np.int8)
y = x[0:2, 2:4]
x = Tensor(Dtype.I8, x.shape, x.flatten())
y = Tensor(Dtype.I8, y.shape, y.flatten())
name = "slice_i8_2d"
make_test(
[x], y, "input_0.slice(array![0, 2].span(), array![2, 4].span(), Option::Some(array![0, 1].span()), Option::Some(array![1, 1].span()))", name)
def slice_3D():
x = np.random.randint(-127, 127, (20, 10, 5)).astype(np.int8)
y = x[0:3, 0:10:3]
x = Tensor(Dtype.I8, x.shape, x.flatten())
y = Tensor(Dtype.I8, y.shape, y.flatten())
name = "slice_i8_3d"
make_test(
[x], y, "input_0.slice(array![0, 0].span(), array![3, 10].span(), Option::Some(array![0, 1].span()), Option::Some(array![1, 3].span()))", name)
slice_2D()
slice_3D()
@staticmethod
def slice_fp8x23():
def slice_2D():
x = to_fp(np.random.randint(-127, 127, (2, 4)
).astype(np.int64), FixedImpl.FP8x23)
y = x[0:2, 2:4]
x = Tensor(Dtype.FP8x23, x.shape, x.flatten())
y = Tensor(Dtype.FP8x23, y.shape, y.flatten())
name = "slice_fp8x23_2d"
make_test(
[x], y, "input_0.slice(array![0, 2].span(), array![2, 4].span(), Option::Some(array![0, 1].span()), Option::Some(array![1, 1].span()))", name)
def slice_3D():
x = to_fp(np.random.randint(-127, 127, (20, 10, 5)
).astype(np.int64), FixedImpl.FP8x23)
y = x[0:3, 0:10:3]
x = Tensor(Dtype.FP8x23, x.shape, x.flatten())
y = Tensor(Dtype.FP8x23, y.shape, y.flatten())
name = "slice_fp8x23_3d"
make_test(
[x], y, "input_0.slice(array![0, 0].span(), array![3, 10].span(), Option::Some(array![0, 1].span()), Option::Some(array![1 |
, 3].span()))", name)
slice_2D()
slice_3D()
@staticmethod
def slice_fp16x16():
def slice_2D():
x = to_fp(np.random.randint(-127, 127, (2, 4)
).astype(np.int64), FixedImpl.FP16x16)
y = x[0:2, 2:4]
x = Tensor(Dtype.FP16x16, x.shape, x.flatten())
y = Tensor(Dtype.FP16x16, y.shape, y.flatten())
name = "slice_fp16x16_2d"
make_test(
[x], y, "input_0.slice(array![0, 2].span(), array![2, 4].span(), Option::Some(array![0, 1].span()), Option::Some(array![1, 1].span()))", name)
def slice_3D():
x = to_fp(np.random.randint(-127, 127, (20, 10, 5)
).astype(np.int64), FixedImpl.FP16x16)
y = x[0:3, 0:10:3]
x = Tensor(Dtype.FP16x16, x.shape, x.flatten())
y = Tensor(Dtype.FP16x16, y.shape, y.flatten())
name = "slice_fp16x16_3d"
make_test(
[x], y, "input_0.slice(array![0, 0].span(), array![3, 10].span(), Option::Some(array![0, 1].span()), Option::Some(array![1, 3].span()))", name)
slice_2D()
slice_3D()
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl, Trait
def softmax(x: np.ndarray, axis: int = -1) -> np.ndarray:
x_max = np.max(x, axis=axis, keepdims=True)
tmp = np.exp(x - x_max)
s = np.sum(tmp, axis=axis, keepdims=True)
return tmp / s
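# Illustrative sketch (not part of the original generator): the max
# subtraction in softmax() is only for numerical stability; the output still
# sums to 1 along the reduced axis.
assert np.allclose(softmax(np.array([[1.0, 2.0, 3.0]])).sum(axis=-1), 1.0)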
class Softmax(RunAll):
@staticmethod
def axis_0():
x = np.abs(np.random.randn(3, 4, 5).astype(np.float32))
y = softmax(x, axis=0)
x = Tensor(Dtype.FP16x16, x.shape, to_fp(
x.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(
y.flatten(), FixedImpl.FP16x16))
name = "softmax_axis_0"
make_test([x], y, "NNTrait::softmax(@input_0, Option::Some(0))",
name, Trait.NN)
@staticmethod
def axis_1():
x = np.abs(np.random.randn(3, 4, 5).astype(np.float32))
y = softmax(x, axis=1)
x = Tensor(Dtype.FP16x16, x.shape, to_fp(
x.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(
y.flatten(), FixedImpl.FP16x16))
name = "softmax_axis_1"
make_test([x], y, "NNTrait::softmax(@input_0, Option::Some(1))",
name, Trait.NN)
@staticmethod
def axis_2():
x = np.abs(np.random.randn(3, 4, 5).astype(np.float32))
y = softmax(x, axis=2)
x = Tensor(Dtype.FP16x16, x.shape, to_fp(
x.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(
y.flatten(), FixedImpl.FP16x16))
name = "softmax_axis_2"
make_test([x], y, "NNTrait::softmax(@input_0, Option::Some(2))",
name, Trait.NN)
@staticmethod
def axis_minus_1():
x = np.abs(np.random.randn(3, 4, 5).astype(np.float32))
y = softmax(x, axis=-1)
x = Tensor(Dtype.FP16x16, x.shape, to_fp(
x.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(
y.flatten(), FixedImpl.FP16x16))
name = "softmax_axis_minus_1"
make_test([x], y, "NNTrait::softmax(@input_0, Option::None)",
name, Trait.NN)
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl, Trait
def softmax_zero(x: np.ndarray, axis: int = -1) -> np.ndarray:
x_max = np.max(x, axis=axis, keepdims=True)
tmp = np.exp(x - x_max)
tmp = np.where(x == 0.0, 0.0, tmp)
s = np.sum(tmp, axis=axis, keepdims=True)
s = np.where(s == 0.0, 1, s)
return tmp / s
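# Illustrative sketch (not part of the original generator): softmax_zero()
# keeps exact zeros at zero and renormalises only over the non-zero entries.
assert np.allclose(softmax_zero(np.array([[0.0, 1.0, 1.0]])), np.array([[0.0, 0.5, 0.5]]))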
class Softmax_zero(RunAll):
@staticmethod
def fp8x23():
x = np.random.uniform(-3, 3, (2, 2)).astype(np.float64)
y = softmax_zero(x)
x = Tensor(Dtype.FP8x23, x.shape, to_fp(
x.flatten(), FixedImpl.FP8x23))
y = Tensor(Dtype.FP8x23, y.shape, to_fp(
y.flatten(), FixedImpl.FP8x23))
name = "softmax_zero_fp8x23"
make_test([x], y, "NNTrait::softmax_zero(@input_0, 1)",
name, Trait.NN)
@staticmethod
def fp16x16():
x = np.random.uniform(-3, 3, (2, 2)).astype(np.float64)
y = softmax_zero(x)
x = Tensor(Dtype.FP16x16, x.shape, to_fp(
x.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(
y.flatten(), FixedImpl.FP16x16))
name = "softmax_zero_fp16x16"
make_test([x], y, "NNTrait::softmax_zero(@input_0, 1)",
name, Trait.NN)
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl, Trait
def softplus(x: np.ndarray) -> np.ndarray:
return np.log(np.exp(x) + 1)
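# Illustrative sketch (not part of the original generator): softplus is a
# smooth approximation of ReLU, with softplus(0) = ln(2).
assert np.allclose(softplus(np.array([0.0])), np.log(2.0))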
class Softplus(RunAll):
@staticmethod
def softplus_fp():
def fp8x23():
x = np.random.uniform(-3, 3, (2, 2)).astype(np.float64)
y = softplus(x)
x = Tensor(Dtype.FP8x23, x.shape, to_fp(
x.flatten(), FixedImpl.FP8x23))
y = Tensor(Dtype.FP8x23, y.shape, to_fp(
y.flatten(), FixedImpl.FP8x23))
name = "softplus_fp8x23"
make_test([x], y, "NNTrait::softplus(@input_0)",
name, Trait.NN)
def fp16x16():
x = np.random.uniform(-3, 3, (2, 2)).astype(np.float64)
y = softplus(x)
x = Tensor(Dtype.FP16x16, x.shape, to_fp(
x.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(
y.flatten(), FixedImpl.FP16x16))
name = "softplus_fp16x16"
make_test([x], y, "NNTrait::softplus(@input_0)",
name, Trait.NN)
fp8x23()
fp16x16()
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl, Trait
def softsign(x: np.ndarray) -> np.ndarray:
return x / (1 + np.abs(x))
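# Illustrative sketch (not part of the original generator): softsign squashes
# inputs into (-1, 1), e.g. softsign(+/-1) = +/-0.5.
assert np.allclose(softsign(np.array([1.0, -1.0])), np.array([0.5, -0.5]))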
class Softsign(RunAll):
@staticmethod
def softsign_fp():
def fp8x23():
x = np.random.uniform(-5, 7, (2, 2)).astype(np.float64)
y = softsign(x)
x = Tensor(Dtype.FP8x23, x.shape, to_fp(
x.flatten(), FixedImpl.FP8x23))
y = Tensor(Dtype.FP8x23, y.shape, to_fp(
y.flatten(), FixedImpl.FP8x23))
name = "softsign_fp8x23"
make_test([x], y, "NNTrait::softsign(@input_0)",
name, Trait.NN)
def fp16x16():
x = np.random.uniform(-5, 7, (2, 2)).astype(np.float64)
y = softsign(x)
x = Tensor(Dtype.FP16x16, x.shape, to_fp(
x.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(
y.flatten(), FixedImpl.FP16x16))
name = "softsign_fp16x16"
make_test([x], y, "NNTrait::softsign(@input_0)",
name, Trait.NN)
fp8x23()
fp16x16()
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl, Trait
def space_to_depth(data: np.ndarray, blocksize: int = 2) -> np.ndarray:
if len(data.shape) != 4:
raise RuntimeError(f"Unexpected shape {data.shape!r}.")
b, C, H, W = data.shape
tmpshape = (
b,
C,
H // blocksize,
blocksize,
W // blocksize,
blocksize,
)
reshaped = np.reshape(data, tmpshape)
transposed = np.transpose(reshaped, [0, 3, 5, 1, 2, 4])
finalshape = (
b,
C * blocksize * blocksize,
H // blocksize,
W // blocksize,
)
y = np.reshape(transposed, finalshape).astype(data.dtype)
return y
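# Illustrative sketch (not part of the original generator): with blocksize=2
# an input of shape (1, 2, 2, 4) is rearranged to (1, 8, 1, 2); each 2x2
# spatial block is moved into the channel dimension.
assert space_to_depth(np.zeros((1, 2, 2, 4)), blocksize=2).shape == (1, 8, 1, 2)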
class Space_to_depth(RunAll):
@staticmethod
def fp8x23():
x = np.random.uniform(-3, 3, (1, 2, 2, 4)).astype(np.float64)
y = space_to_depth(x)
x = Tensor(Dtype.FP8x23, x.shape, to_fp(
x.flatten(), FixedImpl.FP8x23))
y = Tensor(Dtype.FP8x23, y.shape, to_fp(
y.flatten(), FixedImpl.FP8x23))
name = "space_to_depth_fp8x23"
make_test([x], y, "NNTrait::space_to_depth(@input_0, 2)",
name, Trait.NN)
@staticmethod
def fp16x16():
x = np.random.uniform(-3, 3, (1, 2, 2, 4)).astype(np.float16)
y = space_to_depth(x)
x = Tensor(Dtype.FP16x16, x.shape, to_fp(
x.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(
y.flatten(), FixedImpl.FP16x16))
name = "space_to_depth_fp16x16"
make_test([x], y, "NNTrait::space_to_depth(@input_0, 2)",
name, Trait.NN)
@staticmethod
def fpi8():
x = np.random.randint(-3, 3, (1, 2, 2, 4)).astype(np.int8)
y = space_to_depth(x)
x = Tensor(Dtype.I8, x.shape, x.flatten())
y = Tensor(Dtype.I8, y.shape, y.flatten())
name = "space_to_depth_i8"
make_test([x], y, "NNTrait::space_to_depth(@input_0, 2)",
name, Trait.NN)
@staticmethod
def fpi32():
x = np.random.randint(-3, 3, (1, 2, 2, 4)).astype(np.int32)
y = space_to_depth(x)
x = Tensor(Dtype.I32, x.shape, x.flatten())
y = Tensor(Dtype.I32, y.shape, y.flatten())
name = "space_to_depth_i32"
make_test([x], y, "NNTrait::space_to_depth(@input_0, 2)",
name, Trait.NN)
@staticmethod
def fpu32():
x = np.random.randint(-3, 3, (1, 2, 2, 4)).astype(np.uint32)
y = space_to_depth(x)
x = Tensor(Dtype.U32, x.shape, x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
name = "spac |
e_to_depth_u32"
make_test([x], y, "NNTrait::space_to_depth(@input_0, 2)",
                   name, Trait.NN)
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl
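# Test generator for Tensor::split over u32 and FP16x16 tensors, covering equal parts,
# explicit split sizes, zero-size splits, and uneven splits in 1D and 2D.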
class Split(RunAll):
@staticmethod
def split_u32():
def split_1D():
x = np.random.randint(0, 255, 6).astype(np.uint32)
y = [
np.array(x[0:2]).astype(np.uint32),
np.array(x[2:4]).astype(np.uint32),
np.array(x[4:6]).astype(np.uint32),
]
_x = Tensor(Dtype.U32, x.shape, x.flatten())
_y = [
Tensor(Dtype.U32, y[0].shape, y[0].flatten()),
Tensor(Dtype.U32, y[1].shape, y[1].flatten()),
Tensor(Dtype.U32, y[2].shape, y[2].flatten()),
]
name = "split_u32_1d_equal_parts"
make_test(
[_x], _y, "input_0.split(0, Option::Some(3), Option::None(()))", name)
y = [
np.array(x[0:2]).astype(np.uint32),
np.array(x[2:6]).astype(np.uint32),
]
_y = [
Tensor(Dtype.U32, y[0].shape, y[0].flatten()),
Tensor(Dtype.U32, y[1].shape, y[1].flatten()),
]
name = "split_u32_1d_variable_parts"
make_test(
[_x], _y, "input_0.split(0, Option::None(()), Option::Some(TensorTrait::<u32>::new(shape: array![2].span(), data: array![2, 4].span(),)))", name)
def split_2D():
x = np.random.randint(0, 255, (2, 6)).astype(np.uint32)
y = [
np.array(x[0:2, 0:3]).astype(np.uint32),
np.array(x[0:2, 3:6]).astype(np.uint32),
]
_x = Tensor(Dtype.U32, x.shape, x.flatten())
_y = [
Tensor(Dtype.U32, y[0].shape, y[0].flatten()),
Tensor(Dtype.U32, y[1].shape, y[1].flatten()),
]
name = "split_u32_2d_equal_parts"
make_test(
[_x], _y, "input_0.split(1, Option::Some(2), Option::None(()))", name)
y = [
np.array(x[0:2, 0:2]).astype(np.uint32),
                np.array(x[0:2, 2:6]).astype(np.uint32)
]
_y = [
Tensor(Dtype.U32, y[0].shape, y[0].flatten()),
Tensor(Dtype.U32, y[1].shape, y[1].flatten()),
]
name = "split_u32_2d_variable_parts"
make_test(
[_x], _y, "input_0.split(1, Option::None(()), Option::Some(TensorTrait::<u32>::new(shape: array![2].span(), data: array![2, 4].span(),)))", name)
def split_zero_size():
x = np.array([]).astype(np.uint32)
y = [
np.array([]).astype(np.uint32),
np.array([]).astype(np.uint32),
np.array([]).astype(np.uint32),
]
_x = Tensor(Dtype.U32, x.shape, x.flatten())
_y = [
Tensor(Dtype.U32, y[0].shape, y[0].flatten()),
Tensor(Dtype.U32, y[1].shape, y[1].flatten()),
Tensor(Dtype.U32, y[2].shape, y[2].flatten()),
]
name = "split_u32_zero_size"
make_test(
[_x], _y, "input_0.split(0, Option::None(()), Option::Some(TensorTrait::<u32>::new(shape: array![3].span(), data: array![0, 0, 0].span(),)))", name)
def split_1d_uneven():
x = np.random.randint(0, 255, 7).astype(np.uint32)
y = [
np.array(x[0:2]).astype(np.uint32),
np.array(x[2:4]).astype(np.uint32),
np.array(x[4:6]).astype(np.uint32),
np.array(x[6:7]).astype(np.uint32),
]
_x = Tensor(Dtype.U32, x.shape, x.flatten())
_y = [
Tensor(Dtype.U32, y[0].shape, y[0].flatten()),
Tensor(Dtype.U32, y[1].shape, y[1].flatten()),
Tensor(Dtype.U32, y[2].shape, y[2].flatten()),
Tensor(Dtype.U32, y[3].shape, y[3].flatten()),
]
name = "split_u32_1d_uneven"
make_test(
[_x], _y, "input_0.split(0, Option::Some(4), Option:: |
None(()))", name)
def split_2d_uneven():
x = np.random.randint(0, 255, (2, 8)).astype(np.uint32)
y = [
np.array(x[0:2, 0:3]).astype(np.uint32),
np.array(x[0:2, 3:6]).astype(np.uint32),
np.array(x[0:2, 6:8]).astype(np.uint32)
]
_x = Tensor(Dtype.U32, x.shape, x.flatten())
_y = [
Tensor(Dtype.U32, y[0].shape, y[0].flatten()),
Tensor(Dtype.U32, y[1].shape, y[1].flatten()),
Tensor(Dtype.U32, y[2].shape, y[2].flatten()),
]
name = "split_u32_2d_uneven"
make_test(
[_x], _y, "input_0.split(1, Option::Some(3), Option::None(()))", name)
split_1D()
split_2D()
split_zero_size()
split_1d_uneven()
split_2d_uneven()
@staticmethod
def split_fp16x16():
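        # Inputs are generated as raw FP16x16 fixed-point values produced by to_fp
        # (stored as int64), so the expected outputs are sliced from that raw
        # representation directly.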
def split_1D():
x = to_fp(np.random.randint(-127, 127, 6
).astype(np.int64), FixedImpl.FP16x16)
y = [
np.array(x[0:2]).astype(np.int64),
np.array(x[2:4]).astype(np.int64),
np.array(x[4:6]).astype(np.int64),
]
_x = Tensor(Dtype.FP16x16, x.shape, x.flatten())
_y = [
Tensor(Dtype.FP16x16, y[0].shape, y[0].flatten()),
Tensor(Dtype.FP16x16, y[1].shape, y[1].flatten()),
Tensor(Dtype.FP16x16, y[2].shape, y[2].flatten()),
]
name = "split_fp16x16_1d_equal_parts"
make_test(
[_x], _y, "input_0.split(0, Option::Some(3), Option::None(()))", name)
y = [
np.array(x[0:2]).astype(np.int64),
np.array(x[2:6]).astype(np.int64),
]
_y = [
Tensor(Dtype.FP16x16, y[0].shape, y[0].flatten()),
Tensor(Dtype.FP16x16, y[1].shape, y[1].flatten()),
            ]
name = "split_fp16x16_1d_variable_parts"
make_test(
[_x], _y, "input_0.split(0, Option::None(()), Option::Some(TensorTrait::<u32>::new(shape: array![2].span(), data: array![2, 4].span(),)))", name)
def split_2D():
x = to_fp(np.random.randint(-127, 127, (2, 6)
).astype(np.int64), FixedImpl.FP16x16)
y = [
np.array(x[0:2, 0:3]).astype(np.int64),
np.array(x[0:2, 3:6]).astype(np.int64),
]
_x = Tensor(Dtype.FP16x16, x.shape, x.flatten())
_y = [
Tensor(Dtype.FP16x16, y[0].shape, y[0].flatten()),
Tensor(Dtype.FP16x16, y[1].shape, y[1].flatten()),
]
name = "split_fp16x16_2d_equal_parts"
make_test(
[_x], _y, "input_0.split(1, Option::Some(2), Option::None(()))", name)
y = [
np.array(x[0:2, 0:2]).astype(np.int64),
np.array(x[0:2, 2:6]).astype(np.int64)
]
_y = [
Tensor(Dtype.FP16x16, y[0].shape, y[0].flatten()),
Tensor(Dtype.FP16x16, y[1].shape, y[1].flatten()),
]
name = "split_fp16x16_2d_variable_parts"
make_test(
[_x], _y, "input_0.split(1, Option::None(()), Option::Some(TensorTrait::<u32>::new(shape: array![2].span(), data: array![2, 4].span(),)))", name)
def split_zero_size():
x = to_fp(np.array([]).astype(np.int64
).astype(np.int64), FixedImpl.FP16x16)
y = [
np.array([]).astype(np.int64),
np.array([]).astype(np.int64),
np.array([]).astype(np.int64),
]
_x = Tensor(Dtype.FP16x16, x.shape, x.flatten())
_y = [
Tensor(Dtype.FP16x16, y[0].shape, y[0].flatten()),
                Tensor(Dtype.FP16x16, y[1].shape, y[1].flatten()),
Tensor(Dtype.FP16x16, y[2].shape, y[2].flatten()),
]
name = "split_fp16x16_zero_size"
make_test(
[_x], _y, "input_0.split(0, Option::None(()), Option::Some(TensorTrait::<u32>::new(shape: array![3].span(), data: array![0, 0, 0].span(),)))", name)
def split_1d_uneven():
x = to_fp(np.random.randint(-127, 127, 7
).astype(np.int64), FixedImpl.FP16x16)
y = [
np.array(x[0:2]).astype(np.int64),
np.array(x[2:4]).astype(np.int64),
np.array(x[4:6]).astype(np.int64),
np.array(x[6:7]).astype(np.int64),
]
_x = Tensor(Dtype.FP16x16, x.shape, x.flatten())
_y = [
Tensor(Dtype.FP16x16, y[0].shape, y[0].flatten()),
Tensor(Dtype.FP16x16, y[1].shape, y[1].flatten()),
Tensor(Dtype.FP16x16, y[2].shape, y[2].flatten()),
Tensor(Dtype.FP16x16, y[3].shape, y[3].flatten()),
]
name = "split_fp16x16_1d_uneven"
make_test(
[_x], _y, "input_0.split(0, Option::Some(4), Option::None(()))", name)
def split_2d_uneven():
x = to_fp(np.random.randint(-127, 127, (2, 8)
).astype(np.int64), FixedImpl.FP16x16)
y = [
np.array(x[0:2, 0:3]).astype(np.int64),
np.array(x[0:2, 3:6]).astype(np.int64),
np.array(x[0:2, 6:8]).astype(np.int64)
]
_x = Tensor(Dtype.FP16x16, x.shape, x.flatten())
_y = [
Tensor(Dtype.FP16x16, y[0].shape, y[0].flatten()),
Tensor(Dtype.FP16x16, y[1].shape, y[1].flatten()),
Tensor(Dtype.FP16x16, y[2].shape, y[2].flatten()),
]
name = "split_fp16x16_2d_uneven"
make_test(
[_x], _y, "input_0.split( |
1, Option::Some(3), Option::None(()))", name)
split_1D()
split_2D()
split_zero_size()
split_1d_uneven()
        split_2d_uneven()
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl
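# Test generator for Tensor::split_to_sequence, mirroring the split cases above and
# additionally exercising default per-element splitting and the nokeepdims variants.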
class Split_to_sequence(RunAll):
@staticmethod
def split_to_sequence_u32():
def split_to_sequence_1D():
x = np.random.randint(0, 255, 6).astype(np.uint32)
y = [
np.array(x[0:2]).astype(np.uint32),
np.array(x[2:4]).astype(np.uint32),
np.array(x[4:6]).astype(np.uint32),
]
_x = Tensor(Dtype.U32, x.shape, x.flatten())
_y = [
Tensor(Dtype.U32, y[0].shape, y[0].flatten()),
Tensor(Dtype.U32, y[1].shape, y[1].flatten()),
Tensor(Dtype.U32, y[2].shape, y[2].flatten()),
]
name = "split_to_sequence_u32_1d_equal_parts"
make_test(
[_x], _y, "input_0.split_to_sequence(0, 1, Option::Some(TensorTrait::<u32>::new(shape: array![1].span(), data: array![3].span(),)))", name)
y = [
np.array(x[0:2]).astype(np.uint32),
np.array(x[2:6]).astype(np.uint32),
]
_y = [
Tensor(Dtype.U32, y[0].shape, y[0].flatten()),
Tensor(Dtype.U32, y[1].shape, y[1].flatten()),
]
name = "split_to_sequence_u32_1d_variable_parts"
make_test(
[_x], _y, "input_0.split_to_sequence(0, 1, Option::Some(TensorTrait::<u32>::new(shape: array![2].span(), data: array![2, 4].span(),)))", name)
def split_to_sequence_2D():
x = np.random.randint(0, 255, (2, 6)).astype(np.uint32)
y = [
np.array(x[0:2, 0:3]).astype(np.uint32),
np.array(x[0:2, 3:6]).astype(np.uint32),
]
_x = Tensor(Dtype.U32, x.shape, x.flatten())
_y = [
Tensor(Dtype.U32, y[0].shape, y[0].flatten()),
Tensor(Dtype.U32, y[1].shape, y[1].flatten()),
]
name = "split_to_sequence_u32_2d_equal_parts"
make_test(
[_x], _y, "input_0.split_to_sequence(1, 1, Option::Some( |
TensorTrait::<u32>::new(shape: array![1].span(), data: array![2].span(),)))", name)
y = [
np.array(x[0:2, 0:2]).astype(np.uint32),
np.array(x[0:2, 2:6]).astype(np.uint32)
]
_y = [
Tensor(Dtype.U32, y[0].shape, y[0].flatten()),
Tensor(Dtype.U32, y[1].shape, y[1].flatten()),
]
name = "split_to_sequence_u32_2d_variable_parts"
make_test(
[_x], _y, "input_0.split_to_sequence(1, 1, Option::Some(TensorTrait::<u32>::new(shape: array![2].span(), data: array![2, 4].span(),)))", name)
def split_to_sequence_zero_size():
x = np.array([]).astype(np.uint32)
y = [
np.array([]).astype(np.uint32),
np.array([]).astype(np.uint32),
np.array([]).astype(np.uint32),
]
_x = Tensor(Dtype.U32, x.shape, x.flatten())
_y = [
Tensor(Dtype.U32, y[0].shape, y[0].flatten()),
Tensor(Dtype.U32, y[1].shape, y[1].flatten()),
Tensor(Dtype.U32, y[2].shape, y[2].flatten()),
]
name = "split_to_sequence_u32_zero_size"
make_test(
[_x], _y, "input_0.split_to_sequence(0, 1, Option::Some(TensorTrait::<u32>::new(shape: array![3].span(), data: array![0, 0, 0].span(),)))", name)
def split_to_sequence_1d_uneven():
x = np.random.randint(0, 255, 7).astype(np.uint32)
y = [
np.array(x[0:2]).astype(np.uint32),
np.array(x[2:4]).astype(np.uint32),
np.array(x[4:6]).astype(np.uint32),
np.array(x[6:7]).astype(np.uint32),
]
_x = Tensor(Dtype.U32, x.shape, x.flatten())
_y = [
Tensor(Dtype.U32, y[0].shape, y[0].flatten()),
                Tensor(Dtype.U32, y[1].shape, y[1].flatten()),
Tensor(Dtype.U32, y[2].shape, y[2].flatten()),
Tensor(Dtype.U32, y[3].shape, y[3].flatten()),
]
name = "split_to_sequence_u32_1d_uneven"
make_test(
[_x], _y, "input_0.split_to_sequence(0, 1, Option::Some(TensorTrait::<u32>::new(shape: array![1].span(), data: array![4].span(),)))", name)
def split_to_sequence_2d_uneven():
x = np.random.randint(0, 255, (2, 8)).astype(np.uint32)
y = [
np.array(x[0:2, 0:3]).astype(np.uint32),
np.array(x[0:2, 3:6]).astype(np.uint32),
np.array(x[0:2, 6:8]).astype(np.uint32)
]
_x = Tensor(Dtype.U32, x.shape, x.flatten())
_y = [
Tensor(Dtype.U32, y[0].shape, y[0].flatten()),
Tensor(Dtype.U32, y[1].shape, y[1].flatten()),
Tensor(Dtype.U32, y[2].shape, y[2].flatten()),
]
name = "split_to_sequence_u32_2d_uneven"
make_test(
[_x], _y, "input_0.split_to_sequence(1, 1, Option::Some(TensorTrait::<u32>::new(shape: array![1].span(), data: array![3].span(),)))", name)
def split_to_sequence_2d_scalar():
x = np.random.randint(0, 255, (2, 8)).astype(np.uint32)
y = [
np.array(x[0:2, 0:1]).astype(np.uint32),
np.array(x[0:2, 1:2]).astype(np.uint32),
np.array(x[0:2, 2:3]).astype(np.uint32),
np.array(x[0:2, 3:4]).astype(np.uint32),
np.array(x[0:2, 4:5]).astype(np.uint32),
np.array(x[0:2, 5:6]).astype(np.uint32),
np.array(x[0:2, 6:7]).astype(np.uint32),
np.array(x[0:2, 7:8]).astype(np.uint32)
]
_x = Tensor(Dtype.U32, x.shape, x.flatten())
_y = [
Tensor(Dtype.U32, y[0].shape, y[0].flatten()),
Tensor(Dtype.U32, y[1].shape, y[1].flatten()),
                Tensor(Dtype.U32, y[2].shape, y[2].flatten()),
Tensor(Dtype.U32, y[3].shape, y[3].flatten()),
Tensor(Dtype.U32, y[4].shape, y[4].flatten()),
Tensor(Dtype.U32, y[5].shape, y[5].flatten()),
Tensor(Dtype.U32, y[6].shape, y[6].flatten()),
Tensor(Dtype.U32, y[7].shape, y[7].flatten()),
]
name = "split_to_sequence_2d_scalar"
make_test(
[_x], _y, "input_0.split_to_sequence(1, 1, Option::None(()))", name)
def split_to_sequence_2d_nokeepdims():
x = np.random.randint(0, 255, (2, 8)).astype(np.uint32)
y = [
np.array(x[0:2, 0:1]).astype(np.uint32),
np.array(x[0:2, 1:2]).astype(np.uint32),
np.array(x[0:2, 2:3]).astype(np.uint32),
np.array(x[0:2, 3:4]).astype(np.uint32),
np.array(x[0:2, 4:5]).astype(np.uint32),
np.array(x[0:2, 5:6]).astype(np.uint32),
np.array(x[0:2, 6:7]).astype(np.uint32),
np.array(x[0:2, 7:8]).astype(np.uint32)
]
_x = Tensor(Dtype.U32, x.shape, x.flatten())
_y = [
Tensor(Dtype.U32, y[0].shape, y[0].flatten()),
Tensor(Dtype.U32, y[1].shape, y[1].flatten()),
Tensor(Dtype.U32, y[2].shape, y[2].flatten()),
Tensor(Dtype.U32, y[3].shape, y[3].flatten()),
Tensor(Dtype.U32, y[4].shape, y[4].flatten()),
Tensor(Dtype.U32, y[5].shape, y[5].flatten()),
Tensor(Dtype.U32, y[6].shape, y[6].flatten()),
Tensor(Dtype.U32, y[7].shape, y[7].flatten()),
]
name = "split_to_sequence_2d_nokeepdims"
make_test(
[_x], _y, "input_0.split_to_sequence(1, 0, Option::None(()))", name)
def split_to_sequence_1d_nokeepdims():
x = np.random.randint(0, 255, 8).astype(np.uint32)
            y = [
np.array(x[0:1]).astype(np.uint32),
np.array(x[1:2]).astype(np.uint32),
np.array(x[2:3]).astype(np.uint32),
np.array(x[3:4]).astype(np.uint32),
np.array(x[4:5]).astype(np.uint32),
np.array(x[5:6]).astype(np.uint32),
np.array(x[6:7]).astype(np.uint32),
np.array(x[7:8]).astype(np.uint32)
]
_x = Tensor(Dtype.U32, x.shape, x.flatten())
_y = [
Tensor(Dtype.U32, y[0].shape, y[0].flatten()),
Tensor(Dtype.U32, y[1].shape, y[1].flatten()),
Tensor(Dtype.U32, y[2].shape, y[2].flatten()),
Tensor(Dtype.U32, y[3].shape, y[3].flatten()),
Tensor(Dtype.U32, y[4].shape, y[4].flatten()),
Tensor(Dtype.U32, y[5].shape, y[5].flatten()),
Tensor(Dtype.U32, y[6].shape, y[6].flatten()),
Tensor(Dtype.U32, y[7].shape, y[7].flatten()),
]
name = "split_to_sequence_1d_nokeepdims"
make_test(
[_x], _y, "input_0.split_to_sequence(0, 0, Option::None(()))", name)
split_to_sequence_1D()
split_to_sequence_2D()
split_to_sequence_zero_size()
split_to_sequence_1d_uneven()
split_to_sequence_2d_uneven()
split_to_sequence_2d_scalar()
split_to_sequence_1d_nokeepdims()
split_to_sequence_2d_nokeepdims()
@staticmethod
def split_to_sequence_fp16x16():
def split_to_sequence_1D():
x = to_fp(np.random.randint(-127, 127, 6
).astype(np.int64), FixedImpl.FP16x16)
y = [
np.array(x[0:2]).astype(np.int64),
np.array(x[2:4]).astype(np.int64),
np.array(x[4:6]).astype(np.int64),
]
_x = Tensor(Dtype.FP16x16, x.shape, x.flatten())
_y = [
                Tensor(Dtype.FP16x16, y[0].shape, y[0].flatten()),
Tensor(Dtype.FP16x16, y[1].shape, y[1].flatten()),
Tensor(Dtype.FP16x16, y[2].shape, y[2].flatten()),
]
name = "split_to_sequence_fp16x16_1d_equal_parts"
make_test(
[_x], _y, "input_0.split_to_sequence(0, 1, Option::Some(TensorTrait::<u32>::new(shape: array![1].span(), data: array![3].span(),)))", name)
y = [
np.array(x[0:2]).astype(np.int64),
np.array(x[2:6]).astype(np.int64),
]
_y = [
Tensor(Dtype.FP16x16, y[0].shape, y[0].flatten()),
Tensor(Dtype.FP16x16, y[1].shape, y[1].flatten()),
]
name = "split_to_sequence_fp16x16_1d_variable_parts"
make_test(
[_x], _y, "input_0.split_to_sequence(0, 1, Option::Some(TensorTrait::<u32>::new(shape: array![2].span(), data: array![2, 4].span(),)))", name)
def split_to_sequence_2D():
x = to_fp(np.random.randint(-127, 127, (2, 6)
).astype(np.int64), FixedImpl.FP16x16)
y = [
np.array(x[0:2, 0:3]).astype(np.int64),
np.array(x[0:2, 3:6]).astype(np.int64),
]
_x = Tensor(Dtype.FP16x16, x.shape, x.flatten())
_y = [
Tensor(Dtype.FP16x16, y[0].shape, y[0].flatten()),
Tensor(Dtype.FP16x16, y[1].shape, y[1].flatten()),
]
name = "split_to_sequence_fp16x16_2d_equal_parts"
make_test(
[_x], _y, "input_0.split_to_sequence(1, 1, Option::Some(TensorTrait::<u32>::new(shape: array![1].span(), data: array![2].span(),)))", name)
y = [
np.array(x[0:2, 0:2]).astype(np.int64),
np.array(x[0:2, 2:6]).astype(np.int64)
]
_y = [
Tensor(Dtype.FP16x16, y[0].shape, y[0].flatten()),
Tensor(Dtype.FP16x16, y[1].shape, y[1].flatten()),
]
name = |