use regex::Regex;
use std::fs;
use std::path::Path;
fn main() {
let trait_path = "src/operators/tensor/core.cairo";
let doc_path = "docs/framework/operators/tensor";
let label = "tensor";
let trait_name = "TensorTrait";
doc_trait(trait_path, doc_path, label);
doc_functions(trait_path, doc_path, trait_name, label);
let trait_path = "src/operators/nn/core.cairo";
let doc_path = "docs/framework/operators/neural-network";
let label = "nn";
let trait_name = "NNTrait";
doc_trait(trait_path, doc_path, label);
doc_functions(trait_path, doc_path, trait_name, label);
let trait_path = "src/operators/sequence/core.cairo";
let doc_path = "docs/framework/operators/sequence";
let label = "sequence";
let trait_name = "SequenceTrait";
doc_trait(trait_path, doc_path, label);
doc_functions(trait_path, doc_path, trait_name, label);
let trait_path = "src/numbers/fixed_point/core.cairo";
let doc_path = "docs/framework/numbers/fixed-point";
let label = "fp";
let trait_name = "FixedTrait";
doc_trait(trait_path, doc_path, label);
doc_functions(trait_path, doc_path, trait_name, label);
let trait_path = "src/numbers/complex_number/complex_trait.cairo";
let doc_path = "docs/framework/numbers/complex-number";
let label = "complex";
let trait_name: &str = "ComplexTrait";
doc_trait(trait_path, doc_path, label);
doc_functions(trait_path, doc_path, trait_name, label);
let trait_path = "src/operators/ml/tree_ensemble/tree_ensemble_classifier.cairo";
let doc_path = "docs/framework/operators/machine-learning/tree-ensemble-classifier";
let label = "tree_ensemble_classifier";
let trait_name: &str = "TreeEnsembleClassifierTrait";
doc_trait(trait_path, doc_path, label);
doc_functions(trait_path, doc_path, trait_name, label);
let trait_path = "src/operators/ml/tree_ensemble/tree_ensemble_regressor.cairo";
let doc_path = "docs/framework/operators/machine-learning/tree-ensemble-regressor";
let label = "tree_ensem |
ble_regressor";
let trait_name: &str = "TreeEnsembleRegressorTrait";
doc_trait(trait_path, doc_path, label);
doc_functions(trait_path, doc_path, trait_name, label);
let trait_path = "src/operators/ml/linear/linear_regressor.cairo";
let doc_path = "docs/framework/operators/machine-learning/linear-regressor";
let label = "linear_regressor";
let trait_name: &str = "LinearRegressorTrait";
doc_trait(trait_path, doc_path, label);
doc_functions(trait_path, doc_path, trait_name, label);
let trait_path = "src/operators/ml/linear/linear_classifier.cairo";
let doc_path = "docs/framework/operators/machine-learning/linear-classifier";
let label = "linear_classifier";
let trait_name: &str = "LinearClassifierTrait";
doc_trait(trait_path, doc_path, label);
doc_functions(trait_path, doc_path, trait_name, label);
let trait_path = "src/operators/ml/svm/svm_regressor.cairo";
let doc_path = "docs/framework/operators/machine-learning/svm-regressor";
let label = "svm_regressor";
let trait_name: &str = "SVMRegressorTrait";
doc_trait(trait_path, doc_path, label);
doc_functions(trait_path, doc_path, trait_name, label);
let trait_path = "src/operators/ml/svm/svm_classifier.cairo";
let doc_path = "docs/framework/operators/machine-learning/svm-classifier";
let label = "svm_classifier";
let trait_name: &str = "SVMClassifierTrait";
doc_trait(trait_path, doc_path, label);
doc_functions(trait_path, doc_path, trait_name, label);
let trait_path = "src/operators/ml/normalizer/normalizer.cairo";
let doc_path = "docs/framework/operators/machine-learning/normalizer";
let label = "normalizer";
let trait_name: &str = "NormalizerTrait";
doc_trait(trait_path, doc_path, label);
doc_functions(trait_path, doc_path, trait_name, label);
}
fn doc_trait(trait_path: &str, doc_path: &str, label: &str) {
let path_str = format!("../{}", trait_path);
let path = Path::new(&path_str);
let contents = fs::read_to_string(&path).expect("Could not read the file");
    let re = Regex::new(r"/// (\w+) - (.*)").unwrap();
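    // Illustrative line this pattern matches in core.cairo:
    //     /// abs - Computes the absolute value of all elements in the input tensor.
    // cap[1] is the function name ("abs"); cap[2] is the one-line description.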
let mut table = String::from("| function | description |\n| --- | --- |\n");
for cap in re.captures_iter(&contents) {
if &cap[1] == "Trait" {
continue;
}
let func_name = format!(
"[`{}.{}`]({}.{}.md)",
label,
&cap[1],
label,
&cap[1].replace('_', r"\_")
);
let func_desc = &cap[2];
table += &format!("| {} | {} |\n", func_name, func_desc);
}
let readme_path_str = format!("../{}/README.md", doc_path);
let readme_path = Path::new(&readme_path_str);
let readme = fs::read_to_string(&readme_path).expect("Could not read the file");
let re_table = Regex::new(r"(?ms)\n\n\| fun.*?(\n[^|]|\z)").unwrap();
let new_readme = re_table.replace(&readme, &("\n\n".to_owned() + &table + "\n"));
fs::write(&readme_path, &*new_readme).expect("Could not write the file");
}
fn doc_functions(trait_path: &str, doc_path: &str, trait_name: &str, label: &str) {
let filepath_str = format!("../{}", trait_path);
let filepath = Path::new(&filepath_str);
let contents = fs::read_to_string(filepath).expect("Something went wrong reading the file");
let trait_re = Regex::new(&format!(
r"(?s)trait\s+{}\s*(<[\w\s,]*>)?\s*\{{.*?\n\s*\}}",
trait_name
))
.unwrap();
let trait_match = trait_re.captures(&contents).unwrap();
let trait_block = trait_match.get(0).unwrap().as_str();
let func_re = Regex::new(r"(?s)(
for func_match in func_re.captures_iter(trait_block) {
let func_name = func_match.get(2).unwrap().as_str();
let doc_comment = func_match.get(1).unwrap().as_str();
let markdown_filename = format!("../{}/{}.{}.md", doc_path, label, func_name);
        let transformed_comment = doc_comment
            .lines()
            .map(|line| {
                // Strip the leading `/// ` (or bare `///`) from each doc line.
                line.trim_start().strip_prefix("/// ").unwrap_or(
                    line.trim_start()
                        .strip_prefix("///")
                        .unwrap_or(line.trim_start()),
                )
            })
            .collect::<Vec<_>>()
            .join("\n");
fs::write(markdown_filename, transformed_comment).expect("Unable to write file");
}
}
import os
from pathlib import Path
BASE_PATH = "./tests/nodes"
class ModFile:
def __init__(self):
"""
Initialize a ModFile object.
        This method targets the module file at `{BASE_PATH}.cairo` (./tests/nodes.cairo).
        If the parent directory doesn't exist, it is created. The contents of the
        file are then read into the buffer attribute.
"""
self.path = Path(f"{BASE_PATH}.cairo")
self.path.parent.mkdir(parents=True, exist_ok=True)
with self.path.open("r") as f:
self.buffer = f.readlines()
def update(self, name: str):
"""
Update the .cairo file with a new module statement.
Args:
name (str): The name of the module to be added.
This method checks if a module statement for the given name already exists in the buffer.
If it doesn't, the new module statement is appended to the file.
"""
statement = f"mod {name};"
if any([line.startswith(statement) for line in self.buffer]):
return
with self.path.open("a") as f:
f.write(f"{statement}\n")
class File:
def __init__(self, path: str):
"""
Initialize a File object.
Args:
path (str): The file path where the File object will operate.
        This method prepares a file at the specified path, creating parent
        directories as needed. If the file already exists, its contents are read
        into the buffer attribute.
"""
self.path = Path(path)
self.path.parent.mkdir(parents=True, exist_ok=True)
self.buffer = []
if os.path.isfile(path):
with self.path.open("r") as f:
self.buffer = f.readlines()
def dump(self):
"""
Write the contents of the buffer to the file.
This method writes each line in the buffer to the file, ensuring each line is
properly terminated with a newline character.
"""
with self.path.open("w") as f:
            f.writelines([f"{line}\n" for line in self.buffer])
class CairoTest(File):
def __init__(self, file: str):
super().__init__(os.path.join(BASE_PATH, file))
@classmethod
def base_template(
cls, name: str, arg_cnt: int, refs: list[str], func_sig: str, out_cnt: int = 1
) -> list[str]:
"""
Create a template for a Cairo test function which expects a tensor output.
Args:
name (str): Name of the test function.
arg_cnt (int): Number of arguments for the function.
refs (list[str]): List of references (modules) to be used in the function.
func_sig (str): The function signature.
out_cnt (int): Number of outputs for the function. Defaults to 1.
Returns:
list[str]: A list of strings that together form the template of a Cairo test function.
This method generates a list of strings that form the template of a Cairo test function,
including module imports, function definition, and assertions.
"""
template = [
*[f"mod input_{i};" for i in range(arg_cnt)],
*[f"mod output_{i};" for i in range(out_cnt)],
"",
"",
*[f"use {ref};" for ref in refs],
"",
"
"
f"fn test_{name}()" + " {",
*[f" let input_{i} = input_{i}::input_{i}();" for i in range(arg_cnt)],
*[f" let z_{i} = output_{i}::output_{i}();" for i in range(out_cnt)],
""
]
if out_cnt > 1:
template.append(f" let ({', '.join(f'y_{i}' for i in range(out_cnt))}) = {func_sig};")
else:
template.append(f" let y_0 = {func_sig};")
template.extend([
"",
*[f" assert_eq(y_{i}, z_{i});" for i in range(out_cnt)],
"}"
])
return template
@classmethod
def sequence_template(cls, name: str, arg_cnt: int, refs: list[str], func_sig: str) -> list[str]:
"""
        Create a template for a Cairo test function which expects a tensor sequence.
Args:
name (str): Name of the test function.
arg_cnt (int): Number of arguments for the function.
refs (list[str]): List of references (modules) to be used in the function.
func_sig (str): The function signature.
Returns:
list[str]: A list of strings that together form the template of a Cairo test function.
This method generates a list of strings that form the template of a Cairo test function,
including module imports, function definition, and assertions.
"""
return [
*[f"mod input_{i};" for i in range(arg_cnt)],
*[ "mod output_0;"],
*[ ""],
*[ ""],
*[f"use {ref};" for ref in refs],
*[ ""],
*[ "
*[ "
*[f"fn test_{name}()"+" {"],
*[f" let input_{i} = input_{i}::input_{i}();" for i in range(arg_cnt)],
*[ " let z = output_0::output_0();"],
*[ ""],
*[f" let y = {func_sig};"],
*[ ""],
*[ " assert_seq_eq(y, z);"],
*[ "}"],
        ]
class CairoData(File):
def __init__(self, file: str):
super().__init__(os.path.join(BASE_PATH, file))
@classmethod
def base_template(
cls, func: str, dtype: str, refs: list[str], data: list[str], shape: tuple
) -> list[str]:
"""
Create a base template for data representation in Cairo.
Args:
func (str): The function name.
dtype (str): The data type of the tensor.
refs (list[str]): A list of module references.
data (list[str]): The data to be included in the tensor.
shape (tuple): The shape of the tensor.
Returns:
list[str]: A list of strings that together form the template of a data function in Cairo.
This method generates a list of strings representing a function in Cairo for data handling,
defining the shape and contents of a tensor.
"""
template = [
*[f"use {ref};" for ref in refs],
*[""],
*[f"fn {func}() -> Tensor<{dtype}>" + " {"],
*[" let mut shape = ArrayTrait::<usize>::new();"],
*[f" shape.append({s});" for s in shape],
*[""],
*[" let mut data = ArrayTrait::new();"],
*[f" data.append({d});" for d in data],
*[" TensorTrait::new(shape.span(), data.span())"],
*["}"],
]
return template
@classmethod
def sequence_template(
cls,
func: str,
dtype: str,
refs: list[str],
data: list[list[str]],
shape: list[tuple],
) -> list[str]:
"""
Create a template for handling tensor sequences in Cairo.
Args:
func (str): The function name.
dtype (str): The data type of the tensor sequence.
refs (list[str]): A list of module references.
data (list[list[str]]): The data to be included in each tensor.
            shape (list[tuple]): The shapes of each tensor in the sequence.
Returns:
list[str]: A list of strings that together form the template of a sequence tensor function in Cairo.
This method generates a list of strings representing a function in Cairo for handling a sequence
of tensors, each with its own data and shape.
"""
def expand_sequence_init(s: list[tuple], d: list[list[str]]) -> list[str]:
snippet = []
for i in range(len(s)):
snippet += [
*[" let mut shape = ArrayTrait::<usize>::new();"],
*[f" shape.append({s});" for s in s[i]],
*[""],
*[" let mut data = ArrayTrait::new();"],
*[f" data.append({d});" for d in d[i]],
*[""],
*[
" sequence.append(TensorTrait::new(shape.span(), data.span()));"
],
*[""],
]
return snippet
template = [
*[f"use {ref};" for ref in refs],
*[""],
*[f"fn {func}() -> Array<Tensor<{dtype}>>" + " {"],
*[" let mut sequence = ArrayTrait::new();"],
*[""],
*expand_sequence_init(shape, data),
*[" sequence"],
*["}"],
]
        return template
from enum import Enum
import os
from typing import List
from .file_manager import CairoTest, CairoData, ModFile

import numpy as np
class FixedImpl(Enum):
FP8x23 = 'FP8x23'
FP16x16 = 'FP16x16'
FP32x32 = 'FP32x32'
def to_fp(x: np.ndarray, fp_impl: FixedImpl):
match fp_impl:
case FixedImpl.FP8x23:
return (x * 2**23).astype(np.int64)
case FixedImpl.FP16x16:
return (x * 2**16).astype(np.int64)
case FixedImpl.FP32x32:
            return (x * 2**32).astype(np.int64)
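# Illustrative conversion: to_fp(np.array([1.5, -2.0]), FixedImpl.FP16x16)
# scales by 2**16 and returns array([ 98304, -131072]), the raw integer
# representation consumed by get_data_statement below.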
class Dtype(Enum):
FP8x23 = 'FP8x23'
FP16x16 = 'FP16x16'
FP32x32 = 'FP32x32'
I8 = 'i8'
I32 = 'i32'
U32 = 'u32'
BOOL = 'bool'
COMPLEX64 = 'complex64'
class Tensor:
def __init__(self, dtype: Dtype, shape: tuple, data: np.ndarray):
self.dtype = dtype
self.shape = shape
self.data = data
Sequence = List[Tensor]
class Trait(Enum):
TENSOR = 'TENSOR'
NN = 'NN'
SEQUENCE = 'SEQUENCE'
def make_test(inputs: list[Tensor | Sequence], output: Tensor | Sequence, func_sig: str, name: str, trait: Trait = Trait.TENSOR):
"""
Generate and write Cairo tests based on the provided inputs and output.
Args:
inputs (list[Tensor | list[Tensor]]): A list of input tensors or tensor sequences.
output (Tensor | list[Tensor]): The expected output tensor or tensor sequences.
func_sig (str): The signature of the function to be tested.
name (str): The name of the test.
trait (Trait, optional): The trait of the tensors. Defaults to Trait.TENSOR.
"""
ModFile().update(name)
for i, input in enumerate(inputs):
input_data = CairoData(os.path.join(name, f"input_{i}.cairo"))
match input:
case list():
input_data.buffer = CairoData.sequence_template(
func=f"input_{i}",
dtype=input[0].dtype.value,
refs=get_data_refs(input[0].dtype),
data=get_data_statement_for_sequences(
input, input[0].dtype),
shape=[x.shape for x in input],
)
case Tensor():
input_data.buffer = CairoData.base_template(
func=f"input_{i}",
dtype=input.dtype.value,
refs=get_data_refs(input.dtype),
data=get_data_statement(input.data, input.dtype),
shape=input.shape,
)
input_data.dump()
match output:
case list():
output_data = CairoData(os.path.join(name, "output_0.cairo"))
output_data.buffer = CairoData.sequence_template(
func="output_0",
dtype=output[0].dtype.value,
refs=get_data_refs(output[0].dtype),
data=get_data_statement_for_sequences(output, output[0].dtype),
                shape=[x.shape for x in output],
)
output_data.dump()
case tuple():
for i, out in enumerate(output):
output_data = CairoData(
os.path.join(name, f"output_{i}.cairo"))
output_data.buffer = CairoData.base_template(
func=f"output_{i}",
dtype=out.dtype.value,
refs=get_data_refs(out.dtype),
data=get_data_statement(out.data, out.dtype),
shape=out.shape,
)
output_data.dump()
case Tensor():
output_data = CairoData(os.path.join(name, "output_0.cairo"))
output_data.buffer = CairoData.base_template(
func="output_0",
dtype=output.dtype.value,
refs=get_data_refs(output.dtype),
data=get_data_statement(output.data, output.dtype),
shape=output.shape,
)
output_data.dump()
test_file = CairoTest(f"{name}.cairo")
match output:
case list():
test_file.buffer = CairoTest.sequence_template(
name=name,
arg_cnt=len(inputs),
refs=get_all_test_refs(find_all_types([*inputs, *output]), trait),
func_sig=func_sig,
)
case Tensor():
test_file.buffer = CairoTest.base_template(
name=name,
arg_cnt=len(inputs),
refs=get_all_test_refs(find_all_types([*inputs, output]), trait),
func_sig=func_sig,
)
case tuple():
test_file.buffer = CairoTest.base_template(
name=name,
arg_cnt=len(inputs),
out_cnt=len(output),
refs=get_all_test_refs(find_all_types([*inputs, output]), trait),
func_sig=func_sig,
)
test_file.dump()
def get_data_refs(dtype: Dtype) -> list[str]:
refs = [
        *trait_to_ref[Trait.TENSOR],
*dtype_to_tensor[dtype],
*dtype_to_numbers[dtype],
]
return refs
def get_data_statement(data: np.ndarray, dtype: Dtype) -> list[str]:
match dtype:
case Dtype.U32:
return [f"{int(x)}" for x in data.flatten()]
case Dtype.I32:
return [f"{int(x)}" for x in data.flatten()]
case Dtype.I8:
return [f"{int(x)}" for x in data.flatten()]
case Dtype.FP8x23:
return ["FP8x23 { "+f"mag: {abs(int(x))}, sign: {str(x < 0).lower()} "+"}" for x in data.flatten()]
case Dtype.FP16x16:
return ["FP16x16 { "+f"mag: {abs(int(x))}, sign: {str(x < 0).lower()} "+"}" for x in data.flatten()]
case Dtype.FP32x32:
return ["FP32x32 { "+f"mag: {abs(int(x))}, sign: {str(x < 0).lower()} "+"}" for x in data.flatten()]
case Dtype.BOOL:
return [str(x).lower() for x in data.flatten()]
case Dtype.COMPLEX64:
return ["complex64 { "+"real: FP64x64 { "+f"mag: {abs(int(np.real(x)))}, sign: {str(np.real(x) < 0).lower()} "+"} , img: FP64x64 { "+f"mag: {abs(int(np.imag(x)))}, sign: {str(np.imag(x) < 0).lower()} "+"} }" for x in data.flatten()]
def get_data_statement_for_sequences(data: Sequence, dtype: Dtype) -> list[list[str]]:
return [get_data_statement(x.data, dtype) for x in data]
def get_all_test_refs(dtypes: list[Dtype], trait: Trait) -> list[str]:
refs = []
for dtype in dtypes:
refs += get_test_refs(dtype, trait)
return list(set(refs))
def get_test_refs(dtype: Dtype, trait: Trait) -> list[str]:
if trait == Trait.NN and dtype == Dtype.BOOL:
raise Exception("NN trait does not support bool dtype")
if trait == Trait.NN:
dtype_ref = dtype_to_nn[dtype]
elif trait == Trait.SEQUENCE:
dtype_ref = dtype_to_sequence[dtype]
else:
dtype_ref = dtype_to_tensor[dtype]
refs = [
*trait_to_ref[trait],
*dtype_ref,
        *dtype_to_partial_eq[dtype],
"orion::utils::{assert_eq, assert_seq_eq}",
]
return refs
def find_all_types(tensors: list[Tensor | Sequence]) -> list[Dtype]:
dtypes = []
for tensor in tensors:
if isinstance(tensor, list) or isinstance(tensor, tuple):
dtypes += [x.dtype for x in tensor]
else:
dtypes.append(tensor.dtype)
return list(set(dtypes))
trait_to_ref = {
Trait.TENSOR: [
"core::array::{ArrayTrait, SpanTrait}",
"orion::operators::tensor::{TensorTrait, Tensor}",
],
Trait.NN: [
"orion::numbers::FixedTrait",
"orion::operators::nn::NNTrait",
],
Trait.SEQUENCE: [
"core::array::{ArrayTrait, SpanTrait}",
"orion::operators::sequence::SequenceTrait",
],
}
dtype_to_tensor = {
Dtype.U32: ["orion::operators::tensor::{U32Tensor, U32TensorAdd}",],
Dtype.I32: ["orion::operators::tensor::{I32Tensor, I32TensorAdd}",],
Dtype.I8: ["orion::operators::tensor::{I8Tensor, I8TensorAdd}",],
Dtype.FP8x23: ["orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}",],
Dtype.FP16x16: ["orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}",],
Dtype.BOOL: ["orion::operators::tensor::BoolTensor",],
Dtype.COMPLEX64: ["orion::operators::tensor::Complex64Tensor",],
Dtype.FP32x32: ["orion::operators::tensor::FP32x32Tensor",],
}
dtype_to_nn = {
Dtype.U32: ["orion::operators::nn::U32NN",],
Dtype.I32: ["orion::operators::nn::I32NN",],
Dtype.I8: ["orion::operators::nn::I8NN",],
Dtype.FP8x23: ["orion::operators::nn::FP8x23NN",],
Dtype.FP16x16: ["orion::operators::nn::FP16x16NN",],
}
dtype_to_sequence = {
Dtype.U32: ["orion::operators::sequence::U32Sequence",],
Dtype.I32: ["orion::operators::sequence::I32Sequence",],
Dtype.I8: ["orion::operators::sequence::I8Sequence",],
Dtype.FP8x23: ["orion::operators::sequence::FP8x23Sequence",],
Dtype.FP16x16: ["orion::operators::sequence::FP16x16Sequence",],
}
dtype_to_partial_eq = {
Dtype.U32: ["orion::oper |
ators::tensor::U32TensorPartialEq",],
Dtype.I32: ["orion::operators::tensor::I32TensorPartialEq",],
Dtype.I8: ["orion::operators::tensor::I8TensorPartialEq",],
Dtype.FP8x23: ["orion::operators::tensor::FP8x23TensorPartialEq",],
Dtype.FP16x16: ["orion::operators::tensor::FP16x16TensorPartialEq",],
Dtype.FP32x32: ["orion::operators::tensor::FP32x32TensorPartialEq",],
Dtype.BOOL: ["orion::operators::tensor::BoolTensorPartialEq",],
Dtype.COMPLEX64: ["orion::operators::tensor::Complex64TensorPartialEq",],
}
dtype_to_numbers = {
Dtype.U32: ["orion::numbers::NumberTrait"],
Dtype.I32: ["orion::numbers::NumberTrait"],
Dtype.I8: ["orion::numbers::NumberTrait"],
Dtype.FP8x23: ["orion::numbers::{FixedTrait, FP8x23}",],
Dtype.FP16x16: ["orion::numbers::{FixedTrait, FP16x16}",],
Dtype.FP32x32: ["orion::numbers::{FixedTrait, FP32x32}",],
Dtype.BOOL: [],
Dtype.COMPLEX64: ["orion::numbers::{NumberTrait, complex64}",],
}
import argparse
import importlib
import os
import sys
class RunAll:
@classmethod
def run_all(cls):
for method_name in dir(cls):
if method_name.startswith('__') or method_name == 'run_all':
continue
method = getattr(cls, method_name)
if callable(method):
method()
# Add the path to the 'orion' directory to the Python path
sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..'))
def main():
parser = argparse.ArgumentParser(description="Generate nodes.")
parser.add_argument('node_class', help="The class of node to run.")
args = parser.parse_args()
class_name = args.node_class.capitalize()
# Verify that the specified Python file exists
filename = os.path.join('nodegen/node', args.node_class + '.py')
if not os.path.exists(filename):
print(f"Error: {filename} does not exist.")
return
# Import the module dynamically
module = importlib.import_module('nodegen.node.' + args.node_class)
# Get the class from the module
node_class = getattr(module, class_name)
# Instantiate the class and call the run_all method
node_instance = node_class()
node_instance.run_all()
if __name__ == "__main__":
main()
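# Illustrative invocation (run from the repository root so that
# nodegen/node/<node_class>.py resolves), e.g. for the Abs node:
#
#   python <path-to-this-script> abs
#
# This imports nodegen.node.abs, instantiates Abs, and calls run_all(),
# which executes every generator method on the class.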
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl
class Abs(RunAll):
@staticmethod
def abs_i32():
x = np.random.randint(-127, 127, (2, 2)).astype(np.int32)
y = abs(x)
x = Tensor(Dtype.I32, x.shape, x.flatten())
y = Tensor(Dtype.I32, y.shape, y.flatten())
name = "abs_i32"
make_test([x], y, "input_0.abs()", name)
@staticmethod
def abs_i8():
x = np.random.randint(-127, 127, (2, 2)).astype(np.int8)
y = abs(x)
x = Tensor(Dtype.I8, x.shape, x.flatten())
y = Tensor(Dtype.I8, y.shape, y.flatten())
name = "abs_i8"
make_test([x], y, "input_0.abs()", name)
@staticmethod
def abs_fp8x23():
x = to_fp(np.random.randint(-127, 127, (2, 2)
).astype(np.int64), FixedImpl.FP8x23)
y = abs(x)
x = Tensor(Dtype.FP8x23, x.shape, x.flatten())
y = Tensor(Dtype.FP8x23, y.shape, y.flatten())
name = "abs_fp8x23"
make_test([x], y, "input_0.abs()", name)
@staticmethod
def abs_fp16x16():
x = to_fp(np.random.randint(-127, 127, (2, 2)
).astype(np.int64), FixedImpl.FP16x16)
y = abs(x)
x = Tensor(Dtype.FP16x16, x.shape, x.flatten())
y = Tensor(Dtype.FP16x16, y.shape, y.flatten())
name = "abs_fp16x16"
make_test([x], y, "input_0.abs()", name)
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl
class Acos(RunAll):
@staticmethod
def acos_fp8x23():
x = np.random.uniform(-1, 1, (2, 2)).astype(np.float64)
y = np.arccos(x)
x = Tensor(Dtype.FP8x23, x.shape, to_fp(x.flatten(), FixedImpl.FP8x23))
y = Tensor(Dtype.FP8x23, y.shape, to_fp(y.flatten(), FixedImpl.FP8x23))
name = "acos_fp8x23"
make_test([x], y, "input_0.acos()", name)
@staticmethod
def acos_fp16x16():
x = np.random.uniform(-1, 1, (2, 2)).astype(np.float64)
y = np.arccos(x)
x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
name = "acos_fp16x16"
make_test([x], y, "input_0.acos()", name)
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl
class Acosh(RunAll):
@staticmethod
def acosh_fp8x23():
x = np.random.uniform(1, 5, (2, 2)).astype(np.float64)
y = np.arccosh(x)
x = Tensor(Dtype.FP8x23, x.shape, to_fp(
x.flatten(), FixedImpl.FP8x23))
y = Tensor(Dtype.FP8x23, y.shape, to_fp(
y.flatten(), FixedImpl.FP8x23))
name = "acosh_fp8x23"
make_test([x], y, "input_0.acosh()", name)
@staticmethod
def acosh_fp16x16():
x = np.random.uniform(1, 5, (2, 2)).astype(np.float64)
y = np.arccosh(x)
x = Tensor(Dtype.FP16x16, x.shape, to_fp(
x.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(
y.flatten(), FixedImpl.FP16x16))
name = "acosh_fp16x16"
make_test([x], y, "input_0.acosh()", name)
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl
class Add(RunAll):
@staticmethod
def add_u32():
def default():
x = np.random.randint(0, 3, (3, 3, 3)).astype(np.uint32)
y = np.random.randint(0, 3, (3, 3, 3)).astype(np.uint32)
z = x + y
x = Tensor(Dtype.U32, x.shape, x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
z = Tensor(Dtype.U32, z.shape, z.flatten())
name = "add_u32"
make_test([x, y], z, "input_0 + input_1", name)
def broadcast():
x = np.random.randint(0, 3, (3, 3, 3)).astype(np.uint32)
y = np.random.randint(0, 3, (1, 3, 1)).astype(np.uint32)
z = x + y
x = Tensor(Dtype.U32, x.shape, x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
z = Tensor(Dtype.U32, z.shape, z.flatten())
name = "add_u32_broadcast"
make_test([x, y], z, "input_0 + input_1", name)
default()
broadcast()
@staticmethod
def add_i32():
def default():
x = np.random.randint(-3, 3, (3, 3, 3)).astype(np.int32)
y = np.random.randint(-3, 3, (3, 3, 3)).astype(np.int32)
z = x + y
x = Tensor(Dtype.I32, x.shape, x.flatten())
y = Tensor(Dtype.I32, y.shape, y.flatten())
z = Tensor(Dtype.I32, z.shape, z.flatten())
name = "add_i32"
make_test([x, y], z, "input_0 + input_1", name)
def broadcast():
x = np.random.randint(-3, 3, (3, 3, 3)).astype(np.int32)
y = np.random.randint(-3, 3, (1, 3, 1)).astype(np.int32)
z = x + y
x = Tensor(Dtype.I32, x.shape, x.flatten())
y = Tensor(Dtype.I32, y.shape, y.flatten())
z = Tensor(Dtype.I32, z.shape, z.flatten())
name = "add_i32_broadcast"
make_test([x, y], z, "input_0 + input_1", name)
default()
broadcast()
@staticmethod
def add_i8():
def default():
            x = np.random.randint(-3, 3, (3, 3, 3)).astype(np.int8)
y = np.random.randint(-3, 3, (3, 3, 3)).astype(np.int8)
z = x + y
x = Tensor(Dtype.I8, x.shape, x.flatten())
y = Tensor(Dtype.I8, y.shape, y.flatten())
z = Tensor(Dtype.I8, z.shape, z.flatten())
name = "add_i8"
make_test([x, y], z, "input_0 + input_1", name)
def broadcast():
x = np.random.randint(-3, 3, (3, 3, 3)).astype(np.int8)
y = np.random.randint(-3, 3, (1, 3, 1)).astype(np.int8)
z = x + y
x = Tensor(Dtype.I8, x.shape, x.flatten())
y = Tensor(Dtype.I8, y.shape, y.flatten())
z = Tensor(Dtype.I8, z.shape, z.flatten())
name = "add_i8_broadcast"
make_test([x, y], z, "input_0 + input_1", name)
default()
broadcast()
@staticmethod
def add_fp8x23():
def default():
x = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
y = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
z = x + y
x = Tensor(Dtype.FP8x23, x.shape, to_fp(
x.flatten(), FixedImpl.FP8x23))
y = Tensor(Dtype.FP8x23, y.shape, to_fp(
y.flatten(), FixedImpl.FP8x23))
z = Tensor(Dtype.FP8x23, z.shape, to_fp(
z.flatten(), FixedImpl.FP8x23))
name = "add_fp8x23"
make_test([x, y], z, "input_0 + input_1", name)
def broadcast():
x = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
y = np.random.randint(-3, 3, (1, 3, 1)).astype(np.float64)
z = x + y
x = Tensor(Dtype.FP8x23, x.shape, to_fp(
x.flatten(), FixedImpl.FP8x23))
y = Tensor(Dtype.FP8x23, y.shape, to_fp(
y.flatten(), FixedImpl.FP8x23))
z = Tensor(Dtype.FP8x23, z.shape, to_fp(
z.flatten(), FixedImpl.FP8x23))
name = "add_fp8x23_broadcast" |
make_test([x, y], z, "input_0 + input_1", name)
default()
broadcast()
@staticmethod
def add_fp16x16():
def default():
x = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
y = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
z = x + y
x = Tensor(Dtype.FP16x16, x.shape, to_fp(
x.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(
y.flatten(), FixedImpl.FP16x16))
z = Tensor(Dtype.FP16x16, z.shape, to_fp(
z.flatten(), FixedImpl.FP16x16))
name = "add_fp16x16"
make_test([x, y], z, "input_0 + input_1", name)
def broadcast():
x = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
y = np.random.randint(-3, 3, (1, 3, 1)).astype(np.float64)
z = x + y
x = Tensor(Dtype.FP16x16, x.shape, to_fp(
x.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(
y.flatten(), FixedImpl.FP16x16))
z = Tensor(Dtype.FP16x16, z.shape, to_fp(
z.flatten(), FixedImpl.FP16x16))
name = "add_fp16x16_broadcast"
make_test([x, y], z, "input_0 + input_1", name)
default()
        broadcast()
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl
class And(RunAll):
@staticmethod
def and_bool():
def default():
x = (np.random.randn(3, 4) > 0).astype(bool)
y = (np.random.randn(3, 4) > 0).astype(bool)
z = np.logical_and(x, y)
x = Tensor(Dtype.BOOL, x.shape, x.flatten())
y = Tensor(Dtype.BOOL, y.shape, y.flatten())
z = Tensor(Dtype.BOOL, z.shape, z.flatten())
name = "and_bool"
make_test([x, y], z, "BoolTensor::and(@input_0, @input_1)", name)
def broadcast():
x = (np.random.randn(3, 4, 5) > 0).astype(bool)
y = (np.random.randn(3, 4, 5) > 0).astype(bool)
z = np.logical_and(x, y)
x = Tensor(Dtype.BOOL, x.shape, x.flatten())
y = Tensor(Dtype.BOOL, y.shape, y.flatten())
z = Tensor(Dtype.BOOL, z.shape, z.flatten())
name = "and_bool_broadcast"
make_test([x, y], z, "BoolTensor::and(@input_0, @input_1)", name)
default()
broadcast()
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl
def argmax_use_numpy(data: np.ndarray, axis: int = 0, keepdims: int = 1) -> np.ndarray:
result = np.argmax(data, axis=axis)
if keepdims == 1:
result = np.expand_dims(result, axis)
return result.astype(np.int64)
def argmax_use_numpy_select_last_index(
    data: np.ndarray, axis: int = 0, keepdims: int = 1
) -> np.ndarray:
data = np.flip(data, axis)
result = np.argmax(data, axis=axis)
result = data.shape[axis] - result - 1
if keepdims:
result = np.expand_dims(result, axis)
    return result.astype(np.int64)
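# Worked example (mirrors the select_last_index tests below): for
# data = [[2, 2], [3, 10]] and axis=1, flipping gives [[2, 2], [10, 3]],
# np.argmax returns [0, 0], and shape[1] - [0, 0] - 1 = [1, 1]: the *last*
# index of the maximum in each row.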
class Argmax(RunAll):
@staticmethod
def no_keepdims():
data = np.array([[2, 1], [3, 10]], dtype=np.float32)
axis = 1
keepdims = 0
result = argmax_use_numpy(data, axis=axis, keepdims=keepdims)
x = Tensor(Dtype.FP16x16, data.shape, data.flatten())
y = Tensor(Dtype.I32, result.shape, result.flatten())
name = "argmax_no_keepdims"
make_test(
[x], y, "input_0.argmax(1, Option::Some(false), Option::None(()))", name)
@staticmethod
def keepdims():
data = np.array([[2, 1], [3, 10]], dtype=np.float32)
axis = 1
keepdims = 1
result = argmax_use_numpy(data, axis=axis, keepdims=keepdims)
x = Tensor(Dtype.FP16x16, data.shape, data.flatten())
y = Tensor(Dtype.I32, result.shape, result.flatten())
name = "argmax_keepdims"
make_test(
[x], y, "input_0.argmax(1, Option::Some(true), Option::None(()))", name)
@staticmethod
def default_axes_keepdims():
data = np.array([[2, 1], [3, 10]], dtype=np.float32)
keepdims = 1
result = argmax_use_numpy(data, keepdims=keepdims)
x = Tensor(Dtype.FP16x16, data.shape, data.flatten())
y = Tensor(Dtype.I32, result.shape, result.flatten())
name = "argmax_default_axes_keepdims"
make_test(
[x], y, "input_0.argmax(0, Option::Some(true), Option::None(()))", name)
@staticmethod
def negative_axis_keepdims():
data = np.array([[2, 1], [3, 10]], dtype=np.float32)
axis = -1
keepdims = 1
result = argmax_use_numpy(data, axis=axis, keepdims=keepdims)
x = Tensor(Dtype.FP16x16, data.shape, data.flatten())
y = Tensor(Dtype.I32, result.shape, result.flatten())
name = "argmax_negative_axis_keepdims"
make_test(
[x], y, "input_0.argmax(-1, Option::Some(true), Option::None(()))", name)
@staticmethod
def no_keepdims_select_last_index():
        data = np.array([[2, 2], [3, 10]], dtype=np.float32)
axis = 1
keepdims = 0
result = argmax_use_numpy_select_last_index(
data, axis=axis, keepdims=keepdims)
x = Tensor(Dtype.FP16x16, data.shape, data.flatten())
y = Tensor(Dtype.I32, result.shape, result.flatten())
name = "argmax_no_keepdims_select_last_index"
make_test(
[x], y, "input_0.argmax(1, Option::Some(false), Option::Some(true))", name)
@staticmethod
def keepdims_select_last_index():
data = np.array([[2, 2], [3, 10]], dtype=np.float32)
axis = 1
keepdims = 1
result = argmax_use_numpy_select_last_index(
data, axis=axis, keepdims=keepdims)
x = Tensor(Dtype.FP16x16, data.shape, data.flatten())
y = Tensor(Dtype.I32, result.shape, result.flatten())
name = "argmax_keepdims_select_last_index"
make_test(
[x], y, "input_0.argmax(1, Option::Some(true), Option::Some(true))", name)
@staticmethod
def default_axes_keepdims_select_last_index():
data = np.array([[2, 2], [3, 10]], dtype=np.float32)
keepdims = 1
result = argmax_use_numpy_select_last_index(data, keepdims=keepdims)
x = Tensor(Dtype.FP16x16, data.shape, data.flatten())
y = Tensor(Dtype.I32, result.shape, result.flatten())
name = "argmax_default_axes_keepdims_select_last_index"
make_test(
[x], y, "input_0.argmax(0, Option::Some(true), Option::Some(true))", name)
@staticmethod
def negative_axis_keepdims_select_last_index():
data = np.array([[2, 2], [3, 10]], dtype=np.float32)
axis = -1
keepdims = 1
result = argmax_use_numpy_select_last_index(data, axis=axis, keepdims=keepdims)
x = Tensor(Dtype.FP16x16, data.shape, data.flatten())
y = Tensor(Dtype.I32, result.shape, result.flatten())
name = "argmax_negative_axis_keepdims_select_last_index"
make_test(
[x], y, "input_0.argmax(-1, Option::Some(true), Option::Some(t |
rue))", name) |
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl
def argmin_use_numpy(data: np.ndarray, axis: int = 0, keepdims: int = 1, dtype=np.int64) -> np.ndarray:
result = np.argmin(data, axis=axis)
if keepdims == 1:
result = np.expand_dims(result, axis)
return result.astype(dtype)
def argmin_use_numpy_select_last_index(
    data: np.ndarray, axis: int = 0, keepdims: int = 1, dtype=np.int64
) -> np.ndarray:
data = np.flip(data, axis)
result = np.argmin(data, axis=axis)
result = data.shape[axis] - result - 1
if keepdims:
result = np.expand_dims(result, axis)
    return result.astype(dtype)
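# Worked example: for data = [3, 1, 1] with axis=0, flipping gives [1, 1, 3],
# np.argmin returns 0, and shape[0] - 0 - 1 = 2: the *last* index at which
# the minimum value 1 occurs.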
class Argmin(RunAll):
@staticmethod
def argmin_u32():
def argmin_1D():
def default_params():
x = np.random.randint(0, 255, (3)).astype(np.uint32)
y = argmin_use_numpy(x, dtype=np.uint32).reshape((1))
x = Tensor(Dtype.U32, x.shape, x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
name = "argmin_u32_1D_default"
make_test(
[x], y, "input_0.argmin(0, Option::None(()), Option::None(()))", name)
def keepdims_false():
x = np.random.randint(0, 255, (3)).astype(np.uint32)
y = argmin_use_numpy(
x, keepdims=0, dtype=np.uint32).reshape((1))
x = Tensor(Dtype.U32, x.shape, x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
name = "argmin_u32_1D_keepdims_false"
make_test(
[x], y, "input_0.argmin(0, Option::Some(false), Option::None(()))", name)
def last_index():
x = np.random.randint(0, 255, (3)).astype(np.uint32)
y = argmin_use_numpy_select_last_index(
x, dtype=np.uint32).reshape((1))
x = Tensor(Dtype.U32, x.shape, x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
name = "argmin_u32_1D_last_index"
make_test(
[x], y, "input_0.argmin(0, Option::None(()), Option::Some(true))", name)
default_params()
keepdims_false()
last_index()
argmin_1D()
def argmin_2D():
def default_params():
x = np.random.randint(0, 255, (2, 2)).astype(np.uint32)
y = argmin_use_numpy(x, dtype=np.uint32)
x = Tensor(Dtype.U32, x.shape, x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
name = "argmin_u32_2D_default"
                make_test(
[x], y, "input_0.argmin(0, Option::None(()), Option::None(()))", name)
def keepdims_false():
x = np.random.randint(0, 255, (2, 2)).astype(np.uint32)
y = argmin_use_numpy(
x, keepdims=0, dtype=np.uint32)
x = Tensor(Dtype.U32, x.shape, x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
name = "argmin_u32_2D_keepdims_false"
make_test(
[x], y, "input_0.argmin(0, Option::Some(false), Option::None(()))", name)
def last_index():
x = np.random.randint(0, 255, (2, 2)).astype(np.uint32)
y = argmin_use_numpy_select_last_index(
x, dtype=np.uint32)
x = Tensor(Dtype.U32, x.shape, x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
name = "argmin_u32_2D_last_index"
make_test(
[x], y, "input_0.argmin(0, Option::None(()), Option::Some(true))", name)
default_params()
keepdims_false()
last_index()
argmin_2D()
def argmin_3D():
def default_params():
x = np.random.randint(0, 255, (2, 2, 2)).astype(np.uint32)
y = argmin_use_numpy(x, dtype=np.uint32)
x = Tensor(Dtype.U32, x.shape, x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
name = "argmin_u32_3D_default"
make_test(
[x], y, "input_0.argmin(0, Option::None(()), Option::None(()))", name)
def keepdims_false():
x = np.random.randint(0, 255, (2, 2, 2)).astype(np.uint32)
y = argmin_use_numpy(
x, keepdims=0, dtype=np.uint32)
x = Tensor(Dtype.U32, x.shape, x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
name = "argmin_u32_3D_keepdims_false"
                make_test(
[x], y, "input_0.argmin(0, Option::Some(false), Option::None(()))", name)
def last_index():
x = np.random.randint(0, 255, (2, 2, 2)).astype(np.uint32)
y = argmin_use_numpy_select_last_index(
x, dtype=np.uint32)
x = Tensor(Dtype.U32, x.shape, x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
name = "argmin_u32_3D_last_index"
make_test(
[x], y, "input_0.argmin(0, Option::None(()), Option::Some(true))", name)
default_params()
keepdims_false()
last_index()
argmin_3D()
@staticmethod
def argmin_i32():
def argmin_1D():
def default_params():
x = np.random.randint(-127, 127, (3)).astype(np.int32)
y = argmin_use_numpy(x, dtype=np.uint32).reshape((1))
x = Tensor(Dtype.I32, x.shape, x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
name = "argmin_i32_1D_default"
make_test(
[x], y, "input_0.argmin(0, Option::None(()), Option::None(()))", name)
def keepdims_false():
x = np.random.randint(-127, 127, (3)).astype(np.int32)
y = argmin_use_numpy(
x, keepdims=0, dtype=np.uint32).reshape((1))
x = Tensor(Dtype.I32, x.shape, x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
name = "argmin_i32_1D_keepdims_false"
make_test(
[x], y, "input_0.argmin(0, Option::Some(false), Option::None(()))", name)
def last_index():
x = np.random.randint(0, 255, (3)).astype(np.int32)
y = argmin_use_numpy_select_last_index(
x, dtype=np.uint32).reshape((1))
x = Tensor(Dtype.I32, x.shape, x.flatten())
                y = Tensor(Dtype.U32, y.shape, y.flatten())
name = "argmin_i32_1D_last_index"
make_test(
[x], y, "input_0.argmin(0, Option::None(()), Option::Some(true))", name)
default_params()
keepdims_false()
last_index()
argmin_1D()
def argmin_2D():
def default_params():
x = np.random.randint(-127, 127, (2, 2)).astype(np.int32)
y = argmin_use_numpy(x, dtype=np.uint32)
x = Tensor(Dtype.I32, x.shape, x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
name = "argmin_i32_2D_default"
make_test(
[x], y, "input_0.argmin(0, Option::None(()), Option::None(()))", name)
def keepdims_false():
x = np.random.randint(-127, 127, (2, 2)).astype(np.int32)
y = argmin_use_numpy(
x, keepdims=0, dtype=np.uint32)
x = Tensor(Dtype.I32, x.shape, x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
name = "argmin_i32_2D_keepdims_false"
make_test(
[x], y, "input_0.argmin(0, Option::Some(false), Option::None(()))", name)
def last_index():
x = np.random.randint(-127, 127, (2, 2)).astype(np.int32)
y = argmin_use_numpy_select_last_index(
x, dtype=np.int32)
x = Tensor(Dtype.I32, x.shape, x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
name = "argmin_i32_2D_last_index"
make_test(
[x], y, "input_0.argmin(0, Option::None(()), Option::Some(true))", name)
default_params()
keepdims_false()
last_index()
argmin_2D()
def argmin_3D():
def default_params():
x = np.random.randint(-127, 127, (2, 2, 2)).astype(np.int32)
                y = argmin_use_numpy(x, dtype=np.uint32)
x = Tensor(Dtype.I32, x.shape, x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
name = "argmin_i32_3D_default"
make_test(
[x], y, "input_0.argmin(0, Option::None(()), Option::None(()))", name)
def keepdims_false():
x = np.random.randint(-127, 127, (2, 2, 2)).astype(np.int32)
y = argmin_use_numpy(
x, keepdims=0, dtype=np.uint32)
x = Tensor(Dtype.I32, x.shape, x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
name = "argmin_i32_3D_keepdims_false"
make_test(
[x], y, "input_0.argmin(0, Option::Some(false), Option::None(()))", name)
def last_index():
x = np.random.randint(-127, 127, (2, 2, 2)).astype(np.int32)
y = argmin_use_numpy_select_last_index(
x, dtype=np.uint32)
x = Tensor(Dtype.I32, x.shape, x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
name = "argmin_i32_3D_last_index"
make_test(
[x], y, "input_0.argmin(0, Option::None(()), Option::Some(true))", name)
default_params()
keepdims_false()
last_index()
argmin_3D()
@staticmethod
def argmin_i8():
def argmin_1D():
def default_params():
x = np.random.randint(-127, 127, (3)).astype(np.int8)
y = argmin_use_numpy(x, dtype=np.uint32).reshape((1))
x = Tensor(Dtype.I8, x.shape, x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
name = "argmin_i8_1D_default"
make_test(
[x], y, "input_0.argmin(0, Option::None(()), Option::None(()))", name)
def keepdims_false():
x = np.random.randint(-127, 127, (3)).astype(np.int8)
                y = argmin_use_numpy(
x, keepdims=0, dtype=np.uint32).reshape((1))
x = Tensor(Dtype.I8, x.shape, x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
name = "argmin_i8_1D_keepdims_false"
make_test(
[x], y, "input_0.argmin(0, Option::Some(false), Option::None(()))", name)
def last_index():
x = np.random.randint(0, 255, (3)).astype(np.int8)
y = argmin_use_numpy_select_last_index(
x, dtype=np.uint32).reshape((1))
x = Tensor(Dtype.I8, x.shape, x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
name = "argmin_i8_1D_last_index"
make_test(
[x], y, "input_0.argmin(0, Option::None(()), Option::Some(true))", name)
default_params()
keepdims_false()
last_index()
argmin_1D()
def argmin_2D():
def default_params():
x = np.random.randint(-127, 127, (2, 2)).astype(np.int8)
y = argmin_use_numpy(x, dtype=np.uint32)
x = Tensor(Dtype.I8, x.shape, x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
name = "argmin_i8_2D_default"
make_test(
[x], y, "input_0.argmin(0, Option::None(()), Option::None(()))", name)
def keepdims_false():
x = np.random.randint(-127, 127, (2, 2)).astype(np.int8)
y = argmin_use_numpy(
x, keepdims=0, dtype=np.uint32)
x = Tensor(Dtype.I8, x.shape, x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
name = "argmin_i8_2D_keepdims_false"
make_test(
[x], y, "input_0.argmin(0, Option::Some(false), Option::None(()))", name)
def last_index():
                x = np.random.randint(-127, 127, (2, 2)).astype(np.int8)
y = argmin_use_numpy_select_last_index(
x, dtype=np.int8)
x = Tensor(Dtype.I8, x.shape, x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
name = "argmin_i8_2D_last_index"
make_test(
[x], y, "input_0.argmin(0, Option::None(()), Option::Some(true))", name)
default_params()
keepdims_false()
last_index()
argmin_2D()
def argmin_3D():
def default_params():
x = np.random.randint(-127, 127, (2, 2, 2)).astype(np.int8)
y = argmin_use_numpy(x, dtype=np.uint32)
x = Tensor(Dtype.I8, x.shape, x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
name = "argmin_i8_3D_default"
make_test(
[x], y, "input_0.argmin(0, Option::None(()), Option::None(()))", name)
def keepdims_false():
x = np.random.randint(-127, 127, (2, 2, 2)).astype(np.int8)
y = argmin_use_numpy(
x, keepdims=0, dtype=np.uint32)
x = Tensor(Dtype.I8, x.shape, x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
name = "argmin_i8_3D_keepdims_false"
make_test(
[x], y, "input_0.argmin(0, Option::Some(false), Option::None(()))", name)
def last_index():
x = np.random.randint(-127, 127, (2, 2, 2)).astype(np.int8)
y = argmin_use_numpy_select_last_index(
x, dtype=np.uint32)
x = Tensor(Dtype.I8, x.shape, x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
name = "argmin_i8_3D_last_index"
make_test(
[x], y, "input_0.argmin(0, Option::None(()), Option::Some(true))", name)
default_params()
keepdims_false()
last_index()
        argmin_3D()
@staticmethod
def argmin_fp16x16():
def argmin_1D():
def default_params():
x = to_fp(np.random.randint(-127, 127, (3)
).astype(np.int8), FixedImpl.FP16x16)
y = argmin_use_numpy(x, dtype=np.uint32).reshape((1))
x = Tensor(Dtype.FP16x16, x.shape, x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
name = "argmin_fp16x16_1D_default"
make_test(
[x], y, "input_0.argmin(0, Option::None(()), Option::None(()))", name)
def keepdims_false():
x = to_fp(np.random.randint(-127, 127, (3)
).astype(np.int8), FixedImpl.FP16x16)
y = argmin_use_numpy(
x, keepdims=0, dtype=np.uint32).reshape((1))
x = Tensor(Dtype.FP16x16, x.shape, x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
name = "argmin_fp16x16_1D_keepdims_false"
make_test(
[x], y, "input_0.argmin(0, Option::Some(false), Option::None(()))", name)
def last_index():
x = to_fp(np.random.randint(0, 255, (3)).astype(
np.int8), FixedImpl.FP16x16)
y = argmin_use_numpy_select_last_index(
x, dtype=np.uint32).reshape((1))
x = Tensor(Dtype.FP16x16, x.shape, x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
name = "argmin_fp16x16_1D_last_index"
make_test(
[x], y, "input_0.argmin(0, Option::None(()), Option::Some(true))", name)
default_params()
keepdims_false()
last_index()
argmin_1D()
def argmin_2D():
def default_params():
x = to_fp(np.random.randint(-127, 127, (2, 2)
                ).astype(np.int8), FixedImpl.FP16x16)
y = argmin_use_numpy(x, dtype=np.uint32)
x = Tensor(Dtype.FP16x16, x.shape, x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
name = "argmin_fp16x16_2D_default"
make_test(
[x], y, "input_0.argmin(0, Option::None(()), Option::None(()))", name)
def keepdims_false():
x = to_fp(np.random.randint(-127, 127, (2, 2)
).astype(np.int8), FixedImpl.FP16x16)
y = argmin_use_numpy(
x, keepdims=0, dtype=np.uint32)
x = Tensor(Dtype.FP16x16, x.shape, x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
name = "argmin_fp16x16_2D_keepdims_false"
make_test(
[x], y, "input_0.argmin(0, Option::Some(false), Option::None(()))", name)
def last_index():
x = to_fp(np.random.randint(-127, 127, (2, 2)
).astype(np.int8), FixedImpl.FP16x16)
y = argmin_use_numpy_select_last_index(
x, dtype=np.int8)
x = Tensor(Dtype.FP16x16, x.shape, x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
name = "argmin_fp16x16_2D_last_index"
make_test(
[x], y, "input_0.argmin(0, Option::None(()), Option::Some(true))", name)
default_params()
keepdims_false()
last_index()
argmin_2D()
def argmin_3D():
def default_params():
x = to_fp(np.random.randint(-127, 127, (2, 2, 2)
).astype(np.int8), FixedImpl.FP16x16)
y = argmin_use_numpy(x, dtype=np.uint32)
x = Tensor(Dtype.FP16x16, x.shape, x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
name = "argmin_fp16x16_3D_de |
fault"
make_test(
[x], y, "input_0.argmin(0, Option::None(()), Option::None(()))", name)
def keepdims_false():
x = to_fp(np.random.randint(-127, 127, (2, 2, 2)
).astype(np.int8), FixedImpl.FP16x16)
y = argmin_use_numpy(
x, keepdims=0, dtype=np.uint32)
x = Tensor(Dtype.FP16x16, x.shape, x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
name = "argmin_fp16x16_3D_keepdims_false"
make_test(
[x], y, "input_0.argmin(0, Option::Some(false), Option::None(()))", name)
def last_index():
x = to_fp(np.random.randint(-127, 127, (2, 2, 2)
).astype(np.int8), FixedImpl.FP16x16)
y = argmin_use_numpy_select_last_index(
x, dtype=np.uint32)
x = Tensor(Dtype.FP16x16, x.shape, x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
name = "argmin_fp16x16_3D_last_index"
make_test(
[x], y, "input_0.argmin(0, Option::None(()), Option::Some(true))", name)
default_params()
keepdims_false()
last_index()
argmin_3D()
@staticmethod
def argmin_fp8x23():
def argmin_1D():
def default_params():
x = to_fp(np.random.randint(-127, 127, (3)
).astype(np.int8), FixedImpl.FP8x23)
y = argmin_use_numpy(x, dtype=np.uint32).reshape((1))
x = Tensor(Dtype.FP8x23, x.shape,
x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
name = "argmin_fp8x23_1D_default"
make_test(
[x], y, "input_0.argmin(0, Option::None(()), Option::None(()))", name)
            def keepdims_false():
x = to_fp(np.random.randint(-127, 127, (3)
).astype(np.int8), FixedImpl.FP8x23)
y = argmin_use_numpy(
x, keepdims=0, dtype=np.uint32).reshape((1))
x = Tensor(Dtype.FP8x23, x.shape,
x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
name = "argmin_fp8x23_1D_keepdims_false"
make_test(
[x], y, "input_0.argmin(0, Option::Some(false), Option::None(()))", name)
def last_index():
x = to_fp(np.random.randint(0, 255, (3)).astype(
np.int8), FixedImpl.FP8x23)
y = argmin_use_numpy_select_last_index(
x, dtype=np.uint32).reshape((1))
x = Tensor(Dtype.FP8x23, x.shape,
x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
name = "argmin_fp8x23_1D_last_index"
make_test(
[x], y, "input_0.argmin(0, Option::None(()), Option::Some(true))", name)
default_params()
keepdims_false()
last_index()
argmin_1D()
def argmin_2D():
def default_params():
x = to_fp(np.random.randint(-127, 127, (2, 2)
).astype(np.int8), FixedImpl.FP8x23)
y = argmin_use_numpy(x, dtype=np.uint32)
x = Tensor(Dtype.FP8x23, x.shape, x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
name = "argmin_fp8x23_2D_default"
make_test(
[x], y, "input_0.argmin(0, Option::None(()), Option::None(()))", name)
def keepdims_false():
x = to_fp(np.random.randint(-127, 127, (2, 2)
).astype(np.int8), FixedImpl.FP8x23)
y = argmin_use_numpy(
                    x, keepdims=0, dtype=np.uint32)
x = Tensor(Dtype.FP8x23, x.shape,
x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
name = "argmin_fp8x23_2D_keepdims_false"
make_test(
[x], y, "input_0.argmin(0, Option::Some(false), Option::None(()))", name)
def last_index():
x = to_fp(np.random.randint(-127, 127, (2, 2)
).astype(np.int8), FixedImpl.FP8x23)
y = argmin_use_numpy_select_last_index(
x, dtype=np.int8)
x = Tensor(Dtype.FP8x23, x.shape,
x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
name = "argmin_fp8x23_2D_last_index"
make_test(
[x], y, "input_0.argmin(0, Option::None(()), Option::Some(true))", name)
default_params()
keepdims_false()
last_index()
argmin_2D()
def argmin_3D():
def default_params():
x = to_fp(np.random.randint(-127, 127, (2, 2, 2)
).astype(np.int8), FixedImpl.FP8x23)
y = argmin_use_numpy(x, dtype=np.uint32)
x = Tensor(Dtype.FP8x23, x.shape,
x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
name = "argmin_fp8x23_3D_default"
make_test(
[x], y, "input_0.argmin(0, Option::None(()), Option::None(()))", name)
def keepdims_false():
x = to_fp(np.random.randint(-127, 127, (2, 2, 2)
).astype(np.int8), FixedImpl.FP8x23)
y = argmin_use_numpy(
x, keepdims=0, dtype=np.uint32)
x = Tensor(Dtype.FP8x23, x.shape,
x.flatten())
                y = Tensor(Dtype.U32, y.shape, y.flatten())
name = "argmin_fp8x23_3D_keepdims_false"
make_test(
[x], y, "input_0.argmin(0, Option::Some(false), Option::None(()))", name)
def last_index():
x = to_fp(np.random.randint(-127, 127, (2, 2, 2)
).astype(np.int8), FixedImpl.FP8x23)
y = argmin_use_numpy_select_last_index(
x, dtype=np.uint32)
x = Tensor(Dtype.FP8x23, x.shape,
x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
name = "argmin_fp8x23_3D_last_index"
make_test(
[x], y, "input_0.argmin(0, Option::None(()), Option::Some(true))", name)
default_params()
keepdims_false()
last_index()
        argmin_3D()
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl
class Array_feature_extractor(RunAll):
@staticmethod
def array_feature_extractor_3D():
def array_feature_extractor_i32():
x = np.random.randint(-3, 3, (2, 3, 4)).astype(np.int32)
y = np.array([1, 3]).astype(np.uint32)
z = (x[..., y])
x = Tensor(Dtype.I32, x.shape, x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
z = Tensor(Dtype.I32, z.shape, z.flatten())
name = "array_feature_extractor_3D_i32"
make_test([x, y], z, "TensorTrait::array_feature_extractor(@input_0, input_1)", name)
def array_feature_extractor_fp8x23():
x = np.random.randint(-3, 3, (2, 3, 4)).astype(np.float64)
y = np.array([1, 3]).astype(np.uint32)
z = (x[..., y])
x = Tensor(Dtype.FP8x23, x.shape, to_fp(
x.flatten(), FixedImpl.FP8x23))
y = Tensor(Dtype.U32, y.shape, y.flatten())
z = Tensor(Dtype.FP8x23, z.shape, to_fp(
z.flatten(), FixedImpl.FP8x23))
name = "array_feature_extractor_3D_fp8x23"
make_test([x, y], z, "TensorTrait::array_feature_extractor(@input_0, input_1)", name)
def array_feature_extractor_fp16x16():
x = np.random.randint(-3, 3, (2, 3, 4)).astype(np.float64)
y = np.array([1, 3]).astype(np.uint32)
z = (x[..., y])
x = Tensor(Dtype.FP16x16, x.shape, to_fp(
x.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.U32, y.shape, y.flatten())
z = Tensor(Dtype.FP16x16, z.shape, to_fp(
z.flatten(), FixedImpl.FP16x16))
name = "array_feature_extractor_3D_fp16x16"
make_test([x, y], z, "TensorTrait::array_feature_extractor(@input_0, input_1)", name)
array_feature_extractor_i32()
array_feature_extractor_fp8x23()
array_feature_extractor_fp16x16()
@staticmethod
    def array_feature_extractor_2D():
def array_feature_extractor_i32():
x = np.random.randint(-3, 3, (3, 4)).astype(np.int32)
y = np.array([1, 3]).astype(np.uint32)
z = (x[..., y])
x = Tensor(Dtype.I32, x.shape, x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
z = Tensor(Dtype.I32, z.shape, z.flatten())
name = "array_feature_extractor_2D_i32"
make_test([x, y], z, "TensorTrait::array_feature_extractor(@input_0, input_1)", name)
def array_feature_extractor_fp8x23():
x = np.random.randint(-3, 3, (3, 4)).astype(np.float64)
y = np.array([1, 3]).astype(np.uint32)
z = (x[..., y])
x = Tensor(Dtype.FP8x23, x.shape, to_fp(
x.flatten(), FixedImpl.FP8x23))
y = Tensor(Dtype.U32, y.shape, y.flatten())
z = Tensor(Dtype.FP8x23, z.shape, to_fp(
z.flatten(), FixedImpl.FP8x23))
name = "array_feature_extractor_2D_fp8x23"
make_test([x, y], z, "TensorTrait::array_feature_extractor(@input_0, input_1)", name)
def array_feature_extractor_fp16x16():
x = np.random.randint(-3, 3, (3, 4)).astype(np.float64)
y = np.array([1, 3]).astype(np.uint32)
z = (x[..., y])
x = Tensor(Dtype.FP16x16, x.shape, to_fp(
x.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.U32, y.shape, y.flatten())
z = Tensor(Dtype.FP16x16, z.shape, to_fp(
z.flatten(), FixedImpl.FP16x16))
name = "array_feature_extractor_2D_fp16x16"
make_test([x, y], z, "TensorTrait::array_feature_extractor(@input_0, input_1)", name)
array_feature_extractor_i32()
array_feature_extractor_fp8x23()
array_feature_extractor_fp16x16()
@staticmethod
def array_feature_extractor_1D():
def array_feature_extractor_i32():
            x = np.random.randint(-3, 3, (4)).astype(np.int32)
y = np.array([1, 3]).astype(np.uint32)
z = (x[..., y])
x = Tensor(Dtype.I32, x.shape, x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
z = Tensor(Dtype.I32, z.shape, z.flatten())
name = "array_feature_extractor_1D_i32"
make_test([x, y], z, "TensorTrait::array_feature_extractor(@input_0, input_1)", name)
def array_feature_extractor_fp8x23():
x = np.random.randint(-3, 3, (4)).astype(np.float64)
y = np.array([1, 3]).astype(np.uint32)
z = (x[..., y])
x = Tensor(Dtype.FP8x23, x.shape, to_fp(
x.flatten(), FixedImpl.FP8x23))
y = Tensor(Dtype.U32, y.shape, y.flatten())
z = Tensor(Dtype.FP8x23, z.shape, to_fp(
z.flatten(), FixedImpl.FP8x23))
name = "array_feature_extractor_1D_fp8x23"
make_test([x, y], z, "TensorTrait::array_feature_extractor(@input_0, input_1)", name)
def array_feature_extractor_fp16x16():
x = np.random.randint(-3, 3, (4)).astype(np.float64)
y = np.array([1, 3]).astype(np.uint32)
z = (x[..., y])
x = Tensor(Dtype.FP16x16, x.shape, to_fp(
x.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.U32, y.shape, y.flatten())
z = Tensor(Dtype.FP16x16, z.shape, to_fp(
z.flatten(), FixedImpl.FP16x16))
name = "array_feature_extractor_1D_fp16x16"
make_test([x, y], z, "TensorTrait::array_feature_extractor(@input_0, input_1)", name)
array_feature_extractor_i32()
array_feature_extractor_fp8x23()
        array_feature_extractor_fp16x16()
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl
class Asin(RunAll):
@staticmethod
def asin_fp8x23():
x = np.random.uniform(-1, 1, (2, 2)).astype(np.float64)
y = np.arcsin(x)
x = Tensor(Dtype.FP8x23, x.shape, to_fp(x.flatten(), FixedImpl.FP8x23))
y = Tensor(Dtype.FP8x23, y.shape, to_fp(y.flatten(), FixedImpl.FP8x23))
name = "asin_fp8x23"
make_test([x], y, "input_0.asin()", name)
@staticmethod
def asin_fp16x16():
x = np.random.uniform(-1, 1, (2, 2)).astype(np.float64)
y = np.arcsin(x)
x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
name = "asin_fp16x16"
make_test([x], y, "input_0.asin()", name)
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl
class Asinh(RunAll):
@staticmethod
def asinh_fp8x23():
x = np.random.uniform(1, 5, (2, 2)).astype(np.float64)
y = np.arcsinh(x)
x = Tensor(Dtype.FP8x23, x.shape, to_fp(
x.flatten(), FixedImpl.FP8x23))
y = Tensor(Dtype.FP8x23, y.shape, to_fp(
y.flatten(), FixedImpl.FP8x23))
name = "asinh_fp8x23"
make_test([x], y, "input_0.asinh()", name)
@staticmethod
def asinh_fp16x16():
x = np.random.uniform(1, 5, (2, 2)).astype(np.float64)
y = np.arcsinh(x)
x = Tensor(Dtype.FP16x16, x.shape, to_fp(
x.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(
y.flatten(), FixedImpl.FP16x16))
name = "asinh_fp16x16"
make_test([x], y, "input_0.asinh()", name)
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl
class Atan(RunAll):
@staticmethod
def atan_fp8x23():
x = np.random.uniform(-10, 127, (2, 2)).astype(np.float64)
y = np.arctan(x)
x = Tensor(Dtype.FP8x23, x.shape, to_fp(
x.flatten(), FixedImpl.FP8x23))
y = Tensor(Dtype.FP8x23, y.shape, to_fp(
y.flatten(), FixedImpl.FP8x23))
name = "atan_fp8x23"
make_test([x], y, "input_0.atan()", name)
@staticmethod
def atan_fp16x16():
x = np.random.uniform(-10, 127, (2, 2)).astype(np.float64)
y = np.arctan(x)
x = Tensor(Dtype.FP16x16, x.shape, to_fp(
x.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(
y.flatten(), FixedImpl.FP16x16))
name = "atan_fp16x16"
make_test([x], y, "input_0.atan()", name)
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_node, make_test, to_fp, Tensor, Dtype, FixedImpl
class Binarizer(RunAll):
@staticmethod
def binarizer_fp8x23():
x = np.random.uniform(-3, 3, (3, 3, 3)).astype(np.float64)
threshold = np.float64(1)
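        # threshold = 1.0 encodes to FixedTrait::new(8388608, false) in FP8x23, since 2**23 = 8388608.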
y = (x > threshold).astype(np.float64)
x = Tensor(Dtype.FP8x23, x.shape, to_fp(
x.flatten(), FixedImpl.FP8x23))
y = Tensor(Dtype.FP8x23, y.shape, to_fp(
y.flatten(), FixedImpl.FP8x23))
name = "binarizer_fp8x23"
make_node([x], [y], name)
make_test([x], y, "TensorTrait::binarizer(@input_0, Option::Some(FixedTrait::new(8388608, false));", name)
@staticmethod
def binarizer_fp16x16():
x = np.random.uniform(-3, 3, (3, 3, 3)).astype(np.float64)
threshold = np.float64(1)
y = (x > threshold).astype(np.float64)
x = Tensor(Dtype.FP16x16, x.shape, to_fp(
x.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(
y.flatten(), FixedImpl.FP16x16))
name = "binarizer_fp16x16"
make_node([x], [y], name)
make_test([x], y, "TensorTrait::binarizer(@input_0, Option::Some(FixedTrait::new(65536, false));", name)
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl, Trait, get_data_statement
def blackman_window(size, output_datatype=None, periodic=None) -> np.ndarray:
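    # Blackman window: w[n] = 0.42 - 0.5*cos(2*pi*n/N) + 0.08*cos(4*pi*n/N),
    # where N = size when periodic, and size - 1 otherwise.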
if periodic == 1:
N_1 = size
else:
N_1 = size - 1
ni = np.arange(size, dtype=output_datatype)
alpha = 0.42
beta = 0.08
y = np.cos((ni * (np.float64(np.pi).astype(output_datatype) * 2)) / N_1).astype(output_datatype) * (-0.5)
y += np.cos((ni * (np.float64(np.pi).astype(output_datatype) * 4)) / N_1) * beta
y += alpha
    return y.astype(output_datatype)
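# Quick sanity check (commented out; not part of the generated tests): the
# symmetric variant should agree with numpy's built-in Blackman window.
# assert np.allclose(blackman_window(5, np.float64), np.blackman(5))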
class Blackman_window(RunAll):
@staticmethod
def fp8x23():
args = [3]
args_str = get_data_statement(to_fp(np.array(args).flatten(), FixedImpl.FP8x23), Dtype.FP8x23)
y = blackman_window(*args, np.float64)
y = Tensor(Dtype.FP8x23, y.shape, to_fp(y.flatten(), FixedImpl.FP8x23))
name = "blackman_window_fp8x23"
make_test(
[],
y,
f"TensorTrait::blackman_window({','.join(args_str)}, Option::Some(0))",
name
)
@staticmethod
def fp16x16():
print(get_data_statement(to_fp(np.array([np.pi]).flatten(), FixedImpl.FP16x16), Dtype.FP16x16))
args = [3]
args_str = get_data_statement(to_fp(np.array(args).flatten(), FixedImpl.FP16x16), Dtype.FP16x16)
y = blackman_window(*args, np.float16, 1)
y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
name = "blackman_window_fp16x16"
make_test(
[],
y,
f"TensorTrait::blackman_window({','.join(args_str)}, Option::Some(1))",
name
        )
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl
class Ceil(RunAll):
@staticmethod
def ceil_fp8x23():
x = np.random.uniform(-1, 1, (2, 2)).astype(np.float64)
y = np.ceil(x)
x = Tensor(Dtype.FP8x23, x.shape, to_fp(x.flatten(), FixedImpl.FP8x23))
y = Tensor(Dtype.FP8x23, y.shape, to_fp(y.flatten(), FixedImpl.FP8x23))
name = "ceil_fp8x23"
make_test([x], y, "input_0.ceil()", name)
@staticmethod
def ceil_fp16x16():
x = np.random.uniform(-1, 1, (2, 2)).astype(np.float64)
y = np.ceil(x)
x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
name = "ceil_fp16x16"
make_test([x], y, "input_0.ceil()", name)
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl
class Clip(RunAll):
@staticmethod
def clip_u32():
def clip_2D():
x = np.random.randint(0, 255, (2, 4)).astype(np.uint32)
y = np.clip(x, np.uint32(10), np.uint32(20))
x = Tensor(Dtype.U32, x.shape, x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
name = "clip_u32_2d"
make_test(
[x], y, "input_0.clip(Option::Some(10_u32), Option::Some(20_u32))", name)
def clip_3D():
x = np.random.randint(0, 255, (20, 10, 5)).astype(np.uint32)
y = np.clip(x, np.uint32(10), np.uint32(20))
x = Tensor(Dtype.U32, x.shape, x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
name = "clip_u32_3d"
make_test(
[x], y, "input_0.clip(Option::Some(10_u32), Option::Some(20_u32))", name)
clip_2D()
clip_3D()
@staticmethod
def clip_i32():
def clip_2D():
x = np.random.randint(-127, 127, (2, 4)).astype(np.int32)
y = np.clip(x, np.int32(-10), np.int32(20))
x = Tensor(Dtype.I32, x.shape, x.flatten())
y = Tensor(Dtype.I32, y.shape, y.flatten())
name = "clip_i32_2d"
make_test(
[x], y, "input_0.clip(Option::Some(-10_i32), Option::Some(20_i32))", name)
def clip_3D():
x = np.random.randint(-127, 127, (20, 10, 5)).astype(np.int32)
y = np.clip(x, np.int32(-10), np.int32(20))
x = Tensor(Dtype.I32, x.shape, x.flatten())
y = Tensor(Dtype.I32, y.shape, y.flatten())
name = "clip_i32_3d"
make_test(
[x], y, "input_0.clip(Option::Some(-10_i32), Option::Some(20_i32))", name)
clip_2D()
clip_3D()
@staticmethod
def clip_i8():
def clip_2D():
x = np.random.randint(-127, 127, (2, 4)).astype(np.int8)
y = np.clip(x, np.int8(-10), np.int8(20))
            x = Tensor(Dtype.I8, x.shape, x.flatten())
y = Tensor(Dtype.I8, y.shape, y.flatten())
name = "clip_i8_2d"
make_test(
[x], y, "input_0.clip(Option::Some(-10_i8), Option::Some(20_i8))", name)
def clip_3D():
x = np.random.randint(-127, 127, (20, 10, 5)).astype(np.int8)
y = np.clip(x, np.int8(-10), np.int8(20))
x = Tensor(Dtype.I8, x.shape, x.flatten())
y = Tensor(Dtype.I8, y.shape, y.flatten())
name = "clip_i8_3d"
make_test(
[x], y, "input_0.clip(Option::Some(-10_i8), Option::Some(20_i8))", name)
clip_2D()
clip_3D()
@staticmethod
def clip_fp8x23():
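        # Bounds in FP8x23: 10 * 2**23 = 83886080 (sign: true, i.e. -10) and 20 * 2**23 = 167772160.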
def clip_2D():
x = to_fp(np.random.randint(-127, 127, (2, 4)
).astype(np.int64), FixedImpl.FP8x23)
y = np.clip(x, to_fp(np.int64(-10), FixedImpl.FP8x23), to_fp(np.int64(20), FixedImpl.FP8x23))
x = Tensor(Dtype.FP8x23, x.shape, x.flatten())
y = Tensor(Dtype.FP8x23, y.shape, y.flatten())
name = "clip_fp8x23_2d"
make_test(
[x], y, "input_0.clip(Option::Some(FP8x23 { mag: 83886080, sign: true }), Option::Some(FP8x23 { mag: 167772160, sign: false }))", name)
def clip_3D():
x = to_fp(np.random.randint(-127, 127, (20, 10, 5)
).astype(np.int64), FixedImpl.FP8x23)
y = np.clip(x, to_fp(np.int64(-10), FixedImpl.FP8x23), to_fp(np.int64(20), FixedImpl.FP8x23))
x = Tensor(Dtype.FP8x23, x.shape, x.flatten())
y = Tensor(Dtype.FP8x23, y.shape, y.flatten())
name = "clip_fp8x23_3d"
make_test(
[x], y, "input_0.clip(Option::Some(FP8x23 { mag: 83886080, sign: true }), Option::Some(FP8x23 { mag: 167772160, sign: false }))", name)
clip_2D()
clip_3D()
@staticmethod
def clip_fp16x16():
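        # Bounds in FP16x16: 10 * 2**16 = 655360 (sign: true, i.e. -10) and 20 * 2**16 = 1310720.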
def clip_2D():
            x = to_fp(np.random.randint(-127, 127, (2, 4)).astype(np.int64), FixedImpl.FP16x16)
y = np.clip(x, to_fp(np.int64(-10), FixedImpl.FP16x16), to_fp(np.int64(20), FixedImpl.FP16x16))
x = Tensor(Dtype.FP16x16, x.shape, x.flatten())
y = Tensor(Dtype.FP16x16, y.shape, y.flatten())
name = "clip_fp16x16_2d"
make_test(
[x], y, "input_0.clip(Option::Some(FP16x16 { mag: 655360, sign: true }), Option::Some(FP16x16 { mag: 1310720, sign: false }))", name)
def clip_3D():
x = to_fp(np.random.randint(-127, 127, (20, 10, 5)
).astype(np.int64), FixedImpl.FP16x16)
y = np.clip(x, to_fp(np.int64(-10), FixedImpl.FP16x16), to_fp(np.int64(20), FixedImpl.FP16x16))
x = Tensor(Dtype.FP16x16, x.shape, x.flatten())
y = Tensor(Dtype.FP16x16, y.shape, y.flatten())
name = "clip_fp16x16_3d"
make_test(
[x], y, "input_0.clip(Option::Some(FP16x16 { mag: 655360, sign: true }), Option::Some(FP16x16 { mag: 1310720, sign: false }))", name)
clip_2D()
        clip_3D()
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl, Trait
def col2im(data, image_shape, block_shape, dilations=None, pads=None, strides=None):
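    # Rearranges column blocks back into a multidimensional image (the inverse
    # of im2col), following the ONNX reference implementation.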
if dilations is None:
dilations = [1 for s in image_shape]
if pads is None:
pads = [0 for s in image_shape] * 2
if strides is None:
strides = [1 for s in image_shape]
bl = np.prod(block_shape)
    C = data.shape[1] // bl  # number of channels; dim 1 packs C * prod(block_shape)
data = data.reshape(data.shape[:1] + (C,) + (bl,) + data.shape[2:])
ks = tuple(block_shape)
res = None
for n in range(data.shape[0]):
for c in range(data.shape[1]):
out = col2im_naive_implementation(
data[n, c, ...], image_shape, ks, dilations, pads, strides
)
if res is None:
new_shape = data.shape[:2] + out.shape
res = np.empty(new_shape, dtype=data.dtype)
res[n, c, ...] = out
return (res,)
def _get_indices(i, shape):
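    # Converts the flat index i into multi-dimensional indices for the given shape.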
res = np.empty((len(shape),), dtype=np.int64)
k = len(shape) - 1
while k > 0:
m = i % shape[k]
res[k] = m
i -= m
        i //= shape[k]  # exact integer division: (i - m) is divisible by shape[k]
k -= 1
res[0] = i
return res
def _col2im_shape_check(X, output_shape, kernel_shape, dilations, pads, strides):
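    # Validates that the input dimensions are consistent with the requested
    # output shape, kernel shape, dilations, pads, and strides.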
n_input_plane = X.shape[0]
kernel_size = np.prod(kernel_shape)
if n_input_plane % kernel_size != 0:
raise ValueError(
f"Expected size of input's dimension 1 to be divisible by the "
f"product of kernel_size={kernel_size}, "
f"but got input.size(1)={n_input_plane} "
f"and kernel_shape={kernel_shape}, X.shape={X.shape}, output_shape={output_shape}."
)
input_length = X.shape[1]
n_dims = len(output_shape)
n_blocks = []
for i in range(n_dims):
n_block = (
output_shape[i]
+ pads[i, :].sum()
- dilations[i] * (kernel_shape[i] - 1)
- 1
        ) // strides[i] + 1
n_blocks.append(n_block)
block_size = np.prod(n_blocks)
    if input_length != block_size:
raise ValueError(
f"Given n_input_plane={n_input_plane}, X.shape={X.shape}, "
f"output_shape={output_shape}, kernel_shape={kernel_shape}, "
f"dilations={dilations}, pads={pads}, strides={strides}, "
f"expected size of input's dimension 2 to match the calculated number of "
f"sliding blocks {n_blocks} = {block_size}, "
f"but got input.size(2)={input_length}.",
)
def col2im_naive_implementation(data, image_shape, kernel_shape, dilations, pads, strides):
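    # Scatter-adds each column entry back to its image position, skipping
    # coordinates that fall inside the padding area.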
n_dims = len(pads)
new_pads = np.array([(pads[i], pads[i + n_dims]) for i in range(n_dims)])
_col2im_shape_check(data, image_shape, kernel_shape, dilations, new_pads, strides)
data_col = data
data_im = np.zeros(image_shape, dtype=data.dtype)
dim_col = []
for i in range(n_dims):
col = (
image_shape[i]
+ new_pads[i, :].sum()
- (dilations[i] * (kernel_shape[i] - 1) + 1)
        ) // strides[i] + 1
dim_col.append(col)
kernel_size = np.prod(kernel_shape)
col_size = np.prod(dim_col)
for c_col in range(kernel_size):
offset = _get_indices(c_col, kernel_shape)
for col in range(col_size):
ind_col = _get_indices(col, dim_col)
ind_im = []
for i in range(n_dims):
ind = (
ind_col[i] * strides[i] - new_pads[i, 0] + offset[i] * dilations[i]
)
ind_im.append(ind)
if not _is_out(ind_im, data_im.shape):
data_im[tuple(ind_im)] += data_col[c_col, col]
return data_im
def _is_out(ind, shape):
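    # Returns True if any coordinate lies outside the image bounds (i.e. in the padding).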
for i, s in zip(ind, shape):
if i < 0:
return True
if i >= s:
return True
    return False
class Col2im(RunAll):
@staticmethod
def export_col2im() -> None:
x = np.array(
[
[
[1.0, 6.0, 11.0, 16.0, 21.0],
[2.0, 7.0, 12.0, 17.0, 22.0],
[3.0, 8.0, 13.0, 18.0, 23.0],
[4.0, 9.0, 14.0, 19.0, 24.0],
                    [5.0, 10.0, 15.0, 20.0, 25.0],
]
]
).astype(np.float32)
image_shape = np.array([5, 5]).astype(np.int64)
block_shape = np.array([1, 5]).astype(np.int64)
y = col2im(x,image_shape,block_shape)
y = np.array(y[0])
x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
name = "col2im"
func_sig = "NNTrait::col2im("
func_sig += "@input_0,"
func_sig += "array![5, 5].span(),"
func_sig += "array![1, 5].span(),"
func_sig += "Option::None,"
func_sig += "Option::None,"
func_sig += "Option::None)"
make_test(
[x], y, func_sig, name, Trait.NN)
@staticmethod
def export_col2im_strides() -> None:
x = np.array(
[
[
[0.0, 0.0, 0.0, 0.0],
[1.0, 1.0, 1.0, 1.0],
[1.0, 1.0, 1.0, 1.0],
[1.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0],
[1.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 0.0],
]
]
).astype(np.float32)
image_shape = np.array([5, 5]).astype(np.int64)
block_shape = np.array([3, 3]).astype(np.int64)
y = col2im(x,image_shape,block_shape,strides=[2, 2])
y = np.array(y[0])
x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp( |
y.flatten(), FixedImpl.FP16x16))
name = "col2im_strides"
func_sig = "NNTrait::col2im("
func_sig += "@input_0,"
func_sig += "array![5, 5].span(),"
func_sig += "array![3, 3].span(),"
func_sig += "Option::None,"
func_sig += "Option::None,"
func_sig += "Option::Some(array![2, 2].span()))"
make_test(
[x], y, func_sig, name, Trait.NN)
@staticmethod
def export_col2im_pads() -> None:
x = np.array(
[
[
[
1.0, 6.0, 11.0, 16.0, 21.0, 26, 31, 36, 41, 46, 51, 56, 61, 66, 71,
],
[
2.0, 7.0, 12.0, 17.0, 22.0, 27, 32, 37, 42, 47, 52, 57, 62, 67, 72,
],
[
3.0, 8.0, 13.0, 18.0, 23.0, 28, 33, 38, 43, 48, 53, 58, 63, 68, 73,
],
[
4.0, 9.0, 14.0, 19.0, 24.0, 29, 34, 39, 44, 49, 54, 59, 64, 69, 74,
],
[
5.0, 10.0, 15.0, 20.0, 25.0, 30, 35, 40, 45, 50, 55, 60, 65, 70, 75,
],
]
]
).astype(np.float32)
image_shape = np.array([5, 5]).astype(np.int64)
block_shape = np.array([1, 5]).astype(np.int64)
y = col2im(x,image_shape,block_shape,pads=[0, 1, 0, 1])
y = np.array(y[0])
x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
name = "col2im_pads"
func_sig = "NNTrait::col2im("
func_sig += "@input_0,"
func_sig += "array![5, 5].span(),"
func_sig += "array![1, 5].span(),"
func_sig += "Option::None,"
func_sig += "Option::Some(array![0, 1, 0, 1].span()),"
func_sig += "Option::None)"
make_test(
[x], y, func_sig, name, Trait.NN)
    @staticmethod
def export_col2im_dilations() -> None:
x = np.array(
[
[
[1.0, 5.0, 9.0, 13.0, 17],
[2.0, 6.0, 10.0, 14.0, 18],
[3.0, 7.0, 11.0, 15.0, 19],
[4.0, 8.0, 12.0, 16.0, 20],
]
]
).astype(np.float32)
image_shape = np.array([6, 6]).astype(np.int64)
block_shape = np.array([2, 2]).astype(np.int64)
y = col2im(x,image_shape,block_shape, dilations=[1, 5])
y = np.array(y[0])
x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
name = "col2im_dilations"
func_sig = "NNTrait::col2im("
func_sig += "@input_0,"
func_sig += "array![6, 6].span(),"
func_sig += "array![2, 2].span(),"
func_sig += "Option::Some(array![1, 5].span()),"
func_sig += "Option::None,"
func_sig += "Option::None)"
make_test(
[x], y, func_sig, name, Trait.NN)
@staticmethod
def export_col2im_5D() -> None:
x = np.array(
[
[
[1, 6, 11, 16, 21, 26, 31, 36, 41, 46, 51, 56],
[2, 7, 12, 17, 22, 27, 32, 37, 42, 47, 52, 57],
[3, 8, 13, 18, 23, 28, 33, 38, 43, 48, 53, 58],
[4, 9, 14, 19, 24, 29, 34, 39, 44, 49, 54, 59],
[5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60],
[61, 66, 71, 76, 81, 86, 91, 96, 101, 106, 111, 116],
[62, 67, 72, 77, 82, 87, 92, 97, 102, 107, 112, 117],
[63, 68, 73, 78, 83, 88, 93, 98, 103, 108, 113, 118],
[64, 69, 74, 79, 84, 89, 94, 99, 104, 109, 114, 119],
[65, 70, 75, 80, 85, 90, 95, 100, 105, 110, 115, 120],
]
]
).astype(np.float32)
        image_shape = np.array([3, 4, 5]).astype(np.int64)
block_shape = np.array([1, 1, 5]).astype(np.int64)
y = col2im(x,image_shape,block_shape)
y = np.array(y[0])
x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16))
name = "col2im_5D"
func_sig = "NNTrait::col2im("
func_sig += "@input_0,"
func_sig += "array![3, 4, 5].span(),"
func_sig += "array![1, 1, 5].span(),"
func_sig += "Option::None,"
func_sig += "Option::None,"
func_sig += "Option::None)"
make_test(
            [x], y, func_sig, name, Trait.NN)
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl, Trait
class Compress(RunAll):
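    # Each case checks Tensor::compress against numpy.compress, which keeps the
    # slices along axis whose condition entry is nonzero.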
@staticmethod
def compress_fp16x16():
def compress_3D():
def default():
x1 = np.arange(0,27).reshape(3,3,3).astype(np.int64)
x2 = np.array([0, 1, 1]).astype(np.uint32)
y = x1.compress(x2, axis=0)
x1 = Tensor(Dtype.FP16x16, x1.shape, to_fp(x1.flatten(), FixedImpl.FP16x16))
x2 = Tensor(Dtype.U32, x2.shape, x2.flatten())
y = Tensor(Dtype.FP16x16, y.shape, to_fp(
y.flatten(), FixedImpl.FP16x16))
name = "compress_fp16x16_3d_default"
make_test(
inputs = [x1, x2], output = y, func_sig = "input_0.compress(condition:input_1, axis:Option::Some(0))",
name= name)
def axis1():
x1 = np.arange(0,180).reshape(3,4,3,5).astype(np.int64)
x2 = np.array([1, 1, 1, 0]).astype(np.int64)
y = x1.compress(x2, axis=1)
x1 = Tensor(Dtype.FP16x16, x1.shape, to_fp(x1.flatten(), FixedImpl.FP16x16))
x2 = Tensor(Dtype.U32, x2.shape, x2.flatten())
y = Tensor(Dtype.FP16x16, y.shape, to_fp(
y.flatten(), FixedImpl.FP16x16))
name = "compress_fp16x16_3d_axis1"
make_test(
inputs = [x1, x2], output = y, func_sig = "input_0.compress(condition:input_1, axis:Option::Some(1))",
name= name)
def axis2():
x1 = np.arange(0,48).reshape(4,3,4).astype(np.int64)
x2 = np.array([1, 0, 1, 1]).astype(np.int64)
y = x1.compress(x2, axis=2)
x1 = Tensor(Dtype.FP16x16, x1.shape, to_fp(x1.flatten(), FixedImpl.FP16x16))
x2 = Tensor(Dtype.U32, x2.shape, x2.flatten())
y = Tensor(Dtype.FP16x16, y.shape, to_fp(
y.flatten(), FixedImpl.FP16x16))
name = "compress |
_fp16x16_3d_axis2"
make_test(
inputs = [x1, x2], output = y, func_sig = "input_0.compress(condition:input_1, axis:Option::Some(2))",
name= name)
def axis3():
x1 = np.arange(0,96).reshape(4,3,4, 2).astype(np.int64)
x2 = np.array([1, 0]).astype(np.int64)
y = x1.compress(x2, axis=3)
x1 = Tensor(Dtype.FP16x16, x1.shape, to_fp(x1.flatten(), FixedImpl.FP16x16))
x2 = Tensor(Dtype.U32, x2.shape, x2.flatten())
y = Tensor(Dtype.FP16x16, y.shape, to_fp(
y.flatten(), FixedImpl.FP16x16))
name = "compress_fp16x16_3d_axis3"
make_test(
inputs = [x1, x2], output = y, func_sig = "input_0.compress(condition:input_1, axis:Option::Some(3))",
name= name)
def noaxis():
x1 = np.arange(0,27).reshape(3,3,3).astype(np.int64)
x2 = np.array([1, 0, 1, 0, 1, 1, 1, 1, 1]).astype(np.int64)
y = x1.compress(x2)
x1 = Tensor(Dtype.FP16x16, x1.shape, to_fp(x1.flatten(), FixedImpl.FP16x16))
x2 = Tensor(Dtype.U32, x2.shape, x2.flatten())
y = Tensor(Dtype.FP16x16, y.shape, to_fp(
y.flatten(), FixedImpl.FP16x16))
name = "compress_fp16x16_3d_noaxis"
make_test(
inputs = [x1, x2], output = y, func_sig = "input_0.compress(condition:input_1, axis:Option::None(()))",
name= name)
default()
axis1()
axis2()
axis3()
noaxis()
compress_3D()
@staticmethod
def compress_fp8x23():
def compress_3D():
def default():
x1 = np.arange(0,27).reshape(3,3,3).astype(np.int64)
x2 = np.array([0, 1, 1]).astype(np.uint32)
                y = x1.compress(x2, axis=0)
x1 = Tensor(Dtype.FP8x23, x1.shape, to_fp(x1.flatten(), FixedImpl.FP8x23))
x2 = Tensor(Dtype.U32, x2.shape, x2.flatten())
y = Tensor(Dtype.FP8x23, y.shape, to_fp(y.flatten(), FixedImpl.FP8x23))
name = "compress_fp8x23_3d_default"
make_test(
inputs = [x1, x2], output = y, func_sig = "input_0.compress(condition:input_1, axis:Option::Some(0))",
name= name)
def axis1():
x1 = np.arange(0,27).reshape(3,3,3).astype(np.int64)
x2 = np.array([0, 1, 1]).astype(np.uint32)
y = x1.compress(x2, axis=1)
x1 = Tensor(Dtype.FP8x23, x1.shape, to_fp(x1.flatten(), FixedImpl.FP8x23))
x2 = Tensor(Dtype.U32, x2.shape, x2.flatten())
y = Tensor(Dtype.FP8x23, y.shape, to_fp(y.flatten(), FixedImpl.FP8x23))
name = "compress_fp8x23_3d_axis1"
make_test(
inputs = [x1, x2], output = y, func_sig = "input_0.compress(condition:input_1, axis:Option::Some(1))",
name= name)
def axis2():
x1 = np.arange(0,27).reshape(3,3,3).astype(np.int64)
x2 = np.array([0, 1, 1]).astype(np.uint32)
y = x1.compress(x2, axis=2)
x1 = Tensor(Dtype.FP8x23, x1.shape, to_fp(x1.flatten(), FixedImpl.FP8x23))
x2 = Tensor(Dtype.U32, x2.shape, x2.flatten())
y = Tensor(Dtype.FP8x23, y.shape, to_fp(y.flatten(), FixedImpl.FP8x23))
name = "compress_fp8x23_3d_axis2"
make_test(
inputs = [x1, x2], output = y, func_sig = "input_0.compress(condition:input_1, axis:Option::Some(2))",
name= name)
default()
axis1()
axis2()
compress_3D()
@staticmethod
    def compress_i8():
def compress_3D():
def default():
x1 = np.arange(0,27).reshape(3,3,3).astype(np.int8)
x2 = np.array([0, 1, 1]).astype(np.uint8)
y = x1.compress(x2, axis=0)
x1 = Tensor(Dtype.I8, x1.shape, x1.flatten())
x2 = Tensor(Dtype.U32, x2.shape, x2.flatten())
y = Tensor(Dtype.I8, y.shape, y.flatten())
name = "compress_i8_3d_default"
make_test(
inputs = [x1, x2], output = y, func_sig = "input_0.compress(condition:input_1, axis:Option::Some(0))",
name= name)
def axis1():
x1 = np.arange(0,27).reshape(3,3,3).astype(np.int8)
x2 = np.array([0, 1, 1]).astype(np.uint8)
y = x1.compress(x2, axis=1)
x1 = Tensor(Dtype.I8, x1.shape, x1.flatten())
x2 = Tensor(Dtype.U32, x2.shape, x2.flatten())
y = Tensor(Dtype.I8, y.shape, y.flatten())
name = "compress_i8_3d_axis1"
make_test(
inputs = [x1, x2], output = y, func_sig = "input_0.compress(condition:input_1, axis:Option::Some(1))",
name= name)
def axis2():
x1 = np.arange(0,27).reshape(3,3,3).astype(np.int8)
x2 = np.array([0, 1, 1]).astype(np.uint8)
y = x1.compress(x2, axis=2)
x1 = Tensor(Dtype.I8, x1.shape, x1.flatten())
x2 = Tensor(Dtype.U32, x2.shape, x2.flatten())
y = Tensor(Dtype.I8, y.shape, y.flatten())
name = "compress_i8_3d_axis2"
make_test(
inputs = [x1, x2], output = y, func_sig = "input_0.compress(condition:input_1, axis:Option::Some(2))",
name= name)
default()
axis1()
axis2()
compress_3D()
@staticmethod
    def compress_i32():
def compress_3D():
def default():
x1 = np.arange(0,27).reshape(3,3,3).astype(np.int32)
x2 = np.array([0, 1, 1]).astype(np.int32)
y = x1.compress(x2, axis=0)
x1 = Tensor(Dtype.I32, x1.shape, x1.flatten())
x2 = Tensor(Dtype.U32, x2.shape, x2.flatten())
y = Tensor(Dtype.I32, y.shape, y.flatten())
name = "compress_i32_3d_default"
make_test(
inputs = [x1, x2], output = y, func_sig = "input_0.compress(condition:input_1, axis:Option::Some(0))",
name= name)
def axis1():
x1 = np.arange(0,27).reshape(3,3,3).astype(np.int32)
x2 = np.array([0, 1, 1]).astype(np.int32)
y = x1.compress(x2, axis=1)
x1 = Tensor(Dtype.I32, x1.shape, x1.flatten())
x2 = Tensor(Dtype.U32, x2.shape, x2.flatten())
y = Tensor(Dtype.I32, y.shape, y.flatten())
name = "compress_i32_3d_axis1"
make_test(
inputs = [x1, x2], output = y, func_sig = "input_0.compress(condition:input_1, axis:Option::Some(1))",
name= name)
def axis2():
x1 = np.arange(0,27).reshape(3,3,3).astype(np.int32)
x2 = np.array([0, 1, 1]).astype(np.int32)
y = x1.compress(x2, axis=2)
x1 = Tensor(Dtype.I32, x1.shape, x1.flatten())
x2 = Tensor(Dtype.U32, x2.shape, x2.flatten())
y = Tensor(Dtype.I32, y.shape, y.flatten())
name = "compress_i32_3d_axis2"
make_test(
inputs = [x1, x2], output = y, func_sig = "input_0.compress(condition:input_1, axis:Option::Some(2))",
name= name)
default()
axis1()
axis2()
compress_3D()
    @staticmethod
def compress_u32():
def compress_3D():
def default():
x1 = np.arange(0,48).reshape(4,4,3).astype(np.uint32)
x2 = np.array([1, 1]).astype(np.uint32)
y = x1.compress(x2, axis=0)
x1 = Tensor(Dtype.U32, x1.shape, x1.flatten())
x2 = Tensor(Dtype.U32, x2.shape, x2.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
name = "compress_u32_3d_default"
make_test(
inputs = [x1, x2], output = y, func_sig = "input_0.compress(condition:input_1, axis:Option::Some(0))",
name= name)
def axis1():
x1 = np.arange(0,36).reshape(3,4,3).astype(np.uint32)
x2 = np.array([0, 1, 1]).astype(np.uint32)
y = x1.compress(x2, axis=1)
x1 = Tensor(Dtype.U32, x1.shape, x1.flatten())
x2 = Tensor(Dtype.U32, x2.shape, x2.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
name = "compress_u32_3d_axis1"
make_test(
inputs = [x1, x2], output = y, func_sig = "input_0.compress(condition:input_1, axis:Option::Some(1))",
name= name)
def axis2():
x1 = np.arange(0,48).reshape(3,4,4).astype(np.uint32)
x2 = np.array([0, 1, 1]).astype(np.uint32)
y = x1.compress(x2, axis=2)
x1 = Tensor(Dtype.U32, x1.shape, x1.flatten())
x2 = Tensor(Dtype.U32, x2.shape, x2.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
name = "compress_u32_3d_axis2"
make_test(
inputs = [x1, x2], output = y, func_sig = "input_0.compress(condition:input_1, axis:Option::Some(2))",
name= name)
def axis2_2():
                x1 = np.arange(0,60).reshape(3,4,5).astype(np.uint32)
x2 = np.array([0, 1, 1]).astype(np.uint32)
y = x1.compress(x2, axis=2)
x1 = Tensor(Dtype.U32, x1.shape, x1.flatten())
x2 = Tensor(Dtype.U32, x2.shape, x2.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
name = "compress_u32_3d_axis2_2"
make_test(
inputs = [x1, x2], output = y, func_sig = "input_0.compress(condition:input_1, axis:Option::Some(2))",
name= name)
def axis3():
x1 = np.arange(0,270).reshape(3,3,5,6).astype(np.uint32)
x2 = np.array([0, 1, 1,1,0,1]).astype(np.uint32)
y = x1.compress(x2, axis=3)
x1 = Tensor(Dtype.U32, x1.shape, x1.flatten())
x2 = Tensor(Dtype.U32, x2.shape, x2.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
name = "compress_u32_3d_axis3"
make_test(
inputs = [x1, x2], output = y, func_sig = "input_0.compress(condition:input_1, axis:Option::Some(3))",
name= name)
default()
axis1()
axis2()
axis2_2()
axis3()
        compress_3D()
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl, Trait
class Concat(RunAll):
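    # Each case checks TensorTrait::concat against numpy.concatenate along the given axis.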
@staticmethod
def concat_u32():
def concat_1D():
x1 = np.arange(0,3).astype(np.uint32)
x2 = np.arange(3,6).astype(np.uint32)
y = np.concatenate((x1, x2))
x1 = Tensor(Dtype.U32, x1.shape, x1.flatten())
x2 = Tensor(Dtype.U32, x2.shape, x2.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
name = "concat_u32_1d"
make_test(
inputs = [x1, x2], output = y, func_sig = "TensorTrait::concat(array![input_0, input_1].span(), 0)",
name= name, trait= Trait.TENSOR)
def concat_2D():
x1 = np.arange(0,4).astype(np.uint32).reshape(2,2)
x2 = np.arange(4,8).astype(np.uint32).reshape(2,2)
y = np.concatenate((x1, x2), axis=0)
x1 = Tensor(Dtype.U32, x1.shape, x1.flatten())
x2 = Tensor(Dtype.U32, x2.shape, x2.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
name = "concat_u32_2d"
make_test(
inputs = [x1, x2], output = y, func_sig = "TensorTrait::concat(array![input_0, input_1].span(), 0)",
name= name, trait= Trait.TENSOR)
def concat_3D():
def default():
x1 = np.arange(0,27).astype(np.uint32).reshape(3,3,3)
x2 = np.arange(27,54).astype(np.uint32).reshape(3,3,3)
y = np.concatenate((x1, x2), axis=0)
x1 = Tensor(Dtype.U32, x1.shape, x1.flatten())
x2 = Tensor(Dtype.U32, x2.shape, x2.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
name = "concat_u32_3d_default"
make_test(
inputs = [x1, x2], output = y, func_sig = "TensorTrait::concat(array![input_0, input_1].span(), 0)",
name= name, trait= Trait.TENSOR)
def axis_1():
                x1 = np.arange(0,27).astype(np.uint32).reshape(3,3,3)
This dataset is a truncated version of this one, but in a format compatible with MLX-LoRA, using {"text": "This is an example for the model."}; each entry has been truncated along code structure (classes, functions, etc.) to ensure it stays under 2048 tokens.