ID (stringlengths 36-36) | Language (stringclasses 1 value) | Repository Name (stringclasses 13 values) | File Name (stringlengths 2-44) | File Path in Repository (stringlengths 11-111) | File Path for Unit Test (stringlengths 16-116) | Code (stringlengths 0-278k) | Unit Test - (Ground Truth) (stringlengths 127-663k) | Code Url (stringlengths 91-198) | Test Code Url (stringlengths 96-203) | Commit Hash (stringclasses 13 values)
---|---|---|---|---|---|---|---|---|---|---|
0d8fad02-ec28-44dc-a261-776f19b8ca59 | cpp | tensorflow/tensorflow | graph_def_util | tensorflow/core/framework/graph_def_util.cc | tensorflow/core/framework/graph_def_util_test.cc | #include "tensorflow/core/framework/graph_def_util.h"
#include <set>
#include <unordered_map>
#include <unordered_set>
#include <vector>
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/op_def_util.h"
#include "tensorflow/core/framework/versions.pb.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/lib/strings/strcat.h"
namespace tensorflow {
string SummarizeGraphDef(const GraphDef& graph_def) {
string ret;
strings::StrAppend(
&ret, "versions = ", graph_def.versions().ShortDebugString(), ";\n");
for (const NodeDef& node : graph_def.node()) {
strings::StrAppend(&ret, SummarizeNodeDef(node), ";\n");
}
return ret;
}
Status ValidateExternalGraphDefSyntax(const GraphDef& graph_def) {
for (const NodeDef& node : graph_def.node()) {
TF_RETURN_IF_ERROR(ValidateExternalNodeDefSyntax(node));
}
return absl::OkStatus();
}
Status AddDefaultAttrsToGraphDef(GraphDef* graph_def,
const OpRegistryInterface& op_registry,
int node_offset) {
return AddDefaultAttrsToGraphDef(graph_def, op_registry, node_offset, false);
}
Status AddDefaultAttrsToGraphDef(GraphDef* graph_def,
const OpRegistryInterface& op_registry,
int node_offset, bool skip_unknown_ops) {
if (node_offset > graph_def->node_size()) {
return errors::InvalidArgument(
"Tried to add default attrs to GraphDef "
"starting at offset ",
node_offset, " with total nodes in graph: ", graph_def->node_size());
}
for (int i = node_offset; i < graph_def->node_size(); ++i) {
NodeDef* node_def = graph_def->mutable_node(i);
const OpDef* op_def;
Status s = op_registry.LookUpOpDef(node_def->op(), &op_def);
if (s.ok()) {
AddDefaultsToNodeDef(*op_def, node_def);
} else if (!skip_unknown_ops) {
return s;
}
}
return absl::OkStatus();
}
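// Illustrative usage sketch (not part of the original file; the variable
// names are hypothetical). Defaults are typically added for every node of a
// freshly parsed GraphDef against the global op registry:
//
//   GraphDef graph_def;  // e.g. parsed from a serialized proto
//   TF_RETURN_IF_ERROR(AddDefaultAttrsToGraphDef(
//       &graph_def, *OpRegistry::Global(), /*node_offset=*/0));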
static Status RemoveNewDefaultAttrsFromNodeDef(
NodeDef* node_def, const OpRegistryInterface& consumer_op_registry,
const OpRegistryInterface& producer_op_registry,
std::set<std::pair<string, string>>* op_attr_removed) {
const OpDef* producer_op_def;
const OpDef* consumer_op_def;
TF_RETURN_IF_ERROR(
producer_op_registry.LookUpOpDef(node_def->op(), &producer_op_def));
TF_RETURN_IF_ERROR(
consumer_op_registry.LookUpOpDef(node_def->op(), &consumer_op_def));
std::vector<string> to_remove;
for (const auto& attr : node_def->attr()) {
if (!absl::StartsWith(attr.first, "_") &&
FindAttr(attr.first, *consumer_op_def) == nullptr) {
const OpDef::AttrDef* producer_attr_def =
FindAttr(attr.first, *producer_op_def);
if (producer_attr_def == nullptr) {
return errors::InvalidArgument(
"Attr '", attr.first,
"' missing in producer's OpDef: ", SummarizeOpDef(*producer_op_def),
" but found in node: ", FormatNodeDefForError(*node_def));
}
if (producer_attr_def->has_default_value() &&
AreAttrValuesEqual(producer_attr_def->default_value(), attr.second)) {
to_remove.emplace_back(attr.first);
}
}
}
for (const string& attr_name : to_remove) {
node_def->mutable_attr()->erase(attr_name);
if (op_attr_removed != nullptr) {
op_attr_removed->insert(std::make_pair(node_def->op(), attr_name));
}
}
return absl::OkStatus();
}
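// Returns true iff `op_name` names a function in `graph_def`'s function
// library. Callers below skip such nodes, since functions are not looked up
// in the op registries.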
static bool IsFunction(const GraphDef& graph_def, const string& op_name) {
for (const auto& func_def : graph_def.library().function()) {
if (op_name == func_def.signature().name()) return true;
}
return false;
}
Status RemoveNewDefaultAttrsFromGraphDef(
GraphDef* graph_def, const OpRegistryInterface& consumer_op_registry,
const OpRegistryInterface& producer_op_registry,
std::set<std::pair<string, string>>* op_attr_removed) {
for (int n = 0; n < graph_def->node_size(); ++n) {
NodeDef* node_def = graph_def->mutable_node(n);
if (!IsFunction(*graph_def, node_def->op())) {
TF_RETURN_IF_ERROR(RemoveNewDefaultAttrsFromNodeDef(
node_def, consumer_op_registry, producer_op_registry,
op_attr_removed));
}
}
for (int f = 0; f < graph_def->library().function_size(); ++f) {
FunctionDef* func_def = graph_def->mutable_library()->mutable_function(f);
for (int n = 0; n < func_def->node_def_size(); ++n) {
NodeDef* node_def = func_def->mutable_node_def(n);
if (!IsFunction(*graph_def, node_def->op())) {
TF_RETURN_IF_ERROR(RemoveNewDefaultAttrsFromNodeDef(
node_def, consumer_op_registry, producer_op_registry,
op_attr_removed));
}
}
}
return absl::OkStatus();
}
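// Illustrative usage sketch (the registry variables are hypothetical): strip
// attrs that an older consumer does not know about, given registries
// describing the producer's and the consumer's op definitions:
//
//   std::set<std::pair<string, string>> removed;
//   TF_RETURN_IF_ERROR(RemoveNewDefaultAttrsFromGraphDef(
//       &graph_def, consumer_registry, producer_registry, &removed));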
void StripDefaultAttributes(const OpRegistryInterface& op_registry,
protobuf::RepeatedPtrField<NodeDef>* nodes) {
for (int i = 0; i < nodes->size(); ++i) {
NodeDef* node = nodes->Mutable(i);
const OpDef* op_def;
const OpRegistrationData* op_reg_data = nullptr;
Status s = op_registry.LookUp(node->op(), &op_reg_data);
if (!s.ok()) {
VLOG(1) << "Ignoring encountered unknown operation "
<< SummarizeNodeDef(*node)
<< " when stripping default attributes. It is likely a function, "
"in which case ignoring it is fine";
continue;
}
op_def = &op_reg_data->op_def;
for (const OpDef::AttrDef& attr_def : op_def->attr()) {
if (attr_def.has_default_value()) {
AttrValueMap* attrs = node->mutable_attr();
const string& name = attr_def.name();
auto iter = attrs->find(name);
if (iter != attrs->end()) {
const AttrValue& default_value = attr_def.default_value();
if (AreAttrValuesEqual(iter->second, default_value,
true)) {
attrs->erase(name);
}
}
}
}
}
}
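// Computes the set of op names reachable from `graph_def`'s nodes, following
// calls into the function library transitively, and reports only names that
// are not themselves functions.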
void OpsUsedByGraph(const GraphDef& graph_def,
std::set<string>* ops_used_in_graph) {
std::unordered_map<string, const FunctionDef*> name_to_function;
for (const auto& function : graph_def.library().function()) {
name_to_function.insert(
std::make_pair(function.signature().name(), &function));
}
std::set<string> used_ops;
std::vector<const FunctionDef*> functions_to_process;
const auto mark_op_as_used = [&used_ops, &functions_to_process,
&name_to_function](const string& op) {
if (used_ops.insert(op).second) {
const auto it = name_to_function.find(op);
if (it != name_to_function.end()) {
functions_to_process.push_back(it->second);
}
}
};
for (const auto& node : graph_def.node()) {
mark_op_as_used(node.op());
}
while (!functions_to_process.empty()) {
const FunctionDef* fun = functions_to_process.back();
functions_to_process.pop_back();
for (const auto& node : fun->node_def()) {
mark_op_as_used(node.op());
}
}
ops_used_in_graph->clear();
for (const string& op_name : used_ops) {
if (name_to_function.find(op_name) == name_to_function.end()) {
ops_used_in_graph->insert(op_name);
}
}
}
Status StrippedOpListForGraph(const GraphDef& graph_def,
const OpRegistryInterface& op_registry,
OpList* stripped_op_list) {
std::set<string> used_ops;
OpsUsedByGraph(graph_def, &used_ops);
stripped_op_list->clear_op();
for (const string& op_name : used_ops) {
const OpDef* op_def;
TF_RETURN_IF_ERROR(op_registry.LookUpOpDef(op_name, &op_def));
OpDef* stripped_op = stripped_op_list->add_op();
stripped_op->CopyFrom(*op_def);
RemoveDescriptionsFromOpDef(stripped_op);
}
return absl::OkStatus();
}
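// Illustrative usage sketch (hypothetical variable names): build a stripped
// op list, e.g. for embedding alongside a serialized graph:
//
//   OpList stripped;
//   TF_RETURN_IF_ERROR(
//       StrippedOpListForGraph(graph_def, *OpRegistry::Global(), &stripped));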
} | #include "tensorflow/core/framework/graph_def_util.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_def.pb.h"
#include "tensorflow/core/framework/op_def_builder.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/util/equal_graph_def.h"
namespace tensorflow {
namespace {
Status FinalizeOpDef(const OpDefBuilder& b, OpDef* op_def) {
OpRegistrationData op_reg_data;
const Status s = b.Finalize(&op_reg_data);
*op_def = op_reg_data.op_def;
return s;
}
TEST(AddToGraphTest, MakeGraphDefWithNamespacedOpName) {
OpList op_list;
TF_ASSERT_OK(FinalizeOpDef(OpDefBuilder("Project>SomeOp"), op_list.add_op()));
OpListOpRegistry registry(&op_list);
GraphDef graph_def;
TF_ASSERT_OK(NodeDefBuilder("node", "Project>SomeOp", ®istry)
.Finalize(graph_def.add_node()));
}
TEST(RemoveNewDefaultAttrsFromGraphDefTest, NoChangeWithDefault) {
OpList op_list;
TF_ASSERT_OK(
FinalizeOpDef(OpDefBuilder("NoChangeWithDefault").Attr("a: int = 12"),
op_list.add_op()));
OpListOpRegistry registry(&op_list);
GraphDef graph_def;
TF_ASSERT_OK(NodeDefBuilder("ncwd", "NoChangeWithDefault", ®istry)
.Finalize(graph_def.add_node()));
GraphDef expected_graph_def = graph_def;
std::set<std::pair<string, string>> op_attr_removed;
TF_ASSERT_OK(RemoveNewDefaultAttrsFromGraphDef(&graph_def, registry, registry,
&op_attr_removed));
TF_EXPECT_GRAPH_EQ(expected_graph_def, graph_def);
EXPECT_TRUE(op_attr_removed.empty());
}
TEST(RemoveNewDefaultAttrsFromGraphDefTest, NoChangeNoDefault) {
OpList op_list;
TF_ASSERT_OK(FinalizeOpDef(OpDefBuilder("NoChangeNoDefault").Attr("a: int"),
op_list.add_op()));
OpListOpRegistry registry(&op_list);
GraphDef graph_def;
TF_ASSERT_OK(NodeDefBuilder("ncnd", "NoChangeNoDefault", ®istry)
.Attr("a", 42)
.Finalize(graph_def.add_node()));
GraphDef expected_graph_def = graph_def;
std::set<std::pair<string, string>> op_attr_removed;
TF_ASSERT_OK(RemoveNewDefaultAttrsFromGraphDef(&graph_def, registry, registry,
&op_attr_removed));
TF_EXPECT_GRAPH_EQ(expected_graph_def, graph_def);
EXPECT_TRUE(op_attr_removed.empty());
}
TEST(RemoveNewDefaultAttrsFromGraphDefTest, UsesDefault) {
OpList consumer_op_list;
TF_ASSERT_OK(
FinalizeOpDef(OpDefBuilder("UsesDefault"), consumer_op_list.add_op()));
OpListOpRegistry consumer_registry(&consumer_op_list);
OpList producer_op_list;
TF_ASSERT_OK(FinalizeOpDef(OpDefBuilder("UsesDefault").Attr("a: int = 17"),
producer_op_list.add_op()));
OpListOpRegistry producer_registry(&producer_op_list);
GraphDef produced_graph_def;
TF_ASSERT_OK(NodeDefBuilder("uses_default", "UsesDefault", &producer_registry)
.Finalize(produced_graph_def.add_node()));
std::set<std::pair<string, string>> op_attr_removed;
TF_ASSERT_OK(
RemoveNewDefaultAttrsFromGraphDef(&produced_graph_def, consumer_registry,
producer_registry, &op_attr_removed));
GraphDef expected_graph_def;
TF_ASSERT_OK(NodeDefBuilder("uses_default", "UsesDefault", &consumer_registry)
.Finalize(expected_graph_def.add_node()));
TF_EXPECT_GRAPH_EQ(expected_graph_def, produced_graph_def);
std::set<std::pair<string, string>> expected_removed({{"UsesDefault", "a"}});
EXPECT_EQ(expected_removed, op_attr_removed);
}
TEST(RemoveNewDefaultAttrsFromGraphDefTest, ChangedFromDefault) {
OpList consumer_op_list;
TF_ASSERT_OK(FinalizeOpDef(OpDefBuilder("ChangedFromDefault"),
consumer_op_list.add_op()));
OpListOpRegistry consumer_registry(&consumer_op_list);
OpList producer_op_list;
TF_ASSERT_OK(
FinalizeOpDef(OpDefBuilder("ChangedFromDefault").Attr("a: int = 17"),
producer_op_list.add_op()));
OpListOpRegistry producer_registry(&producer_op_list);
GraphDef produced_graph_def;
TF_ASSERT_OK(NodeDefBuilder("changed_from_default", "ChangedFromDefault",
&producer_registry)
.Attr("a", 9)
.Finalize(produced_graph_def.add_node()));
GraphDef expected_graph_def = produced_graph_def;
std::set<std::pair<string, string>> op_attr_removed;
TF_ASSERT_OK(
RemoveNewDefaultAttrsFromGraphDef(&produced_graph_def, consumer_registry,
producer_registry, &op_attr_removed));
TF_EXPECT_GRAPH_EQ(expected_graph_def, produced_graph_def);
EXPECT_TRUE(op_attr_removed.empty());
}
TEST(RemoveNewDefaultAttrsFromGraphDefTest, UnderscoreAttrs) {
OpList consumer_op_list;
TF_ASSERT_OK(
FinalizeOpDef(OpDefBuilder("Underscore"), consumer_op_list.add_op()));
OpListOpRegistry consumer_registry(&consumer_op_list);
OpList producer_op_list;
TF_ASSERT_OK(
FinalizeOpDef(OpDefBuilder("Underscore"), producer_op_list.add_op()));
OpDef::AttrDef* attr = producer_op_list.mutable_op(0)->add_attr();
attr->set_name("_underscore");
attr->set_type("int");
attr->mutable_default_value()->set_i(17);
OpListOpRegistry producer_registry(&producer_op_list);
GraphDef produced_graph_def;
TF_ASSERT_OK(NodeDefBuilder("node", "Underscore", &producer_registry)
.Attr("_underscore", 17)
.Finalize(produced_graph_def.add_node()));
GraphDef expected_graph_def = produced_graph_def;
std::set<std::pair<string, string>> op_attr_removed;
TF_ASSERT_OK(
RemoveNewDefaultAttrsFromGraphDef(&produced_graph_def, consumer_registry,
producer_registry, &op_attr_removed));
TF_EXPECT_GRAPH_EQ(expected_graph_def, produced_graph_def);
EXPECT_EQ(op_attr_removed.size(), 0);
}
TEST(RemoveNewDefaultAttrsFromGraphDefTest, HasFunction) {
OpList consumer_op_list;
TF_ASSERT_OK(
FinalizeOpDef(OpDefBuilder("UsesDefault"), consumer_op_list.add_op()));
TF_ASSERT_OK(FinalizeOpDef(OpDefBuilder("ChangedFromDefault"),
consumer_op_list.add_op()));
OpListOpRegistry consumer_registry(&consumer_op_list);
OpList producer_op_list;
TF_ASSERT_OK(FinalizeOpDef(OpDefBuilder("UsesDefault").Attr("a: int = 17"),
producer_op_list.add_op()));
TF_ASSERT_OK(
FinalizeOpDef(OpDefBuilder("ChangedFromDefault").Attr("a: int = 17"),
producer_op_list.add_op()));
OpListOpRegistry producer_registry(&producer_op_list);
GraphDef produced_graph_def;
*produced_graph_def.mutable_library()->add_function() =
FunctionDefHelper::Create(
"my_func", {}, {}, {},
{{{"x"}, "UsesDefault", {}, {{"a", 17}}},
{{"y"}, "ChangedFromDefault", {}, {{"a", 99}}}},
{});
OpList function_op_list;
*function_op_list.add_op() =
produced_graph_def.library().function(0).signature();
OpListOpRegistry function_registry(&function_op_list);
TF_ASSERT_OK(NodeDefBuilder("call_func", "my_func", &function_registry)
.Finalize(produced_graph_def.add_node()));
std::set<std::pair<string, string>> op_attr_removed;
TF_ASSERT_OK(
RemoveNewDefaultAttrsFromGraphDef(&produced_graph_def, consumer_registry,
producer_registry, &op_attr_removed));
GraphDef expected_graph_def;
*expected_graph_def.mutable_library()->add_function() =
FunctionDefHelper::Create(
"my_func", {}, {}, {},
{{{"x"}, "UsesDefault", {}, {}},
{{"y"}, "ChangedFromDefault", {}, {{"a", 99}}}},
{});
TF_ASSERT_OK(NodeDefBuilder("call_func", "my_func", &function_registry)
.Finalize(expected_graph_def.add_node()));
TF_EXPECT_GRAPH_EQ(expected_graph_def, produced_graph_def);
EXPECT_EQ(expected_graph_def.library().DebugString(),
produced_graph_def.library().DebugString());
std::set<std::pair<string, string>> expected_removed({{"UsesDefault", "a"}});
EXPECT_EQ(expected_removed, op_attr_removed);
}
TEST(StripDefaultAttributesTest, DefaultStripped) {
OpList op_list;
TF_ASSERT_OK(FinalizeOpDef(OpDefBuilder("OpName1").Attr("a: int = 12"),
op_list.add_op()));
OpListOpRegistry registry(&op_list);
GraphDef graph_def;
TF_ASSERT_OK(NodeDefBuilder("op1", "OpName1", ®istry)
.Finalize(graph_def.add_node()));
ASSERT_EQ(1, graph_def.node(0).attr_size());
ASSERT_EQ(12, graph_def.node(0).attr().at("a").i());
StripDefaultAttributes(registry, graph_def.mutable_node());
ASSERT_EQ(1, graph_def.node_size());
ASSERT_EQ(0, graph_def.node(0).attr_size());
}
TEST(StripDefaultAttributesTest, NonDefaultNotStripped) {
OpList op_list;
TF_ASSERT_OK(FinalizeOpDef(OpDefBuilder("OpName1").Attr("a: int = 12"),
op_list.add_op()));
OpListOpRegistry registry(&op_list);
GraphDef graph_def;
TF_ASSERT_OK(NodeDefBuilder("op1", "OpName1", ®istry)
.Attr("a", 9)
.Finalize(graph_def.add_node()));
GraphDef expected = graph_def;
StripDefaultAttributes(registry, graph_def.mutable_node());
TF_EXPECT_GRAPH_EQ(expected, graph_def);
}
TEST(StrippedOpListForGraphTest, FlatTest) {
OpList op_list;
for (const string& op : {"A", "B", "C", "D"}) {
OpDef* op_def = op_list.add_op();
op_def->set_name(op);
op_def->set_summary("summary");
op_def->set_description("description");
op_def->set_is_commutative(op == "B");
}
const string graph_ops[4][3] = {
{"C", "B", "B"}, {"B", "C", "B"}, {"B", "B", "C"}, {"C", "C", "B"}};
for (const bool use_function : {false, true}) {
for (int order = 0; order < 4; order++) {
GraphDef graph_def;
if (use_function) {
FunctionDef* function_def = graph_def.mutable_library()->add_function();
function_def->mutable_signature()->set_name("F");
for (const string& op : graph_ops[order]) {
function_def->add_node_def()->set_op(op);
}
graph_def.add_node()->set_op("F");
} else {
for (const string& op : graph_ops[order]) {
string name = strings::StrCat("name", graph_def.node_size());
NodeDef* node = graph_def.add_node();
node->set_name(name);
node->set_op(op);
}
}
OpList stripped_op_list;
TF_ASSERT_OK(StrippedOpListForGraph(graph_def, OpListOpRegistry(&op_list),
&stripped_op_list));
ASSERT_EQ(stripped_op_list.op_size(), 2);
for (int i = 0; i < 2; i++) {
const OpDef& op = stripped_op_list.op(i);
EXPECT_EQ(op.name(), i ? "C" : "B");
EXPECT_EQ(op.summary(), "");
EXPECT_EQ(op.description(), "");
EXPECT_EQ(op.is_commutative(), !i);
}
std::set<string> used_ops;
OpsUsedByGraph(graph_def, &used_ops);
ASSERT_EQ(std::set<string>({"B", "C"}), used_ops);
}
}
}
TEST(StrippedOpListForGraphTest, NestedFunctionTest) {
OpList op_list;
op_list.add_op()->set_name("A");
for (const bool recursive : {false, true}) {
GraphDef graph_def;
FunctionDef* b = graph_def.mutable_library()->add_function();
FunctionDef* c = graph_def.mutable_library()->add_function();
b->mutable_signature()->set_name("B");
c->mutable_signature()->set_name("C");
b->add_node_def()->set_op("A");
c->add_node_def()->set_op("B");
if (recursive) {
b->add_node_def()->set_op("B");
c->add_node_def()->set_op("C");
}
graph_def.add_node()->set_op("C");
OpList stripped_op_list;
TF_ASSERT_OK(StrippedOpListForGraph(graph_def, OpListOpRegistry(&op_list),
&stripped_op_list));
ASSERT_EQ(stripped_op_list.op_size(), 1);
ASSERT_EQ(stripped_op_list.op(0).name(), "A");
std::set<string> used_ops;
OpsUsedByGraph(graph_def, &used_ops);
ASSERT_EQ(std::set<string>({"A"}), used_ops);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/graph_def_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/graph_def_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
773b7270-3bf2-4c44-8db0-c80349766cfe | cpp | tensorflow/tensorflow | shape_inference | tensorflow/compiler/jit/shape_inference.cc | tensorflow/compiler/jit/shape_inference_test.cc | #include "tensorflow/compiler/jit/shape_inference.h"
#include <cstdint>
#include <map>
#include <vector>
#include "absl/log/log.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/compiler/jit/shape_inference_helpers.h"
#include "tensorflow/core/common_runtime/shape_refiner.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/shape_inference.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/graph/algorithm.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace {
Status ShapeHandleToTensorShape(shape_inference::InferenceContext* context,
const shape_inference::ShapeHandle& handle,
PartialTensorShape* shape) {
if (!context->RankKnown(handle)) return absl::OkStatus();
std::vector<int64_t> dims(context->Rank(handle));
for (int32_t i = 0, end = dims.size(); i < end; ++i) {
dims[i] = context->Value(context->Dim(handle, i));
}
return PartialTensorShape::MakePartialShape(dims.data(), dims.size(), shape);
}
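// Runs the shape refiner over the graph in reverse post order, with three
// special cases layered on top: seeding _Arg/Placeholder shapes from
// `arg_shapes`, replacing VariableShape with a Const when the handle shape is
// fully known, and forwarding resource handle shapes across loop-invariant
// Merge nodes (detected via the removed `back_edges`).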
Status PropagateShapes(Graph* graph,
const std::map<int, InferredShape>& arg_shapes,
const std::vector<BackEdgeHelper::BackEdge>& back_edges,
ShapeRefiner* shape_refiner) {
std::map<const Node*, const Node*> merge_to_next_iteration;
for (const auto& e : back_edges) {
if (e.src->IsNextIteration() && e.dst->IsMerge()) {
merge_to_next_iteration[e.dst] = e.src;
}
}
std::vector<Node*> order;
GetReversePostOrder(*graph, &order);
for (Node* n : order) {
VLOG(4) << "Propagating shape for node " << n->name()
<< ", type: " << n->type_string();
Status status = shape_refiner->AddNode(n);
if (!status.ok()) {
VLOG(1) << "Shape inference failed for node " << n->name() << ": "
<< status;
} else {
shape_inference::InferenceContext* context = shape_refiner->GetContext(n);
for (int i = 0; i < n->num_outputs(); i++) {
shape_inference::ShapeHandle handle = context->output(i);
VLOG(4) << "Output " << i << " for node " << n->name() << ": "
<< context->DebugString(handle);
}
}
int index = -1;
if (n->type_string() == "_Arg") {
TF_RETURN_IF_ERROR(GetNodeAttr(n->attrs(), "index", &index));
} else if (n->type_string() == "Placeholder") {
if (const auto s = GetNodeAttr(n->attrs(), "_index", &index); !s.ok()) {
VLOG(1) << "Failed to get node index for node " << n->name();
}
}
if (index >= 0) {
if (auto it = arg_shapes.find(index); it != arg_shapes.end()) {
const InferredShape& arg_shape = it->second;
shape_inference::InferenceContext* context =
shape_refiner->GetContext(n);
if (arg_shape.handle_type != DT_INVALID) {
shape_inference::ShapeHandle handle;
TF_RETURN_IF_ERROR(context->MakeShapeFromPartialTensorShape(
arg_shape.handle_shape, &handle));
context->set_output_handle_shapes_and_types(
0, std::vector<shape_inference::ShapeAndType>{
{handle, arg_shape.handle_type}});
}
shape_inference::ShapeHandle handle;
TF_RETURN_IF_ERROR(
context->MakeShapeFromPartialTensorShape(arg_shape.shape, &handle));
TF_RETURN_IF_ERROR(shape_refiner->SetShape(n, 0, handle));
}
}
if (n->type_string() == "VariableShape") {
shape_inference::InferenceContext* context = shape_refiner->GetContext(n);
auto handle_shapes_and_types = context->input_handle_shapes_and_types(0);
if (handle_shapes_and_types && !handle_shapes_and_types->empty()) {
shape_inference::ShapeHandle handle =
handle_shapes_and_types->at(0).shape;
TensorShapeProto shape_proto;
context->ShapeHandleToProto(handle, &shape_proto);
if (!shape_proto.unknown_rank()) {
NodeDef const_def;
const_def.set_op("Const");
Node* var_node;
TF_RETURN_IF_ERROR(n->input_node(0, &var_node));
const_def.set_name(
graph->NewName(absl::StrCat("var_shape_", var_node->name())));
DataType dtype = n->output_type(0);
AddNodeAttr("dtype", dtype, &const_def);
TensorProto value;
value.set_dtype(dtype);
value.mutable_tensor_shape()->add_dim()->set_size(
shape_proto.dim_size());
for (const auto& dim : shape_proto.dim()) {
if (dtype == DT_INT32) {
value.add_int_val(dim.size());
} else {
value.add_int64_val(dim.size());
}
}
AddNodeAttr("value", value, &const_def);
for (auto const& attr : n->attrs()) {
if (*attr.first.begin() == '_') {
AddNodeAttr(attr.first, attr.second, &const_def);
}
}
TF_ASSIGN_OR_RETURN(Node * const_node, graph->AddNode(const_def));
graph->AddControlEdge(var_node, const_node);
std::vector<const Edge*> out_edges(n->out_edges().begin(),
n->out_edges().end());
for (const Edge* e : out_edges) {
if (e->IsControlEdge()) {
graph->AddControlEdge(const_node, e->dst());
graph->RemoveEdge(e);
} else {
Node* dst = e->dst();
int dst_input = e->dst_input();
graph->RemoveEdge(e);
graph->AddEdge(const_node, 0, dst, dst_input);
}
}
}
}
}
if (n->IsMerge() && n->output_type(0) == DT_RESOURCE) {
auto iter = merge_to_next_iteration.find(n);
if (iter != merge_to_next_iteration.end()) {
const Node *next_iter = iter->second, *node = next_iter;
do {
TF_RETURN_IF_ERROR(node->input_node(0, &node));
} while (node->IsIdentity());
const Node* switch_input;
bool is_loop_invariant = node->IsSwitch() &&
node->input_node(0, &switch_input).ok() &&
switch_input == n;
if (is_loop_invariant) {
shape_inference::InferenceContext* context =
shape_refiner->GetContext(n);
for (int i = 0; i < n->num_inputs(); i++) {
const Node* input_node;
if (n->input_node(i, &input_node).ok()) {
auto shapes_and_types = context->input_handle_shapes_and_types(i);
if (shapes_and_types) {
context->set_output_handle_shapes_and_types(0,
*shapes_and_types);
}
break;
}
}
}
}
}
}
return absl::OkStatus();
}
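// Copies the refiner's inferred output shapes into `shape_info`, keyed by
// node name; for resource outputs with exactly one shape-and-type entry, the
// handle shape and type are recorded as well.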
Status StoreOutputShapes(const Graph& graph, const ShapeRefiner& shape_refiner,
GraphShapeInfo* shape_info) {
for (const Node* node : graph.nodes()) {
shape_inference::InferenceContext* context = shape_refiner.GetContext(node);
if (!context) continue;
auto& outputs = (*shape_info)[node->name()];
outputs.resize(context->num_outputs());
for (int i = 0; i < context->num_outputs(); ++i) {
auto& output = outputs[i];
TF_RETURN_IF_ERROR(
ShapeHandleToTensorShape(context, context->output(i), &output.shape));
const auto* handle_shapes_and_types =
context->output_handle_shapes_and_types(i);
if (handle_shapes_and_types != nullptr) {
if (handle_shapes_and_types->size() == 1) {
TF_RETURN_IF_ERROR(ShapeHandleToTensorShape(
context, (*handle_shapes_and_types)[0].shape,
&output.handle_shape));
output.handle_type = (*handle_shapes_and_types)[0].dtype;
} else {
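// Entries with more than one shape-and-type are not representable in
// InferredShape, so they are left unset here.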
}
}
VLOG(4) << node->name() << " output " << i << " shape"
<< output.shape.DebugString() << " handle_type "
<< DataTypeString(output.handle_type) << " handle_shape "
<< output.handle_shape.DebugString();
}
}
return absl::OkStatus();
}
}
Status InferShapes(Graph* graph, const std::map<int, InferredShape>& arg_shapes,
const tensorflow::FunctionLibraryDefinition* fnlib_def,
GraphShapeInfo* shape_info) {
ShapeRefiner shape_refiner(graph->versions(), graph->op_registry());
shape_refiner.set_require_shape_inference_fns(false);
BackEdgeHelper back_edge;
TF_RETURN_IF_ERROR(back_edge.Remove(graph));
TF_RETURN_IF_ERROR(PropagateShapes(graph, arg_shapes,
back_edge.RemovedEdges(), &shape_refiner));
TF_RETURN_IF_ERROR(back_edge.Replace());
return StoreOutputShapes(*graph, shape_refiner, shape_info);
}
absl::StatusOr<InferredShape> MergeInferredShapes(const InferredShape& a,
const InferredShape& b) {
InferredShape result;
TF_RETURN_IF_ERROR(a.shape.MergeWith(b.shape, &result.shape));
if (a.handle_type == DT_INVALID) {
result.handle_type = b.handle_type;
} else if (b.handle_type == DT_INVALID) {
result.handle_type = a.handle_type;
} else if (a.handle_type == b.handle_type) {
result.handle_type = a.handle_type;
} else {
return errors::InvalidArgument(
"Mismatched resource types: ", DataTypeString(a.handle_type), " vs. ",
DataTypeString(b.handle_type));
}
TF_RETURN_IF_ERROR(
a.handle_shape.MergeWith(b.handle_shape, &result.handle_shape));
return result;
}
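// Illustrative sketch (hypothetical values): merging two partially known
// shapes refines the unknown dimensions; mismatched resource handle types
// yield an InvalidArgument error.
//
//   InferredShape x, y;
//   x.shape = PartialTensorShape({-1, 3});
//   y.shape = PartialTensorShape({2, -1});
//   TF_ASSIGN_OR_RETURN(InferredShape merged, MergeInferredShapes(x, y));
//   // merged.shape is [2,3]; both handle_types were DT_INVALID, so the
//   // result's handle_type stays DT_INVALID.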
} | #include "tensorflow/compiler/jit/shape_inference.h"
#include <map>
#include <memory>
#include <vector>
#include "tensorflow/cc/framework/ops.h"
#include "tensorflow/cc/framework/scope.h"
#include "tensorflow/cc/ops/array_ops.h"
#include "tensorflow/cc/ops/control_flow_ops_internal.h"
#include "tensorflow/cc/ops/math_ops.h"
#include "tensorflow/cc/ops/resource_variable_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/compiler/jit/test_util.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/partial_tensor_shape.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/types.h"
#include "tsl/platform/status.h"
namespace tensorflow {
namespace {
TEST(ShapeInferenceTest, Basics) {
Scope root = Scope::NewRootScope().ExitOnError();
auto a = ops::Placeholder(root.WithOpName("A"), DT_FLOAT,
ops::Placeholder::Shape({2, 3}));
auto b = ops::Placeholder(root.WithOpName("B"), DT_FLOAT,
ops::Placeholder::Shape({3}));
auto c = ops::Placeholder(root.WithOpName("C"), DT_FLOAT);
auto d = ops::Add(root.WithOpName("D"), a, b);
auto e = ops::Add(root.WithOpName("E"), d, c);
auto f = ops::Neg(root.WithOpName("F"), e);
auto g = ops::AddN(root.WithOpName("G"), std::initializer_list<Output>{e, f});
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
TF_CHECK_OK(root.ToGraph(graph.get()));
GraphShapeInfo shape_info;
TF_ASSERT_OK(InferShapes(graph.get(), {},
nullptr, &shape_info));
std::map<string, std::vector<PartialTensorShape>> expected = {
{"A", {PartialTensorShape({2, 3})}}, {"B", {PartialTensorShape({3})}},
{"C", {PartialTensorShape()}}, {"D", {PartialTensorShape({2, 3})}},
{"E", {PartialTensorShape()}}, {"F", {PartialTensorShape()}},
{"G", {PartialTensorShape()}},
};
TF_EXPECT_OK(ShapeAnnotationsMatch(*graph, shape_info, expected));
}
TEST(ShapeInferenceTest, UseArgShapesForVariableBatchSize) {
Scope root = Scope::NewRootScope().ExitOnError();
auto a = ops::Placeholder(root.WithOpName("A"), DT_FLOAT,
ops::Placeholder::Shape({-1, 3}));
auto b = ops::Placeholder(root.WithOpName("B"), DT_FLOAT,
ops::Placeholder::Shape({-1, 3}));
auto c = ops::Add(root.WithOpName("C"), a, b);
auto d = ops::Neg(root.WithOpName("D"), c);
a.node()->AddAttr("_index", 0);
b.node()->AddAttr("_index", 1);
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
TF_CHECK_OK(root.ToGraph(graph.get()));
std::map<int, InferredShape> arg_shapes;
arg_shapes[0].shape = TensorShape({2, 3});
arg_shapes[1].shape = TensorShape({2, 3});
GraphShapeInfo shape_info;
TF_ASSERT_OK(InferShapes(graph.get(), arg_shapes,
nullptr, &shape_info));
std::map<string, std::vector<PartialTensorShape>> expected = {
{"A", {PartialTensorShape({2, 3})}},
{"B", {PartialTensorShape({2, 3})}},
{"C", {PartialTensorShape({2, 3})}},
{"D", {PartialTensorShape({2, 3})}},
};
TF_EXPECT_OK(ShapeAnnotationsMatch(*graph, shape_info, expected));
}
TEST(ShapeInferenceTest, UseArgShapesForVariableBatchSizeIncompleteUserArgs) {
Scope root = Scope::NewRootScope().ExitOnError();
auto a = ops::Placeholder(root.WithOpName("A"), DT_FLOAT,
ops::Placeholder::Shape({-1, 3}));
auto b = ops::Placeholder(root.WithOpName("B"), DT_FLOAT,
ops::Placeholder::Shape({-1, 3}));
auto c = ops::Add(root.WithOpName("C"), a, b);
auto d = ops::Neg(root.WithOpName("D"), c);
a.node()->AddAttr("_index", 0);
b.node()->AddAttr("_index", 0);
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
TF_CHECK_OK(root.ToGraph(graph.get()));
std::map<int, InferredShape> arg_shapes;
arg_shapes[0].shape = TensorShape({2, 3});
GraphShapeInfo shape_info;
TF_ASSERT_OK(InferShapes(graph.get(), arg_shapes,
nullptr, &shape_info));
std::map<string, std::vector<PartialTensorShape>> expected = {
{"A", {PartialTensorShape({2, 3})}},
{"B", {PartialTensorShape({2, 3})}},
{"C", {PartialTensorShape({2, 3})}},
{"D", {PartialTensorShape({2, 3})}},
};
TF_EXPECT_OK(ShapeAnnotationsMatch(*graph, shape_info, expected));
}
TEST(ShapeInferenceTest, WhileLoop) {
Graph graph(OpRegistry::Global());
{
Scope scope = Scope::NewRootScope().ExitOnError();
auto dummy = ops::Placeholder(scope.WithOpName("Dummy"), DT_INT32,
ops::Placeholder::Shape({}));
auto source = ops::Placeholder(scope.WithOpName("source"), DT_INT32,
ops::Placeholder::Shape({}));
auto enter =
ops::internal::Enter(scope.WithOpName("while/Enter"), source, "aloop");
auto enter2 =
ops::internal::Enter(scope.WithOpName("while/Enter2"), source, "aloop");
auto merge = ops::Merge(scope.WithOpName("while/Merge"),
std::initializer_list<Input>{enter, dummy});
auto ten = ops::Const<int32>(
scope.WithOpName("while/Less/y").WithControlDependencies(merge.output),
10);
auto less = ops::Less(scope.WithOpName("while/Less"), merge.output, ten);
auto loop_cond = ops::LoopCond(scope.WithOpName("while/LoopCond"), less);
auto switch_node =
ops::Switch(scope.WithOpName("while/Switch"), merge.output, loop_cond);
auto exit = ops::internal::Exit(scope.WithOpName("while/Exit"),
switch_node.output_false);
auto identity = ops::Identity(scope.WithOpName("while/Identity"),
switch_node.output_true);
auto identity_shape =
ops::Const<int32>(scope.WithOpName("while/Identity/shape"), {});
auto identity_reshaped = ops::Reshape(
scope.WithOpName("while/Identity/reshaped"), identity, identity_shape);
auto one = ops::Const<int32>(
scope.WithOpName("while/add/y").WithControlDependencies(identity), 1);
auto add = ops::Add(scope.WithOpName("while/add"), identity_reshaped, one);
auto next_iteration =
ops::NextIteration(scope.WithOpName("while/NextIteration"), add);
auto sink = ops::Identity(scope.WithOpName("sink"), exit);
scope.graph()->RemoveNode(dummy.node());
scope.graph()->AddEdge(next_iteration.node(), 0, merge.output.node(), 1);
TF_EXPECT_OK(scope.ToGraph(&graph));
}
GraphShapeInfo shape_info;
TF_ASSERT_OK(InferShapes(&graph, {}, nullptr,
&shape_info));
std::map<string, std::vector<PartialTensorShape>> expected = {
{"while/Identity", {PartialTensorShape()}},
{"while/add", {PartialTensorShape({})}},
};
TF_EXPECT_OK(ShapeAnnotationsMatch(graph, shape_info, expected));
}
TEST(ShapeInferenceTest, WhileLoopWithResource) {
Graph graph(OpRegistry::Global());
{
Scope scope = Scope::NewRootScope().ExitOnError();
auto x =
ops::VarHandleOp(scope.WithOpName("x"), DT_FLOAT, TensorShape({2, 3}));
auto enter =
ops::internal::Enter(scope.WithOpName("while/Enter"), x, "aloop");
auto dummy = ops::Placeholder(scope.WithOpName("dummy"), DT_RESOURCE);
auto merge = ops::Merge(scope.WithOpName("while/Merge"),
std::initializer_list<Input>{enter, dummy});
auto false_value = ops::Const<bool>(scope.WithOpName("false"), false);
auto loop_cond =
ops::LoopCond(scope.WithOpName("while/LoopCond"), false_value);
auto switch_node =
ops::Switch(scope.WithOpName("while/Switch"), merge.output, loop_cond);
auto exit = ops::internal::Exit(scope.WithOpName("while/Exit"),
switch_node.output_false);
auto identity = ops::Identity(scope.WithOpName("while/Identity"),
switch_node.output_true);
auto next_iteration =
ops::NextIteration(scope.WithOpName("while/NextIteration"), identity);
auto sink = ops::Identity(scope.WithOpName("sink"), exit);
scope.graph()->RemoveNode(dummy.node());
scope.graph()->AddEdge(next_iteration.node(), 0, merge.output.node(), 1);
TF_EXPECT_OK(scope.ToGraph(&graph));
}
GraphShapeInfo shape_info;
TF_ASSERT_OK(InferShapes(&graph, {}, nullptr,
&shape_info));
auto iter = shape_info.find("sink");
EXPECT_NE(iter, shape_info.end());
EXPECT_EQ(iter->second.size(), 1);
EXPECT_EQ(iter->second.at(0).handle_type, DT_FLOAT);
TensorShape resource_shape;
EXPECT_TRUE(iter->second.at(0).handle_shape.AsTensorShape(&resource_shape));
EXPECT_EQ(resource_shape, TensorShape({2, 3}));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/jit/shape_inference.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/jit/shape_inference_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
a08ec787-9d7c-4b53-a2d3-06716be6e35f | cpp | tensorflow/tensorflow | cutlass_gemm_fusion | third_party/xla/xla/service/gpu/kernels/cutlass_gemm_fusion.cc | third_party/xla/xla/service/gpu/kernels/cutlass_gemm_fusion_test.cc | #include "xla/service/gpu/kernels/cutlass_gemm_fusion.h"
#include <algorithm>
#include <array>
#include <cstddef>
#include <cstdint>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/kernels/custom_kernel.h"
#include "xla/service/gpu/kernels/custom_kernel_fusion.h"
#include "xla/service/gpu/kernels/custom_kernel_fusion_pattern.h"
#include "xla/service/gpu/kernels/cutlass_gemm.h"
#include "xla/service/gpu/kernels/cutlass_gemm_custom_kernel.h"
#include "xla/service/pattern_matcher.h"
#include "xla/shape.h"
#include "xla/stream_executor/device_description.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla::gpu {
namespace {
struct RootWithWorkspace {
HloInstruction* root;
HloInstruction* workspace;
};
static RootWithWorkspace MatchRootWithWorkspace(HloInstruction* root) {
RootWithWorkspace result;
if (Match(root, match::Tuple(match::Op(&result.root),
match::CustomCall(
&result.workspace,
{CustomKernelFusionPattern::kWorkspace})))) {
return result;
}
return {root, nullptr};
}
struct GemmWithUpcast {
explicit GemmWithUpcast(HloDotInstruction* dot) : dot(dot) {}
HloInstruction* dot;
HloInstruction* lhs_upcast = nullptr;
HloInstruction* rhs_upcast = nullptr;
};
struct GemmWithDynamicSlice {
explicit GemmWithDynamicSlice(HloDynamicUpdateSliceInstruction* update_slice)
: update_slice(update_slice) {}
std::vector<HloInstruction*> Instrs() {
if (bitcast == nullptr) {
return {dot, update_slice};
}
return {dot, bitcast, update_slice};
}
HloInstruction* dot = nullptr;
HloInstruction* bitcast = nullptr;
HloInstruction* update_slice = nullptr;
};
absl::Status MatchRowMajorGemm(HloDotInstruction* dot) {
if (dot->operand(0)->shape().dimensions_size() != 2 ||
dot->operand(1)->shape().dimensions_size() != 2) {
return absl::InternalError("operands must have rank 2");
}
if (dot->shape().layout().minor_to_major().back() != 0) {
return absl::InternalError("The dot result must have row major layout.");
}
auto& dot_dims = dot->dot_dimension_numbers();
if (dot_dims.lhs_contracting_dimensions().size() != 1) {
return absl::InternalError("Lhs contracting dimensions must be of size 1.");
}
if (dot_dims.rhs_contracting_dimensions().size() != 1) {
return absl::InternalError("Rhs contracting dimensions must be of size 1.");
}
if (dot->operand(0)->shape().layout().minor_to_major(0) !=
dot_dims.lhs_contracting_dimensions()[0]) {
return absl::InternalError(
"Lhs contracting dimension should be along the minor axis (elements "
"that are stored contigous in memory).");
}
if (dot->operand(1)->shape().layout().minor_to_major(1) !=
dot_dims.rhs_contracting_dimensions()[0]) {
return absl::InternalError(
"Rhs contracting dimension should be along the major axis (elements "
"that are NOT stored contigous in memory).");
}
return absl::OkStatus();
}
}
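// Returns OK iff `dot` is a row-major gemm whose lhs, rhs and result element
// types all equal one of the types in `support_dtypes`.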
static absl::Status MatchSimpleGemm(
HloDotInstruction* dot, absl::Span<const PrimitiveType> support_dtypes) {
TF_RETURN_IF_ERROR(MatchRowMajorGemm(dot));
for (PrimitiveType dtype : support_dtypes) {
if (dot->operand(0)->shape().element_type() == dtype &&
dot->operand(1)->shape().element_type() == dtype &&
dot->shape().element_type() == dtype) {
return absl::OkStatus();
}
}
return absl::InternalError("unsupported operands type");
}
static absl::StatusOr<GemmWithUpcast> MatchGemmWithUpcast(
HloDotInstruction* dot) {
TF_RETURN_IF_ERROR(MatchRowMajorGemm(dot));
GemmWithUpcast match(dot);
if (Match(const_cast<HloInstruction*>(dot->operand(0)),
match::Convert(&match.lhs_upcast, match::Op())) &&
Match(const_cast<HloInstruction*>(dot->operand(1)),
match::Convert(&match.rhs_upcast, match::Op()))) {
return match;
}
if (Match(const_cast<HloInstruction*>(dot->operand(0)),
match::Convert(&match.lhs_upcast, match::Op()))) {
return match;
}
if (Match(const_cast<HloInstruction*>(dot->operand(1)),
match::Convert(&match.rhs_upcast, match::Op()))) {
return match;
}
return absl::InternalError("unsupported gemm with upcasing");
}
template <typename Pattern>
auto OptionalBitcast(HloInstruction** optional_bitcast, Pattern pattern) {
return match::AnyOf<HloInstruction>(match::Bitcast(optional_bitcast, pattern),
std::move(pattern));
}
static absl::StatusOr<GemmWithDynamicSlice> MatchGemmWithDynamicUpdateSlice(
HloDynamicUpdateSliceInstruction* update_slice) {
GemmWithDynamicSlice match(update_slice);
if (!Match(const_cast<HloInstruction*>(update_slice->update()),
OptionalBitcast(&match.bitcast, match::Dot(&match.dot, match::Op(),
match::Op())))) {
return absl::InternalError("failed to match update slice instr");
}
TF_RETURN_IF_ERROR(MatchRowMajorGemm(Cast<HloDotInstruction>(match.dot)));
return match;
}
static bool AreInstructionsOnTheSameStream(
absl::Span<const HloInstruction* const> instructions) {
absl::flat_hash_set<int64_t> stream_set;
for (const HloInstruction* inst : instructions) {
auto gpu_config = inst->backend_config<GpuBackendConfig>();
if (!gpu_config.ok()) {
continue;
}
stream_set.insert(gpu_config->operation_queue_id());
if (stream_set.size() > 1) {
return false;
}
}
return true;
};
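// A custom fusion must not mix instructions assigned to different operation
// queues, so the dynamic-update-slice pattern below also checks that every
// matched instruction is on the same stream.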
std::optional<CustomKernelFusionPattern::Match> CutlassGemmPattern::TryMatch(
const se::DeviceDescription& device, HloInstruction* instr) const {
auto* dot = DynCast<HloDotInstruction>(instr);
if (!dot) return std::nullopt;
auto matched = MatchSimpleGemm(dot, {PrimitiveType::F32});
if (!matched.ok()) return std::nullopt;
CustomFusionConfig config;
config.set_name("cutlass_gemm");
return Match{config, {instr}};
}
std::optional<CustomKernelFusionPattern::Match>
CutlassGemmWithDynamicUpdateSlicePattern::TryMatch(
const se::DeviceDescription& device, HloInstruction* instr) const {
auto* update_slice = DynCast<HloDynamicUpdateSliceInstruction>(instr);
if (!update_slice) return std::nullopt;
auto matched = MatchGemmWithDynamicUpdateSlice(update_slice);
if (!matched.ok() || !AreInstructionsOnTheSameStream(matched->Instrs()))
return std::nullopt;
CustomFusionConfig config;
config.set_name("cutlass_gemm_with_dynamic_update_slice");
Match match(config, matched->Instrs());
match.AddReplacement(matched->dot, [=](HloFusionInstruction* fusion) {
HloComputation* parent = fusion->parent();
auto* dus = Cast<HloDynamicUpdateSliceInstruction>(matched->update_slice);
bool has_bitcast = matched->bitcast != nullptr;
const Shape dus_shape =
has_bitcast ? matched->bitcast->shape() : matched->dot->shape();
auto* slice = parent->AddInstruction(HloInstruction::CreateDynamicSlice(
dus_shape, fusion, dus->index_operands(), dus_shape.dimensions()));
return parent->AddInstruction(
HloInstruction::CreateBitcast(matched->dot->shape(), slice));
});
return match;
}
namespace {
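// The upcast pattern only matches a fixed set of (lhs, rhs, dot)
// element-type triples; any other combination is rejected (with a VLOG
// message) by the pattern matcher.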
bool IsSupportedKernel(PrimitiveType lhs, PrimitiveType rhs,
PrimitiveType dot) {
constexpr std::array<std::array<PrimitiveType, 3>, 3> kSupportedKernels = {
{{BF16, BF16, F32}, {F32, BF16, F32}, {BF16, S8, F32}}};
return absl::c_linear_search(kSupportedKernels,
std::array<PrimitiveType, 3>{lhs, rhs, dot});
}
}
std::optional<CustomKernelFusionPattern::Match>
CutlassGemmWithUpcastPattern::TryMatch(const se::DeviceDescription& device,
HloInstruction* instr) const {
auto* dot = DynCast<HloDotInstruction>(instr);
if (!dot) return std::nullopt;
absl::StatusOr<GemmWithUpcast> matched = MatchGemmWithUpcast(dot);
if (!matched.ok()) {
VLOG(3) << "No match due to unsupported gemm with upcast: "
<< matched.status();
return std::nullopt;
}
CustomFusionConfig config;
config.set_name("cutlass_gemm_with_upcast");
HloInstruction* lhs = matched->lhs_upcast;
HloInstruction* rhs = matched->rhs_upcast;
PrimitiveType dot_type = dot->shape().element_type();
PrimitiveType lhs_type = lhs != nullptr
? lhs->operand(0)->shape().element_type()
: dot->operand(0)->shape().element_type();
PrimitiveType rhs_type = rhs != nullptr
? rhs->operand(0)->shape().element_type()
: dot->operand(1)->shape().element_type();
if (!IsSupportedKernel(lhs_type, rhs_type, dot_type)) {
VLOG(3) << "No match due to unsupported kernel input types: "
<< PrimitiveType_Name(lhs_type) << "x"
<< PrimitiveType_Name(rhs_type) << "To"
<< PrimitiveType_Name(dot_type);
return std::nullopt;
}
if (lhs != nullptr && rhs == nullptr) {
return Match{config, {matched->lhs_upcast, instr}};
} else if (lhs == nullptr && rhs != nullptr) {
return Match{config, {matched->rhs_upcast, instr}};
} else {
return Match{config, {matched->lhs_upcast, matched->rhs_upcast, instr}};
}
}
class CutlassGemmFusion : public CustomKernelFusion {
public:
absl::StatusOr<std::vector<CustomKernel>> LoadKernels(
const se::DeviceDescription& device,
const HloComputation* computation) const final {
auto* dot = DynCast<HloDotInstruction>(computation->root_instruction());
if (dot == nullptr) {
return absl::InternalError(
"cutlass_gemm requires ROOT operation to be a dot");
}
TF_RETURN_IF_ERROR(MatchSimpleGemm(dot, {PrimitiveType::F32}));
PrimitiveType dot_type = dot->shape().element_type();
auto* lhs = Cast<HloParameterInstruction>(dot->operand(0));
auto* rhs = Cast<HloParameterInstruction>(dot->operand(1));
kernel::gemm_universal::ArgsIndices indices = {
lhs->parameter_number(), rhs->parameter_number(),
computation->num_parameters()};
const Shape& lhs_shape = lhs->shape();
const Shape& rhs_shape = rhs->shape();
size_t m = lhs_shape.dimensions(0);
size_t k = lhs_shape.dimensions(1);
size_t n = rhs_shape.dimensions(1);
PrimitiveType lhs_type = lhs->shape().element_type();
PrimitiveType rhs_type = rhs->shape().element_type();
return GetCutlassGemmKernels("cutlass_gemm", dot_type, lhs_type, rhs_type,
m, n, k, indices,
{}, device);
}
};
class CutlassGemmWithUpcastFusion : public CustomKernelFusion {
public:
absl::StatusOr<std::vector<CustomKernel>> LoadKernels(
const se::DeviceDescription& device,
const HloComputation* computation) const final {
auto* dot = DynCast<HloDotInstruction>(computation->root_instruction());
if (dot == nullptr) {
return absl::InternalError(
"cutlass_gemm_with_upcast requires ROOT operation to be a dot");
}
TF_ASSIGN_OR_RETURN(GemmWithUpcast matched, MatchGemmWithUpcast(dot));
const HloParameterInstruction* lhs;
const HloParameterInstruction* rhs;
if (matched.lhs_upcast == nullptr && matched.rhs_upcast != nullptr) {
lhs = Cast<HloParameterInstruction>(matched.dot->operand(0));
rhs = Cast<HloParameterInstruction>(matched.rhs_upcast->operand(0));
} else if (matched.lhs_upcast != nullptr && matched.rhs_upcast == nullptr) {
lhs = Cast<HloParameterInstruction>(matched.lhs_upcast->operand(0));
rhs = Cast<HloParameterInstruction>(matched.dot->operand(1));
} else {
lhs = Cast<HloParameterInstruction>(matched.lhs_upcast->operand(0));
rhs = Cast<HloParameterInstruction>(matched.rhs_upcast->operand(0));
}
const Shape& lhs_shape = lhs->shape();
const Shape& rhs_shape = rhs->shape();
size_t m = lhs_shape.dimensions(0);
size_t k = lhs_shape.dimensions(1);
size_t n = rhs_shape.dimensions(1);
PrimitiveType dot_type = dot->shape().element_type();
PrimitiveType lhs_type = lhs_shape.element_type();
PrimitiveType rhs_type = rhs_shape.element_type();
kernel::gemm_universal::ArgsIndices args_indices = {
lhs->parameter_number(), rhs->parameter_number(),
computation->num_parameters()};
return GetCutlassGemmKernels("cutlass_gemm_with_upcast", dot_type, lhs_type,
rhs_type, m, n, k, args_indices, {},
device);
}
};
class CutlassGemmWithDynamicUpdateSliceFusion : public CustomKernelFusion {
public:
absl::StatusOr<std::vector<CustomKernel>> LoadKernels(
const se::DeviceDescription& device,
const HloComputation* computation) const final {
auto [root, workspace] =
MatchRootWithWorkspace(computation->root_instruction());
auto* dus = DynCast<HloDynamicUpdateSliceInstruction>(root);
if (dus == nullptr) {
return absl::InternalError(
"cutlass_gemm_with_dynamic_update_slice requires ROOT operation to "
"be a dynamic update slice");
}
TF_ASSIGN_OR_RETURN(auto matched, MatchGemmWithDynamicUpdateSlice(dus));
TF_RETURN_IF_ERROR(
MatchSimpleGemm(Cast<HloDotInstruction>(matched.dot),
{PrimitiveType::F32, PrimitiveType::BF16}));
auto dot_type = matched.dot->shape().element_type();
auto* lhs = Cast<HloParameterInstruction>(matched.dot->operand(0));
auto* rhs = Cast<HloParameterInstruction>(matched.dot->operand(1));
auto* out = Cast<HloParameterInstruction>(matched.update_slice->operand(0));
kernel::gemm_universal::ArgsIndices args_indices = {
lhs->parameter_number(), rhs->parameter_number(),
out->parameter_number(), workspace != nullptr};
auto* offset =
Cast<HloParameterInstruction>(matched.update_slice->operand(2));
kernel::gemm_universal::DynamicSliceIndices slices;
slices.out = offset->parameter_number();
const Shape& lhs_shape = lhs->shape();
const Shape& rhs_shape = rhs->shape();
size_t m = lhs_shape.dimensions(0);
size_t k = lhs_shape.dimensions(1);
size_t n = rhs_shape.dimensions(1);
PrimitiveType lhs_type = lhs->shape().element_type();
PrimitiveType rhs_type = rhs->shape().element_type();
return GetCutlassGemmKernels("cutlass_gemm_with_dynamic_update_slice",
dot_type, lhs_type, rhs_type, m, n, k,
args_indices, slices, device);
}
};
}
XLA_REGISTER_CUSTOM_FUSION_PATTERN(::xla::gpu::CutlassGemmWithUpcastPattern);
XLA_REGISTER_CUSTOM_FUSION_PATTERN(
::xla::gpu::CutlassGemmWithDynamicUpdateSlicePattern);
XLA_REGISTER_CUSTOM_FUSION("cutlass_gemm", ::xla::gpu::CutlassGemmFusion);
XLA_REGISTER_CUSTOM_FUSION("cutlass_gemm_with_upcast",
::xla::gpu::CutlassGemmWithUpcastFusion);
XLA_REGISTER_CUSTOM_FUSION("cutlass_gemm_with_dynamic_update_slice",
::xla::gpu::CutlassGemmWithDynamicUpdateSliceFusion); | #include "xla/service/gpu/kernels/cutlass_gemm_fusion.h"
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include <gtest/gtest.h>
#include "absl/log/log.h"
#include "absl/status/statusor.h"
#include "xla/array.h"
#include "xla/array2d.h"
#include "xla/array3d.h"
#include "xla/error_spec.h"
#include "xla/literal_util.h"
#include "xla/service/gpu/gpu_device_info_for_tests.h"
#include "xla/service/gpu/kernels/custom_kernel_fusion_pattern.h"
#include "xla/service/gpu/kernels/cutlass_gemm_custom_kernel.h"
#include "xla/service/gpu/transforms/custom_kernel_fusion_rewriter.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/verified_hlo_module.h"
#include "xla/types.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/test.h"
namespace xla::gpu {
class CutlassFusionTest : public HloTestBase {
public:
int GpuSharedMemorySize() {
return backend()
.default_stream_executor()
->GetDeviceDescription()
.shared_memory_per_block_optin();
}
int CutlassGemmKernelSharedMemorySize(PrimitiveType dot_type,
PrimitiveType lhs_type,
PrimitiveType rhs_type, int m, int n,
int k) {
return kernel::gemm_universal::GetCutlassGemmKernels(
"cutlass_gemm", dot_type, lhs_type, rhs_type, m, n, k,
{0, 1, 2}, {},
backend().default_stream_executor()->GetDeviceDescription())
->at(0)
.shared_memory_bytes();
};
};
TEST_F(CutlassFusionTest, RowMajorGemm) {
const char* hlo = R"(
HloModule test
ENTRY %main (p0: f32[15,19], p1: f32[19,17]) -> f32[15,17] {
%p0 = f32[15,19]{1,0} parameter(0)
%p1 = f32[19,17]{1,0} parameter(1)
ROOT %r = f32[15,17]{1,0} dot(%p0, %p1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
)";
const char* expected = R"(
; CHECK: %cutlass_gemm {{.*}} {
; CHECK: [[P0:%[^ ]+]] = f32[15,19]{1,0} parameter(0)
; CHECK: [[P1:%[^ ]+]] = f32[19,17]{1,0} parameter(1)
; CHECK: ROOT [[DOT:%[^ ]+]] = f32[15,17]{1,0} dot([[P0]], [[P1]]),
; CHECK: lhs_contracting_dims={1}, rhs_contracting_dims={0}
; CHECK: }
; CHECK: ENTRY %main {{.*}} {
; CHECK: ROOT [[FUSION:%[^ ]+]] = f32[15,17]{1,0} fusion
; CHECK: kind=kCustom, calls=%cutlass_gemm,
; CHECK: backend_config={
; CHECK: "kind":"__custom_fusion",
; CHECK: "custom_fusion_config":{"name":"cutlass_gemm","kernel_index":0}
; CHECK: }
; CHECK: }
)";
CustomKernelFusionPatternRegistry patterns;
patterns.Emplace<CutlassGemmPattern>();
auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo();
CustomKernelFusionRewriter pass(&device, 0, &patterns);
RunAndFilecheckHloRewrite(hlo, std::move(pass), expected);
}
TEST_F(CutlassFusionTest, RowMajorGemmWithUpcast) {
const char* hlo = R"(
HloModule test
ENTRY %main (p0: bf16[15,19], p1: f32[19,17]) -> f32[15,17] {
%p0 = bf16[15,19]{1,0} parameter(0)
%p1 = bf16[19,17]{1,0} parameter(1)
%c1 = f32[19,17]{1,0} convert(%p1)
ROOT %r = f32[15,17]{1,0} dot(%p0, %c1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
)";
const char* expected = R"(
; CHECK: %cutlass_gemm_with_upcast {{.*}} {
; CHECK-DAG: [[P0:%[^ ]+]] = bf16[15,19]{1,0} parameter
; CHECK-DAG: [[P1:%[^ ]+]] = bf16[19,17]{1,0} parameter
; CHECK: [[C1:%[^ ]+]] = f32[19,17]{1,0} convert([[P1]])
; CHECK: ROOT [[DOT:%[^ ]+]] = f32[15,17]{1,0} dot([[P0]], [[C1]]),
; CHECK: lhs_contracting_dims={1}, rhs_contracting_dims={0}
; CHECK: }
; CHECK: ENTRY %main {{.*}} {
; CHECK: ROOT [[FUSION:%[^ ]+]] = f32[15,17]{1,0} fusion
; CHECK: kind=kCustom, calls=%cutlass_gemm_with_upcast,
; CHECK: backend_config={
; CHECK: "kind":"__custom_fusion",
; CHECK: "custom_fusion_config":{"name":"cutlass_gemm_with_upcast","kernel_index":0}
; CHECK: }
; CHECK: }
)";
CustomKernelFusionPatternRegistry patterns;
patterns.Emplace<CutlassGemmWithUpcastPattern>();
auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo();
CustomKernelFusionRewriter pass(&device, 0, &patterns);
RunAndFilecheckHloRewrite(hlo, std::move(pass), expected);
}
TEST_F(CutlassFusionTest, RowMajorGemmWithUpcastOfBothOperands) {
const char* hlo = R"(
HloModule test
ENTRY %main (p0: bf16[15,19], p1: bf16[19,17]) -> f32[15,17] {
%p0 = bf16[15,19]{1,0} parameter(0)
%c1 = f32[15,19]{1,0} convert(%p0)
%p1 = bf16[19,17]{1,0} parameter(1)
%c2 = f32[19,17]{1,0} convert(%p1)
ROOT %r = f32[15,17]{1,0} dot(%c1, %c2),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
)";
const char* expected = R"(
; CHECK: %cutlass_gemm_with_upcast {{.*}} {
; CHECK-DAG: [[P0:%[^ ]+]] = bf16[15,19]{1,0} parameter
; CHECK: [[C1:%[^ ]+]] = f32[15,19]{1,0} convert([[P0]])
; CHECK-DAG: [[P1:%[^ ]+]] = bf16[19,17]{1,0} parameter
; CHECK: [[C2:%[^ ]+]] = f32[19,17]{1,0} convert([[P1]])
; CHECK: ROOT [[DOT:%[^ ]+]] = f32[15,17]{1,0} dot([[C1]], [[C2]]),
; CHECK: lhs_contracting_dims={1}, rhs_contracting_dims={0}
; CHECK: }
; CHECK: ENTRY %main {{.*}} {
; CHECK: ROOT [[FUSION:%[^ ]+]] = f32[15,17]{1,0} fusion
; CHECK: kind=kCustom, calls=%cutlass_gemm_with_upcast,
; CHECK: backend_config={
; CHECK: "kind":"__custom_fusion",
; CHECK: "custom_fusion_config":{"name":"cutlass_gemm_with_upcast","kernel_index":0}
; CHECK: }
; CHECK: }
)";
CustomKernelFusionPatternRegistry patterns;
patterns.Emplace<CutlassGemmWithUpcastPattern>();
auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo();
CustomKernelFusionRewriter pass(&device, 0, &patterns);
RunAndFilecheckHloRewrite(hlo, std::move(pass), expected);
}
TEST_F(CutlassFusionTest, DoNotPatternMatchNotImplementedKernelTypes) {
const char* hlo = R"(
HloModule test
ENTRY %main (p0: bf16[15,19], p1: bf16[19,17]) -> f32[15,17] {
%p0 = s8[15,19]{1,0} parameter(0)
%c1 = f32[15,19]{1,0} convert(%p0)
%p1 = s8[19,17]{1,0} parameter(1)
%c2 = f32[19,17]{1,0} convert(%p1)
ROOT %r = f32[15,17]{1,0} dot(%c1, %c2),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
)";
CustomKernelFusionPatternRegistry patterns;
patterns.Emplace<CutlassGemmWithUpcastPattern>();
absl::StatusOr<std::unique_ptr<VerifiedHloModule>> hlo_module =
ParseAndReturnVerifiedModule(hlo);
auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo();
CustomKernelFusionRewriter pass(&device, 0, &patterns);
ASSERT_FALSE(pass.Run(hlo_module.value().get()).value());
}
TEST_F(CutlassFusionTest, RowMajorGemmWithDynamicUpdateSlice) {
const char* hlo = R"(
HloModule test
ENTRY %main (p0: f32[2,2,2], p1: f32[2,2], i: s32[]) -> f32[2,2,2] {
%p0 = f32[2,2,2]{2,1,0} parameter(0)
%p1 = f32[2,2]{1,0} parameter(1)
%i = s32[] parameter(2)
%dot = f32[2,2]{1,0} dot(%p1, %p1),
lhs_contracting_dims={1},
rhs_contracting_dims={0}
%bc = f32[1,2,2]{2,1,0} bitcast(%dot)
ROOT %r = f32[2,2,2]{2,1,0} dynamic-update-slice(%p0, %bc, %i, %i, %i)
}
)";
const char* expected = R"(
; CHECK: %cutlass_gemm_with_dynamic_update_slice {{.*}} {
; CHECK-DAG: [[P0:%[^ ]+]] = f32[2,2]{1,0} parameter
; CHECK-DAG: [[P1:%[^ ]+]] = f32[2,2,2]{2,1,0} parameter
; CHECK-DAG: [[P2:%[^ ]+]] = s32[] parameter
; CHECK-DAG: [[DOT:%[^ ]+]] = f32[2,2]{1,0} dot([[P0]], [[P0]])
; CHECK-DAG: [[CAST:%[^ ]+]] = f32[1,2,2]{2,1,0} bitcast([[DOT]])
; CHECK: ROOT [[DUS:%[^ ]+]] = f32[2,2,2]{2,1,0} dynamic-update-slice(
; CHECK: [[P1]], [[CAST]], [[P2]], [[P2]], [[P2]]
; CHECK: )
; CHECK: }
; CHECK: ENTRY %main {{.*}} {
; CHECK: ROOT [[FUSION:%[^ ]+]] = f32[2,2,2]{2,1,0} fusion
; CHECK: kind=kCustom, calls=%cutlass_gemm_with_dynamic_update_slice,
; CHECK: backend_config={
; CHECK: "kind":"__custom_fusion",
; CHECK: "custom_fusion_config":{
; CHECK: "name":"cutlass_gemm_with_dynamic_update_slice","kernel_index":0
; CHECK: }
; CHECK: }
; CHECK: }
)";
CustomKernelFusionPatternRegistry patterns;
patterns.Emplace<CutlassGemmWithDynamicUpdateSlicePattern>();
auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo();
CustomKernelFusionRewriter pass(&device, 0, &patterns);
RunAndFilecheckHloRewrite(hlo, std::move(pass), expected);
}
TEST_F(CutlassFusionTest, RowMajorGemmWithDynamicUpdateSliceMultipleUses) {
const char* hlo = R"(
HloModule test
ENTRY %main {
%p0 = f32[2,2,2]{2,1,0} parameter(0)
%p1 = f32[2,2]{1,0} parameter(1)
%i = s32[] parameter(2)
%dot = f32[2,2]{1,0} dot(%p1, %p1),
lhs_contracting_dims={1},
rhs_contracting_dims={0}
%add = f32[2,2]{1,0} add(%dot, %dot)
%cast = f32[1,2,2]{2,1,0} bitcast(%dot)
%dus = f32[2,2,2]{2,1,0} dynamic-update-slice(%p0, %cast, %i, %i, %i)
ROOT %r = (f32[2,2]{1,0}, f32[2,2,2]{2,1,0}) tuple(%add, %dus)
}
)";
const char* expected = R"(
; CHECK: %cutlass_gemm_with_dynamic_update_slice {{.*}} {
; CHECK-DAG: [[P0:%[^ ]+]] = f32[2,2]{1,0} parameter
; CHECK-DAG: [[P1:%[^ ]+]] = f32[2,2,2]{2,1,0} parameter
; CHECK-DAG: [[P2:%[^ ]+]] = s32[] parameter
; CHECK-DAG: [[DOT:%[^ ]+]] = f32[2,2]{1,0} dot([[P0]], [[P0]])
; CHECK-DAG: [[CAST:%[^ ]+]] = f32[1,2,2]{2,1,0} bitcast([[DOT]])
; CHECK: ROOT [[DUS:%[^ ]+]] = f32[2,2,2]{2,1,0} dynamic-update-slice(
; CHECK: [[P1]], [[CAST]], [[P2]], [[P2]], [[P2]]
; CHECK: )
; CHECK: }
; CHECK: ENTRY %main {{.*}} {
; CHECK: [[OFFSET:%[^ ]+]] = s32[] parameter(2)
; CHECK: [[FUSION:%[^ ]+]] = f32[2,2,2]{2,1,0} fusion
; CHECK: kind=kCustom, calls=%cutlass_gemm_with_dynamic_update_slice,
; CHECK: backend_config={
; CHECK: "kind":"__custom_fusion",
; CHECK: "custom_fusion_config":{
; CHECK: "name":"cutlass_gemm_with_dynamic_update_slice","kernel_index":0
; CHECK: }
; CHECK: }
; CHECK: [[SLICE:%[^ ]+]] = f32[1,2,2]{2,1,0} dynamic-slice(
; CHECK: [[FUSION]], [[OFFSET]], [[OFFSET]], [[OFFSET]]),
; CHECK: dynamic_slice_sizes={1,2,2}
; CHECK: [[CAST:%[^. ]+]] = f32[2,2]{1,0} bitcast([[SLICE]])
; CHECK: [[ADD:%[^. ]+]] = f32[2,2]{1,0} add([[CAST]], [[CAST]])
; CHECK: }
)";
CustomKernelFusionPatternRegistry patterns;
patterns.Emplace<CutlassGemmWithDynamicUpdateSlicePattern>();
auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo();
CustomKernelFusionRewriter pass(&device, 0, &patterns);
RunAndFilecheckHloRewrite(hlo, std::move(pass), expected);
}
TEST_F(CutlassFusionTest, RowMajorGemmWithDynamicUpdateSliceWithoutBitcast) {
const char* hlo = R"(
HloModule test
ENTRY %main (p0: f32[4,2], p1: f32[2,2], i: s32[]) -> f32[4,2] {
%p0 = f32[4,2]{1,0} parameter(0)
%p1 = f32[2,2]{1,0} parameter(1)
%i = s32[] parameter(2)
%dot = f32[2,2]{1,0} dot(%p1, %p1),
lhs_contracting_dims={1},
rhs_contracting_dims={0}
ROOT %r = f32[4,2]{1,0} dynamic-update-slice(%p0, %dot, %i, %i)
}
)";
const char* expected = R"(
; CHECK: %cutlass_gemm_with_dynamic_update_slice {{.*}} {
; CHECK-DAG: [[P1:%[^ ]+]] = f32[4,2]{1,0} parameter
; CHECK-DAG: [[P0:%[^ ]+]] = f32[2,2]{1,0} parameter
; CHECK-DAG: [[DOT:%[^ ]+]] = f32[2,2]{1,0} dot([[P0]], [[P0]])
; CHECK-DAG: [[P2:%[^ ]+]] = s32[] parameter
; CHECK: ROOT [[DUS:%[^ ]+]] = f32[4,2]{1,0} dynamic-update-slice([[P1]], [[DOT]], [[P2]], [[P2]])
; CHECK: }
; CHECK: ENTRY %main {{.*}} {
; CHECK: ROOT [[FUSION:%[^ ]+]] = f32[4,2]{1,0} fusion
; CHECK: kind=kCustom, calls=%cutlass_gemm_with_dynamic_update_slice,
; CHECK: backend_config={
; CHECK: "kind":"__custom_fusion",
; CHECK: "custom_fusion_config":{
; CHECK: "name":"cutlass_gemm_with_dynamic_update_slice","kernel_index":0
; CHECK: }
; CHECK: }
; CHECK: }
)";
CustomKernelFusionPatternRegistry patterns;
patterns.Emplace<CutlassGemmWithDynamicUpdateSlicePattern>();
auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo();
CustomKernelFusionRewriter pass(&device, 0, &patterns);
RunAndFilecheckHloRewrite(hlo, std::move(pass), expected);
}
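// The *Kernel tests below execute the rewritten CUTLASS fusions and
// compare their numerics against equivalent cuBLAS custom calls.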
TEST_F(CutlassFusionTest, RowMajorGemmKernel) {
ErrorSpec error_spec{1e-3, 1e-3};
const char* hlo_text_cublas = R"(
HloModule cublas
ENTRY e {
arg0 = f32[100,784]{1,0} parameter(0)
arg1 = f32[784,10]{1,0} parameter(1)
gemm = (f32[100,10]{1,0}, s8[0]{0}) custom-call(arg0, arg1),
custom_call_target="__cublas$gemm",
backend_config={"gemm_backend_config":{"alpha_real":1,"beta":0,"dot_dimension_numbers":{"lhs_contracting_dimensions":[1],"rhs_contracting_dimensions":[0],"lhs_batch_dimensions":[],"rhs_batch_dimensions":[]},"alpha_imag":0,"precision_config":{"operand_precision":["DEFAULT","DEFAULT"]},"epilogue":"DEFAULT"}}
ROOT get-tuple-element = f32[100,10]{1,0} get-tuple-element((f32[100,10]{1,0}, s8[0]{0}) gemm), index=0
})";
const char* hlo_text_custom_fusion = R"(
HloModule cutlass
cutlass_gemm {
arg0 = f32[100,784]{1,0} parameter(0)
arg1 = f32[784,10]{1,0} parameter(1)
ROOT dot = f32[100,10]{1,0} dot(arg0, arg1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
ENTRY e {
arg0 = f32[100,784]{1,0} parameter(0)
arg1 = f32[784,10]{1,0} parameter(1)
ROOT _ = f32[100,10]{1,0} fusion(arg0, arg1), kind=kCustom, calls=cutlass_gemm,
backend_config={"fusion_backend_config":{kind: "__custom_fusion", custom_fusion_config: {"name":"cutlass_gemm", "kernel_index":0}}}
})";
EXPECT_TRUE(RunAndCompareTwoModules(hlo_text_cublas, hlo_text_custom_fusion,
error_spec, false));
}
TEST_F(CutlassFusionTest, GemmWithRightHandSideUpcastKernel) {
ErrorSpec error_spec{1e-3, 1e-3};
const char* hlo_text_cublas = R"(
HloModule cublas
ENTRY e {
p0 = f32[16,32]{1,0} parameter(0)
p1 = bf16[32,8]{1,0} parameter(1)
c1 = f32[32,8]{1,0} convert(p1)
gemm = (f32[16,8]{1,0}, s8[0]{0}) custom-call(p0, c1),
custom_call_target="__cublas$gemm",
backend_config={"gemm_backend_config":{"alpha_real":1,"beta":0,"dot_dimension_numbers":{"lhs_contracting_dimensions":[1],"rhs_contracting_dimensions":[0],"lhs_batch_dimensions":[],"rhs_batch_dimensions":[]},"alpha_imag":0,"precision_config":{"operand_precision":["DEFAULT","DEFAULT"]},"epilogue":"DEFAULT"}}
ROOT get-tuple-element = f32[16,8]{1,0} get-tuple-element(gemm), index=0
})";
const char* hlo_text_custom_fusion = R"(
HloModule cutlass
cutlass_gemm_with_upcast {
p0 = f32[16,32]{1,0} parameter(0)
p1 = bf16[32,8]{1,0} parameter(1)
c1 = f32[32,8]{1,0} convert(p1)
ROOT dot = f32[16,8]{1,0} dot(p0, c1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
ENTRY e {
p0 = f32[16,32]{1,0} parameter(0)
p1 = bf16[32,8]{1,0} parameter(1)
ROOT _ = f32[16,8]{1,0} fusion(p0, p1), kind=kCustom,
calls=cutlass_gemm_with_upcast,
backend_config={"fusion_backend_config":{kind: "__custom_fusion",
custom_fusion_config: {"name":"cutlass_gemm_with_upcast",
"kernel_index":0}}}
})";
EXPECT_TRUE(RunAndCompareTwoModules(hlo_text_cublas, hlo_text_custom_fusion,
error_spec, false));
}
TEST_F(CutlassFusionTest, GemmWithLeftHandAndRightHandSideUpcastKernel) {
ErrorSpec error_spec{1e-3, 1e-3};
const char* hlo_text_cublas = R"(
HloModule cublas
ENTRY e {
p0 = bf16[16,32]{1,0} parameter(0)
c0 = f32[16,32]{1,0} convert(p0)
p1 = s8[32,8]{1,0} parameter(1)
c1 = f32[32,8]{1,0} convert(p1)
gemm = (f32[16,8]{1,0}, s8[0]{0}) custom-call(c0, c1),
custom_call_target="__cublas$gemm",
backend_config={"gemm_backend_config":{"alpha_real":1,"beta":0,"dot_dimension_numbers":{"lhs_contracting_dimensions":[1],"rhs_contracting_dimensions":[0],"lhs_batch_dimensions":[],"rhs_batch_dimensions":[]},"alpha_imag":0,"precision_config":{"operand_precision":["DEFAULT","DEFAULT"]},"epilogue":"DEFAULT"}}
ROOT get-tuple-element = f32[16,8]{1,0} get-tuple-element(gemm), index=0
})";
const char* hlo_text_custom_fusion = R"(
HloModule cutlass
cutlass_gemm_with_upcast {
p0 = bf16[16,32]{1,0} parameter(0)
c0 = f32[16,32]{1,0} convert(p0)
p1 = s8[32,8]{1,0} parameter(1)
c1 = f32[32,8]{1,0} convert(p1)
ROOT dot = f32[16,8]{1,0} dot(c0, c1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
ENTRY e {
p0 = bf16[16,32]{1,0} parameter(0)
p1 = s8[32,8]{1,0} parameter(1)
ROOT _ = f32[16,8]{1,0} fusion(p0, p1), kind=kCustom, calls=cutlass_gemm_with_upcast,
backend_config={"fusion_backend_config":{kind: "__custom_fusion", custom_fusion_config: {"name":"cutlass_gemm_with_upcast", "kernel_index":0}}}
})";
EXPECT_TRUE(RunAndCompareTwoModules(hlo_text_cublas, hlo_text_custom_fusion,
error_spec, false));
}
TEST_F(CutlassFusionTest, RowMajorGemmWithDynamicUpdateSliceKernel) {
if (GpuSharedMemorySize() <
CutlassGemmKernelSharedMemorySize(BF16, BF16, BF16, 8, 8, 8)) {
GTEST_SKIP() << "The GPU does not have sufficient shared memory";
}
ErrorSpec error_spec{1e-3, 1e-3};
const char* hlo_text_cublas = R"(
HloModule cublas
ENTRY e {
p0 = bf16[2,8,8]{2,1,0} parameter(0)
p1 = bf16[8,8]{1,0} parameter(1)
p2 = s32[] parameter(2)
p3 = s32[] parameter(3)
gemm.tuple = (bf16[8,8]{1,0}, s8[0]{0}) custom-call(p1, p1),
custom_call_target="__cublas$gemm",
backend_config={"gemm_backend_config":{"alpha_real":1,"beta":0,"dot_dimension_numbers":{"lhs_contracting_dimensions":[1],"rhs_contracting_dimensions":[0],"lhs_batch_dimensions":[],"rhs_batch_dimensions":[]},"alpha_imag":0,"precision_config":{"operand_precision":["DEFAULT","DEFAULT"]},"epilogue":"DEFAULT"}}
gemm = bf16[8,8]{1,0} get-tuple-element(gemm.tuple), index=0
cast = bf16[1,8,8]{2,1,0} bitcast(gemm)
ROOT r = bf16[2,8,8]{2,1,0} dynamic-update-slice(p0, cast, p2, p3, p3)
})";
const char* hlo_text_custom_fusion = R"(
HloModule cutlass
cutlass_gemm {
p0.1 = bf16[8,8]{1,0} parameter(0)
p1.1 = bf16[2,8,8]{2,1,0} parameter(1)
p2 = s32[] parameter(2)
p3 = s32[] parameter(3)
dot.1 = bf16[8,8]{1,0} dot(p0.1, p0.1), lhs_contracting_dims={1}, rhs_contracting_dims={0}
bc.1 = bf16[1,8,8]{2,1,0} bitcast(dot.1)
r.1 = bf16[2,8,8]{2,1,0} dynamic-update-slice(p1.1, bc.1, p2, p3, p3)
workspace = u8[1024]{0} custom-call(),
custom_call_target="__custom_kernel_fusion$workspace",
api_version=API_VERSION_TYPED_FFI
ROOT tuple = (bf16[2,8,8]{2,1,0}, u8[1024]{0}) tuple(r.1, workspace)
}
ENTRY e {
p0 = bf16[2,8,8]{2,1,0} parameter(0)
p1 = bf16[8,8]{1,0} parameter(1)
p2 = s32[] parameter(2)
p3 = s32[] parameter(3)
r.0 = (bf16[2,8,8]{2,1,0}, u8[1024]{0}) fusion(p1, p0, p2, p3), kind=kCustom,
calls=%cutlass_gemm,
backend_config={"fusion_backend_config":{"kind":"__custom_fusion","custom_fusion_config":{"name":"cutlass_gemm_with_dynamic_update_slice", "kernel_index":0}}}
ROOT %get-tuple-element = bf16[2,8,8]{2,1,0} get-tuple-element(r.0), index=0
})";
Array3D<bfloat16> p0_arr(2, 8, 8);
Array2D<bfloat16> p1_arr(8, 8);
p1_arr.Each([](int64_t i, int64_t j, bfloat16* out) {
*out = bfloat16{1.0f * i * j};
});
Array<int32_t> p2_arr({}, 1);
Array<int32_t> p3_arr({}, 0);
auto p0 = LiteralUtil::CreateFromArray(p0_arr);
auto p1 = LiteralUtil::CreateFromArray(p1_arr);
auto p2 = LiteralUtil::CreateFromArray(p2_arr);
auto p3 = LiteralUtil::CreateFromArray(p3_arr);
EXPECT_TRUE(RunAndCompareTwoModules(hlo_text_cublas, hlo_text_custom_fusion,
{&p0, &p1, &p2, &p3}, error_spec,
false));
}
TEST_F(CutlassFusionTest,
RowMajorGemmWithDynamicUpdateSliceKernelWithoutBitcast) {
if (GpuSharedMemorySize() <
CutlassGemmKernelSharedMemorySize(BF16, BF16, BF16, 8, 8, 8)) {
GTEST_SKIP() << "The GPU does not have sufficient shared memory";
}
ErrorSpec error_spec{1e-3, 1e-3};
const char* hlo_text_cublas = R"(
HloModule cublas
ENTRY e {
p0 = bf16[16,8]{1,0} parameter(0)
p1 = bf16[8,8]{1,0} parameter(1)
p2 = s32[] parameter(2)
p3 = s32[] parameter(3)
gemm.tuple = (bf16[8,8]{1,0}, s8[0]{0}) custom-call(p1, p1),
custom_call_target="__cublas$gemm",
backend_config={"gemm_backend_config":{"alpha_real":1,"beta":0,"dot_dimension_numbers":{"lhs_contracting_dimensions":[1],"rhs_contracting_dimensions":[0],"lhs_batch_dimensions":[],"rhs_batch_dimensions":[]},"alpha_imag":0,"precision_config":{"operand_precision":["DEFAULT","DEFAULT"]},"epilogue":"DEFAULT"}}
gemm = bf16[8,8]{1,0} get-tuple-element(gemm.tuple), index=0
ROOT r = bf16[16,8]{1,0} dynamic-update-slice(p0, gemm, p2, p3)
}
)";
const char* hlo_text_custom_fusion = R"(
HloModule cutlass
cutlass_gemm {
p0.1 = bf16[8,8]{1,0} parameter(0)
p1.1 = bf16[16,8]{1,0} parameter(1)
p2 = s32[] parameter(2)
p3 = s32[] parameter(3)
dot.1 = bf16[8,8]{1,0} dot(p0.1, p0.1), lhs_contracting_dims={1}, rhs_contracting_dims={0}
r.1 = bf16[16,8]{1,0} dynamic-update-slice(p1.1, dot.1, p2, p3)
workspace = u8[1024]{0} custom-call(),
custom_call_target="__custom_kernel_fusion$workspace",
api_version=API_VERSION_TYPED_FFI
ROOT tuple = (bf16[16,8]{1,0}, u8[1024]{0}) tuple(r.1, workspace)
}
ENTRY e {
p0 = bf16[16,8]{1,0} parameter(0)
p1 = bf16[8,8]{1,0} parameter(1)
p2 = s32[] parameter(2)
p3 = s32[] parameter(3)
r.0 = (bf16[16,8]{1,0}, u8[1024]{0}) fusion(p1, p0, p2, p3), kind=kCustom,
calls=%cutlass_gemm,
backend_config={"fusion_backend_config":{"kind":"__custom_fusion","custom_fusion_config":{"name":"cutlass_gemm_with_dynamic_update_slice", "kernel_index":0}}}
ROOT %get-tuple-element = bf16[16,8]{1,0} get-tuple-element(r.0), index=0
})";
Array2D<bfloat16> p0_arr(16, 8);
Array2D<bfloat16> p1_arr(8, 8);
p1_arr.Each([](int64_t i, int64_t j, bfloat16* out) {
*out = bfloat16{1.0f * i * j};
});
Array<int32_t> p2_arr({}, 0);
Array<int32_t> p3_arr({}, 1);
auto p0 = LiteralUtil::CreateFromArray(p0_arr);
auto p1 = LiteralUtil::CreateFromArray(p1_arr);
auto p2 = LiteralUtil::CreateFromArray(p2_arr);
auto p3 = LiteralUtil::CreateFromArray(p3_arr);
EXPECT_TRUE(RunAndCompareTwoModules(hlo_text_cublas, hlo_text_custom_fusion,
{&p0, &p1, &p2, &p3}, error_spec,
false));
}
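// The remaining tests probe which operand layouts and dot dimension
// numbers the upcast pattern accepts: mismatched layouts, misplaced
// contracting dimensions, and batch dimensions must not be rewritten.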
TEST_F(CutlassFusionTest, GemmWithUpcastShouldBeFused) {
const char* hlo = R"(
ENTRY e {
p0 = f32[16,32]{1,0} parameter(0)
p1 = bf16[32,8]{1,0} parameter(1)
c1 = f32[32,8]{1,0} convert(p1)
ROOT dot = f32[16,8]{1,0} dot(p0, c1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
})";
std::string expected = "CHECK: cutlass_gemm";
CustomKernelFusionPatternRegistry patterns;
patterns.Emplace<CutlassGemmWithUpcastPattern>();
auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo();
CustomKernelFusionRewriter pass(&device, 0, &patterns);
RunAndFilecheckHloRewrite(hlo, std::move(pass), expected);
EXPECT_TRUE(RunAndCompare(hlo, ErrorSpec{1e-3, 1e-3}));
}
TEST_F(CutlassFusionTest,
GemmWithUpcastWithALhsColumnMajorOperandShouldNotBeFused) {
const char* hlo = R"(
ENTRY e {
p0 = f32[16,32]{0,1} parameter(0)
p1 = bf16[32,8]{1,0} parameter(1)
c1 = f32[32,8]{1,0} convert(p1)
ROOT dot = f32[16,8]{1,0} dot(p0, c1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
})";
CustomKernelFusionPatternRegistry patterns;
patterns.Emplace<CutlassGemmWithUpcastPattern>();
auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo();
CustomKernelFusionRewriter pass(&device, 0, &patterns);
RunAndFilecheckHloRewrite(hlo, std::move(pass), std::nullopt);
}
TEST_F(CutlassFusionTest,
GemmWithUpcastWithARhsColumnMajorOperandShouldNotBeFused) {
const char* hlo = R"(
ENTRY e {
p0 = f32[16,32]{1,0} parameter(0)
p1 = bf16[32,8]{0,1} parameter(1)
c1 = f32[32,8]{0,1} convert(p1)
ROOT dot = f32[16,8]{1,0} dot(p0, c1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
})";
CustomKernelFusionPatternRegistry patterns;
patterns.Emplace<CutlassGemmWithUpcastPattern>();
auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo();
CustomKernelFusionRewriter pass(&device, 0, &patterns);
RunAndFilecheckHloRewrite(hlo, std::move(pass), std::nullopt);
}
TEST_F(CutlassFusionTest,
GemmWithUpcastWithAColumnMajorDotResultShouldNotBeFused) {
const char* hlo = R"(
ENTRY e {
p0 = f32[16,32]{1,0} parameter(0)
p1 = bf16[32,8]{1,0} parameter(1)
c1 = f32[32,8]{1,0} convert(p1)
ROOT dot = f32[16,8]{0,1} dot(p0, c1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
})";
CustomKernelFusionPatternRegistry patterns;
patterns.Emplace<CutlassGemmWithUpcastPattern>();
auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo();
CustomKernelFusionRewriter pass(&device, 0, &patterns);
RunAndFilecheckHloRewrite(hlo, std::move(pass), std::nullopt);
}
TEST_F(CutlassFusionTest,
GemmWithUpcastLhsContractingDimensionShouldBeOnTheMinorAxis) {
const char* hlo = R"(
ENTRY e {
p0 = f32[32,16]{1,0} parameter(0)
p1 = bf16[32,8]{1,0} parameter(1)
c1 = f32[32,8]{1,0} convert(p1)
ROOT dot = f32[16,8]{1,0} dot(p0, c1),
lhs_contracting_dims={0},
rhs_contracting_dims={0}
})";
CustomKernelFusionPatternRegistry patterns;
patterns.Emplace<CutlassGemmWithUpcastPattern>();
auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo();
CustomKernelFusionRewriter pass(&device, 0, &patterns);
RunAndFilecheckHloRewrite(hlo, std::move(pass), std::nullopt);
}
TEST_F(CutlassFusionTest,
GemmWithUpcastRhsContractingDimensionShouldBeOnTheMajorAxis) {
const char* hlo = R"(
ENTRY e {
p0 = f32[16,32]{1,0} parameter(0)
p1 = bf16[8,32]{1,0} parameter(1)
c1 = f32[8,32]{1,0} convert(p1)
ROOT dot = f32[16,8]{1,0} dot(p0, c1),
lhs_contracting_dims={1},
rhs_contracting_dims={1}
})";
CustomKernelFusionPatternRegistry patterns;
patterns.Emplace<CutlassGemmWithUpcastPattern>();
auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo();
CustomKernelFusionRewriter pass(&device, 0, &patterns);
RunAndFilecheckHloRewrite(hlo, std::move(pass), std::nullopt);
}
TEST_F(CutlassFusionTest, GemmWithUpcastWithBatchDimensionShouldNotBeFused) {
const char* hlo = R"(
ENTRY e {
p0 = f32[4,16,32]{2,1,0} parameter(0)
p1 = bf16[4,32,8]{2,1,0} parameter(1)
c1 = f32[4,32,8]{2,1,0} convert(p1)
ROOT dot = f32[4,16,8]{2,1,0} dot(p0, c1),
lhs_batch_dims={0}, rhs_batch_dims={0},
lhs_contracting_dims={2}, rhs_contracting_dims={1}
})";
CustomKernelFusionPatternRegistry patterns;
patterns.Emplace<CutlassGemmWithUpcastPattern>();
auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo();
CustomKernelFusionRewriter pass(&device, 0, &patterns);
RunAndFilecheckHloRewrite(hlo, std::move(pass), std::nullopt);
}
TEST_F(CutlassFusionTest, GemmWithUpcastAndColumnMajorOperandsShouldBeFused) {
const char* hlo = R"(
ENTRY e {
p0 = f32[32,16]{0,1} parameter(0)
p1 = bf16[8,32]{0,1} parameter(1)
c1 = f32[8,32]{0,1} convert(p1)
ROOT dot = f32[16,8]{1,0} dot(p0, c1),
lhs_contracting_dims={0},
rhs_contracting_dims={1}
})";
CustomKernelFusionPatternRegistry patterns;
patterns.Emplace<CutlassGemmWithUpcastPattern>();
auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo();
CustomKernelFusionRewriter pass(&device, 0, &patterns);
std::string expected = "CHECK: cutlass_gemm";
RunAndFilecheckHloRewrite(hlo, std::move(pass), expected);
EXPECT_TRUE(RunAndCompare(hlo, ErrorSpec{1e-3, 1e-3}));
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/kernels/cutlass_gemm_fusion.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/kernels/cutlass_gemm_fusion_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b22227d5-e006-494b-ba20-d41d0438b437 | cpp | abseil/abseil-cpp | sequence_lock | absl/flags/internal/sequence_lock.h | absl/flags/internal/sequence_lock_test.cc | #ifndef ABSL_FLAGS_INTERNAL_SEQUENCE_LOCK_H_
#define ABSL_FLAGS_INTERNAL_SEQUENCE_LOCK_H_
#include <stddef.h>
#include <stdint.h>
#include <atomic>
#include <cassert>
#include <cstring>
#include "absl/base/optimization.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace flags_internal {
inline constexpr size_t AlignUp(size_t x, size_t align) {
return align * ((x + align - 1) / align);
}
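// A sequence lock (seqlock). The counter is odd while a write is in
// flight and even otherwise; readers snapshot the counter, copy the data,
// and accept the copy only if the counter was even and unchanged across
// the copy. The counter starts at kUninitialized (-1, which is odd), so
// every read fails until MarkInitialized() is called.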
class SequenceLock {
public:
constexpr SequenceLock() : lock_(kUninitialized) {}
void MarkInitialized() {
assert(lock_.load(std::memory_order_relaxed) == kUninitialized);
lock_.store(0, std::memory_order_release);
}
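// Attempts to copy `size` bytes of protected data into `dst`. Returns
// false if a write raced with the copy, in which case `dst` may contain
// torn data and the caller should retry.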
bool TryRead(void* dst, const std::atomic<uint64_t>* src, size_t size) const {
int64_t seq_before = lock_.load(std::memory_order_acquire);
if (ABSL_PREDICT_FALSE((seq_before & 1) == 1)) return false;
RelaxedCopyFromAtomic(dst, src, size);
std::atomic_thread_fence(std::memory_order_acquire);
int64_t seq_after = lock_.load(std::memory_order_relaxed);
return ABSL_PREDICT_TRUE(seq_before == seq_after);
}
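// Publishes `size` bytes from `src` into the protected buffer. The assert
// on an even counter reflects the contract implied by the code: writers
// must be serialized by the caller.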
void Write(std::atomic<uint64_t>* dst, const void* src, size_t size) {
int64_t orig_seq = lock_.load(std::memory_order_relaxed);
assert((orig_seq & 1) == 0);
lock_.store(orig_seq + 1, std::memory_order_relaxed);
std::atomic_thread_fence(std::memory_order_release);
RelaxedCopyToAtomic(dst, src, size);
lock_.store(orig_seq + 2, std::memory_order_release);
}
int64_t ModificationCount() const {
int64_t val = lock_.load(std::memory_order_relaxed);
assert(val != kUninitialized && (val & 1) == 0);
return val / 2;
}
void IncrementModificationCount() {
int64_t val = lock_.load(std::memory_order_relaxed);
assert(val != kUninitialized);
lock_.store(val + 2, std::memory_order_relaxed);
}
private:
static void RelaxedCopyFromAtomic(void* dst, const std::atomic<uint64_t>* src,
size_t size) {
char* dst_byte = static_cast<char*>(dst);
while (size >= sizeof(uint64_t)) {
uint64_t word = src->load(std::memory_order_relaxed);
std::memcpy(dst_byte, &word, sizeof(word));
dst_byte += sizeof(word);
src++;
size -= sizeof(word);
}
if (size > 0) {
uint64_t word = src->load(std::memory_order_relaxed);
std::memcpy(dst_byte, &word, size);
}
}
static void RelaxedCopyToAtomic(std::atomic<uint64_t>* dst, const void* src,
size_t size) {
const char* src_byte = static_cast<const char*>(src);
while (size >= sizeof(uint64_t)) {
uint64_t word;
std::memcpy(&word, src_byte, sizeof(word));
dst->store(word, std::memory_order_relaxed);
src_byte += sizeof(word);
dst++;
size -= sizeof(word);
}
if (size > 0) {
uint64_t word = 0;
std::memcpy(&word, src_byte, size);
dst->store(word, std::memory_order_relaxed);
}
}
static constexpr int64_t kUninitialized = -1;
std::atomic<int64_t> lock_;
};
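// Minimal usage sketch (illustrative only, not part of the header):
//
//   flags_internal::SequenceLock lock;
//   std::atomic<uint64_t> storage[2];
//   lock.MarkInitialized();
//   double src[2] = {1.0, 2.0};
//   lock.Write(storage, src, sizeof(src));
//   double dst[2];
//   while (!lock.TryRead(dst, storage, sizeof(dst))) {
//   }  // Retry while a writer is active.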
}  // namespace flags_internal
ABSL_NAMESPACE_END
}  // namespace absl
#endif | #include "absl/flags/internal/sequence_lock.h"
#include <algorithm>
#include <atomic>
#include <thread>
#include <tuple>
#include <vector>
#include "gtest/gtest.h"
#include "absl/base/internal/sysinfo.h"
#include "absl/container/fixed_array.h"
#include "absl/time/clock.h"
namespace {
namespace flags = absl::flags_internal;
class ConcurrentSequenceLockTest
: public testing::TestWithParam<std::tuple<int, int>> {
public:
ConcurrentSequenceLockTest()
: buf_bytes_(std::get<0>(GetParam())),
num_threads_(std::get<1>(GetParam())) {}
protected:
const int buf_bytes_;
const int num_threads_;
};
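// Stress test: `num_threads_` readers continuously TryRead the protected
// buffer while a single writer repeatedly fills it with a uniform value.
// A "bad read" is a successful TryRead that observed a torn (non-uniform)
// buffer; the test requires zero bad reads.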
TEST_P(ConcurrentSequenceLockTest, ReadAndWrite) {
const int buf_words =
flags::AlignUp(buf_bytes_, sizeof(uint64_t)) / sizeof(uint64_t);
absl::FixedArray<std::atomic<uint64_t>> protected_buf(buf_words);
for (auto& v : protected_buf) v = -1;
flags::SequenceLock seq_lock;
std::atomic<bool> stop{false};
std::atomic<int64_t> bad_reads{0};
std::atomic<int64_t> good_reads{0};
std::atomic<int64_t> unsuccessful_reads{0};
std::vector<std::thread> threads;
for (int i = 0; i < num_threads_; i++) {
threads.emplace_back([&]() {
absl::FixedArray<char> local_buf(buf_bytes_);
while (!stop.load(std::memory_order_relaxed)) {
if (seq_lock.TryRead(local_buf.data(), protected_buf.data(),
buf_bytes_)) {
bool good = true;
for (const auto& v : local_buf) {
if (v != local_buf[0]) good = false;
}
if (good) {
good_reads.fetch_add(1, std::memory_order_relaxed);
} else {
bad_reads.fetch_add(1, std::memory_order_relaxed);
}
} else {
unsuccessful_reads.fetch_add(1, std::memory_order_relaxed);
}
}
});
}
while (unsuccessful_reads.load(std::memory_order_relaxed) < num_threads_) {
absl::SleepFor(absl::Milliseconds(1));
}
seq_lock.MarkInitialized();
absl::Time deadline = absl::Now() + absl::Seconds(5);
for (int i = 0; i < 100 && absl::Now() < deadline; i++) {
absl::FixedArray<char> writer_buf(buf_bytes_);
for (auto& v : writer_buf) v = i;
seq_lock.Write(protected_buf.data(), writer_buf.data(), buf_bytes_);
absl::SleepFor(absl::Microseconds(10));
}
stop.store(true, std::memory_order_relaxed);
for (auto& t : threads) t.join();
ASSERT_GE(good_reads, 0);
ASSERT_EQ(bad_reads, 0);
}
std::vector<int> MultiplicativeRange(int low, int high, int scale) {
std::vector<int> result;
for (int current = low; current < high; current *= scale) {
result.push_back(current);
}
result.push_back(high);
return result;
}
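// Cap the thread count under ThreadSanitizer, presumably to keep the
// instrumented stress test's runtime manageable.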
#ifndef ABSL_HAVE_THREAD_SANITIZER
const int kMaxThreads = absl::base_internal::NumCPUs();
#else
const int kMaxThreads = std::min(absl::base_internal::NumCPUs(), 4);
#endif
std::vector<int> InterestingBufferSizes() {
std::vector<int> ret;
for (int v : MultiplicativeRange(1, 128, 2)) {
ret.push_back(v);
if (v > 1) {
ret.push_back(v - 1);
}
ret.push_back(v + 1);
}
return ret;
}
INSTANTIATE_TEST_SUITE_P(
TestManyByteSizes, ConcurrentSequenceLockTest,
testing::Combine(
testing::ValuesIn(InterestingBufferSizes()),
testing::ValuesIn(MultiplicativeRange(1, kMaxThreads, 2))));
class SequenceLockTest : public testing::TestWithParam<int> {};
TEST_P(SequenceLockTest, SingleThreaded) {
const int size = GetParam();
absl::FixedArray<std::atomic<uint64_t>> protected_buf(
flags::AlignUp(size, sizeof(uint64_t)) / sizeof(uint64_t));
flags::SequenceLock seq_lock;
seq_lock.MarkInitialized();
std::vector<char> src_buf(size, 'x');
seq_lock.Write(protected_buf.data(), src_buf.data(), size);
std::vector<char> dst_buf(size, '0');
ASSERT_TRUE(seq_lock.TryRead(dst_buf.data(), protected_buf.data(), size));
ASSERT_EQ(src_buf, dst_buf);
}
INSTANTIATE_TEST_SUITE_P(TestManyByteSizes, SequenceLockTest,
testing::Range(1, 128));
} | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/flags/internal/sequence_lock.h | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/flags/internal/sequence_lock_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4 |
298fcdc7-3af6-4de5-9d69-893e38b98c6d | cpp | tensorflow/tensorflow | hlo_fusion_analysis | third_party/xla/xla/service/gpu/hlo_fusion_analysis.cc | third_party/xla/xla/service/gpu/hlo_fusion_analysis_test.cc | #include "xla/service/gpu/hlo_fusion_analysis.h"
#include <algorithm>
#include <limits>
#include <memory>
#include <optional>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/inlined_vector.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "llvm/ADT/STLExtras.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/primitive_util.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/hlo_traversal.h"
#include "xla/service/gpu/ir_emission_utils.h"
#include "xla/service/gpu/reduction_utils.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_description.h"
namespace xla {
namespace gpu {
namespace {
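// The input-slices emitter requires every fusion root to be a slice with
// unit strides, all reading operands of the same shape (the element type
// may differ).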
bool IsInputFusibleNonStridedSlices(
const absl::Span<const HloInstructionAdaptor> fusion_roots) {
return absl::c_all_of(fusion_roots, [&](const HloInstructionAdaptor& root) {
return IsSliceWithUnitStrides(&root.instruction());
});
}
bool AllSliceInputsAreCompatible(
const absl::Span<const HloInstructionAdaptor> fusion_roots) {
const Shape& first_slice_operand_shape =
fusion_roots[0].GetOperand(0).shape();
return absl::c_all_of(fusion_roots, [&](const HloInstructionAdaptor& slice) {
return ShapeUtil::EqualIgnoringElementType(slice.GetOperand(0).shape(),
first_slice_operand_shape);
});
}
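// Returns the tiled-transpose description shared by all transpose heroes,
// or nullopt if two heroes disagree or if a non-transpose root is not a
// bitcast-compatible reshape/transpose of the transpose input shape.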
std::optional<TransposeDescription> FindConsistentTransposeHero(
const absl::InlinedVector<HloInstructionAdaptor, 2>& hlo_roots,
const absl::InlinedVector<HloInstructionAdaptor, 2>& heroes) {
std::optional<TransposeDescription> tiled_transpose_hero;
std::vector<const HloInstruction*> non_transpose_roots;
for (auto [root, hero] : llvm::zip(hlo_roots, heroes)) {
if (auto tr = GetDescriptionForTiledTransposeEmitter(hero.instruction())) {
if (!tiled_transpose_hero) {
tiled_transpose_hero = tr;
} else if (!tiled_transpose_hero->IsEquivalent(*tr)) {
return std::nullopt;
}
} else {
non_transpose_roots.push_back(&root.instruction());
}
}
if (!tiled_transpose_hero) return std::nullopt;
for (auto* root : non_transpose_roots) {
if (!ShapeUtil::IsReshapeOrTransposeBitcast(
root->shape(), tiled_transpose_hero->input_shape(),
/*ignore_element_type=*/true)) {
return std::nullopt;
}
}
return tiled_transpose_hero;
}
const Shape& GetShape(const HloInstructionAdaptor& adaptor) {
return adaptor.shape();
}
const Shape& GetShape(const HloInstruction* instruction) {
return instruction->shape();
}
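// Returns the smallest element bit width among the array-shaped values.
// PRED counts as 8 bits, matching its one-byte-per-element storage.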
template <typename Container>
int SmallestBitWidth(const Container& args) {
int bits = std::numeric_limits<int>::max();
for (const auto& operand : args) {
const Shape& shape = GetShape(operand);
if (!shape.IsArray()) continue;
bits = std::min(bits, shape.element_type() == PRED
? 8
: primitive_util::BitWidth(shape.element_type()));
}
return bits;
}
}  // namespace
HloFusionAnalysis::HloFusionAnalysis(
FusionBackendConfig fusion_backend_config,
std::unique_ptr<HloFusionAdaptor> fusion,
absl::InlinedVector<HloInstructionAdaptor, 2> fusion_roots,
absl::InlinedVector<HloInstructionAdaptor, 2> fusion_heroes,
const se::DeviceDescription* device_info,
std::optional<TransposeDescription> tiled_transpose,
HloFusionAnalysis::InputOutputInfo input_output_info)
: fusion_backend_config_(std::move(fusion_backend_config)),
fusion_(std::move(fusion)),
fusion_roots_(std::move(fusion_roots)),
fusion_heroes_(std::move(fusion_heroes)),
device_info_(device_info),
tiled_transpose_(tiled_transpose),
input_output_info_(std::move(input_output_info)) {}
HloFusionAnalysis HloFusionAnalysis::Create(
FusionBackendConfig backend_config,
std::unique_ptr<HloFusionAdaptor> fusion,
const se::DeviceDescription* device_info) {
absl::InlinedVector<HloInstructionAdaptor, 2> roots = fusion->GetRoots();
absl::InlinedVector<HloInstructionAdaptor, 2> heroes;
for (auto root : roots) {
heroes.push_back(FindNonTrivialHero(root));
}
InputOutputInfo input_output_info{
SmallestBitWidth(fusion->GetParameters()),
SmallestBitWidth(roots),
};
std::optional<TransposeDescription> tiled_transpose_hero =
FindConsistentTransposeHero(roots, heroes);
return HloFusionAnalysis(std::move(backend_config), std::move(fusion),
std::move(roots), std::move(heroes), device_info,
tiled_transpose_hero, std::move(input_output_info));
}
HloFusionAnalysis HloFusionAnalysis::Create(
const HloInstruction& instruction,
const se::DeviceDescription& device_info) {
absl::StatusOr<GpuBackendConfig> gpu_backend_config =
instruction.backend_config<GpuBackendConfig>();
FusionBackendConfig fusion_backend_config =
gpu_backend_config.ok() ? gpu_backend_config->fusion_backend_config()
: FusionBackendConfig::default_instance();
return Create(std::move(fusion_backend_config),
HloFusionAdaptor::ForInstruction(&instruction), &device_info);
}
HloFusionAnalysis HloFusionAnalysis::Create(
const HloInstruction& producer, const HloInstruction& consumer,
const se::DeviceDescription& device_info) {
absl::StatusOr<GpuBackendConfig> gpu_backend_config;
if (consumer.has_backend_config()) {
gpu_backend_config = consumer.backend_config<GpuBackendConfig>();
}
if (!gpu_backend_config.ok() && producer.has_backend_config()) {
gpu_backend_config = producer.backend_config<GpuBackendConfig>();
}
FusionBackendConfig fusion_backend_config =
gpu_backend_config.ok() ? gpu_backend_config->fusion_backend_config()
: FusionBackendConfig::default_instance();
return HloFusionAnalysis::Create(
std::move(fusion_backend_config),
HloFusionAdaptor::ForProducerConsumer(&producer, &consumer),
&device_info);
}
bool HloFusionAnalysis::HasConsistentTransposeHeros() const {
return tiled_transpose_.has_value();
}
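// Concatenate fusion applies only to a single non-tuple root whose hero is
// a concatenate with at most four operands.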
static bool UseConcatenateFusion(
absl::Span<const HloInstructionAdaptor> roots,
absl::Span<const HloInstructionAdaptor> heroes) {
if (heroes.size() != 1) return false;
if (heroes.front().opcode() != HloOpcode::kConcatenate) return false;
if (roots.front().shape().IsTuple()) return false;
if (heroes.front().instruction().operand_count() > 4) return false;
return true;
}
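// The emitter kind is chosen in priority order: explicit backend kinds
// (custom, Triton, cuDNN) first; sub-byte dtypes then force the loop (or
// input-slices) emitter; otherwise reduction, transpose, input-slices,
// scatter, and concatenate heroes are tried, with kLoop as the fallback.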
HloFusionAnalysis::EmitterFusionKind HloFusionAnalysis::GetEmitterFusionKind()
const {
if (fusion_backend_config_.kind() == kCustomFusionKind) {
return EmitterFusionKind::kCustomFusion;
}
if (fusion_backend_config_.kind() == kTritonFusionKind ||
fusion_backend_config_.kind() == kTritonGemmFusionKind) {
return EmitterFusionKind::kTriton;
}
if (fusion_backend_config_.kind() == kCuDnnFusionKind) {
return EmitterFusionKind::kCuDnn;
}
if (input_output_info_.smallest_input_dtype_bits < 8 ||
input_output_info_.smallest_output_dtype_bits < 8) {
if (fusion_roots_.size() > 1 &&
IsInputFusibleNonStridedSlices(fusion_roots_) &&
AllSliceInputsAreCompatible(fusion_roots_)) {
return EmitterFusionKind::kInputSlices;
}
return EmitterFusionKind::kLoop;
}
std::optional<HloInstructionAdaptor> first_reduce_hero;
for (auto [root, hero] : llvm::zip(fusion_roots_, fusion_heroes_)) {
if (IsRealReductionHero(root.instruction(), hero.instruction())) {
first_reduce_hero = hero;
break;
}
}
if (first_reduce_hero.has_value()) {
bool valid_shapes = true;
Shape hero_operand_shape = first_reduce_hero->GetOperand(0).shape();
for (auto [root, hero] : llvm::zip(fusion_roots_, fusion_heroes_)) {
if (root == *first_reduce_hero) {
continue;
}
if (!IsRealReductionHero(root.instruction(), hero.instruction())) {
if (ShapeUtil::ElementsIn(root.shape()) !=
ShapeUtil::ElementsIn(hero_operand_shape)) {
valid_shapes = false;
break;
}
} else if (!AreReductionsMultiOutputFusionCompatible(
&hero.instruction(), &first_reduce_hero->instruction())) {
valid_shapes = false;
break;
}
}
if (valid_shapes) {
return EmitterFusionKind::kReduction;
}
}
if (HasConsistentTransposeHeros()) {
return EmitterFusionKind::kTranspose;
}
if (fusion_roots_.size() > 1) {
if (IsInputFusibleNonStridedSlices(fusion_roots_) &&
AllSliceInputsAreCompatible(fusion_roots_)) {
return EmitterFusionKind::kInputSlices;
}
return EmitterFusionKind::kLoop;
}
if (fusion_roots_[0].opcode() == HloOpcode::kScatter) {
return EmitterFusionKind::kScatter;
}
if (UseConcatenateFusion(fusion_roots_, fusion_heroes_)) {
return EmitterFusionKind::kConcatenate;
}
return EmitterFusionKind::kLoop;
}
const HloInstruction* HloFusionAnalysis::FindHeroReduction() const {
if (GetEmitterFusionKind() != EmitterFusionKind::kReduction) {
return nullptr;
}
const auto& roots = fusion_roots();
CHECK(!roots.empty());
for (auto [root, hero] : llvm::zip(roots, fusion_heroes_)) {
if (IsRealReductionHero(root.instruction(), hero.instruction())) {
return &hero.instruction();
}
}
LOG(FATAL) << "Did not find a hero reduction";
}
}  // namespace gpu
} | #include "xla/service/gpu/hlo_fusion_analysis.h"
#include <gtest/gtest.h>
#include "xla/protobuf_util.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/gpu_device_info_for_tests.h"
#include "xla/service/gpu/hlo_traversal.h"
#include "xla/service/gpu/ir_emission_utils.h"
#include "xla/stream_executor/device_description.h"
#include "xla/stream_executor/device_description.pb.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
namespace xla::gpu {
namespace {
class HloFusionAnalysisTest : public HloTestBase {};
TEST_F(HloFusionAnalysisTest, DoesNotPeekOutsideBoundary) {
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"(
HloModule test_module
add {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT add = f32[] add(p0, p1)
}
ENTRY main {
%p0 = f32[1024] parameter(0)
%p1 = f32[] parameter(1)
%reduce = f32[] reduce(%p0, %p1), dimensions={0}, to_apply=add
ROOT %bitcast = s32[] bitcast(%reduce)
})"));
auto device_info = TestGpuDeviceInfo::RTXA6000DeviceInfo();
auto* root = module->entry_computation()->root_instruction();
auto analysis = HloFusionAnalysis::Create(*root, device_info);
EXPECT_EQ(analysis.GetEmitterFusionKind(),
HloFusionAnalysis::EmitterFusionKind::kLoop);
auto analysis_fused =
HloFusionAnalysis::Create(*root->operand(0), *root, device_info);
EXPECT_EQ(analysis_fused.GetEmitterFusionKind(),
HloFusionAnalysis::EmitterFusionKind::kReduction);
}
TEST_F(HloFusionAnalysisTest, ReductionWithMultipleUsers) {
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"(
HloModule test_module
add {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT add = f32[] add(p0, p1)
}
fused_computation {
%p0 = f32[1024] parameter(0)
%p1 = f32[] parameter(1)
%reduce = f32[] reduce(%p0, %p1), dimensions={0}, to_apply=add
%negate = f32[] negate(%reduce)
%log = f32[] log(%reduce)
ROOT %tuple = (f32[], f32[]) tuple(%negate, %log)
}
ENTRY main {
%p0 = f32[1024] parameter(0)
%p1 = f32[] parameter(1)
ROOT %fusion = (f32[], f32[]) fusion(%p0, %p1), kind=kLoop, calls=fused_computation
})"));
auto device_info = TestGpuDeviceInfo::RTXA6000DeviceInfo();
auto analysis = HloFusionAnalysis::Create(
FusionBackendConfig::default_instance(),
HloFusionAdaptor::ForInstruction(
module->entry_computation()->root_instruction()),
&device_info);
EXPECT_EQ(analysis.GetEmitterFusionKind(),
HloFusionAnalysis::EmitterFusionKind::kReduction);
}
TEST_F(HloFusionAnalysisTest, ReductionEpilogueFusion) {
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"(
HloModule test_module
add {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT add = f32[] add(p0, p1)
}
fused_computation {
%p0 = f32[1024] parameter(0)
%p1 = f32[] parameter(1)
%reduce = f32[] reduce(%p0, %p1), dimensions={0}, to_apply=add
ROOT %negate = f32[] negate(%reduce)
}
ENTRY main {
%p0 = f32[1024] parameter(0)
%p1 = f32[] parameter(1)
ROOT %fusion = f32[] fusion(%p0, %p1), kind=kInput, calls=fused_computation
})"));
auto device_info = TestGpuDeviceInfo::RTXA6000DeviceInfo();
auto* root = module->entry_computation()->root_instruction();
auto analysis = HloFusionAnalysis::Create(
FusionBackendConfig::default_instance(),
HloFusionAdaptor::ForInstruction(root), &device_info);
EXPECT_EQ(analysis.GetEmitterFusionKind(),
HloFusionAnalysis::EmitterFusionKind::kReduction);
}
TEST_F(HloFusionAnalysisTest, ReductionEpilogueFusionPartiallyFused) {
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"(
HloModule test_module
add {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT add = f32[] add(p0, p1)
}
fusion {
%p0 = f32[1024] parameter(0)
%p1 = f32[] parameter(1)
ROOT %reduce = f32[] reduce(%p0, %p1), dimensions={0}, to_apply=add
}
ENTRY main {
%p0 = f32[1024] parameter(0)
%p1 = f32[] parameter(1)
%fusion = f32[] fusion(%p0, %p1), kind=kInput, calls=fusion
ROOT %negate = f32[] negate(%fusion)
})"));
auto device_info = TestGpuDeviceInfo::RTXA6000DeviceInfo();
auto* root = module->entry_computation()->root_instruction();
auto analysis =
HloFusionAnalysis::Create(*root->operand(0), *root, device_info);
EXPECT_EQ(analysis.GetEmitterFusionKind(),
HloFusionAnalysis::EmitterFusionKind::kReduction);
}
TEST_F(HloFusionAnalysisTest, ReductionEpilogueFusionPartiallyFusedInConsumer) {
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"(
HloModule test_module
add {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT add = f32[] add(p0, p1)
}
fusion {
%p0 = f32[] parameter(0)
ROOT %negate = f32[] negate(%p0)
}
ENTRY main {
%p0 = f32[1024] parameter(0)
%p1 = f32[] parameter(1)
%reduce = f32[] reduce(%p0, %p1), dimensions={0}, to_apply=add
ROOT %fusion = f32[] fusion(%reduce), kind=kInput, calls=fusion
})"));
auto device_info = TestGpuDeviceInfo::RTXA6000DeviceInfo();
auto* root = module->entry_computation()->root_instruction();
auto analysis =
HloFusionAnalysis::Create(*root->operand(0), *root, device_info);
EXPECT_EQ(analysis.GetEmitterFusionKind(),
HloFusionAnalysis::EmitterFusionKind::kReduction);
}
TEST_F(HloFusionAnalysisTest, ReductionEpilogueFusionPartiallyFusedInBoth) {
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"(
HloModule test_module
add {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT add = f32[] add(p0, p1)
}
fusion.1 {
%p0 = f32[1024] parameter(0)
%p1 = f32[] parameter(1)
ROOT %reduce = f32[] reduce(%p0, %p1), dimensions={0}, to_apply=add
}
fusion.2 {
%p0 = f32[] parameter(0)
ROOT %negate = f32[] negate(%p0)
}
ENTRY main {
%p0 = f32[1024] parameter(0)
%p1 = f32[] parameter(1)
%fusion.1 = f32[] fusion(%p0, %p1), kind=kInput, calls=fusion.1
ROOT %fusion.2 = f32[] fusion(%fusion.1), kind=kInput, calls=fusion.2
})"));
auto device_info = TestGpuDeviceInfo::RTXA6000DeviceInfo();
auto* root = module->entry_computation()->root_instruction();
auto analysis =
HloFusionAnalysis::Create(*root->operand(0), *root, device_info);
EXPECT_EQ(analysis.GetEmitterFusionKind(),
HloFusionAnalysis::EmitterFusionKind::kReduction);
}
TEST_F(HloFusionAnalysisTest, ReduceMultiOutputFusionWithTransposeBitcast) {
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"(
HloModule test_module
add {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT add = f32[] add(p0, p1)
}
fusion {
%p0 = f32[1024, 512]{1,0} parameter(0)
%p1 = f32[] parameter(1)
%reduce = f32[1024]{0} reduce(%p0, %p1), dimensions={1}, to_apply=add
%bitcast = f32[512, 1024]{0,1} bitcast(%p0)
ROOT res = (f32[1024]{0}, f32[512, 1024]{0,1}) tuple(%reduce, %bitcast)
}
ENTRY main {
%p0 = f32[1024, 512]{1,0} parameter(0)
%p1 = f32[] parameter(1)
ROOT %fusion = (f32[1024]{0}, f32[512, 1024]{0,1}) fusion(%p0, %p1), kind=kInput, calls=fusion
})"));
auto device_info = TestGpuDeviceInfo::RTXA6000DeviceInfo();
auto* root = module->entry_computation()->root_instruction();
auto analysis = HloFusionAnalysis::Create(*root, device_info);
EXPECT_EQ(analysis.GetEmitterFusionKind(),
HloFusionAnalysis::EmitterFusionKind::kReduction);
}
TEST_F(HloFusionAnalysisTest, InvalidReduceMultiOutputFusion) {
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"(
HloModule test_module
add {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT add = f32[] add(p0, p1)
}
fusion {
%p0 = f32[1024, 1024]{1,0} parameter(0)
%p1 = f32[] parameter(1)
%reduce = f32[1024]{0} reduce(%p0, %p1), dimensions={0}, to_apply=add
%reduce2 = f32[1024]{0} reduce(%p0, %p1), dimensions={1}, to_apply=add
ROOT res = (f32[1024]{0}, f32[1024]{0}) tuple(reduce, reduce2)
}
ENTRY main {
%p0 = f32[1024, 1024]{1,0} parameter(0)
%p1 = f32[] parameter(1)
ROOT %fusion = (f32[1024]{0}, f32[1024]{0}) fusion(%p0, %p1), kind=kInput, calls=fusion
})"));
auto device_info = TestGpuDeviceInfo::RTXA6000DeviceInfo();
auto* root = module->entry_computation()->root_instruction();
auto analysis = HloFusionAnalysis::Create(*root, device_info);
EXPECT_EQ(analysis.GetEmitterFusionKind(),
HloFusionAnalysis::EmitterFusionKind::kLoop);
}
TEST_F(HloFusionAnalysisTest, InvalidDevice) {
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"(
HloModule test_module
add {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT add = f32[] add(p0, p1)
}
ENTRY main {
%p0 = f32[1024,128] parameter(0)
%p1 = f32[] parameter(1)
%reduce = f32[128] reduce(%p0, %p1), dimensions={0}, to_apply=add
ROOT %bitcast = s32[128] bitcast(%reduce)
})"));
stream_executor::GpuDeviceInfoProto device_info_proto;
stream_executor::DeviceDescription device_info(device_info_proto);
auto* root = module->entry_computation()->root_instruction();
auto analysis_fused =
HloFusionAnalysis::Create(*root->operand(0), *root, device_info);
EXPECT_EQ(analysis_fused.GetEmitterFusionKind(),
HloFusionAnalysis::EmitterFusionKind::kReduction);
}
TEST_F(HloFusionAnalysisTest, ConcatFusion) {
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"(
HloModule test_module
fused_computation {
%p0 = f32[128] parameter(0)
%p1 = f32[128] parameter(1)
%add = f32[128] add(p0, p0)
%concat = f32[256] concatenate(%add, %p1), dimensions={0}
ROOT %negate = f32[256] negate(%concat)
}
ENTRY main {
%p0 = f32[128] parameter(0)
%p1 = f32[128] parameter(1)
ROOT %fusion = f32[256] fusion(%p0, %p1), kind=kInput, calls=fused_computation
})"));
auto device_info = TestGpuDeviceInfo::RTXA6000DeviceInfo();
auto* root = module->entry_computation()->root_instruction();
auto analysis = HloFusionAnalysis::Create(
FusionBackendConfig::default_instance(),
HloFusionAdaptor::ForInstruction(root), &device_info);
EXPECT_EQ(analysis.GetEmitterFusionKind(),
HloFusionAnalysis::EmitterFusionKind::kConcatenate);
}
TEST_F(HloFusionAnalysisTest, ExtractValidGpuBackendConfig) {
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"(
HloModule module
fused_computation.1 {
%x = s32[64] parameter(0)
%y = s32[64] parameter(1)
ROOT %root = s32[64] add(%x, %y)
}
fused_computation.2 {
%x = s32[64] parameter(0)
%y = s32[64] parameter(1)
ROOT %root = s32[64] add(%x, %y)
}
ENTRY entry {
%x = s32[64] parameter(0)
%y = s32[64] parameter(1)
%fusion.1 = s32[64] fusion(%x, %y), kind=kLoop, calls=fused_computation.1, backend_config={"fusion_backend_config": {kind: "__triton"}}
ROOT %fusion.2 = s32[64] fusion(%fusion.1, %y), kind=kLoop, calls=fused_computation.2
})"));
auto device_info = TestGpuDeviceInfo::RTXA6000DeviceInfo();
auto* consumer = module->entry_computation()->root_instruction();
auto* producer = consumer->operand(0);
auto producer_analysis = HloFusionAnalysis::Create(*producer, device_info);
EXPECT_EQ(producer_analysis.fusion_backend_config().kind(),
kTritonFusionKind);
auto producer_consumer_analysis =
HloFusionAnalysis::Create(*producer, *consumer, device_info);
EXPECT_EQ(producer_consumer_analysis.fusion_backend_config().kind(),
kTritonFusionKind);
}
TEST_F(HloFusionAnalysisTest,
InvalidGpuBackendConfig_SingleInstruction_Ignored) {
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"(
HloModule module
ENTRY entry {
%x = s32[64,64,64] parameter(0)
%y = s32[64,64,64] parameter(1)
ROOT %root = s32[64,128,64] concatenate(x, y), dimensions={1}, backend_config={"outer_dimension_partitions": ["1"]}
})"));
auto device_info = TestGpuDeviceInfo::RTXA6000DeviceInfo();
auto* root = module->entry_computation()->root_instruction();
auto analysis = HloFusionAnalysis::Create(*root, device_info);
EXPECT_TRUE(
protobuf_util::ProtobufEquals(analysis.fusion_backend_config(),
FusionBackendConfig::default_instance()));
}
TEST_F(HloFusionAnalysisTest,
InvalidGpuBackendConfig_ProducerConsumer_Ignored) {
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"(
HloModule module
fused_computation {
%x = s32[64] parameter(0)
%y = s32[64] parameter(1)
ROOT %root = s32[64] add(%x, %y)
}
ENTRY entry {
%x = s32[64] parameter(0)
%y = s32[64] parameter(1)
%fusion = s32[64] fusion(%x, %y), kind=kLoop, calls=fused_computation, backend_config={"invalid_field": "some_value"}
ROOT %root = s32[128] concatenate(fusion, y), dimensions={0}, backend_config={"invalid_field": "some_value"}
})"));
auto device_info = TestGpuDeviceInfo::RTXA6000DeviceInfo();
auto* consumer = module->entry_computation()->root_instruction();
auto* producer = consumer->operand(0);
auto analysis = HloFusionAnalysis::Create(*producer, *consumer, device_info);
EXPECT_TRUE(
protobuf_util::ProtobufEquals(analysis.fusion_backend_config(),
FusionBackendConfig::default_instance()));
}
}  // namespace
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/hlo_fusion_analysis.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/hlo_fusion_analysis_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
db830d9b-54d6-4e33-a5a4-45840555a2d1 | cpp | tensorflow/tensorflow | fuse_add_to_conv | tensorflow/lite/delegates/gpu/common/transformations/fuse_add_to_conv.cc | tensorflow/lite/delegates/gpu/common/transformations/fuse_add_to_conv_test.cc | #include "tensorflow/lite/delegates/gpu/common/transformations/fuse_add_to_conv.h"
#include <any>
#include <memory>
#include <string>
#include <variant>
#include <vector>
#include "absl/types/any.h"
#include "absl/types/variant.h"
#include "tensorflow/lite/delegates/gpu/common/data_type.h"
#include "tensorflow/lite/delegates/gpu/common/model.h"
#include "tensorflow/lite/delegates/gpu/common/model_transformer.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/shape.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/tensor.h"
namespace tflite {
namespace gpu {
namespace {
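// Adds a scalar or per-channel addend into the convolution bias,
// materializing a zero-initialized bias first if the op had none.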
void FuseBiasWithAddAttributes(const ElementwiseAttributes& add_attr,
const int channels,
Tensor<Linear, DataType::FLOAT32>* bias) {
auto add = std::get_if<Tensor<Linear, DataType::FLOAT32>>(&add_attr.param);
auto add_scalar = std::get_if<float>(&add_attr.param);
if (bias->data.empty()) {
*bias = MakeZeroTensor<Linear, DataType::FLOAT32>(Linear(channels));
}
for (int d = 0; d < channels; ++d) {
bias->data[d] += add ? add->data[d] : *add_scalar;
}
}
class MergeConvolutionWithAdd : public SequenceTransformation {
public:
int ExpectedSequenceLength() const final { return 2; }
TransformResult ApplyToNodesSequence(const std::vector<Node*>& sequence,
GraphFloat32* graph) final {
auto& conv_node = *sequence[0];
if (graph->FindInputs(conv_node.id).size() != 1) {
return {TransformStatus::DECLINED,
"This fusion is only applicable to ops with one runtime input."};
}
auto& add_node = *sequence[1];
if (add_node.operation.type != ToString(OperationType::ADD)) {
return {TransformStatus::SKIPPED, ""};
}
ElementwiseAttributes add_attr =
std::any_cast<ElementwiseAttributes>(add_node.operation.attributes);
if (!std::holds_alternative<Tensor<Linear, DataType::FLOAT32>>(
add_attr.param) &&
!std::holds_alternative<float>(add_attr.param)) {
return {TransformStatus::DECLINED,
"This fuse applicable only for broadcast or scalar addition."};
}
if (conv_node.operation.type == ToString(OperationType::CONVOLUTION_2D)) {
Convolution2DAttributes* conv_attr =
std::any_cast<Convolution2DAttributes>(
&conv_node.operation.attributes);
FuseConvolution2DWithAdd(add_attr, conv_attr);
} else if (conv_node.operation.type ==
ToString(OperationType::CONVOLUTION_TRANSPOSED)) {
ConvolutionTransposedAttributes* conv_attr =
std::any_cast<ConvolutionTransposedAttributes>(
&conv_node.operation.attributes);
FuseConvolutionTransposedWithAdd(add_attr, conv_attr);
} else if (conv_node.operation.type ==
ToString(OperationType::DEPTHWISE_CONVOLUTION)) {
DepthwiseConvolution2DAttributes* conv_attr =
std::any_cast<DepthwiseConvolution2DAttributes>(
&conv_node.operation.attributes);
FuseDepthwiseConvolution2DWithAdd(add_attr, conv_attr);
} else if (conv_node.operation.type ==
ToString(OperationType::FULLY_CONNECTED)) {
FullyConnectedAttributes* conv_attr =
std::any_cast<FullyConnectedAttributes>(
&conv_node.operation.attributes);
FuseFullyConnectedWithAdd(add_attr, conv_attr);
} else {
return {TransformStatus::SKIPPED, ""};
}
absl::Status status = RemoveFollowingNode(graph, &add_node, &conv_node);
if (!status.ok()) {
return {TransformStatus::INVALID,
"Unable to remove add node after convolution: " +
std::string(status.message())};
}
return {TransformStatus::APPLIED, ""};
}
};
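// Folding an addition that precedes the convolution uses linearity:
// conv(x + c) = conv(x) + conv(c), and for a per-channel constant c the
// second term is a constant per output channel:
//   bias[d] += sum over s, ky, kx of c[s] * W[d, ky, kx, s]
// This only holds when the convolution never reads padded (zero-filled)
// elements, which is why MergeAddWithConvolution below declines fusions
// with non-zero padding.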
void FuseAddWithConvolution2D(const ElementwiseAttributes& add_attr,
Convolution2DAttributes* attr) {
auto add = std::get_if<Tensor<Linear, DataType::FLOAT32>>(&add_attr.param);
auto add_scalar = std::get_if<float>(&add_attr.param);
if (attr->bias.data.empty()) {
attr->bias = MakeZeroTensor<Linear, DataType::FLOAT32>(
Linear(attr->weights.shape.o));
}
for (int d = 0; d < attr->weights.shape.o; ++d) {
float sum = 0.0f;
for (int s = 0; s < attr->weights.shape.i; ++s) {
const float add_value = add ? add->data[s] : *add_scalar;
for (int k_y = 0; k_y < attr->weights.shape.h; ++k_y) {
for (int k_x = 0; k_x < attr->weights.shape.w; ++k_x) {
const int index = attr->weights.shape.LinearIndex({{d, k_y, k_x, s}});
sum += add_value * attr->weights.data[index];
}
}
}
attr->bias.data[d] += sum;
}
}
class MergeAddWithConvolution : public SequenceTransformation {
public:
int ExpectedSequenceLength() const final { return 2; }
TransformResult ApplyToNodesSequence(const std::vector<Node*>& sequence,
GraphFloat32* graph) final {
auto& conv_node = *sequence[1];
if (graph->FindInputs(conv_node.id).size() != 1) {
return {TransformStatus::DECLINED,
"This fusion is only applicable to ops with one runtime input."};
}
auto& add_node = *sequence[0];
if (add_node.operation.type != ToString(OperationType::ADD)) {
return {TransformStatus::SKIPPED, ""};
}
ElementwiseAttributes add_attr =
std::any_cast<ElementwiseAttributes>(add_node.operation.attributes);
if (!std::holds_alternative<Tensor<Linear, DataType::FLOAT32>>(
add_attr.param) &&
!std::holds_alternative<float>(add_attr.param)) {
return {TransformStatus::DECLINED,
"This fuse applicable only for broadcast or scalar addition."};
}
if (conv_node.operation.type == ToString(OperationType::CONVOLUTION_2D)) {
Convolution2DAttributes* conv_attr =
std::any_cast<Convolution2DAttributes>(
&conv_node.operation.attributes);
if (conv_attr->groups != 1) {
return {TransformStatus::DECLINED,
"This fuse not applicable for grouped convolution."};
}
if (conv_attr->padding.appended.w != 0 ||
conv_attr->padding.appended.h != 0 ||
conv_attr->padding.prepended.w != 0 ||
conv_attr->padding.prepended.h != 0) {
return {TransformStatus::DECLINED,
"This fuse applicable only for convolution that do not read "
"out of bound elements."};
}
FuseAddWithConvolution2D(add_attr, conv_attr);
} else {
return {TransformStatus::SKIPPED, ""};
}
absl::Status status = RemovePrecedingNode(graph, &add_node, &conv_node);
if (!status.ok()) {
return {TransformStatus::INVALID,
"Unable to remove add node before convolution: " +
std::string(status.message())};
}
return {TransformStatus::APPLIED, ""};
}
};
}  // namespace
std::unique_ptr<SequenceTransformation> NewMergeConvolutionWithAdd() {
return std::make_unique<MergeConvolutionWithAdd>();
}
std::unique_ptr<SequenceTransformation> NewMergeAddWithConvolution() {
return std::make_unique<MergeAddWithConvolution>();
}
void FuseConvolution2DWithAdd(const ElementwiseAttributes& add_attr,
Convolution2DAttributes* attr) {
FuseBiasWithAddAttributes(add_attr, attr->weights.shape.o, &attr->bias);
}
void FuseDepthwiseConvolution2DWithAdd(const ElementwiseAttributes& add_attr,
DepthwiseConvolution2DAttributes* attr) {
FuseBiasWithAddAttributes(
add_attr, attr->weights.shape.o * attr->weights.shape.i, &attr->bias);
}
void FuseConvolutionTransposedWithAdd(const ElementwiseAttributes& add_attr,
ConvolutionTransposedAttributes* attr) {
FuseBiasWithAddAttributes(add_attr, attr->weights.shape.o, &attr->bias);
}
void FuseFullyConnectedWithAdd(const ElementwiseAttributes& add_attr,
FullyConnectedAttributes* attr) {
FuseBiasWithAddAttributes(add_attr, attr->weights.shape.o, &attr->bias);
}
}  // namespace gpu
} | #include "tensorflow/lite/delegates/gpu/common/transformations/fuse_add_to_conv.h"
#include <any>
#include <memory>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/types/any.h"
#include "tensorflow/lite/delegates/gpu/common/data_type.h"
#include "tensorflow/lite/delegates/gpu/common/model.h"
#include "tensorflow/lite/delegates/gpu/common/model_transformer.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/shape.h"
#include "tensorflow/lite/delegates/gpu/common/tensor.h"
using ::testing::FloatNear;
using ::testing::Pointwise;
namespace tflite {
namespace gpu {
namespace {
TEST(MergeConvolutionWithAddTest, Smoke) {
GraphFloat32 graph;
auto input = graph.NewValue();
input->tensor.shape = BHWC(1, 4, 4, 8);
Convolution2DAttributes conv_attr;
conv_attr.padding.prepended = HW(0, 0);
conv_attr.padding.appended = HW(0, 0);
conv_attr.strides = HW(1, 1);
conv_attr.dilations = HW(1, 1);
conv_attr.weights.shape = OHWI(16, 3, 2, 8);
conv_attr.weights.data.resize(conv_attr.weights.shape.DimensionsProduct());
conv_attr.bias.shape = Linear(16);
conv_attr.bias.data.resize(16);
Tensor<Linear, DataType::FLOAT32> add_tensor;
add_tensor.shape = Linear(16);
add_tensor.data.resize(16);
ElementwiseAttributes add_attr;
add_attr.param = add_tensor;
auto conv_node = graph.NewNode();
conv_node->operation.type = ToString(OperationType::CONVOLUTION_2D);
conv_node->operation.attributes = conv_attr;
auto add_node = graph.NewNode();
add_node->operation.type = ToString(OperationType::ADD);
add_node->operation.attributes = add_attr;
ASSERT_TRUE(graph.AddConsumer(conv_node->id, input->id).ok());
Value* output = nullptr;
ASSERT_TRUE(AddOutput(&graph, add_node, &output).ok());
output->tensor.shape = BHWC(1, 4, 4, 16);
Value* link1 = nullptr;
ASSERT_TRUE(ConnectTwoNodes(&graph, conv_node, add_node, &link1).ok());
link1->tensor.shape = BHWC(1, 4, 4, 16);
ASSERT_EQ(2, graph.nodes().size());
ASSERT_EQ(3, graph.values().size());
auto transformation = NewMergeConvolutionWithAdd();
ModelTransformer transformer(&graph);
transformer.Apply("merge_convolution_with_add", transformation.get());
EXPECT_EQ(1, graph.nodes().size());
EXPECT_EQ(2, graph.values().size());
EXPECT_EQ(ToString(OperationType::CONVOLUTION_2D),
graph.nodes()[0]->operation.type);
}
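// A post-conv add folds directly into the bias:
// {1.1, 1.2} + {0.3, 0.7} = {1.4, 1.9}; the weights are unchanged.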
TEST(FuseAddAfterConvolution2DTest, Smoke) {
Convolution2DAttributes attr;
attr.weights.shape = OHWI(2, 1, 2, 2);
attr.weights.data = {0.1f, 0.2f, 0.3f, 0.4f, 0.5f, 0.6f, 0.7f, 0.8f};
attr.bias.shape = Linear(2);
attr.bias.data = {1.1f, 1.2f};
Tensor<Linear, DataType::FLOAT32> add_tensor;
add_tensor.shape = Linear(2);
add_tensor.data = {0.3f, 0.7f};
ElementwiseAttributes add_attr;
add_attr.param = add_tensor;
FuseConvolution2DWithAdd(add_attr, &attr);
EXPECT_THAT(attr.weights.data,
Pointwise(FloatNear(1e-6),
{0.1f, 0.2f, 0.3f, 0.4f, 0.5f, 0.6f, 0.7f, 0.8f}));
EXPECT_THAT(attr.bias.data, Pointwise(FloatNear(1e-6), {1.4f, 1.9f}));
}
TEST(FuseAddAfterDepthwiseConvolution2DTest, Smoke) {
DepthwiseConvolution2DAttributes attr;
attr.weights.shape = OHWI(2, 1, 2, 2);
attr.weights.data = {0.1f, 0.2f, 0.3f, 0.4f, 0.5f, 0.6f, 0.7f, 0.8f};
attr.bias.shape = Linear(4);
attr.bias.data = {1.1f, 1.2f, 1.3f, 1.4f};
Tensor<Linear, DataType::FLOAT32> add_tensor;
add_tensor.shape = Linear(4);
add_tensor.data = {0.3f, 0.7f, 0.5f, 0.1f};
ElementwiseAttributes add_attr;
add_attr.param = add_tensor;
FuseDepthwiseConvolution2DWithAdd(add_attr, &attr);
EXPECT_THAT(attr.weights.data,
Pointwise(FloatNear(1e-6),
{0.1f, 0.2f, 0.3f, 0.4f, 0.5f, 0.6f, 0.7f, 0.8f}));
EXPECT_THAT(attr.bias.data,
Pointwise(FloatNear(1e-6), {1.4f, 1.9f, 1.8f, 1.5f}));
}
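// Same check for transposed convolution.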
TEST(FuseAddAfterConvolutionTransposedTest, Smoke) {
ConvolutionTransposedAttributes attr;
attr.weights.shape = OHWI(2, 1, 2, 2);
attr.weights.data = {0.1f, 0.2f, 0.3f, 0.4f, 0.5f, 0.6f, 0.7f, 0.8f};
attr.bias.shape = Linear(2);
attr.bias.data = {1.1f, 1.2f};
Tensor<Linear, DataType::FLOAT32> add_tensor;
add_tensor.shape = Linear(2);
add_tensor.data = {0.3f, 0.7f};
ElementwiseAttributes add_attr;
add_attr.param = add_tensor;
FuseConvolutionTransposedWithAdd(add_attr, &attr);
EXPECT_THAT(attr.weights.data,
Pointwise(FloatNear(1e-6),
{0.1f, 0.2f, 0.3f, 0.4f, 0.5f, 0.6f, 0.7f, 0.8f}));
EXPECT_THAT(attr.bias.data, Pointwise(FloatNear(1e-6), {1.4f, 1.9f}));
}
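// Same check for fully connected: the bias becomes {1.1f + 0.3f, 1.2f + 0.7f}.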
TEST(FuseAddAfterFullyConnectedTest, Smoke) {
FullyConnectedAttributes attr;
attr.weights.shape = OHWI(2, 1, 1, 2);
attr.weights.data = {0.1f, 0.2f, 0.3f, 0.4f};
attr.bias.shape = Linear(2);
attr.bias.data = {1.1f, 1.2f};
Tensor<Linear, DataType::FLOAT32> add_tensor;
add_tensor.shape = Linear(2);
add_tensor.data = {0.3f, 0.7f};
ElementwiseAttributes add_attr;
add_attr.param = add_tensor;
FuseFullyConnectedWithAdd(add_attr, &attr);
EXPECT_THAT(attr.weights.data,
Pointwise(FloatNear(1e-6), {0.1f, 0.2f, 0.3f, 0.4f}));
EXPECT_THAT(attr.bias.data, Pointwise(FloatNear(1e-6), {1.4f, 1.9f}));
}
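// Here the Add precedes the convolution, so folding it changes the bias by
// sum_i add[i] * sum_hw weights[o, h, w, i]; with these constants the new
// bias is {2.7f, 5.2f}, which the end of the test verifies.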
TEST(MergeAddWithConvolutionTest, Smoke) {
GraphFloat32 graph;
auto input = graph.NewValue();
input->tensor.shape = BHWC(1, 4, 4, 2);
Tensor<Linear, DataType::FLOAT32> add_tensor;
add_tensor.shape = Linear(2);
add_tensor.data = {1.0f, 2.0f};
ElementwiseAttributes add_attr;
add_attr.param = add_tensor;
Convolution2DAttributes conv_attr;
conv_attr.padding.prepended = HW(0, 0);
conv_attr.padding.appended = HW(0, 0);
conv_attr.strides = HW(1, 1);
conv_attr.dilations = HW(1, 1);
conv_attr.weights.shape = OHWI(2, 1, 2, 2);
conv_attr.weights.data = {0.1f, 0.2f, 0.3f, 0.4f, 0.5f, 0.6f, 0.7f, 0.8f};
conv_attr.bias.shape = Linear(2);
conv_attr.bias.data = {1.1f, 1.2f};
auto conv_node = graph.NewNode();
conv_node->operation.type = ToString(OperationType::CONVOLUTION_2D);
conv_node->operation.attributes = conv_attr;
auto add_node = graph.NewNode();
add_node->operation.type = ToString(OperationType::ADD);
add_node->operation.attributes = add_attr;
ASSERT_TRUE(graph.AddConsumer(add_node->id, input->id).ok());
Value* output = nullptr;
ASSERT_TRUE(AddOutput(&graph, conv_node, &output).ok());
output->tensor.shape = BHWC(1, 4, 3, 2);
Value* link1 = nullptr;
ASSERT_TRUE(ConnectTwoNodes(&graph, add_node, conv_node, &link1).ok());
link1->tensor.shape = BHWC(1, 4, 4, 2);
ASSERT_EQ(2, graph.nodes().size());
ASSERT_EQ(3, graph.values().size());
auto transformation = NewMergeAddWithConvolution();
ModelTransformer transformer(&graph);
transformer.Apply("merge_add_with_convolution", transformation.get());
EXPECT_EQ(1, graph.nodes().size());
EXPECT_EQ(2, graph.values().size());
EXPECT_EQ(ToString(OperationType::CONVOLUTION_2D),
graph.nodes()[0]->operation.type);
Convolution2DAttributes* conv_attr_new =
std::any_cast<Convolution2DAttributes>(
&graph.nodes()[0]->operation.attributes);
EXPECT_THAT(conv_attr_new->bias.data,
Pointwise(FloatNear(1e-6), {2.7f, 5.2f}));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/common/transformations/fuse_add_to_conv.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/common/transformations/fuse_add_to_conv_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
bf5c041b-feb6-4665-97af-0b6a497e1521 | cpp | tensorflow/tensorflow | identify_l2_normalization | tensorflow/lite/toco/graph_transformations/identify_l2_normalization.cc | tensorflow/lite/toco/graph_transformations/tests/identify_l2_normalization_test.cc | #include <cmath>
#include <memory>
#include <string>
#include <unordered_map>
#include <vector>
#include "absl/status/status.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/lite/toco/graph_transformations/graph_transformations.h"
#include "tensorflow/lite/toco/model.h"
#include "tensorflow/lite/toco/tooling_util.h"
namespace toco {
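// Pattern matcher for L2 normalization: recognizes
//   x / sqrt(sum(square(x)))  or  x * rsqrt(sum(square(x))),
// optionally with a small-constant Add (or Maximum) before the square root,
// and replaces the whole subgraph with a single L2Normalization operator.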
::tensorflow::Status IdentifyL2Normalization::Run(Model* model,
std::size_t op_index,
bool* modified) {
*modified = false;
const auto div_it = model->operators.begin() + op_index;
const auto* div_or_mul_op = div_it->get();
OperatorType expected_op_type_producing_div_or_mul_input;
if (div_or_mul_op->type == OperatorType::kDiv) {
expected_op_type_producing_div_or_mul_input = OperatorType::kSqrt;
} else if (div_or_mul_op->type == OperatorType::kMul) {
expected_op_type_producing_div_or_mul_input = OperatorType::kRsqrt;
} else {
return absl::OkStatus();
}
CHECK_EQ(div_or_mul_op->inputs.size(), 2);
Operator* op_producing_div_or_mul_input[2] = {
GetOpWithOutput(*model, div_or_mul_op->inputs[0]),
GetOpWithOutput(*model, div_or_mul_op->inputs[1]),
};
if (!op_producing_div_or_mul_input[1] ||
op_producing_div_or_mul_input[1]->type !=
expected_op_type_producing_div_or_mul_input) {
return absl::OkStatus();
}
Operator* sqrt_or_rsqrt_op = op_producing_div_or_mul_input[1];
CHECK_EQ(sqrt_or_rsqrt_op->inputs.size(), 1);
Operator* op_producing_sqrt_or_rsqrt_input =
GetOpWithOutput(*model, sqrt_or_rsqrt_op->inputs[0]);
if (!op_producing_sqrt_or_rsqrt_input) {
return absl::OkStatus();
}
Operator* add_op = nullptr;
Operator* op_producing_add_input = nullptr;
if (op_producing_sqrt_or_rsqrt_input->type == OperatorType::kAdd ||
op_producing_sqrt_or_rsqrt_input->type == OperatorType::kMaximum) {
add_op = op_producing_sqrt_or_rsqrt_input;
bool add_can_be_removed = false;
CHECK_EQ(op_producing_sqrt_or_rsqrt_input->inputs.size(), 2);
for (int i = 0; i < 2; i++) {
const auto& input_array =
model->GetArray(op_producing_sqrt_or_rsqrt_input->inputs[i]);
if (!input_array.buffer) {
continue;
}
if (input_array.buffer->type != ArrayDataType::kFloat) {
continue;
}
if (RequiredBufferSizeForShape(input_array.shape()) != 1) {
continue;
}
const auto& input_float_data =
input_array.GetBuffer<ArrayDataType::kFloat>().data;
if (std::abs(input_float_data[0]) > 1e-3f) {
continue;
}
add_can_be_removed = true;
op_producing_add_input = GetOpWithOutput(*model, add_op->inputs[1 - i]);
break;
}
if (!add_can_be_removed) {
AddMessageF(
"Giving up trying to identify L2Normalization subgraph "
" because the operator producing the input to the square root, %s,"
", does not match the expected pattern",
LogName(*op_producing_sqrt_or_rsqrt_input));
return absl::OkStatus();
}
}
Operator* sum_op =
add_op ? op_producing_add_input : op_producing_sqrt_or_rsqrt_input;
  if (sum_op == nullptr || sum_op->type != OperatorType::kSum) {
AddMessageF(
"Giving up trying to identify L2Normalization subgraph: "
"expected Sum op, got %s",
LogName(*sum_op));
return absl::OkStatus();
}
Operator* square_op = GetOpWithOutput(*model, sum_op->inputs[0]);
  if (square_op == nullptr || square_op->type != OperatorType::kSquare) {
AddMessageF(
"Giving up trying to identify L2Normalization subgraph: "
"expected Square op, got %s",
LogName(*square_op));
return absl::OkStatus();
}
CHECK_EQ(square_op->inputs.size(), 1);
if (square_op->inputs[0] != div_or_mul_op->inputs[0]) {
AddMessageF(
"Giving up trying to identify L2Normalization subgraph: %s does not "
"take the same input as the Mul/Div node",
LogName(*square_op));
return absl::OkStatus();
}
auto* l2norm_op = new L2NormalizationOperator;
l2norm_op->inputs = {div_or_mul_op->inputs[0]};
l2norm_op->outputs = div_or_mul_op->outputs;
model->operators.emplace(div_it, l2norm_op);
AddMessageF("Creating %s replacing equivalent subgraph", LogName(*l2norm_op));
DeleteOpAndArrays(model, square_op);
DeleteOpAndArrays(model, sum_op);
if (add_op) {
DeleteOpAndArrays(model, add_op);
}
DeleteOpAndArrays(model, sqrt_or_rsqrt_op);
DeleteOpAndArrays(model, div_or_mul_op);
*modified = true;
return absl::OkStatus();
}
} | #include <tuple>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/lite/toco/graph_transformations/graph_transformations.h"
#include "tensorflow/lite/toco/model.h"
namespace toco {
namespace {
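// Builds either the Div/Sqrt flavour (div_square == true) or the Mul/Rsqrt
// flavour of the L2-normalization subgraph, runs the transformation once,
// and asserts that every operator of the matched pattern has been removed.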
void RunIdentifyL2Normalization(const std::vector<float>& input,
const std::vector<int>& input_shape,
const std::vector<int>& output_shape,
const bool div_square = false) {
Model model;
Array& input0 = model.GetOrCreateArray("input0");
Array& output = model.GetOrCreateArray("output");
*input0.mutable_shape()->mutable_dims() = input_shape;
input0.data_type = ArrayDataType::kFloat;
input0.GetMutableBuffer<ArrayDataType::kFloat>().data = input;
*output.mutable_shape()->mutable_dims() = output_shape;
auto sq_op = new TensorFlowSquareOperator;
sq_op->inputs = {"input0"};
sq_op->outputs = {"output"};
Array& sumoutput = model.GetOrCreateArray("Sumoutput");
*sumoutput.mutable_shape()->mutable_dims() = output_shape;
auto sum_op = new TensorFlowSumOperator;
sum_op->inputs = {sq_op->outputs[0]};
sum_op->outputs = {"Sumoutput"};
if (div_square) {
Array& sqrtoutput = model.GetOrCreateArray("squarertoutput");
*sqrtoutput.mutable_shape()->mutable_dims() = output_shape;
auto sqrt_op = new TensorFlowSqrtOperator;
sqrt_op->inputs = {sum_op->outputs[0]};
sqrt_op->outputs = {"squarertoutput"};
Array& divoutput = model.GetOrCreateArray("Divoutput");
*divoutput.mutable_shape()->mutable_dims() = output_shape;
auto div_op = new DivOperator;
div_op->inputs = {"input0", sqrt_op->outputs[0]};
div_op->outputs = {"Divoutput"};
model.operators.push_back(std::unique_ptr<Operator>(div_op));
model.operators.push_back(std::unique_ptr<Operator>(sqrt_op));
model.operators.push_back(std::unique_ptr<Operator>(sum_op));
model.operators.push_back(std::unique_ptr<Operator>(sq_op));
} else {
Array& rsqoutput = model.GetOrCreateArray("Rsquareoutput");
*rsqoutput.mutable_shape()->mutable_dims() = output_shape;
auto rsqrt_op = new TensorFlowRsqrtOperator;
rsqrt_op->inputs = {sum_op->outputs[0]};
rsqrt_op->outputs = {"Rsquareoutput"};
Array& muloutput = model.GetOrCreateArray("Muloutput");
*muloutput.mutable_shape()->mutable_dims() = output_shape;
auto mul_op = new MulOperator;
mul_op->inputs = {"input0", rsqrt_op->outputs[0]};
mul_op->outputs = {"Muloutput"};
model.operators.push_back(std::unique_ptr<Operator>(mul_op));
model.operators.push_back(std::unique_ptr<Operator>(rsqrt_op));
model.operators.push_back(std::unique_ptr<Operator>(sum_op));
model.operators.push_back(std::unique_ptr<Operator>(sq_op));
}
bool modified;
ASSERT_TRUE(IdentifyL2Normalization().Run(&model, 0, &modified).ok());
for (auto& op_it : model.operators) {
Operator* op = op_it.get();
if (div_square) {
EXPECT_FALSE(op->type == OperatorType::kDiv);
EXPECT_FALSE(op->type == OperatorType::kSqrt);
} else {
EXPECT_FALSE(op->type == OperatorType::kMul);
EXPECT_FALSE(op->type == OperatorType::kRsqrt);
}
EXPECT_FALSE(op->type == OperatorType::kAdd);
EXPECT_FALSE(op->type == OperatorType::kSquare);
}
}
TEST(IdentifyL2Normalization, MulRsqrtTest) {
  RunIdentifyL2Normalization(
      // Input data
      {3, 1, 4, 1, -5, 9, -2, 6, 5, 3, 5, 8},
      // Input shape
      {3, 4},
      // Output shape
      {3, 4},
      // Use the Mul/Rsqrt flavour of the pattern
      false);
}
TEST(IdentifyL2Normalization, DivSqrtNormTest) {
  RunIdentifyL2Normalization(
      // Input data
      {3, 1, 4, 1, -5, 9, -2, 6, 5, 3, 5, 8},
      // Input shape
      {3, 4},
      // Output shape
      {3, 4},
      // Use the Div/Sqrt flavour of the pattern
      true);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/toco/graph_transformations/identify_l2_normalization.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/toco/graph_transformations/tests/identify_l2_normalization_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
422bf0f2-be5f-4d65-a40c-dbfaaaace216 | cpp | tensorflow/tensorflow | message_wrappers | tensorflow/core/distributed_runtime/message_wrappers.cc | tensorflow/core/distributed_runtime/message_wrappers_test.cc | #include "tensorflow/core/distributed_runtime/message_wrappers.h"
#include <memory>
#include "absl/status/status.h"
#include "tensorflow/core/framework/cost_graph.pb.h"
#include "tensorflow/core/framework/step_stats.pb.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/protobuf/config.pb.h"
#include "tensorflow/core/protobuf/named_tensor.pb.h"
namespace tensorflow {
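// Parses a TensorProto into a Tensor, returning false if the dtype is out of
// range or the proto contents cannot be decoded.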
bool ParseTensorProtoToTensor(const TensorProto& tensor_proto,
Tensor* out_tensor) {
if (tensor_proto.dtype() > 0 && tensor_proto.dtype() <= DataType_MAX) {
Tensor parsed(tensor_proto.dtype());
if (parsed.FromProto(cpu_allocator(), tensor_proto)) {
*out_tensor = parsed;
return true;
}
}
return false;
}
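// InMemoryRunStepRequest keeps feeds as live Tensor objects and only
// materializes (and caches) a RunStepRequest proto when ToProto() is called.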
const string& InMemoryRunStepRequest::session_handle() const {
return session_handle_;
}
void InMemoryRunStepRequest::set_session_handle(const string& handle) {
session_handle_ = handle;
}
const string& InMemoryRunStepRequest::partial_run_handle() const {
return partial_run_handle_;
}
void InMemoryRunStepRequest::set_partial_run_handle(const string& handle) {
partial_run_handle_ = handle;
}
size_t InMemoryRunStepRequest::num_feeds() const { return feeds_.size(); }
const string& InMemoryRunStepRequest::feed_name(size_t i) const {
return feeds_[i].first;
}
Status InMemoryRunStepRequest::FeedValue(size_t i, Tensor* out_tensor) const {
*out_tensor = feeds_[i].second;
return absl::OkStatus();
}
Status InMemoryRunStepRequest::FeedValue(size_t i,
TensorProto* out_tensor) const {
feeds_[i].second.AsProtoTensorContent(out_tensor);
return absl::OkStatus();
}
void InMemoryRunStepRequest::add_feed(const string& name, const Tensor& value) {
feeds_.emplace_back(name, value);
}
size_t InMemoryRunStepRequest::num_fetches() const { return fetches_.size(); }
const string& InMemoryRunStepRequest::fetch_name(size_t i) const {
return fetches_[i];
}
void InMemoryRunStepRequest::add_fetch(const string& name) {
fetches_.push_back(name);
}
size_t InMemoryRunStepRequest::num_targets() const { return targets_.size(); }
const string& InMemoryRunStepRequest::target_name(size_t i) const {
return targets_[i];
}
void InMemoryRunStepRequest::add_target(const string& name) {
targets_.push_back(name);
}
const RunOptions& InMemoryRunStepRequest::options() const { return options_; }
RunOptions* InMemoryRunStepRequest::mutable_options() { return &options_; }
bool InMemoryRunStepRequest::store_errors_in_response_body() const {
return store_errors_in_response_body_;
}
int64_t InMemoryRunStepRequest::request_id() const {
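  // In-memory requests never cross the wire, so they carry no request id.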
return 0;
}
void InMemoryRunStepRequest::set_store_errors_in_response_body(
bool store_errors) {
store_errors_in_response_body_ = store_errors;
}
string InMemoryRunStepRequest::DebugString() const {
return ToProto().DebugString();
}
const RunStepRequest& InMemoryRunStepRequest::ToProto() const {
if (!proto_version_) {
proto_version_ = std::make_unique<RunStepRequest>();
proto_version_->set_session_handle(session_handle());
proto_version_->set_partial_run_handle(partial_run_handle());
for (size_t i = 0; i < num_feeds(); ++i) {
auto feed = proto_version_->add_feed();
feed->set_name(feed_name(i));
feeds_[i].second.AsProtoTensorContent(feed->mutable_tensor());
}
for (size_t i = 0; i < num_fetches(); ++i) {
proto_version_->add_fetch(fetch_name(i));
}
for (size_t i = 0; i < num_targets(); ++i) {
proto_version_->add_target(target_name(i));
}
*proto_version_->mutable_options() = options();
}
return *proto_version_;
}
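// MutableProtoRunStepRequest delegates every accessor to an owned
// RunStepRequest proto, so ToProto() can return it directly.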
const string& MutableProtoRunStepRequest::session_handle() const {
return request_.session_handle();
}
void MutableProtoRunStepRequest::set_session_handle(const string& handle) {
request_.set_session_handle(handle);
}
const string& MutableProtoRunStepRequest::partial_run_handle() const {
return request_.partial_run_handle();
}
void MutableProtoRunStepRequest::set_partial_run_handle(const string& handle) {
request_.set_partial_run_handle(handle);
}
size_t MutableProtoRunStepRequest::num_feeds() const {
return request_.feed_size();
}
const string& MutableProtoRunStepRequest::feed_name(size_t i) const {
return request_.feed(i).name();
}
Status MutableProtoRunStepRequest::FeedValue(size_t i,
Tensor* out_tensor) const {
if (!ParseTensorProtoToTensor(request_.feed(i).tensor(), out_tensor)) {
return errors::InvalidArgument("Invalid TensorProto for feed value ", i);
} else {
return absl::OkStatus();
}
}
Status MutableProtoRunStepRequest::FeedValue(size_t i,
TensorProto* out_tensor) const {
*out_tensor = request_.feed(i).tensor();
return absl::OkStatus();
}
void MutableProtoRunStepRequest::add_feed(const string& name,
const Tensor& value) {
NamedTensorProto* feed = request_.add_feed();
feed->set_name(name);
TensorProto* value_proto = feed->mutable_tensor();
value.AsProtoTensorContent(value_proto);
}
size_t MutableProtoRunStepRequest::num_fetches() const {
return request_.fetch_size();
}
const string& MutableProtoRunStepRequest::fetch_name(size_t i) const {
return request_.fetch(i);
}
void MutableProtoRunStepRequest::add_fetch(const string& name) {
request_.add_fetch(name);
}
size_t MutableProtoRunStepRequest::num_targets() const {
return request_.target_size();
}
const string& MutableProtoRunStepRequest::target_name(size_t i) const {
return request_.target(i);
}
void MutableProtoRunStepRequest::add_target(const string& name) {
request_.add_target(name);
}
const RunOptions& MutableProtoRunStepRequest::options() const {
return request_.options();
}
RunOptions* MutableProtoRunStepRequest::mutable_options() {
return request_.mutable_options();
}
bool MutableProtoRunStepRequest::store_errors_in_response_body() const {
return request_.store_errors_in_response_body();
}
void MutableProtoRunStepRequest::set_store_errors_in_response_body(
bool store_errors) {
request_.set_store_errors_in_response_body(store_errors);
}
int64_t MutableProtoRunStepRequest::request_id() const {
return request_.request_id();
}
string MutableProtoRunStepRequest::DebugString() const {
return request_.DebugString();
}
const RunStepRequest& MutableProtoRunStepRequest::ToProto() const {
return request_;
}
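// ProtoRunStepRequest is a read-only view over a RunStepRequest owned by the
// caller, typically one received from the RPC layer.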
ProtoRunStepRequest::ProtoRunStepRequest(const RunStepRequest* request)
: request_(request) {}
const string& ProtoRunStepRequest::session_handle() const {
return request_->session_handle();
}
const string& ProtoRunStepRequest::partial_run_handle() const {
return request_->partial_run_handle();
}
size_t ProtoRunStepRequest::num_feeds() const { return request_->feed_size(); }
const string& ProtoRunStepRequest::feed_name(size_t i) const {
return request_->feed(i).name();
}
Status ProtoRunStepRequest::FeedValue(size_t i, Tensor* out_tensor) const {
if (!ParseTensorProtoToTensor(request_->feed(i).tensor(), out_tensor)) {
return errors::InvalidArgument("Invalid TensorProto for feed value ", i);
} else {
return absl::OkStatus();
}
}
Status ProtoRunStepRequest::FeedValue(size_t i, TensorProto* out_tensor) const {
*out_tensor = request_->feed(i).tensor();
return absl::OkStatus();
}
size_t ProtoRunStepRequest::num_fetches() const {
return request_->fetch_size();
}
const string& ProtoRunStepRequest::fetch_name(size_t i) const {
return request_->fetch(i);
}
size_t ProtoRunStepRequest::num_targets() const {
return request_->target_size();
}
const string& ProtoRunStepRequest::target_name(size_t i) const {
return request_->target(i);
}
const RunOptions& ProtoRunStepRequest::options() const {
return request_->options();
}
bool ProtoRunStepRequest::store_errors_in_response_body() const {
return request_->store_errors_in_response_body();
}
int64_t ProtoRunStepRequest::request_id() const {
return request_->request_id();
}
string ProtoRunStepRequest::DebugString() const {
return request_->DebugString();
}
const RunStepRequest& ProtoRunStepRequest::ToProto() const { return *request_; }
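// The RunGraph wrappers below mirror the RunStep wrappers: an in-memory
// variant that avoids tensor serialization, an owning-proto variant, and a
// non-owning view over a borrowed proto.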
const string& InMemoryRunGraphRequest::session_handle() const {
return session_handle_;
}
bool InMemoryRunGraphRequest::create_worker_session_called() const {
return create_worker_session_called_;
}
void InMemoryRunGraphRequest::set_session_handle(const string& handle) {
session_handle_ = handle;
}
void InMemoryRunGraphRequest::set_create_worker_session_called(bool called) {
create_worker_session_called_ = called;
}
const string& InMemoryRunGraphRequest::graph_handle() const {
return graph_handle_;
}
void InMemoryRunGraphRequest::set_graph_handle(const string& handle) {
graph_handle_ = handle;
}
int64_t InMemoryRunGraphRequest::step_id() const { return step_id_; }
void InMemoryRunGraphRequest::set_step_id(int64_t step_id) {
step_id_ = step_id;
}
const ExecutorOpts& InMemoryRunGraphRequest::exec_opts() const {
return exec_opts_;
}
ExecutorOpts* InMemoryRunGraphRequest::mutable_exec_opts() {
return &exec_opts_;
}
size_t InMemoryRunGraphRequest::num_sends() const { return sends_.size(); }
const string& InMemoryRunGraphRequest::send_key(size_t i) const {
return sends_[i].first;
}
Status InMemoryRunGraphRequest::SendValue(size_t i, Tensor* out_tensor) const {
*out_tensor = sends_[i].second;
return absl::OkStatus();
}
Status InMemoryRunGraphRequest::AddSendFromRunStepRequest(
const RunStepRequestWrapper& run_step_request, size_t i,
const string& send_key) {
Tensor tensor;
TF_RETURN_IF_ERROR(run_step_request.FeedValue(i, &tensor));
sends_.emplace_back(send_key, std::move(tensor));
return absl::OkStatus();
}
Status InMemoryRunGraphRequest::AddSendFromRunCallableRequest(
const RunCallableRequest& run_callable_request, size_t i,
const string& send_key) {
Tensor tensor;
if (!ParseTensorProtoToTensor(run_callable_request.feed(i), &tensor)) {
return errors::InvalidArgument("Invalid TensorProto for feed value ", i);
}
sends_.emplace_back(send_key, std::move(tensor));
return absl::OkStatus();
}
size_t InMemoryRunGraphRequest::num_recvs() const { return recvs_.size(); }
const string& InMemoryRunGraphRequest::recv_key(size_t i) const {
return recvs_[i];
}
void InMemoryRunGraphRequest::add_recv_key(const string& recv_key) {
recvs_.push_back(recv_key);
}
bool InMemoryRunGraphRequest::is_partial() const { return is_partial_; }
void InMemoryRunGraphRequest::set_is_partial(bool is_partial) {
is_partial_ = is_partial;
}
bool InMemoryRunGraphRequest::is_last_partial_run() const {
return is_last_partial_run_;
}
void InMemoryRunGraphRequest::set_is_last_partial_run(
bool is_last_partial_run) {
is_last_partial_run_ = is_last_partial_run;
}
bool InMemoryRunGraphRequest::store_errors_in_response_body() const {
return store_errors_in_response_body_;
}
void InMemoryRunGraphRequest::set_store_errors_in_response_body(
bool store_errors) {
store_errors_in_response_body_ = store_errors;
}
int64_t InMemoryRunGraphRequest::request_id() const { return request_id_; }
void InMemoryRunGraphRequest::set_request_id(int64_t request_id) {
request_id_ = request_id;
}
const RunGraphRequest& InMemoryRunGraphRequest::ToProto() const {
if (!proto_version_) {
proto_version_ = std::make_unique<RunGraphRequest>();
proto_version_->set_session_handle(session_handle());
proto_version_->set_create_worker_session_called(
create_worker_session_called());
proto_version_->set_graph_handle(graph_handle());
proto_version_->set_step_id(step_id());
*proto_version_->mutable_exec_opts() = exec_opts();
for (size_t i = 0; i < num_sends(); ++i) {
auto send = proto_version_->add_send();
send->set_name(send_key(i));
sends_[i].second.AsProtoTensorContent(send->mutable_tensor());
}
for (size_t i = 0; i < num_recvs(); ++i) {
proto_version_->add_recv_key(recv_key(i));
}
proto_version_->set_is_partial(is_partial());
proto_version_->set_is_last_partial_run(is_last_partial_run());
}
proto_version_->set_store_errors_in_response_body(
store_errors_in_response_body_);
proto_version_->set_request_id(request_id_);
return *proto_version_;
}
const string& MutableProtoRunGraphRequest::session_handle() const {
return request_.session_handle();
}
void MutableProtoRunGraphRequest::set_session_handle(const string& handle) {
request_.set_session_handle(handle);
}
bool MutableProtoRunGraphRequest::create_worker_session_called() const {
return request_.create_worker_session_called();
}
void MutableProtoRunGraphRequest::set_create_worker_session_called(
bool called) {
request_.set_create_worker_session_called(called);
}
const string& MutableProtoRunGraphRequest::graph_handle() const {
return request_.graph_handle();
}
void MutableProtoRunGraphRequest::set_graph_handle(const string& handle) {
request_.set_graph_handle(handle);
}
int64_t MutableProtoRunGraphRequest::step_id() const {
return request_.step_id();
}
void MutableProtoRunGraphRequest::set_step_id(int64_t step_id) {
request_.set_step_id(step_id);
}
const ExecutorOpts& MutableProtoRunGraphRequest::exec_opts() const {
return request_.exec_opts();
}
ExecutorOpts* MutableProtoRunGraphRequest::mutable_exec_opts() {
return request_.mutable_exec_opts();
}
size_t MutableProtoRunGraphRequest::num_sends() const {
return request_.send_size();
}
const string& MutableProtoRunGraphRequest::send_key(size_t i) const {
return request_.send(i).name();
}
Status MutableProtoRunGraphRequest::SendValue(size_t i,
Tensor* out_tensor) const {
if (!ParseTensorProtoToTensor(request_.send(i).tensor(), out_tensor)) {
return errors::InvalidArgument("Invalid TensorProto for feed value ", i);
} else {
return absl::OkStatus();
}
}
Status MutableProtoRunGraphRequest::AddSendFromRunStepRequest(
const RunStepRequestWrapper& run_step_request, size_t i,
const string& send_key) {
NamedTensorProto* send = request_.add_send();
send->set_name(send_key);
TF_RETURN_IF_ERROR(run_step_request.FeedValue(i, send->mutable_tensor()));
return absl::OkStatus();
}
Status MutableProtoRunGraphRequest::AddSendFromRunCallableRequest(
const RunCallableRequest& run_callable_request, size_t i,
const string& send_key) {
NamedTensorProto* send = request_.add_send();
send->set_name(send_key);
*send->mutable_tensor() = run_callable_request.feed(i);
return absl::OkStatus();
}
size_t MutableProtoRunGraphRequest::num_recvs() const {
return request_.recv_key_size();
}
const string& MutableProtoRunGraphRequest::recv_key(size_t i) const {
return request_.recv_key(i);
}
void MutableProtoRunGraphRequest::add_recv_key(const string& recv_key) {
request_.add_recv_key(recv_key);
}
bool MutableProtoRunGraphRequest::is_partial() const {
return request_.is_partial();
}
void MutableProtoRunGraphRequest::set_is_partial(bool is_partial) {
request_.set_is_partial(is_partial);
}
bool MutableProtoRunGraphRequest::is_last_partial_run() const {
return request_.is_last_partial_run();
}
void MutableProtoRunGraphRequest::set_is_last_partial_run(
bool is_last_partial_run) {
request_.set_is_last_partial_run(is_last_partial_run);
}
bool MutableProtoRunGraphRequest::store_errors_in_response_body() const {
return request_.store_errors_in_response_body();
}
void MutableProtoRunGraphRequest::set_store_errors_in_response_body(
bool store_errors) {
request_.set_store_errors_in_response_body(store_errors);
}
int64_t MutableProtoRunGraphRequest::request_id() const {
return request_.request_id();
}
void MutableProtoRunGraphRequest::set_request_id(int64_t request_id) {
request_.set_request_id(request_id);
}
const RunGraphRequest& MutableProtoRunGraphRequest::ToProto() const {
return request_;
}
ProtoRunGraphRequest::ProtoRunGraphRequest(const RunGraphRequest* request)
: request_(request) {}
const string& ProtoRunGraphRequest::session_handle() const {
return request_->session_handle();
}
bool ProtoRunGraphRequest::create_worker_session_called() const {
return request_->create_worker_session_called();
}
const string& ProtoRunGraphRequest::graph_handle() const {
return request_->graph_handle();
}
int64_t ProtoRunGraphRequest::step_id() const { return request_->step_id(); }
const ExecutorOpts& ProtoRunGraphRequest::exec_opts() const {
return request_->exec_opts();
}
size_t ProtoRunGraphRequest::num_sends() const { return request_->send_size(); }
const string& ProtoRunGraphRequest::send_key(size_t i) const {
return request_->send(i).name();
}
Status ProtoRunGraphRequest::SendValue(size_t i, Tensor* out_tensor) const {
if (!ParseTensorProtoToTensor(request_->send(i).tensor(), out_tensor)) {
return errors::InvalidArgument("Invalid TensorProto for feed value ", i);
} else {
return absl::OkStatus();
}
}
size_t ProtoRunGraphRequest::num_recvs() const {
return request_->recv_key_size();
}
const string& ProtoRunGraphRequest::recv_key(size_t i) const {
return request_->recv_key(i);
}
bool ProtoRunGraphRequest::is_partial() const { return request_->is_partial(); }
bool ProtoRunGraphRequest::is_last_partial_run() const {
return request_->is_last_partial_run();
}
bool ProtoRunGraphRequest::store_errors_in_response_body() const {
return request_->store_errors_in_response_body();
}
int64_t ProtoRunGraphRequest::request_id() const {
return request_->request_id();
}
const RunGraphRequest& ProtoRunGraphRequest::ToProto() const {
return *request_;
}
size_t InMemoryRunGraphResponse::num_recvs() const { return recvs_.size(); }
const string& InMemoryRunGraphResponse::recv_key(size_t i) const {
return recvs_[i].first;
}
Status InMemoryRunGraphResponse::RecvValue(size_t i, TensorProto* out_tensor) {
recvs_[i].second.AsProtoTensorContent(out_tensor);
return absl::OkStatus();
}
Status InMemoryRunGraphResponse::RecvValue(size_t i, Tensor* out_tensor) {
*out_tensor = recvs_[i].second;
return absl::OkStatus();
}
void InMemoryRunGraphResponse::AddRecv(const string& key, const Tensor& value) {
recvs_.emplace_back(key, value);
}
StepStats* InMemoryRunGraphResponse::mutable_step_stats() {
return &step_stats_;
}
CostGraphDef* InMemoryRunGraphResponse::mutable_cost_graph() {
return &cost_graph_;
}
Status InMemoryRunGraphResponse::status() const { return status_; }
errors::Code InMemoryRunGraphResponse::status_code() const {
return static_cast<errors::Code>(status_.code());
}
void InMemoryRunGraphResponse::set_status(const Status& status) {
status_ = status;
}
RunGraphResponse* InMemoryRunGraphResponse::get_proto() {
LOG(FATAL) << "Cannot get a mutable protobuf for an InMemoryRunGraphResponse";
return nullptr;
}
size_t InMemoryRunGraphResponse::num_partition_graphs() const {
return partition_graphs_.size();
}
GraphDef* InMemoryRunGraphResponse::mutable_partition_graph(size_t i) {
return &partition_graphs_[i];
}
void InMemoryRunGraphResponse::AddPartitionGraph(
const GraphDef& partition_graph) {
partition_graphs_.push_back(partition_graph);
}
size_t OwnedProtoRunGraphResponse::num_recvs() const {
return response_.recv_size();
}
const string& OwnedProtoRunGraphResponse::recv_key(size_t i) const {
return response_.recv(i).name();
}
Status OwnedProtoRunGraphResponse::RecvValue(size_t i,
TensorProto* out_tensor) {
out_tensor->Swap(response_.mutable_recv(i)->mutable_tensor());
return absl::OkStatus();
}
Status OwnedProtoRunGraphResponse::RecvValue(size_t i, Tensor* out_tensor) {
if (!ParseTensorProtoToTensor(response_.recv(i).tensor(), out_tensor)) {
return errors::InvalidArgument("Invalid TensorProto for recv value ", i);
} else {
return absl::OkStatus();
}
}
void OwnedProtoRunGraphResponse::AddRecv(const string& key,
const Tensor& value) {
NamedTensorProto* recv = response_.add_recv();
recv->set_name(key);
TensorProto* value_proto = recv->mutable_tensor();
value.AsProtoTensorContent(value_proto);
}
StepStats* OwnedProtoRunGraphResponse::mutable_step_stats() {
return response_.mutable_step_stats();
}
CostGraphDef* OwnedProtoRunGraphResponse::mutable_cost_graph() {
return response_.mutable_cost_graph();
}
Status OwnedProtoRunGraphResponse::status() const {
return Status(static_cast<absl::StatusCode>(response_.status_code()),
response_.status_error_message());
}
absl::StatusCode OwnedProtoRunGraphResponse::status_code() const {
return static_cast<absl::StatusCode>(response_.status_code());
}
void OwnedProtoRunGraphResponse::set_status(const Status& status) {
response_.set_status_code(static_cast<tsl::error::Code>(status.code()));
response_.set_status_error_message(absl::StatusMessageAsCStr(status));
}
RunGraphResponse* OwnedProtoRunGraphResponse::get_proto() { return &response_; }
size_t OwnedProtoRunGraphResponse::num_partition_graphs() const {
return response_.partition_graph_size();
}
GraphDef* OwnedProtoRunGraphResponse::mutable_partition_graph(size_t i) {
return response_.mutable_partition_graph(i);
}
void OwnedProtoRunGraphResponse::AddPartitionGraph(
const GraphDef& partition_graph) {
GraphDef* graph_def = response_.mutable_partition_graph()->Add();
*graph_def = partition_graph;
}
NonOwnedProtoRunGraphResponse::NonOwnedProtoRunGraphResponse(
RunGraphResponse* response)
: response_(response) {}
size_t NonOwnedProtoRunGraphResponse::num_recvs() const {
return response_->recv_size();
}
const string& NonOwnedProtoRunGraphResponse::recv_key(size_t i) const {
return response_->recv(i).name();
}
Status NonOwnedProtoRunGraphResponse::RecvValue(size_t i,
TensorProto* out_tensor) {
out_tensor->Swap(response_->mutable_recv(i)->mutable_tensor());
return absl::OkStatus();
}
Status NonOwnedProtoRunGraphResponse::RecvValue(size_t i, Tensor* out_tensor) {
if (!ParseTensorProtoToTensor(response_->recv(i).tensor(), out_tensor)) {
return errors::InvalidArgument("Invalid TensorProto for recv value ", i);
} else {
return absl::OkStatus();
}
}
void NonOwnedProtoRunGraphResponse::AddRecv(const string& key,
const Tensor& value) {
NamedTensorProto* recv = response_->add_recv();
recv->set_name(key);
TensorProto* value_proto = recv->mutable_tensor();
value.AsProtoTensorContent(value_proto);
}
StepStats* NonOwnedProtoRunGraphResponse::mutable_step_stats() {
return response_->mutable_step_stats();
}
CostGraphDef* NonOwnedProtoRunGraphResponse::mutable_cost_graph() {
return response_->mutable_cost_graph();
}
Status NonOwnedProtoRunGraphResponse::status() const {
return Status(static_cast<absl::StatusCode>(response_->status_code()),
response_->status_error_message());
}
absl::StatusCode NonOwnedProtoRunGraphResponse::status_code() const {
return static_cast<absl::StatusCode>(response_->status_code());
}
void NonOwnedProtoRunGraphResponse::set_status(const Status& status) {
response_->set_status_code(static_cast<tsl::error::Code>(status.code()));
response_->set_status_error_message(absl::StatusMessageAsCStr(status));
}
RunGraphResponse* NonOwnedProtoRunGraphResponse::get_proto() {
return response_;
}
size_t NonOwnedProtoRunGraphResponse::num_partition_graphs() const {
return response_->partition_graph_size();
}
GraphDef* NonOwnedProtoRunGraphResponse::mutable_partition_graph(size_t i) {
return response_->mutable_partition_graph(i);
}
void NonOwnedProtoRunGraphResponse::AddPartitionGraph(
const GraphDef& partition_graph) {
GraphDef* graph_def = response_->add_partition_graph();
*graph_def = partition_graph;
}
MutableRunStepResponseWrapper::~MutableRunStepResponseWrapper() = default;
size_t InMemoryRunStepResponse::num_tensors() const { return tensors_.size(); }
const string& InMemoryRunStepResponse::tensor_name(size_t i) const {
return tensors_[i].first;
}
Status InMemoryRunStepResponse::TensorValue(size_t i,
Tensor* out_tensor) const {
*out_tensor = tensors_[i].second;
return absl::OkStatus();
}
const RunMetadata& InMemoryRunStepResponse::metadata() const {
return metadata_;
}
Status InMemoryRunStepResponse::AddTensorFromRunGraphResponse(
const string& name, MutableRunGraphResponseWrapper* wrapper, size_t i) {
Tensor tensor;
TF_RETURN_IF_ERROR(wrapper->RecvValue(i, &tensor));
tensors_.emplace_back(name, tensor);
return absl::OkStatus();
}
RunMetadata* InMemoryRunStepResponse::mutable_metadata() { return &metadata_; }
Status InMemoryRunStepResponse::status() const { return status_; }
errors::Code InMemoryRunStepResponse::status_code() const {
return static_cast<errors::Code>(status_.code());
}
void InMemoryRunStepResponse::set_status(const Status& status) {
status_ = status;
}
RunStepResponse* InMemoryRunStepResponse::get_proto() {
LOG(FATAL) << "Cannot get a mutable protobuf for an InMemoryRunStepResponse";
return nullptr;
}
size_t OwnedProtoRunStepResponse::num_tensors() const {
return response_.tensor_size();
}
const string& OwnedProtoRunStepResponse::tensor_name(size_t i) const {
return response_.tensor(i).name();
}
Status OwnedProtoRunStepResponse::TensorValue(size_t i,
Tensor* out_tensor) const {
if (!ParseTensorProtoToTensor(response_.tensor(i).tensor(), out_tensor)) {
return errors::InvalidArgument("Invalid TensorProto for fetch value ", i);
} else {
return absl::OkStatus();
}
}
const RunMetadata& OwnedProtoRunStepResponse::metadata() const {
return response_.metadata();
}
Status OwnedProtoRunStepResponse::AddTensorFromRunGraphResponse(
const string& name, MutableRunGraphResponseWrapper* run_graph_response,
size_t i) {
NamedTensorProto* response_tensor = response_.add_tensor();
response_tensor->set_name(name);
return run_graph_response->RecvValue(i, response_tensor->mutable_tensor());
}
RunMetadata* OwnedProtoRunStepResponse::mutable_metadata() {
return response_.mutable_metadata();
}
Status OwnedProtoRunStepResponse::status() const {
return Status(static_cast<absl::StatusCode>(response_.status_code()),
response_.status_error_message());
}
absl::StatusCode OwnedProtoRunStepResponse::status_code() const {
return static_cast<absl::StatusCode>(response_.status_code());
}
void OwnedProtoRunStepResponse::set_status(const Status& status) {
response_.set_status_code(static_cast<tsl::error::Code>(status.code()));
response_.set_status_error_message(absl::StatusMessageAsCStr(status));
}
RunStepResponse* OwnedProtoRunStepResponse::get_proto() { return &response_; }
NonOwnedProtoRunStepResponse::NonOwnedProtoRunStepResponse(
RunStepResponse* response)
: response_(response) {}
size_t NonOwnedProtoRunStepResponse::num_tensors() const {
return response_->tensor_size();
}
const string& NonOwnedProtoRunStepResponse::tensor_name(size_t i) const {
return response_->tensor(i).name();
}
Status NonOwnedProtoRunStepResponse::TensorValue(size_t i,
Tensor* out_tensor) const {
if (!ParseTensorProtoToTensor(response_->tensor(i).tensor(), out_tensor)) {
return errors::InvalidArgument("Invalid TensorProto for fetch value ", i);
} else {
return absl::OkStatus();
}
}
const RunMetadata& NonOwnedProtoRunStepResponse::metadata() const {
return response_->metadata();
}
Status NonOwnedProtoRunStepResponse::AddTensorFromRunGraphResponse(
const string& name, MutableRunGraphResponseWrapper* run_graph_response,
size_t i) {
NamedTensorProto* response_tensor = response_->add_tensor();
response_tensor->set_name(name);
return run_graph_response->RecvValue(i, response_tensor->mutable_tensor());
}
RunMetadata* NonOwnedProtoRunStepResponse::mutable_metadata() {
return response_->mutable_metadata();
}
Status NonOwnedProtoRunStepResponse::status() const {
return Status(static_cast<absl::StatusCode>(response_->status_code()),
response_->status_error_message());
}
absl::StatusCode NonOwnedProtoRunStepResponse::status_code() const {
return static_cast<absl::StatusCode>(response_->status_code());
}
void NonOwnedProtoRunStepResponse::set_status(const Status& status) {
response_->set_status_code(static_cast<tsl::error::Code>(status.code()));
response_->set_status_error_message(absl::StatusMessageAsCStr(status));
}
RunStepResponse* NonOwnedProtoRunStepResponse::get_proto() { return response_; }
} | #include "tensorflow/core/distributed_runtime/message_wrappers.h"
#include "tensorflow/core/framework/cost_graph.pb.h"
#include "tensorflow/core/framework/step_stats.pb.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/config.pb.h"
namespace tensorflow {
namespace {
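// TensorA and TensorB are the two fixed int32 tensors threaded through every
// request/response wrapper exercised by these tests.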
Tensor TensorA() {
Tensor a_tensor(DT_INT32, TensorShape({2, 2}));
test::FillValues<int32>(&a_tensor, {3, 2, -1, 0});
return a_tensor;
}
Tensor TensorB() {
Tensor b_tensor(DT_INT32, TensorShape({1, 2}));
test::FillValues<int32>(&b_tensor, {1, 2});
return b_tensor;
}
void BuildRunStepRequest(MutableRunStepRequestWrapper* request) {
request->set_session_handle("handle");
request->set_partial_run_handle("partial_handle");
request->add_feed("feed_a:0", TensorA());
request->add_feed("feed_b:0", TensorB());
request->add_fetch("fetch_x:0");
request->add_fetch("fetch_y:0");
request->add_target("target_i");
request->add_target("target_j");
request->mutable_options()->set_timeout_in_ms(37);
}
void CheckRunStepRequest(const RunStepRequestWrapper& request) {
EXPECT_EQ("handle", request.session_handle());
EXPECT_EQ("partial_handle", request.partial_run_handle());
EXPECT_EQ(2, request.num_feeds());
EXPECT_EQ("feed_a:0", request.feed_name(0));
EXPECT_EQ("feed_b:0", request.feed_name(1));
Tensor val;
TF_EXPECT_OK(request.FeedValue(0, &val));
test::ExpectTensorEqual<int32>(TensorA(), val);
TF_EXPECT_OK(request.FeedValue(1, &val));
test::ExpectTensorEqual<int32>(TensorB(), val);
EXPECT_EQ(2, request.num_fetches());
EXPECT_EQ("fetch_x:0", request.fetch_name(0));
EXPECT_EQ("fetch_y:0", request.fetch_name(1));
EXPECT_EQ("target_i", request.target_name(0));
EXPECT_EQ("target_j", request.target_name(1));
EXPECT_EQ(37, request.options().timeout_in_ms());
}
void BuildRunGraphRequest(const RunStepRequestWrapper& run_step_request,
MutableRunGraphRequestWrapper* run_graph_request) {
run_graph_request->set_graph_handle("graph_handle");
run_graph_request->set_step_id(13);
run_graph_request->mutable_exec_opts()->set_record_timeline(true);
TF_EXPECT_OK(run_graph_request->AddSendFromRunStepRequest(run_step_request, 0,
"send_0"));
TF_EXPECT_OK(run_graph_request->AddSendFromRunStepRequest(run_step_request, 1,
"send_1"));
run_graph_request->add_recv_key("recv_2");
run_graph_request->add_recv_key("recv_3");
run_graph_request->set_is_partial(true);
}
void CheckRunGraphRequest(const RunGraphRequestWrapper& request) {
EXPECT_EQ("graph_handle", request.graph_handle());
EXPECT_EQ(13, request.step_id());
EXPECT_FALSE(request.exec_opts().record_costs());
EXPECT_TRUE(request.exec_opts().record_timeline());
EXPECT_FALSE(request.exec_opts().record_partition_graphs());
EXPECT_EQ(2, request.num_sends());
Tensor val;
TF_EXPECT_OK(request.SendValue(0, &val));
test::ExpectTensorEqual<int32>(TensorA(), val);
TF_EXPECT_OK(request.SendValue(1, &val));
test::ExpectTensorEqual<int32>(TensorB(), val);
EXPECT_TRUE(request.is_partial());
EXPECT_FALSE(request.is_last_partial_run());
}
void BuildRunGraphResponse(MutableRunGraphResponseWrapper* run_graph_response) {
run_graph_response->AddRecv("recv_2", TensorA());
run_graph_response->AddRecv("recv_3", TensorB());
run_graph_response->mutable_step_stats()->add_dev_stats()->set_device(
"/cpu:0");
run_graph_response->mutable_cost_graph()->add_node()->set_name("cost_node");
GraphDef graph_def;
graph_def.mutable_versions()->set_producer(1234);
graph_def.mutable_versions()->set_min_consumer(1234);
run_graph_response->AddPartitionGraph(graph_def);
}
void CheckRunGraphResponse(MutableRunGraphResponseWrapper* response) {
ASSERT_EQ(2, response->num_recvs());
EXPECT_EQ("recv_2", response->recv_key(0));
EXPECT_EQ("recv_3", response->recv_key(1));
Tensor val;
TF_EXPECT_OK(response->RecvValue(0, &val));
test::ExpectTensorEqual<int32>(TensorA(), val);
TF_EXPECT_OK(response->RecvValue(1, &val));
test::ExpectTensorEqual<int32>(TensorB(), val);
ASSERT_EQ(1, response->mutable_step_stats()->dev_stats_size());
EXPECT_EQ("/cpu:0", response->mutable_step_stats()->dev_stats(0).device());
ASSERT_EQ(1, response->mutable_cost_graph()->node_size());
EXPECT_EQ("cost_node", response->mutable_cost_graph()->node(0).name());
ASSERT_EQ(1, response->num_partition_graphs());
EXPECT_EQ(1234, response->mutable_partition_graph(0)->versions().producer());
EXPECT_EQ(1234,
response->mutable_partition_graph(0)->versions().min_consumer());
}
void BuildRunStepResponse(MutableRunGraphResponseWrapper* run_graph_response,
MutableRunStepResponseWrapper* run_step_response) {
TF_EXPECT_OK(run_step_response->AddTensorFromRunGraphResponse(
"fetch_x:0", run_graph_response, 0));
TF_EXPECT_OK(run_step_response->AddTensorFromRunGraphResponse(
"fetch_y:0", run_graph_response, 1));
*run_step_response->mutable_metadata()->mutable_step_stats() =
*run_graph_response->mutable_step_stats();
protobuf::RepeatedPtrField<GraphDef>* partition_graph_defs =
run_step_response->mutable_metadata()->mutable_partition_graphs();
for (size_t i = 0; i < run_graph_response->num_partition_graphs(); i++) {
partition_graph_defs->Add()->Swap(
run_graph_response->mutable_partition_graph(i));
}
}
void CheckRunStepResponse(const MutableRunStepResponseWrapper& response) {
ASSERT_EQ(2, response.num_tensors());
EXPECT_EQ("fetch_x:0", response.tensor_name(0));
EXPECT_EQ("fetch_y:0", response.tensor_name(1));
Tensor val;
TF_EXPECT_OK(response.TensorValue(0, &val));
test::ExpectTensorEqual<int32>(TensorA(), val);
TF_EXPECT_OK(response.TensorValue(1, &val));
test::ExpectTensorEqual<int32>(TensorB(), val);
ASSERT_EQ(1, response.metadata().step_stats().dev_stats_size());
EXPECT_EQ("/cpu:0", response.metadata().step_stats().dev_stats(0).device());
ASSERT_EQ(1, response.metadata().partition_graphs_size());
EXPECT_EQ(1234,
response.metadata().partition_graphs(0).versions().producer());
EXPECT_EQ(1234,
response.metadata().partition_graphs(0).versions().min_consumer());
}
TEST(MessageWrappers, RunStepRequest_Basic) {
InMemoryRunStepRequest in_memory_request;
BuildRunStepRequest(&in_memory_request);
CheckRunStepRequest(in_memory_request);
MutableProtoRunStepRequest proto_request;
BuildRunStepRequest(&proto_request);
CheckRunStepRequest(proto_request);
CheckRunStepRequest(ProtoRunStepRequest(&in_memory_request.ToProto()));
CheckRunStepRequest(ProtoRunStepRequest(&proto_request.ToProto()));
}
TEST(MessageWrappers, RunGraphRequest_Basic) {
InMemoryRunStepRequest in_memory_run_step_request;
BuildRunStepRequest(&in_memory_run_step_request);
MutableProtoRunStepRequest mutable_proto_run_step_request;
BuildRunStepRequest(&mutable_proto_run_step_request);
ProtoRunStepRequest proto_run_step_request(
&mutable_proto_run_step_request.ToProto());
{
InMemoryRunGraphRequest request;
BuildRunGraphRequest(in_memory_run_step_request, &request);
CheckRunGraphRequest(request);
CheckRunGraphRequest(ProtoRunGraphRequest(&request.ToProto()));
}
{
InMemoryRunGraphRequest request;
BuildRunGraphRequest(mutable_proto_run_step_request, &request);
CheckRunGraphRequest(request);
CheckRunGraphRequest(ProtoRunGraphRequest(&request.ToProto()));
}
{
InMemoryRunGraphRequest request;
BuildRunGraphRequest(proto_run_step_request, &request);
CheckRunGraphRequest(request);
CheckRunGraphRequest(ProtoRunGraphRequest(&request.ToProto()));
}
{
MutableProtoRunGraphRequest request;
BuildRunGraphRequest(in_memory_run_step_request, &request);
CheckRunGraphRequest(request);
CheckRunGraphRequest(ProtoRunGraphRequest(&request.ToProto()));
}
{
MutableProtoRunGraphRequest request;
BuildRunGraphRequest(mutable_proto_run_step_request, &request);
CheckRunGraphRequest(request);
CheckRunGraphRequest(ProtoRunGraphRequest(&request.ToProto()));
}
{
MutableProtoRunGraphRequest request;
BuildRunGraphRequest(proto_run_step_request, &request);
CheckRunGraphRequest(request);
CheckRunGraphRequest(ProtoRunGraphRequest(&request.ToProto()));
}
}
TEST(MessageWrappers, RunGraphResponse_Basic) {
InMemoryRunGraphResponse in_memory_response;
BuildRunGraphResponse(&in_memory_response);
CheckRunGraphResponse(&in_memory_response);
OwnedProtoRunGraphResponse owned_proto_response;
BuildRunGraphResponse(&owned_proto_response);
CheckRunGraphResponse(&owned_proto_response);
RunGraphResponse response_proto;
NonOwnedProtoRunGraphResponse non_owned_proto_response(&response_proto);
BuildRunGraphResponse(&non_owned_proto_response);
CheckRunGraphResponse(&non_owned_proto_response);
}
TEST(MessageWrappers, RunStepResponse_Basic) {
{
InMemoryRunGraphResponse run_graph_response;
BuildRunGraphResponse(&run_graph_response);
InMemoryRunStepResponse response;
BuildRunStepResponse(&run_graph_response, &response);
CheckRunStepResponse(response);
}
{
InMemoryRunGraphResponse run_graph_response;
BuildRunGraphResponse(&run_graph_response);
OwnedProtoRunStepResponse response;
BuildRunStepResponse(&run_graph_response, &response);
CheckRunStepResponse(response);
}
{
InMemoryRunGraphResponse run_graph_response;
BuildRunGraphResponse(&run_graph_response);
RunStepResponse response_proto;
NonOwnedProtoRunStepResponse response(&response_proto);
BuildRunStepResponse(&run_graph_response, &response);
CheckRunStepResponse(response);
}
{
OwnedProtoRunGraphResponse run_graph_response;
BuildRunGraphResponse(&run_graph_response);
InMemoryRunStepResponse response;
BuildRunStepResponse(&run_graph_response, &response);
CheckRunStepResponse(response);
}
{
OwnedProtoRunGraphResponse run_graph_response;
BuildRunGraphResponse(&run_graph_response);
OwnedProtoRunStepResponse response;
BuildRunStepResponse(&run_graph_response, &response);
CheckRunStepResponse(response);
}
{
OwnedProtoRunGraphResponse run_graph_response;
BuildRunGraphResponse(&run_graph_response);
RunStepResponse response_proto;
NonOwnedProtoRunStepResponse response(&response_proto);
BuildRunStepResponse(&run_graph_response, &response);
CheckRunStepResponse(response);
}
{
RunGraphResponse run_graph_response_proto;
NonOwnedProtoRunGraphResponse run_graph_response(&run_graph_response_proto);
BuildRunGraphResponse(&run_graph_response);
InMemoryRunStepResponse response;
BuildRunStepResponse(&run_graph_response, &response);
CheckRunStepResponse(response);
}
{
RunGraphResponse run_graph_response_proto;
NonOwnedProtoRunGraphResponse run_graph_response(&run_graph_response_proto);
BuildRunGraphResponse(&run_graph_response);
OwnedProtoRunStepResponse response;
BuildRunStepResponse(&run_graph_response, &response);
CheckRunStepResponse(response);
}
{
RunGraphResponse run_graph_response_proto;
NonOwnedProtoRunGraphResponse run_graph_response(&run_graph_response_proto);
BuildRunGraphResponse(&run_graph_response);
RunStepResponse response_proto;
NonOwnedProtoRunStepResponse response(&response_proto);
BuildRunStepResponse(&run_graph_response, &response);
CheckRunStepResponse(response);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/distributed_runtime/message_wrappers.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/distributed_runtime/message_wrappers_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
7d0d2827-9665-4108-9bf1-a5f64ad67a81 | cpp | google/tensorstore | irregular_grid | tensorstore/internal/irregular_grid.cc | tensorstore/internal/irregular_grid_test.cc | #include "tensorstore/internal/irregular_grid.h"
#include <assert.h>
#include <stddef.h>
#include <algorithm>
#include <utility>
#include <vector>
#include "absl/container/inlined_vector.h"
#include "tensorstore/index.h"
#include "tensorstore/index_interval.h"
#include "tensorstore/index_space/index_domain.h"
#include "tensorstore/util/span.h"
namespace tensorstore {
namespace internal {
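// Sorts and de-duplicates the boundary points of each dimension, so dimension
// i ends up with inclusive_mins_[i].size() - 1 grid cells.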
IrregularGrid::IrregularGrid(std::vector<std::vector<Index>> inclusive_mins)
: shape_(inclusive_mins.size(), 0),
inclusive_mins_(std::move(inclusive_mins)) {
for (size_t i = 0; i < inclusive_mins_.size(); i++) {
std::sort(inclusive_mins_[i].begin(), inclusive_mins_[i].end());
auto new_it =
std::unique(inclusive_mins_[i].begin(), inclusive_mins_[i].end());
inclusive_mins_[i].resize(
std::distance(inclusive_mins_[i].begin(), new_it));
shape_[i] = inclusive_mins_[i].size() - 1;
}
}
Index IrregularGrid::operator()(DimensionIndex dim, Index output_index,
IndexInterval* cell_bounds) const {
auto points = inclusive_min(dim);
auto it = std::upper_bound(points.begin(), points.end(), output_index);
Index cell = std::distance(points.begin(), it) - 1;
if (cell_bounds) {
if (cell < 0) {
*cell_bounds = IndexInterval::UncheckedHalfOpen(-kInfIndex, points[0]);
} else if (cell < points.size() - 1) {
*cell_bounds =
IndexInterval::UncheckedHalfOpen(points[cell], points[cell + 1]);
} else {
*cell_bounds = IndexInterval::UncheckedClosed(points[cell], kInfIndex);
}
}
return cell;
}
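// Builds the grid from a set of index domains: every inclusive_min and
// exclusive_max of each domain becomes a candidate cell boundary.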
IrregularGrid IrregularGrid::Make(
tensorstore::span<const IndexDomain<>> domains) {
absl::InlinedVector<IndexDomainView<>, 16> views;
views.reserve(domains.size());
for (const auto& d : domains) views.push_back(d);
return Make(tensorstore::span(views));
}
IrregularGrid IrregularGrid::Make(
tensorstore::span<const IndexDomainView<>> domains) {
assert(!domains.empty());
DimensionIndex rank = domains[0].rank();
std::vector<std::vector<Index>> inclusive_mins;
inclusive_mins.resize(rank);
for (auto& d : domains) {
assert(d.rank() == rank);
for (DimensionIndex i = 0; i < rank; i++) {
if (inclusive_mins[i].empty() ||
inclusive_mins[i].back() != d[i].inclusive_min()) {
inclusive_mins[i].push_back(d[i].inclusive_min());
}
inclusive_mins[i].push_back(d[i].exclusive_max());
}
}
return IrregularGrid(std::move(inclusive_mins));
}
}
} | #include "tensorstore/internal/irregular_grid.h"
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/box.h"
#include "tensorstore/index.h"
#include "tensorstore/index_interval.h"
#include "tensorstore/index_space/index_domain.h"
#include "tensorstore/internal/grid_partition.h"
#include "tensorstore/internal/grid_partition_impl.h"
#include "tensorstore/util/span.h"
namespace {
using ::tensorstore::BoxView;
using ::tensorstore::DimensionIndex;
using ::tensorstore::Index;
using ::tensorstore::IndexDomain;
using ::tensorstore::IndexInterval;
using ::tensorstore::kInfIndex;
using ::tensorstore::internal::IrregularGrid;
using ::testing::ElementsAre;
TEST(IrregularGridTest, Basic) {
std::vector<Index> dimension0{2, 0, -3};
std::vector<Index> dimension1{10, 45, 20, 30};
auto grid = IrregularGrid({dimension0, dimension1});
EXPECT_EQ(2, grid.rank());
EXPECT_THAT(grid.shape(), ElementsAre(2, 3));
EXPECT_THAT(grid.inclusive_min(0), ElementsAre(-3, 0, 2));
EXPECT_THAT(grid.inclusive_min(1), ElementsAre(10, 20, 30, 45));
IndexInterval grid_cell;
EXPECT_EQ(grid(0, -4, &grid_cell), -1);
EXPECT_EQ(grid_cell, IndexInterval::UncheckedClosed(-kInfIndex, -4));
EXPECT_EQ(grid(0, -3, &grid_cell), 0);
EXPECT_EQ(grid_cell, IndexInterval::UncheckedSized(-3, 3));
EXPECT_EQ(grid(0, -2, &grid_cell), 0);
EXPECT_EQ(grid_cell, IndexInterval::UncheckedSized(-3, 3));
EXPECT_EQ(grid(0, -1, &grid_cell), 0);
EXPECT_EQ(grid_cell, IndexInterval::UncheckedSized(-3, 3));
EXPECT_EQ(grid(0, 0, &grid_cell), 1);
EXPECT_EQ(grid_cell, IndexInterval::UncheckedSized(0, 2));
EXPECT_EQ(grid(0, 1, &grid_cell), 1);
EXPECT_EQ(grid_cell, IndexInterval::UncheckedSized(0, 2));
EXPECT_EQ(grid(0, 2, &grid_cell), 2);
EXPECT_EQ(grid_cell, IndexInterval::UncheckedClosed(2, kInfIndex));
EXPECT_EQ(grid(0, 3, &grid_cell), 2);
EXPECT_EQ(grid_cell, IndexInterval::UncheckedClosed(2, kInfIndex));
EXPECT_EQ(grid(1, 7, &grid_cell), -1);
EXPECT_EQ(grid_cell, IndexInterval::UncheckedClosed(-kInfIndex, 9));
EXPECT_EQ(grid(1, 11, &grid_cell), 0);
EXPECT_EQ(grid_cell, IndexInterval::UncheckedSized(10, 10));
EXPECT_EQ(grid(1, 57, &grid_cell), 3);
EXPECT_EQ(grid_cell, IndexInterval::UncheckedClosed(45, kInfIndex));
}
TEST(IrregularGridTest, IndexDomain) {
const Index origin1[] = {-3, 10};
const Index shape1[] = {3, 10};
const Index origin2[] = {0, 20};
const Index shape2[] = {2, 10};
const Index origin3[] = {0, 30};
const Index shape3[] = {2, 15};
std::vector<IndexDomain<>> domains(
{IndexDomain<>{
BoxView<>{tensorstore::span(origin1), tensorstore::span(shape1)}},
IndexDomain<>{
BoxView<>{tensorstore::span(origin2), tensorstore::span(shape2)}},
IndexDomain<>{
BoxView<>{tensorstore::span(origin3), tensorstore::span(shape3)}}});
auto grid = IrregularGrid::Make(domains);
EXPECT_EQ(2, grid.rank());
EXPECT_THAT(grid.shape(), ElementsAre(2, 3));
EXPECT_THAT(grid.inclusive_min(0), ElementsAre(-3, 0, 2));
EXPECT_THAT(grid.inclusive_min(1), ElementsAre(10, 20, 30, 45));
IndexInterval grid_cell;
EXPECT_EQ(grid(0, -4, &grid_cell), -1);
EXPECT_EQ(grid_cell, IndexInterval::UncheckedClosed(-kInfIndex, -4));
EXPECT_EQ(grid(0, -3, &grid_cell), 0);
EXPECT_EQ(grid_cell, IndexInterval::UncheckedSized(-3, 3));
EXPECT_EQ(grid(0, -2, &grid_cell), 0);
EXPECT_EQ(grid_cell, IndexInterval::UncheckedSized(-3, 3));
EXPECT_EQ(grid(0, -1, &grid_cell), 0);
EXPECT_EQ(grid_cell, IndexInterval::UncheckedSized(-3, 3));
EXPECT_EQ(grid(0, 0, &grid_cell), 1);
EXPECT_EQ(grid_cell, IndexInterval::UncheckedSized(0, 2));
EXPECT_EQ(grid(0, 1, &grid_cell), 1);
EXPECT_EQ(grid_cell, IndexInterval::UncheckedSized(0, 2));
EXPECT_EQ(grid(0, 2, &grid_cell), 2);
EXPECT_EQ(grid_cell, IndexInterval::UncheckedClosed(2, kInfIndex));
EXPECT_EQ(grid(0, 3, &grid_cell), 2);
EXPECT_EQ(grid_cell, IndexInterval::UncheckedClosed(2, kInfIndex));
EXPECT_EQ(grid(1, 7, &grid_cell), -1);
EXPECT_EQ(grid_cell, IndexInterval::UncheckedClosed(-kInfIndex, 9));
EXPECT_EQ(grid(1, 11, &grid_cell), 0);
EXPECT_EQ(grid_cell, IndexInterval::UncheckedSized(10, 10));
EXPECT_EQ(grid(1, 57, &grid_cell), 3);
EXPECT_EQ(grid_cell, IndexInterval::UncheckedClosed(45, kInfIndex));
}
TEST(IrregularGridTest, Rank0) {
std::vector<std::vector<Index>> inclusive_mins;
auto grid = IrregularGrid(inclusive_mins);
EXPECT_EQ(0, grid.rank());
EXPECT_TRUE(grid.shape().empty());
EXPECT_TRUE(grid.cell_origin({}).empty());
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/irregular_grid.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/irregular_grid_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
b7c4b8a1-814a-467f-8bfa-161939660f06 | cpp | tensorflow/tensorflow | gen_node | tensorflow/core/grappler/graph_analyzer/gen_node.cc | tensorflow/core/grappler/graph_analyzer/gen_node_test.cc | #include "tensorflow/core/grappler/graph_analyzer/gen_node.h"
#include "absl/memory/memory.h"
#include "absl/strings/str_format.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/grappler/graph_analyzer/hash_tools.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/utils.h"
namespace tensorflow {
namespace grappler {
namespace graph_analyzer {
GenNode::GenNode(const NodeDef* node) : node_(node), op_(nullptr) {}
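// Builds one GenNode per NodeDef in `source`, then wires up the links between
// them in a second pass. A usage sketch (with a hypothetical `graph_def`):
//
//   GenNodeMap map;
//   TF_RETURN_IF_ERROR(GenNode::BuildGraphInMap(graph_def, &map));
//   // map["some_node"]->links() now describes that node's port connections.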
Status GenNode::BuildGraphInMap(const GraphDef& source, GenNodeMap* map) {
for (const auto& n : source.node()) {
const string& name = n.name();
if (map->find(name) != map->end()) {
return Status(absl::StatusCode::kInvalidArgument,
"Duplicate node name '" + name + "'.");
}
(*map)[name] = std::make_unique<GenNode>(&n);
}
for (const auto& mapit : *map) {
Status st = mapit.second->ParseInputs(map);
if (!st.ok()) {
return st;
}
}
return absl::OkStatus();
}
Status GenNode::ParseInputs(const GenNodeMap* map) {
all_inputs_or_none_ = false;
Status st = OpRegistry::Global()->LookUpOpDef(opcode(), &op_);
if (!st.ok()) {
return Status(
absl::StatusCode::kInvalidArgument,
absl::StrFormat("Node '%s' contains an undefined operation '%s': %s",
name(), opcode(), st.message()));
}
int n_inputs = node_->input_size();
int n_named_inputs = op_->input_arg_size();
int n_multi_inputs = 0;
for (const auto& inarg : op_->input_arg()) {
if (!inarg.number_attr().empty() || !inarg.type_list_attr().empty()) {
++n_multi_inputs;
}
}
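  // Classify the op's input wiring: a commutative op folds every non-control
  // input onto port 0; an op with exactly one multi-input argument (a
  // number_attr or type_list_attr input) keeps positional ports but must be
  // matched all-inputs-or-none; an op mixing several multi-inputs, or a
  // multi-input with other named inputs, is treated as plain positional and
  // never commutative.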
bool is_commutative = grappler::IsCommutative(*node_);
if (n_multi_inputs > 1 || (n_multi_inputs > 0 && n_named_inputs > 1)) {
is_commutative = false;
}
if (is_commutative) {
n_named_inputs = 1;
all_inputs_or_none_ = false;
} else if (n_multi_inputs > 0) {
all_inputs_or_none_ = true;
}
for (int i = 0; i < n_inputs; ++i) {
int other_position;
string other_name = ParseNodeName(node_->input(i), &other_position);
auto other_it = map->find(other_name);
if (other_it == map->end()) {
return Status(
absl::StatusCode::kInvalidArgument,
absl::StrFormat(
"Node '%s' input %d refers to a non-existing node '%s'.", name(),
i, other_name));
}
GenNode* other_node = other_it->second.get();
int this_position = other_position < 0 ? -1 : (is_commutative ? 0 : i);
if (this_position >= 0 && n_multi_inputs == 0 &&
this_position >= n_named_inputs) {
return Status(
absl::StatusCode::kInvalidArgument,
absl::StrFormat(
"Node '%s' has a non-control input from '%s' at index %d but its "
"operation '%s' defines only %d inputs.",
name(), other_name, this_position, op_->name(), n_named_inputs));
}
Port this_port(true, this_position);
Port other_port(false, other_position);
links_[this_port].emplace_back(LinkTarget(other_node, other_port));
other_node->links_[other_port].emplace_back(LinkTarget(this, this_port));
}
return absl::OkStatus();
}
bool GenNode::IsMultiInput(Port port) const {
if (!port.IsInbound()) {
return false;
}
auto it = links_.find(port);
if (it == links_.end()) {
return false;
}
return (it->second.size() > 1);
}
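// Renders a port in the compact form used by the tests and debug dumps:
// 'i'/'o' for inbound/outbound, followed by 'C' for the control port or the
// numeric port id (e.g. "i0", "oC").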
GenNode::Port::operator string() const {
string result = this->IsInbound() ? "i" : "o";
if (this->IsControl()) {
result.append("C");
} else {
result.append(absl::StrFormat("%d", this->Id()));
}
return result;
}
}
}
} | #include "tensorflow/core/grappler/graph_analyzer/gen_node.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/memory/memory.h"
#include "tensorflow/core/grappler/graph_analyzer/test_tools.h"
namespace tensorflow {
namespace grappler {
namespace graph_analyzer {
namespace test {
namespace {
using ::testing::ElementsAre;
using ::testing::Eq;
using ::testing::Ne;
TEST(GenNodeTest, Port) {
{
GenNode::Port p(true, 100);
EXPECT_THAT(p.IsInbound(), Eq(true));
EXPECT_THAT(p.IsControl(), Eq(false));
EXPECT_THAT(p.Id(), Eq(100));
GenNode::Port p2 = GenNode::Port::Decode(p.Encoded());
EXPECT_THAT(p2.IsInbound(), Eq(true));
EXPECT_THAT(p2.IsControl(), Eq(false));
EXPECT_THAT(p2.Id(), Eq(100));
}
{
GenNode::Port p(false, 0);
EXPECT_THAT(p.IsInbound(), Eq(false));
EXPECT_THAT(p.IsControl(), Eq(false));
EXPECT_THAT(p.Id(), Eq(0));
GenNode::Port p2 = GenNode::Port::Decode(p.Encoded());
EXPECT_THAT(p2.IsInbound(), Eq(false));
EXPECT_THAT(p2.IsControl(), Eq(false));
EXPECT_THAT(p2.Id(), Eq(0));
}
{
GenNode::Port p(true, -100);
EXPECT_THAT(p.IsInbound(), Eq(true));
EXPECT_THAT(p.IsControl(), Eq(true));
EXPECT_THAT(p.Id(), Eq(-100));
GenNode::Port p2 = GenNode::Port::Decode(p.Encoded());
EXPECT_THAT(p2.IsInbound(), Eq(true));
EXPECT_THAT(p2.IsControl(), Eq(true));
EXPECT_THAT(p2.Id(), Eq(-100));
}
{
GenNode::Port p(false, -1);
EXPECT_THAT(p.IsInbound(), Eq(false));
EXPECT_THAT(p.IsControl(), Eq(true));
EXPECT_THAT(p.Id(), Eq(-1));
GenNode::Port p2 = GenNode::Port::Decode(p.Encoded());
EXPECT_THAT(p2.IsInbound(), Eq(false));
EXPECT_THAT(p2.IsControl(), Eq(true));
EXPECT_THAT(p2.Id(), Eq(-1));
}
}
TEST(GenNodeTest, ParseNodeNoInputs) {
GenNodeMap map;
NodeDef node1 = MakeNodeConst("node1");
map["node1"] = std::make_unique<GenNode>(&node1);
auto gn1 = map["node1"].get();
ASSERT_THAT(gn1->ParseInputs(&map), Eq(absl::OkStatus()));
EXPECT_THAT(DumpLinkMap(gn1->links()), ElementsAre());
}
TEST(GenNodeTest, ParseNodeWithControl) {
GenNodeMap map;
NodeDef node1 = MakeNodeConst("node1");
map["node1"] = std::make_unique<GenNode>(&node1);
NodeDef node2 = MakeNodeConst("node2");
map["node2"] = std::make_unique<GenNode>(&node2);
NodeDef node3 = MakeNodeSub("node3", "node1", "node2");
node3.add_input("^node1");
node3.add_input("^node2");
map["node3"] = std::make_unique<GenNode>(&node3);
auto gn1 = map["node1"].get();
auto gn2 = map["node2"].get();
auto gn3 = map["node3"].get();
ASSERT_THAT(gn3->ParseInputs(&map), Eq(absl::OkStatus()));
EXPECT_THAT(DumpLinkMap(gn1->links()), ElementsAre(
"o0: node3[i0]",
"oC: node3[iC]"
));
EXPECT_THAT(DumpLinkMap(gn2->links()), ElementsAre(
"o0: node3[i1]",
"oC: node3[iC]"
));
EXPECT_THAT(DumpLinkMap(gn3->links()), ElementsAre(
"i0: node1[o0]",
"i1: node2[o0]",
"iC: node1[oC], node2[oC]"
));
EXPECT_THAT(gn3->IsMultiInput(GenNode::Port(true, 0)), Eq(false));
EXPECT_THAT(gn3->IsMultiInput(GenNode::Port(true, -1)), Eq(true));
EXPECT_FALSE(gn1->AllInputsOrNone());
EXPECT_FALSE(gn2->AllInputsOrNone());
EXPECT_FALSE(gn3->AllInputsOrNone());
}
TEST(GenNodeTest, ParseNodeCommutative) {
GenNodeMap map;
NodeDef node1 = MakeNodeConst("node1");
map["node1"] = std::make_unique<GenNode>(&node1);
NodeDef node2 = MakeNodeConst("node2");
map["node2"] = std::make_unique<GenNode>(&node2);
NodeDef node3 = MakeNodeMul("node3", "node1", "node2");
map["node3"] = std::make_unique<GenNode>(&node3);
auto gn1 = map["node1"].get();
auto gn2 = map["node2"].get();
auto gn3 = map["node3"].get();
ASSERT_THAT(gn3->ParseInputs(&map), Eq(absl::OkStatus()));
EXPECT_THAT(DumpLinkMap(gn1->links()), ElementsAre(
"o0: node3[i0]"
));
EXPECT_THAT(DumpLinkMap(gn2->links()), ElementsAre(
"o0: node3[i0]"
));
EXPECT_THAT(DumpLinkMap(gn3->links()), ElementsAre(
"i0: node1[o0], node2[o0]"
));
EXPECT_THAT(gn3->IsMultiInput(GenNode::Port(true, 0)), Eq(true));
EXPECT_FALSE(gn3->AllInputsOrNone());
}
TEST(GenNodeTest, ParseNodeMultiInputCommutative) {
GenNodeMap map;
NodeDef node1 = MakeNodeConst("node1");
map["node1"] = std::make_unique<GenNode>(&node1);
NodeDef node2 = MakeNodeConst("node2");
map["node2"] = std::make_unique<GenNode>(&node2);
NodeDef node3 = MakeNodeAddN("node3", "node1", "node2");
map["node3"] = std::make_unique<GenNode>(&node3);
auto gn1 = map["node1"].get();
auto gn2 = map["node2"].get();
auto gn3 = map["node3"].get();
ASSERT_THAT(gn3->ParseInputs(&map), Eq(absl::OkStatus()));
EXPECT_THAT(DumpLinkMap(gn1->links()), ElementsAre(
"o0: node3[i0]"
));
EXPECT_THAT(DumpLinkMap(gn2->links()), ElementsAre(
"o0: node3[i0]"
));
EXPECT_THAT(DumpLinkMap(gn3->links()), ElementsAre(
"i0: node1[o0], node2[o0]"
));
EXPECT_THAT(gn2->IsMultiInput(GenNode::Port(false, 0)), Eq(false));
EXPECT_THAT(gn3->IsMultiInput(GenNode::Port(true, 0)), Eq(true));
EXPECT_FALSE(gn3->AllInputsOrNone());
}
TEST(GenNodeTest, ParseNodeMultiInputNotCommutative) {
GenNodeMap map;
NodeDef node1 = MakeNodeConst("node1");
map["node1"] = std::make_unique<GenNode>(&node1);
NodeDef node2 = MakeNodeConst("node2");
map["node2"] = std::make_unique<GenNode>(&node2);
NodeDef node3 = MakeNodeShapeN("node3", "node1", "node2");
map["node3"] = std::make_unique<GenNode>(&node3);
auto gn1 = map["node1"].get();
auto gn2 = map["node2"].get();
auto gn3 = map["node3"].get();
ASSERT_THAT(gn3->ParseInputs(&map), Eq(absl::OkStatus()));
EXPECT_THAT(DumpLinkMap(gn1->links()), ElementsAre(
"o0: node3[i0]"
));
EXPECT_THAT(DumpLinkMap(gn2->links()), ElementsAre(
"o0: node3[i1]"
));
EXPECT_THAT(DumpLinkMap(gn3->links()), ElementsAre(
"i0: node1[o0]",
"i1: node2[o0]"
));
EXPECT_THAT(gn3->IsMultiInput(GenNode::Port(true, 0)), Eq(false));
EXPECT_TRUE(gn3->AllInputsOrNone());
}
TEST(GenNodeTest, ParseNodeMultiInputList) {
GenNodeMap map;
NodeDef node1 = MakeNodeConst("node1");
map["node1"] = std::make_unique<GenNode>(&node1);
NodeDef node2 = MakeNodeConst("node2");
map["node2"] = std::make_unique<GenNode>(&node2);
NodeDef node3 = MakeNodeIdentityN("node3", "node1", "node2");
map["node3"] = std::make_unique<GenNode>(&node3);
auto gn1 = map["node1"].get();
auto gn2 = map["node2"].get();
auto gn3 = map["node3"].get();
ASSERT_THAT(gn3->ParseInputs(&map), Eq(absl::OkStatus()));
EXPECT_THAT(DumpLinkMap(gn1->links()), ElementsAre(
"o0: node3[i0]"
));
EXPECT_THAT(DumpLinkMap(gn2->links()), ElementsAre(
"o0: node3[i1]"
));
EXPECT_THAT(DumpLinkMap(gn3->links()), ElementsAre(
"i0: node1[o0]",
"i1: node2[o0]"
));
EXPECT_THAT(gn3->IsMultiInput(GenNode::Port(true, 0)), Eq(false));
EXPECT_TRUE(gn3->AllInputsOrNone());
}
TEST(GenNodeTest, ParseNodeMultiMultiInput) {
GenNodeMap map;
NodeDef node1 = MakeNodeConst("node1");
map["node1"] = std::make_unique<GenNode>(&node1);
NodeDef node2 = MakeNodeConst("node2");
map["node2"] = std::make_unique<GenNode>(&node2);
NodeDef node3 = MakeNodeConst("node3");
map["node3"] = std::make_unique<GenNode>(&node3);
NodeDef node4 = MakeNodeConst("node4");
map["node4"] = std::make_unique<GenNode>(&node4);
NodeDef node5 =
MakeNodeQuantizedConcat("node5", "node1", "node2", "node3", "node4");
map["node5"] = std::make_unique<GenNode>(&node5);
auto gn1 = map["node1"].get();
auto gn2 = map["node2"].get();
auto gn3 = map["node3"].get();
auto gn4 = map["node4"].get();
auto gn5 = map["node5"].get();
ASSERT_THAT(gn5->ParseInputs(&map), Eq(absl::OkStatus()));
EXPECT_THAT(DumpLinkMap(gn1->links()), ElementsAre(
"o0: node5[i0]"
));
EXPECT_THAT(DumpLinkMap(gn2->links()), ElementsAre(
"o0: node5[i1]"
));
EXPECT_THAT(DumpLinkMap(gn3->links()), ElementsAre(
"o0: node5[i2]"
));
EXPECT_THAT(DumpLinkMap(gn4->links()), ElementsAre(
"o0: node5[i3]"
));
EXPECT_THAT(DumpLinkMap(gn5->links()), ElementsAre(
"i0: node1[o0]",
"i1: node2[o0]",
"i2: node3[o0]",
"i3: node4[o0]"
));
EXPECT_THAT(gn5->IsMultiInput(GenNode::Port(true, 1)), Eq(false));
EXPECT_THAT(gn5->IsMultiInput(GenNode::Port(true, 2)), Eq(false));
EXPECT_TRUE(gn5->AllInputsOrNone());
}
TEST(GenNodeTest, ParseNodeMultiOutput) {
GenNodeMap map;
NodeDef node1 = MakeNodeConst("node1");
map["node1"] = std::make_unique<GenNode>(&node1);
NodeDef node2 = MakeNodeConst("node2");
map["node2"] = std::make_unique<GenNode>(&node2);
NodeDef node3 = MakeNodeBroadcastGradientArgs("node3", "node1", "node2");
map["node3"] = std::make_unique<GenNode>(&node3);
NodeDef node4 = MakeNodeSub("node4", "node3:1", "node3:0");
map["node4"] = std::make_unique<GenNode>(&node4);
auto gn4 = map["node4"].get();
ASSERT_THAT(gn4->ParseInputs(&map), Eq(absl::OkStatus()));
EXPECT_THAT(DumpLinkMap(gn4->links()), ElementsAre(
"i0: node3[o1]",
"i1: node3[o0]"
));
}
TEST(GenNodeTest, ParseNodeUndefinedOp) {
GenNodeMap map;
NodeDef node1;
node1.set_name("node1");
node1.set_op("Zzzx");
map["node1"] = std::make_unique<GenNode>(&node1);
const OpDef* opdef;
Status nested_error = OpRegistry::Global()->LookUpOpDef("Zzzx", &opdef);
auto gn = map["node1"].get();
ASSERT_THAT(
gn->ParseInputs(&map),
Eq(Status(
absl::StatusCode::kInvalidArgument,
absl::StrCat("Node 'node1' contains an undefined operation 'Zzzx': ",
nested_error.message()))));
}
TEST(GenNodeTest, ParseNodeUnexpectedInputs) {
GenNodeMap map;
NodeDef node1 = MakeNodeConst("node1");
map["node1"] = std::make_unique<GenNode>(&node1);
node1.add_input("node1");
auto gn1 = map["node1"].get();
EXPECT_THAT(gn1->ParseInputs(&map),
Eq(Status(absl::StatusCode::kInvalidArgument,
"Node 'node1' has a non-control "
"input from 'node1' at index 0 but its operation "
"'Const' defines only 0 inputs.")));
NodeDef node2 = MakeNodeConst("node2");
map["node2"] = std::make_unique<GenNode>(&node2);
NodeDef node3 = MakeNodeSub("node3", "node1", "node2");
map["node3"] = std::make_unique<GenNode>(&node3);
node3.add_input("node1");
auto gn3 = map["node3"].get();
EXPECT_THAT(gn3->ParseInputs(&map),
Eq(Status(absl::StatusCode::kInvalidArgument,
"Node 'node3' has a non-control "
"input from 'node1' at index 2 but its operation "
"'Sub' defines only 2 inputs.")));
}
TEST(GenNodeTest, ParseNodeControlInputsAlwaysOk) {
GenNodeMap map;
NodeDef node1 = MakeNodeConst("node1");
map["node1"] = std::make_unique<GenNode>(&node1);
node1.add_input("^node1");
auto gn1 = map["node1"].get();
ASSERT_THAT(gn1->ParseInputs(&map), Eq(absl::OkStatus()));
EXPECT_THAT(DumpLinkMap(gn1->links()), ElementsAre(
"iC: node1[oC]",
"oC: node1[iC]"
));
}
TEST(GenNodeTest, ParseNodeInvalidInput) {
GenNodeMap map;
NodeDef node1 = MakeNodeAddN("node1", "node2", "node3");
map["node1"] = std::make_unique<GenNode>(&node1);
node1.add_input("node1");
auto gn1 = map["node1"].get();
ASSERT_THAT(
gn1->ParseInputs(&map),
Eq(Status(
absl::StatusCode::kInvalidArgument,
"Node 'node1' input 0 refers to a non-existing node 'node2'.")));
}
TEST(GenNodeTest, BuildGraphInMap) {
GraphDef graph;
(*graph.add_node()) = MakeNodeConst("node1");
(*graph.add_node()) = MakeNodeSub("node2", "node3:1", "node3:0");
(*graph.add_node()) =
MakeNodeBroadcastGradientArgs("node3", "node1", "node2");
GenNodeMap map;
ASSERT_THAT(GenNode::BuildGraphInMap(graph, &map), Eq(absl::OkStatus()));
ASSERT_THAT(map.find("node1"), Ne(map.end()));
ASSERT_THAT(map.find("node2"), Ne(map.end()));
ASSERT_THAT(map.find("node3"), Ne(map.end()));
EXPECT_THAT(map["node1"]->name(), Eq("node1"));
EXPECT_THAT(map["node2"]->name(), Eq("node2"));
EXPECT_THAT(map["node3"]->name(), Eq("node3"));
EXPECT_THAT(DumpLinkMap(map["node1"]->links()), ElementsAre(
"o0: node3[i0]"
));
EXPECT_THAT(DumpLinkMap(map["node2"]->links()), ElementsAre(
"i0: node3[o1]",
"i1: node3[o0]",
"o0: node3[i1]"
));
EXPECT_THAT(DumpLinkMap(map["node3"]->links()), ElementsAre(
"i0: node1[o0]",
"i1: node2[o0]",
"o0: node2[i1]",
"o1: node2[i0]"
));
}
TEST(GenNodeTest, BuildGraphInMapDuplicateNode) {
GraphDef graph;
(*graph.add_node()) = MakeNodeConst("node1");
(*graph.add_node()) = MakeNodeConst("node1");
GenNodeMap map;
ASSERT_THAT(GenNode::BuildGraphInMap(graph, &map),
Eq(Status(absl::StatusCode::kInvalidArgument,
"Duplicate node name 'node1'.")));
}
TEST(GenNodeTest, BuildGraphInMapParseError) {
GraphDef graph;
(*graph.add_node()) = MakeNodeConst("node1");
(*graph.add_node()) = MakeNodeSub("node2", "node3:1", "node3:0");
GenNodeMap map;
ASSERT_THAT(
GenNode::BuildGraphInMap(graph, &map),
Eq(Status(
absl::StatusCode::kInvalidArgument,
"Node 'node2' input 0 refers to a non-existing node 'node3'.")));
}
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/graph_analyzer/gen_node.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/graph_analyzer/gen_node_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
915dbca6-57f2-4dc1-bd74-29bf051fe4d0 | cpp | google/tensorstore | collecting_sender | tensorstore/util/execution/collecting_sender.h | tensorstore/util/execution/collecting_sender_test.cc | #ifndef TENSORSTORE_UTIL_EXECUTION_COLLECTING_SENDER_H_
#define TENSORSTORE_UTIL_EXECUTION_COLLECTING_SENDER_H_
#include <utility>
#include "tensorstore/util/execution/execution.h"
namespace tensorstore {
namespace internal {
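// Adapts a flow sender (a stream of set_value calls ending in set_done) into
// a single-value sender: every element is appended to `Container`, and the
// full container is delivered to the wrapped receiver in one set_value call
// when the flow completes. Errors are forwarded unchanged.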
template <typename Container, typename SingleReceiver>
struct CollectingReceiver {
SingleReceiver receiver;
Container container;
template <typename CancelReceiver>
friend void set_starting(CollectingReceiver& self, CancelReceiver cancel) {
}
template <typename... V>
friend void set_value(CollectingReceiver& self, V... v) {
self.container.emplace_back(std::move(v)...);
}
template <typename E>
friend void set_error(CollectingReceiver& self, E e) {
execution::set_error(self.receiver, std::move(e));
}
friend void set_done(CollectingReceiver& self) {
execution::set_value(self.receiver, std::move(self.container));
}
friend void set_stopping(CollectingReceiver& self) {}
};
template <typename Container, typename Sender>
struct CollectingSender {
Sender sender;
template <typename Receiver>
friend void submit(CollectingSender& self, Receiver receiver) {
execution::submit(self.sender, CollectingReceiver<Container, Receiver>{
std::move(receiver)});
}
};
template <typename Container, typename Sender>
CollectingSender<Container, Sender> MakeCollectingSender(Sender sender) {
return {std::move(sender)};
}
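// Usage sketch, mirroring the tests in the companion _test.cc (`flow_sender`
// and `receiver` are hypothetical):
//
//   tensorstore::execution::submit(
//       MakeCollectingSender<std::vector<int>>(std::move(flow_sender)),
//       std::move(receiver));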
}
}
#endif | #include "tensorstore/util/execution/collecting_sender.h"
#include <ostream>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/str_join.h"
#include "tensorstore/util/execution/execution.h"
#include "tensorstore/util/execution/sender.h"
#include "tensorstore/util/execution/sender_testutil.h"
#include "tensorstore/util/execution/sender_util.h"
#include "tensorstore/util/span.h"
namespace {
struct X {
explicit X(int value) : value(value) {}
int value;
friend std::ostream& operator<<(std::ostream& os, const std::vector<X>& vec) {
for (auto v : vec) {
os << v.value << ' ';
}
return os;
}
};
TEST(CollectingSenderTest, SuccessX) {
std::vector<std::string> log;
std::vector<int> input{1, 2, 3, 4};
tensorstore::execution::submit(
tensorstore::internal::MakeCollectingSender<std::vector<X>>(
tensorstore::RangeFlowSender<tensorstore::span<int>>{input}),
tensorstore::LoggingReceiver{&log});
EXPECT_THAT(log, ::testing::ElementsAre("set_value: 1 2 3 4 "));
}
struct Y {
explicit Y(int value) : value(value) {}
int value;
template <typename Sink>
friend void AbslStringify(Sink& sink, const Y& x) {
absl::Format(&sink, "%d", x.value);
}
template <typename Sink>
friend void AbslStringify(Sink& sink, const std::vector<Y>& vec) {
sink.Append(absl::StrJoin(vec, " "));
}
};
TEST(CollectingSenderTest, SuccessY) {
std::vector<std::string> log;
std::vector<int> input{1, 2, 3, 4};
tensorstore::execution::submit(
tensorstore::internal::MakeCollectingSender<std::vector<Y>>(
tensorstore::RangeFlowSender<tensorstore::span<int>>{input}),
tensorstore::LoggingReceiver{&log});
EXPECT_THAT(log, ::testing::ElementsAre("set_value: 1 2 3 4"));
}
TEST(CollectingSenderTest, Error) {
std::vector<std::string> log;
tensorstore::execution::submit(
tensorstore::internal::MakeCollectingSender<std::vector<X>>(
tensorstore::FlowSingleSender<tensorstore::ErrorSender<int>>{5}),
tensorstore::LoggingReceiver{&log});
EXPECT_THAT(log, ::testing::ElementsAre("set_error: 5"));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/execution/collecting_sender.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/execution/collecting_sender_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
65d0efa3-bde3-4dea-bcb8-1f5223d73655 | cpp | tensorflow/tensorflow | byte_size | tensorflow/core/data/service/byte_size.cc | tensorflow/core/data/service/byte_size_test.cc | #include "tensorflow/core/data/service/byte_size.h"
#include <cstddef>
#include <string>
#include "absl/strings/str_cat.h"
namespace tensorflow {
namespace data {
size_t ByteSize::ToUnsignedBytes() const { return bytes_; }
double ByteSize::ToDoubleBytes() const { return static_cast<double>(bytes_); }
double ByteSize::ToDoubleKB() const { return *this / ByteSize::KB(1); }
double ByteSize::ToDoubleMB() const { return *this / ByteSize::MB(1); }
double ByteSize::ToDoubleGB() const { return *this / ByteSize::GB(1); }
double ByteSize::ToDoubleTB() const { return *this / ByteSize::TB(1); }
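// Formats the size using the largest unit that keeps the value at or above
// one, falling back to raw bytes below 1KB. TB is the largest unit, so very
// large sizes still print in TB (e.g. "512B", "1.5KB", "1024TB").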
std::string ByteSize::DebugString() const {
if (*this < ByteSize::KB(1)) {
return absl::StrCat(ToUnsignedBytes(), "B");
}
if (*this < ByteSize::MB(1)) {
return absl::StrCat(ToDoubleKB(), "KB");
}
if (*this < ByteSize::GB(1)) {
return absl::StrCat(ToDoubleMB(), "MB");
}
if (*this < ByteSize::TB(1)) {
return absl::StrCat(ToDoubleGB(), "GB");
}
return absl::StrCat(ToDoubleTB(), "TB");
}
}
} | #include "tensorflow/core/data/service/byte_size.h"
#include <cstddef>
#include <string>
#include <vector>
#include "absl/algorithm/container.h"
#include "tsl/platform/test.h"
namespace tensorflow {
namespace data {
namespace {
using ::testing::Eq;
using ::testing::Not;
TEST(ByteSizeTest, Constructors) {
EXPECT_EQ(ByteSize::Bytes(0), ByteSize::Bytes(0));
EXPECT_EQ(ByteSize::Bytes(1), ByteSize::Bytes(1));
EXPECT_EQ(ByteSize::Bytes(1024), ByteSize::Bytes(1024));
EXPECT_EQ(ByteSize::Bytes(1024), ByteSize::KB(1));
EXPECT_EQ(ByteSize::Bytes(size_t{1} << 63), ByteSize::TB(size_t{1} << 23));
EXPECT_EQ(ByteSize::KB(0), ByteSize::Bytes(0));
EXPECT_EQ(ByteSize::KB(1), ByteSize::Bytes(size_t{1} << 10));
EXPECT_EQ(ByteSize::KB(0.9), ByteSize::Bytes(1024 * 0.9));
EXPECT_EQ(ByteSize::KB(1.5), ByteSize::Bytes(1024 * 1.5));
EXPECT_EQ(ByteSize::KB(1.5), ByteSize::KB(1.5));
EXPECT_EQ(ByteSize::KB(1024), ByteSize::MB(1));
EXPECT_EQ(ByteSize::MB(0), ByteSize::Bytes(0));
EXPECT_EQ(ByteSize::MB(1), ByteSize::Bytes(size_t{1} << 20));
EXPECT_EQ(ByteSize::MB(0.9), ByteSize::Bytes(size_t{1} << 20) * 0.9);
EXPECT_EQ(ByteSize::MB(1.5), ByteSize::Bytes(size_t{1} << 20) * 1.5);
EXPECT_EQ(ByteSize::MB(1.5), ByteSize::MB(1.5));
EXPECT_EQ(ByteSize::MB(1024), ByteSize::GB(1));
EXPECT_EQ(ByteSize::GB(0), ByteSize::Bytes(0));
EXPECT_EQ(ByteSize::GB(1), ByteSize::Bytes(size_t{1} << 30));
EXPECT_EQ(ByteSize::GB(0.9), ByteSize::Bytes(size_t{1} << 30) * 0.9);
EXPECT_EQ(ByteSize::GB(1.5), ByteSize::Bytes(size_t{1} << 30) * 1.5);
EXPECT_EQ(ByteSize::GB(1.5), ByteSize::GB(1.5));
EXPECT_EQ(ByteSize::GB(1024), ByteSize::TB(1));
EXPECT_EQ(ByteSize::TB(0), ByteSize::Bytes(0));
EXPECT_EQ(ByteSize::TB(1), ByteSize::Bytes(size_t{1} << 40));
EXPECT_EQ(ByteSize::TB(0.9), ByteSize::Bytes(size_t{1} << 40) * 0.9);
EXPECT_EQ(ByteSize::TB(1.5), ByteSize::Bytes(size_t{1} << 40) * 1.5);
EXPECT_EQ(ByteSize::TB(1.5), ByteSize::TB(1.5));
EXPECT_EQ(ByteSize::TB(1024), ByteSize::TB(1024));
EXPECT_EQ(ByteSize::TB(size_t{1} << 23), ByteSize::TB(size_t{1} << 23));
EXPECT_THAT(ByteSize::Bytes(0), Not(Eq(ByteSize::Bytes(1))));
EXPECT_THAT(ByteSize::Bytes(1025), Not(Eq(ByteSize::KB(1))));
EXPECT_THAT(ByteSize::KB(1), Not(Eq(ByteSize::MB(1))));
EXPECT_THAT(ByteSize::MB(1), Not(Eq(ByteSize::GB(1))));
EXPECT_THAT(ByteSize::GB(1), Not(Eq(ByteSize::TB(1))));
EXPECT_THAT(ByteSize::TB(1), Not(Eq(ByteSize::TB(2))));
}
TEST(ByteSizeTest, ConstexprConstruction) {
constexpr ByteSize default_byte_size;
EXPECT_EQ(default_byte_size, ByteSize::Bytes(0));
constexpr ByteSize bytes = ByteSize::Bytes(1);
EXPECT_EQ(bytes, ByteSize::Bytes(1));
constexpr ByteSize kb = ByteSize::KB(1);
EXPECT_EQ(kb, ByteSize::KB(1));
constexpr ByteSize mb = ByteSize::MB(1);
EXPECT_EQ(mb, ByteSize::MB(1));
constexpr ByteSize gb = ByteSize::GB(1);
EXPECT_EQ(gb, ByteSize::GB(1));
constexpr ByteSize tb = ByteSize::TB(1);
EXPECT_EQ(tb, ByteSize::TB(1));
constexpr ByteSize tb_copy(tb);
EXPECT_EQ(tb_copy, tb);
}
TEST(ByteSizeTest, ConvertToBytes) {
EXPECT_EQ(ByteSize::Bytes(0).ToUnsignedBytes(), 0);
EXPECT_DOUBLE_EQ(ByteSize::Bytes(0).ToDoubleBytes(), 0);
EXPECT_DOUBLE_EQ(ByteSize::Bytes(0).ToDoubleKB(), 0);
EXPECT_DOUBLE_EQ(ByteSize::Bytes(0).ToDoubleMB(), 0);
EXPECT_DOUBLE_EQ(ByteSize::Bytes(0).ToDoubleGB(), 0);
EXPECT_DOUBLE_EQ(ByteSize::Bytes(0).ToDoubleTB(), 0);
EXPECT_EQ(ByteSize::Bytes(1).ToUnsignedBytes(), 1);
EXPECT_DOUBLE_EQ(ByteSize::Bytes(1).ToDoubleBytes(), 1.0);
EXPECT_DOUBLE_EQ(ByteSize::Bytes(1).ToDoubleKB(), 1.0 / 1024);
EXPECT_DOUBLE_EQ(ByteSize::Bytes(1).ToDoubleMB(), 1.0 / 1024 / 1024);
EXPECT_DOUBLE_EQ(ByteSize::Bytes(1).ToDoubleGB(), 1.0 / 1024 / 1024 / 1024);
EXPECT_DOUBLE_EQ(ByteSize::Bytes(1).ToDoubleTB(),
1.0 / 1024 / 1024 / 1024 / 1024);
EXPECT_EQ(ByteSize::KB(0.25).ToUnsignedBytes(), 0.25 * (size_t{1} << 10));
EXPECT_DOUBLE_EQ(ByteSize::KB(0.25).ToDoubleBytes(), 0.25 * 1024);
EXPECT_DOUBLE_EQ(ByteSize::KB(0.25).ToDoubleKB(), 0.25);
EXPECT_DOUBLE_EQ(ByteSize::KB(0.25).ToDoubleMB(), 0.25 / 1024);
EXPECT_DOUBLE_EQ(ByteSize::KB(0.25).ToDoubleGB(), 0.25 / 1024 / 1024);
EXPECT_DOUBLE_EQ(ByteSize::KB(0.25).ToDoubleTB(), 0.25 / 1024 / 1024 / 1024);
EXPECT_EQ(ByteSize::MB(0.5).ToUnsignedBytes(), 0.5 * (size_t{1} << 20));
EXPECT_DOUBLE_EQ(ByteSize::MB(0.5).ToDoubleBytes(), 0.5 * 1024 * 1024);
EXPECT_DOUBLE_EQ(ByteSize::MB(0.5).ToDoubleKB(), 0.5 * 1024);
EXPECT_DOUBLE_EQ(ByteSize::MB(0.5).ToDoubleMB(), 0.5);
EXPECT_DOUBLE_EQ(ByteSize::MB(0.5).ToDoubleGB(), 0.5 / 1024);
EXPECT_DOUBLE_EQ(ByteSize::MB(0.5).ToDoubleTB(), 0.5 / 1024 / 1024);
EXPECT_EQ(ByteSize::GB(10).ToUnsignedBytes(), 10.0 * (size_t{1} << 30));
EXPECT_DOUBLE_EQ(ByteSize::GB(10).ToDoubleBytes(), 10.0 * 1024 * 1024 * 1024);
EXPECT_DOUBLE_EQ(ByteSize::GB(10).ToDoubleKB(), 10.0 * 1024 * 1024);
EXPECT_DOUBLE_EQ(ByteSize::GB(10).ToDoubleMB(), 10.0 * 1024);
EXPECT_DOUBLE_EQ(ByteSize::GB(10).ToDoubleGB(), 10.0);
EXPECT_DOUBLE_EQ(ByteSize::GB(10).ToDoubleTB(), 10.0 / 1024);
EXPECT_EQ(ByteSize::TB(1024).ToUnsignedBytes(), 1024 * (size_t{1} << 40));
EXPECT_DOUBLE_EQ(ByteSize::TB(1024).ToDoubleBytes(),
1024.0 * 1024 * 1024 * 1024 * 1024);
EXPECT_DOUBLE_EQ(ByteSize::TB(1024).ToDoubleKB(),
1024.0 * 1024 * 1024 * 1024);
EXPECT_DOUBLE_EQ(ByteSize::TB(1024).ToDoubleMB(), 1024.0 * 1024 * 1024);
EXPECT_DOUBLE_EQ(ByteSize::TB(1024).ToDoubleGB(), 1024.0 * 1024);
EXPECT_DOUBLE_EQ(ByteSize::TB(1024).ToDoubleTB(), 1024.0);
}
TEST(ByteSizeTest, Arithmetics) {
EXPECT_EQ(ByteSize::Bytes(0) + ByteSize::Bytes(0), ByteSize::Bytes(0));
EXPECT_EQ(ByteSize::Bytes(0) + ByteSize::Bytes(1), ByteSize::Bytes(1));
EXPECT_EQ(ByteSize::Bytes(512) + ByteSize::Bytes(512), ByteSize::KB(1));
EXPECT_EQ(ByteSize::Bytes(512) + ByteSize::KB(1), ByteSize::KB(1.5));
EXPECT_EQ(ByteSize::KB(0.5) + ByteSize::KB(1), ByteSize::KB(1.5));
EXPECT_EQ(ByteSize::MB(1) + ByteSize::KB(512), ByteSize::MB(1.5));
EXPECT_EQ(ByteSize::MB(1) + ByteSize::Bytes(512), ByteSize::Bytes(1049088));
EXPECT_EQ(ByteSize::GB(0.5) + ByteSize::MB(256) + ByteSize::MB(256),
ByteSize::GB(1));
std::vector<ByteSize> GBs(1024, ByteSize::GB(1));
EXPECT_EQ(absl::c_accumulate(GBs, ByteSize::Bytes(0)), ByteSize::TB(1));
EXPECT_EQ(ByteSize::TB(1) + ByteSize::TB(0.5) + ByteSize::GB(512),
ByteSize::TB(2));
EXPECT_EQ(ByteSize::Bytes(0) - ByteSize::Bytes(0), ByteSize::Bytes(0));
EXPECT_EQ(ByteSize::KB(1) - ByteSize::Bytes(512), ByteSize::KB(0.5));
EXPECT_EQ(ByteSize::MB(1) - ByteSize::KB(512) - ByteSize::KB(512),
ByteSize::MB(0));
EXPECT_EQ(ByteSize::GB(1) - ByteSize::MB(512), ByteSize::GB(0.5));
EXPECT_EQ(ByteSize::GB(0.5) - ByteSize::MB(512), ByteSize::GB(0));
EXPECT_EQ(ByteSize::GB(1) - ByteSize::MB(512) - ByteSize::MB(512),
ByteSize::GB(0));
EXPECT_EQ(ByteSize::TB(1) - ByteSize::GB(512) - ByteSize::GB(512),
ByteSize::GB(0));
EXPECT_EQ(ByteSize::Bytes(0) - ByteSize::Bytes(1), ByteSize::Bytes(0));
EXPECT_EQ(ByteSize::Bytes(0) - ByteSize::GB(1), ByteSize::Bytes(0));
EXPECT_EQ(ByteSize::MB(1) - ByteSize::GB(1), ByteSize::Bytes(0));
EXPECT_EQ(ByteSize::Bytes(0) * 0, ByteSize::Bytes(0));
EXPECT_EQ(ByteSize::KB(1) * 0, ByteSize::Bytes(0));
EXPECT_EQ(ByteSize::MB(1) * 0, ByteSize::Bytes(0));
EXPECT_EQ(ByteSize::GB(1) * 0, ByteSize::Bytes(0));
EXPECT_EQ(ByteSize::TB(1) * 0, ByteSize::Bytes(0));
EXPECT_EQ(ByteSize::Bytes(1) * 1024, ByteSize::KB(1));
EXPECT_EQ(ByteSize::KB(1) * 1024, ByteSize::MB(1));
EXPECT_EQ(ByteSize::MB(1) * 1024, ByteSize::GB(1));
EXPECT_EQ(ByteSize::GB(1) * 1024, ByteSize::TB(1));
EXPECT_EQ(ByteSize::Bytes(1) * 1.1, ByteSize::Bytes(1));
EXPECT_EQ(ByteSize::KB(1) * 1.2, ByteSize::KB(1.2));
EXPECT_EQ(ByteSize::MB(1) * 1.3, ByteSize::MB(1.3));
EXPECT_EQ(ByteSize::GB(1) * 1.4, ByteSize::GB(1.4));
EXPECT_EQ(ByteSize::TB(1) * 1.5, ByteSize::TB(1.5));
EXPECT_EQ(ByteSize::KB(1) * 0.5, ByteSize::Bytes(512));
EXPECT_EQ(ByteSize::MB(1) * 0.5, ByteSize::KB(512));
EXPECT_EQ(ByteSize::GB(1) * 0.5, ByteSize::MB(512));
EXPECT_EQ(ByteSize::TB(1) * 0.25, ByteSize::GB(256));
EXPECT_EQ(1024 * ByteSize::Bytes(1), ByteSize::KB(1));
EXPECT_EQ(1024 * ByteSize::KB(1), ByteSize::MB(1));
EXPECT_EQ(1024 * ByteSize::MB(1), ByteSize::GB(1));
EXPECT_EQ(1024 * ByteSize::GB(1), ByteSize::TB(1));
EXPECT_EQ(0.9 * ByteSize::TB(1), ByteSize::GB(921.6));
EXPECT_EQ(0 * ByteSize::TB(1), ByteSize::Bytes(0));
EXPECT_EQ(ByteSize::Bytes(0) / 1, ByteSize::Bytes(0));
EXPECT_EQ(ByteSize::KB(1) / 2, ByteSize::KB(0.5));
EXPECT_EQ(ByteSize::MB(1) / 2, ByteSize::KB(512));
EXPECT_EQ(ByteSize::GB(1) / 2, ByteSize::MB(512));
EXPECT_EQ(ByteSize::TB(1.5) / 2, ByteSize::GB(768));
EXPECT_EQ(ByteSize::KB(1) / 0.5, ByteSize::KB(2));
EXPECT_EQ(ByteSize::MB(1) / 0.5, ByteSize::MB(2));
EXPECT_EQ(ByteSize::GB(1) / 0.5, ByteSize::GB(2));
EXPECT_EQ(ByteSize::TB(1) / 0.25, ByteSize::TB(4));
EXPECT_DOUBLE_EQ(ByteSize::Bytes(0) / ByteSize::KB(1), 0.0);
EXPECT_DOUBLE_EQ(ByteSize::Bytes(1) / ByteSize::TB(1),
1.0 / 1024 / 1024 / 1024 / 1024);
EXPECT_DOUBLE_EQ(ByteSize::KB(1) / ByteSize::KB(2), 0.5);
EXPECT_DOUBLE_EQ(ByteSize::KB(512) / ByteSize::MB(1), 0.5);
EXPECT_DOUBLE_EQ(ByteSize::KB(1) / ByteSize::MB(1), 1.0 / 1024.0);
EXPECT_DOUBLE_EQ(ByteSize::MB(1) / ByteSize::GB(1), 1.0 / 1024.0);
EXPECT_DOUBLE_EQ(ByteSize::GB(1) / ByteSize::TB(1), 1.0 / 1024.0);
}
TEST(ByteSizeTest, Assignments) {
ByteSize byte_size;
EXPECT_EQ(byte_size, ByteSize::Bytes(0));
byte_size = ByteSize::Bytes(1);
EXPECT_EQ(byte_size, ByteSize::Bytes(1));
for (size_t i = 0; i < 1023; ++i) {
byte_size += ByteSize::Bytes(1);
}
EXPECT_EQ(byte_size, ByteSize::KB(1));
for (size_t i = 0; i < 10; ++i) {
byte_size *= 2;
}
EXPECT_EQ(byte_size, ByteSize::MB(1));
byte_size *= 1024 * 1024;
EXPECT_EQ(byte_size, ByteSize::TB(1));
for (size_t i = 0; i < 10; ++i) {
byte_size /= 2;
}
EXPECT_EQ(byte_size, ByteSize::GB(1));
for (size_t i = 0; i < 4; ++i) {
byte_size -= ByteSize::MB(256);
}
EXPECT_EQ(byte_size, ByteSize::Bytes(0));
byte_size -= ByteSize::Bytes(1);
EXPECT_EQ(byte_size, ByteSize::Bytes(0));
}
TEST(ByteSizeTest, Comparisons) {
EXPECT_LE(ByteSize::Bytes(0), ByteSize::Bytes(0));
EXPECT_LT(ByteSize::Bytes(0), ByteSize::Bytes(1));
EXPECT_LE(ByteSize::Bytes(0), ByteSize::Bytes(1));
EXPECT_LT(ByteSize::Bytes(1), ByteSize::Bytes(1024));
EXPECT_LE(ByteSize::Bytes(1), ByteSize::Bytes(1024));
EXPECT_LT(ByteSize::Bytes(1024), ByteSize::Bytes(1024 * 1024));
EXPECT_LE(ByteSize::Bytes(1024), ByteSize::Bytes(1024 * 1024));
EXPECT_LT(ByteSize::Bytes(1024), ByteSize::KB(1.1));
EXPECT_LE(ByteSize::Bytes(1024), ByteSize::KB(1.1));
EXPECT_LE(ByteSize::KB(0), ByteSize::Bytes(0));
EXPECT_LE(ByteSize::KB(1), ByteSize::Bytes(1024));
EXPECT_LT(ByteSize::KB(0), ByteSize::Bytes(1));
EXPECT_LE(ByteSize::KB(0), ByteSize::Bytes(1));
EXPECT_LT(ByteSize::KB(0.9), ByteSize::Bytes(1024));
EXPECT_LE(ByteSize::KB(0.9), ByteSize::Bytes(1024));
EXPECT_LT(ByteSize::KB(1), ByteSize::KB(1024));
EXPECT_LE(ByteSize::KB(1), ByteSize::KB(1024));
EXPECT_LT(ByteSize::KB(1), ByteSize::MB(1));
EXPECT_LE(ByteSize::KB(1), ByteSize::MB(1));
EXPECT_LT(ByteSize::KB(1024), ByteSize::MB(1.1));
EXPECT_LE(ByteSize::KB(1024), ByteSize::MB(1.1));
EXPECT_LE(ByteSize::MB(0), ByteSize::Bytes(0));
EXPECT_LT(ByteSize::MB(0), ByteSize::Bytes(1));
EXPECT_LE(ByteSize::MB(0), ByteSize::Bytes(1));
EXPECT_LT(ByteSize::MB(0.9), ByteSize::KB(1024));
EXPECT_LE(ByteSize::MB(0.9), ByteSize::KB(1024));
EXPECT_LT(ByteSize::MB(1), ByteSize::MB(1024));
EXPECT_LE(ByteSize::MB(1), ByteSize::MB(1024));
EXPECT_LT(ByteSize::MB(1), ByteSize::GB(1));
EXPECT_LE(ByteSize::MB(1), ByteSize::GB(1));
EXPECT_LT(ByteSize::MB(1024), ByteSize::GB(1.1));
EXPECT_LE(ByteSize::MB(1024), ByteSize::GB(1.1));
EXPECT_LE(ByteSize::GB(0), ByteSize::Bytes(0));
EXPECT_LT(ByteSize::GB(0), ByteSize::Bytes(1));
EXPECT_LE(ByteSize::GB(0), ByteSize::Bytes(1));
EXPECT_LT(ByteSize::GB(0.9), ByteSize::MB(1024));
EXPECT_LE(ByteSize::GB(0.9), ByteSize::MB(1024));
EXPECT_LT(ByteSize::GB(1), ByteSize::GB(1024));
EXPECT_LE(ByteSize::GB(1), ByteSize::GB(1024));
EXPECT_LT(ByteSize::GB(1), ByteSize::TB(1));
EXPECT_LE(ByteSize::GB(1), ByteSize::TB(1));
EXPECT_LT(ByteSize::GB(1024), ByteSize::TB(1.1));
EXPECT_LE(ByteSize::GB(1024), ByteSize::TB(1.1));
EXPECT_LE(ByteSize::TB(0), ByteSize::Bytes(0));
EXPECT_LT(ByteSize::TB(0), ByteSize::Bytes(1));
EXPECT_LE(ByteSize::TB(0), ByteSize::Bytes(1));
EXPECT_LT(ByteSize::TB(0.9), ByteSize::GB(1024));
EXPECT_LE(ByteSize::TB(0.9), ByteSize::GB(1024));
EXPECT_LT(ByteSize::TB(1), ByteSize::TB(1024));
EXPECT_LE(ByteSize::TB(1), ByteSize::TB(1024));
EXPECT_LT(ByteSize::TB(1024), ByteSize::TB(1025));
EXPECT_LE(ByteSize::TB(1024), ByteSize::TB(1025));
EXPECT_GT(ByteSize::TB(1), ByteSize::GB(1));
EXPECT_GT(ByteSize::GB(1), ByteSize::MB(1));
EXPECT_GT(ByteSize::MB(1), ByteSize::KB(1));
EXPECT_GT(ByteSize::KB(1), ByteSize::Bytes(1));
EXPECT_GT(ByteSize::Bytes(1), ByteSize::Bytes(0));
EXPECT_GT(ByteSize::TB(1), ByteSize::GB(1));
EXPECT_GT(ByteSize::TB(1), ByteSize::GB(1) + ByteSize::MB(1) +
ByteSize::KB(1) + ByteSize::Bytes(1));
EXPECT_GT(ByteSize::GB(1), 0.0000001 * ByteSize::TB(1));
EXPECT_GT(ByteSize::MB(1), ByteSize::KB(1) * 1023);
EXPECT_GT(ByteSize::KB(1), ByteSize::KB(3) / 4);
EXPECT_GT(ByteSize::Bytes(1), ByteSize::TB(0));
EXPECT_GE(ByteSize::TB(0.5), ByteSize::GB(0.5));
EXPECT_GE(ByteSize::GB(0.5), ByteSize::MB(0.5));
EXPECT_GE(ByteSize::MB(0.5), ByteSize::KB(0.5));
EXPECT_GE(ByteSize::KB(0.5), ByteSize::Bytes(1));
EXPECT_GE(ByteSize::Bytes(1), ByteSize::Bytes(0));
EXPECT_GE(ByteSize::TB(0), ByteSize::Bytes(0));
EXPECT_GE(ByteSize::GB(0), ByteSize::Bytes(0));
EXPECT_GE(ByteSize::MB(0), ByteSize::Bytes(0));
EXPECT_GE(ByteSize::KB(0), ByteSize::Bytes(0));
EXPECT_GE(ByteSize::Bytes(0), ByteSize::Bytes(0));
}
TEST(ByteSizeTest, DebugString) {
EXPECT_EQ(ByteSize::Bytes(0).DebugString(), "0B");
EXPECT_EQ(ByteSize::Bytes(1).DebugString(), "1B");
EXPECT_EQ(ByteSize::Bytes(size_t{1} << 10).DebugString(), "1KB");
EXPECT_EQ(ByteSize::Bytes(size_t{1} << 20).DebugString(), "1MB");
EXPECT_EQ(ByteSize::Bytes(size_t{1} << 30).DebugString(), "1GB");
EXPECT_EQ(ByteSize::Bytes(size_t{1} << 40).DebugString(), "1TB");
EXPECT_EQ(ByteSize::KB(0.5).DebugString(), "512B");
EXPECT_EQ(ByteSize::KB(1).DebugString(), "1KB");
EXPECT_EQ(ByteSize::KB(1.5).DebugString(), "1.5KB");
EXPECT_EQ(ByteSize::KB(1024).DebugString(), "1MB");
EXPECT_EQ(ByteSize::KB(1024 * 1024).DebugString(), "1GB");
EXPECT_EQ(ByteSize::KB(1024 * 1024 * 1024).DebugString(), "1TB");
EXPECT_EQ(ByteSize::MB(0.5).DebugString(), "512KB");
EXPECT_EQ(ByteSize::MB(1).DebugString(), "1MB");
EXPECT_EQ(ByteSize::MB(1.5).DebugString(), "1.5MB");
EXPECT_EQ(ByteSize::MB(1024).DebugString(), "1GB");
EXPECT_EQ(ByteSize::MB(1024 * 1024).DebugString(), "1TB");
EXPECT_EQ(ByteSize::GB(0.5).DebugString(), "512MB");
EXPECT_EQ(ByteSize::GB(1).DebugString(), "1GB");
EXPECT_EQ(ByteSize::GB(1.5).DebugString(), "1.5GB");
EXPECT_EQ(ByteSize::GB(1024).DebugString(), "1TB");
EXPECT_EQ(ByteSize::TB(0.5).DebugString(), "512GB");
EXPECT_EQ(ByteSize::TB(1).DebugString(), "1TB");
EXPECT_EQ(ByteSize::TB(1.5).DebugString(), "1.5TB");
EXPECT_EQ(ByteSize::TB(1024).DebugString(), "1024TB");
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/service/byte_size.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/service/byte_size_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
cc7acb88-4403-4bed-a00b-7e0ce9b987e8 | cpp | tensorflow/tensorflow | ffi | third_party/xla/xla/ffi/api/ffi.h | third_party/xla/xla/ffi/api/ffi_test.cc | #ifndef XLA_FFI_API_FFI_H_
#define XLA_FFI_API_FFI_H_
#ifdef XLA_FFI_FFI_H_
#error Two different XLA FFI implementations cannot be included together. \
See README.md for more details.
#endif
#include <algorithm>
#include <atomic>
#include <cassert>
#include <complex>
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <functional>
#include <iostream>
#include <limits>
#include <memory>
#include <numeric>
#include <optional>
#include <ostream>
#include <string>
#include <string_view>
#include <type_traits>
#include <utility>
#include <variant>
#include <vector>
#include "xla/ffi/api/c_api.h"
#include "xla/ffi/api/api.h"
namespace xla::ffi {
using TypeId = XLA_FFI_TypeId;
enum class DataType : uint8_t {
INVALID = XLA_FFI_DataType_INVALID,
PRED = XLA_FFI_DataType_PRED,
S8 = XLA_FFI_DataType_S8,
S16 = XLA_FFI_DataType_S16,
S32 = XLA_FFI_DataType_S32,
S64 = XLA_FFI_DataType_S64,
U8 = XLA_FFI_DataType_U8,
U16 = XLA_FFI_DataType_U16,
U32 = XLA_FFI_DataType_U32,
U64 = XLA_FFI_DataType_U64,
F16 = XLA_FFI_DataType_F16,
F32 = XLA_FFI_DataType_F32,
F64 = XLA_FFI_DataType_F64,
BF16 = XLA_FFI_DataType_BF16,
C64 = XLA_FFI_DataType_C64,
C128 = XLA_FFI_DataType_C128,
TOKEN = XLA_FFI_DataType_TOKEN,
F8E5M2 = XLA_FFI_DataType_F8E5M2,
F8E4M3 = XLA_FFI_DataType_F8E4M3,
F8E4M3FN = XLA_FFI_DataType_F8E4M3FN,
F8E4M3B11FNUZ = XLA_FFI_DataType_F8E4M3B11FNUZ,
F8E5M2FNUZ = XLA_FFI_DataType_F8E5M2FNUZ,
F8E4M3FNUZ = XLA_FFI_DataType_F8E4M3FNUZ,
F8E3M4 = XLA_FFI_DataType_F8E3M4,
};
inline constexpr DataType PRED = DataType::PRED;
inline constexpr DataType S8 = DataType::S8;
inline constexpr DataType S16 = DataType::S16;
inline constexpr DataType S32 = DataType::S32;
inline constexpr DataType S64 = DataType::S64;
inline constexpr DataType U8 = DataType::U8;
inline constexpr DataType U16 = DataType::U16;
inline constexpr DataType U32 = DataType::U32;
inline constexpr DataType U64 = DataType::U64;
inline constexpr DataType F16 = DataType::F16;
inline constexpr DataType F32 = DataType::F32;
inline constexpr DataType F64 = DataType::F64;
inline constexpr DataType BF16 = DataType::BF16;
inline constexpr DataType C64 = DataType::C64;
inline constexpr DataType C128 = DataType::C128;
inline constexpr DataType TOKEN = DataType::TOKEN;
inline constexpr DataType F8E5M2 = DataType::F8E5M2;
inline constexpr DataType F8E4M3 = DataType::F8E4M3;
inline constexpr DataType F8E4M3FN = DataType::F8E4M3FN;
inline constexpr DataType F8E4M3B11FNUZ = DataType::F8E4M3B11FNUZ;
inline constexpr DataType F8E5M2FNUZ = DataType::F8E5M2FNUZ;
inline constexpr DataType F8E4M3FNUZ = DataType::F8E4M3FNUZ;
inline constexpr DataType F8E3M4 = DataType::F8E3M4;
inline std::ostream& operator<<(std::ostream& os, const DataType dtype) {
return os << static_cast<XLA_FFI_DataType>(dtype);
}
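// Returns the size in bytes of one element of `dtype`. TOKEN and INVALID
// occupy no buffer storage and report 0; the 8-bit float formats (F8E5M2,
// F8E4M3, ...) each take a single byte.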
constexpr size_t ByteWidth(DataType dtype) {
switch (dtype) {
case DataType::INVALID:
case DataType::TOKEN:
return 0;
case DataType::PRED:
return 1;
case DataType::S8:
case DataType::U8:
case DataType::F8E5M2:
case DataType::F8E4M3:
case DataType::F8E4M3FN:
case DataType::F8E4M3B11FNUZ:
case DataType::F8E5M2FNUZ:
case DataType::F8E4M3FNUZ:
case DataType::F8E3M4:
return 1;
case DataType::S16:
case DataType::U16:
case DataType::F16:
case DataType::BF16:
return 2;
case DataType::S32:
case DataType::U32:
case DataType::F32:
return 4;
case DataType::S64:
case DataType::U64:
case DataType::F64:
return 8;
case DataType::C64:
return 8;
case DataType::C128:
return 16;
}
}
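// A minimal non-owning view over a contiguous sequence, in the spirit of
// std::span (which this header avoids, presumably to stay C++17-compatible).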
template <typename T>
class Span {
public:
constexpr Span() : data_(nullptr), size_(0) {}
Span(T* data, size_t size) : data_(data), size_(size) {}
Span(const std::vector<std::remove_const_t<T>>& vec)
: Span(vec.data(), vec.size()) {}
T& operator[](size_t index) const { return data_[index]; }
bool operator==(const Span<T>& other) const {
return size() == other.size() && std::equal(begin(), end(), other.begin());
}
T& front() const { return data_[0]; }
T& back() const { return data_[size_ - 1]; }
Span<T> first(size_t n) const { return Span<T>(data_, n); }
Span<T> last(size_t n) const { return Span<T>(data_ + size_ - n, n); }
size_t size() const { return size_; }
T* begin() const { return data_; }
T* end() const { return data_ + size_; }
private:
T* data_;
size_t size_;
};
enum class ErrorCode : uint8_t {
kOk = XLA_FFI_Error_Code_OK,
kCancelled = XLA_FFI_Error_Code_CANCELLED,
kUnknown = XLA_FFI_Error_Code_UNKNOWN,
kInvalidArgument = XLA_FFI_Error_Code_INVALID_ARGUMENT,
kDeadlineExceeded = XLA_FFI_Error_Code_DEADLINE_EXCEEDED,
kNotFound = XLA_FFI_Error_Code_NOT_FOUND,
kAlreadyExists = XLA_FFI_Error_Code_ALREADY_EXISTS,
kPermissionDenied = XLA_FFI_Error_Code_PERMISSION_DENIED,
kResourceExhausted = XLA_FFI_Error_Code_RESOURCE_EXHAUSTED,
kFailedPrecondition = XLA_FFI_Error_Code_FAILED_PRECONDITION,
kAborted = XLA_FFI_Error_Code_ABORTED,
kOutOfRange = XLA_FFI_Error_Code_OUT_OF_RANGE,
kUnimplemented = XLA_FFI_Error_Code_UNIMPLEMENTED,
kInternal = XLA_FFI_Error_Code_INTERNAL,
kUnavailable = XLA_FFI_Error_Code_UNAVAILABLE,
kDataLoss = XLA_FFI_Error_Code_DATA_LOSS,
kUnauthenticated = XLA_FFI_Error_Code_UNAUTHENTICATED,
};
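// A minimal status type carried across the FFI boundary: an ErrorCode plus a
// human-readable message, with kOk meaning success.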
class Error {
public:
Error() = default;
Error(ErrorCode errc, std::string message)
: errc_(errc), message_(std::move(message)) {}
Error(XLA_FFI_Error_Code errc, std::string message)
: Error(static_cast<ErrorCode>(errc), std::move(message)) {}
bool success() const { return errc_ == ErrorCode::kOk; }
bool failure() const { return !success(); }
std::optional<ErrorCode> errc() const { return errc_; }
const std::string& message() const { return message_; }
static Error Success() { return Error(); }
static Error Internal(std::string message) {
return Error(ErrorCode::kInternal, std::move(message));
}
static Error InvalidArgument(std::string message) {
return Error(ErrorCode::kInvalidArgument, std::move(message));
}
private:
ErrorCode errc_ = ErrorCode::kOk;
std::string message_;
};
template <typename E>
class Unexpected;
template <typename T, typename E>
class Expected {
public:
constexpr Expected(T value) : data_(std::move(value)) {}
constexpr Expected(Unexpected<E> u);
constexpr operator bool() const {
return has_value();
}
constexpr T& operator*() & { return value(); }
constexpr const T& operator*() const& { return value(); }
constexpr T&& operator*() && { return std::move(value()); }
constexpr const T& operator*() const&& { return std::move(value()); }
constexpr T* operator->() { return &value(); }
constexpr const T* operator->() const { return &value(); }
constexpr bool has_value() const { return std::holds_alternative<T>(data_); }
constexpr bool has_error() const { return std::holds_alternative<E>(data_); }
constexpr T& value() & { return std::get<T>(data_); }
constexpr const T& value() const& { return std::get<T>(data_); }
constexpr T&& value() && { return std::get<T>(std::move(data_)); }
constexpr const T& value() const&& { return std::get<T>(std::move(data_)); }
constexpr E& error() & { return std::get<E>(data_); }
constexpr const E& error() const& { return std::get<E>(data_); }
constexpr E&& error() && { return std::get<E>(std::move(data_)); }
constexpr const E&& error() const&& { return std::get<E>(std::move(data_)); }
private:
std::variant<T, E> data_;
};
template <typename E>
class Unexpected {
public:
constexpr Unexpected(E error) : error_(std::move(error)) {}
private:
template <typename, typename>
friend class Expected;
E error_;
};
Unexpected(const char*) -> Unexpected<std::string>;
template <typename T, typename E>
constexpr Expected<T, E>::Expected(Unexpected<E> u)
: data_(std::move(u.error_)) {}
template <typename T>
class ErrorOr : public Expected<T, Error> {
public:
using Expected<T, Error>::Expected;
};
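// `Expected`/`Unexpected` form a small std::expected-style result type on top
// of std::variant, and `ErrorOr<T>` fixes the error type to `Error`. A usage
// sketch (hypothetical function):
//
//   ErrorOr<int> ParsePositive(int v) {
//     if (v <= 0) return Unexpected(Error::InvalidArgument("not positive"));
//     return v;
//   }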
class Promise;
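// A one-shot promise/future pair: a Promise is completed at most once (via
// SetAvailable or SetError), and the Future created from it runs its OnReady
// callback exactly once, on whichever side loses the race between attaching
// the callback and completing the promise.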
class Future {
public:
explicit Future(const Promise& promise);
Future(Future&&) = default;
Future& operator=(Future&&) = default;
template <typename F>
void OnReady(F&& f);
private:
friend class Promise;
using Waiter = std::function<void(const std::optional<Error>& error)>;
enum class State : uint8_t { kPending, kAvailable, kError };
struct WaiterAndState {
static_assert(alignof(std::max_align_t) >= 8 && sizeof(Waiter*) == 8);
static constexpr uint64_t kStateMask = (1ull << 2) - 1;
static constexpr uint64_t kPointerMask = ~kStateMask;
WaiterAndState(Waiter* ptr, State state) {
value = (reinterpret_cast<uintptr_t>(ptr) & kPointerMask) |
(static_cast<uintptr_t>(state) & kStateMask);
}
WaiterAndState() : WaiterAndState(nullptr, State::kPending) {}
State state() const { return static_cast<State>(value & kStateMask); }
Waiter* waiter() const {
return reinterpret_cast<Waiter*>(value & kPointerMask);
}
uintptr_t value;
};
static_assert(std::atomic<WaiterAndState>::is_always_lock_free,
"WaiterAndState atomic must be lock-free");
struct Data {
std::atomic<WaiterAndState> waiter_and_state = WaiterAndState();
std::optional<Error> error;
};
std::shared_ptr<Data> data_;
};
class Promise {
public:
Promise() : data_(std::make_shared<Future::Data>()) {}
Promise(Promise&&) = default;
Promise& operator=(Promise&&) = default;
void SetAvailable();
void SetError(Error error);
private:
friend class Future;
void SetCompleted(Future::State state);
std::shared_ptr<Future::Data> data_;
};
inline Future::Future(const Promise& promise) : data_(promise.data_) {
assert(data_.use_count() == 2 &&
"Promise can be used to create at most one Future");
}
template <typename F>
void Future::OnReady(F&& f) {
static_assert(std::is_invocable_v<F, const std::optional<Error>&>,
"F must be compatible with Waiter signature");
WaiterAndState old_value =
data_->waiter_and_state.load(std::memory_order_acquire);
if (old_value.state() != State::kPending) {
f(data_->error);
return;
}
auto* waiter = new Waiter(std::forward<F>(f));
auto new_value = WaiterAndState(waiter, State::kPending);
while (!data_->waiter_and_state.compare_exchange_weak(
old_value, new_value, std::memory_order_acq_rel,
std::memory_order_acquire)) {
if (old_value.state() != State::kPending) {
assert(old_value.waiter() == nullptr);
(*waiter)(data_->error);
delete waiter;
return;
}
}
assert(old_value.state() == State::kPending);
}
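// OnReady attaches the callback with a CAS loop: if the promise completed
// between the initial load and the exchange, the heap-allocated waiter is
// invoked inline and freed here; otherwise its ownership passes to
// Promise::SetCompleted, which runs and deletes it.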
inline void Promise::SetAvailable() { SetCompleted(Future::State::kAvailable); }
inline void Promise::SetError(Error error) {
assert(error.errc() != ErrorCode::kOk);
assert(data_->error == std::nullopt);
data_->error = std::move(error);
SetCompleted(Future::State::kError);
}
inline void Promise::SetCompleted(Future::State state) {
Future::WaiterAndState old_value = data_->waiter_and_state.exchange(
{nullptr, state}, std::memory_order_acq_rel);
assert(old_value.state() == Future::State::kPending);
if (Future::Waiter* waiter = old_value.waiter()) {
(*waiter)(data_->error);
delete waiter;
}
}
class AnyBuffer {
public:
using Dimensions = Span<const int64_t>;
explicit AnyBuffer(const XLA_FFI_Buffer* buf) : buf_(buf) {
assert(buf != nullptr && "XLA_FFI_Buffer must be non-null");
}
DataType element_type() const { return DataType(buf_->dtype); }
Dimensions dimensions() const { return Dimensions(buf_->dims, buf_->rank); }
XLA_FFI_ATTRIBUTE_ALWAYS_INLINE size_t size_bytes() const {
return ByteWidth(element_type()) * element_count();
}
XLA_FFI_ATTRIBUTE_ALWAYS_INLINE size_t element_count() const {
Dimensions dims = dimensions();
return std::accumulate(dims.begin(), dims.end(), int64_t{1},
std::multiplies<>());
}
void* untyped_data() const { return buf_->data; }
private:
const XLA_FFI_Buffer* buf_;
};
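// `AnyBuffer` is a type-erased view of a buffer argument: the element type
// and rank are only known at run time, so element access goes through
// untyped_data().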
namespace internal {
template <DataType dtype>
struct always_false : std::false_type {};
template <DataType dtype>
struct DataTypeToNative {
static_assert(always_false<dtype>::value, "unsupported data type");
};
#define XLA_FFI_REGISTER_DATATYPE_MAPPING(data_type_value, actual_type) \
template <> \
struct DataTypeToNative<data_type_value> { \
using type = actual_type; \
};
XLA_FFI_REGISTER_DATATYPE_MAPPING(DataType::PRED, bool);
XLA_FFI_REGISTER_DATATYPE_MAPPING(DataType::U8, uint8_t);
XLA_FFI_REGISTER_DATATYPE_MAPPING(DataType::U16, uint16_t);
XLA_FFI_REGISTER_DATATYPE_MAPPING(DataType::U32, uint32_t);
XLA_FFI_REGISTER_DATATYPE_MAPPING(DataType::U64, uint64_t);
XLA_FFI_REGISTER_DATATYPE_MAPPING(DataType::S8, int8_t);
XLA_FFI_REGISTER_DATATYPE_MAPPING(DataType::S16, int16_t);
XLA_FFI_REGISTER_DATATYPE_MAPPING(DataType::S32, int32_t);
XLA_FFI_REGISTER_DATATYPE_MAPPING(DataType::S64, int64_t);
XLA_FFI_REGISTER_DATATYPE_MAPPING(DataType::F16, uint16_t);
XLA_FFI_REGISTER_DATATYPE_MAPPING(DataType::F32, float);
XLA_FFI_REGISTER_DATATYPE_MAPPING(DataType::F64, double);
XLA_FFI_REGISTER_DATATYPE_MAPPING(DataType::BF16, uint16_t);
XLA_FFI_REGISTER_DATATYPE_MAPPING(DataType::C64, std::complex<float>);
XLA_FFI_REGISTER_DATATYPE_MAPPING(DataType::C128, std::complex<double>);
XLA_FFI_REGISTER_DATATYPE_MAPPING(DataType::TOKEN, void);
#undef XLA_FFI_REGISTER_DATATYPE_MAPPING
inline constexpr size_t kDynamicRank = std::numeric_limits<size_t>::max();
}
constexpr DataType ToComplex(DataType dtype) {
switch (dtype) {
case DataType::F32:
return DataType::C64;
case DataType::F64:
return DataType::C128;
default:
return DataType::INVALID;
}
}
constexpr DataType ToReal(DataType dtype) {
switch (dtype) {
case DataType::C64:
return DataType::F32;
case DataType::C128:
return DataType::F64;
default:
return dtype;
}
}
constexpr DataType ToImag(DataType dtype) {
switch (dtype) {
case DataType::C64:
return DataType::F32;
case DataType::C128:
return DataType::F64;
default:
return dtype;
}
}
template <DataType dtype>
using NativeType = typename internal::DataTypeToNative<dtype>::type;
template <DataType dtype>
constexpr bool IsComplexType() {
return std::is_same_v<NativeType<dtype>,
std::complex<NativeType<ToReal(dtype)>>>;
}
static_assert(ToReal(DataType::C64) == DataType::F32);
static_assert(ToReal(DataType::C128) == DataType::F64);
static_assert(ToReal(DataType::F32) == DataType::F32);
static_assert(ToComplex(DataType::F32) == DataType::C64);
static_assert(ToComplex(DataType::F64) == DataType::C128);
static_assert(ToComplex(DataType::S32) == DataType::INVALID);
static_assert(ToComplex(ToReal(DataType::C64)) == DataType::C64);
static_assert(ToComplex(ToImag(DataType::C128)) == DataType::C128);
static_assert(IsComplexType<DataType::C64>());
static_assert(IsComplexType<DataType::C128>());
static_assert(!IsComplexType<DataType::F32>());
template <DataType dtype, size_t rank = internal::kDynamicRank>
class Buffer {
public:
using Dimensions = AnyBuffer::Dimensions;
explicit Buffer(const XLA_FFI_Buffer* buf) : buf_(buf) {
assert(buf_ != nullptr && "XLA_FFI_Buffer must be non-null");
}
DataType element_type() const { return dtype; }
Dimensions dimensions() const {
return Dimensions(buf_->dims,
rank == internal::kDynamicRank ? buf_->rank : rank);
}
XLA_FFI_ATTRIBUTE_ALWAYS_INLINE size_t size_bytes() const {
return ByteWidth(dtype) * element_count();
}
XLA_FFI_ATTRIBUTE_ALWAYS_INLINE size_t element_count() const {
Dimensions dims = dimensions();
return std::accumulate(dims.begin(), dims.end(), int64_t{1},
std::multiplies<>());
}
void* untyped_data() const { return buf_->data; }
NativeType<dtype>* typed_data() const {
return reinterpret_cast<NativeType<dtype>*>(untyped_data());
}
private:
const XLA_FFI_Buffer* buf_;
};
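// `Buffer<dtype, rank>` is the statically-typed counterpart of AnyBuffer: the
// decoder checks the dtype (and the rank, unless it is kDynamicRank) once at
// call-frame decoding time, so handlers can use typed_data() directly, e.g.
// Buffer<F32, 2> for a rank-2 float32 argument.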
template <DataType dtype> using BufferR0 = Buffer<dtype, 0>;
template <DataType dtype> using BufferR1 = Buffer<dtype, 1>;
template <DataType dtype> using BufferR2 = Buffer<dtype, 2>;
template <DataType dtype> using BufferR3 = Buffer<dtype, 3>;
template <DataType dtype> using BufferR4 = Buffer<dtype, 4>;
using Token = BufferR0<DataType::TOKEN>;
namespace internal {
template <DataType dtype, size_t rank>
XLA_FFI_ATTRIBUTE_ALWAYS_INLINE std::optional<Buffer<dtype, rank>> DecodeBuffer(
XLA_FFI_Buffer* buf, DiagnosticEngine& diagnostic) {
if (auto buf_dtype = static_cast<DataType>(buf->dtype);
XLA_FFI_PREDICT_FALSE(buf_dtype != dtype)) {
return diagnostic.Emit("Wrong buffer dtype: expected ")
<< dtype << " but got " << buf_dtype;
}
if constexpr (rank != internal::kDynamicRank) {
if (XLA_FFI_PREDICT_FALSE(buf->rank != rank)) {
return diagnostic.Emit("Wrong buffer rank: expected ")
<< rank << " but got " << buf->rank;
}
}
return Buffer<dtype, rank>(buf);
}
}
template <DataType dtype, size_t rank = internal::kDynamicRank>
using ResultBuffer = Result<Buffer<dtype, rank>>;
template <DataType dtype> using ResultBufferR0 = ResultBuffer<dtype, 0>;
template <DataType dtype> using ResultBufferR1 = ResultBuffer<dtype, 1>;
template <DataType dtype> using ResultBufferR2 = ResultBuffer<dtype, 2>;
template <DataType dtype> using ResultBufferR3 = ResultBuffer<dtype, 3>;
template <DataType dtype> using ResultBufferR4 = ResultBuffer<dtype, 4>;
template <>
struct ArgBinding<AnyBuffer> {
using Arg = AnyBuffer;
};
template <DataType dtype, size_t rank>
struct ArgBinding<Buffer<dtype, rank>> {
using Arg = Buffer<dtype, rank>;
};
template <>
struct RetBinding<Result<AnyBuffer>> {
using Ret = AnyBuffer;
};
template <DataType dtype, size_t rank>
struct RetBinding<Result<Buffer<dtype, rank>>> {
using Ret = Buffer<dtype, rank>;
};
inline std::ostream& operator<<(std::ostream& os, const XLA_FFI_ArgType type) {
switch (type) {
case XLA_FFI_ArgType_BUFFER:
return os << "buffer";
}
}
template <>
struct ArgDecoding<AnyBuffer> {
XLA_FFI_ATTRIBUTE_ALWAYS_INLINE
static std::optional<AnyBuffer> Decode(XLA_FFI_ArgType type, void* arg,
DiagnosticEngine& diagnostic) {
if (XLA_FFI_PREDICT_FALSE(type != XLA_FFI_ArgType_BUFFER)) {
return diagnostic.Emit("Wrong argument type: expected ")
<< XLA_FFI_ArgType_BUFFER << " but got " << type;
}
return AnyBuffer(reinterpret_cast<XLA_FFI_Buffer*>(arg));
}
};
template <DataType dtype, size_t rank>
struct ArgDecoding<Buffer<dtype, rank>> {
XLA_FFI_ATTRIBUTE_ALWAYS_INLINE
static std::optional<Buffer<dtype, rank>> Decode(
XLA_FFI_ArgType type, void* arg, DiagnosticEngine& diagnostic) {
if (XLA_FFI_PREDICT_FALSE(type != XLA_FFI_ArgType_BUFFER)) {
return diagnostic.Emit("Wrong argument type: expected ")
<< XLA_FFI_ArgType_BUFFER << " but got " << type;
}
return internal::DecodeBuffer<dtype, rank>(
reinterpret_cast<XLA_FFI_Buffer*>(arg), diagnostic);
}
};
class RemainingArgs : public internal::RemainingArgsBase {
public:
using internal::RemainingArgsBase::RemainingArgsBase;
template <typename T>
ErrorOr<T> get(size_t index) const {
size_t idx = offset() + index;
if (XLA_FFI_PREDICT_FALSE(idx >= args()->size)) {
return Unexpected(
Error(ErrorCode::kInvalidArgument, "Index out of range"));
}
DiagnosticEngine diagnostic;
std::optional<T> value = ArgDecoding<T>::Decode(
args()->types[idx], args()->args[idx], diagnostic);
if (XLA_FFI_PREDICT_FALSE(!value.has_value())) {
return Unexpected(Error::Internal(diagnostic.Result()));
}
return *value;
}
};
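// `RemainingArgs` exposes the variadic tail of the argument list. get<T>(i)
// decodes argument offset() + i on demand and reports decoding failures
// through ErrorOr instead of failing the handler's signature match.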
template <>
struct internal::Decode<internal::RemainingArgsTag> {
static std::optional<RemainingArgs> call(DecodingOffsets& offsets,
DecodingContext& ctx,
DiagnosticEngine& diagnostic) {
return RemainingArgs(&ctx.call_frame->args, offsets.args);
}
};
inline std::ostream& operator<<(std::ostream& os, const XLA_FFI_RetType type) {
switch (type) {
case XLA_FFI_RetType_BUFFER:
return os << "buffer";
}
}
template <>
struct RetDecoding<AnyBuffer> {
XLA_FFI_ATTRIBUTE_ALWAYS_INLINE
static std::optional<Result<AnyBuffer>> Decode(XLA_FFI_RetType type,
void* ret,
DiagnosticEngine& diagnostic) {
if (XLA_FFI_PREDICT_FALSE(type != XLA_FFI_RetType_BUFFER)) {
return diagnostic.Emit("Wrong result type: expected ")
<< XLA_FFI_RetType_BUFFER << " but got " << type;
}
return AnyBuffer(reinterpret_cast<XLA_FFI_Buffer*>(ret));
}
};
template <DataType dtype, size_t rank>
struct RetDecoding<Buffer<dtype, rank>> {
XLA_FFI_ATTRIBUTE_ALWAYS_INLINE
static std::optional<Result<Buffer<dtype, rank>>> Decode(
XLA_FFI_RetType type, void* ret, DiagnosticEngine& diagnostic) {
if (XLA_FFI_PREDICT_FALSE(type != XLA_FFI_RetType_BUFFER)) {
return diagnostic.Emit("Wrong result type: expected ")
<< XLA_FFI_RetType_BUFFER << " but got " << type;
}
return internal::DecodeBuffer<dtype, rank>(
reinterpret_cast<XLA_FFI_Buffer*>(ret), diagnostic);
}
};
class RemainingRets : public internal::RemainingRetsBase {
public:
using internal::RemainingRetsBase::RemainingRetsBase;
template <typename T>
ErrorOr<Result<T>> get(size_t index) const {
size_t idx = offset() + index;
if (XLA_FFI_PREDICT_FALSE(idx >= rets()->size)) {
return Unexpected(
Error(ErrorCode::kInvalidArgument, "Index out of range"));
}
DiagnosticEngine diagnostic;
std::optional<Result<T>> value = RetDecoding<T>::Decode(
rets()->types[idx], rets()->rets[idx], diagnostic);
if (XLA_FFI_PREDICT_FALSE(!value.has_value())) {
return Unexpected(Error::Internal(diagnostic.Result()));
}
return *value;
}
};
template <>
struct internal::Decode<internal::RemainingRetsTag> {
static std::optional<RemainingRets> call(DecodingOffsets& offsets,
DecodingContext& ctx,
DiagnosticEngine& diagnostic) {
return RemainingRets(&ctx.call_frame->rets, offsets.rets);
}
};
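// Defines AttrDecoding specializations that expose array attributes as
// zero-copy `Span<const T>` views into the call frame, after validating
// both the attribute kind and the element data type.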
#define XLA_FFI_REGISTER_ARRAY_ATTR_DECODING(T, TYPE) \
template <> \
struct AttrDecoding<Span<const T>> { \
using Type = Span<const T>; \
static std::optional<Type> Decode(XLA_FFI_AttrType type, void* attr, \
DiagnosticEngine& diagnostic) { \
if (XLA_FFI_PREDICT_FALSE(type != XLA_FFI_AttrType_ARRAY)) { \
return diagnostic.Emit("Wrong attribute type: expected ") \
<< XLA_FFI_AttrType_ARRAY << " but got " << type; \
} \
\
auto* array = reinterpret_cast<XLA_FFI_Array*>(attr); \
if (XLA_FFI_PREDICT_FALSE(array->dtype != TYPE)) { \
return diagnostic.Emit("Wrong array data type: expected ") \
<< TYPE << " but got " << array->dtype; \
} \
\
return Span<const T>(reinterpret_cast<T*>(array->data), array->size); \
} \
}
XLA_FFI_REGISTER_ARRAY_ATTR_DECODING(int8_t, XLA_FFI_DataType_S8);
XLA_FFI_REGISTER_ARRAY_ATTR_DECODING(int16_t, XLA_FFI_DataType_S16);
XLA_FFI_REGISTER_ARRAY_ATTR_DECODING(int32_t, XLA_FFI_DataType_S32);
XLA_FFI_REGISTER_ARRAY_ATTR_DECODING(int64_t, XLA_FFI_DataType_S64);
XLA_FFI_REGISTER_ARRAY_ATTR_DECODING(uint8_t, XLA_FFI_DataType_U8);
XLA_FFI_REGISTER_ARRAY_ATTR_DECODING(uint16_t, XLA_FFI_DataType_U16);
XLA_FFI_REGISTER_ARRAY_ATTR_DECODING(uint32_t, XLA_FFI_DataType_U32);
XLA_FFI_REGISTER_ARRAY_ATTR_DECODING(uint64_t, XLA_FFI_DataType_U64);
XLA_FFI_REGISTER_ARRAY_ATTR_DECODING(float, XLA_FFI_DataType_F32);
XLA_FFI_REGISTER_ARRAY_ATTR_DECODING(double, XLA_FFI_DataType_F64);
#undef XLA_FFI_REGISTER_ARRAY_ATTR_DECODING
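// `Pointer<T>` decodes a raw pointer smuggled through an i64 scalar
// attribute; the caller is responsible for the pointee's type and lifetime.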
template <typename T>
struct Pointer {};
template <typename T>
struct AttrDecoding<Pointer<T>> {
using Type = T*;
static std::optional<Type> Decode(XLA_FFI_AttrType type, void* attr,
DiagnosticEngine& diagnostic) {
auto* scalar = reinterpret_cast<XLA_FFI_Scalar*>(attr);
if (XLA_FFI_PREDICT_FALSE(type != XLA_FFI_AttrType_SCALAR ||
scalar->dtype != XLA_FFI_DataType_S64)) {
return diagnostic.Emit("Wrong attribute type: ")
<< "expected i64 scalar for passing pointer but got " << type;
}
static_assert(sizeof(uintptr_t) == sizeof(int64_t));
uintptr_t ptr = *reinterpret_cast<uintptr_t*>(scalar->value);
return reinterpret_cast<Type>(ptr);
}
};
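// A typed view over `XLA_FFI_Attrs` with by-name lookup; unlike the internal
// base class, `get<T>` returns `ErrorOr<T>` so callers do not have to supply
// a DiagnosticEngine.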
class Dictionary : public internal::DictionaryBase {
public:
using internal::DictionaryBase::DictionaryBase;
template <typename T>
ErrorOr<T> get(std::string_view name) const {
DiagnosticEngine diagnostic;
std::optional<T> value = internal::DictionaryBase::get<T>(name, diagnostic);
if (!value.has_value()) {
return Unexpected(Error::Internal(diagnostic.Result()));
}
return *value;
}
};
template <>
struct internal::Decode<internal::AttrsTag<Dictionary>> {
static std::optional<Dictionary> call(DecodingOffsets& offsets,
DecodingContext& ctx,
DiagnosticEngine& diagnostic) {
return Dictionary(&ctx.call_frame->attrs);
}
};
template <>
struct AttrDecoding<Dictionary> {
using Type = Dictionary;
static std::optional<Dictionary> Decode(XLA_FFI_AttrType type, void* attr,
DiagnosticEngine& diagnostic) {
if (XLA_FFI_PREDICT_FALSE(type != XLA_FFI_AttrType_DICTIONARY)) {
return diagnostic.Emit("Wrong attribute type: expected ")
<< XLA_FFI_AttrType_DICTIONARY << " but got " << type;
}
return Dictionary(reinterpret_cast<XLA_FFI_Attrs*>(attr));
}
};
namespace internal {
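// Thin wrappers over the C API entry points for creating, destroying, and
// reading FFI errors.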
inline XLA_FFI_Error* CreateError(const XLA_FFI_Api* api, const Error& error) {
XLA_FFI_Error_Create_Args args;
args.struct_size = XLA_FFI_Error_Create_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.errc = static_cast<XLA_FFI_Error_Code>(*error.errc());
args.message = error.message().c_str();
return api->XLA_FFI_Error_Create(&args);
}
inline void DestroyError(const XLA_FFI_Api* api, XLA_FFI_Error* error) {
XLA_FFI_Error_Destroy_Args args;
args.struct_size = XLA_FFI_Error_Destroy_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.error = error;
api->XLA_FFI_Error_Destroy(&args);
}
inline const char* GetErrorMessage(const XLA_FFI_Api* api,
XLA_FFI_Error* error) {
XLA_FFI_Error_GetMessage_Args args;
args.struct_size = XLA_FFI_Error_GetMessage_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.error = error;
api->XLA_FFI_Error_GetMessage(&args);
return args.message;
}
}
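// Encodes a handler's `Error` result for the C ABI: nullptr on success,
// otherwise a freshly created XLA_FFI_Error.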
template <ExecutionStage stage>
struct ResultEncoding<stage, Error> {
static XLA_FFI_Error* Encode(const XLA_FFI_Api* api,
XLA_FFI_ExecutionContext* ctx, Error error) {
if (XLA_FFI_PREDICT_TRUE(error.success())) {
return nullptr;
}
return internal::CreateError(api, error);
}
};
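// At the instantiation stage, an `ErrorOr<std::unique_ptr<T>>` result hands
// ownership of the state object to the runtime via XLA_FFI_State_Set,
// together with a deleter that reclaims it.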
template <typename T>
struct ResultEncoding<ExecutionStage::kInstantiate,
ErrorOr<std::unique_ptr<T>>> {
static_assert(std::is_same_v<decltype(T::id), TypeId>,
"State type must have a static `TypeId id` field");
static XLA_FFI_Error* Encode(const XLA_FFI_Api* api,
XLA_FFI_ExecutionContext* ctx,
ErrorOr<std::unique_ptr<T>> state) {
if (XLA_FFI_PREDICT_TRUE(state.has_value())) {
XLA_FFI_State_Set_Args args;
args.struct_size = XLA_FFI_State_Set_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.ctx = ctx;
args.type_id = &T::id;
args.state = state.value().release();
args.deleter = +[](void* state) { delete reinterpret_cast<T*>(state); };
return api->XLA_FFI_State_Set(&args);
}
return internal::CreateError(api, state.error());
}
};
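// Asynchronous results: create a C future, hand it to the runtime
// immediately, and signal it (available or error) once the C++ `Future`
// becomes ready. A failure to signal cannot be reported back, so it aborts.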
template <ExecutionStage stage>
struct ResultEncoding<stage, Future> {
static std::variant<XLA_FFI_Error*, XLA_FFI_Future*> Encode(
const XLA_FFI_Api* api, XLA_FFI_ExecutionContext* ctx, Future future) {
XLA_FFI_Future_Create_Args args;
args.struct_size = XLA_FFI_Future_Create_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.future = nullptr;
if (auto* err = api->XLA_FFI_Future_Create(&args)) {
return err;
}
assert(args.future != nullptr && "XLA_FFI_Future_Create failed");
future.OnReady([api, f = args.future](const std::optional<Error>& error) {
auto abort_on_error = [api](XLA_FFI_Error* err) {
if (XLA_FFI_PREDICT_TRUE(err == nullptr)) {
return;
}
std::cerr << "Failed to signal XLA_FFI_Future completion: "
<< internal::GetErrorMessage(api, err) << std::endl;
internal::DestroyError(api, err);
std::abort();
};
if (XLA_FFI_PREDICT_FALSE(error.has_value())) {
XLA_FFI_Future_SetError_Args args;
args.struct_size = XLA_FFI_Future_SetError_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.future = f;
args.error = internal::CreateError(api, *error);
abort_on_error(api->XLA_FFI_Future_SetError(&args));
} else {
XLA_FFI_Future_SetAvailable_Args args;
args.struct_size = XLA_FFI_Future_SetAvailable_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.future = f;
abort_on_error(api->XLA_FFI_Future_SetAvailable(&args));
}
});
return args.future;
}
};
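// Decodes the platform-specific stream (e.g. a `cudaStream_t` on CUDA
// backends) from the execution context.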
template <typename T>
struct PlatformStream {};
template <typename T>
struct CtxDecoding<PlatformStream<T>> {
using Type = T;
static_assert(std::is_pointer_v<T>, "stream type must be a pointer");
static std::optional<Type> Decode(const XLA_FFI_Api* api,
XLA_FFI_ExecutionContext* ctx,
DiagnosticEngine& diagnostic) {
XLA_FFI_Stream_Get_Args args;
args.struct_size = XLA_FFI_Stream_Get_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.ctx = ctx;
args.stream = nullptr;
if (XLA_FFI_Error* error = api->XLA_FFI_Stream_Get(&args)) {
diagnostic.Emit("Failed to get platform stream: ")
<< internal::GetErrorMessage(api, error);
internal::DestroyError(api, error);
return std::nullopt;
}
return reinterpret_cast<T>(args.stream);
}
};
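// RAII scratch allocator: `Allocate` requests device memory through the
// C API, and the destructor frees every allocation it handed out.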
class ScratchAllocator {
public:
~ScratchAllocator();
ScratchAllocator(ScratchAllocator&&) = default;
ScratchAllocator& operator=(ScratchAllocator&&) = default;
std::optional<void*> Allocate(size_t size, size_t alignment = 1);
private:
friend struct CtxDecoding<ScratchAllocator>;
ScratchAllocator(const XLA_FFI_Api* api, XLA_FFI_ExecutionContext* ctx,
DiagnosticEngine& diagnostic);
struct Allocation {
size_t size;
void* data;
};
const XLA_FFI_Api* api_;
XLA_FFI_ExecutionContext* ctx_;
DiagnosticEngine& diagnostic_;
std::vector<Allocation> allocations_;
};
template <>
struct CtxDecoding<ScratchAllocator> {
using Type = ScratchAllocator;
static std::optional<Type> Decode(const XLA_FFI_Api* api,
XLA_FFI_ExecutionContext* ctx,
DiagnosticEngine& diagnostic) {
return ScratchAllocator(api, ctx, diagnostic);
}
};
inline std::optional<void*> ScratchAllocator::Allocate(size_t size,
size_t alignment) {
XLA_FFI_DeviceMemory_Allocate_Args args;
args.struct_size = XLA_FFI_DeviceMemory_Allocate_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.ctx = ctx_;
args.size = size;
args.alignment = alignment;
args.data = nullptr;
if (XLA_FFI_Error* error = api_->XLA_FFI_DeviceMemory_Allocate(&args)) {
diagnostic_.Emit("Failed to allocate scratch memory: ")
<< internal::GetErrorMessage(api_, error);
internal::DestroyError(api_, error);
return std::nullopt;
}
allocations_.push_back({size, args.data});
return args.data;
}
inline ScratchAllocator::ScratchAllocator(const XLA_FFI_Api* api,
XLA_FFI_ExecutionContext* ctx,
DiagnosticEngine& diagnostic)
: api_(api), ctx_(ctx), diagnostic_(diagnostic) {}
inline ScratchAllocator::~ScratchAllocator() {
for (Allocation& alloc : allocations_) {
XLA_FFI_DeviceMemory_Free_Args args;
args.struct_size = XLA_FFI_DeviceMemory_Free_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.ctx = ctx_;
args.size = alloc.size;
args.data = alloc.data;
if (XLA_FFI_Error* error = api_->XLA_FFI_DeviceMemory_Free(&args)) {
diagnostic_.Emit("Failed to free scratch memory: ")
<< internal::GetErrorMessage(api_, error);
internal::DestroyError(api_, error);
}
}
}
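// Schedules callables on the runtime-provided thread pool. Ownership of the
// heap-allocated closure passes to the task; if scheduling fails, the task
// runs inline so the closure is still executed and freed.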
class ThreadPool {
public:
template <typename F>
void Schedule(F&& f) {
XLA_FFI_Task* task = +[](void* data) {
auto* f = reinterpret_cast<F*>(data);
(*f)();
delete f;
};
F* data = new F(std::forward<F>(f));
XLA_FFI_ThreadPool_Schedule_Args args;
args.struct_size = XLA_FFI_ThreadPool_Schedule_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.ctx = ctx_;
args.task = task;
args.data = data;
if (XLA_FFI_Error* error = api_->XLA_FFI_ThreadPool_Schedule(&args)) {
diagnostic_.Emit("Failed to schedule task on a thread pool: ")
<< internal::GetErrorMessage(api_, error);
internal::DestroyError(api_, error);
task(data);
}
}
private:
friend struct CtxDecoding<ThreadPool>;
ThreadPool(const XLA_FFI_Api* api, XLA_FFI_ExecutionContext* ctx,
DiagnosticEngine& diagnostic);
const XLA_FFI_Api* api_;
XLA_FFI_ExecutionContext* ctx_;
DiagnosticEngine& diagnostic_;
};
template <>
struct CtxDecoding<ThreadPool> {
using Type = ThreadPool;
static std::optional<Type> Decode(const XLA_FFI_Api* api,
XLA_FFI_ExecutionContext* ctx,
DiagnosticEngine& diagnostic) {
return ThreadPool(api, ctx, diagnostic);
}
};
inline ThreadPool::ThreadPool(const XLA_FFI_Api* api,
XLA_FFI_ExecutionContext* ctx,
DiagnosticEngine& diagnostic)
: api_(api), ctx_(ctx), diagnostic_(diagnostic) {}
namespace internal {
inline XLA_FFI_Error* RegisterType(const XLA_FFI_Api* api,
std::string_view name,
XLA_FFI_TypeId* type_id) {
XLA_FFI_TypeId_Register_Args args;
args.struct_size = XLA_FFI_TypeId_Register_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.name = XLA_FFI_ByteSpan{name.data(), name.size()};
args.type_id = type_id;
return api->XLA_FFI_TypeId_Register(&args);
}
}
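// Registers a type id with the runtime during static initialization; the
// __COUNTER__ indirection produces a unique variable name per use.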
#define XLA_FFI_REGISTER_TYPE(API, NAME, TYPE_ID) \
XLA_FFI_REGISTER_TYPE_(API, NAME, TYPE_ID, __COUNTER__)
#define XLA_FFI_REGISTER_TYPE_(API, NAME, TYPE_ID, N) \
XLA_FFI_REGISTER_TYPE__(API, NAME, TYPE_ID, N)
#define XLA_FFI_REGISTER_TYPE__(API, NAME, TYPE_ID, N) \
XLA_FFI_ATTRIBUTE_UNUSED static const XLA_FFI_Error* \
xla_ffi_type_##N##_registered_ = [] { \
return ::xla::ffi::internal::RegisterType(API, NAME, TYPE_ID); \
}()
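// `UserData<T>` and `State<T>` below fetch typed pointers from the execution
// context and execution state respectively; `T` must expose a registered
// `static TypeId id`.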
template <typename T>
struct UserData {};
template <typename T>
struct CtxDecoding<UserData<T>> {
using Type = T*;
static_assert(std::is_same_v<decltype(T::id), TypeId>,
"UserData type must have a static `TypeId id` field");
static std::optional<Type> Decode(const XLA_FFI_Api* api,
XLA_FFI_ExecutionContext* ctx,
DiagnosticEngine& diagnostic) {
XLA_FFI_ExecutionContext_Get_Args args;
args.struct_size = XLA_FFI_ExecutionContext_Get_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.ctx = ctx;
args.type_id = &T::id;
args.data = nullptr;
assert(args.type_id->type_id > 0 && "type must be registered with XLA FFI");
if (XLA_FFI_Error* err = api->XLA_FFI_ExecutionContext_Get(&args); err) {
diagnostic.Emit("Failed to get user data from execution context: ")
<< internal::GetErrorMessage(api, err);
internal::DestroyError(api, err);
return std::nullopt;
}
return static_cast<Type>(args.data);
}
};
template <typename T>
struct State {};
template <typename T>
struct CtxDecoding<State<T>> {
using Type = T*;
static_assert(std::is_same_v<decltype(T::id), TypeId>,
"State type must have a static `TypeId id` field");
static std::optional<Type> Decode(const XLA_FFI_Api* api,
XLA_FFI_ExecutionContext* ctx,
DiagnosticEngine& diagnostic) {
XLA_FFI_State_Get_Args args;
args.struct_size = XLA_FFI_State_Get_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.ctx = ctx;
args.type_id = &T::id;
args.state = nullptr;
assert(args.type_id->type_id > 0 && "type must be registered with XLA FFI");
if (XLA_FFI_Error* err = api->XLA_FFI_State_Get(&args); err) {
diagnostic.Emit("Failed to get state from execution context: ")
<< internal::GetErrorMessage(api, err);
internal::DestroyError(api, err);
return std::nullopt;
}
return static_cast<Type>(args.state);
}
};
}
#endif

#include "xla/ffi/api/ffi.h"
#include <cstddef>
#include <cstdint>
#include <limits>
#include <memory>
#include <optional>
#include <string>
#include <string_view>
#include <type_traits>
#include <utility>
#include <vector>
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/strings/match.h"
#include "absl/synchronization/blocking_counter.h"
#include "xla/ffi/api/c_api.h"
#include "xla/ffi/call_frame.h"
#include "xla/ffi/execution_context.h"
#include "xla/ffi/execution_state.h"
#include "xla/ffi/ffi_api.h"
#include "xla/ffi/type_id_registry.h"
#include "xla/primitive_util.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/device_memory_allocator.h"
#include "xla/tsl/concurrency/async_value_ref.h"
#include "xla/tsl/concurrency/chain.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/env.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/test.h"
#include "tsl/platform/test_benchmark.h"
#include "tsl/platform/threadpool.h"
#define EIGEN_USE_THREADS
#include "unsupported/Eigen/CXX11/Tensor"
namespace xla::ffi {
enum class Int32BasedEnum : int32_t {
kOne = 1,
kTwo = 2,
};
static constexpr int64_t kI32MaxValue = std::numeric_limits<int32_t>::max();
enum class Int64BasedEnum : int64_t {
kOne = kI32MaxValue + 1,
kTwo = kI32MaxValue + 2,
};
}
XLA_FFI_REGISTER_ENUM_ATTR_DECODING(::xla::ffi::Int32BasedEnum);
XLA_FFI_REGISTER_ENUM_ATTR_DECODING(::xla::ffi::Int64BasedEnum);
namespace xla::ffi {
struct PairOfI32AndF32 {
int32_t i32;
float f32;
};
struct TupleOfI32 {
int32_t i32_0;
int32_t i32_1;
int32_t i32_2;
int32_t i32_3;
};
}
XLA_FFI_REGISTER_STRUCT_ATTR_DECODING(::xla::ffi::PairOfI32AndF32,
::xla::ffi::StructMember<int32_t>("i32"),
::xla::ffi::StructMember<float>("f32"));
XLA_FFI_REGISTER_STRUCT_ATTR_DECODING(
::xla::ffi::TupleOfI32, ::xla::ffi::StructMember<int32_t>("i32_0"),
::xla::ffi::StructMember<int32_t>("i32_1"),
::xla::ffi::StructMember<int32_t>("i32_2"),
::xla::ffi::StructMember<int32_t>("i32_3"));
namespace xla::ffi {
using ::testing::HasSubstr;
using ::tsl::testing::StatusIs;
TEST(FfiTest, DataTypeEnumValue) {
auto encoded = [](auto value) { return static_cast<uint8_t>(value); };
EXPECT_EQ(encoded(PrimitiveType::PRED), encoded(DataType::PRED));
EXPECT_EQ(encoded(PrimitiveType::S8), encoded(DataType::S8));
EXPECT_EQ(encoded(PrimitiveType::S16), encoded(DataType::S16));
EXPECT_EQ(encoded(PrimitiveType::S32), encoded(DataType::S32));
EXPECT_EQ(encoded(PrimitiveType::S64), encoded(DataType::S64));
EXPECT_EQ(encoded(PrimitiveType::U8), encoded(DataType::U8));
EXPECT_EQ(encoded(PrimitiveType::U16), encoded(DataType::U16));
EXPECT_EQ(encoded(PrimitiveType::U32), encoded(DataType::U32));
EXPECT_EQ(encoded(PrimitiveType::U64), encoded(DataType::U64));
EXPECT_EQ(encoded(PrimitiveType::F16), encoded(DataType::F16));
EXPECT_EQ(encoded(PrimitiveType::F32), encoded(DataType::F32));
EXPECT_EQ(encoded(PrimitiveType::F64), encoded(DataType::F64));
EXPECT_EQ(encoded(PrimitiveType::BF16), encoded(DataType::BF16));
EXPECT_EQ(encoded(PrimitiveType::C64), encoded(DataType::C64));
EXPECT_EQ(encoded(PrimitiveType::C128), encoded(DataType::C128));
EXPECT_EQ(encoded(PrimitiveType::TOKEN), encoded(DataType::TOKEN));
EXPECT_EQ(encoded(PrimitiveType::F8E5M2), encoded(DataType::F8E5M2));
EXPECT_EQ(encoded(PrimitiveType::F8E4M3), encoded(DataType::F8E4M3));
EXPECT_EQ(encoded(PrimitiveType::F8E4M3FN), encoded(DataType::F8E4M3FN));
EXPECT_EQ(encoded(PrimitiveType::F8E4M3B11FNUZ),
encoded(DataType::F8E4M3B11FNUZ));
EXPECT_EQ(encoded(PrimitiveType::F8E5M2FNUZ), encoded(DataType::F8E5M2FNUZ));
EXPECT_EQ(encoded(PrimitiveType::F8E4M3FNUZ), encoded(DataType::F8E4M3FNUZ));
EXPECT_EQ(encoded(PrimitiveType::F8E3M4), encoded(DataType::F8E3M4));
}
TEST(FfiTest, DataTypeByteWidth) {
EXPECT_EQ(0, ByteWidth(DataType::TOKEN));
EXPECT_EQ(0, ByteWidth(DataType::INVALID));
EXPECT_EQ(primitive_util::ByteWidth(PrimitiveType::PRED),
ByteWidth(DataType::PRED));
EXPECT_EQ(primitive_util::ByteWidth(PrimitiveType::S8),
ByteWidth(DataType::S8));
EXPECT_EQ(primitive_util::ByteWidth(PrimitiveType::S16),
ByteWidth(DataType::S16));
EXPECT_EQ(primitive_util::ByteWidth(PrimitiveType::S32),
ByteWidth(DataType::S32));
EXPECT_EQ(primitive_util::ByteWidth(PrimitiveType::S64),
ByteWidth(DataType::S64));
EXPECT_EQ(primitive_util::ByteWidth(PrimitiveType::U8),
ByteWidth(DataType::U8));
EXPECT_EQ(primitive_util::ByteWidth(PrimitiveType::U16),
ByteWidth(DataType::U16));
EXPECT_EQ(primitive_util::ByteWidth(PrimitiveType::U32),
ByteWidth(DataType::U32));
EXPECT_EQ(primitive_util::ByteWidth(PrimitiveType::U64),
ByteWidth(DataType::U64));
EXPECT_EQ(primitive_util::ByteWidth(PrimitiveType::F16),
ByteWidth(DataType::F16));
EXPECT_EQ(primitive_util::ByteWidth(PrimitiveType::F32),
ByteWidth(DataType::F32));
EXPECT_EQ(primitive_util::ByteWidth(PrimitiveType::F64),
ByteWidth(DataType::F64));
EXPECT_EQ(primitive_util::ByteWidth(PrimitiveType::BF16),
ByteWidth(DataType::BF16));
EXPECT_EQ(primitive_util::ByteWidth(PrimitiveType::C64),
ByteWidth(DataType::C64));
EXPECT_EQ(primitive_util::ByteWidth(PrimitiveType::C128),
ByteWidth(DataType::C128));
EXPECT_EQ(primitive_util::ByteWidth(PrimitiveType::F8E5M2),
ByteWidth(DataType::F8E5M2));
EXPECT_EQ(primitive_util::ByteWidth(PrimitiveType::F8E4M3),
ByteWidth(DataType::F8E4M3));
EXPECT_EQ(primitive_util::ByteWidth(PrimitiveType::F8E4M3FN),
ByteWidth(DataType::F8E4M3FN));
EXPECT_EQ(primitive_util::ByteWidth(PrimitiveType::F8E4M3B11FNUZ),
ByteWidth(DataType::F8E4M3B11FNUZ));
EXPECT_EQ(primitive_util::ByteWidth(PrimitiveType::F8E5M2FNUZ),
ByteWidth(DataType::F8E5M2FNUZ));
EXPECT_EQ(primitive_util::ByteWidth(PrimitiveType::F8E4M3FNUZ),
ByteWidth(DataType::F8E4M3FNUZ));
EXPECT_EQ(primitive_util::ByteWidth(PrimitiveType::F8E3M4),
ByteWidth(DataType::F8E3M4));
}
TEST(FfiTest, ErrorEnumValue) {
auto encoded = [](auto value) { return static_cast<uint8_t>(value); };
EXPECT_EQ(encoded(absl::StatusCode::kOk), encoded(ErrorCode::kOk));
EXPECT_EQ(encoded(absl::StatusCode::kCancelled),
encoded(ErrorCode::kCancelled));
EXPECT_EQ(encoded(absl::StatusCode::kUnknown), encoded(ErrorCode::kUnknown));
EXPECT_EQ(encoded(absl::StatusCode::kInvalidArgument),
encoded(ErrorCode::kInvalidArgument));
EXPECT_EQ(encoded(absl::StatusCode::kNotFound),
encoded(ErrorCode::kNotFound));
EXPECT_EQ(encoded(absl::StatusCode::kAlreadyExists),
encoded(ErrorCode::kAlreadyExists));
EXPECT_EQ(encoded(absl::StatusCode::kPermissionDenied),
encoded(ErrorCode::kPermissionDenied));
EXPECT_EQ(encoded(absl::StatusCode::kResourceExhausted),
encoded(ErrorCode::kResourceExhausted));
EXPECT_EQ(encoded(absl::StatusCode::kFailedPrecondition),
encoded(ErrorCode::kFailedPrecondition));
EXPECT_EQ(encoded(absl::StatusCode::kAborted), encoded(ErrorCode::kAborted));
EXPECT_EQ(encoded(absl::StatusCode::kOutOfRange),
encoded(ErrorCode::kOutOfRange));
EXPECT_EQ(encoded(absl::StatusCode::kUnimplemented),
encoded(ErrorCode::kUnimplemented));
EXPECT_EQ(encoded(absl::StatusCode::kInternal),
encoded(ErrorCode::kInternal));
EXPECT_EQ(encoded(absl::StatusCode::kUnavailable),
encoded(ErrorCode::kUnavailable));
EXPECT_EQ(encoded(absl::StatusCode::kDataLoss),
encoded(ErrorCode::kDataLoss));
EXPECT_EQ(encoded(absl::StatusCode::kUnauthenticated),
encoded(ErrorCode::kUnauthenticated));
}
TEST(FfiTest, Expected) {
ErrorOr<int32_t> value(42);
EXPECT_TRUE(value.has_value());
EXPECT_FALSE(value.has_error());
EXPECT_EQ(*value, 42);
ErrorOr<int32_t> error(Error(ErrorCode::kInternal, "Test error"));
EXPECT_FALSE(error.has_value());
EXPECT_TRUE(error.has_error());
EXPECT_THAT(error.error().message(), HasSubstr("Test error"));
}
TEST(FfiTest, FutureSetAvailable) {
Promise promise;
Future future(promise);
promise.SetAvailable();
future.OnReady([](const std::optional<Error>& error) {
EXPECT_FALSE(error.has_value());
});
}
TEST(FfiTest, FutureSetError) {
Promise promise;
Future future(promise);
promise.SetError(Error(ErrorCode::kInternal, "Test error"));
future.OnReady([](const std::optional<Error>& error) {
EXPECT_TRUE(error.has_value());
EXPECT_THAT(error->message(), HasSubstr("Test error"));
});
}
TEST(FfiTest, FutureSetAvailableFromThreadPool) {
tsl::thread::ThreadPool pool(tsl::Env::Default(), "ffi-test", 2);
Promise promise;
Future future(promise);
int32_t value = 0;
absl::BlockingCounter counter(1);
future.OnReady([&](const std::optional<Error>& error) {
EXPECT_FALSE(error.has_value());
EXPECT_EQ(value, 42);
counter.DecrementCount();
});
pool.Schedule([&]() {
value = 42;
promise.SetAvailable();
});
counter.Wait();
}
TEST(FfiTest, FutureSetErrorFromThreadPool) {
tsl::thread::ThreadPool pool(tsl::Env::Default(), "ffi-test", 2);
Promise promise;
Future future(promise);
int32_t value = 0;
absl::BlockingCounter counter(1);
future.OnReady([&](const std::optional<Error>& error) {
EXPECT_TRUE(error.has_value());
EXPECT_THAT(error->message(), HasSubstr("Test error"));
EXPECT_EQ(value, 42);
counter.DecrementCount();
});
pool.Schedule([&]() {
value = 42;
promise.SetError(Error(ErrorCode::kInternal, "Test error"));
});
counter.Wait();
}
TEST(FfiTest, FutureRace) {
tsl::thread::ThreadPool pool(tsl::Env::Default(), "ffi-test", 2);
for (int32_t i = 0; i < 1000; ++i) {
Promise promise;
Future future(promise);
absl::BlockingCounter counter(1);
pool.Schedule([&]() { promise.SetAvailable(); });
pool.Schedule([&]() {
future.OnReady([&](const std::optional<Error>& error) {
EXPECT_FALSE(error.has_value());
counter.DecrementCount();
});
});
counter.Wait();
}
}
TEST(FfiTest, ReturnError) {
CallFrameBuilder builder(0, 0);
auto call_frame = builder.Build();
auto handler = Ffi::Bind().To(
[]() { return Error(ErrorCode::kInternal, "Test error"); });
auto status = Call(*handler, call_frame);
EXPECT_EQ(status, absl::InternalError("Test error"));
}
TEST(FfiTest, AnyBufferArgument) {
std::vector<float> storage(4, 0.0f);
se::DeviceMemoryBase memory(storage.data(), 4 * sizeof(float));
CallFrameBuilder builder(1, 0);
builder.AddBufferArg(memory, PrimitiveType::F32, {2, 2});
auto call_frame = builder.Build();
auto handler = Ffi::Bind().Arg<AnyBuffer>().To([&](auto buffer) {
EXPECT_EQ(buffer.untyped_data(), storage.data());
EXPECT_EQ(buffer.dimensions().size(), 2);
return Error::Success();
});
auto status = Call(*handler, call_frame);
TF_ASSERT_OK(status);
}
TEST(FfiTest, BufferArgument) {
std::vector<float> storage(4, 0.0f);
se::DeviceMemoryBase memory(storage.data(), 4 * sizeof(float));
CallFrameBuilder builder(1, 0);
builder.AddBufferArg(memory, PrimitiveType::F32, {2, 2});
auto call_frame = builder.Build();
auto handler = Ffi::Bind().Arg<BufferR2<F32>>().To([&](auto buffer) {
EXPECT_EQ(buffer.typed_data(), storage.data());
EXPECT_EQ(buffer.dimensions().size(), 2);
return Error::Success();
});
auto status = Call(*handler, call_frame);
TF_ASSERT_OK(status);
}
TEST(FfiTest, AnyBufferResult) {
std::vector<float> storage(4, 0.0f);
se::DeviceMemoryBase memory(storage.data(), 4 * sizeof(float));
CallFrameBuilder builder(0, 1);
builder.AddBufferRet(memory, PrimitiveType::F32, {2, 2});
auto call_frame = builder.Build();
auto handler = Ffi::Bind().Ret<AnyBuffer>().To([&](Result<AnyBuffer> buffer) {
EXPECT_EQ(buffer->untyped_data(), storage.data());
EXPECT_EQ(buffer->dimensions().size(), 2);
return Error::Success();
});
auto status = Call(*handler, call_frame);
TF_ASSERT_OK(status);
}
TEST(FfiTest, MissingBufferArgument) {
CallFrameBuilder builder(0, 0);
auto call_frame = builder.Build();
auto handler = Ffi::Bind().Arg<BufferR1<F32>>().To(
[](auto) { return Error::Success(); });
auto status = Call(*handler, call_frame);
EXPECT_THAT(status, StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("Wrong number of arguments")));
}
TEST(FfiTest, WrongRankBufferArgument) {
std::vector<int32_t> storage(4, 0);
se::DeviceMemoryBase memory(storage.data(), 4 * sizeof(int32_t));
CallFrameBuilder builder(1, 0);
builder.AddBufferArg(memory, PrimitiveType::F32, {2, 2});
auto call_frame = builder.Build();
auto handler = Ffi::Bind().Arg<BufferR1<F32>>().To(
[](auto) { return Error::Success(); });
auto status = Call(*handler, call_frame);
EXPECT_THAT(status,
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("Wrong buffer rank: expected 1 but got 2")));
}
TEST(FfiTest, WrongTypeBufferArgument) {
std::vector<int32_t> storage(4, 0);
se::DeviceMemoryBase memory(storage.data(), 4 * sizeof(int32_t));
CallFrameBuilder builder(1, 0);
builder.AddBufferArg(memory, PrimitiveType::S32, {2, 2});
auto call_frame = builder.Build();
auto handler = Ffi::Bind().Arg<BufferR2<F32>>().To(
[](auto) { return Error::Success(); });
auto status = Call(*handler, call_frame);
EXPECT_THAT(
status,
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("Wrong buffer dtype: expected F32 but got S32")));
}
TEST(FfiTest, TokenArgument) {
CallFrameBuilder builder(1, 0);
builder.AddBufferArg(se::DeviceMemoryBase(), PrimitiveType::TOKEN, {});
auto call_frame = builder.Build();
auto fn = [&](Token tok) {
EXPECT_EQ(tok.typed_data(), nullptr);
EXPECT_EQ(tok.dimensions().size(), 0);
return Error::Success();
};
auto handler = Ffi::Bind().Arg<Token>().To(fn);
auto status = Call(*handler, call_frame);
TF_ASSERT_OK(status);
}
TEST(FfiTest, RemainingArgs) {
std::vector<float> storage(4, 0.0f);
se::DeviceMemoryBase memory(storage.data(), 4 * sizeof(float));
CallFrameBuilder builder(1, 0);
builder.AddBufferArg(memory, PrimitiveType::F32, {2, 2});
auto call_frame = builder.Build();
auto fn = [&](RemainingArgs args) {
EXPECT_EQ(args.size(), 1);
ErrorOr<AnyBuffer> arg0 = args.get<AnyBuffer>(0);
ErrorOr<AnyBuffer> arg1 = args.get<AnyBuffer>(1);
EXPECT_TRUE(arg0.has_value());
EXPECT_FALSE(arg1.has_value());
return Error::Success();
};
auto handler = Ffi::Bind().RemainingArgs().To(fn);
auto status = Call(*handler, call_frame);
TF_ASSERT_OK(status);
}
TEST(FfiTest, RemainingRets) {
std::vector<float> storage(4, 0.0f);
se::DeviceMemoryBase memory(storage.data(), 4 * sizeof(float));
CallFrameBuilder builder(0, 2);
builder.AddBufferRet(memory, PrimitiveType::F32, {2, 2});
builder.AddBufferRet(memory, PrimitiveType::F32, {2, 2});
auto call_frame = builder.Build();
auto fn = [&](Result<AnyBuffer> ret, RemainingRets rets) {
EXPECT_EQ(rets.size(), 1);
ErrorOr<Result<AnyBuffer>> ret0 = rets.get<AnyBuffer>(0);
ErrorOr<Result<AnyBuffer>> ret1 = rets.get<AnyBuffer>(1);
EXPECT_TRUE(ret0.has_value());
EXPECT_FALSE(ret1.has_value());
return Error::Success();
};
auto handler = Ffi::Bind().Ret<AnyBuffer>().RemainingRets().To(fn);
auto status = Call(*handler, call_frame);
TF_ASSERT_OK(status);
}
TEST(FfiTest, OptionalArgs) {
std::vector<float> storage(4, 0.0f);
se::DeviceMemoryBase memory(storage.data(), 4 * sizeof(float));
CallFrameBuilder builder(1, 0);
builder.AddBufferArg(memory, PrimitiveType::F32, {2, 2});
auto call_frame = builder.Build();
{
auto fn = [&](std::optional<AnyBuffer> arg0) {
EXPECT_TRUE(arg0.has_value());
return Error::Success();
};
auto handler = Ffi::Bind().OptionalArg<AnyBuffer>().To(fn);
auto status = Call(*handler, call_frame);
TF_ASSERT_OK(status);
}
{
auto fn = [&](std::optional<AnyBuffer> arg0,
std::optional<AnyBuffer> arg1) {
EXPECT_TRUE(arg0.has_value());
EXPECT_FALSE(arg1.has_value());
return Error::Success();
};
auto handler =
Ffi::Bind().OptionalArg<AnyBuffer>().OptionalArg<AnyBuffer>().To(fn);
auto status = Call(*handler, call_frame);
TF_ASSERT_OK(status);
}
{
auto fn = [&](AnyBuffer arg0, std::optional<AnyBuffer> arg1) {
EXPECT_FALSE(arg1.has_value());
return Error::Success();
};
auto handler = Ffi::Bind().Arg<AnyBuffer>().OptionalArg<AnyBuffer>().To(fn);
auto status = Call(*handler, call_frame);
TF_ASSERT_OK(status);
}
{
auto fn = [&](std::optional<AnyBuffer> arg0, RemainingArgs args) {
EXPECT_TRUE(arg0.has_value());
EXPECT_EQ(args.size(), 0);
return Error::Success();
};
auto handler = Ffi::Bind().OptionalArg<AnyBuffer>().RemainingArgs().To(fn);
auto status = Call(*handler, call_frame);
TF_ASSERT_OK(status);
}
}
TEST(FfiTest, OptionalRets) {
std::vector<float> storage(4, 0.0f);
se::DeviceMemoryBase memory(storage.data(), 4 * sizeof(float));
CallFrameBuilder builder(0, 1);
builder.AddBufferRet(memory, PrimitiveType::F32, {2, 2});
auto call_frame = builder.Build();
{
auto fn = [&](std::optional<Result<AnyBuffer>> ret0) {
EXPECT_TRUE(ret0.has_value());
return Error::Success();
};
auto handler = Ffi::Bind().OptionalRet<AnyBuffer>().To(fn);
auto status = Call(*handler, call_frame);
TF_ASSERT_OK(status);
}
{
auto fn = [&](std::optional<Result<AnyBuffer>> ret0,
std::optional<Result<AnyBuffer>> ret1) {
EXPECT_TRUE(ret0.has_value());
EXPECT_FALSE(ret1.has_value());
return Error::Success();
};
auto handler =
Ffi::Bind().OptionalRet<AnyBuffer>().OptionalRet<AnyBuffer>().To(fn);
auto status = Call(*handler, call_frame);
TF_ASSERT_OK(status);
}
{
auto fn = [&](Result<AnyBuffer> ret0,
std::optional<Result<AnyBuffer>> ret1) {
EXPECT_FALSE(ret1.has_value());
return Error::Success();
};
auto handler = Ffi::Bind().Ret<AnyBuffer>().OptionalRet<AnyBuffer>().To(fn);
auto status = Call(*handler, call_frame);
TF_ASSERT_OK(status);
}
{
auto fn = [&](std::optional<Result<AnyBuffer>> ret0, RemainingRets rets) {
EXPECT_TRUE(ret0.has_value());
EXPECT_EQ(rets.size(), 0);
return Error::Success();
};
auto handler = Ffi::Bind().OptionalRet<AnyBuffer>().RemainingRets().To(fn);
auto status = Call(*handler, call_frame);
TF_ASSERT_OK(status);
}
}
TEST(FfiTest, AutoBinding) {
static constexpr char kI32[] = "i32";
auto handler = Ffi::BindTo(+[](AnyBuffer buffer, Attr<int32_t, kI32> foo) {
EXPECT_EQ(*foo, 42);
return Error::Success();
});
std::vector<float> storage(4, 0.0f);
se::DeviceMemoryBase memory(storage.data(), 4 * sizeof(float));
CallFrameBuilder::AttributesBuilder attrs;
attrs.Insert(kI32, 42);
CallFrameBuilder builder(1, 0);
builder.AddBufferArg(memory, PrimitiveType::F32, {2, 2});
builder.AddAttributes(attrs.Build());
auto call_frame = builder.Build();
auto status = Call(*handler, call_frame);
TF_ASSERT_OK(status);
}
TEST(FfiTest, AutoBindingResult) {
auto handler =
Ffi::BindTo(+[](Result<AnyBuffer> buffer) { return Error::Success(); });
CallFrameBuilder builder(0, 1);
builder.AddBufferRet(se::DeviceMemoryBase(), PrimitiveType::F32, {});
auto call_frame = builder.Build();
auto status = Call(*handler, call_frame);
TF_ASSERT_OK(status);
}
TEST(FfiTest, AutoBindingStructs) {
auto handler = Ffi::BindTo(+[](PairOfI32AndF32 attrs) {
EXPECT_EQ(attrs.i32, 42);
EXPECT_EQ(attrs.f32, 42.0f);
return Error::Success();
});
CallFrameBuilder::AttributesBuilder attrs;
attrs.Insert("i32", 42);
attrs.Insert("f32", 42.0f);
CallFrameBuilder builder(0, 0);
builder.AddAttributes(attrs.Build());
auto call_frame = builder.Build();
auto status = Call(*handler, call_frame);
TF_ASSERT_OK(status);
}
TEST(FfiTest, AutoBindingDictionary) {
auto handler = Ffi::BindTo(+[](Dictionary attrs) {
EXPECT_EQ(*attrs.get<int32_t>("i32"), 42);
EXPECT_EQ(*attrs.get<float>("f32"), 42.0f);
return Error::Success();
});
CallFrameBuilder::AttributesBuilder attrs;
attrs.Insert("i32", 42);
attrs.Insert("f32", 42.0f);
CallFrameBuilder builder(0, 0);
builder.AddAttributes(attrs.Build());
auto call_frame = builder.Build();
auto status = Call(*handler, call_frame);
TF_ASSERT_OK(status);
}
struct TestStreamSt;
using TestStream = TestStreamSt*;
template <>
struct CtxBinding<TestStream> {
using Ctx = PlatformStream<TestStream>;
};
TEST(FfiTest, BindingPlatformStreamInference) {
(void)Ffi::BindTo(+[](TestStream stream) { return Error::Success(); });
}
TEST(FfiTest, ArrayAttr) {
CallFrameBuilder::AttributesBuilder attrs;
attrs.Insert("arr0", std::vector<int8_t>({1, 2, 3, 4}));
attrs.Insert("arr1", std::vector<int16_t>({1, 2, 3, 4}));
attrs.Insert("arr2", std::vector<int32_t>({1, 2, 3, 4}));
attrs.Insert("arr3", std::vector<int64_t>({1, 2, 3, 4}));
attrs.Insert("arr4", std::vector<uint8_t>({1, 2, 3, 4}));
attrs.Insert("arr5", std::vector<uint16_t>({1, 2, 3, 4}));
attrs.Insert("arr6", std::vector<uint32_t>({1, 2, 3, 4}));
attrs.Insert("arr7", std::vector<uint64_t>({1, 2, 3, 4}));
attrs.Insert("arr8", std::vector<float>({1, 2, 3, 4}));
attrs.Insert("arr9", std::vector<double>({1, 2, 3, 4}));
CallFrameBuilder builder(0, 0);
builder.AddAttributes(attrs.Build());
auto call_frame = builder.Build();
auto fn = [&](auto arr0, auto arr1, auto arr2, auto arr3, auto arr4,
auto arr5, auto arr6, auto arr7, auto arr8, auto arr9) {
EXPECT_EQ(arr0, Span<const int8_t>({1, 2, 3, 4}));
EXPECT_EQ(arr1, Span<const int16_t>({1, 2, 3, 4}));
EXPECT_EQ(arr2, Span<const int32_t>({1, 2, 3, 4}));
EXPECT_EQ(arr3, Span<const int64_t>({1, 2, 3, 4}));
EXPECT_EQ(arr4, Span<const uint8_t>({1, 2, 3, 4}));
EXPECT_EQ(arr5, Span<const uint16_t>({1, 2, 3, 4}));
EXPECT_EQ(arr6, Span<const uint32_t>({1, 2, 3, 4}));
EXPECT_EQ(arr7, Span<const uint64_t>({1, 2, 3, 4}));
EXPECT_EQ(arr8, Span<const float>({1, 2, 3, 4}));
EXPECT_EQ(arr9, Span<const double>({1, 2, 3, 4}));
return Error::Success();
};
auto handler = Ffi::Bind()
.Attr<Span<const int8_t>>("arr0")
.Attr<Span<const int16_t>>("arr1")
.Attr<Span<const int32_t>>("arr2")
.Attr<Span<const int64_t>>("arr3")
.Attr<Span<const uint8_t>>("arr4")
.Attr<Span<const uint16_t>>("arr5")
.Attr<Span<const uint32_t>>("arr6")
.Attr<Span<const uint64_t>>("arr7")
.Attr<Span<const float>>("arr8")
.Attr<Span<const double>>("arr9")
.To(fn);
auto status = Call(*handler, call_frame);
TF_ASSERT_OK(status);
}
TEST(FfiTest, AttrsAsDictionary) {
CallFrameBuilder::AttributesBuilder attrs;
attrs.Insert("i32", 42);
attrs.Insert("f32", 42.0f);
attrs.Insert("str", "foo");
CallFrameBuilder builder(0, 0);
builder.AddAttributes(attrs.Build());
auto call_frame = builder.Build();
auto fn = [&](Dictionary dict) {
EXPECT_EQ(dict.size(), 3);
EXPECT_TRUE(dict.contains("i32"));
EXPECT_TRUE(dict.contains("f32"));
EXPECT_TRUE(dict.contains("str"));
ErrorOr<int32_t> i32 = dict.get<int32_t>("i32");
ErrorOr<float> f32 = dict.get<float>("f32");
ErrorOr<std::string_view> str = dict.get<std::string_view>("str");
EXPECT_TRUE(i32.has_value());
EXPECT_TRUE(f32.has_value());
EXPECT_TRUE(str.has_value());
if (i32.has_value()) EXPECT_EQ(*i32, 42);
if (f32.has_value()) EXPECT_EQ(*f32, 42.0f);
if (str.has_value()) EXPECT_EQ(*str, "foo");
EXPECT_FALSE(dict.contains("i64"));
EXPECT_FALSE(dict.get<int64_t>("i32").has_value());
EXPECT_FALSE(dict.get<int64_t>("i64").has_value());
return Error::Success();
};
auto handler = Ffi::Bind().Attrs().To(fn);
auto status = Call(*handler, call_frame);
TF_ASSERT_OK(status);
}
TEST(FfiTest, DictionaryAttr) {
CallFrameBuilder::AttributesMap dict0;
dict0.try_emplace("i32", 42);
CallFrameBuilder::AttributesMap dict1;
dict1.try_emplace("f32", 42.0f);
CallFrameBuilder::AttributesBuilder attrs;
attrs.Insert("dict0", dict0);
attrs.Insert("dict1", dict1);
CallFrameBuilder builder(0, 0);
builder.AddAttributes(attrs.Build());
auto call_frame = builder.Build();
auto fn = [&](Dictionary dict0, Dictionary dict1) {
EXPECT_EQ(dict0.size(), 1);
EXPECT_EQ(dict1.size(), 1);
EXPECT_TRUE(dict0.contains("i32"));
EXPECT_TRUE(dict1.contains("f32"));
ErrorOr<int32_t> i32 = dict0.get<int32_t>("i32");
ErrorOr<float> f32 = dict1.get<float>("f32");
EXPECT_TRUE(i32.has_value());
EXPECT_TRUE(f32.has_value());
if (i32.has_value()) EXPECT_EQ(*i32, 42);
if (f32.has_value()) EXPECT_EQ(*f32, 42.0f);
return Error::Success();
};
auto handler =
Ffi::Bind().Attr<Dictionary>("dict0").Attr<Dictionary>("dict1").To(fn);
auto status = Call(*handler, call_frame);
TF_ASSERT_OK(status);
}
TEST(FfiTest, StructAttr) {
CallFrameBuilder::AttributesMap dict;
dict.try_emplace("i32", 42);
dict.try_emplace("f32", 42.0f);
CallFrameBuilder::AttributesBuilder attrs;
attrs.Insert("str", "foo");
attrs.Insert("i32_and_f32", dict);
CallFrameBuilder builder(0, 0);
builder.AddAttributes(attrs.Build());
auto call_frame = builder.Build();
auto fn = [&](std::string_view str, PairOfI32AndF32 i32_and_f32) {
EXPECT_EQ(str, "foo");
EXPECT_EQ(i32_and_f32.i32, 42);
EXPECT_EQ(i32_and_f32.f32, 42.0f);
return Error::Success();
};
auto handler = Ffi::Bind()
.Attr<std::string_view>("str")
.Attr<PairOfI32AndF32>("i32_and_f32")
.To(fn);
auto status = Call(*handler, call_frame);
TF_ASSERT_OK(status);
}
TEST(FfiTest, AttrsAsStruct) {
CallFrameBuilder::AttributesBuilder attrs;
attrs.Insert("i32", 42);
attrs.Insert("f32", 42.0f);
CallFrameBuilder builder(0, 0);
builder.AddAttributes(attrs.Build());
auto call_frame = builder.Build();
auto fn = [&](PairOfI32AndF32 i32_and_f32) {
EXPECT_EQ(i32_and_f32.i32, 42);
EXPECT_EQ(i32_and_f32.f32, 42.0f);
return Error::Success();
};
auto handler = Ffi::Bind().Attrs<PairOfI32AndF32>().To(fn);
auto status = Call(*handler, call_frame);
TF_ASSERT_OK(status);
}
TEST(FfiTest, PointerAttr) {
std::string foo = "foo";
auto ptr = reinterpret_cast<uintptr_t>(&foo);
static_assert(sizeof(ptr) == sizeof(int64_t));
CallFrameBuilder::AttributesBuilder attrs;
attrs.Insert("ptr", static_cast<int64_t>(ptr));
CallFrameBuilder builder(0, 0);
builder.AddAttributes(attrs.Build());
auto call_frame = builder.Build();
auto fn = [&](const std::string* str) {
EXPECT_EQ(*str, "foo");
return Error::Success();
};
auto handler = Ffi::Bind().Attr<Pointer<std::string>>("ptr").To(fn);
auto status = Call(*handler, call_frame);
TF_ASSERT_OK(status);
}
TEST(FfiTest, EnumAttr) {
CallFrameBuilder::AttributesBuilder attrs;
attrs.Insert("i32_one", static_cast<std::underlying_type_t<Int32BasedEnum>>(
Int32BasedEnum::kOne));
attrs.Insert("i32_two", static_cast<std::underlying_type_t<Int32BasedEnum>>(
Int32BasedEnum::kTwo));
attrs.Insert("i64_one", static_cast<std::underlying_type_t<Int64BasedEnum>>(
Int64BasedEnum::kOne));
attrs.Insert("i64_two", static_cast<std::underlying_type_t<Int64BasedEnum>>(
Int64BasedEnum::kTwo));
CallFrameBuilder builder(0, 0);
builder.AddAttributes(attrs.Build());
auto call_frame = builder.Build();
auto fn = [&](Int32BasedEnum i32_one, Int32BasedEnum i32_two,
Int64BasedEnum i64_one, Int64BasedEnum i64_two) {
EXPECT_EQ(i32_one, Int32BasedEnum::kOne);
EXPECT_EQ(i32_two, Int32BasedEnum::kTwo);
EXPECT_EQ(i64_one, Int64BasedEnum::kOne);
EXPECT_EQ(i64_two, Int64BasedEnum::kTwo);
return Error::Success();
};
auto handler = Ffi::Bind()
.Attr<Int32BasedEnum>("i32_one")
.Attr<Int32BasedEnum>("i32_two")
.Attr<Int64BasedEnum>("i64_one")
.Attr<Int64BasedEnum>("i64_two")
.To(fn);
auto status = Call(*handler, call_frame);
TF_ASSERT_OK(status);
}
TEST(FfiTest, WrongEnumAttrType) {
CallFrameBuilder::AttributesMap dict;
dict.try_emplace("i32", 42);
CallFrameBuilder::AttributesBuilder attrs;
attrs.Insert("i32_enum1", dict);
attrs.Insert("i32_enum0", 42u);
CallFrameBuilder builder(0, 0);
builder.AddAttributes(attrs.Build());
auto call_frame = builder.Build();
auto fn = [](Int32BasedEnum, Int32BasedEnum) { return Error::Success(); };
auto handler = Ffi::Bind()
.Attr<Int32BasedEnum>("i32_enum0")
.Attr<Int32BasedEnum>("i32_enum1")
.To(fn);
auto status = Call(*handler, call_frame);
EXPECT_TRUE(absl::StrContains(
status.message(),
"Failed to decode all FFI handler operands (bad operands at: 0, 1)"))
<< "status.message():\n"
<< status.message() << "\n";
EXPECT_TRUE(absl::StrContains(status.message(),
"Wrong scalar data type: expected S32 but got"))
<< "status.message():\n"
<< status.message() << "\n";
EXPECT_TRUE(absl::StrContains(
status.message(),
"Wrong attribute type: expected scalar but got dictionary"))
<< "status.message():\n"
<< status.message() << "\n";
}
struct MyData {
static TypeId id;
std::string str;
};
TypeId MyData::id = {};
XLA_FFI_REGISTER_TYPE(GetXlaFfiApi(), "my_data", &MyData::id);
TEST(FfiTest, UserData) {
MyData data{"foo"};
ExecutionContext execution_context;
TF_ASSERT_OK(execution_context.Insert(
TypeIdRegistry::TypeId(MyData::id.type_id), &data));
CallFrameBuilder builder(0, 0);
auto call_frame = builder.Build();
auto fn = [&](MyData* data) {
EXPECT_EQ(data->str, "foo");
return Error::Success();
};
auto handler = Ffi::Bind().Ctx<UserData<MyData>>().To(fn);
CallOptions options;
options.execution_context = &execution_context;
auto status = Call(*handler, call_frame, options);
TF_ASSERT_OK(status);
}
struct MyState {
static TypeId id;
explicit MyState(int32_t value) : value(value) {}
int32_t value;
};
TypeId MyState::id = {};
XLA_FFI_REGISTER_TYPE(GetXlaFfiApi(), "state", &MyState::id);
TEST(FfiTest, StatefulHandler) {
ExecutionState execution_state;
CallFrameBuilder builder(0, 0);
auto call_frame = builder.Build();
CallOptions options;
options.execution_state = &execution_state;
auto instantiate =
Ffi::BindInstantiate().To([]() -> ErrorOr<std::unique_ptr<MyState>> {
return std::make_unique<MyState>(42);
});
auto execute = Ffi::Bind().Ctx<State<MyState>>().To([](MyState* state) {
EXPECT_EQ(state->value, 42);
return Error::Success();
});
TF_ASSERT_OK(
Call(*instantiate, call_frame, options, ExecutionStage::kInstantiate));
TF_ASSERT_OK(Call(*execute, call_frame, options));
}
TEST(FfiTest, ScratchAllocator) {
static void* kAddr = reinterpret_cast<void*>(0xDEADBEEF);
struct TestDeviceMemoryAllocator final : public se::DeviceMemoryAllocator {
size_t count;
TestDeviceMemoryAllocator()
: se::DeviceMemoryAllocator(nullptr), count(0) {}
absl::StatusOr<se::OwningDeviceMemory> Allocate(int, uint64_t size, bool,
int64_t) final {
count++;
return se::OwningDeviceMemory(se::DeviceMemoryBase(kAddr, size), 0, this);
}
absl::Status Deallocate(int, se::DeviceMemoryBase mem) final {
count--;
EXPECT_EQ(mem.opaque(), kAddr);
return absl::OkStatus();
}
absl::StatusOr<se::Stream*> GetStream(int) final {
return absl::UnimplementedError("Not implemented");
}
};
auto fn = [&](ScratchAllocator scratch_allocator) {
auto mem = scratch_allocator.Allocate(1024);
EXPECT_EQ(*mem, kAddr);
return Error::Success();
};
TestDeviceMemoryAllocator allocator;
auto handler = Ffi::Bind().Ctx<ScratchAllocator>().To(fn);
CallFrame call_frame = CallFrameBuilder(0, 0).Build();
CallOptions options;
options.backend_options = CallOptions::GpuOptions{nullptr, &allocator};
auto status = Call(*handler, call_frame, options);
TF_ASSERT_OK(status);
EXPECT_EQ(allocator.count, 0);
}
TEST(FfiTest, ScratchAllocatorUnimplemented) {
auto fn = [&](ScratchAllocator scratch_allocator) {
auto mem = scratch_allocator.Allocate(1024);
EXPECT_FALSE(mem.has_value());
return Error::Success();
};
auto handler = Ffi::Bind().Ctx<ScratchAllocator>().To(fn);
CallFrame call_frame = CallFrameBuilder(0, 0).Build();
auto status = Call(*handler, call_frame);
TF_ASSERT_OK(status);
}
TEST(FfiTest, ThreadPool) {
tsl::thread::ThreadPool pool(tsl::Env::Default(), "ffi-test", 2);
Eigen::ThreadPoolDevice device(pool.AsEigenThreadPool(), pool.NumThreads());
auto fn = [&](ThreadPool thread_pool) {
absl::BlockingCounter prepare(1);
absl::BlockingCounter execute(1);
thread_pool.Schedule([&] {
prepare.Wait();
execute.DecrementCount();
});
prepare.DecrementCount();
execute.Wait();
return Error::Success();
};
auto handler = Ffi::Bind().Ctx<ThreadPool>().To(fn);
CallFrame call_frame = CallFrameBuilder(0, 0).Build();
CallOptions options;
options.backend_options = CallOptions::CpuOptions{&device};
auto status = Call(*handler, call_frame, options);
TF_ASSERT_OK(status);
}
TEST(FfiTest, AsyncHandler) {
tsl::thread::ThreadPool pool(tsl::Env::Default(), "ffi-test", 2);
Eigen::ThreadPoolDevice device(pool.AsEigenThreadPool(), pool.NumThreads());
int32_t value = 0;
auto fn = [&](ThreadPool thread_pool) -> Future {
Promise promise;
Future future(promise);
thread_pool.Schedule([&, promise = std::move(promise)]() mutable {
value = 42;
promise.SetAvailable();
});
return future;
};
auto handler = Ffi::Bind().Ctx<ThreadPool>().To(fn);
CallFrame call_frame = CallFrameBuilder(0, 0).Build();
CallOptions options;
options.backend_options = CallOptions::CpuOptions{&device};
{
absl::Status status = Call(*handler, call_frame, options);
TF_ASSERT_OK(status);
EXPECT_EQ(value, 42);
}
value = 0;
{
tsl::AsyncValueRef<tsl::Chain> async_value =
CallAsync(*handler, call_frame, options);
tsl::BlockUntilReady(async_value);
ASSERT_TRUE(async_value.IsConcrete());
EXPECT_EQ(value, 42);
}
}
TEST(FfiTest, Metadata) {
auto api = GetXlaFfiApi();
auto handler = Ffi::BindTo([]() { return Error::Success(); });
auto maybe_metadata = GetMetadata(*handler);
EXPECT_TRUE(maybe_metadata.ok());
auto metadata = maybe_metadata.value();
EXPECT_EQ(metadata.api_version.major_version, api->api_version.major_version);
EXPECT_EQ(metadata.api_version.minor_version, api->api_version.minor_version);
EXPECT_EQ(metadata.traits, 0);
}
TEST(FfiTest, MetadataTraits) {
auto handler = Ffi::BindTo([]() { return Error::Success(); },
{Traits::kCmdBufferCompatible});
auto maybe_metadata = GetMetadata(*handler);
EXPECT_TRUE(maybe_metadata.ok());
auto metadata = maybe_metadata.value();
EXPECT_EQ(metadata.api_version.major_version, XLA_FFI_API_MAJOR);
EXPECT_EQ(metadata.api_version.minor_version, XLA_FFI_API_MINOR);
EXPECT_EQ(metadata.traits, XLA_FFI_HANDLER_TRAITS_COMMAND_BUFFER_COMPATIBLE);
}
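// Benchmark helper: builds a call frame with `num_args` F32 buffer
// arguments of rank `rank`, each with all-ones dimensions.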
static CallFrameBuilder WithBufferArgs(size_t num_args, size_t rank = 4) {
se::DeviceMemoryBase memory;
std::vector<int64_t> dims(rank, 1);
CallFrameBuilder builder(num_args, 0);
for (size_t i = 0; i < num_args; ++i) {
builder.AddBufferArg(memory, PrimitiveType::F32, dims);
}
return builder;
}
void BM_AnyBufferArgX1(benchmark::State& state) {
auto call_frame = WithBufferArgs(1).Build();
auto handler = Ffi::Bind().Arg<AnyBuffer>().To([](auto buffer) {
benchmark::DoNotOptimize(buffer);
return Error::Success();
});
for (auto _ : state) {
CHECK_OK(Call(*handler, call_frame));
}
}
BENCHMARK(BM_AnyBufferArgX1);
void BM_AnyBufferArgX4(benchmark::State& state) {
auto call_frame = WithBufferArgs(4).Build();
auto handler = Ffi::Bind()
.Arg<AnyBuffer>()
.Arg<AnyBuffer>()
.Arg<AnyBuffer>()
.Arg<AnyBuffer>()
.To([](auto b0, auto b1, auto b2, auto b3) {
benchmark::DoNotOptimize(b0);
benchmark::DoNotOptimize(b1);
benchmark::DoNotOptimize(b2);
benchmark::DoNotOptimize(b3);
return Error::Success();
});
for (auto _ : state) {
CHECK_OK(Call(*handler, call_frame));
}
}
BENCHMARK(BM_AnyBufferArgX4);
void BM_AsyncAnyBufferArgX1(benchmark::State& state) {
auto call_frame = WithBufferArgs(1).Build();
auto handler = Ffi::Bind().Arg<AnyBuffer>().To([](auto buffer) {
benchmark::DoNotOptimize(buffer);
Promise promise;
promise.SetAvailable();
return Future(promise);
});
for (auto _ : state) {
CHECK_OK(Call(*handler, call_frame));
}
}
BENCHMARK(BM_AsyncAnyBufferArgX1);
void BM_BufferArgX1(benchmark::State& state) {
auto call_frame = WithBufferArgs(1).Build();
auto handler = Ffi::Bind().Arg<BufferR4<F32>>().To([](auto buffer) {
benchmark::DoNotOptimize(buffer);
return Error::Success();
});
for (auto _ : state) {
CHECK_OK(Call(*handler, call_frame));
}
}
BENCHMARK(BM_BufferArgX1);
void BM_BufferArgX4(benchmark::State& state) {
auto call_frame = WithBufferArgs(4).Build();
auto handler = Ffi::Bind()
.Arg<BufferR4<F32>>()
.Arg<BufferR4<F32>>()
.Arg<BufferR4<F32>>()
.Arg<BufferR4<F32>>()
.To([](auto b0, auto b1, auto b2, auto b3) {
benchmark::DoNotOptimize(b0);
benchmark::DoNotOptimize(b1);
benchmark::DoNotOptimize(b2);
benchmark::DoNotOptimize(b3);
return Error::Success();
});
for (auto _ : state) {
CHECK_OK(Call(*handler, call_frame));
}
}
BENCHMARK(BM_BufferArgX4);
void BM_BufferArgX8(benchmark::State& state) {
auto call_frame = WithBufferArgs(8).Build();
auto handler = Ffi::Bind()
.Arg<BufferR4<F32>>()
.Arg<BufferR4<F32>>()
.Arg<BufferR4<F32>>()
.Arg<BufferR4<F32>>()
.Arg<BufferR4<F32>>()
.Arg<BufferR4<F32>>()
.Arg<BufferR4<F32>>()
.Arg<BufferR4<F32>>()
.To([](auto b0, auto b1, auto b2, auto b3, auto b4,
auto b5, auto b6, auto b7) {
benchmark::DoNotOptimize(b0);
benchmark::DoNotOptimize(b1);
benchmark::DoNotOptimize(b2);
benchmark::DoNotOptimize(b3);
benchmark::DoNotOptimize(b4);
benchmark::DoNotOptimize(b5);
benchmark::DoNotOptimize(b6);
benchmark::DoNotOptimize(b7);
return Error::Success();
});
for (auto _ : state) {
CHECK_OK(Call(*handler, call_frame));
}
}
BENCHMARK(BM_BufferArgX8);
void BM_TupleOfI32Attrs(benchmark::State& state) {
CallFrameBuilder::AttributesBuilder attrs;
attrs.Insert("i32_0", 1);
attrs.Insert("i32_1", 2);
attrs.Insert("i32_2", 3);
attrs.Insert("i32_3", 4);
CallFrameBuilder builder(0, 0);
builder.AddAttributes(attrs.Build());
auto call_frame = builder.Build();
auto handler = Ffi::Bind().Attrs<TupleOfI32>().To([](auto tuple) {
benchmark::DoNotOptimize(tuple);
return Error::Success();
});
for (auto _ : state) {
CHECK_OK(Call(*handler, call_frame));
}
}
BENCHMARK(BM_TupleOfI32Attrs);
}

Code Url: https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/ffi/api/ffi.h
Test Code Url: https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/ffi/api/ffi_test.cc
Commit Hash: 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
ID: dadf7064-62b2-4cb3-8bb2-f22ef4f9c855
Language: cpp
Repository Name: abseil/abseil-cpp
File Name: clock
File Path in Repository: absl/time/clock.cc
File Path for Unit Test: absl/time/clock_test.cc

#include "absl/time/clock.h"
#include "absl/base/attributes.h"
#include "absl/base/optimization.h"
#ifdef _WIN32
#include <windows.h>
#endif
#include <algorithm>
#include <atomic>
#include <cerrno>
#include <cstdint>
#include <ctime>
#include <limits>
#include "absl/base/internal/spinlock.h"
#include "absl/base/internal/unscaledcycleclock.h"
#include "absl/base/macros.h"
#include "absl/base/port.h"
#include "absl/base/thread_annotations.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
Time Now() {
int64_t n = absl::GetCurrentTimeNanos();
if (n >= 0) {
return time_internal::FromUnixDuration(
time_internal::MakeDuration(n / 1000000000, n % 1000000000 * 4));
}
return time_internal::FromUnixDuration(absl::Nanoseconds(n));
}
ABSL_NAMESPACE_END
}
#ifndef ABSL_USE_CYCLECLOCK_FOR_GET_CURRENT_TIME_NANOS
#define ABSL_USE_CYCLECLOCK_FOR_GET_CURRENT_TIME_NANOS 0
#endif
#if defined(__APPLE__) || defined(_WIN32)
#include "absl/time/internal/get_current_time_chrono.inc"
#else
#include "absl/time/internal/get_current_time_posix.inc"
#endif
#ifndef GET_CURRENT_TIME_NANOS_FROM_SYSTEM
#define GET_CURRENT_TIME_NANOS_FROM_SYSTEM() \
::absl::time_internal::GetCurrentTimeNanosFromSystem()
#endif
#if !ABSL_USE_CYCLECLOCK_FOR_GET_CURRENT_TIME_NANOS
namespace absl {
ABSL_NAMESPACE_BEGIN
int64_t GetCurrentTimeNanos() { return GET_CURRENT_TIME_NANOS_FROM_SYSTEM(); }
ABSL_NAMESPACE_END
}
#else
#ifndef GET_CURRENT_TIME_NANOS_CYCLECLOCK_NOW
#define GET_CURRENT_TIME_NANOS_CYCLECLOCK_NOW() \
::absl::time_internal::UnscaledCycleClockWrapperForGetCurrentTime::Now()
#endif
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace time_internal {
#if !defined(NDEBUG) && defined(__x86_64__)
constexpr int64_t kCycleClockNowMask = ~int64_t{0xff};
#else
constexpr int64_t kCycleClockNowMask = ~int64_t{0};
#endif
class UnscaledCycleClockWrapperForGetCurrentTime {
public:
static int64_t Now() {
return base_internal::UnscaledCycleClock::Now() & kCycleClockNowMask;
}
};
}
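// A minimal seqlock. SeqAcquire makes the sequence number odd (write in
// progress) and returns the next even value, which SeqRelease publishes
// with release semantics; readers retry if they observe an odd or changed
// sequence number across their reads.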
static inline uint64_t SeqAcquire(std::atomic<uint64_t> *seq) {
uint64_t x = seq->fetch_add(1, std::memory_order_relaxed);
std::atomic_thread_fence(std::memory_order_release);
return x + 2;
}
static inline void SeqRelease(std::atomic<uint64_t> *seq, uint64_t x) {
seq->store(x, std::memory_order_release);
}
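// Cycle-to-nanosecond ratios are kept in fixed point with 2^kScale
// fractional bits, so the fast path converts with one multiply and a shift.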
enum { kScale = 30 };
static const uint64_t kMinNSBetweenSamples = 2000 << 20;
static_assert(((kMinNSBetweenSamples << (kScale + 1)) >> (kScale + 1)) ==
kMinNSBetweenSamples,
"cannot represent kMaxBetweenSamplesNSScaled");
struct TimeSampleAtomic {
std::atomic<uint64_t> raw_ns{0};
std::atomic<uint64_t> base_ns{0};
std::atomic<uint64_t> base_cycles{0};
std::atomic<uint64_t> nsscaled_per_cycle{0};
std::atomic<uint64_t> min_cycles_per_sample{0};
};
struct TimeSample {
uint64_t raw_ns = 0;
uint64_t base_ns = 0;
uint64_t base_cycles = 0;
uint64_t nsscaled_per_cycle = 0;
uint64_t min_cycles_per_sample = 0;
};
struct ABSL_CACHELINE_ALIGNED TimeState {
std::atomic<uint64_t> seq{0};
TimeSampleAtomic last_sample;
int64_t stats_initializations{0};
int64_t stats_reinitializations{0};
int64_t stats_calibrations{0};
int64_t stats_slow_paths{0};
int64_t stats_fast_slow_paths{0};
uint64_t last_now_cycles ABSL_GUARDED_BY(lock){0};
std::atomic<uint64_t> approx_syscall_time_in_cycles{10 * 1000};
std::atomic<uint32_t> kernel_time_seen_smaller{0};
absl::base_internal::SpinLock lock{absl::kConstInit,
base_internal::SCHEDULE_KERNEL_ONLY};
};
ABSL_CONST_INIT static TimeState time_state;
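// Reads the kernel clock bracketed by two cycle-counter reads, retrying
// while the bracket is wider than the estimated syscall cost or the cycle
// counter has not clearly advanced past the previous call's reading; it
// also adapts that cost estimate up after repeated slow samples and down
// after repeated fast ones.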
static int64_t GetCurrentTimeNanosFromKernel(uint64_t last_cycleclock,
uint64_t *cycleclock)
ABSL_EXCLUSIVE_LOCKS_REQUIRED(time_state.lock) {
uint64_t local_approx_syscall_time_in_cycles =
time_state.approx_syscall_time_in_cycles.load(std::memory_order_relaxed);
int64_t current_time_nanos_from_system;
uint64_t before_cycles;
uint64_t after_cycles;
uint64_t elapsed_cycles;
int loops = 0;
do {
before_cycles =
static_cast<uint64_t>(GET_CURRENT_TIME_NANOS_CYCLECLOCK_NOW());
current_time_nanos_from_system = GET_CURRENT_TIME_NANOS_FROM_SYSTEM();
after_cycles =
static_cast<uint64_t>(GET_CURRENT_TIME_NANOS_CYCLECLOCK_NOW());
elapsed_cycles = after_cycles - before_cycles;
if (elapsed_cycles >= local_approx_syscall_time_in_cycles &&
++loops == 20) {
loops = 0;
if (local_approx_syscall_time_in_cycles < 1000 * 1000) {
local_approx_syscall_time_in_cycles =
(local_approx_syscall_time_in_cycles + 1) << 1;
}
time_state.approx_syscall_time_in_cycles.store(
local_approx_syscall_time_in_cycles, std::memory_order_relaxed);
}
} while (elapsed_cycles >= local_approx_syscall_time_in_cycles ||
last_cycleclock - after_cycles < (static_cast<uint64_t>(1) << 16));
if ((local_approx_syscall_time_in_cycles >> 1) < elapsed_cycles) {
time_state.kernel_time_seen_smaller.store(0, std::memory_order_relaxed);
} else if (time_state.kernel_time_seen_smaller.fetch_add(
1, std::memory_order_relaxed) >= 3) {
const uint64_t new_approximation =
local_approx_syscall_time_in_cycles -
(local_approx_syscall_time_in_cycles >> 3);
time_state.approx_syscall_time_in_cycles.store(new_approximation,
std::memory_order_relaxed);
time_state.kernel_time_seen_smaller.store(0, std::memory_order_relaxed);
}
*cycleclock = after_cycles;
return current_time_nanos_from_system;
}
static int64_t GetCurrentTimeNanosSlowPath() ABSL_ATTRIBUTE_COLD;
static void ReadTimeSampleAtomic(const struct TimeSampleAtomic *atomic,
struct TimeSample *sample) {
sample->base_ns = atomic->base_ns.load(std::memory_order_relaxed);
sample->base_cycles = atomic->base_cycles.load(std::memory_order_relaxed);
sample->nsscaled_per_cycle =
atomic->nsscaled_per_cycle.load(std::memory_order_relaxed);
sample->min_cycles_per_sample =
atomic->min_cycles_per_sample.load(std::memory_order_relaxed);
sample->raw_ns = atomic->raw_ns.load(std::memory_order_relaxed);
}
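// Lock-free fast path: read the last sample under the seqlock protocol and,
// if the sample is still fresh (fewer than min_cycles_per_sample cycles
// old), extrapolate base_ns by the scaled cycle delta; otherwise fall back
// to the slow path, which consults the kernel clock.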
int64_t GetCurrentTimeNanos() {
uint64_t base_ns;
uint64_t base_cycles;
uint64_t nsscaled_per_cycle;
uint64_t min_cycles_per_sample;
uint64_t seq_read0;
uint64_t seq_read1;
uint64_t now_cycles =
static_cast<uint64_t>(GET_CURRENT_TIME_NANOS_CYCLECLOCK_NOW());
seq_read0 = time_state.seq.load(std::memory_order_acquire);
base_ns = time_state.last_sample.base_ns.load(std::memory_order_relaxed);
base_cycles =
time_state.last_sample.base_cycles.load(std::memory_order_relaxed);
nsscaled_per_cycle =
time_state.last_sample.nsscaled_per_cycle.load(std::memory_order_relaxed);
min_cycles_per_sample = time_state.last_sample.min_cycles_per_sample.load(
std::memory_order_relaxed);
std::atomic_thread_fence(std::memory_order_acquire);
seq_read1 = time_state.seq.load(std::memory_order_relaxed);
uint64_t delta_cycles;
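  // Fast path: the sample is consistent and recent enough that the cached
  // rate can be trusted; extrapolate without taking the lock.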
if (seq_read0 == seq_read1 && (seq_read0 & 1) == 0 &&
(delta_cycles = now_cycles - base_cycles) < min_cycles_per_sample) {
return static_cast<int64_t>(
base_ns + ((delta_cycles * nsscaled_per_cycle) >> kScale));
}
return GetCurrentTimeNanosSlowPath();
}
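// Returns (a << kScale) / b, reducing the shift (and scaling b to match) when
// a << kScale would overflow 64 bits.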
static uint64_t SafeDivideAndScale(uint64_t a, uint64_t b) {
int safe_shift = kScale;
while (((a << safe_shift) >> safe_shift) != a) {
safe_shift--;
}
uint64_t scaled_b = b >> (kScale - safe_shift);
uint64_t quotient = 0;
if (scaled_b != 0) {
quotient = (a << safe_shift) / scaled_b;
}
return quotient;
}
static uint64_t UpdateLastSample(
uint64_t now_cycles, uint64_t now_ns, uint64_t delta_cycles,
const struct TimeSample *sample) ABSL_ATTRIBUTE_COLD;
ABSL_ATTRIBUTE_NOINLINE
static int64_t GetCurrentTimeNanosSlowPath()
ABSL_LOCKS_EXCLUDED(time_state.lock) {
time_state.lock.Lock();
uint64_t now_cycles;
uint64_t now_ns = static_cast<uint64_t>(
GetCurrentTimeNanosFromKernel(time_state.last_now_cycles, &now_cycles));
time_state.last_now_cycles = now_cycles;
uint64_t estimated_base_ns;
struct TimeSample sample;
ReadTimeSampleAtomic(&time_state.last_sample, &sample);
uint64_t delta_cycles = now_cycles - sample.base_cycles;
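  // If the cached rate still covers this interval, extrapolate under the
  // lock; otherwise fold the fresh kernel reading into the sample.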
if (delta_cycles < sample.min_cycles_per_sample) {
estimated_base_ns = sample.base_ns +
((delta_cycles * sample.nsscaled_per_cycle) >> kScale);
time_state.stats_fast_slow_paths++;
} else {
estimated_base_ns =
UpdateLastSample(now_cycles, now_ns, delta_cycles, &sample);
}
time_state.lock.Unlock();
return static_cast<int64_t>(estimated_base_ns);
}
static uint64_t UpdateLastSample(uint64_t now_cycles, uint64_t now_ns,
uint64_t delta_cycles,
const struct TimeSample *sample)
ABSL_EXCLUSIVE_LOCKS_REQUIRED(time_state.lock) {
uint64_t estimated_base_ns = now_ns;
uint64_t lock_value =
SeqAcquire(&time_state.seq);
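  // (Re)initialize if there is no usable sample: first call, sample older
  // than 5 seconds, or time/cycle counter moved backwards. Zeroing the rate
  // keeps the fast path disabled until the next calibration.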
if (sample->raw_ns == 0 ||
sample->raw_ns + static_cast<uint64_t>(5) * 1000 * 1000 * 1000 < now_ns ||
now_ns < sample->raw_ns || now_cycles < sample->base_cycles) {
time_state.last_sample.raw_ns.store(now_ns, std::memory_order_relaxed);
time_state.last_sample.base_ns.store(estimated_base_ns,
std::memory_order_relaxed);
time_state.last_sample.base_cycles.store(now_cycles,
std::memory_order_relaxed);
time_state.last_sample.nsscaled_per_cycle.store(0,
std::memory_order_relaxed);
time_state.last_sample.min_cycles_per_sample.store(
0, std::memory_order_relaxed);
time_state.stats_initializations++;
} else if (sample->raw_ns + 500 * 1000 * 1000 < now_ns &&
sample->base_cycles + 50 < now_cycles) {
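    // At least 500ms of kernel time has elapsed: recalibrate. First
    // extrapolate a base from the old rate, shifting delta_cycles down as
    // needed so the multiplication cannot overflow.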
if (sample->nsscaled_per_cycle != 0) {
uint64_t estimated_scaled_ns;
int s = -1;
do {
s++;
estimated_scaled_ns = (delta_cycles >> s) * sample->nsscaled_per_cycle;
} while (estimated_scaled_ns / sample->nsscaled_per_cycle !=
(delta_cycles >> s));
estimated_base_ns = sample->base_ns +
(estimated_scaled_ns >> (kScale - s));
}
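    // Measure the true rate over the elapsed interval, then pick a slightly
    // adjusted rate that cancels all but 1/16 of the accumulated error by the
    // time of the next sample.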
uint64_t ns = now_ns - sample->raw_ns;
uint64_t measured_nsscaled_per_cycle = SafeDivideAndScale(ns, delta_cycles);
uint64_t assumed_next_sample_delta_cycles =
SafeDivideAndScale(kMinNSBetweenSamples, measured_nsscaled_per_cycle);
int64_t diff_ns = static_cast<int64_t>(now_ns - estimated_base_ns);
ns = static_cast<uint64_t>(static_cast<int64_t>(kMinNSBetweenSamples) +
diff_ns - (diff_ns / 16));
uint64_t new_nsscaled_per_cycle =
SafeDivideAndScale(ns, assumed_next_sample_delta_cycles);
if (new_nsscaled_per_cycle != 0 &&
diff_ns < 100 * 1000 * 1000 && -diff_ns < 100 * 1000 * 1000) {
time_state.last_sample.nsscaled_per_cycle.store(
new_nsscaled_per_cycle, std::memory_order_relaxed);
uint64_t new_min_cycles_per_sample =
SafeDivideAndScale(kMinNSBetweenSamples, new_nsscaled_per_cycle);
time_state.last_sample.min_cycles_per_sample.store(
new_min_cycles_per_sample, std::memory_order_relaxed);
time_state.stats_calibrations++;
} else {
time_state.last_sample.nsscaled_per_cycle.store(
0, std::memory_order_relaxed);
time_state.last_sample.min_cycles_per_sample.store(
0, std::memory_order_relaxed);
estimated_base_ns = now_ns;
time_state.stats_reinitializations++;
}
time_state.last_sample.raw_ns.store(now_ns, std::memory_order_relaxed);
time_state.last_sample.base_ns.store(estimated_base_ns,
std::memory_order_relaxed);
time_state.last_sample.base_cycles.store(now_cycles,
std::memory_order_relaxed);
} else {
time_state.stats_slow_paths++;
}
SeqRelease(&time_state.seq, lock_value);
return estimated_base_ns;
}
ABSL_NAMESPACE_END
}
#endif
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace {
constexpr absl::Duration MaxSleep() {
#ifdef _WIN32
return absl::Milliseconds(
std::numeric_limits<unsigned long>::max());
#else
return absl::Seconds(std::numeric_limits<time_t>::max());
#endif
}
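// Sleeps once for up to MaxSleep(); on POSIX, nanosleep is resumed after
// EINTR using the remaining time it reports.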
void SleepOnce(absl::Duration to_sleep) {
#ifdef _WIN32
Sleep(static_cast<DWORD>(to_sleep / absl::Milliseconds(1)));
#else
struct timespec sleep_time = absl::ToTimespec(to_sleep);
while (nanosleep(&sleep_time, &sleep_time) != 0 && errno == EINTR) {
}
#endif
}
}
ABSL_NAMESPACE_END
}
extern "C" {
ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSleepFor)(
absl::Duration duration) {
while (duration > absl::ZeroDuration()) {
absl::Duration to_sleep = std::min(duration, absl::MaxSleep());
absl::SleepOnce(to_sleep);
duration -= to_sleep;
}
}
} | #include "absl/time/clock.h"
#include "absl/base/config.h"
#if defined(ABSL_HAVE_ALARM)
#include <signal.h>
#include <unistd.h>
#ifdef _AIX
typedef void (*sig_t)(int);
#endif
#elif defined(__linux__) || defined(__APPLE__)
#error all known Linux and Apple targets have alarm
#endif
#include "gtest/gtest.h"
#include "absl/time/time.h"
namespace {
TEST(Time, Now) {
const absl::Time before = absl::FromUnixNanos(absl::GetCurrentTimeNanos());
const absl::Time now = absl::Now();
const absl::Time after = absl::FromUnixNanos(absl::GetCurrentTimeNanos());
EXPECT_GE(now, before);
EXPECT_GE(after, now);
}
enum class AlarmPolicy { kWithoutAlarm, kWithAlarm };
#if defined(ABSL_HAVE_ALARM)
bool alarm_handler_invoked = false;
void AlarmHandler(int signo) {
ASSERT_EQ(signo, SIGALRM);
alarm_handler_invoked = true;
}
#endif
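// Retries SleepFor(d) until one attempt lands within
// [lower_bound, upper_bound] or the deadline passes; with kWithAlarm, a
// SIGALRM is scheduled halfway through the sleep and attempts where it failed
// to fire are discarded.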
bool SleepForBounded(absl::Duration d, absl::Duration lower_bound,
absl::Duration upper_bound, absl::Duration timeout,
AlarmPolicy alarm_policy, int* attempts) {
const absl::Time deadline = absl::Now() + timeout;
while (absl::Now() < deadline) {
#if defined(ABSL_HAVE_ALARM)
sig_t old_alarm = SIG_DFL;
if (alarm_policy == AlarmPolicy::kWithAlarm) {
alarm_handler_invoked = false;
old_alarm = signal(SIGALRM, AlarmHandler);
alarm(absl::ToInt64Seconds(d / 2));
}
#else
EXPECT_EQ(alarm_policy, AlarmPolicy::kWithoutAlarm);
#endif
++*attempts;
absl::Time start = absl::Now();
absl::SleepFor(d);
absl::Duration actual = absl::Now() - start;
#if defined(ABSL_HAVE_ALARM)
if (alarm_policy == AlarmPolicy::kWithAlarm) {
signal(SIGALRM, old_alarm);
if (!alarm_handler_invoked) continue;
}
#endif
if (lower_bound <= actual && actual <= upper_bound) {
return true;
}
}
return false;
}
testing::AssertionResult AssertSleepForBounded(absl::Duration d,
absl::Duration early,
absl::Duration late,
absl::Duration timeout,
AlarmPolicy alarm_policy) {
const absl::Duration lower_bound = d - early;
const absl::Duration upper_bound = d + late;
int attempts = 0;
if (SleepForBounded(d, lower_bound, upper_bound, timeout, alarm_policy,
&attempts)) {
return testing::AssertionSuccess();
}
return testing::AssertionFailure()
<< "SleepFor(" << d << ") did not return within [" << lower_bound
<< ":" << upper_bound << "] in " << attempts << " attempt"
<< (attempts == 1 ? "" : "s") << " over " << timeout
<< (alarm_policy == AlarmPolicy::kWithAlarm ? " with" : " without")
<< " an alarm";
}
TEST(SleepFor, Bounded) {
const absl::Duration d = absl::Milliseconds(2500);
const absl::Duration early = absl::Milliseconds(100);
const absl::Duration late = absl::Milliseconds(300);
const absl::Duration timeout = 48 * d;
EXPECT_TRUE(AssertSleepForBounded(d, early, late, timeout,
AlarmPolicy::kWithoutAlarm));
#if defined(ABSL_HAVE_ALARM)
EXPECT_TRUE(AssertSleepForBounded(d, early, late, timeout,
AlarmPolicy::kWithAlarm));
#endif
}
} | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/time/clock.cc | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/time/clock_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4 |
880f9741-8dae-41d1-975d-c0afa4e453ed | cpp | tensorflow/tensorflow | cpu_topology | third_party/xla/xla/pjrt/cpu/cpu_topology.cc | third_party/xla/xla/pjrt/cpu/cpu_topology_test.cc | #include "xla/pjrt/cpu/cpu_topology.h"
#include <cstddef>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "xla/pjrt/cpu/cpu_topology.pb.h"
namespace xla {
std::unique_ptr<const CpuTopology> CpuTopology::FromProto(
const CpuTopologyProto& cpu_topology_proto) {
std::vector<CpuTopology::CpuDevice> devices;
devices.reserve(cpu_topology_proto.cpu_devices_size());
for (size_t i = 0; i < cpu_topology_proto.cpu_devices_size(); ++i) {
auto& cpu_device_proto = cpu_topology_proto.cpu_devices(i);
devices.push_back(CpuDevice{cpu_device_proto.process_index(),
cpu_device_proto.local_hardware_id()});
}
std::vector<std::string> machine_attributes;
machine_attributes.reserve(cpu_topology_proto.machine_attributes_size());
for (size_t i = 0; i < cpu_topology_proto.machine_attributes_size(); ++i) {
machine_attributes.push_back(cpu_topology_proto.machine_attributes(i));
}
return std::make_unique<CpuTopology>(std::move(devices),
std::move(machine_attributes));
}
CpuTopologyProto CpuTopology::ToProto() const {
CpuTopologyProto proto;
for (auto& cpu_device : cpu_devices_) {
auto* cpu_device_proto = proto.add_cpu_devices();
cpu_device_proto->set_process_index(cpu_device.process_id);
cpu_device_proto->set_local_hardware_id(cpu_device.local_device_id);
}
for (const std::string& machine_attribute : machine_attributes_) {
proto.add_machine_attributes(machine_attribute);
}
return proto;
}
} | #include "xla/pjrt/cpu/cpu_topology.h"
#include <memory>
#include "xla/pjrt/cpu/cpu_topology.pb.h"
#include "tsl/platform/protobuf.h"
#include "tsl/platform/test.h"
namespace xla {
namespace {
TEST(CpuTopology, FromProto) {
CpuTopologyProto msg;
ASSERT_TRUE(tsl::protobuf::TextFormat::ParseFromString(
R"pb(
cpu_devices:
[ { process_index: 2, local_hardware_id: 3 }]
machine_attributes: [ "x86_64", "Intel" ]
)pb",
&msg));
std::unique_ptr<const CpuTopology> cpu_topology = CpuTopology::FromProto(msg);
EXPECT_EQ(cpu_topology->devices().size(), 1);
EXPECT_EQ(cpu_topology->devices()[0].process_id, 2);
EXPECT_EQ(cpu_topology->devices()[0].local_device_id, 3);
EXPECT_EQ(cpu_topology->machine_attributes().size(), 2);
EXPECT_EQ(cpu_topology->machine_attributes()[0], "x86_64");
EXPECT_EQ(cpu_topology->machine_attributes()[1], "Intel");
}
TEST(CpuTopology, ToProto) {
CpuTopology cpu_topology({{2, 3}}, {"ab", "cd"});
CpuTopologyProto msg = cpu_topology.ToProto();
EXPECT_EQ(msg.cpu_devices_size(), 1);
EXPECT_EQ(msg.cpu_devices(0).process_index(), 2);
EXPECT_EQ(msg.cpu_devices(0).local_hardware_id(), 3);
EXPECT_EQ(msg.machine_attributes_size(), 2);
EXPECT_EQ(msg.machine_attributes(0), "ab");
EXPECT_EQ(msg.machine_attributes(1), "cd");
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/pjrt/cpu/cpu_topology.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/pjrt/cpu/cpu_topology_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
7104f0ae-7ea6-4577-a737-6eb9c8a9195e | cpp | google/quiche | proof_source_x509 | quiche/quic/core/crypto/proof_source_x509.cc | quiche/quic/core/crypto/proof_source_x509_test.cc | #include "quiche/quic/core/crypto/proof_source_x509.h"
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "openssl/ssl.h"
#include "quiche/quic/core/crypto/certificate_view.h"
#include "quiche/quic/core/crypto/crypto_protocol.h"
#include "quiche/quic/core/crypto/crypto_utils.h"
#include "quiche/quic/core/quic_data_writer.h"
#include "quiche/quic/platform/api/quic_bug_tracker.h"
#include "quiche/common/quiche_endian.h"
namespace quic {
ProofSourceX509::ProofSourceX509(
quiche::QuicheReferenceCountedPointer<Chain> default_chain,
CertificatePrivateKey default_key) {
if (!AddCertificateChain(default_chain, std::move(default_key))) {
return;
}
default_certificate_ = &certificates_.front();
}
std::unique_ptr<ProofSourceX509> ProofSourceX509::Create(
quiche::QuicheReferenceCountedPointer<Chain> default_chain,
CertificatePrivateKey default_key) {
std::unique_ptr<ProofSourceX509> result(
new ProofSourceX509(default_chain, std::move(default_key)));
if (!result->valid()) {
return nullptr;
}
return result;
}
void ProofSourceX509::GetProof(
    const QuicSocketAddress& /*server_address*/,
    const QuicSocketAddress& /*client_address*/, const std::string& hostname,
    const std::string& server_config,
    QuicTransportVersion /*transport_version*/, absl::string_view chlo_hash,
std::unique_ptr<ProofSource::Callback> callback) {
QuicCryptoProof proof;
if (!valid()) {
QUIC_BUG(ProofSourceX509::GetProof called in invalid state)
<< "ProofSourceX509::GetProof called while the object is not valid";
callback->Run(false, nullptr, proof, nullptr);
return;
}
std::optional<std::string> payload =
CryptoUtils::GenerateProofPayloadToBeSigned(chlo_hash, server_config);
if (!payload.has_value()) {
callback->Run(false, nullptr, proof, nullptr);
return;
}
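  // Pick a certificate for the requested hostname and sign the payload with
  // its key using RSA-PSS with SHA-256.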
Certificate* certificate = GetCertificate(hostname, &proof.cert_matched_sni);
proof.signature =
certificate->key.Sign(*payload, SSL_SIGN_RSA_PSS_RSAE_SHA256);
MaybeAddSctsForHostname(hostname, proof.leaf_cert_scts);
  callback->Run(!proof.signature.empty(), certificate->chain, proof,
                /*details=*/nullptr);
}
quiche::QuicheReferenceCountedPointer<ProofSource::Chain>
ProofSourceX509::GetCertChain(const QuicSocketAddress& /*server_address*/,
                              const QuicSocketAddress& /*client_address*/,
const std::string& hostname,
bool* cert_matched_sni) {
if (!valid()) {
QUIC_BUG(ProofSourceX509::GetCertChain called in invalid state)
<< "ProofSourceX509::GetCertChain called while the object is not "
"valid";
return nullptr;
}
return GetCertificate(hostname, cert_matched_sni)->chain;
}
void ProofSourceX509::ComputeTlsSignature(
    const QuicSocketAddress& /*server_address*/,
    const QuicSocketAddress& /*client_address*/, const std::string& hostname,
uint16_t signature_algorithm, absl::string_view in,
std::unique_ptr<ProofSource::SignatureCallback> callback) {
if (!valid()) {
QUIC_BUG(ProofSourceX509::ComputeTlsSignature called in invalid state)
<< "ProofSourceX509::ComputeTlsSignature called while the object is "
"not valid";
callback->Run(false, "", nullptr);
return;
}
bool cert_matched_sni;
std::string signature = GetCertificate(hostname, &cert_matched_sni)
->key.Sign(in, signature_algorithm);
callback->Run(!signature.empty(), signature, nullptr);
}
QuicSignatureAlgorithmVector ProofSourceX509::SupportedTlsSignatureAlgorithms()
const {
return SupportedSignatureAlgorithmsForQuic();
}
ProofSource::TicketCrypter* ProofSourceX509::GetTicketCrypter() {
return nullptr;
}
bool ProofSourceX509::AddCertificateChain(
quiche::QuicheReferenceCountedPointer<Chain> chain,
CertificatePrivateKey key) {
if (chain->certs.empty()) {
QUIC_BUG(quic_bug_10644_1) << "Empty certificate chain supplied.";
return false;
}
std::unique_ptr<CertificateView> leaf =
CertificateView::ParseSingleCertificate(chain->certs[0]);
if (leaf == nullptr) {
QUIC_BUG(quic_bug_10644_2)
<< "Unable to parse X.509 leaf certificate in the supplied chain.";
return false;
}
if (!key.MatchesPublicKey(*leaf)) {
QUIC_BUG(quic_bug_10644_3)
<< "Private key does not match the leaf certificate.";
return false;
}
certificates_.push_front(Certificate{
chain,
std::move(key),
});
Certificate* certificate = &certificates_.front();
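  // Index the chain under every SAN domain of its leaf certificate.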
for (absl::string_view host : leaf->subject_alt_name_domains()) {
certificate_map_[std::string(host)] = certificate;
}
return true;
}
ProofSourceX509::Certificate* ProofSourceX509::GetCertificate(
const std::string& hostname, bool* cert_matched_sni) const {
QUICHE_DCHECK(valid());
auto it = certificate_map_.find(hostname);
if (it != certificate_map_.end()) {
*cert_matched_sni = true;
return it->second;
}
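  // No exact match: retry with the leftmost DNS label replaced by '*'.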
auto dot_pos = hostname.find('.');
if (dot_pos != std::string::npos) {
std::string wildcard = absl::StrCat("*", hostname.substr(dot_pos));
it = certificate_map_.find(wildcard);
if (it != certificate_map_.end()) {
*cert_matched_sni = true;
return it->second;
}
}
*cert_matched_sni = false;
return default_certificate_;
}
} | #include "quiche/quic/core/crypto/proof_source_x509.h"
#include <memory>
#include <string>
#include <utility>
#include "absl/strings/string_view.h"
#include "openssl/ssl.h"
#include "quiche/quic/core/crypto/certificate_view.h"
#include "quiche/quic/core/crypto/proof_source.h"
#include "quiche/quic/platform/api/quic_expect_bug.h"
#include "quiche/quic/platform/api/quic_ip_address.h"
#include "quiche/quic/platform/api/quic_socket_address.h"
#include "quiche/quic/platform/api/quic_test.h"
#include "quiche/quic/test_tools/test_certificates.h"
#include "quiche/common/platform/api/quiche_reference_counted.h"
namespace quic {
namespace test {
namespace {
quiche::QuicheReferenceCountedPointer<ProofSource::Chain> MakeChain(
absl::string_view cert) {
return quiche::QuicheReferenceCountedPointer<ProofSource::Chain>(
new ProofSource::Chain(std::vector<std::string>{std::string(cert)}));
}
class ProofSourceX509Test : public QuicTest {
public:
ProofSourceX509Test()
: test_chain_(MakeChain(kTestCertificate)),
wildcard_chain_(MakeChain(kWildcardCertificate)),
test_key_(
CertificatePrivateKey::LoadFromDer(kTestCertificatePrivateKey)),
wildcard_key_(CertificatePrivateKey::LoadFromDer(
kWildcardCertificatePrivateKey)) {
QUICHE_CHECK(test_key_ != nullptr);
QUICHE_CHECK(wildcard_key_ != nullptr);
}
protected:
quiche::QuicheReferenceCountedPointer<ProofSource::Chain> test_chain_,
wildcard_chain_;
std::unique_ptr<CertificatePrivateKey> test_key_, wildcard_key_;
};
TEST_F(ProofSourceX509Test, AddCertificates) {
std::unique_ptr<ProofSourceX509> proof_source =
ProofSourceX509::Create(test_chain_, std::move(*test_key_));
ASSERT_TRUE(proof_source != nullptr);
EXPECT_TRUE(proof_source->AddCertificateChain(wildcard_chain_,
std::move(*wildcard_key_)));
}
TEST_F(ProofSourceX509Test, AddCertificateKeyMismatch) {
std::unique_ptr<ProofSourceX509> proof_source =
ProofSourceX509::Create(test_chain_, std::move(*test_key_));
ASSERT_TRUE(proof_source != nullptr);
test_key_ = CertificatePrivateKey::LoadFromDer(kTestCertificatePrivateKey);
EXPECT_QUIC_BUG((void)proof_source->AddCertificateChain(
wildcard_chain_, std::move(*test_key_)),
"Private key does not match");
}
TEST_F(ProofSourceX509Test, CertificateSelection) {
std::unique_ptr<ProofSourceX509> proof_source =
ProofSourceX509::Create(test_chain_, std::move(*test_key_));
ASSERT_TRUE(proof_source != nullptr);
ASSERT_TRUE(proof_source->AddCertificateChain(wildcard_chain_,
std::move(*wildcard_key_)));
bool cert_matched_sni;
EXPECT_EQ(proof_source
->GetCertChain(QuicSocketAddress(), QuicSocketAddress(),
"unknown.test", &cert_matched_sni)
->certs[0],
kTestCertificate);
EXPECT_FALSE(cert_matched_sni);
EXPECT_EQ(proof_source
->GetCertChain(QuicSocketAddress(), QuicSocketAddress(),
"mail.example.org", &cert_matched_sni)
->certs[0],
kTestCertificate);
EXPECT_TRUE(cert_matched_sni);
EXPECT_EQ(proof_source
->GetCertChain(QuicSocketAddress(), QuicSocketAddress(),
"www.foo.test", &cert_matched_sni)
->certs[0],
kWildcardCertificate);
EXPECT_TRUE(cert_matched_sni);
EXPECT_EQ(proof_source
->GetCertChain(QuicSocketAddress(), QuicSocketAddress(),
"www.wildcard.test", &cert_matched_sni)
->certs[0],
kWildcardCertificate);
EXPECT_TRUE(cert_matched_sni);
EXPECT_EQ(proof_source
->GetCertChain(QuicSocketAddress(), QuicSocketAddress(),
"etc.wildcard.test", &cert_matched_sni)
->certs[0],
kWildcardCertificate);
EXPECT_TRUE(cert_matched_sni);
EXPECT_EQ(proof_source
->GetCertChain(QuicSocketAddress(), QuicSocketAddress(),
"wildcard.test", &cert_matched_sni)
->certs[0],
kTestCertificate);
EXPECT_FALSE(cert_matched_sni);
}
TEST_F(ProofSourceX509Test, TlsSignature) {
class Callback : public ProofSource::SignatureCallback {
public:
void Run(bool ok, std::string signature,
             std::unique_ptr<ProofSource::Details> /*details*/) override {
ASSERT_TRUE(ok);
std::unique_ptr<CertificateView> view =
CertificateView::ParseSingleCertificate(kTestCertificate);
EXPECT_TRUE(view->VerifySignature("Test data", signature,
SSL_SIGN_RSA_PSS_RSAE_SHA256));
}
};
std::unique_ptr<ProofSourceX509> proof_source =
ProofSourceX509::Create(test_chain_, std::move(*test_key_));
ASSERT_TRUE(proof_source != nullptr);
proof_source->ComputeTlsSignature(QuicSocketAddress(), QuicSocketAddress(),
"example.com", SSL_SIGN_RSA_PSS_RSAE_SHA256,
"Test data", std::make_unique<Callback>());
}
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/crypto/proof_source_x509.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/crypto/proof_source_x509_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
0db8354f-3004-4343-b70d-579abe43b572 | cpp | tensorflow/tensorflow | add_default_attributes | tensorflow/tools/graph_transforms/add_default_attributes.cc | tensorflow/tools/graph_transforms/add_default_attributes_test.cc | #include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/graph_def_util.h"
#include "tensorflow/tools/graph_transforms/transform_utils.h"
namespace tensorflow {
namespace graph_transforms {
Status AddDefaultAttributes(const GraphDef& input_graph_def,
const TransformFuncContext& context,
GraphDef* output_graph_def) {
std::unique_ptr<FunctionLibraryDefinition> flib_def(
new FunctionLibraryDefinition(OpRegistry::Global(),
input_graph_def.library()));
*output_graph_def = input_graph_def;
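  // Fill in attrs that are absent from the nodes but have defaults in the
  // graph's function library or the global op registry, starting at node 0.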
TF_RETURN_IF_ERROR(AddDefaultAttrsToGraphDef(output_graph_def, *flib_def, 0));
return OkStatus();
}
REGISTER_GRAPH_TRANSFORM("add_default_attributes", AddDefaultAttributes);
}
} | #include "tensorflow/cc/ops/const_op.h"
#include "tensorflow/cc/ops/image_ops.h"
#include "tensorflow/cc/ops/nn_ops.h"
#include "tensorflow/cc/ops/sendrecv_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
#include "tensorflow/core/public/session.h"
#include "tensorflow/tools/graph_transforms/transform_utils.h"
namespace tensorflow {
namespace graph_transforms {
Status AddDefaultAttributes(const GraphDef& input_graph_def,
const TransformFuncContext& context,
GraphDef* output_graph_def);
class AddDefaultAttributesTest : public ::testing::Test {
protected:
void TestAddDefaultAttributes() {
GraphDef graph_def;
NodeDef* lrn_node1 = graph_def.add_node();
lrn_node1->set_name("lrn_node1");
lrn_node1->set_op("LRN");
NodeDef* lrn_node2 = graph_def.add_node();
lrn_node2->set_name("lrn_node2");
lrn_node2->set_op("LRN");
SetNodeAttr("depth_radius", 7, lrn_node2);
SetNodeAttr("bias", 2.0f, lrn_node2);
SetNodeAttr("alpha", 2.0f, lrn_node2);
SetNodeAttr("beta", 1.0f, lrn_node2);
GraphDef result;
TF_ASSERT_OK(AddDefaultAttributes(graph_def, {}, &result));
std::map<string, const NodeDef*> nodes;
MapNamesToNodes(result, &nodes);
EXPECT_EQ(5, nodes.at("lrn_node1")->attr().at("depth_radius").i());
EXPECT_NEAR(1.0f, nodes.at("lrn_node1")->attr().at("bias").f(), 1e-5f);
EXPECT_NEAR(1.0f, nodes.at("lrn_node1")->attr().at("alpha").f(), 1e-5f);
EXPECT_NEAR(0.5f, nodes.at("lrn_node1")->attr().at("beta").f(), 1e-5f);
EXPECT_EQ(7, nodes.at("lrn_node2")->attr().at("depth_radius").i());
EXPECT_NEAR(2.0f, nodes.at("lrn_node2")->attr().at("bias").f(), 1e-5f);
EXPECT_NEAR(2.0f, nodes.at("lrn_node2")->attr().at("alpha").f(), 1e-5f);
EXPECT_NEAR(1.0f, nodes.at("lrn_node2")->attr().at("beta").f(), 1e-5f);
}
};
TEST_F(AddDefaultAttributesTest, TestAddDefaultAttributes) {
TestAddDefaultAttributes();
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/tools/graph_transforms/add_default_attributes.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/tools/graph_transforms/add_default_attributes_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
6318f003-9cc1-4633-bfab-6c6cc68f3bfa | cpp | tensorflow/tensorflow | flatten_atrous | tensorflow/tools/graph_transforms/flatten_atrous.cc | tensorflow/tools/graph_transforms/flatten_atrous_test.cc | #include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/graph/subgraph.h"
#include "tensorflow/core/platform/init_main.h"
#include "tensorflow/core/public/session.h"
#include "tensorflow/tools/graph_transforms/transform_utils.h"
namespace tensorflow {
namespace graph_transforms {
Status FlattenAtrousConv(const GraphDef& input_graph_def,
const TransformFuncContext& context,
GraphDef* output_graph_def) {
GraphDef replaced_graph_def;
TF_RETURN_IF_ERROR(ReplaceMatchingOpTypes(
input_graph_def,
{"BatchToSpaceND",
{
{"Conv2D|DepthwiseConv2dNative",
{
{"SpaceToBatchND",
{
{"*"},
{"*"},
{"*"}
}
},
{"*"}
}
},
{"*"},
{"*"}
}
},
[](const NodeMatch& match, const std::set<string>& input_nodes,
const std::set<string>& output_nodes,
std::vector<NodeDef>* new_nodes) {
const NodeDef& batch_to_space_node = match.node;
const NodeDef& conv_node = match.inputs[0].node;
const NodeDef& filter_node = match.inputs[0].inputs[1].node;
const NodeDef& input_node = match.inputs[0].inputs[0].inputs[0].node;
const NodeDef& space_to_batch_block_shape_node =
match.inputs[0].inputs[0].inputs[1].node;
Tensor block_shape =
GetNodeTensorAttr(space_to_batch_block_shape_node, "value");
const int32_t block_height = block_shape.flat<int32>()(0);
const int32_t block_width = block_shape.flat<int32>()(1);
const Tensor& filter = GetNodeTensorAttr(filter_node, "value");
const int32_t filter_height = filter.dim_size(0);
const int32_t filter_width = filter.dim_size(1);
const int32_t in_channels = filter.dim_size(2);
const int32_t out_channels = filter.dim_size(3);
const int32_t upsampled_filter_height =
(filter_height - 1) * block_height + 1;
const int32_t upsampled_filter_width =
(filter_width - 1) * block_width + 1;
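      // Upsample the filter by inserting (block - 1) zeros between taps; a
      // stride-1 SAME convolution with this filter is equivalent to the
      // original atrous convolution.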
Tensor upsampled_filter(
DT_FLOAT,
TensorShape({upsampled_filter_height, upsampled_filter_width,
in_channels, out_channels}));
auto filter_eigen = filter.tensor<float, 4>();
auto upsampled_filter_eigen = upsampled_filter.tensor<float, 4>();
upsampled_filter_eigen.setZero();
for (int h = 0; h < filter_height; ++h) {
for (int w = 0; w < filter_width; ++w) {
for (int c_in = 0; c_in < in_channels; ++c_in) {
for (int c_out = 0; c_out < out_channels; ++c_out) {
upsampled_filter_eigen(block_height * h, block_width * w, c_in,
c_out) = filter_eigen(h, w, c_in, c_out);
}
}
}
}
NodeDef upsampled_filter_node;
upsampled_filter_node.set_op("Const");
upsampled_filter_node.set_name(filter_node.name());
SetNodeAttr("dtype", DT_FLOAT, &upsampled_filter_node);
SetNodeTensorAttr<float>("value", upsampled_filter,
&upsampled_filter_node);
NodeDef flattened_conv_node;
flattened_conv_node.set_name(batch_to_space_node.name());
flattened_conv_node.set_op(conv_node.op());
flattened_conv_node.set_device(conv_node.device());
AddNodeInput(input_node.name(), &flattened_conv_node);
AddNodeInput(upsampled_filter_node.name(), &flattened_conv_node);
CopyNodeAttr(conv_node, "T", "T", &flattened_conv_node);
CopyNodeAttr(conv_node, "strides", "strides", &flattened_conv_node);
SetNodeAttr("padding", "SAME", &flattened_conv_node);
CopyNodeAttr(conv_node, "data_format", "data_format",
&flattened_conv_node);
if (conv_node.op() == "Conv2D") {
CopyNodeAttr(conv_node, "use_cudnn_on_gpu", "use_cudnn_on_gpu",
&flattened_conv_node);
}
new_nodes->push_back(input_node);
new_nodes->push_back(upsampled_filter_node);
new_nodes->push_back(flattened_conv_node);
return OkStatus();
},
{}, &replaced_graph_def));
*output_graph_def = replaced_graph_def;
return OkStatus();
}
REGISTER_GRAPH_TRANSFORM("flatten_atrous_conv", FlattenAtrousConv);
}
} | #include "tensorflow/cc/ops/array_ops.h"
#include "tensorflow/cc/ops/const_op.h"
#include "tensorflow/cc/ops/nn_ops.h"
#include "tensorflow/cc/ops/sendrecv_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
#include "tensorflow/core/public/session.h"
#include "tensorflow/tools/graph_transforms/transform_utils.h"
namespace tensorflow {
namespace graph_transforms {
Status FlattenAtrousConv(const GraphDef& input_graph_def,
const TransformFuncContext& context,
GraphDef* output_graph_def);
class FlattenAtrousConvTest : public ::testing::Test {
protected:
template <class TConvOp>
void TestFlattenAtrousConv() {
auto root = tensorflow::Scope::NewRootScope();
using namespace ::tensorflow::ops;
Tensor input_data(DT_FLOAT, TensorShape({1, 3, 3, 2}));
test::FillValues<float>(
&input_data, {.1f, .4f, .2f, .5f, .3f, .6f, -1.0f, -.4f, -.2f, -.5f,
-.3f, -.6f, .1f, .4f, .2f, .5f, .3f, .6f});
Output input_op =
Const(root.WithOpName("input_op"), Input::Initializer(input_data));
Tensor block_shape_data(DT_INT32, TensorShape({2}));
test::FillValues<int>(&block_shape_data, {2, 2});
Output block_shape_op = Const(root.WithOpName("block_shape_op"),
Input::Initializer(block_shape_data));
Tensor paddings_data(DT_INT32, TensorShape({2, 2}));
test::FillValues<int>(&paddings_data, {1, 2, 1, 2});
Output paddings_op = Const(root.WithOpName("paddings_op"),
Input::Initializer(paddings_data));
Output space_to_batch_op =
SpaceToBatchND(root.WithOpName("space_to_batch_op"), input_op,
block_shape_op, paddings_op);
Tensor weights_data(DT_FLOAT, TensorShape({2, 2, 2, 1}));
test::FillValues<float>(&weights_data,
{.1f, .2f, .3f, .4f, .1f, .2f, .3f, .4f});
Output weights_op =
Const(root.WithOpName("weights_op"), Input::Initializer(weights_data));
Output conv_op = TConvOp(root.WithOpName("conv_op"), space_to_batch_op,
weights_op, {1, 1, 1, 1}, "VALID");
Tensor crops_data(DT_INT32, TensorShape({2, 2}));
test::FillValues<int>(&crops_data, {0, 1, 0, 1});
Output crops_op =
Const(root.WithOpName("crops_op"), Input::Initializer(crops_data));
Output batch_to_space_op = BatchToSpaceND(
root.WithOpName("output"), conv_op, block_shape_op, crops_op);
GraphDef original_graph_def;
TF_ASSERT_OK(root.ToGraphDef(&original_graph_def));
std::unique_ptr<Session> original_session(NewSession(SessionOptions()));
TF_ASSERT_OK(original_session->Create(original_graph_def));
std::vector<Tensor> original_outputs;
TF_ASSERT_OK(original_session->Run({}, {"output"}, {}, &original_outputs));
GraphDef modified_graph_def;
TF_ASSERT_OK(FlattenAtrousConv(original_graph_def, {{}, {"output"}},
&modified_graph_def));
std::unique_ptr<Session> modified_session(NewSession(SessionOptions()));
TF_ASSERT_OK(modified_session->Create(modified_graph_def));
std::vector<Tensor> modified_outputs;
TF_ASSERT_OK(modified_session->Run({}, {"output"}, {}, &modified_outputs));
EXPECT_EQ(3, modified_graph_def.node_size());
EXPECT_EQ("input_op", modified_graph_def.node(0).name());
EXPECT_EQ("weights_op", modified_graph_def.node(1).name());
EXPECT_EQ("output", modified_graph_def.node(2).name());
EXPECT_EQ("Const", modified_graph_def.node(0).op());
EXPECT_EQ("Const", modified_graph_def.node(1).op());
EXPECT_EQ(conv_op.node()->type_string(), modified_graph_def.node(2).op());
test::ExpectTensorNear<float>(original_outputs[0], modified_outputs[0],
1e-6);
}
};
TEST_F(FlattenAtrousConvTest, TestFlattenAtrousConv2D) {
TestFlattenAtrousConv<::tensorflow::ops::Conv2D>();
}
TEST_F(FlattenAtrousConvTest, TestFlattenAtrousDepthwiseConv2dNative) {
TestFlattenAtrousConv<::tensorflow::ops::DepthwiseConv2dNative>();
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/tools/graph_transforms/flatten_atrous.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/tools/graph_transforms/flatten_atrous_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
7c19b9ed-d84d-4088-8f01-1a38749b11b0 | cpp | tensorflow/tensorflow | freeze_saved_model | tensorflow/cc/tools/freeze_saved_model.cc | tensorflow/cc/tools/freeze_saved_model_test.cc | #include "tensorflow/cc/tools/freeze_saved_model.h"
#include <iostream>
#include <queue>
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "tensorflow/cc/saved_model/loader.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/versions.pb.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/protobuf/meta_graph.pb.h"
#include "tensorflow/core/public/session.h"
#include "tsl/platform/errors.h"
namespace tensorflow {
namespace {
void GetTensorNamesFromTensorInfo(const TensorInfo& tensor_info,
std::unordered_set<string>* tensor_names) {
if (tensor_info.has_coo_sparse()) {
const TensorInfo_CooSparse& coo_sparse = tensor_info.coo_sparse();
tensor_names->insert(coo_sparse.values_tensor_name());
tensor_names->insert(coo_sparse.indices_tensor_name());
tensor_names->insert(coo_sparse.dense_shape_tensor_name());
} else if (tensor_info.has_composite_tensor()) {
for (const auto& component : tensor_info.composite_tensor().components()) {
tensor_names->insert(component.name());
}
} else {
tensor_names->insert(tensor_info.name());
}
}
void GetSignatureDefsInputsAndOutputs(
const SavedModelBundle& saved_model_bundle,
std::unordered_set<string>* inputs, std::unordered_set<string>* outputs) {
for (auto& sigdef_elem : saved_model_bundle.meta_graph_def.signature_def()) {
const SignatureDef& signature_def = sigdef_elem.second;
for (auto& input_elem : signature_def.inputs()) {
GetTensorNamesFromTensorInfo(input_elem.second, inputs);
}
for (auto& output_elem : signature_def.outputs()) {
GetTensorNamesFromTensorInfo(output_elem.second, outputs);
}
}
}
void GetNodeNameToNodeDefMap(
GraphDef* graph_def,
std::unordered_map<string, NodeDef*>* name_to_node_map) {
for (size_t i = 0; i < graph_def->node_size(); i++) {
NodeDef* node = graph_def->mutable_node(i);
(*name_to_node_map)[node->name()] = node;
}
}
const string GetNodeNameFromTensorName(string tensor_name) {
if (tensor_name[0] == '^') {
tensor_name.erase(0, 1);
}
std::vector<string> tensor_name_parts = str_util::Split(tensor_name, ':');
return tensor_name_parts[0];
}
void GetReachableNodesAndVariables(
GraphDef* graph_def, const std::unordered_set<string>& outputs,
const std::unordered_map<string, NodeDef*>& name_to_node_map,
std::unordered_set<string>* reachable_node_names,
std::unordered_set<string>* variable_node_names) {
static const std::unordered_set<string>* kVariableTypes =
new std::unordered_set<string>({"Variable", "VariableV2", "VarHandleOp"});
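  // Breadth-first search from the nodes producing the requested outputs,
  // recording every reachable node and which of those are variables.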
std::queue<string> nodes_to_visit;
for (const string& output_tensor_name : outputs) {
nodes_to_visit.push(GetNodeNameFromTensorName(output_tensor_name));
}
while (!nodes_to_visit.empty()) {
const string node_name = nodes_to_visit.front();
nodes_to_visit.pop();
if (reachable_node_names->find(node_name) != reachable_node_names->end()) {
continue;
}
reachable_node_names->insert(node_name);
NodeDef* node = name_to_node_map.at(node_name);
if (kVariableTypes->find(node->op()) != kVariableTypes->end()) {
variable_node_names->insert(node->name());
}
for (const string& input_tensor_name : node->input()) {
nodes_to_visit.push(GetNodeNameFromTensorName(input_tensor_name));
}
}
}
Status GetVariableNameToTensorMap(
Session* session,
const std::unordered_map<string, NodeDef*>& name_to_node_map,
std::unordered_set<string> variable_names_set,
std::unordered_map<string, Tensor>* variable_name_to_value_map) {
if (variable_names_set.empty()) {
return absl::OkStatus();
}
std::vector<string> variable_names;
variable_names.reserve(variable_names_set.size());
std::vector<string> tensor_names;
tensor_names.reserve(variable_names_set.size());
for (const string& node_name : variable_names_set) {
variable_names.push_back(node_name);
NodeDef* node_def = name_to_node_map.at(node_name);
if (node_def->op() == "VarHandleOp") {
tensor_names.push_back(node_name + "/Read/ReadVariableOp:0");
} else {
tensor_names.push_back(node_name + ":0");
}
}
std::vector<Tensor> outputs;
TF_RETURN_IF_ERROR(
      session->Run(/*inputs=*/{}, tensor_names, /*target_tensor_names=*/{},
                   &outputs));
for (size_t i = 0; i < variable_names.size(); i++) {
(*variable_name_to_value_map)[variable_names[i]] = outputs[i];
}
return absl::OkStatus();
}
void ConvertVariableToConstant(const NodeDef& variable_node,
const Tensor& variable_value,
NodeDef* const_node) {
const_node->set_name(variable_node.name());
const_node->set_op("Const");
(*const_node->mutable_attr())["dtype"] = variable_node.attr().at("dtype");
variable_value.AsProtoTensorContent(
(*const_node->mutable_attr())["value"].mutable_tensor());
}
void ConvertReadVariableOpToIdentity(const NodeDef& node,
NodeDef* identity_node) {
identity_node->set_name(node.name());
identity_node->set_op("Identity");
(*identity_node->mutable_attr())["T"] = node.attr().at("dtype");
identity_node->add_input(node.input(0));
}
StatusOr<string> GetVarHandleName(
const std::unordered_map<string, NodeDef*>& name_to_node_map,
string node_name) {
const NodeDef* node = name_to_node_map.at(node_name);
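  // Walk up through chains of Identity nodes toward a possible VarHandleOp.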
while (node->input_size() > 0) {
auto parent = name_to_node_map.find(node->input(0));
if (parent == name_to_node_map.end()) break;
node = parent->second;
if (node->op() != "Identity") {
VLOG(2) << "Stopping at non-identity node " << node->op();
break;
}
}
if (node->op() == "VarHandleOp") {
return node->name();
}
return absl::NotFoundError("No VarHandleOp ancestor found");
}
StatusOr<string> GetHandleNameIfNeedsToFreeze(
const std::unordered_map<string, NodeDef*>& name_to_node_map,
string node_name, const std::unordered_set<string>& variable_node_names) {
StatusOr<string> var_handle_name =
GetVarHandleName(name_to_node_map, node_name);
if (var_handle_name.ok() && variable_node_names.count(*var_handle_name)) {
return var_handle_name;
}
return absl::NotFoundError("No VarHandleOp ancestor found");
}
Status FreezeGraphDef(const SavedModelBundle& saved_model_bundle,
const std::unordered_set<string>& outputs,
GraphDef* frozen_graph_def) {
GraphDef graph_def = saved_model_bundle.meta_graph_def.graph_def();
*frozen_graph_def->mutable_versions() = graph_def.versions();
*frozen_graph_def->mutable_library() = graph_def.library();
if (graph_def.node_size() == 0) {
return absl::OkStatus();
}
std::unordered_map<string, NodeDef*> name_to_node_map;
GetNodeNameToNodeDefMap(&graph_def, &name_to_node_map);
std::unordered_set<string> reachable_node_names;
std::unordered_set<string> variable_node_names;
GetReachableNodesAndVariables(&graph_def, outputs, name_to_node_map,
&reachable_node_names, &variable_node_names);
std::unordered_map<string, Tensor> variable_to_value_map;
TF_RETURN_IF_ERROR(GetVariableNameToTensorMap(
saved_model_bundle.session.get(), name_to_node_map, variable_node_names,
&variable_to_value_map));
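  // Copy the reachable subgraph, converting variables to Const and reads of
  // frozen resource variables to Identity.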
for (const NodeDef& node : graph_def.node()) {
if (reachable_node_names.find(node.name()) == reachable_node_names.end()) {
continue;
}
if (variable_node_names.find(node.name()) != variable_node_names.end()) {
ConvertVariableToConstant(node, variable_to_value_map[node.name()],
frozen_graph_def->add_node());
continue;
} else if (node.op() == "ReadVariableOp" &&
GetHandleNameIfNeedsToFreeze(name_to_node_map, node.name(),
variable_node_names)
.ok()) {
ConvertReadVariableOpToIdentity(node, frozen_graph_def->add_node());
continue;
} else if (node.op() == "Identity") {
StatusOr<string> handle_name = GetHandleNameIfNeedsToFreeze(
name_to_node_map, node.name(), variable_node_names);
if (handle_name.ok()) {
NodeDef* new_node = frozen_graph_def->add_node();
*new_node = node;
(*new_node->mutable_attr())["T"] =
name_to_node_map.at(*handle_name)->attr().at("dtype");
continue;
}
}
*frozen_graph_def->add_node() = node;
}
return absl::OkStatus();
}
}
Status FreezeSavedModel(const SavedModelBundle& saved_model_bundle,
GraphDef* frozen_graph_def,
std::unordered_set<string>* inputs,
std::unordered_set<string>* outputs) {
GetSignatureDefsInputsAndOutputs(saved_model_bundle, inputs, outputs);
TF_RETURN_IF_ERROR(
FreezeGraphDef(saved_model_bundle, *outputs, frozen_graph_def));
return absl::OkStatus();
}
} | #include "tensorflow/cc/tools/freeze_saved_model.h"
#include "tensorflow/cc/framework/ops.h"
#include "tensorflow/cc/framework/scope.h"
#include "tensorflow/cc/ops/array_ops.h"
#include "tensorflow/cc/ops/const_op.h"
#include "tensorflow/cc/ops/math_ops.h"
#include "tensorflow/cc/ops/resource_variable_ops.h"
#include "tensorflow/cc/ops/state_ops.h"
#include "tensorflow/cc/saved_model/loader.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/framework/versions.pb.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/protobuf/meta_graph.pb.h"
#include "tensorflow/core/public/session.h"
#include "tensorflow/core/public/session_options.h"
#include "tsl/platform/errors.h"
namespace tensorflow {
namespace {
class FreezeTest : public ::testing::Test {
protected:
void GraphDefEqual(const GraphDef& actual, const GraphDef& expected) {
EXPECT_EQ(actual.ShortDebugString(), expected.ShortDebugString());
}
SignatureDef BuildSignatureDef(const std::unordered_set<string>& inputs,
const std::unordered_set<string>& outputs) {
SignatureDef signature_def;
for (const string& input : inputs) {
(*signature_def.mutable_inputs())[input].set_name(input);
}
for (const string& output : outputs) {
(*signature_def.mutable_outputs())[output].set_name(output);
}
return signature_def;
}
void AddSignatureDefToSavedModelBundle(const SignatureDef& signature_def,
const string& key,
SavedModelBundle* saved_model_bundle) {
MetaGraphDef* meta_graph_def = &saved_model_bundle->meta_graph_def;
(*meta_graph_def->mutable_signature_def())[key] = signature_def;
}
Status InitializeSavedModelBundleSession(
const GraphDef& graph_def, const string& init_node,
SavedModelBundle* saved_model_bundle) {
SessionOptions session_options;
saved_model_bundle->session.reset(NewSession(session_options));
TF_RETURN_IF_ERROR(saved_model_bundle->session->Create(graph_def));
if (!init_node.empty()) {
std::vector<Tensor> outputs;
return saved_model_bundle->session->Run(
          /*inputs=*/{}, /*output_tensor_names=*/{}, {init_node}, &outputs);
}
return absl::OkStatus();
}
Status AddGraphDefToSavedModelBundle(const GraphDef& graph_def,
const string& init_node,
SavedModelBundle* saved_model_bundle) {
MetaGraphDef* meta_graph_def = &saved_model_bundle->meta_graph_def;
*meta_graph_def->mutable_graph_def() = graph_def;
return InitializeSavedModelBundleSession(graph_def, init_node,
saved_model_bundle);
}
Status AddGraphDefWithOutputsToSavedModelBundle(
const GraphDef& graph_def, const std::unordered_set<string>& outputs,
const string& init_node, SavedModelBundle* saved_model_bundle) {
SignatureDef signature_def =
BuildSignatureDef(std::unordered_set<string>(), outputs);
AddSignatureDefToSavedModelBundle(signature_def, "signature_def",
saved_model_bundle);
return AddGraphDefToSavedModelBundle(graph_def, init_node,
saved_model_bundle);
}
void RunAndCompareFrozenAndUnfrozenGraphs(Session* unfrozen_session,
const GraphDef& frozen_graph_def,
const string& tensor_name) {
std::vector<Tensor> unfrozen_outputs;
    TF_ASSERT_OK(unfrozen_session->Run(/*inputs=*/{}, {tensor_name},
                                       /*target_tensor_names=*/{},
                                       &unfrozen_outputs));
SessionOptions session_options;
std::unique_ptr<Session> frozen_session(NewSession(session_options));
TF_ASSERT_OK(frozen_session->Create(frozen_graph_def));
std::vector<Tensor> frozen_outputs;
    TF_ASSERT_OK(frozen_session->Run(/*inputs=*/{}, {tensor_name},
                                     /*target_tensor_names=*/{},
                                     &frozen_outputs));
test::ExpectTensorEqual<float>(unfrozen_outputs[0], frozen_outputs[0]);
}
void TestFreezeGraphWithoutDependentVariables(bool use_resource) {
SavedModelBundle saved_model_bundle;
GraphDef graph_def;
Scope scope = Scope::NewRootScope();
Output a = ops::Const(scope.WithOpName("a"), 10.0f, {});
Output b = ops::Const(scope.WithOpName("b"), 10.0f, {});
Output c = ops::Mul(scope.WithOpName("c"), a, b);
if (use_resource) {
Output var =
ops::VarHandleOp(scope.WithOpName("var"), DataType::DT_FLOAT, {});
Output read_var = ops::ReadVariableOp(
scope.WithOpName("var/Read/ReadVariableOp"), var, DataType::DT_FLOAT);
auto assign = ops::AssignVariableOp(scope.WithOpName("assign"), var, a);
} else {
Output var =
ops::Variable(scope.WithOpName("var"), {}, DataType::DT_FLOAT);
Output assign = ops::Assign(scope.WithOpName("assign"), var, a);
}
TF_ASSERT_OK(scope.ToGraphDef(&graph_def));
TF_ASSERT_OK(AddGraphDefWithOutputsToSavedModelBundle(
graph_def, {"c:0"}, "assign", &saved_model_bundle));
GraphDef frozen_graph_def;
std::unordered_set<string> inputs;
std::unordered_set<string> outputs;
TF_ASSERT_OK(FreezeSavedModel(saved_model_bundle, &frozen_graph_def,
&inputs, &outputs));
GraphDef expected_graph_def;
Scope expected_scope = Scope::NewRootScope();
Output expected_a = ops::Const(expected_scope.WithOpName("a"), 10.0f, {});
Output expected_b = ops::Const(expected_scope.WithOpName("b"), 10.0f, {});
Output expected_c =
ops::Mul(expected_scope.WithOpName("c"), expected_a, expected_b);
TF_ASSERT_OK(expected_scope.ToGraphDef(&expected_graph_def));
GraphDefEqual(frozen_graph_def, expected_graph_def);
RunAndCompareFrozenAndUnfrozenGraphs(saved_model_bundle.session.get(),
frozen_graph_def, "c:0");
}
void TestFreezeGraphWithDependentVariables(bool use_resource,
bool use_identity = false) {
SavedModelBundle saved_model_bundle;
GraphDef graph_def;
Scope scope = Scope::NewRootScope();
Output a = ops::Const(scope.WithOpName("a"), 10.0f, {});
Output read_var;
if (use_resource) {
Output var =
ops::VarHandleOp(scope.WithOpName("var"), DataType::DT_FLOAT, {});
if (use_identity) {
Output identity = ops::Identity(scope.WithOpName("identity"), var);
read_var =
ops::ReadVariableOp(scope.WithOpName("var/Read/ReadVariableOp"),
identity, DataType::DT_FLOAT);
} else {
read_var =
ops::ReadVariableOp(scope.WithOpName("var/Read/ReadVariableOp"),
var, DataType::DT_FLOAT);
}
auto assign = ops::AssignVariableOp(scope.WithOpName("assign"), var, a);
} else {
      // Assign the read_var declared above; redeclaring it here would shadow
      // the outer Output and leave the Mul below with an empty input.
      read_var =
          ops::Variable(scope.WithOpName("var"), {}, DataType::DT_FLOAT);
Output assign = ops::Assign(scope.WithOpName("assign"), read_var, a);
}
Output c = ops::Mul(scope.WithOpName("c"), a, read_var);
TF_ASSERT_OK(scope.ToGraphDef(&graph_def));
TF_ASSERT_OK(AddGraphDefWithOutputsToSavedModelBundle(
graph_def, {"c:0"}, "assign", &saved_model_bundle));
GraphDef frozen_graph_def;
std::unordered_set<string> inputs;
std::unordered_set<string> outputs;
TF_ASSERT_OK(FreezeSavedModel(saved_model_bundle, &frozen_graph_def,
&inputs, &outputs));
size_t expected_nodes = use_resource ? (use_identity ? 5 : 4) : 3;
EXPECT_EQ(frozen_graph_def.node_size(), expected_nodes);
for (const NodeDef& node : frozen_graph_def.node()) {
EXPECT_NE(node.op(), "Variable") << node.name();
EXPECT_NE(node.op(), "VariableV2") << node.name();
EXPECT_NE(node.op(), "VarHandleOp") << node.name();
EXPECT_NE(node.op(), "ReadVariableOp") << node.name();
}
RunAndCompareFrozenAndUnfrozenGraphs(saved_model_bundle.session.get(),
frozen_graph_def, "c:0");
}
void TestFreezeGraphWithAndWithoutDependentVariables(bool use_resource) {
SavedModelBundle saved_model_bundle;
GraphDef graph_def;
Scope scope = Scope::NewRootScope();
Output a = ops::Const(scope.WithOpName("a"), 10.0f, {});
Output read_var;
if (use_resource) {
Output var =
ops::VarHandleOp(scope.WithOpName("var"), DataType::DT_FLOAT, {});
read_var = ops::ReadVariableOp(
scope.WithOpName("var/Read/ReadVariableOp"), var, DataType::DT_FLOAT);
auto assign = ops::AssignVariableOp(scope.WithOpName("assign"), var, a);
Output var_1 =
ops::VarHandleOp(scope.WithOpName("var_1"), DataType::DT_FLOAT, {});
Output read_var_1 =
ops::ReadVariableOp(scope.WithOpName("var_1/Read/ReadVariableOp"),
var, DataType::DT_FLOAT);
auto assign_1 =
ops::AssignVariableOp(scope.WithOpName("assign_1"), var_1, a);
} else {
read_var = ops::Variable(scope.WithOpName("var"), {}, DataType::DT_FLOAT);
Output assign = ops::Assign(scope.WithOpName("assign"), read_var, a);
Output var_1 =
ops::Variable(scope.WithOpName("var_1"), {}, DataType::DT_FLOAT);
Output assign_1 = ops::Assign(scope.WithOpName("assign_1"), var_1, a);
}
Output c = ops::Mul(scope.WithOpName("c"), a, read_var);
TF_ASSERT_OK(scope.ToGraphDef(&graph_def));
TF_ASSERT_OK(AddGraphDefWithOutputsToSavedModelBundle(
graph_def, {"c:0"}, "assign", &saved_model_bundle));
GraphDef frozen_graph_def;
std::unordered_set<string> inputs;
std::unordered_set<string> outputs;
TF_ASSERT_OK(FreezeSavedModel(saved_model_bundle, &frozen_graph_def,
&inputs, &outputs));
size_t expected_nodes = use_resource ? 4 : 3;
EXPECT_EQ(frozen_graph_def.node_size(), expected_nodes);
for (const NodeDef& node : frozen_graph_def.node()) {
EXPECT_NE(node.op(), "Variable") << node.name();
EXPECT_NE(node.op(), "VariableV2") << node.name();
EXPECT_NE(node.op(), "VarHandleOp") << node.name();
EXPECT_NE(node.op(), "ReadVariableOp") << node.name();
}
RunAndCompareFrozenAndUnfrozenGraphs(saved_model_bundle.session.get(),
frozen_graph_def, "c:0");
}
};
TEST_F(FreezeTest, InputsAndOutputsSingleSignatureDef) {
SavedModelBundle saved_model_bundle;
std::unordered_set<string> expected_inputs = {"input0:0", "input1:0"};
std::unordered_set<string> expected_outputs = {"output0:0", "output1:0"};
SignatureDef signature_def =
BuildSignatureDef(expected_inputs, expected_outputs);
AddSignatureDefToSavedModelBundle(signature_def, "signature_def",
&saved_model_bundle);
GraphDef frozen_graph_def;
std::unordered_set<string> inputs;
std::unordered_set<string> outputs;
TF_ASSERT_OK(FreezeSavedModel(saved_model_bundle, &frozen_graph_def, &inputs,
&outputs));
EXPECT_EQ(expected_inputs, inputs);
EXPECT_EQ(expected_outputs, outputs);
}
TEST_F(FreezeTest, InputsAndOutputsMultipleSignatureDefs) {
SavedModelBundle saved_model_bundle;
SignatureDef signature_def_0 = BuildSignatureDef({"input0:0"}, {"output0:0"});
SignatureDef signature_def_1 = BuildSignatureDef({"input1:0"}, {"output1:0"});
AddSignatureDefToSavedModelBundle(signature_def_0, "signature_def_0",
&saved_model_bundle);
AddSignatureDefToSavedModelBundle(signature_def_1, "signature_def_1",
&saved_model_bundle);
GraphDef frozen_graph_def;
std::unordered_set<string> inputs;
std::unordered_set<string> outputs;
TF_ASSERT_OK(FreezeSavedModel(saved_model_bundle, &frozen_graph_def, &inputs,
&outputs));
std::unordered_set<string> expected_inputs = {"input0:0", "input1:0"};
std::unordered_set<string> expected_outputs = {"output0:0", "output1:0"};
EXPECT_EQ(expected_inputs, inputs);
EXPECT_EQ(expected_outputs, outputs);
}
TEST_F(FreezeTest, GraphDefVersionsAndLibrary) {
SavedModelBundle saved_model_bundle;
GraphDef graph_def;
graph_def.mutable_versions()->set_producer(1234);
graph_def.mutable_versions()->set_min_consumer(1234);
*graph_def.mutable_library()->add_function() = test::function::NonZero();
TF_ASSERT_OK(
AddGraphDefToSavedModelBundle(graph_def, "", &saved_model_bundle));
GraphDef frozen_graph_def;
std::unordered_set<string> inputs;
std::unordered_set<string> outputs;
TF_ASSERT_OK(FreezeSavedModel(saved_model_bundle, &frozen_graph_def, &inputs,
&outputs));
GraphDefEqual(frozen_graph_def, graph_def);
}
TEST_F(FreezeTest, GraphDefWithNoVariables) {
SavedModelBundle saved_model_bundle;
GraphDef graph_def;
Scope scope = Scope::NewRootScope();
Output a = ops::Const(scope.WithOpName("a"), 10.0f, {});
Output b = ops::Const(scope.WithOpName("b"), 10.0f, {});
Output c = ops::Mul(scope.WithOpName("c"), a, b);
TF_ASSERT_OK(scope.ToGraphDef(&graph_def));
TF_ASSERT_OK(AddGraphDefWithOutputsToSavedModelBundle(graph_def, {"c:0"}, "",
&saved_model_bundle));
GraphDef frozen_graph_def;
std::unordered_set<string> inputs;
std::unordered_set<string> outputs;
TF_ASSERT_OK(FreezeSavedModel(saved_model_bundle, &frozen_graph_def, &inputs,
&outputs));
GraphDefEqual(frozen_graph_def, graph_def);
}
TEST_F(FreezeTest, GraphDefWithMultiOutputOperation) {
SavedModelBundle saved_model_bundle;
GraphDef graph_def;
Scope scope = Scope::NewRootScope();
Output a = ops::Const(scope.WithOpName("a"), {10.0f, 10.0f}, {2});
Output axis = ops::Const(scope.WithOpName("axis"), 0, {});
OutputList split = ops::Split(scope.WithOpName("split"), axis, a, 2).output;
Output b = ops::Const(scope.WithOpName("b"), 10.0f, {});
Output c = ops::Mul(scope.WithOpName("c"), split[1], b);
TF_ASSERT_OK(scope.ToGraphDef(&graph_def));
TF_ASSERT_OK(AddGraphDefWithOutputsToSavedModelBundle(graph_def, {"c:0"}, "",
&saved_model_bundle));
GraphDef frozen_graph_def;
std::unordered_set<string> inputs;
std::unordered_set<string> outputs;
TF_ASSERT_OK(FreezeSavedModel(saved_model_bundle, &frozen_graph_def, &inputs,
&outputs));
GraphDefEqual(frozen_graph_def, graph_def);
}
TEST_F(FreezeTest, GraphDefWithControlDependency) {
SavedModelBundle saved_model_bundle;
GraphDef graph_def;
Scope scope = Scope::NewRootScope();
Output source = ops::Const(scope.WithOpName("source"), 10.0f, {});
Output a = ops::Const(scope.WithOpName("a").WithControlDependencies(source),
{10.0f, 10.0f}, {2});
Output b = ops::Const(scope.WithOpName("b"), 10.0f, {});
Output c = ops::Mul(scope.WithOpName("c"), a, b);
TF_ASSERT_OK(scope.ToGraphDef(&graph_def));
TF_ASSERT_OK(AddGraphDefWithOutputsToSavedModelBundle(graph_def, {"c:0"}, "",
&saved_model_bundle));
GraphDef frozen_graph_def;
std::unordered_set<string> inputs;
std::unordered_set<string> outputs;
TF_ASSERT_OK(FreezeSavedModel(saved_model_bundle, &frozen_graph_def, &inputs,
&outputs));
GraphDefEqual(frozen_graph_def, graph_def);
}
TEST_F(FreezeTest, GraphDefWithoutDependentVariables) {
TestFreezeGraphWithoutDependentVariables(false);
}
TEST_F(FreezeTest, GraphDefWithoutDependentResourceVariables) {
TestFreezeGraphWithoutDependentVariables(true);
}
TEST_F(FreezeTest, GraphDefWithDependentVariables) {
TestFreezeGraphWithDependentVariables(false);
}
TEST_F(FreezeTest, GraphDefWithDependentResourceVariables) {
TestFreezeGraphWithDependentVariables(true);
}
TEST_F(FreezeTest, GraphDefWithDependentResourceVariablesAndIdentity) {
TestFreezeGraphWithDependentVariables(true, true);
}
TEST_F(FreezeTest, GraphDefWithAndWithoutDependentVariables) {
TestFreezeGraphWithAndWithoutDependentVariables(false);
}
TEST_F(FreezeTest, GraphDefWithAndWithoutDependentResourceVariables) {
TestFreezeGraphWithAndWithoutDependentVariables(true);
}
TEST_F(FreezeTest, InputsAndOutputsCompositeTensorSignatureDef) {
SavedModelBundle saved_model_bundle;
SignatureDef signature_def;
TensorInfo& in = (*signature_def.mutable_inputs())["input_arg"];
in.mutable_composite_tensor()->add_components()->set_name("input1:0");
in.mutable_composite_tensor()->add_components()->set_name("input2:0");
TensorInfo& out = (*signature_def.mutable_outputs())["output_arg"];
out.mutable_composite_tensor()->add_components()->set_name("output2:0");
out.mutable_composite_tensor()->add_components()->set_name("output1:0");
AddSignatureDefToSavedModelBundle(signature_def, "signature_def",
&saved_model_bundle);
GraphDef frozen_graph_def;
std::unordered_set<string> inputs;
std::unordered_set<string> outputs;
TF_ASSERT_OK(FreezeSavedModel(saved_model_bundle, &frozen_graph_def, &inputs,
&outputs));
std::unordered_set<string> expected_inputs = {"input1:0", "input2:0"};
std::unordered_set<string> expected_outputs = {"output1:0", "output2:0"};
EXPECT_EQ(expected_inputs, inputs);
EXPECT_EQ(expected_outputs, outputs);
}
TEST_F(FreezeTest, InputsAndOutputsSparseCooSignatureDef) {
SavedModelBundle saved_model_bundle;
SignatureDef signature_def;
TensorInfo& in = (*signature_def.mutable_inputs())["input_arg"];
in.mutable_coo_sparse()->set_values_tensor_name("input1:0");
in.mutable_coo_sparse()->set_indices_tensor_name("input2:0");
in.mutable_coo_sparse()->set_dense_shape_tensor_name("input3:0");
TensorInfo& out = (*signature_def.mutable_outputs())["output_arg"];
out.mutable_coo_sparse()->set_values_tensor_name("output1:0");
out.mutable_coo_sparse()->set_indices_tensor_name("output2:0");
out.mutable_coo_sparse()->set_dense_shape_tensor_name("output3:0");
AddSignatureDefToSavedModelBundle(signature_def, "signature_def",
&saved_model_bundle);
GraphDef frozen_graph_def;
std::unordered_set<string> inputs;
std::unordered_set<string> outputs;
TF_ASSERT_OK(FreezeSavedModel(saved_model_bundle, &frozen_graph_def, &inputs,
&outputs));
std::unordered_set<string> expected_inputs = {"input1:0", "input2:0",
"input3:0"};
std::unordered_set<string> expected_outputs = {"output1:0", "output2:0",
"output3:0"};
EXPECT_EQ(expected_inputs, inputs);
EXPECT_EQ(expected_outputs, outputs);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/cc/tools/freeze_saved_model.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/cc/tools/freeze_saved_model_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
af777e7f-ba41-48ea-b590-9d3c0803b996 | cpp | tensorflow/tensorflow | floor | tensorflow/lite/experimental/shlo/ops/floor.cc | tensorflow/lite/delegates/xnnpack/floor_test.cc | #include "tensorflow/lite/experimental/shlo/ops/floor.h"
#include <cmath>
#include "absl/status/status.h"
#include "tensorflow/lite/experimental/shlo/bf16.h"
#include "tensorflow/lite/experimental/shlo/dispatch.h"
#include "tensorflow/lite/experimental/shlo/f16.h"
#include "tensorflow/lite/experimental/shlo/ops/unary_elementwise.h"
#include "tensorflow/lite/experimental/shlo/ops/util.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
namespace shlo_ref {
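// Element-wise floor functor. F16 and BF16 are specialized below to route
// the value through float, since std::floor has no overloads for them.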
struct Floor {
template <class T>
T operator()(T v) const {
return std::floor(v);
}
};
template <>
F16 Floor::operator()<F16>(F16 val) const {
return F16(operator()(static_cast<float>(val)));
}
template <>
BF16 Floor::operator()<BF16>(BF16 val) const {
return BF16(operator()(static_cast<float>(val)));
}
FloorOp Create(FloorOp::Attributes) { return {}; }
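// Checks that the input is a float or per-tensor quantized tensor and
// propagates its shape to the output.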
absl::Status Prepare(FloorOp& op, const Tensor& input, Tensor& output) {
SHLO_REF_RETURN_ON_ERROR(Propagate(input.shape(), output.shape()));
SHLO_REF_RETURN_ON_ERROR(CheckSupportedTypes(
CheckCtx("floor"), input, IsFloatTensor, IsQuantizedPerTensorTensor));
SHLO_REF_RETURN_ON_ERROR(
CheckSameBaselineType(CheckCtx("floor"), input, output));
return absl::OkStatus();
}
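// Both DISPATCH_* macros return from this function, so control reaches the
// trailing error only for unsupported tensor types.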
absl::Status Evaluate(FloorOp& op, const Tensor& input, Tensor& output) {
Floor floor;
if (input.IsPerTensorQuantized()) {
DISPATCH_QUANTIZED(
detail::DequantizeOpQuantizePerTensor,
input.quantized_per_tensor_element_type().StorageType(),
input.quantized_per_tensor_element_type().ExpressedType(), floor, input,
output)
} else if (IsFloatTensor(input)) {
DISPATCH_FLOAT(detail::EvaluateNoQuantization, input.tensor_element_type(),
floor, input, output);
}
return absl::FailedPreconditionError(
"stablehlo.floor: Unsupported tensor type.");
}
} | #include <cstdint>
#include <functional>
#include <memory>
#include <random>
#include <gtest/gtest.h>
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/delegates/xnnpack/unary_elementwise_tester.h"
#include "tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace xnnpack {
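// Each test below builds a single-op FLOOR model with random dimensions in
// [2, 5] and compares the XNNPACK delegate against the reference kernel.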
TEST(Floor, 4D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
UnaryElementwiseTester()
.Shape({batch, height, width, channels})
.Test(BuiltinOperator_FLOOR, xnnpack_delegate.get());
}
TEST(Floor, 3D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
UnaryElementwiseTester()
.Shape({batch, width, channels})
.Test(BuiltinOperator_FLOOR, xnnpack_delegate.get());
}
TEST(Floor, 2D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto channels = shape_rng();
UnaryElementwiseTester()
.Shape({batch, channels})
.Test(BuiltinOperator_FLOOR, xnnpack_delegate.get());
}
TEST(Floor, 1D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
UnaryElementwiseTester().Shape({batch}).Test(BuiltinOperator_FLOOR,
xnnpack_delegate.get());
}
TEST(Floor, MultiThreading) {
TfLiteXNNPackDelegateOptions delegate_options =
TfLiteXNNPackDelegateOptionsDefault();
delegate_options.num_threads = 2;
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(&delegate_options),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
UnaryElementwiseTester()
.Shape({batch, height, width, channels})
.Test(BuiltinOperator_FLOOR, xnnpack_delegate.get());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/ops/floor.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/xnnpack/floor_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ce35ed39-e4f0-49e6-98a4-f765d18996b3 | cpp | tensorflow/tensorflow | worker_impl | tensorflow/core/data/service/worker_impl.cc | tensorflow/core/data/service/worker_impl_test.cc | #include "tensorflow/core/data/service/worker_impl.h"
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "grpcpp/create_channel.h"
#include "absl/algorithm/container.h"
#include "absl/memory/memory.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "absl/strings/substitute.h"
#include "absl/time/time.h"
#include "xla/tsl/protobuf/status.pb.h"
#include "tensorflow/core/data/service/byte_size.h"
#include "tensorflow/core/data/service/common.h"
#include "tensorflow/core/data/service/common.pb.h"
#include "tensorflow/core/data/service/data_transfer.h"
#include "tensorflow/core/data/service/dispatcher.pb.h"
#include "tensorflow/core/data/service/dispatcher_client.h"
#include "tensorflow/core/data/service/export.pb.h"
#include "tensorflow/core/data/service/graph_rewriters.h"
#include "tensorflow/core/data/service/grpc_util.h"
#include "tensorflow/core/data/service/snapshot/path_utils.h"
#include "tensorflow/core/data/service/snapshot/snapshot_split_provider.h"
#include "tensorflow/core/data/service/snapshot/snapshot_stream_writer.h"
#include "tensorflow/core/data/service/split_provider.h"
#include "tensorflow/core/data/service/task_runner.h"
#include "tensorflow/core/data/service/utils.h"
#include "tensorflow/core/data/service/worker.pb.h"
#include "tensorflow/core/data/standalone.h"
#include "tensorflow/core/framework/dataset.pb.h"
#include "tensorflow/core/framework/metrics.h"
#include "tensorflow/core/framework/model.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/env_time.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/host_info.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/platform/thread_annotations.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/protobuf/service_config.pb.h"
#include "tensorflow/core/public/session_options.h"
#include "tensorflow/core/util/dump_graph.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/status_to_from_proto.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace data {
namespace {
constexpr absl::Duration kRetryInterval = absl::Seconds(5);
constexpr absl::Duration kDefaultHeartBeatInterval = absl::Seconds(30);
constexpr absl::Duration kDefaultDispatcherTimeout = absl::Hours(1);
using WorkerConfig = experimental::WorkerConfig;
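// Moves a produced element into the GetElement response. A single scalar
// DT_VARIANT tensor holding a CompressedElement is forwarded as-is; any
// other element is serialized component-by-component as uncompressed.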
Status MoveElementToResponse(std::vector<Tensor>&& element,
GetElementResponse& resp) {
if (element.size() != 1 || element[0].dtype() != DT_VARIANT ||
!TensorShapeUtils::IsScalar(element[0].shape())) {
for (const auto& component : element) {
UncompressedElement* uncompressed = resp.mutable_uncompressed();
component.AsProtoTensorContent(uncompressed->add_components());
}
return absl::OkStatus();
}
Variant& variant = element[0].scalar<Variant>()();
CompressedElement* compressed = variant.get<CompressedElement>();
if (compressed == nullptr) {
return errors::FailedPrecondition(
"Expected dataset to produce a CompressedElement variant tensor, but "
"it produced ",
variant.TypeName());
}
*resp.mutable_compressed() = *compressed;
return absl::OkStatus();
}
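// Fills in the default heartbeat interval, dispatcher timeout, and snapshot
// chunk size for any of these fields the config leaves unset (zero).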
WorkerConfig ApplyWorkerDefaults(const WorkerConfig& config) {
WorkerConfig new_config(config);
if (new_config.heartbeat_interval_ms() == 0) {
new_config.set_heartbeat_interval_ms(
absl::ToInt64Milliseconds(kDefaultHeartBeatInterval));
}
if (new_config.dispatcher_timeout_ms() == 0) {
new_config.set_dispatcher_timeout_ms(
absl::ToInt64Milliseconds(kDefaultDispatcherTimeout));
}
if (new_config.snapshot_max_chunk_size_bytes() == 0) {
new_config.set_snapshot_max_chunk_size_bytes(
kDefaultMaxChunkSize.ToUnsignedBytes());
}
return new_config;
}
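// Copies the fields of a TaskDef for state export, replacing an in-memory
// dataset graph with a short placeholder message.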
TaskDef Export(const TaskDef& task) {
TaskDef result;
switch (task.dataset_case()) {
case TaskDef::kDatasetDef:
result.set_path(
"In-memory dataset graphs are omitted for brevity. To view datasets "
"stored on the dispatcher, configure a `work_dir`.");
break;
case TaskDef::kPath:
result.set_path(task.path());
break;
default:
break;
}
result.set_dataset_id(task.dataset_id());
result.set_task_id(task.task_id());
result.set_iteration_id(task.iteration_id());
result.set_num_split_providers(task.num_split_providers());
result.set_worker_address(task.worker_address());
*result.mutable_processing_mode_def() = task.processing_mode_def();
switch (task.optional_num_consumers_case()) {
case TaskDef::kNumConsumers:
result.set_num_consumers(task.num_consumers());
break;
default:
break;
}
result.set_num_workers(task.num_workers());
result.set_worker_index(task.worker_index());
return result;
}
}
mutex LocalWorkers::mu_(LINKER_INITIALIZED);
LocalWorkers::AddressToWorkerMap* LocalWorkers::local_workers_ =
new AddressToWorkerMap();
DataServiceWorkerImpl::DataServiceWorkerImpl(const WorkerConfig& config)
: config_(ApplyWorkerDefaults(config)), worker_uid_(port::JobUid()) {
metrics::RecordTFDataServiceWorkerCreated();
}
DataServiceWorkerImpl::~DataServiceWorkerImpl() {
mutex_lock l(mu_);
cancelled_ = true;
task_completion_cv_.notify_one();
heartbeat_cv_.notify_one();
}
Status DataServiceWorkerImpl::Start(
const std::string& worker_address,
const std::vector<DataTransferServerInfo>& transfer_servers) {
VLOG(3) << "Starting tf.data service worker at address " << worker_address;
TF_RETURN_IF_ERROR(ValidateWorkerConfig());
worker_address_ = worker_address;
transfer_servers_ = transfer_servers;
TF_ASSIGN_OR_RETURN(dispatcher_, CreateDispatcherClient());
auto should_retry = [this]() TF_LOCKS_EXCLUDED(mu_) {
mutex_lock l(mu_);
return !cancelled_;
};
TF_RETURN_IF_ERROR(grpc_util::Retry([this]() { return Heartbeat(); },
should_retry, "Worker heartbeat.",
kint64max));
LOG(INFO) << "Worker registered with dispatcher running at "
<< config_.dispatcher_address()
<< ". Worker config: " << config_.DebugString();
task_completion_thread_ = absl::WrapUnique(
Env::Default()->StartThread({}, "data-service-worker-task-completion",
[this]() { TaskCompletionThread(); }));
heartbeat_thread_ = absl::WrapUnique(Env::Default()->StartThread(
{}, "data-service-worker-heartbeat", [this]() { HeartbeatThread(); }));
mutex_lock l(mu_);
registered_ = true;
return absl::OkStatus();
}
void DataServiceWorkerImpl::Stop() {
absl::flat_hash_map<int64_t, std::shared_ptr<Task>> tasks;
absl::flat_hash_map<SnapshotTask, std::unique_ptr<SnapshotStreamWriter>,
absl::Hash<SnapshotTask>>
snapshot_writers;
{
mutex_lock l(mu_);
cancelled_ = true;
tasks.swap(tasks_);
snapshot_writers.swap(snapshot_writers_);
}
for (const auto& [task_id, task] : tasks) {
StopTask(*task);
}
for (const auto& [unused, snapshot_writer] : snapshot_writers) {
snapshot_writer->Cancel();
}
Env::Default()->SleepForMicroseconds(config_.shutdown_quiet_period_ms() *
1000);
}
Status DataServiceWorkerImpl::ValidateWorkerConfig() const {
const bool any_tag_is_empty = absl::c_any_of(
config_.worker_tags(),
[](const std::string& worker_tag) { return worker_tag.empty(); });
if (any_tag_is_empty) {
return errors::FailedPrecondition(
"Worker tags cannot be empty. Got tags {",
absl::StrJoin(config_.worker_tags().begin(),
config_.worker_tags().end(), ", "),
"}");
}
return absl::OkStatus();
}
absl::StatusOr<std::unique_ptr<DataServiceDispatcherClient>>
DataServiceWorkerImpl::CreateDispatcherClient() const TF_LOCKS_EXCLUDED(mu_) {
auto dispatcher = std::make_unique<DataServiceDispatcherClient>(
config_.dispatcher_address(), config_.protocol());
auto should_retry = [this]() TF_LOCKS_EXCLUDED(mu_) {
mutex_lock l(mu_);
return !cancelled_;
};
TF_RETURN_IF_ERROR(
grpc_util::Retry([&dispatcher]() { return dispatcher->Initialize(); },
should_retry, "Initialize dispatcher client.",
kint64max));
return dispatcher;
}
Status DataServiceWorkerImpl::GetElementResult(
const GetElementRequest* request, struct GetElementResult* result) {
Task* task = nullptr;
{
mutex_lock l(mu_);
if (cancelled_) {
return errors::Cancelled("Worker is shutting down");
}
if (!registered_) {
return errors::Unavailable(
"Worker has not yet registered with dispatcher.");
}
auto it = tasks_.find(request->task_id());
if (it == tasks_.end()) {
if (deleted_tasks_.contains(request->task_id())) {
return errors::FailedPrecondition(
"Got request for local task ", request->task_id(), " of worker ",
worker_address_, ", which has been deleted. You may be creating ",
"a duplicate iteration which has already finished. To fix this, "
"make sure to create your dataset only once, as opposed to "
"re-creating it repeatedly inside a loop.");
}
if (finished_tasks_.contains(request->task_id())) {
VLOG(3) << "Task is already finished";
result->end_of_sequence = true;
result->skip = false;
return absl::OkStatus();
}
return errors::Unavailable("Task ", request->task_id(), " not found");
}
task = it->second.get();
task->outstanding_requests++;
}
auto cleanup = gtl::MakeCleanup([&] {
mutex_lock l(mu_);
task->outstanding_requests--;
cv_.notify_all();
});
TF_RETURN_IF_ERROR(EnsureTaskInitialized(*task));
TF_RETURN_IF_ERROR(task->task_runner->GetNext(*request, *result));
if (result->end_of_sequence) {
mutex_lock l(mu_);
VLOG(3) << "Reached end_of_sequence for task " << request->task_id();
pending_completed_tasks_.insert(request->task_id());
task_completion_cv_.notify_one();
}
return absl::OkStatus();
}
Status DataServiceWorkerImpl::ProcessTask(const ProcessTaskRequest* request,
ProcessTaskResponse* response) {
mutex_lock l(mu_);
const TaskDef& task = request->task();
VLOG(3) << "Received request to process task " << task.task_id();
return ProcessTaskInternal(task);
}
Status DataServiceWorkerImpl::ProcessTaskInternal(const TaskDef& task_def)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
std::shared_ptr<Task>& task = tasks_[task_def.task_id()];
if (task) {
VLOG(1) << "Received request to process already-processed task "
<< task->task_def.task_id();
return absl::OkStatus();
}
task = std::make_unique<Task>(task_def);
VLOG(3) << "Began processing for task " << task_def.task_id()
<< " with processing mode "
<< task_def.processing_mode_def().DebugString();
return absl::OkStatus();
}
Status DataServiceWorkerImpl::EnsureTaskInitialized(
DataServiceWorkerImpl::Task& task) {
if (task.task_def.worker_address() != worker_address_) {
return errors::Internal(absl::Substitute(
"Dispatcher's worker address $0 does not match worker's address $1.",
task.task_def.worker_address(), worker_address_));
}
mutex_lock l(task.mu);
if (task.initialized) {
return absl::OkStatus();
}
TF_ASSIGN_OR_RETURN(DatasetDef dataset_def, GetDatasetDef(task.task_def));
TF_ASSIGN_OR_RETURN(std::unique_ptr<standalone::Dataset> dataset,
MakeDataset(dataset_def, task.task_def));
TF_ASSIGN_OR_RETURN(std::unique_ptr<standalone::Iterator> iterator,
MakeDatasetIterator(*dataset, task.task_def));
auto task_iterator = std::make_unique<StandaloneTaskIterator>(
std::move(dataset), std::move(iterator));
TF_RETURN_IF_ERROR(TaskRunner::Create(
config_, task.task_def, std::move(task_iterator), task.task_runner));
task.initialized = true;
VLOG(3) << "Created iterator for task " << task.task_def.task_id();
return absl::OkStatus();
}
absl::StatusOr<DatasetDef> DataServiceWorkerImpl::GetDatasetDef(
const TaskDef& task_def) const {
switch (task_def.dataset_case()) {
case TaskDef::kDatasetDef:
return task_def.dataset_def();
case TaskDef::kPath: {
DatasetDef def;
Status s = ReadDatasetDef(task_def.path(), def);
if (!s.ok()) {
LOG(INFO) << "Failed to read dataset from " << task_def.path() << ": "
<< s << ". Falling back to reading from dispatcher.";
TF_RETURN_IF_ERROR(
dispatcher_->GetDatasetDef(task_def.dataset_id(), def));
}
return def;
}
case TaskDef::DATASET_NOT_SET:
return errors::Internal("Unrecognized dataset case: ",
task_def.dataset_case());
}
}
absl::StatusOr<bool> DataServiceWorkerImpl::DisableCompressionAtRuntime(
const std::string& dataset_id) const {
DisableCompressionAtRuntimeResponse response;
absl::Time deadline =
absl::FromUnixMicros(EnvTime::NowMicros()) + kDefaultDispatcherTimeout;
auto should_retry = [this]() TF_LOCKS_EXCLUDED(mu_) {
mutex_lock l(mu_);
return !cancelled_;
};
TF_RETURN_IF_ERROR(grpc_util::Retry(
[&]() {
        return dispatcher_->DisableCompressionAtRuntime(
            dataset_id, /*disable_compression_at_runtime=*/false, response);
},
should_retry, "Disable compression at runtime.",
absl::ToUnixMicros(deadline)));
if (response.no_compression_to_disable()) {
return false;
}
metrics::RecordTFDataServiceRuntimeCompressionDecision(
response.compression_disabled_at_runtime());
return response.compression_disabled_at_runtime();
}
absl::StatusOr<std::unique_ptr<standalone::Dataset>>
DataServiceWorkerImpl::MakeDataset(const DatasetDef& dataset_def,
const TaskDef& task_def) const {
TF_ASSIGN_OR_RETURN(bool compression_disabled_at_runtime,
DisableCompressionAtRuntime(task_def.dataset_id()));
GraphDef graph = dataset_def.graph();
if (VLOG_IS_ON(1)) {
std::string prefix = absl::StrCat(task_def.dataset_id(), "_", worker_uid_);
DumpGraphDefToFile(absl::StrCat(prefix, "-prerewrite_GraphDef"), graph);
DumpProtoToFile(absl::StrCat(prefix, "-prerewrite_TaskDef"), task_def);
}
if (compression_disabled_at_runtime) {
RemoveCompressionMapRewriter remove_compression_map_rewriter;
TF_ASSIGN_OR_RETURN(
graph, remove_compression_map_rewriter.ApplyRemoveCompressionMapRewrite(
graph));
}
TF_ASSIGN_OR_RETURN(AutoShardRewriter auto_shard_rewriter,
AutoShardRewriter::Create(task_def));
TF_ASSIGN_OR_RETURN(graph, auto_shard_rewriter.ApplyAutoShardRewrite(graph));
std::unique_ptr<standalone::Dataset> dataset;
TF_RETURN_IF_ERROR(standalone::Dataset::FromGraph(
standalone::Dataset::Params(), graph, &dataset));
return dataset;
}
absl::StatusOr<std::unique_ptr<standalone::Iterator>>
DataServiceWorkerImpl::MakeDatasetIterator(standalone::Dataset& dataset,
const TaskDef& task_def) const {
std::unique_ptr<standalone::Iterator> iterator;
if (IsNoShard(task_def.processing_mode_def()) ||
IsStaticShard(task_def.processing_mode_def())) {
TF_RETURN_IF_ERROR(dataset.MakeIterator(&iterator));
return iterator;
}
if (IsDynamicShard(task_def.processing_mode_def())) {
std::vector<std::unique_ptr<SplitProvider>> split_providers;
split_providers.reserve(task_def.num_split_providers());
for (int i = 0; i < task_def.num_split_providers(); ++i) {
split_providers.push_back(std::make_unique<DataServiceSplitProvider>(
config_.dispatcher_address(), config_.protocol(),
task_def.iteration_id(), i, config_.dispatcher_timeout_ms()));
}
TF_RETURN_IF_ERROR(
dataset.MakeIterator(std::move(split_providers), &iterator));
return iterator;
}
return errors::InvalidArgument("Unrecognized processing mode: ",
task_def.processing_mode_def().DebugString());
}
void DataServiceWorkerImpl::StopTask(Task& task) TF_LOCKS_EXCLUDED(mu_) {
{
mutex_lock l(task.mu);
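    // Marking the task initialized here keeps a concurrent
    // EnsureTaskInitialized from building an iterator for a task that is
    // being torn down.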
task.initialized = true;
}
if (task.task_runner) {
task.task_runner->Cancel();
}
mutex_lock l(mu_);
while (task.outstanding_requests > 0) {
cv_.wait(l);
}
}
Status DataServiceWorkerImpl::GetElement(const GetElementRequest* request,
GetElementResponse* response) {
VLOG(3) << "Received GetElement request for task " << request->task_id();
struct GetElementResult result;
TF_RETURN_IF_ERROR(GetElementResult(request, &result));
response->set_end_of_sequence(result.end_of_sequence);
response->set_skip_task(result.skip);
if (!response->end_of_sequence() && !response->skip_task()) {
TF_RETURN_IF_ERROR(
MoveElementToResponse(std::move(result.components), *response));
VLOG(3) << "Producing an element for task " << request->task_id();
}
return absl::OkStatus();
}
Status DataServiceWorkerImpl::GetWorkerTasks(
const GetWorkerTasksRequest* request, GetWorkerTasksResponse* response) {
mutex_lock l(mu_);
for (const auto& it : tasks_) {
Task* task = it.second.get();
TaskInfo* task_info = response->add_tasks();
task_info->set_worker_address(worker_address_);
task_info->set_task_id(task->task_def.task_id());
task_info->set_iteration_id(task->task_def.iteration_id());
}
return absl::OkStatus();
}
Status DataServiceWorkerImpl::GetSnapshotTaskProgresses(
const GetSnapshotTaskProgressesRequest* request,
GetSnapshotTaskProgressesResponse* response) {
for (const auto& snapshot_task_progress : GetSnapshotTaskProgress()) {
*response->add_snapshot_task_progresses() = snapshot_task_progress;
}
return absl::OkStatus();
}
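// Background thread that reports completed tasks to the dispatcher,
// retrying after kRetryInterval on failure until the worker is cancelled.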
void DataServiceWorkerImpl::TaskCompletionThread() TF_LOCKS_EXCLUDED(mu_) {
while (true) {
{
mutex_lock l(mu_);
while (!cancelled_ && pending_completed_tasks_.empty()) {
task_completion_cv_.wait(l);
}
if (cancelled_ && pending_completed_tasks_.empty()) {
VLOG(3) << "Task completion thread shutting down";
return;
}
}
Status s = SendTaskUpdates();
if (!s.ok()) {
LOG(WARNING) << "Failed to send task updates to dispatcher: " << s;
mutex_lock l(mu_);
if (cancelled_) {
VLOG(3) << "Task completion thread shutting down";
return;
} else {
task_completion_cv_.wait_for(
l, absl::ToChronoMicroseconds(kRetryInterval));
}
}
}
}
Status DataServiceWorkerImpl::SendTaskUpdates() TF_LOCKS_EXCLUDED(mu_) {
std::vector<TaskProgress> task_progress;
{
mutex_lock l(mu_);
VLOG(3) << "Sending " << pending_completed_tasks_.size()
<< " task updates to dispatcher";
task_progress.reserve(pending_completed_tasks_.size());
for (int task_id : pending_completed_tasks_) {
task_progress.emplace_back();
task_progress.back().set_task_id(task_id);
task_progress.back().set_completed(true);
}
}
TF_RETURN_IF_ERROR(dispatcher_->WorkerUpdate(worker_address_, task_progress));
mutex_lock l(mu_);
for (const auto& update : task_progress) {
pending_completed_tasks_.erase(update.task_id());
}
VLOG(3) << "Sent " << task_progress.size() << " task updates ";
return absl::OkStatus();
}
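// Background thread that heartbeats to the dispatcher at the configured
// interval; heartbeats are skipped until the worker has registered.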
void DataServiceWorkerImpl::HeartbeatThread() TF_LOCKS_EXCLUDED(mu_) {
while (true) {
int64_t next_heartbeat_micros =
Env::Default()->NowMicros() + (config_.heartbeat_interval_ms() * 1000);
{
mutex_lock l(mu_);
while (!cancelled_ &&
Env::Default()->NowMicros() < next_heartbeat_micros) {
int64_t time_to_wait_micros =
next_heartbeat_micros - Env::Default()->NowMicros();
heartbeat_cv_.wait_for(l,
std::chrono::microseconds(time_to_wait_micros));
}
if (cancelled_) {
VLOG(3) << "Heartbeat thread shutting down";
return;
}
if (!registered_) {
VLOG(1) << "Not performing heartbeat; worker is not yet registered";
continue;
}
}
Status s = Heartbeat();
if (!s.ok()) {
LOG(WARNING) << "Failed to send heartbeat to dispatcher: " << s;
}
}
}
Status DataServiceWorkerImpl::Heartbeat() {
WorkerHeartbeatRequest request = BuildWorkerHeartbeatRequest();
TF_ASSIGN_OR_RETURN(WorkerHeartbeatResponse response,
dispatcher_->WorkerHeartbeat(request));
UpdateTasks(response);
return UpdateSnapshotWriters(response);
}
std::vector<ActiveTask> DataServiceWorkerImpl::GetActiveTasks() const
TF_LOCKS_EXCLUDED(mu_) {
std::vector<ActiveTask> active_tasks;
absl::flat_hash_map<int64_t, std::shared_ptr<Task>> current_tasks;
{
mutex_lock l(mu_);
current_tasks = tasks_;
}
for (const auto& [task_id, task] : current_tasks) {
if (task == nullptr) {
continue;
}
ActiveTask active_task;
active_task.set_task_id(task_id);
active_task.set_processing_time_nsec(0.0);
bool task_initialized = false;
{
mutex_lock task_lock(task->mu);
task_initialized = task->initialized;
}
if (task_initialized && task->task_runner != nullptr &&
task->task_runner->model() != nullptr) {
std::shared_ptr<model::Model> model = task->task_runner->model();
double processing_time_nsec = model->ComputeSnapshotProcessingTimeNsec();
if (processing_time_nsec > 0) {
active_task.set_processing_time_nsec(processing_time_nsec);
}
}
active_tasks.push_back(std::move(active_task));
}
return active_tasks;
}
std::vector<int64_t> DataServiceWorkerImpl::GetTaskIds(
const std::vector<ActiveTask>& active_tasks) const {
std::vector<int64_t> task_ids;
task_ids.reserve(active_tasks.size());
for (const ActiveTask& active_task : active_tasks) {
task_ids.push_back(active_task.task_id());
}
return task_ids;
}
WorkerHeartbeatRequest DataServiceWorkerImpl::BuildWorkerHeartbeatRequest()
const TF_LOCKS_EXCLUDED(mu_) {
std::vector<ActiveTask> active_tasks = GetActiveTasks();
std::vector<int64_t> current_tasks = GetTaskIds(active_tasks);
WorkerHeartbeatRequest request;
request.set_worker_address(worker_address_);
*request.mutable_transfer_servers() = {transfer_servers_.begin(),
transfer_servers_.end()};
*request.mutable_worker_tags() = config_.worker_tags();
request.set_worker_uid(worker_uid_);
*request.mutable_current_tasks() = {current_tasks.begin(),
current_tasks.end()};
for (const auto& snapshot_task_progress : GetSnapshotTaskProgress()) {
request.mutable_snapshot_task_progress()->insert(
{snapshot_task_progress.snapshot_task().base_path(),
snapshot_task_progress});
}
*request.mutable_active_tasks() = {active_tasks.begin(), active_tasks.end()};
return request;
}
std::vector<SnapshotTaskProgress>
DataServiceWorkerImpl::GetSnapshotTaskProgress() const {
mutex_lock l(mu_);
std::vector<SnapshotTaskProgress> snapshot_task_progress;
for (const auto& [snapshot_task, stream_writer] : snapshot_writers_) {
SnapshotTaskProgress progress;
progress.mutable_snapshot_task()->set_base_path(snapshot_task.base_path);
progress.mutable_snapshot_task()->set_stream_index(
snapshot_task.stream_index);
absl::StatusOr<bool> completed = stream_writer->Completed();
if (completed.ok()) {
progress.set_completed(*completed);
} else {
*progress.mutable_status() = tsl::StatusToProto(completed.status());
}
snapshot_task_progress.push_back(std::move(progress));
}
return snapshot_task_progress;
}
void DataServiceWorkerImpl::UpdateTasks(const WorkerHeartbeatResponse& response)
TF_LOCKS_EXCLUDED(mu_) {
std::vector<std::shared_ptr<Task>> tasks_to_delete;
{
mutex_lock l(mu_);
for (const auto& task : response.new_tasks()) {
VLOG(1) << "Received new task from dispatcher with id " << task.task_id();
if (deleted_tasks_.contains(task.task_id())) {
continue;
}
Status s = ProcessTaskInternal(task);
if (!s.ok() && !errors::IsAlreadyExists(s)) {
LOG(WARNING) << "Failed to start processing task " << task.task_id()
<< ": " << s;
}
}
tasks_to_delete.reserve(response.tasks_to_delete_size());
for (int64_t task_id : response.tasks_to_delete()) {
VLOG(3) << "Deleting task " << task_id
<< " at the request of the dispatcher";
if (!tasks_.contains(task_id)) {
continue;
}
tasks_to_delete.push_back(std::move(tasks_[task_id]));
tasks_.erase(task_id);
finished_tasks_.insert(task_id);
}
}
for (const auto& task : tasks_to_delete) {
StopTask(*task);
}
}
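// Reconciles local snapshot stream writers with the snapshot tasks assigned
// in the heartbeat response: missing writers are created, and writers whose
// tasks are no longer assigned are cancelled and removed.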
Status DataServiceWorkerImpl::UpdateSnapshotWriters(
const WorkerHeartbeatResponse& response) TF_LOCKS_EXCLUDED(mu_) {
absl::flat_hash_set<SnapshotTask> assigned_snapshot_task_keys;
for (const SnapshotTaskDef& snapshot_task : response.snapshot_tasks()) {
SnapshotTask snapshot_task_key{snapshot_task.base_path(),
snapshot_task.stream_index()};
assigned_snapshot_task_keys.insert(snapshot_task_key);
{
mutex_lock l(mu_);
if (snapshot_writers_.contains(snapshot_task_key)) {
continue;
}
}
DatasetDef dataset_def;
TF_RETURN_IF_ERROR(ReadBinaryProto(
Env::Default(), DatasetDefFilePath(snapshot_task.base_path()),
&dataset_def));
TF_ASSIGN_OR_RETURN(std::unique_ptr<StandaloneTaskIterator> iterator,
MakeSnapshotTaskIterator(snapshot_task, dataset_def));
mutex_lock l(mu_);
snapshot_writers_.emplace(
snapshot_task_key,
std::make_unique<SnapshotStreamWriter>(
SnapshotWriterParams{
snapshot_task.base_path(), snapshot_task.stream_index(),
snapshot_task.metadata().compression(), Env::Default(),
ByteSize::Bytes(config_.snapshot_max_chunk_size_bytes())},
std::move(iterator)));
}
mutex_lock l(mu_);
for (auto it = snapshot_writers_.begin(); it != snapshot_writers_.end();) {
if (!assigned_snapshot_task_keys.contains(it->first)) {
it->second->Cancel();
snapshot_writers_.erase(it++);
} else {
++it;
}
}
return absl::OkStatus();
}
absl::StatusOr<std::unique_ptr<StandaloneTaskIterator>>
DataServiceWorkerImpl::MakeSnapshotTaskIterator(
const SnapshotTaskDef& snapshot_task, const DatasetDef& dataset_def) const {
std::unique_ptr<standalone::Dataset> dataset;
TF_RETURN_IF_ERROR(standalone::Dataset::FromGraph(
standalone::Dataset::Params(), dataset_def.graph(), &dataset));
std::vector<std::unique_ptr<SplitProvider>> split_providers;
split_providers.reserve(snapshot_task.num_sources());
for (int i = 0; i < snapshot_task.num_sources(); ++i) {
TF_ASSIGN_OR_RETURN(std::unique_ptr<DataServiceDispatcherClient> dispatcher,
CreateDispatcherClient());
split_providers.push_back(std::make_unique<SnapshotSplitProvider>(
worker_address_, snapshot_task,
i, absl::Milliseconds(config_.dispatcher_timeout_ms()),
std::move(dispatcher), Env::Default()));
}
std::unique_ptr<standalone::Iterator> iterator;
TF_RETURN_IF_ERROR(
dataset->MakeIterator(std::move(split_providers), &iterator));
return std::make_unique<StandaloneTaskIterator>(std::move(dataset),
std::move(iterator));
}
void DataServiceWorkerImpl::DeleteLocalTask(const TaskInfo& task_info)
TF_LOCKS_EXCLUDED(mu_) {
std::shared_ptr<Task> task;
{
mutex_lock l(mu_);
auto it = tasks_.find(task_info.task_id());
if (it == tasks_.end() || !it->second) {
return;
}
task = std::move(it->second);
tasks_.erase(task_info.task_id());
pending_completed_tasks_.insert(task_info.task_id());
deleted_tasks_.insert(task_info.task_id());
}
VLOG(2) << "Delete local task " << task_info.task_id() << " from worker "
<< worker_address_ << " at the request of the client.";
StopTask(*task);
}
WorkerStateExport DataServiceWorkerImpl::ExportState() const {
WorkerStateExport worker_state_export;
*worker_state_export.mutable_worker_config() = config_;
mutex_lock l(mu_);
if (!registered_) {
return worker_state_export;
}
for (const auto& task : tasks_) {
*worker_state_export.add_tasks() = Export(task.second->task_def);
}
for (int64_t finished_task : finished_tasks_) {
worker_state_export.add_finished_task_ids(finished_task);
}
for (int64_t deleted_task : deleted_tasks_) {
worker_state_export.add_deleted_task_ids(deleted_task);
}
return worker_state_export;
}
void LocalWorkers::Add(absl::string_view worker_address,
std::shared_ptr<DataServiceWorkerImpl> worker) {
DCHECK(worker != nullptr) << "Adding a nullptr local worker is disallowed.";
VLOG(1) << "Register local worker at address " << worker_address;
mutex_lock l(mu_);
(*local_workers_)[worker_address] = worker;
}
std::shared_ptr<DataServiceWorkerImpl> LocalWorkers::Get(
absl::string_view worker_address) {
tf_shared_lock l(mu_);
AddressToWorkerMap::const_iterator it = local_workers_->find(worker_address);
if (it == local_workers_->end()) {
return nullptr;
}
return it->second;
}
bool LocalWorkers::Empty() {
tf_shared_lock l(mu_);
return local_workers_->empty();
}
void LocalWorkers::Remove(absl::string_view worker_address) {
VLOG(1) << "Remove local worker at address " << worker_address;
mutex_lock l(mu_);
local_workers_->erase(worker_address);
}
}
} | #include "tensorflow/core/data/service/worker_impl.h"
#include <memory>
#include <string>
#include <vector>
#include "tensorflow/core/data/service/test_cluster.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace data {
namespace {
using ::testing::IsNull;
using ::testing::NotNull;
class LocalWorkersTest : public ::testing::Test {
protected:
void SetUp() override {
test_cluster_ = std::make_unique<TestCluster>(0);
TF_ASSERT_OK(test_cluster_->Initialize());
}
std::unique_ptr<TestCluster> test_cluster_;
};
TEST_F(LocalWorkersTest, AddRemoveLocalWorkers) {
EXPECT_TRUE(LocalWorkers::Empty());
TF_ASSERT_OK(test_cluster_->AddWorker());
TF_ASSERT_OK(test_cluster_->AddWorker());
TF_ASSERT_OK(test_cluster_->AddWorker());
std::vector<std::string> worker_addresses = {test_cluster_->WorkerAddress(0),
test_cluster_->WorkerAddress(1),
test_cluster_->WorkerAddress(2)};
EXPECT_FALSE(LocalWorkers::Empty());
EXPECT_THAT(LocalWorkers::Get(worker_addresses[0]), NotNull());
EXPECT_THAT(LocalWorkers::Get(worker_addresses[1]), NotNull());
EXPECT_THAT(LocalWorkers::Get(worker_addresses[2]), NotNull());
test_cluster_->StopWorker(0);
EXPECT_FALSE(LocalWorkers::Empty());
EXPECT_THAT(LocalWorkers::Get(worker_addresses[0]), IsNull());
EXPECT_THAT(LocalWorkers::Get(worker_addresses[1]), NotNull());
EXPECT_THAT(LocalWorkers::Get(worker_addresses[2]), NotNull());
test_cluster_->StopWorkers();
EXPECT_TRUE(LocalWorkers::Empty());
EXPECT_THAT(LocalWorkers::Get(worker_addresses[0]), IsNull());
EXPECT_THAT(LocalWorkers::Get(worker_addresses[1]), IsNull());
EXPECT_THAT(LocalWorkers::Get(worker_addresses[2]), IsNull());
}
TEST_F(LocalWorkersTest, NoLocalWorker) {
EXPECT_TRUE(LocalWorkers::Empty());
EXPECT_THAT(LocalWorkers::Get(""), IsNull());
EXPECT_THAT(LocalWorkers::Get("Invalid address"),
IsNull());
EXPECT_TRUE(LocalWorkers::Empty());
LocalWorkers::Remove("");
LocalWorkers::Remove("Invalid address");
EXPECT_TRUE(LocalWorkers::Empty());
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/service/worker_impl.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/service/worker_impl_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
671484fe-b9bf-495f-ba88-1dec3ba87779 | cpp | google/cel-cpp | map_type | common/types/map_type.cc | common/types/map_type_test.cc | #include <string>
#include "absl/base/attributes.h"
#include "absl/base/nullability.h"
#include "absl/log/absl_check.h"
#include "absl/strings/str_cat.h"
#include "common/type.h"
#include "google/protobuf/arena.h"
#include "google/protobuf/descriptor.h"
namespace cel {
namespace common_internal {
namespace {
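// Statically allocated type data for the two most common map types, so that
// map<dyn, dyn> and map<string, dyn> never need arena allocation.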
ABSL_CONST_INIT const MapTypeData kDynDynMapTypeData = {
.key_and_value = {DynType(), DynType()},
};
ABSL_CONST_INIT const MapTypeData kStringDynMapTypeData = {
.key_and_value = {StringType(), DynType()},
};
}
absl::Nonnull<MapTypeData*> MapTypeData::Create(
absl::Nonnull<google::protobuf::Arena*> arena, const Type& key, const Type& value) {
MapTypeData* data =
::new (arena->AllocateAligned(sizeof(MapTypeData), alignof(MapTypeData)))
MapTypeData;
data->key_and_value[0] = key;
data->key_and_value[1] = value;
return data;
}
}
MapType::MapType() : MapType(&common_internal::kDynDynMapTypeData) {}
MapType::MapType(absl::Nonnull<google::protobuf::Arena*> arena, const Type& key,
const Type& value)
: MapType(key.IsDyn() && value.IsDyn()
? &common_internal::kDynDynMapTypeData
: common_internal::MapTypeData::Create(arena, key, value)) {}
std::string MapType::DebugString() const {
return absl::StrCat("map<", key().DebugString(), ", ", value().DebugString(),
">");
}
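// MapType packs a tagged pointer into data_: with kBasicBit set it points at
// a MapTypeData, with kProtoBit set it points at a map-entry protobuf
// Descriptor. The accessors below decode whichever form is present.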
TypeParameters MapType::GetParameters() const {
ABSL_DCHECK_NE(data_, 0);
if ((data_ & kBasicBit) == kBasicBit) {
const auto* data = reinterpret_cast<const common_internal::MapTypeData*>(
data_ & kPointerMask);
return TypeParameters(data->key_and_value[0], data->key_and_value[1]);
}
if ((data_ & kProtoBit) == kProtoBit) {
const auto* descriptor =
reinterpret_cast<const google::protobuf::Descriptor*>(data_ & kPointerMask);
return TypeParameters(Type::Field(descriptor->map_key()),
Type::Field(descriptor->map_value()));
}
return TypeParameters(Type(), Type());
}
Type MapType::GetKey() const {
ABSL_DCHECK_NE(data_, 0);
if ((data_ & kBasicBit) == kBasicBit) {
return reinterpret_cast<const common_internal::MapTypeData*>(data_ &
kPointerMask)
->key_and_value[0];
}
if ((data_ & kProtoBit) == kProtoBit) {
return Type::Field(
reinterpret_cast<const google::protobuf::Descriptor*>(data_ & kPointerMask)
->map_key());
}
return Type();
}
Type MapType::key() const { return GetKey(); }
Type MapType::GetValue() const {
ABSL_DCHECK_NE(data_, 0);
if ((data_ & kBasicBit) == kBasicBit) {
return reinterpret_cast<const common_internal::MapTypeData*>(data_ &
kPointerMask)
->key_and_value[1];
}
if ((data_ & kProtoBit) == kProtoBit) {
return Type::Field(
reinterpret_cast<const google::protobuf::Descriptor*>(data_ & kPointerMask)
->map_value());
}
return Type();
}
Type MapType::value() const { return GetValue(); }
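// Returns map<string, dyn>, the CEL type of JSON objects. For example,
// JsonMapType().key() == StringType() and JsonMapType().value() == DynType().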
MapType JsonMapType() {
return MapType(&common_internal::kStringDynMapTypeData);
}
} | #include <sstream>
#include "absl/hash/hash.h"
#include "common/type.h"
#include "internal/testing.h"
#include "google/protobuf/arena.h"
namespace cel {
namespace {
TEST(MapType, Default) {
MapType map_type;
EXPECT_EQ(map_type.key(), DynType());
EXPECT_EQ(map_type.value(), DynType());
}
TEST(MapType, Kind) {
google::protobuf::Arena arena;
EXPECT_EQ(MapType(&arena, StringType(), BytesType()).kind(), MapType::kKind);
EXPECT_EQ(Type(MapType(&arena, StringType(), BytesType())).kind(),
MapType::kKind);
}
TEST(MapType, Name) {
google::protobuf::Arena arena;
EXPECT_EQ(MapType(&arena, StringType(), BytesType()).name(), MapType::kName);
EXPECT_EQ(Type(MapType(&arena, StringType(), BytesType())).name(),
MapType::kName);
}
TEST(MapType, DebugString) {
google::protobuf::Arena arena;
{
std::ostringstream out;
out << MapType(&arena, StringType(), BytesType());
EXPECT_EQ(out.str(), "map<string, bytes>");
}
{
std::ostringstream out;
out << Type(MapType(&arena, StringType(), BytesType()));
EXPECT_EQ(out.str(), "map<string, bytes>");
}
}
TEST(MapType, Hash) {
google::protobuf::Arena arena;
EXPECT_EQ(absl::HashOf(MapType(&arena, StringType(), BytesType())),
absl::HashOf(MapType(&arena, StringType(), BytesType())));
}
TEST(MapType, Equal) {
google::protobuf::Arena arena;
EXPECT_EQ(MapType(&arena, StringType(), BytesType()),
MapType(&arena, StringType(), BytesType()));
EXPECT_EQ(Type(MapType(&arena, StringType(), BytesType())),
MapType(&arena, StringType(), BytesType()));
EXPECT_EQ(MapType(&arena, StringType(), BytesType()),
Type(MapType(&arena, StringType(), BytesType())));
EXPECT_EQ(Type(MapType(&arena, StringType(), BytesType())),
Type(MapType(&arena, StringType(), BytesType())));
}
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/common/types/map_type.cc | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/common/types/map_type_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
36232e17-485a-4372-ac8e-bbcb4ca23de8 | cpp | tensorflow/tensorflow | sequence_ops | tensorflow/compiler/tf2xla/kernels/sequence_ops.cc | tensorflow/core/kernels/sequence_ops_test.cc | #include "absl/status/statusor.h"
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "xla/hlo/builder/lib/constants.h"
#include "xla/hlo/builder/value_inference.h"
#include "xla/hlo/builder/xla_builder.h"
#include "xla/literal.h"
#include "xla/primitive_util.h"
#include "xla/xla_data.pb.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/op_requires.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace {
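// Builds a 1D range tensor as start + delta * iota(size) from scalar
// start/limit/delta literals, e.g. start=0, limit=10, delta=2 gives
// {0, 2, 4, 6, 8} (see RangeOpTest.Simple_D32 in the companion test).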
template <typename T>
absl::StatusOr<xla::XlaOp> CreateRangeTensor(
const xla::LiteralSlice& start_literal,
const xla::LiteralSlice& limit_literal,
const xla::LiteralSlice& delta_literal, xla::XlaBuilder* builder) {
T start = start_literal.Get<T>({});
T limit = limit_literal.Get<T>({});
T delta = delta_literal.Get<T>({});
if (delta == 0) {
return errors::InvalidArgument("Requires delta != 0: ", delta);
}
if (delta > 0) {
if (start > limit) {
return errors::InvalidArgument(
"Requires start <= limit when delta > 0: ", start, "/", limit);
}
} else {
if (start < limit) {
return errors::InvalidArgument(
"Requires start >= limit when delta < 0: ", start, "/", limit);
}
}
int64_t size =
(std::is_integral<T>::value
? static_cast<T>(
limit == start
? 0
: (std::abs(limit - start) - 1) / std::abs(delta) + 1)
: std::ceil(std::abs((limit - start) / delta)));
return xla::ConstantR0(builder, start) +
xla::ConstantR0(builder, delta) *
xla::Iota(builder, xla::primitive_util::NativeToPrimitiveType<T>(),
size);
}
class RangeOp : public XlaOpKernel {
public:
explicit RangeOp(OpKernelConstruction* ctx) : XlaOpKernel(ctx) {}
void Compile(XlaOpKernelContext* ctx) override {
const TensorShape start_in_shape = ctx->InputShape(0);
const TensorShape limit_in_shape = ctx->InputShape(1);
const TensorShape delta_in_shape = ctx->InputShape(2);
OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(start_in_shape),
errors::InvalidArgument("start must be a scalar, not shape ",
start_in_shape.DebugString()));
OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(limit_in_shape),
errors::InvalidArgument("limit must be a scalar, not shape ",
limit_in_shape.DebugString()));
OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(delta_in_shape),
errors::InvalidArgument("delta must be a scalar, not shape ",
delta_in_shape.DebugString()));
xla::Literal start, limit, delta;
OP_REQUIRES_OK(ctx, ctx->ConstantInput(
0, &start, xla::ValueInferenceMode::kLowerBound));
OP_REQUIRES_OK(ctx, ctx->ConstantInput(
1, &limit, xla::ValueInferenceMode::kUpperBound));
OP_REQUIRES_OK(ctx, ctx->ConstantInput(2, &delta));
DataType type = input_type(0);
absl::StatusOr<xla::XlaOp> output;
switch (type) {
case DT_INT32:
output = CreateRangeTensor<int32>(start, limit, delta, ctx->builder());
break;
case DT_INT64:
output =
CreateRangeTensor<int64_t>(start, limit, delta, ctx->builder());
break;
case DT_FLOAT:
output = CreateRangeTensor<float>(start, limit, delta, ctx->builder());
break;
case DT_DOUBLE:
output = CreateRangeTensor<double>(start, limit, delta, ctx->builder());
break;
default:
output = errors::InvalidArgument("Invalid type for Range ",
DataTypeString(type));
}
OP_REQUIRES_OK(ctx, output.status());
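    // The constant-folded output above was built from bound values (a
    // lower-bound start and an upper-bound limit); if either operand is
    // dynamic, recompute the element count from the runtime operands and set
    // it as the dynamic size of dimension 0.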
bool start_is_dynamic = false;
OP_REQUIRES_OK(ctx,
ctx->ResolveInputDynamismIntoPred(0, &start_is_dynamic));
bool limit_is_dynamic = false;
OP_REQUIRES_OK(ctx,
ctx->ResolveInputDynamismIntoPred(1, &limit_is_dynamic));
if (start_is_dynamic || limit_is_dynamic) {
xla::XlaOp delta = ctx->Input(2);
xla::XlaOp limit = ctx->Input(1);
xla::XlaOp start = ctx->Input(0);
if (type == DT_INT32 || type == DT_INT64) {
auto dynamic_size = (xla::Abs(limit - start) + xla::Abs(delta) -
xla::One(ctx->builder(), ctx->input_xla_type(0))) /
xla::Abs(delta);
dynamic_size = xla::ConvertElementType(dynamic_size, xla::S32);
output = xla::SetDimensionSize(output.value(), dynamic_size, 0);
} else {
auto dynamic_size = (xla::Ceil(xla::Abs((limit - start) / delta)));
dynamic_size = xla::ConvertElementType(dynamic_size, xla::S32);
output = xla::SetDimensionSize(output.value(), dynamic_size, 0);
}
}
ctx->SetOutput(0, output.value());
}
};
REGISTER_XLA_OP(Name("Range")
.CompileTimeConstantInput("start")
.CompileTimeConstantInput("limit")
.CompileTimeConstantInput("delta"),
RangeOp);
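// Emits `num` evenly spaced values from start to stop inclusive, e.g.
// start=3, stop=7, num=3 gives {3, 5, 7} (see LinSpaceOpTest.Simple_D32).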
class LinSpaceOp : public XlaOpKernel {
public:
explicit LinSpaceOp(OpKernelConstruction* ctx) : XlaOpKernel(ctx) {}
void Compile(XlaOpKernelContext* ctx) override {
const TensorShape start_in_shape = ctx->InputShape("start");
const TensorShape stop_in_shape = ctx->InputShape("stop");
const TensorShape num_in_shape = ctx->InputShape("num");
OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(start_in_shape),
errors::InvalidArgument("start must be a scalar, not shape ",
start_in_shape.DebugString()));
OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(stop_in_shape),
errors::InvalidArgument("stop must be a scalar, not shape ",
stop_in_shape.DebugString()));
OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(num_in_shape),
errors::InvalidArgument("num must be a scalar, not shape ",
num_in_shape.DebugString()));
int64_t num;
OP_REQUIRES_OK(ctx, ctx->ConstantInputAsIntScalar("num", &num));
OP_REQUIRES(ctx, num > 0,
errors::InvalidArgument("Requires num > 0: ", num));
xla::XlaOp start = ctx->Input("start");
xla::XlaOp stop = ctx->Input("stop");
xla::XlaOp iota = xla::Iota(ctx->builder(), ctx->output_xla_type(0), num);
xla::XlaOp step =
(stop - start) / xla::ScalarLike(start, (num > 1 ? num - 1 : num));
xla::XlaOp result = iota * step + start;
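    // Overwrite the last element with `stop` so the endpoint is exact despite
    // floating-point rounding in iota * step + start.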
if (num > 1) {
xla::XlaOp mask = xla::Iota(ctx->builder(), xla::S64, num);
xla::XlaOp eq = xla::Eq(mask, xla::ScalarLike(mask, num - 1));
result = xla::Select(eq, stop, result);
}
ctx->SetOutput(0, result);
}
};
REGISTER_XLA_OP(Name("LinSpace").CompileTimeConstantInput("num"), LinSpaceOp);
}
} | #include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
class RangeOpTest : public OpsTestBase {
protected:
void MakeOp(DataType input_type) {
TF_ASSERT_OK(NodeDefBuilder("myop", "Range")
.Input(FakeInput(input_type))
.Input(FakeInput(input_type))
.Input(FakeInput(input_type))
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
}
};
class LinSpaceOpTest : public OpsTestBase {
protected:
void MakeOp(DataType input_type, DataType index_type) {
TF_ASSERT_OK(NodeDefBuilder("myop", "LinSpace")
.Input(FakeInput(input_type))
.Input(FakeInput(input_type))
.Input(FakeInput(index_type))
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
}
};
TEST_F(RangeOpTest, Simple_D32) {
MakeOp(DT_INT32);
AddInputFromArray<int32>(TensorShape({}), {0});
AddInputFromArray<int32>(TensorShape({}), {10});
AddInputFromArray<int32>(TensorShape({}), {2});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_INT32, TensorShape({5}));
test::FillValues<int32>(&expected, {0, 2, 4, 6, 8});
test::ExpectTensorEqual<int32>(expected, *GetOutput(0));
}
TEST_F(RangeOpTest, Simple_Half) {
MakeOp(DT_HALF);
AddInputFromList<Eigen::half, float>(TensorShape({}), {0.5});
AddInputFromList<Eigen::half, float>(TensorShape({}), {2});
AddInputFromList<Eigen::half, float>(TensorShape({}), {0.3});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_HALF, TensorShape({5}));
test::FillValues<Eigen::half, float>(&expected, {0.5, 0.8, 1.1, 1.4, 1.7});
test::ExpectTensorEqual<Eigen::half>(expected, *GetOutput(0));
}
TEST_F(RangeOpTest, Simple_Float) {
MakeOp(DT_FLOAT);
AddInputFromArray<float>(TensorShape({}), {0.5});
AddInputFromArray<float>(TensorShape({}), {2});
AddInputFromArray<float>(TensorShape({}), {0.3});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({5}));
test::FillValues<float>(&expected, {0.5, 0.8, 1.1, 1.4, 1.7});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_F(RangeOpTest, Large_Double) {
MakeOp(DT_DOUBLE);
AddInputFromArray<double>(TensorShape({}), {0.0});
AddInputFromArray<double>(TensorShape({}), {10000});
AddInputFromArray<double>(TensorShape({}), {0.5});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_DOUBLE, TensorShape({20000}));
std::vector<double> result;
for (int32_t i = 0; i < 20000; ++i) result.push_back(i * 0.5);
test::FillValues<double>(&expected, absl::Span<const double>(result));
test::ExpectTensorEqual<double>(expected, *GetOutput(0));
}
TEST_F(LinSpaceOpTest, Simple_D32) {
MakeOp(DT_FLOAT, DT_INT32);
AddInputFromArray<float>(TensorShape({}), {3.0});
AddInputFromArray<float>(TensorShape({}), {7.0});
AddInputFromArray<int32>(TensorShape({}), {3});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({3}));
test::FillValues<float>(&expected, {3.0, 5.0, 7.0});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_F(LinSpaceOpTest, Exact_Endpoints) {
MakeOp(DT_FLOAT, DT_INT32);
AddInputFromArray<float>(TensorShape({}), {0.0});
AddInputFromArray<float>(TensorShape({}), {1.0});
AddInputFromArray<int32>(TensorShape({}), {42});
TF_ASSERT_OK(RunOpKernel());
Tensor output = *GetOutput(0);
float expected_start = 0.0;
float start = output.flat<float>()(0);
EXPECT_EQ(expected_start, start) << expected_start << " vs. " << start;
float expected_stop = 1.0;
float stop = output.flat<float>()(output.NumElements() - 1);
EXPECT_EQ(expected_stop, stop) << expected_stop << " vs. " << stop;
}
TEST_F(LinSpaceOpTest, Single_D64) {
MakeOp(DT_FLOAT, DT_INT64);
AddInputFromArray<float>(TensorShape({}), {9.0});
AddInputFromArray<float>(TensorShape({}), {100.0});
AddInputFromArray<int64_t>(TensorShape({}), {1});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1}));
test::FillValues<float>(&expected, {9.0});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_F(LinSpaceOpTest, Simple_Double) {
MakeOp(DT_DOUBLE, DT_INT32);
AddInputFromArray<double>(TensorShape({}), {5.0});
AddInputFromArray<double>(TensorShape({}), {6.0});
AddInputFromArray<int32>(TensorShape({}), {6});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_DOUBLE, TensorShape({6}));
test::FillValues<double>(&expected, {5.0, 5.2, 5.4, 5.6, 5.8, 6.0});
test::ExpectTensorEqual<double>(expected, *GetOutput(0));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/kernels/sequence_ops.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/sequence_ops_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
3d74cae6-8e08-4595-b6bf-2d5ca27cc59f | cpp | tensorflow/tensorflow | convert_mover | third_party/xla/xla/service/convert_mover.cc | third_party/xla/xla/service/convert_mover_test.cc | #include "xla/service/convert_mover.h"
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/log/log.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/literal.h"
#include "xla/primitive_util.h"
#include "xla/service/hlo_creation_utils.h"
#include "xla/shape.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
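// A literal converts losslessly to dst_ty if converting there and back
// reproduces the original values.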
static bool IsLosslesslyConvertibleTo(const Literal& literal,
PrimitiveType dst_ty) {
PrimitiveType orig_ty = literal.shape().element_type();
absl::StatusOr<Literal> converted1 = literal.Convert(dst_ty);
if (!converted1.ok()) {
return false;
}
absl::StatusOr<Literal> converted2 = converted1->Convert(orig_ty);
if (!converted2.ok()) {
return false;
}
return literal == *converted2;
}
bool OpCommutesWithConvert(HloOpcode opcode) {
switch (opcode) {
case HloOpcode::kConcatenate:
case HloOpcode::kPad:
case HloOpcode::kReshape:
case HloOpcode::kSlice:
case HloOpcode::kTranspose:
return true;
default:
return false;
}
}
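// Performs two rewrites over the computation: first sink widening converts
// (e.g. f16->f32) below ops they commute with, then hoist narrowing converts
// (e.g. f32->f16) above such ops, so the op itself runs in the narrower type.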
absl::StatusOr<bool> MoveConvertPrecisionOps(HloComputation* comp) {
bool changed = false;
for (HloInstruction* instr : comp->MakeInstructionPostOrder()) {
if (!OpCommutesWithConvert(instr->opcode()) ||
instr->operand_count() == 0 ||
!absl::c_all_of(instr->operands(), [](const HloInstruction* operand) {
return (operand->opcode() == HloOpcode::kConvert &&
operand->user_count() == 1) ||
operand->opcode() == HloOpcode::kConstant;
})) {
continue;
}
auto convert_op_it = absl::c_find_if(instr->operands(),
HloPredicateIsOp<HloOpcode::kConvert>);
if (convert_op_it == instr->operands().end()) {
continue;
}
const HloInstruction* convert_op = *convert_op_it;
if (!absl::c_all_of(instr->operands(), [&](const HloInstruction* operand) {
return operand->opcode() != HloOpcode::kConvert ||
operand->operand(0)->shape().element_type() ==
convert_op->operand(0)->shape().element_type();
})) {
continue;
}
PrimitiveType src_ty = convert_op->operand(0)->shape().element_type();
PrimitiveType dst_ty = convert_op->shape().element_type();
if (primitive_util::BitWidth(src_ty) >= primitive_util::BitWidth(dst_ty)) {
continue;
}
if (absl::c_any_of(instr->operands(), [&](const HloInstruction* operand) {
return operand->opcode() == HloOpcode::kConstant &&
!IsLosslesslyConvertibleTo(operand->literal(), src_ty);
})) {
continue;
}
if (primitive_util::IsSubByteNonPredType(src_ty)) {
continue;
}
VLOG(2) << "Moving increase-precision convert op " << convert_op->ToString()
<< " down the graph: " << instr->ToString();
absl::InlinedVector<HloInstruction*, 8> new_operands;
new_operands.reserve(instr->operand_count());
for (HloInstruction* operand : instr->operands()) {
switch (operand->opcode()) {
case HloOpcode::kConvert:
new_operands.push_back(operand->mutable_operand(0));
break;
case HloOpcode::kConstant:
new_operands.push_back(MakeConvertToHlo(operand, src_ty));
break;
default:
LOG(FATAL) << "Unexpected opcode in " << operand->ToString();
}
}
Shape new_shape = instr->shape();
new_shape.set_element_type(src_ty);
HloInstruction* new_instr = comp->AddInstruction(
instr->CloneWithNewOperands(new_shape, new_operands));
TF_RETURN_IF_ERROR(comp->ReplaceWithNewInstruction(
instr, HloInstruction::CreateConvert(instr->shape(), new_instr)));
changed = true;
}
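  // Second phase: move narrowing converts up through their producers. The
  // queue starts in reverse post-order, and newly created converts are pushed
  // to the front so they are considered for further hoisting.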
std::deque<HloInstruction*> work_queue;
std::vector<HloInstruction*> instrs = comp->MakeInstructionPostOrder();
work_queue.insert(work_queue.end(), instrs.rbegin(), instrs.rend());
while (!work_queue.empty()) {
HloInstruction* instr = work_queue.front();
work_queue.pop_front();
if (instr->opcode() != HloOpcode::kConvert ||
instr->operand(0)->user_count() != 1 ||
!OpCommutesWithConvert(instr->operand(0)->opcode())) {
continue;
}
PrimitiveType src_ty = instr->operand(0)->shape().element_type();
PrimitiveType dst_ty = instr->shape().element_type();
if (primitive_util::BitWidth(src_ty) <= primitive_util::BitWidth(dst_ty)) {
continue;
}
if (primitive_util::IsSubByteNonPredType(dst_ty)) {
continue;
}
VLOG(2) << "Moving decrease-precision convert up the graph: "
<< instr->ToString();
HloInstruction* to_convert = instr->mutable_operand(0);
absl::InlinedVector<HloInstruction*, 8> new_operands;
new_operands.reserve(to_convert->operand_count());
for (HloInstruction* operand : to_convert->operands()) {
work_queue.push_front(MakeConvertToHlo(operand, dst_ty));
new_operands.push_back(work_queue.front());
}
Shape new_shape = to_convert->shape();
new_shape.set_element_type(dst_ty);
TF_RETURN_IF_ERROR(comp->ReplaceWithNewInstruction(
instr, to_convert->CloneWithNewOperands(new_shape, new_operands)));
changed = true;
}
return changed;
}
}
absl::StatusOr<bool> ConvertMover::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
for (HloComputation* comp :
module->MakeNonfusionComputations(execution_threads)) {
TF_ASSIGN_OR_RETURN(bool changed_computation,
MoveConvertPrecisionOps(comp));
changed |= changed_computation;
}
return changed;
}
} | #include "xla/service/convert_mover.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
namespace m = ::xla::match;
class ConvertMoverTest : public HloTestBase {
public:
  ConvertMoverTest() : HloTestBase(false, false) {}
};
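// Matchers for a convert whose result has the given element type.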
template <typename T>
auto MatchConvertToS8(T&& operand) {
return m::Convert(operand).WithShape(m::Shape().WithElementType(S8));
}
template <typename T>
auto MatchConvertToF16(T&& operand) {
return m::Convert(operand).WithShape(m::Shape().WithElementType(F16));
}
template <typename T>
auto MatchConvertToF32(T&& operand) {
return m::Convert(operand).WithShape(m::Shape().WithElementType(F32));
}
template <typename T>
auto MatchConvertToC64(T&& operand) {
return m::Convert(operand).WithShape(m::Shape().WithElementType(C64));
}
TEST_F(ConvertMoverTest, MoveDownThroughConcat) {
absl::string_view module_string = R"(
HloModule module
ENTRY main {
ROOT root = concatenate(f32[10] convert(f16[10] parameter(0)),
f32[10] convert(f16[10] parameter(1))),
dimensions={0}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_string));
ConvertMover pass;
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&pass, module.get()));
SCOPED_TRACE(module->ToString());
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(MatchConvertToF32(
m::Concatenate(m::Parameter(0), m::Parameter(1)))));
}
TEST_F(ConvertMoverTest, NoMoveDownThroughConcatWithDifferentSrcTypes) {
absl::string_view module_string = R"(
HloModule module
ENTRY main {
ROOT root = concatenate(f32[10] convert(bf16[10] parameter(0)),
f32[10] convert(f16[10] parameter(1))),
dimensions={0}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_string));
ConvertMover pass;
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&pass, module.get()));
SCOPED_TRACE(module->ToString());
EXPECT_FALSE(changed);
}
TEST_F(ConvertMoverTest, MoveUpReshape) {
absl::string_view module_string = R"(
HloModule module
ENTRY main {
ROOT root = f16[10,10] convert(f32[10,10] reshape(f32[100] parameter(0)))
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_string));
ConvertMover pass;
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&pass, module.get()));
SCOPED_TRACE(module->ToString());
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Reshape(MatchConvertToF16(m::Parameter(0)))));
}
TEST_F(ConvertMoverTest, MoveUpTwoTransposes) {
absl::string_view module_string = R"(
HloModule module
ENTRY main {
t1 = transpose(f32[3,4] parameter(0)), dimensions={1,0}
t2 = transpose(t1), dimensions={1,0}
ROOT root = f16[3,4] convert(t2)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_string));
ConvertMover pass;
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&pass, module.get()));
SCOPED_TRACE(module->ToString());
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Transpose(
m::Transpose(MatchConvertToF16(m::Parameter(0))))));
}
TEST_F(ConvertMoverTest, MoveDownTwoSlices) {
absl::string_view module_string = R"(
HloModule module
ENTRY main {
slice1 = f32[9] slice(f32[10] convert(f16[10] parameter(0))), slice={[0:9]}
ROOT slice2 = f32[8] slice(slice1), slice={[0:8]}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_string));
ConvertMover pass;
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&pass, module.get()));
SCOPED_TRACE(module->ToString());
EXPECT_TRUE(changed);
EXPECT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(MatchConvertToF32(m::Slice(m::Slice(m::Parameter(0))))));
}
TEST_F(ConvertMoverTest, MoveDownC64) {
absl::string_view module_string = R"(
HloModule module
ENTRY main {
ROOT root = concatenate(c64[10] convert(f32[10] parameter(0)),
c64[10] convert(f32[10] parameter(1))),
dimensions={0}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_string));
ConvertMover pass;
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&pass, module.get()));
SCOPED_TRACE(module->ToString());
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(MatchConvertToC64(m::Concatenate(
m::Parameter(0),
m::Parameter(1)
))));
}
TEST_F(ConvertMoverTest, MoveDownC64Constant) {
absl::string_view module_string = R"(
HloModule module
ENTRY main {
ROOT root = concatenate(c64[2] convert(f32[2] parameter(0)),
c64[2] convert(f32[2] parameter(1)),
c64[2] constant({(1,1), (-1,-1)})),
dimensions={0}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_string));
ConvertMover pass;
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&pass, module.get()));
SCOPED_TRACE(module->ToString());
EXPECT_FALSE(changed);
}
TEST_F(ConvertMoverTest, MoveUpPad) {
absl::string_view module_string = R"(
HloModule module
ENTRY main {
pad = f32[10] pad(f32[8] parameter(0), f32[] constant(0)), padding=1_1
ROOT root = f16[10] convert(pad)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_string));
ConvertMover pass;
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&pass, module.get()));
SCOPED_TRACE(module->ToString());
EXPECT_TRUE(changed);
EXPECT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::Pad(MatchConvertToF16(m::Parameter(0)),
MatchConvertToF16(m::ConstantEffectiveScalar(0)))));
}
TEST_F(ConvertMoverTest, MoveUpPadWithOutOfRangeConstant) {
absl::string_view module_string = R"(
HloModule module
ENTRY main {
pad = s32[10] pad(s32[8] parameter(0), s32[] constant(1000)), padding=1_1
ROOT root = s8[10] convert(pad)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_string));
ConvertMover pass;
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&pass, module.get()));
SCOPED_TRACE(module->ToString());
EXPECT_TRUE(changed);
EXPECT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::Pad(MatchConvertToS8(m::Parameter(0)),
MatchConvertToS8(m::ConstantEffectiveScalar(1000)))));
}
TEST_F(ConvertMoverTest, MoveDownPad) {
absl::string_view module_string = R"(
HloModule module
ENTRY main {
ROOT pad = f32[10] pad(f32[8] convert(f16[8] parameter(0)), f32[] constant(0)),
padding=1_1
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_string));
ConvertMover pass;
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&pass, module.get()));
SCOPED_TRACE(module->ToString());
EXPECT_TRUE(changed);
EXPECT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(MatchConvertToF32(m::Pad(
m::Parameter(0), MatchConvertToF16(m::ConstantEffectiveScalar(0))))));
}
TEST_F(ConvertMoverTest, NoMoveDownPadBecauseConstantIsOutOfRange) {
absl::string_view module_string = R"(
HloModule module
ENTRY main {
ROOT pad = f32[10] pad(f32[8] convert(f16[8] parameter(0)), f32[] constant(1e9)),
padding=1_1
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_string));
ConvertMover pass;
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&pass, module.get()));
SCOPED_TRACE(module->ToString());
EXPECT_FALSE(changed);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/convert_mover.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/convert_mover_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
e938fec1-2a3e-4a74-a363-d75017bc5abe | cpp | tensorflow/tensorflow | sample_stable_delegate_with_control_flow | tensorflow/lite/delegates/utils/experimental/sample_stable_delegate/sample_stable_delegate_with_control_flow.cc | tensorflow/lite/delegates/utils/experimental/sample_stable_delegate/sample_stable_delegate_with_control_flow_test.cc | #include "tensorflow/lite/delegates/utils/experimental/sample_stable_delegate/sample_stable_delegate_with_control_flow.h"
#include <memory>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "tensorflow/lite/c/builtin_op_data.h"
#include "tensorflow/lite/c/c_api.h"
#include "tensorflow/lite/c/c_api_opaque.h"
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/delegates/utils/simple_opaque_delegate.h"
namespace tflite {
namespace example {
static const int kTopLevelSubgraphIndex = -1;
namespace {
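// Delegate kernel that evaluates the delegated partition eagerly, including
// the condition/body subgraphs referenced by WHILE nodes.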
class SampleStableDelegateKernel : public SimpleOpaqueDelegateKernelInterface {
bool IsExternalTensor(const TfLiteOpaqueTensor* opaque_tensor) const {
return external_tensors_.count(opaque_tensor) != 0;
}
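  // A tensor is external if it crosses the boundary of the delegated
  // partition, i.e. it is only consumed or only produced by delegated nodes.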
void DeriveExternalTensors() {
for (const TfLiteOpaqueTensor* tensor : node_input_tensors_set_) {
if (node_output_tensors_set_.count(tensor) == 0) {
external_tensors_.insert(tensor);
}
}
for (const TfLiteOpaqueTensor* tensor : node_output_tensors_set_) {
if (node_input_tensors_set_.count(tensor) == 0) {
external_tensors_.insert(tensor);
}
}
}
public:
TfLiteStatus Init(TfLiteOpaqueContext* context,
const TfLiteOpaqueDelegateParams* params) override {
if (params->delegate == nullptr) return kTfLiteDelegateError;
context_ = context;
std::vector<int> callee_subgraph_indices;
TfLiteStatus status =
InitSubgraphNodes(context, kTopLevelSubgraphIndex,
params->nodes_to_replace, callee_subgraph_indices);
if (status != kTfLiteOk) return status;
DeriveExternalTensors();
return kTfLiteOk;
}
TfLiteStatus InitSubgraphNodes(TfLiteOpaqueContext* context,
int subgraph_index,
const TfLiteIntArray* nodes_to_execute,
std::vector<int>& callee_subgraph_indices) {
node_input_tensors_[subgraph_index].resize(nodes_to_execute->size);
node_output_tensors_[subgraph_index].resize(nodes_to_execute->size);
builtin_codes_[subgraph_index].resize(nodes_to_execute->size);
for (int i = 0; i < nodes_to_execute->size; ++i) {
const int node_index = nodes_to_execute->data[i];
TfLiteOpaqueNode* delegated_node = nullptr;
TfLiteOperator* delegated_node_registration = nullptr;
TfLiteOpaqueContextGetNodeAndRegistration(
context, node_index, &delegated_node, &delegated_node_registration);
builtin_codes_[subgraph_index][i] =
TfLiteOperatorGetBuiltInCode(delegated_node_registration);
for (int n = 0; n < TfLiteOpaqueNodeNumberOfInputs(delegated_node); ++n) {
auto input_tensor =
TfLiteOpaqueNodeGetInput(context, delegated_node, n);
node_input_tensors_[subgraph_index][i].push_back(input_tensor);
if (subgraph_index == kTopLevelSubgraphIndex) {
node_input_tensors_set_.insert(input_tensor);
}
}
for (int n = 0; n < TfLiteOpaqueNodeNumberOfOutputs(delegated_node);
++n) {
auto output_tensor =
TfLiteOpaqueNodeGetOutput(context, delegated_node, n);
node_output_tensors_[subgraph_index][i].push_back(output_tensor);
if (subgraph_index == kTopLevelSubgraphIndex) {
node_output_tensors_set_.insert(output_tensor);
}
}
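      // WHILE nodes reference condition/body subgraphs; recurse into each
      // branch so its nodes and tensors are tracked as well.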
if (builtin_codes_[subgraph_index][i] == kTfLiteBuiltinWhile) {
void* builtin_data = TfLiteOpaqueNodeGetBuiltinData(delegated_node);
TfLiteWhileParams* params =
reinterpret_cast<TfLiteWhileParams*>(builtin_data);
control_flow_branch_indices_[subgraph_index][i] = {
params->cond_subgraph_index, params->body_subgraph_index};
for (int branch_index :
control_flow_branch_indices_[subgraph_index][i]) {
callee_subgraph_indices.push_back(branch_index);
TfLiteStatus status;
TfLiteIntArray* execution_plan;
TfLiteOpaqueContext* branch_context;
status = TfLiteOpaqueContextAcquireSubgraphContext(
context, branch_index, &branch_context);
if (status != kTfLiteOk) return status;
status = TfLiteOpaqueContextGetExecutionPlan(branch_context,
&execution_plan);
if (status != kTfLiteOk) return status;
status = InitSubgraphNodes(branch_context, branch_index,
execution_plan, callee_subgraph_indices);
if (status != kTfLiteOk) return status;
status =
TfLiteOpaqueContextReleaseSubgraphContext(context, branch_index);
if (status != kTfLiteOk) return status;
}
}
}
return kTfLiteOk;
}
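  // Allocates scratch buffers for tensors that live entirely inside the
  // delegated partition; external tensors use the runtime-owned buffers.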
TfLiteStatus Prepare(TfLiteOpaqueContext* context,
TfLiteOpaqueNode* delegated_node) override {
if (external_tensors_.empty()) return kTfLiteOk;
const int kTheInputTensorSize =
helpers::CalculateNumElements((*external_tensors_.begin()));
for (auto [_, node_input_tensors] : node_input_tensors_) {
for (std::vector<const TfLiteOpaqueTensor*>& vecs : node_input_tensors) {
for (const TfLiteOpaqueTensor* tensor : vecs) {
if (IsExternalTensor(tensor)) continue;
std::vector<float>& vec_memory =
internal_float_tensors_memory_[tensor];
vec_memory.resize(kTheInputTensorSize);
}
}
}
for (auto [subgraph_index, node_output_tensors] : node_output_tensors_) {
for (int i = 0; i < node_output_tensors.size(); ++i) {
std::vector<const TfLiteOpaqueTensor*>& vecs = node_output_tensors[i];
for (int j = 0; j < vecs.size(); ++j) {
const TfLiteOpaqueTensor* tensor = vecs[j];
if (IsExternalTensor(tensor)) break;
if (builtin_codes_[subgraph_index][i] == kTfLiteBuiltinEqual) {
std::vector<int>& vec_memory = internal_int_tensors_memory_[tensor];
vec_memory.resize(kTheInputTensorSize);
} else {
std::vector<float>& vec_memory =
internal_float_tensors_memory_[tensor];
vec_memory.resize(kTheInputTensorSize);
}
}
}
}
return kTfLiteOk;
}
int* GetIntRawDataSource(const TfLiteOpaqueTensor* tensor) {
if (IsExternalTensor(tensor)) {
return reinterpret_cast<int*>(TfLiteOpaqueTensorData(tensor));
} else {
return internal_int_tensors_memory_[tensor].data();
}
}
float* GetFloatRawDataSource(const TfLiteOpaqueTensor* tensor) {
if (IsExternalTensor(tensor)) {
return reinterpret_cast<float*>(TfLiteOpaqueTensorData(tensor));
} else {
return internal_float_tensors_memory_[tensor].data();
}
}
void CopyRawDataSource(const TfLiteOpaqueTensor* from_tensor,
const TfLiteOpaqueTensor* to_tensor) {
float* from_data = GetFloatRawDataSource(from_tensor);
float* to_data = GetFloatRawDataSource(to_tensor);
int number_of_elements = helpers::CalculateNumElements(to_tensor);
memcpy(to_data, from_data, number_of_elements * sizeof(float));
}
TfLiteStatus EvalArithmeticOp(int subgraph_index, int node_index) {
auto node_input_tensors = node_input_tensors_[subgraph_index];
auto node_output_tensors = node_output_tensors_[subgraph_index];
auto builtin_codes = builtin_codes_[subgraph_index];
float* input1 = GetFloatRawDataSource(node_input_tensors[node_index][0]);
float* input2 = GetFloatRawDataSource(node_input_tensors[node_index][1]);
float* output = GetFloatRawDataSource(node_output_tensors[node_index][0]);
int number_of_elements =
helpers::CalculateNumElements(node_output_tensors[node_index][0]);
for (int i = 0; i < number_of_elements; ++i) {
switch (builtin_codes[node_index]) {
case kTfLiteBuiltinAdd:
output[i] = input1[i] + input2[i];
break;
case kTfLiteBuiltinSub:
output[i] = input1[i] - input2[i];
break;
case kTfLiteBuiltinMul:
output[i] = input1[i] * input2[i];
break;
default:
return kTfLiteDelegateError;
}
}
return kTfLiteOk;
}
TfLiteStatus EvalComparisonOp(int subgraph_index, int node_index) {
auto node_input_tensors = node_input_tensors_[subgraph_index];
auto node_output_tensors = node_output_tensors_[subgraph_index];
auto builtin_codes = builtin_codes_[subgraph_index];
float* input1 = GetFloatRawDataSource(node_input_tensors[node_index][0]);
float* input2 = GetFloatRawDataSource(node_input_tensors[node_index][1]);
int* output = GetIntRawDataSource(node_output_tensors[node_index][0]);
int number_of_elements =
helpers::CalculateNumElements(node_output_tensors[node_index][0]);
for (int i = 0; i < number_of_elements; ++i) {
switch (builtin_codes[node_index]) {
case kTfLiteBuiltinEqual:
output[i] = input1[i] == input2[i];
break;
default:
return kTfLiteDelegateError;
}
}
return kTfLiteOk;
}
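  // Runs the condition subgraph repeatedly; while it yields true, feeds the
  // body output back into the condition input, and once it yields false
  // copies the body output to the WHILE node's output.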
TfLiteStatus EvalWhileOp(int while_subgraph_index, int while_node_index) {
auto branch_indices =
control_flow_branch_indices_[while_subgraph_index][while_node_index];
int cond_subgraph_index = branch_indices[0];
int body_subgraph_index = branch_indices[1];
int last_cond_node_index =
node_output_tensors_[cond_subgraph_index].size() - 1;
int last_body_node_index =
node_output_tensors_[body_subgraph_index].size() - 1;
CopyRawDataSource(
node_input_tensors_[while_subgraph_index][while_node_index][0],
node_input_tensors_[cond_subgraph_index][0][0]);
TfLiteStatus status;
while (true) {
status = EvalSubgraph(cond_subgraph_index);
if (status != kTfLiteOk) return status;
int* cond_output = GetIntRawDataSource(
node_output_tensors_[cond_subgraph_index][last_cond_node_index][0]);
int number_of_elements = helpers::CalculateNumElements(
node_output_tensors_[cond_subgraph_index][last_cond_node_index][0]);
bool condition = true;
for (int i = 0; i < number_of_elements; ++i) {
if (cond_output[i] == 0) {
condition = false;
break;
}
}
if (!condition) {
CopyRawDataSource(
node_output_tensors_[body_subgraph_index][last_body_node_index][0],
node_output_tensors_[while_subgraph_index][while_node_index][0]);
break;
}
CopyRawDataSource(node_input_tensors_[cond_subgraph_index][0][0],
node_input_tensors_[body_subgraph_index][0][0]);
status = EvalSubgraph(body_subgraph_index);
if (status != kTfLiteOk) return status;
CopyRawDataSource(
node_output_tensors_[body_subgraph_index][last_body_node_index][0],
node_input_tensors_[cond_subgraph_index][0][0]);
}
return kTfLiteOk;
}
TfLiteStatus EvalSubgraph(int subgraph_index) {
TfLiteStatus status;
for (int i = 0; i < node_input_tensors_[subgraph_index].size(); ++i) {
status = EvalNode(subgraph_index, i);
if (status != kTfLiteOk) return status;
}
return kTfLiteOk;
}
TfLiteStatus Eval(TfLiteOpaqueContext* context,
TfLiteOpaqueNode* delegated_node) override {
return EvalSubgraph(kTopLevelSubgraphIndex);
}
TfLiteStatus EvalNode(int subgraph_index, int node_index) {
TfLiteStatus status;
switch (builtin_codes_[subgraph_index][node_index]) {
case kTfLiteBuiltinAdd:
case kTfLiteBuiltinSub:
case kTfLiteBuiltinMul:
status = EvalArithmeticOp(subgraph_index, node_index);
break;
case kTfLiteBuiltinEqual:
status = EvalComparisonOp(subgraph_index, node_index);
break;
case kTfLiteBuiltinWhile:
status = EvalWhileOp(subgraph_index, node_index);
break;
default:
return kTfLiteDelegateError;
}
if (status != kTfLiteOk) {
return status;
}
return kTfLiteOk;
}
private:
absl::flat_hash_map<int, absl::flat_hash_map<int, std::vector<int>>>
control_flow_branch_indices_;
absl::flat_hash_map<int, std::vector<std::vector<const TfLiteOpaqueTensor*>>>
node_input_tensors_;
absl::flat_hash_set<const TfLiteOpaqueTensor*> node_input_tensors_set_;
absl::flat_hash_map<int, std::vector<std::vector<const TfLiteOpaqueTensor*>>>
node_output_tensors_;
absl::flat_hash_set<const TfLiteOpaqueTensor*> node_output_tensors_set_;
absl::flat_hash_set<const TfLiteOpaqueTensor*> external_tensors_;
absl::flat_hash_map<const TfLiteOpaqueTensor*, std::vector<float>>
internal_float_tensors_memory_;
absl::flat_hash_map<const TfLiteOpaqueTensor*, std::vector<int>>
internal_int_tensors_memory_;
TfLiteOpaqueContext* context_;
absl::flat_hash_map<int, std::vector<int>> builtin_codes_;
};
}
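// Recursively walks the subgraphs reachable from subgraph_index through WHILE
// nodes, recording which callee subgraphs contain only supported ops.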
TfLiteStatus SampleStableDelegate::ComputeCompatibleCalleeSubgraphs(
TfLiteOpaqueContext* opaque_context, int subgraph_index) {
TfLiteStatus status;
TfLiteOpaqueContext* current_context;
status = TfLiteOpaqueContextAcquireSubgraphContext(
opaque_context, subgraph_index, ¤t_context);
if (status != kTfLiteOk) {
return status;
}
TfLiteIntArray* execution_plan;
status =
TfLiteOpaqueContextGetExecutionPlan(current_context, &execution_plan);
if (status != kTfLiteOk) {
return status;
}
bool is_compatible_subgraph = true;
for (int i = 0; i < execution_plan->size; ++i) {
int node_index = execution_plan->data[i];
TfLiteOpaqueNode* node = nullptr;
TfLiteOperator* registration = nullptr;
status = TfLiteOpaqueContextGetNodeAndRegistration(
current_context, node_index, &node, ®istration);
if (status != kTfLiteOk) {
return status;
}
TfLiteBuiltinOperator builtin_operator =
TfLiteOperatorGetBuiltInCode(registration);
if (builtin_operator == kTfLiteBuiltinWhile) {
void* builtin_data = TfLiteOpaqueNodeGetBuiltinData(node);
const auto* op_data =
reinterpret_cast<const TfLiteWhileParams*>(builtin_data);
AddCalleeSubgraphToCallerSubgraph(op_data->cond_subgraph_index,
subgraph_index);
ComputeCompatibleCalleeSubgraphs(opaque_context,
op_data->cond_subgraph_index);
AddCalleeSubgraphToCallerSubgraph(op_data->body_subgraph_index,
subgraph_index);
ComputeCompatibleCalleeSubgraphs(opaque_context,
op_data->body_subgraph_index);
}
if (!IsNodeSupportedByDelegate(registration, node, current_context)) {
is_compatible_subgraph = false;
}
}
if (is_compatible_subgraph) {
AddCompatibleCalleeSubgraph(subgraph_index);
}
status =
TfLiteOpaqueContextReleaseSubgraphContext(opaque_context, subgraph_index);
if (status != kTfLiteOk) {
return status;
}
return kTfLiteOk;
}
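// Marks callee subgraphs as delegation-skippable when every branch of their
// control-flow caller can be handled by this delegate.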
TfLiteStatus SampleStableDelegate::PrepareControlFlow(
TfLiteOpaqueContext* opaque_context) {
constexpr int kPrimarySubgraphIndex = 0;
ComputeCompatibleCalleeSubgraphs(opaque_context, kPrimarySubgraphIndex);
for (const auto& [caller_subgraph_index, callee_subgraph_indices] :
control_flow_subgraph_tree_) {
if (callee_subgraph_indices.empty()) {
continue;
}
bool callee_subgraphs_all_delegatable = true;
for (int callee_subgraph_index : callee_subgraph_indices) {
if (!IsCompatibleCalleeSubgraph(callee_subgraph_index)) {
callee_subgraphs_all_delegatable = false;
}
}
if (!callee_subgraphs_all_delegatable) {
continue;
}
for (int callee_subgraph_index : callee_subgraph_indices) {
TfLiteOpaqueContextMarkSubgraphAsDelegationSkippable(
opaque_context, callee_subgraph_index);
}
}
return kTfLiteOk;
}
int helpers::CalculateNumElements(const TfLiteOpaqueTensor* opaque_tensor) {
int total_num_elements = 1;
for (int i = 0; i < TfLiteOpaqueTensorNumDims(opaque_tensor); ++i) {
total_num_elements *= TfLiteOpaqueTensorDim(opaque_tensor, i);
}
return total_num_elements;
}
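// Accepts ADD/SUB/MUL without fused activation, EQUAL, and WHILE whose
// branches are compatible, restricted to float32 inputs of matching shape.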
bool SampleStableDelegate::IsNodeSupportedByDelegate(
const TfLiteOperator* registration_external, const TfLiteOpaqueNode* node,
TfLiteOpaqueContext* context) const {
TfLiteBuiltinOperator builtin_operator =
TfLiteOperatorGetBuiltInCode(registration_external);
void* builtin_data = TfLiteOpaqueNodeGetBuiltinData(node);
switch (builtin_operator) {
case kTfLiteBuiltinAdd: {
TfLiteAddParams* params =
reinterpret_cast<TfLiteAddParams*>(builtin_data);
if (!params || params->activation != kTfLiteActNone) return false;
break;
}
case kTfLiteBuiltinSub: {
TfLiteSubParams* params =
reinterpret_cast<TfLiteSubParams*>(builtin_data);
if (!params || params->activation != kTfLiteActNone) return false;
break;
}
case kTfLiteBuiltinMul: {
TfLiteMulParams* params =
reinterpret_cast<TfLiteMulParams*>(builtin_data);
if (!params || params->activation != kTfLiteActNone) return false;
break;
}
case kTfLiteBuiltinEqual:
break;
case kTfLiteBuiltinWhile: {
TfLiteWhileParams* params =
reinterpret_cast<TfLiteWhileParams*>(builtin_data);
if (!params || !IsCompatibleCalleeSubgraph(params->cond_subgraph_index) ||
!IsCompatibleCalleeSubgraph(params->body_subgraph_index)) {
return false;
}
break;
}
default:
return false;
}
if (builtin_operator == kTfLiteBuiltinWhile) {
if (TfLiteOpaqueNodeNumberOfInputs(node) != 1) return false;
const TfLiteOpaqueTensor* tensor =
TfLiteOpaqueNodeGetInput(context, node, 0);
if (!tensor || TfLiteOpaqueTensorType(tensor) != kTfLiteFloat32)
return false;
} else {
if (TfLiteOpaqueNodeNumberOfInputs(node) != 2) return false;
const TfLiteOpaqueTensor* tensor_1 =
TfLiteOpaqueNodeGetInput(context, node, 0);
const TfLiteOpaqueTensor* tensor_2 =
TfLiteOpaqueNodeGetInput(context, node, 1);
if (!tensor_1 || TfLiteOpaqueTensorType(tensor_1) != kTfLiteFloat32)
return false;
if (!tensor_2 || TfLiteOpaqueTensorType(tensor_2) != kTfLiteFloat32)
return false;
if (TfLiteOpaqueTensorNumDims(tensor_1) !=
TfLiteOpaqueTensorNumDims(tensor_2))
return false;
for (int i = 0; i < TfLiteOpaqueTensorNumDims(tensor_1); ++i) {
if (TfLiteOpaqueTensorDim(tensor_1, i) !=
TfLiteOpaqueTensorDim(tensor_2, i)) {
return false;
}
}
}
return true;
}
TfLiteStatus SampleStableDelegate::Initialize(TfLiteOpaqueContext* context) {
if (!has_been_initialized_) {
PrepareControlFlow(context);
has_been_initialized_ = true;
}
return kTfLiteOk;
}
const char* SampleStableDelegate::Name() const {
return kSampleStableDelegateName;
}
std::unique_ptr<SimpleOpaqueDelegateKernelInterface>
SampleStableDelegate::CreateDelegateKernelInterface() {
return std::make_unique<SampleStableDelegateKernel>();
}
}
} | #include "tensorflow/lite/delegates/utils/experimental/sample_stable_delegate/sample_stable_delegate_with_control_flow.h"
#include <cstddef>
#include <memory>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/lite/c/c_api.h"
#include "tensorflow/lite/c/c_api_opaque.h"
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace {
TEST(SampleStableDelegate, StaticallyLinkedDelegateAndModelWithAdd) {
tflite::TfLiteOpaqueDelegateUniquePtr opaque_delegate =
tflite::TfLiteOpaqueDelegateFactory::Create(
std::make_unique<tflite::example::SampleStableDelegate>());
ASSERT_NE(opaque_delegate, nullptr);
TfLiteModel* model =
TfLiteModelCreateFromFile("tensorflow/lite/testdata/add.bin");
ASSERT_NE(model, nullptr);
TfLiteInterpreterOptions* options = TfLiteInterpreterOptionsCreate();
ASSERT_NE(options, nullptr);
TfLiteInterpreterOptionsAddDelegate(options, opaque_delegate.get());
TfLiteInterpreter* interpreter = TfLiteInterpreterCreate(model, options);
ASSERT_NE(interpreter, nullptr);
TfLiteInterpreterOptionsDelete(options);
ASSERT_EQ(TfLiteInterpreterAllocateTensors(interpreter), kTfLiteOk);
TfLiteTensor* input_tensor =
TfLiteInterpreterGetInputTensor(interpreter, 0);
ASSERT_NE(input_tensor, nullptr);
const float kTensorCellValue = 3.f;
int64_t n = tflite::NumElements(input_tensor);
std::vector<float> input(n, kTensorCellValue);
ASSERT_EQ(TfLiteTensorCopyFromBuffer(input_tensor, input.data(),
input.size() * sizeof(float)),
kTfLiteOk);
ASSERT_EQ(TfLiteInterpreterInvoke(interpreter), kTfLiteOk);
const TfLiteTensor* output_tensor =
TfLiteInterpreterGetOutputTensor(interpreter, 0);
ASSERT_NE(output_tensor, nullptr);
std::vector<float> output(n, 0);
ASSERT_EQ(TfLiteTensorCopyToBuffer(output_tensor, output.data(),
output.size() * sizeof(float)),
kTfLiteOk);
for (int i = 0; i < output.size(); ++i) {
EXPECT_EQ(output[i], kTensorCellValue * 3);
}
TfLiteInterpreterDelete(interpreter);
TfLiteModelDelete(model);
}
TEST(SampleStableDelegate, StaticallyLinkedDelegateAndModelWithSub) {
tflite::TfLiteOpaqueDelegateUniquePtr opaque_delegate =
tflite::TfLiteOpaqueDelegateFactory::Create(
std::make_unique<tflite::example::SampleStableDelegate>());
ASSERT_NE(opaque_delegate, nullptr);
TfLiteModel* model =
TfLiteModelCreateFromFile("tensorflow/lite/testdata/sub.bin");
ASSERT_NE(model, nullptr);
TfLiteInterpreterOptions* options = TfLiteInterpreterOptionsCreate();
ASSERT_NE(options, nullptr);
TfLiteInterpreterOptionsAddDelegate(options, opaque_delegate.get());
TfLiteInterpreter* interpreter = TfLiteInterpreterCreate(model, options);
ASSERT_NE(interpreter, nullptr);
TfLiteInterpreterOptionsDelete(options);
ASSERT_EQ(TfLiteInterpreterAllocateTensors(interpreter), kTfLiteOk);
TfLiteTensor* input_tensor_0 =
TfLiteInterpreterGetInputTensor(interpreter, 0);
ASSERT_NE(input_tensor_0, nullptr);
const float kTensor0CellValue = 3.f;
int64_t n = tflite::NumElements(input_tensor_0);
std::vector<float> input_0(n, kTensor0CellValue);
ASSERT_EQ(TfLiteTensorCopyFromBuffer(input_tensor_0, input_0.data(),
input_0.size() * sizeof(float)),
kTfLiteOk);
ASSERT_EQ(TfLiteInterpreterAllocateTensors(interpreter), kTfLiteOk);
TfLiteTensor* input_tensor_1 =
TfLiteInterpreterGetInputTensor(interpreter, 1);
ASSERT_NE(input_tensor_1, nullptr);
n = tflite::NumElements(input_tensor_1);
const float kTensor1CellValue = 2.f;
std::vector<float> input_1(n, kTensor1CellValue);
ASSERT_EQ(TfLiteTensorCopyFromBuffer(input_tensor_1, input_1.data(),
input_1.size() * sizeof(float)),
kTfLiteOk);
ASSERT_EQ(TfLiteInterpreterInvoke(interpreter), kTfLiteOk);
const TfLiteTensor* output_tensor =
TfLiteInterpreterGetOutputTensor(interpreter, 0);
ASSERT_NE(output_tensor, nullptr);
std::vector<float> output(n, 0);
ASSERT_EQ(TfLiteTensorCopyToBuffer(output_tensor, output.data(),
output.size() * sizeof(float)),
kTfLiteOk);
for (int i = 0; i < output.size(); ++i) {
EXPECT_EQ(output[i], kTensor0CellValue - kTensor1CellValue);
}
TfLiteInterpreterDelete(interpreter);
TfLiteModelDelete(model);
}
TEST(SampleStableDelegate, StaticallyLinkedDelegateAndModelWithNestedWhile) {
tflite::TfLiteOpaqueDelegateUniquePtr opaque_delegate =
tflite::TfLiteOpaqueDelegateFactory::Create(
std::make_unique<tflite::example::SampleStableDelegate>());
ASSERT_NE(opaque_delegate, nullptr);
TfLiteModel* model = TfLiteModelCreateFromFile(
"tensorflow/lite/testdata/nested_while.bin");
ASSERT_NE(model, nullptr);
TfLiteInterpreterOptions* options = TfLiteInterpreterOptionsCreate();
ASSERT_NE(options, nullptr);
TfLiteInterpreterOptionsAddDelegate(options, opaque_delegate.get());
TfLiteInterpreter* interpreter = TfLiteInterpreterCreate(model, options);
ASSERT_NE(interpreter, nullptr);
TfLiteInterpreterOptionsDelete(options);
ASSERT_EQ(TfLiteInterpreterAllocateTensors(interpreter), kTfLiteOk);
TfLiteTensor* input_tensor =
TfLiteInterpreterGetInputTensor(interpreter, 0);
ASSERT_NE(input_tensor, nullptr);
const float kTensorCellValue = 1.f;
int64_t n = tflite::NumElements(input_tensor);
std::vector<float> input(n, kTensorCellValue);
ASSERT_EQ(TfLiteTensorCopyFromBuffer(input_tensor, input.data(),
input.size() * sizeof(float)),
kTfLiteOk);
ASSERT_EQ(TfLiteInterpreterInvoke(interpreter), kTfLiteOk);
const TfLiteTensor* output_tensor =
TfLiteInterpreterGetOutputTensor(interpreter, 0);
ASSERT_NE(output_tensor, nullptr);
std::vector<float> output(n, 0);
ASSERT_EQ(TfLiteTensorCopyToBuffer(output_tensor, output.data(),
output.size() * sizeof(float)),
kTfLiteOk);
for (int i = 0; i < output.size(); ++i) {
EXPECT_EQ(output[i], kTensorCellValue * 2);
}
TfLiteInterpreterDelete(interpreter);
TfLiteModelDelete(model);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/utils/experimental/sample_stable_delegate/sample_stable_delegate_with_control_flow.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/utils/experimental/sample_stable_delegate/sample_stable_delegate_with_control_flow_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
9e6a59cb-d456-47b0-8f26-afe5f12ed80f | cpp | tensorflow/tensorflow | lower_cluster_to_runtime_ops | tensorflow/compiler/mlir/tensorflow/transforms/host_runtime/lower_cluster_to_runtime_ops.cc | tensorflow/compiler/mlir/tensorflow/transforms/host_runtime/lower_cluster_to_runtime_ops_test.cc | #include <memory>
#include <string>
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "llvm/ADT/StringRef.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Pass/PassRegistry.h"
#include "mlir/Support/LogicalResult.h"
#include "mlir/Transforms/Passes.h"
#include "tensorflow/compiler/jit/flags.h"
#include "tensorflow/compiler/mlir/tensorflow/transforms/host_runtime/runtime_passes.h"
#include "tensorflow/compiler/mlir/tensorflow/transforms/passes.h"
#include "tensorflow/compiler/mlir/tensorflow/transforms/sparsecore/sparsecore_passes.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/attribute_utils.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/data_dumper_logger_config.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/dump_mlir_util.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/error_util.h"
#include "xla/tsl/framework/device_type.h"
#include "tensorflow/core/framework/metrics.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/platform/error_payloads.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/tpu/tpu_defs.h"
#include "tensorflow/core/util/debug_data_dumper.h"
#include "tsl/platform/error_logging.h"
#include "tsl/platform/errors.h"
namespace tensorflow {
namespace tfrt_compiler {
namespace {
using mlir::LogicalResult;
using mlir::OpPassManager;
using mlir::PassManager;
using mlir::func::FuncOp;
using mlir::TF::StandardPipelineOptions;
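// Dumps the IR before and after every pass in `dump_group_name`; this
// disables multithreading on the context so pass printing is deterministic.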
void EnablePassIRPrinting(PassManager& pm, const std::string& dump_group_name,
llvm::StringRef module_name) {
pm.getContext()->disableMultithreading();
pm.enableIRPrinting(std::make_unique<::tensorflow::DataDumperLoggerConfig>(
[module_name, dump_group_name](const std::string& pass_tag_name,
mlir::Operation* op) {
return DEBUG_DATA_DUMPER()->GetDumpFilename(
module_name.str(), dump_group_name, pass_tag_name);
},
"",
true));
pm.enableTiming();
}
}
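// Pipeline that lowers tf_device.cluster ops to TPU runtime ops such as
// TPUCompile.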
void AddTPULowerClusterToRuntimeOpsPassPipeline(OpPassManager& pm,
llvm::StringRef module_name) {
pm.addPass(mlir::TFTPU::CreateTPURewritePass(module_name));
pm.addPass(mlir::createSymbolDCEPass());
pm.addNestedPass<FuncOp>(
mlir::TFDevice::CreateReplicateInvariantOpHoistingPass());
pm.addNestedPass<FuncOp>(mlir::TFDevice::CreateEmbeddingProgramKeyPass());
pm.addPass(mlir::TFTPU::CreateTPUMergeVariablesWithExecutePass());
pm.addNestedPass<FuncOp>(
mlir::TFTPU::CreateExtractTPUCopyWithDynamicShapeOpPass());
pm.addNestedPass<FuncOp>(
mlir::TFTPU::CreateTPUColocateCompositeResourceOps());
if (tensorflow::GetMlirCommonFlags()
->tf_mlir_enable_tpu_variable_runtime_reformatting_pass) {
pm.addPass(mlir::TFTPU::CreateTPUVariableRuntimeReformattingPass());
}
}
void AddNonTPULowerClusterToRuntimeOpsPassPipeline(
OpPassManager& pm, llvm::StringRef module_name) {
pm.addPass(mlir::TFDevice::CreateXlaRewritePass());
pm.addNestedPass<FuncOp>(mlir::createCanonicalizerPass());
pm.addNestedPass<FuncOp>(mlir::createCSEPass());
pm.addPass(mlir::createSymbolDCEPass());
}
void CreateTPULowerClusterToRuntimeOpsPassPipeline(
OpPassManager& pm, const StandardPipelineOptions& options) {
AddTPULowerClusterToRuntimeOpsPassPipeline(pm, "");
}
void CreateNonTPULowerClusterToRuntimeOpsPassPipeline(
OpPassManager& pm, const StandardPipelineOptions& options) {
AddNonTPULowerClusterToRuntimeOpsPassPipeline(pm, "");
}
tensorflow::Status RecordIfErrorStatus(const std::string error_prefix,
std::string bridge_type,
tsl::DeviceType device_type,
absl::Status status) {
if (status.ok()) {
return status;
}
VLOG(2) << error_prefix << " " << status;
  tensorflow::metrics::UpdateTfMlirBridgeFirstPhaseCounter(
      bridge_type, mlir::TF::kMlirPh1BridgeCounterV2,
      device_type.type_string(), false, "failure");
std::string bridge_subcomponent = "TFXLA_PHASE_ONE_MLIR_TPU_BRIDGE";
tsl::OkOrSetErrorCounterPayload(
tensorflow::core::platform::ErrorSourceProto::MLIR_BRIDGE_PHASE_1,
status);
if (device_type != DeviceType(DEVICE_TPU_XLA_JIT)) {
bridge_subcomponent = "TFXLA_PHASE_ONE_MLIR_CPU/GPU_BRIDGE";
}
tsl::error_logging::Log(mlir::TF::kBridgeComponent, bridge_subcomponent,
status.ToString())
.IgnoreError();
return status;
}
absl::Status RunLowerClusterToRuntimeOpsPassPipeline(
mlir::ModuleOp module, tsl::DeviceType xla_device_type,
llvm::StringRef module_name) {
PassManager runtime_lowering(module.getContext());
::tensorflow::applyTensorflowAndCLOptions(runtime_lowering);
if (xla_device_type == DeviceType(DEVICE_TPU_XLA_JIT)) {
AddTPULowerClusterToRuntimeOpsPassPipeline(runtime_lowering, module_name);
} else {
AddNonTPULowerClusterToRuntimeOpsPassPipeline(runtime_lowering,
module_name);
}
mlir::StatusScopedDiagnosticHandler diag_handler(
module.getContext(), false,
!VLOG_IS_ON(1));
if (VLOG_IS_ON(1) ||
DEBUG_DATA_DUMPER()->ShouldDump(module_name.str(), kDebugGroupMain)) {
::tensorflow::DumpMlirOpToFile(
DEBUG_DATA_DUMPER()->GetDumpFilename(module_name.str(), kDebugGroupMain,
"runtime_lowering_before"),
module, llvm::StringRef(), &runtime_lowering);
}
if (VLOG_IS_ON(2) || DEBUG_DATA_DUMPER()->ShouldDump(
module_name.str(), kDebugGroupRuntimeLowering)) {
EnablePassIRPrinting(runtime_lowering, kDebugGroupRuntimeLowering,
module_name);
}
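  // The raw LogicalResult is intentionally discarded; failures are consumed
  // and returned through diag_handler below.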
LogicalResult result = runtime_lowering.run(module);
(void)result;
if (VLOG_IS_ON(1) ||
DEBUG_DATA_DUMPER()->ShouldDump(module_name.str(), kDebugGroupMain)) {
::tensorflow::DumpMlirOpToFile(
DEBUG_DATA_DUMPER()->GetDumpFilename(module_name.str(), kDebugGroupMain,
"runtime_lowering_after"),
module, llvm::StringRef(), &runtime_lowering);
}
std::string bridge_type = xla_device_type == DeviceType(DEVICE_TPU_XLA_JIT)
? mlir::TF::kMlirPh1BridgeCounterReplicated
: mlir::TF::kMlirPh1BridgeCounterNonReplicated;
auto result_status = diag_handler.ConsumeStatus();
TF_RETURN_IF_ERROR(
RecordIfErrorStatus("lower_cluster_to_runtime",
bridge_type, xla_device_type, result_status));
return absl::OkStatus();
}
void RegisterTPULowerClusterToRuntimeOpsPassPipeline() {
static mlir::PassPipelineRegistration<StandardPipelineOptions> pipeline(
"tfrt-lower-cluster-to-runtime-ops-tpu",
"Run all the passes involved after the clustering transformations from "
"the TF2XLA Bridge. Takes as input a Module with tf_device.cluster ops "
"and outputs TFRT runtime ops such as TPUCompile. This pipeline is for "
"TPU.",
CreateTPULowerClusterToRuntimeOpsPassPipeline);
}
void RegisterNonTPULowerClusterToRuntimeOpsPassPipeline() {
static mlir::PassPipelineRegistration<StandardPipelineOptions> pipeline(
"tfrt-lower-cluster-to-runtime-ops-non-tpu",
"Run all the passes involved after the clustering transformations from "
"the TF2XLA Bridge. Takes as input a Module with tf_device.cluster ops "
"and outputs TFRT runtime ops such as XlaLaunch. This is for CPU/GPU",
CreateNonTPULowerClusterToRuntimeOpsPassPipeline);
}
}
} | #include "tensorflow/compiler/mlir/tensorflow/transforms/host_runtime/lower_cluster_to_runtime_ops.h"
#include <cstdint>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/DialectRegistry.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/IR/Visitors.h"
#include "mlir/Parser/Parser.h"
#include "mlir/Pass/PassManager.h"
#include "tensorflow/compiler/mlir/register_common_dialects.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_device.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/attribute_utils.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "xla/tsl/framework/device_type.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/lib/monitoring/cell_reader.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/resource_loader.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/tpu/tpu_defs.h"
#include "tensorflow/core/util/debug_data_dumper.h"
namespace tensorflow {
namespace tfrt_compiler {
namespace {
using mlir::DialectRegistry;
using mlir::MLIRContext;
using mlir::ModuleOp;
using mlir::OwningOpRef;
using mlir::func::FuncOp;
using ::tensorflow::monitoring::testing::CellReader;
using tsl::DeviceType;
std::string TestDataPath() {
return tensorflow::GetDataDependencyFilepath(
"tensorflow/compiler/mlir/tensorflow/transforms/host_runtime/testdata/");
}
static constexpr char kCompilationStreamz[] =
"/tensorflow/core/tf_mlir_bridge_first_phase_v2_count";
class LowerClusterToRuntimeOpsTest : public ::testing::Test {
public:
LowerClusterToRuntimeOpsTest() {
mlir::RegisterCommonToolingDialects(registry_);
context_.appendDialectRegistry(registry_);
context_.loadAllAvailableDialects();
env_ = Env::Default();
test_group_name_ = "TestGroup";
test_dir_ = testing::TmpDir();
setenv("TF_DUMP_GRAPH_PREFIX", test_dir_.c_str(), 1);
}
absl::Status CreateMlirModule(std::string mlir_module_filename) {
std::string mlir_module_path = TestDataPath() + mlir_module_filename;
mlir_module_ =
mlir::parseSourceFile<mlir::ModuleOp>(mlir_module_path, &context_);
if (!mlir_module_) {
return absl::Status(
absl::StatusCode::kNotFound,
absl::StrCat("Could not find MLIR module at ", mlir_module_path));
}
return absl::OkStatus();
}
DialectRegistry registry_;
MLIRContext context_;
OwningOpRef<mlir::ModuleOp> mlir_module_;
Env* env_;
std::string test_dir_;
std::string test_group_name_;
};
TEST_F(LowerClusterToRuntimeOpsTest, SanityCheck) {
TF_ASSERT_OK(CreateMlirModule("empty_func.mlir"));
TF_EXPECT_OK(RunLowerClusterToRuntimeOpsPassPipeline(
*mlir_module_, DeviceType(DEVICE_TPU_XLA_JIT)));
}
TEST_F(LowerClusterToRuntimeOpsTest, LowersClusterOpsTPU) {
TF_ASSERT_OK(CreateMlirModule("basic_cluster.mlir"));
TF_EXPECT_OK(RunLowerClusterToRuntimeOpsPassPipeline(
*mlir_module_, DeviceType(DEVICE_TPU_XLA_JIT)));
FuncOp main = mlir_module_->lookupSymbol<FuncOp>("main");
ASSERT_TRUE(main);
bool has_cluster_op = false;
main.walk([&](mlir::tf_device::ClusterOp) {
has_cluster_op = true;
return mlir::WalkResult::interrupt();
});
EXPECT_FALSE(has_cluster_op);
}
TEST_F(LowerClusterToRuntimeOpsTest, LowersClusterOpsCPU) {
TF_ASSERT_OK(CreateMlirModule("basic_cluster.mlir"));
TF_EXPECT_OK(RunLowerClusterToRuntimeOpsPassPipeline(
*mlir_module_, DeviceType(DEVICE_CPU_XLA_JIT)));
FuncOp main = mlir_module_->lookupSymbol<FuncOp>("main");
ASSERT_TRUE(main);
bool has_cluster_op = false;
main.walk([&](mlir::tf_device::ClusterOp) {
has_cluster_op = true;
return mlir::WalkResult::interrupt();
});
EXPECT_FALSE(has_cluster_op);
}
TEST_F(LowerClusterToRuntimeOpsTest, LowersClusterOpsGPU) {
TF_ASSERT_OK(CreateMlirModule("basic_cluster.mlir"));
TF_EXPECT_OK(RunLowerClusterToRuntimeOpsPassPipeline(
*mlir_module_, DeviceType(DEVICE_GPU_XLA_JIT)));
FuncOp main = mlir_module_->lookupSymbol<FuncOp>("main");
ASSERT_TRUE(main);
bool has_cluster_op = false;
main.walk([&](mlir::tf_device::ClusterOp) {
has_cluster_op = true;
return mlir::WalkResult::interrupt();
});
EXPECT_FALSE(has_cluster_op);
}
TEST_F(LowerClusterToRuntimeOpsTest, ErrorsWithBadCluster) {
CellReader<int64_t> compilation_status(kCompilationStreamz);
TF_ASSERT_OK(CreateMlirModule("malformed_cluster.mlir"));
EXPECT_FALSE(RunLowerClusterToRuntimeOpsPassPipeline(
*mlir_module_, DeviceType(DEVICE_TPU_XLA_JIT))
.ok());
EXPECT_EQ(
compilation_status.Delta(mlir::TF::kMlirPh1BridgeCounterReplicated,
mlir::TF::kMlirPh1BridgeCounterV2, "XLA_TPU_JIT",
"fallback_disabled", "failure"),
1);
}
TEST_F(LowerClusterToRuntimeOpsTest, DumpsPipelinePasses) {
std::vector<std::string> files;
TF_ASSERT_OK(env_->GetChildren(test_dir_, &files));
EXPECT_THAT(files, ::testing::IsEmpty());
setenv("TF_DUMP_GRAPH_NAME_FILTER", "*", 1);
setenv("TF_DUMP_GRAPH_GROUPS", "main,runtime_lowering", 1);
DEBUG_DATA_DUMPER()->LoadEnvvars();
TF_ASSERT_OK(CreateMlirModule("basic_cluster.mlir"));
TF_EXPECT_OK(RunLowerClusterToRuntimeOpsPassPipeline(
*mlir_module_, DeviceType(DEVICE_TPU_XLA_JIT)));
TF_ASSERT_OK(env_->GetChildren(test_dir_, &files));
EXPECT_THAT(files, ::testing::SizeIs(15));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tensorflow/transforms/host_runtime/lower_cluster_to_runtime_ops.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tensorflow/transforms/host_runtime/lower_cluster_to_runtime_ops_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
f20f0ac8-614d-4be5-a30f-2b86a1eedae3 | cpp | tensorflow/tensorflow | snapshot_chunk_provider | tensorflow/core/data/service/snapshot/snapshot_chunk_provider.cc | tensorflow/core/data/service/snapshot/snapshot_chunk_provider_test.cc | #include "tensorflow/core/data/service/snapshot/snapshot_chunk_provider.h"
#include <cstdint>
#include <functional>
#include <optional>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/base/thread_annotations.h"
#include "absl/container/btree_set.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/strings/str_split.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
#include "absl/time/time.h"
#include "xla/tsl/protobuf/status.pb.h"
#include "tensorflow/core/data/service/snapshot/file_utils.h"
#include "tensorflow/core/data/service/snapshot/path_utils.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/path.h"
#include "tsl/platform/retrying_utils.h"
#include "tsl/platform/status_to_from_proto.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/tstring.h"
namespace tensorflow {
namespace data {
namespace {
constexpr char kChunksRead[] = "chunks_read";
constexpr absl::string_view kSetElementDelimiter = ",";
Tensor ConvertToTensor(absl::string_view s) {
Tensor tensor(DT_STRING, TensorShape({}));
tensor.scalar<tsl::tstring>()() = tsl::tstring(s);
return tensor;
}
std::string AbsPath(absl::string_view snapshot_path, absl::string_view chunk) {
return tsl::io::JoinPath(CommittedChunksDirectory(snapshot_path), chunk);
}
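// Sleeps with exponential backoff derived from the retry count; the first
// attempt does not sleep.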
void Backoff(int num_retries, tsl::Env* env) {
if (num_retries >= 1) {
absl::Duration retry_backoff = tsl::ComputeRetryBackoff(num_retries - 1);
env->SleepForMicroseconds(absl::ToInt64Microseconds(retry_backoff));
}
}
}
SnapshotChunkProvider::SnapshotChunkProvider(absl::string_view snapshot_path,
tsl::Env* env)
: snapshot_path_(snapshot_path), env_(env) {}
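// Blocks, with backoff between polls, until an unread chunk is available, the
// snapshot is done, or an error status has been committed.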
absl::Status SnapshotChunkProvider::GetNext(Tensor* split, bool* end_of_splits)
ABSL_LOCKS_EXCLUDED(mu_) {
for (int num_retries = 0;; ++num_retries) {
Backoff(num_retries, env_);
absl::MutexLock l(&mu_);
TF_RETURN_IF_ERROR(snapshot_state_.status);
if (!chunks_unread_.empty()) {
std::string next_chunk = *chunks_unread_.begin();
chunks_read_.insert(next_chunk);
chunks_unread_.erase(next_chunk);
*split = ConvertToTensor(AbsPath(snapshot_path_, next_chunk));
*end_of_splits = false;
return absl::OkStatus();
}
if (snapshot_state_.snapshot_is_done) {
*end_of_splits = true;
return absl::OkStatus();
}
TF_RETURN_IF_ERROR(UpdateSnapshot());
}
}
absl::Status SnapshotChunkProvider::UpdateSnapshot()
ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
TF_ASSIGN_OR_RETURN(snapshot_state_, GetSnapshotState());
TF_RETURN_IF_ERROR(snapshot_state_.status);
TF_ASSIGN_OR_RETURN(std::vector<std::string> chunks, GetAvailableChunks());
for (const std::string& chunk : chunks) {
if (!chunks_read_.contains(chunk)) {
chunks_unread_.insert(std::string(chunk));
}
}
return absl::OkStatus();
}
absl::StatusOr<SnapshotChunkProvider::SnapshotState>
SnapshotChunkProvider::GetSnapshotState() {
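  // An ERROR file takes precedence over DONE: it carries a serialized non-OK
  // status for the snapshot.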
std::string error_file_path = SnapshotErrorFilePath(snapshot_path_);
if (env_->FileExists(error_file_path).ok()) {
StatusProto status_proto;
TF_RETURN_IF_ERROR(ReadTextProto(env_, error_file_path, &status_proto));
absl::Status status = tsl::StatusFromProto(status_proto);
if (status.ok()) {
return absl::InternalError(absl::StrCat(
"Unexpected snapshot ERROR file contains an OK status at ",
error_file_path, "."));
}
return SnapshotState(status);
}
return SnapshotState(
env_->FileExists(SnapshotDoneFilePath(snapshot_path_)).ok());
}
absl::StatusOr<std::vector<std::string>>
SnapshotChunkProvider::GetAvailableChunks() {
absl::StatusOr<std::vector<std::string>> status_or_chunks =
GetChildren(CommittedChunksDirectory(snapshot_path_), env_);
if (status_or_chunks.ok()) {
return *std::move(status_or_chunks);
} else if (absl::IsNotFound(status_or_chunks.status())) {
return std::vector<std::string>{};
}
return status_or_chunks.status();
}
absl::Status SnapshotChunkProvider::Reset() {
absl::MutexLock l(&mu_);
chunks_read_.clear();
chunks_unread_.clear();
return UpdateSnapshot();
}
absl::Status SnapshotChunkProvider::Save(
std::function<std::string(std::string)> full_name,
IteratorStateWriter* writer) {
absl::MutexLock l(&mu_);
TF_RETURN_IF_ERROR(
writer->WriteScalar(full_name(kChunksRead), SetToString(chunks_read_)));
return absl::OkStatus();
}
absl::Status SnapshotChunkProvider::Restore(
std::function<std::string(std::string)> full_name,
IteratorStateReader* reader) {
absl::MutexLock l(&mu_);
tsl::tstring chunks_read;
TF_RETURN_IF_ERROR(reader->ReadScalar(full_name(kChunksRead), &chunks_read));
chunks_read_ = SetFromString(chunks_read);
return UpdateSnapshot();
}
int64_t SnapshotChunkProvider::Cardinality() const {
return SnapshotChunksCardinality(snapshot_path_, env_);
}
void SnapshotChunkProvider::Cancel() {
absl::MutexLock l(&mu_);
if (snapshot_state_.snapshot_is_done || !snapshot_state_.status.ok()) {
return;
}
snapshot_state_.status = absl::CancelledError(
absl::StrCat("Cancelled loading tf.data snapshot at ", snapshot_path_));
VLOG(2) << snapshot_state_.status;
}
std::string SnapshotChunkProvider::SetToString(
const SnapshotChunkProvider::OrderedChunkSet& s) {
return absl::StrJoin(s, kSetElementDelimiter);
}
SnapshotChunkProvider::OrderedChunkSet SnapshotChunkProvider::SetFromString(
absl::string_view s) {
if (s.empty()) {
return {};
}
std::vector<std::string> split = absl::StrSplit(s, kSetElementDelimiter);
return OrderedChunkSet(split.begin(), split.end());
}
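// Orders chunks by chunk index first and stream index second so reads
// interleave across streams; unparseable names fall back to lexicographic
// order.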
bool SnapshotChunkProvider::ChunkOrder::operator()(
const std::string& chunk1, const std::string& chunk2) const {
absl::StatusOr<std::tuple<int64_t, int64_t, int64_t>> tokens1 =
ParseChunkFilename(chunk1);
absl::StatusOr<std::tuple<int64_t, int64_t, int64_t>> tokens2 =
ParseChunkFilename(chunk2);
if (!tokens1.status().ok()) {
LOG_EVERY_N_SEC(ERROR, 60) << "Failed to parse tf.data snapshot chunk file "
<< chunk1 << ": " << tokens1.status();
return chunk1 < chunk2;
}
if (!tokens2.status().ok()) {
LOG_EVERY_N_SEC(ERROR, 60) << "Failed to parse tf.data snapshot chunk file "
<< chunk2 << ": " << tokens2.status();
return chunk1 < chunk2;
}
auto [stream_index1, chunk_index1, num_records1] = *tokens1;
auto [stream_index2, chunk_index2, num_records2] = *tokens2;
if (chunk_index1 != chunk_index2) {
return chunk_index1 < chunk_index2;
}
return stream_index1 < stream_index2;
}
}
} | #include "tensorflow/core/data/service/snapshot/snapshot_chunk_provider.h"
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/memory/memory.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/synchronization/mutex.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/tsl/protobuf/status.pb.h"
#include "tensorflow/core/data/serialization_utils.h"
#include "tensorflow/core/data/service/snapshot/file_utils.h"
#include "tensorflow/core/data/service/snapshot/path_utils.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/variant_tensor_data.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/path.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/status_to_from_proto.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
#include "tsl/platform/tstring.h"
namespace tensorflow {
namespace data {
namespace {
using ::testing::ElementsAreArray;
using ::testing::HasSubstr;
using ::testing::IsEmpty;
using ::testing::UnorderedElementsAreArray;
using ::tsl::testing::IsOkAndHolds;
using ::tsl::testing::StatusIs;
absl::StatusOr<std::string> CreateSnapshotDirectory() {
std::string snapshot_path;
if (!tsl::Env::Default()->LocalTempFilename(&snapshot_path)) {
return absl::FailedPreconditionError(
"Failed to create local temp file for snapshot.");
}
TF_RETURN_IF_ERROR(tsl::Env::Default()->RecursivelyCreateDir(
CommittedChunksDirectory(snapshot_path)));
return snapshot_path;
}
absl::Status WriteChunk(absl::string_view snapshot_path,
absl::string_view chunk_file) {
return AtomicallyWriteStringToFile(
tsl::io::JoinPath(CommittedChunksDirectory(snapshot_path), chunk_file),
"", tsl::Env::Default());
}
absl::Status SetDone(absl::string_view snapshot_path) {
return AtomicallyWriteStringToFile(SnapshotDoneFilePath(snapshot_path), "",
tsl::Env::Default());
}
absl::Status SetStatus(absl::string_view snapshot_path,
const absl::Status& status) {
return AtomicallyWriteTextProto(SnapshotErrorFilePath(snapshot_path),
tsl::StatusToProto(status),
tsl::Env::Default());
}
absl::StatusOr<std::string> GetChunk(
SnapshotChunkProvider& snapshot_chunk_provider) {
Tensor split;
bool end_of_splits = false;
TF_RETURN_IF_ERROR(snapshot_chunk_provider.GetNext(&split, &end_of_splits));
if (end_of_splits) {
return absl::OutOfRangeError("No more available chunks.");
}
return split.unaligned_flat<tsl::tstring>().data()[0];
}
absl::StatusOr<std::vector<std::string>> GetAllChunks(
SnapshotChunkProvider& snapshot_chunk_provider) {
std::vector<std::string> chunks;
while (true) {
Tensor split;
bool end_of_splits = false;
TF_RETURN_IF_ERROR(snapshot_chunk_provider.GetNext(&split, &end_of_splits));
if (end_of_splits) {
return chunks;
}
chunks.push_back(split.unaligned_flat<tsl::tstring>().data()[0]);
}
return chunks;
}
std::vector<std::string> JoinPaths(absl::string_view snapshot_path,
const std::vector<std::string> chunks) {
std::vector<std::string> joined_chunks;
for (absl::string_view chunk : chunks) {
joined_chunks.push_back(
tsl::io::JoinPath(CommittedChunksDirectory(snapshot_path), chunk));
}
return joined_chunks;
}
std::string full_name(const std::string& name) {
return FullName("test", name);
}
absl::Status SaveAndRestore(SplitProvider& split_provider) {
VariantTensorDataWriter writer;
TF_RETURN_IF_ERROR(split_provider.Save(full_name, &writer));
std::vector<const VariantTensorData*> variants;
writer.GetData(&variants);
VariantTensorDataReader reader(variants);
TF_RETURN_IF_ERROR(split_provider.Restore(full_name, &reader));
return absl::OkStatus();
}
TEST(SnapshotChunkProviderTest, EmptySnapshot) {
TF_ASSERT_OK_AND_ASSIGN(std::string snapshot_path, CreateSnapshotDirectory());
TF_ASSERT_OK(SetDone(snapshot_path));
SnapshotChunkProvider snapshot_chunk_provider(snapshot_path,
tsl::Env::Default());
EXPECT_THAT(GetAllChunks(snapshot_chunk_provider), IsOkAndHolds(IsEmpty()));
EXPECT_THAT(GetAllChunks(snapshot_chunk_provider), IsOkAndHolds(IsEmpty()));
}
TEST(SnapshotChunkProviderTest, SingleReader) {
TF_ASSERT_OK_AND_ASSIGN(std::string snapshot_path, CreateSnapshotDirectory());
std::vector<std::string> chunks = {"chunk_4_4_4", "chunk_3_3_3",
"chunk_2_2_2", "chunk_1_1_1",
"chunk_0_0_0"};
for (absl::string_view chunk : chunks) {
TF_ASSERT_OK(WriteChunk(snapshot_path, chunk));
}
TF_ASSERT_OK(SetDone(snapshot_path));
SnapshotChunkProvider snapshot_chunk_provider(snapshot_path,
tsl::Env::Default());
absl::c_reverse(chunks);
EXPECT_THAT(GetAllChunks(snapshot_chunk_provider),
IsOkAndHolds(ElementsAreArray(JoinPaths(snapshot_path, chunks))));
}
TEST(SnapshotChunkProviderTest, Cardinality) {
TF_ASSERT_OK_AND_ASSIGN(std::string snapshot_path, CreateSnapshotDirectory());
TF_ASSERT_OK(WriteChunk(snapshot_path, "chunk_0_0_0"));
SnapshotChunkProvider snapshot_chunk_provider(snapshot_path,
tsl::Env::Default());
EXPECT_EQ(snapshot_chunk_provider.Cardinality(), kUnknownCardinality);
std::vector<std::string> chunks = {"chunk_1_1_1", "chunk_2_2_2",
"chunk_3_3_3", "chunk_4_4_4"};
for (absl::string_view chunk : chunks) {
TF_ASSERT_OK(WriteChunk(snapshot_path, chunk));
}
EXPECT_EQ(snapshot_chunk_provider.Cardinality(), kUnknownCardinality);
TF_ASSERT_OK(SetDone(snapshot_path));
EXPECT_EQ(snapshot_chunk_provider.Cardinality(), 5);
}
TEST(SnapshotChunkProviderTest, WaitForSnapshot) {
std::string snapshot_path;
ASSERT_TRUE(tsl::Env::Default()->LocalTempFilename(&snapshot_path));
absl::Mutex mu;
std::vector<std::string> result;
std::unique_ptr<tsl::Thread> reader_thread =
absl::WrapUnique(tsl::Env::Default()->StartThread(
{}, "Reader",
[&snapshot_path, &mu, &result]() {
SnapshotChunkProvider snapshot_chunk_provider(snapshot_path,
tsl::Env::Default());
TF_ASSERT_OK_AND_ASSIGN(std::vector<std::string> chunks,
GetAllChunks(snapshot_chunk_provider));
absl::MutexLock l(&mu);
result = std::move(chunks);
}));
{
absl::MutexLock l(&mu);
EXPECT_TRUE(result.empty());
}
TF_ASSERT_OK(tsl::Env::Default()->RecursivelyCreateDir(
CommittedChunksDirectory(snapshot_path)));
TF_ASSERT_OK(WriteChunk(snapshot_path, "chunk_0_0_0"));
TF_ASSERT_OK(SetDone(snapshot_path));
reader_thread.reset();
absl::MutexLock l(&mu);
EXPECT_THAT(result,
ElementsAreArray(JoinPaths(snapshot_path, {"chunk_0_0_0"})));
}
TEST(SnapshotChunkProviderTest, ConcurrentReadWrite) {
TF_ASSERT_OK_AND_ASSIGN(std::string snapshot_path, CreateSnapshotDirectory());
const int num_readers = 10;
absl::Mutex mu;
SnapshotChunkProvider snapshot_chunk_provider(snapshot_path,
tsl::Env::Default());
std::vector<std::string> result;
std::vector<std::unique_ptr<tsl::Thread>> reader_threads;
for (int i = 0; i < num_readers; ++i) {
reader_threads.push_back(absl::WrapUnique(tsl::Env::Default()->StartThread(
{}, absl::StrCat("Reader_", i),
[&snapshot_chunk_provider, &mu, &result]() {
while (true) {
tsl::Env::Default()->SleepForMicroseconds(25);
Tensor split;
bool end_of_splits = false;
TF_ASSERT_OK(
snapshot_chunk_provider.GetNext(&split, &end_of_splits));
if (end_of_splits) {
break;
}
absl::MutexLock l(&mu);
result.push_back(split.unaligned_flat<tsl::tstring>().data()[0]);
}
})));
}
int num_streams = 10, num_chunks_per_stream = 50;
std::vector<std::unique_ptr<tsl::Thread>> stream_threads;
for (int i = 0; i < num_streams; ++i) {
stream_threads.push_back(absl::WrapUnique(tsl::Env::Default()->StartThread(
{}, absl::StrCat("Writer_", i),
[&snapshot_path, num_chunks_per_stream, i]() {
for (int j = 0; j < num_chunks_per_stream; ++j) {
std::string filename = absl::StrCat("chunk_", i, "_", j, "_1");
TF_ASSERT_OK(WriteChunk(snapshot_path, filename));
tsl::Env::Default()->SleepForMicroseconds(35);
}
})));
}
stream_threads.clear();
TF_ASSERT_OK(SetDone(snapshot_path));
reader_threads.clear();
std::vector<std::string> expected;
for (int i = 0; i < num_streams; ++i) {
for (int j = 0; j < num_chunks_per_stream; ++j) {
expected.push_back(absl::StrCat("chunk_", i, "_", j, "_1"));
}
}
EXPECT_THAT(result,
UnorderedElementsAreArray(JoinPaths(snapshot_path, expected)));
}
TEST(SnapshotChunkProviderTest, SaveRestore) {
TF_ASSERT_OK_AND_ASSIGN(std::string snapshot_path, CreateSnapshotDirectory());
std::vector<std::string> chunks = {"chunk_4_4_4", "chunk_3_3_3",
"chunk_2_2_2", "chunk_1_1_1",
"chunk_0_0_0"};
for (absl::string_view chunk : chunks) {
TF_ASSERT_OK(WriteChunk(snapshot_path, chunk));
}
TF_ASSERT_OK(SetDone(snapshot_path));
SnapshotChunkProvider snapshot_chunk_provider(snapshot_path,
tsl::Env::Default());
EXPECT_THAT(GetChunk(snapshot_chunk_provider),
IsOkAndHolds(tsl::io::JoinPath(
CommittedChunksDirectory(snapshot_path), "chunk_0_0_0")));
TF_ASSERT_OK(SaveAndRestore(snapshot_chunk_provider));
EXPECT_THAT(GetAllChunks(snapshot_chunk_provider),
IsOkAndHolds(ElementsAreArray(
JoinPaths(snapshot_path, {"chunk_1_1_1", "chunk_2_2_2",
"chunk_3_3_3", "chunk_4_4_4"}))));
}
TEST(SnapshotChunkProviderTest, SnapshotError) {
TF_ASSERT_OK_AND_ASSIGN(std::string snapshot_path, CreateSnapshotDirectory());
std::unique_ptr<tsl::Thread> reader_thread =
absl::WrapUnique(tsl::Env::Default()->StartThread(
{}, "Reader", [&snapshot_path]() {
SnapshotChunkProvider snapshot_chunk_provider(snapshot_path,
tsl::Env::Default());
EXPECT_THAT(
GetAllChunks(snapshot_chunk_provider),
StatusIs(absl::StatusCode::kFailedPrecondition, "Test error."));
}));
TF_ASSERT_OK(WriteChunk(snapshot_path, "chunk_0_0_0"));
TF_ASSERT_OK(WriteChunk(snapshot_path, "chunk_1_0_0"));
TF_ASSERT_OK(WriteChunk(snapshot_path, "chunk_2_0_0"));
TF_ASSERT_OK(
SetStatus(snapshot_path, absl::FailedPreconditionError("Test error.")));
reader_thread.reset();
}
TEST(SnapshotChunkProviderTest, Cancel) {
TF_ASSERT_OK_AND_ASSIGN(std::string snapshot_path, CreateSnapshotDirectory());
SnapshotChunkProvider snapshot_chunk_provider(snapshot_path,
tsl::Env::Default());
std::unique_ptr<tsl::Thread> reader_thread =
absl::WrapUnique(tsl::Env::Default()->StartThread(
{}, "Reader",
[&snapshot_chunk_provider]() {
EXPECT_THAT(
GetAllChunks(snapshot_chunk_provider),
StatusIs(absl::StatusCode::kCancelled,
HasSubstr("Cancelled loading tf.data snapshot at")));
}));
TF_ASSERT_OK(WriteChunk(snapshot_path, "chunk_0_0_0"));
TF_ASSERT_OK(WriteChunk(snapshot_path, "chunk_1_0_0"));
TF_ASSERT_OK(WriteChunk(snapshot_path, "chunk_2_0_0"));
snapshot_chunk_provider.Cancel();
reader_thread.reset();
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/service/snapshot/snapshot_chunk_provider.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/service/snapshot/snapshot_chunk_provider_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
52bb704e-dee6-462a-9567-d84486186958 | cpp | tensorflow/tensorflow | graph_debug_info_builder | tensorflow/core/graph/graph_debug_info_builder.cc | tensorflow/core/graph/graph_debug_info_builder_test.cc | #include "tensorflow/core/graph/graph_debug_info_builder.h"
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/hash/hash.h"
#include "absl/status/status.h"
#include "absl/strings/str_format.h"
#include "absl/types/span.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/graph_debug_info.pb.h"
#include "tensorflow/core/framework/logging.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/stack_frame.h"
#include "tsl/platform/path.h"
namespace tensorflow {
static const char* kFilenameToIgnorePrefix = "<embedded";
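// Renders one frame in the "File <name>, line <n>, in <func>" style,
// dropping the shared path prefix except for embedded file names.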
std::string StackFrameToString(const StackFrame& frame,
int shared_prefix_length) {
std::string out = absl::StrFormat(
"File \"%s\", line %d, in %s",
absl::StrContains(frame.file_name, kFilenameToIgnorePrefix)
? frame.file_name
: frame.file_name.substr(shared_prefix_length),
frame.line_number, frame.function_name);
return out;
}
std::string ToStringHelper(absl::Span<const StackFrame> stack_frames,
int shared_prefix_length) {
return absl::StrJoin(
stack_frames, "\n", [&](std::string* out, const StackFrame& frame) {
absl::StrAppend(out, StackFrameToString(frame, shared_prefix_length));
});
}
FrozenStackTrace::FrozenStackTrace(absl::Span<StackFrame const> frames,
absl::Span<StackFrame const> user_frames)
: frames_(frames.begin(), frames.end()),
user_frames_(user_frames.begin(), user_frames.end()) {
if (user_frames.empty()) {
user_frames_ = frames_;
}
}
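// Rehydrates a stack trace from a GraphDebugInfo proto, resolving file
// indices (and, in the id-based encoding, frame ids) back into StackFrames.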
FrozenStackTrace::FrozenStackTrace(
const GraphDebugInfo::StackTrace& stack_trace,
const GraphDebugInfo& debug_info) {
auto push_frame = [this,
&debug_info](const GraphDebugInfo::FileLineCol& frame) {
int file_index = frame.file_index();
std::string file_name =
(file_index >= 0 && file_index < debug_info.files_size())
? debug_info.files(file_index)
: "<UNKNOWN_FILE_NAME>";
frames_.push_back(StackFrame(file_name, frame.line(), frame.func()));
};
if (!stack_trace.file_line_cols().empty()) {
for (const GraphDebugInfo::FileLineCol& frame :
stack_trace.file_line_cols()) {
push_frame(frame);
}
} else {
for (const uint64_t frame_id : stack_trace.frame_id()) {
if (debug_info.frames_by_id().contains(frame_id)) {
push_frame(debug_info.frames_by_id().at(frame_id));
} else {
LOG_FIRST_N(ERROR, 5) << "No matching frame for id:" << frame_id;
}
}
}
}
absl::Span<StackFrame const> FrozenStackTrace::ToFrames() const {
return frames_;
}
std::vector<StackFrame> FrozenStackTrace::ToUncachedFrames() const {
return frames_;
}
StackFrame FrozenStackTrace::LastUserFrame() const { return frames_.back(); }
std::vector<StackFrame> FrozenStackTrace::GetUserFrames(int limit) const {
std::vector<StackFrame> result;
if (limit < 0 || limit > user_frames_.size()) {
limit = user_frames_.size();
}
result.reserve(limit);
for (int i = 0; i < limit; ++i) {
result.push_back(user_frames_[i]);
}
return result;
}
std::string FrozenStackTrace::ToString(const TracePrintingOptions& opts) const {
int shared_prefix_length = 0;
if (opts.filter_common_prefix) {
std::vector<std::string> prefix_file_names;
for (const StackFrame& frame : frames_) {
if (!absl::StrContains(frame.file_name, kFilenameToIgnorePrefix)) {
prefix_file_names.push_back(frame.file_name);
}
}
shared_prefix_length = tsl::io::CommonPathPrefix(prefix_file_names).size();
}
if (!opts.drop_internal_frames) {
return ToStringHelper(frames_, shared_prefix_length);
}
std::vector<StackFrame> non_internal_frames;
for (const StackFrame& frame : frames_) {
if (!IsInternalFrameForFilename(frame.file_name)) {
non_internal_frames.push_back(frame);
}
}
return ToStringHelper(non_internal_frames, shared_prefix_length);
}
GraphDebugInfoBuilder::GraphDebugInfoBuilder()
: debug_info_(std::make_unique<GraphDebugInfo>()) {}
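// Accumulates every trace in the map under the key
// "<node_name><key_suffix>", skipping null traces.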
void GraphDebugInfoBuilder::AccumulateStackTracesMap(
const StackTracesMap& stack_traces_map, absl::string_view key_suffix,
const GraphDebugInfoBuilder::Options& options) {
trace_to_index_.reserve(trace_to_index_.size() + stack_traces_map.size());
for (const auto& [node_name, stack_trace] : stack_traces_map) {
if (stack_trace == nullptr) continue;
std::string trace_key = absl::StrCat(node_name, key_suffix);
AccumulateStackTrace(stack_trace, trace_key, options);
}
}
void GraphDebugInfoBuilder::AccumulateStackTrace(
std::shared_ptr<AbstractStackTrace> trace, absl::string_view traces_key,
const GraphDebugInfoBuilder::Options& options) {
int trace_index = 0;
StackTracePointer p{trace};
auto found = trace_to_index_.find(p);
if (found != trace_to_index_.end()) {
trace_index = found->second;
} else {
trace_index = debug_info_->traces_by_id().size();
trace_to_index_[p] = trace_index;
GraphDebugInfo::StackTrace& stack_trace_proto =
(*debug_info_->mutable_traces_by_id())[trace_index];
if (options.user_frames) {
frame_to_index_.reserve(
frame_to_index_.size() +
trace->GetUserFrames(options.user_frames_limit).size());
for (const auto& stack_frame :
trace->GetUserFrames(options.user_frames_limit)) {
AppendToStackTraceProto(stack_frame, stack_trace_proto);
}
} else {
frame_to_index_.reserve(frame_to_index_.size() +
trace->ToFrames().size());
for (const auto& stack_frame : trace->ToFrames()) {
AppendToStackTraceProto(stack_frame, stack_trace_proto);
}
}
}
(*debug_info_->mutable_name_to_trace_id())[traces_key] = trace_index;
}
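// Interns the frame: reuses the existing frame id if this exact frame was
// seen before, otherwise registers the frame (and its file name) first.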
void GraphDebugInfoBuilder::AppendToStackTraceProto(
const StackFrame& stack_frame,
GraphDebugInfo::StackTrace& stack_trace_proto) {
int frame_index = 0;
auto found = frame_to_index_.find(stack_frame);
if (found != frame_to_index_.end()) {
frame_index = found->second;
} else {
frame_index = debug_info_->frames_by_id().size();
frame_to_index_[stack_frame] = frame_index;
GraphDebugInfo::FileLineCol& frame =
(*debug_info_->mutable_frames_by_id())[frame_index];
auto file_index = file_name_to_index_.find(stack_frame.file_name);
if (file_index != file_name_to_index_.end()) {
frame.set_file_index(file_index->second);
} else {
frame.set_file_index(new_name_index_);
file_name_to_index_[stack_frame.file_name] = new_name_index_;
*debug_info_->add_files() = stack_frame.file_name;
new_name_index_++;
}
frame.set_line(stack_frame.line_number);
frame.set_func(stack_frame.function_name);
}
stack_trace_proto.add_frame_id(frame_index);
}
void GraphDebugInfoBuilder::AppendGraphDebugInfo(
absl::string_view prefix, const GraphDebugInfo& new_info) {
for (const auto& pair : new_info.name_to_trace_id()) {
auto trace = new_info.traces_by_id().at(pair.second);
auto frozen = std::make_shared<FrozenStackTrace>(trace, new_info);
std::string key =
prefix.empty() ? pair.first : absl::StrCat(pair.first, "@", prefix);
AccumulateStackTrace(frozen, key, GraphDebugInfoBuilder::Options{});
}
}
GraphDebugInfo GraphDebugInfoBuilder::Build() const { return *debug_info_; }
absl::Status GraphDebugInfoBuilder::AppendGraphDebugInfoStr(
absl::string_view prefix, absl::string_view new_info_str) {
GraphDebugInfo debug_info;
if (!debug_info.ParseFromArray(new_info_str.data(), new_info_str.size())) {
return absl::InvalidArgumentError("Failed to parse GraphDebugInfo proto.");
}
AppendGraphDebugInfo(prefix, debug_info);
return absl::OkStatus();
}
std::string GraphDebugInfoBuilder::ToGraphDebugInfoStr() const {
return Build().SerializeAsString();
}
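// Inverse of GraphDebugInfoBuilder: expands a GraphDebugInfo proto back into
// a name -> stack trace map, handling both the id-based and the legacy
// inline trace encodings.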
StackTracesMap LoadTracesFromDebugInfo(const GraphDebugInfo& debug_info) {
StackTracesMap traces;
absl::flat_hash_map<uint64_t, std::shared_ptr<AbstractStackTrace>>
traces_by_id;
traces_by_id.reserve(debug_info.traces_by_id_size());
for (const auto& [id, frames] : debug_info.traces_by_id()) {
traces_by_id[id] = std::make_shared<FrozenStackTrace>(frames, debug_info);
}
traces.reserve(debug_info.name_to_trace_id_size() + debug_info.traces_size());
for (const auto& [name, trace_id] : debug_info.name_to_trace_id()) {
if (!traces_by_id.contains(trace_id)) {
LOG_FIRST_N(ERROR, 5) << "No matching trace for id:" << trace_id;
continue;
}
traces[name] = traces_by_id[trace_id];
}
for (const auto& [name, frames] : debug_info.traces()) {
traces[name] = std::make_shared<FrozenStackTrace>(frames, debug_info);
}
return traces;
}
absl::StatusOr<StackTracesMap> LoadTracesFromDebugInfoStr(
absl::string_view debug_info_str) {
GraphDebugInfo debug_info;
if (!debug_info.ParseFromArray(debug_info_str.data(),
debug_info_str.size())) {
return absl::InvalidArgumentError("Failed to parse GraphDebugInfo proto.");
}
return LoadTracesFromDebugInfo(debug_info);
}
GraphDebugInfo StackTracesMapToGraphDebugInfo(const StackTracesMap& map,
bool user_frames) {
GraphDebugInfoBuilder builder;
GraphDebugInfoBuilder::Options options;
options.user_frames = user_frames;
options.user_frames_limit = -1;
builder.AccumulateStackTracesMap(map, "", options);
return builder.Build();
}
} | #include "tensorflow/core/graph/graph_debug_info_builder.h"
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include "absl/strings/str_cat.h"
#include "tensorflow/core/framework/graph_debug_info.pb.h"
#include "tensorflow/core/platform/stack_frame.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
using ::testing::Eq;
using ::testing::Ne;
using ::testing::UnorderedElementsAre;
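// Minimal AbstractStackTrace backed by an in-memory frame list, used to
// drive GraphDebugInfoBuilder in the tests below.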
class TestStackTrace : public AbstractStackTrace {
public:
  explicit TestStackTrace(std::vector<StackFrame> frames)
: frames_(std::move(frames)) {}
absl::Span<StackFrame const> ToFrames() const override { return frames_; }
std::vector<StackFrame> ToUncachedFrames() const override { return frames_; }
std::vector<StackFrame> GetUserFrames(int limit) const override {
return frames_;
}
StackFrame LastUserFrame() const override { return frames_.back(); }
string ToString(const TracePrintingOptions& opts) const override {
auto frame = LastUserFrame();
return absl::StrCat(frame.file_name, ":", frame.line_number, ":",
frame.function_name);
}
std::vector<StackFrame> frames_;
};
TEST(GraphDebugInfoBuilderTest, AccumulateStackTrace) {
auto stack_trace = std::make_shared<TestStackTrace>(
std::vector<StackFrame>{{"dummy_file_alpha.cc", 20, "function_bar"},
{"dummy_file_beta.cc", 30, "function_sop"}});
GraphDebugInfoBuilder builder;
builder.AccumulateStackTrace(stack_trace, "alpha_beta");
GraphDebugInfo debug_info = builder.Build();
EXPECT_THAT(debug_info.files(), UnorderedElementsAre("dummy_file_alpha.cc",
"dummy_file_beta.cc"));
EXPECT_THAT(debug_info.traces_by_id_size(), Eq(1));
EXPECT_THAT(debug_info.name_to_trace_id().find("alpha_beta"),
Ne(debug_info.name_to_trace_id().end()));
auto actual_stack_trace = debug_info.traces_by_id().at(
debug_info.name_to_trace_id().at("alpha_beta"));
EXPECT_THAT(actual_stack_trace.frame_id_size(), Eq(2))
<< debug_info.DebugString();
}
TEST(GraphDebugInfoBuilderTest, AccumulateStackTracesMap) {
StackTracesMap stack_traces;
stack_traces["two"] = std::make_shared<TestStackTrace>(
std::vector<StackFrame>{{"dummy_file_alpha.cc", 20, "function_bar"},
{"dummy_file_beta.cc", 30, "function_sop"}});
stack_traces["scale"] =
std::make_shared<TestStackTrace>(std::vector<StackFrame>{
{"dummy_file_alpha.cc", 10, "function_foo"},
{"dummy_file_beta.cc", 30, "function_sop"},
});
stack_traces["y"] = std::make_shared<TestStackTrace>(std::vector<StackFrame>{
{"dummy_file_alpha.cc", 15, "function_flex"},
{"dummy_file_alpha.cc", 20, "function_bar"},
{"dummy_file_beta.cc", 30, "function_sop"},
});
GraphDebugInfoBuilder builder;
builder.AccumulateStackTracesMap(stack_traces, "@func");
GraphDebugInfo debug_info = builder.Build();
EXPECT_THAT(debug_info.files(), UnorderedElementsAre("dummy_file_alpha.cc",
"dummy_file_beta.cc"));
EXPECT_THAT(debug_info.name_to_trace_id_size(), Eq(3));
EXPECT_THAT(debug_info.name_to_trace_id().find("scale@func"),
Ne(debug_info.name_to_trace_id().end()));
auto stack_trace = debug_info.traces_by_id().at(
debug_info.name_to_trace_id().at("scale@func"));
EXPECT_THAT(stack_trace.frame_id_size(), Eq(2));
std::vector<GraphDebugInfo::FileLineCol> file_line_cols;
for (auto& frame_id : stack_trace.frame_id()) {
file_line_cols.push_back(debug_info.frames_by_id().at(frame_id));
}
auto file_line_col_0 = file_line_cols[0];
auto file_line_col_1 = file_line_cols[1];
EXPECT_THAT(std::vector<int>(
{file_line_col_0.file_index(), file_line_col_1.file_index()}),
UnorderedElementsAre(0, 1));
EXPECT_THAT(file_line_col_0.line(), Eq(10));
EXPECT_THAT(file_line_col_0.func(), Eq("function_foo"));
EXPECT_THAT(file_line_col_1.line(), Eq(30));
EXPECT_THAT(file_line_col_1.func(), Eq("function_sop"));
}
TEST(GraphDebugInfoBuilderTest, AppendGraphDebugInfo) {
GraphDebugInfo a;
{
GraphDebugInfoBuilder builder;
StackTracesMap stack_traces;
stack_traces["two"] = std::make_shared<TestStackTrace>(
std::vector<StackFrame>{{"dummy_file_alpha.cc", 20, "function_bar"}});
stack_traces["scale"] = std::make_shared<TestStackTrace>(
std::vector<StackFrame>{{"dummy_file_alpha.cc", 10, "function_foo"}});
builder.AccumulateStackTracesMap(stack_traces, "");
a = builder.Build();
}
GraphDebugInfo b;
{
GraphDebugInfoBuilder builder;
StackTracesMap stack_traces;
stack_traces["y"] =
std::make_shared<TestStackTrace>(std::vector<StackFrame>{
{"dummy_file_alpha.cc", 15, "function_flex"},
});
builder.AccumulateStackTracesMap(stack_traces, "");
b = builder.Build();
}
GraphDebugInfo c;
{
GraphDebugInfoBuilder builder;
StackTracesMap stack_traces;
stack_traces["z"] =
std::make_shared<TestStackTrace>(std::vector<StackFrame>{
{"dummy_file_alpha.cc", 15, "function_flex"},
});
builder.AccumulateStackTracesMap(stack_traces, "@func3");
c = builder.Build();
}
GraphDebugInfoBuilder builder;
builder.AppendGraphDebugInfo("func1", a);
builder.AppendGraphDebugInfo("func2", b);
builder.AppendGraphDebugInfo("", c);
GraphDebugInfo combined = builder.Build();
EXPECT_EQ(combined.name_to_trace_id().size(), 4);
std::vector<std::string> keys{"two@func1", "scale@func1", "y@func2",
"z@func3"};
for (const auto& key : keys) {
EXPECT_THAT(combined.name_to_trace_id().find(key),
Ne(combined.name_to_trace_id().end()));
}
}
TEST(StackTracesMapToGraphDebugInfoTest, EmptyMap) {
StackTracesMap map;
GraphDebugInfo generated = StackTracesMapToGraphDebugInfo(map);
EXPECT_EQ(generated.files_size(), 0);
EXPECT_EQ(generated.traces_size(), 0);
}
TEST(StackTracesMapToGraphDebugInfoTest, EmptyFrames) {
StackTracesMap map;
std::vector<StackFrame> frames;
auto stack_trace = std::make_shared<FrozenStackTrace>(frames);
map.insert({"dummy_name", stack_trace});
GraphDebugInfo generated = StackTracesMapToGraphDebugInfo(map);
EXPECT_EQ(generated.files_size(), 0);
EXPECT_EQ(generated.traces_by_id_size(), 1);
EXPECT_TRUE(generated.name_to_trace_id().contains("dummy_name"));
}
TEST(StackTracesMapToGraphDebugInfoTest, RoundTripStackTraces) {
StackTracesMap map;
std::vector<StackFrame> frames = {
StackFrame({"dummy_file_name", 10, "dummy_function_name"}),
StackFrame({"dummy_file_name", 20, "other_function_name"})};
auto stack_trace = std::make_shared<FrozenStackTrace>(frames);
map.insert({"dummy_name", stack_trace});
GraphDebugInfo generated = StackTracesMapToGraphDebugInfo(map);
StackTracesMap output = LoadTracesFromDebugInfo(generated);
for (auto [name, trace] : output) {
auto orig_trace = map[name];
EXPECT_NE(orig_trace, nullptr);
EXPECT_EQ(orig_trace->ToFrames(), trace->ToFrames());
}
}
TEST(StackTracesTest, ToFrames) {
StackTracesMap map;
std::vector<StackFrame> frames = {
StackFrame({"dummy_file_name", 10, "dummy_function_name"}),
StackFrame({"other_file_name", 20, "other_function_name"})};
auto stack_trace = TestStackTrace(frames);
EXPECT_EQ(stack_trace.ToFrames().size(), 2);
auto uncached_frames = stack_trace.ToUncachedFrames();
EXPECT_EQ(uncached_frames.size(), 2);
EXPECT_EQ(frames[0], uncached_frames[0]);
EXPECT_EQ(frames[1], uncached_frames[1]);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/graph/graph_debug_info_builder.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/graph/graph_debug_info_builder_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
02ed9897-f442-4266-a42c-c0acb98922cf | cpp | tensorflow/tensorflow | nnapi_delegate_compatibility_checker | tensorflow/lite/tools/delegates/compatibility/nnapi/nnapi_delegate_compatibility_checker.cc | tensorflow/lite/tools/delegates/compatibility/nnapi/nnapi_delegate_compatibility_checker_test.cc | #include "tensorflow/lite/tools/delegates/compatibility/nnapi/nnapi_delegate_compatibility_checker.h"
#include <cctype>
#include <cstdlib>
#include <functional>
#include <limits>
#include <memory>
#include <sstream>
#include <string>
#include <unordered_map>
#include <vector>
#include "absl/status/status.h"
#include "absl/strings/match.h"
#include "tensorflow/lite/core/interpreter_builder.h"
#include "tensorflow/lite/core/kernels/register.h"
#include "tensorflow/lite/delegates/nnapi/nnapi_delegate.h"
#include "tensorflow/lite/delegates/nnapi/nnapi_delegate_kernel.h"
#include "tensorflow/lite/interpreter.h"
#include "tensorflow/lite/model_builder.h"
#include "tensorflow/lite/nnapi/NeuralNetworksTypes.h"
#include "tensorflow/lite/tools/delegates/compatibility/common/delegate_compatibility_checker_util.h"
#include "tensorflow/lite/tools/delegates/compatibility/common/online_helper_delegate.h"
#include "tensorflow/lite/tools/delegates/compatibility/protos/compatibility_result.pb.h"
#include "tensorflow/lite/tools/versioning/op_signature.h"
namespace tflite {
namespace tools {
namespace {
void getCanonicalFeatureLevel(int runtime_feature_level,
int& canonical_feature_level) {
switch (runtime_feature_level) {
case 1:
canonical_feature_level = ANEURALNETWORKS_FEATURE_LEVEL_1;
break;
case 2:
canonical_feature_level = ANEURALNETWORKS_FEATURE_LEVEL_2;
break;
case 3:
canonical_feature_level = ANEURALNETWORKS_FEATURE_LEVEL_3;
break;
case 4:
canonical_feature_level = ANEURALNETWORKS_FEATURE_LEVEL_4;
break;
case 5:
canonical_feature_level = ANEURALNETWORKS_FEATURE_LEVEL_5;
break;
case 6:
canonical_feature_level = ANEURALNETWORKS_FEATURE_LEVEL_6;
break;
case 7:
canonical_feature_level = ANEURALNETWORKS_FEATURE_LEVEL_7;
break;
case 8:
canonical_feature_level = ANEURALNETWORKS_FEATURE_LEVEL_8;
break;
default:
canonical_feature_level = ANEURALNETWORKS_FEATURE_LEVEL_8;
}
}
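// Accepts only a single digit in [1, 8], the NNAPI runtime feature levels
// the checker can canonicalize.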
absl::Status IsValidFeatureLevelInt(const std::string& s) {
if (s.size() == 1 && std::isdigit(s[0]) && s[0] > '0' && s[0] < '9') {
return absl::OkStatus();
}
return absl::InvalidArgumentError("Invalid runtime feature level.");
}
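// Reads the "nnapi-runtime_feature_level" flag from the DCC configuration
// map, falling back to the default feature level when the flag is absent.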
absl::Status extractRuntimeFeatureLevel(
const std::unordered_map<std::string, std::string>& dcc_configs,
int& runtime_feature_level) {
std::string str_runtime_feature_level;
if (dcc_configs.find("nnapi-runtime_feature_level") == dcc_configs.end()) {
for (const auto& dcc_config : dcc_configs) {
if (absl::StrContains(dcc_config.first, "nnapi")) {
return absl::InvalidArgumentError(
"The correct flag name is 'nnapi-runtime_feature_level");
}
}
str_runtime_feature_level =
std::to_string(tools::kDefaultRuntimeFeatureLevel);
} else {
str_runtime_feature_level = dcc_configs.at("nnapi-runtime_feature_level");
RETURN_IF_ERROR(IsValidFeatureLevelInt(str_runtime_feature_level));
}
runtime_feature_level = std::stoi(str_runtime_feature_level);
return absl::OkStatus();
}
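// Maps each NNAPI validation failure onto the corresponding proto
// CompatibilityFailureType and records its message on the op result.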
absl::Status convertToCompatibilityFailureType(
std::vector<delegate::nnapi::NNAPIValidationFailure> map_failures,
proto::OpCompatibilityResult* op_result) {
for (const auto& status : map_failures) {
auto compatibility_failure = op_result->add_compatibility_failures();
compatibility_failure->set_description(status.message);
switch (status.type) {
case delegate::nnapi::NNAPIValidationFailureType::kUnsupportedOperator:
compatibility_failure->set_failure_type(
proto::CompatibilityFailureType::DCC_UNSUPPORTED_OPERATOR);
break;
case delegate::nnapi::NNAPIValidationFailureType::
kUnsupportedAndroidVersion:
compatibility_failure->set_failure_type(
proto::CompatibilityFailureType::DCC_UNSUPPORTED_VERSION);
break;
case delegate::nnapi::NNAPIValidationFailureType::
kUnsupportedOperatorVersion:
compatibility_failure->set_failure_type(
proto::CompatibilityFailureType::DCC_UNSUPPORTED_OPERATOR_VERSION);
break;
case delegate::nnapi::NNAPIValidationFailureType::kUnsupportedInputType:
compatibility_failure->set_failure_type(
proto::CompatibilityFailureType::DCC_UNSUPPORTED_INPUT_TYPE);
break;
case delegate::nnapi::NNAPIValidationFailureType::
kNotRestrictedScaleCompliant:
compatibility_failure->set_failure_type(
proto::CompatibilityFailureType::
DCC_NOT_RESTRICTED_SCALE_COMPLIANT);
break;
case delegate::nnapi::NNAPIValidationFailureType::kUnsupportedOutputType:
compatibility_failure->set_failure_type(
proto::CompatibilityFailureType::DCC_UNSUPPORTED_OUTPUT_TYPE);
break;
case delegate::nnapi::NNAPIValidationFailureType::kUnsupportedOperandSize:
compatibility_failure->set_failure_type(
proto::CompatibilityFailureType::DCC_UNSUPPORTED_OPERAND_SIZE);
break;
case delegate::nnapi::NNAPIValidationFailureType::
kUnsupportedOperandValue:
compatibility_failure->set_failure_type(
proto::CompatibilityFailureType::DCC_UNSUPPORTED_OPERAND_VALUE);
break;
case delegate::nnapi::NNAPIValidationFailureType::
kUnsupportedHybridOperator:
compatibility_failure->set_failure_type(
proto::CompatibilityFailureType::DCC_UNSUPPORTED_HYBRID_OPERATOR);
break;
case delegate::nnapi::NNAPIValidationFailureType::
kUnsupportedQuantizationType:
compatibility_failure->set_failure_type(
proto::CompatibilityFailureType::DCC_UNSUPPORTED_QUANTIZATION_TYPE);
break;
case delegate::nnapi::NNAPIValidationFailureType::kMissingRequiredOperand:
compatibility_failure->set_failure_type(
proto::CompatibilityFailureType::DCC_MISSING_REQUIRED_OPERAND);
break;
case delegate::nnapi::NNAPIValidationFailureType::kUnsupportedOperandRank:
compatibility_failure->set_failure_type(
proto::CompatibilityFailureType::DCC_UNSUPPORTED_OPERAND_RANK);
break;
case delegate::nnapi::NNAPIValidationFailureType::
kInputTensorShouldHaveConstantShape:
compatibility_failure->set_failure_type(
proto::CompatibilityFailureType::
DCC_INPUT_TENSOR_SHOULD_HAVE_CONSTANT_SHAPE);
break;
case delegate::nnapi::NNAPIValidationFailureType::
kUnsupportedOperatorVariant:
compatibility_failure->set_failure_type(
proto::CompatibilityFailureType::DCC_UNSUPPORTED_OPERATOR_VARIANT);
break;
case delegate::nnapi::NNAPIValidationFailureType::kNoActivationExpected:
compatibility_failure->set_failure_type(
proto::CompatibilityFailureType::DCC_NO_ACTIVATION_EXPECTED);
break;
case delegate::nnapi::NNAPIValidationFailureType::
kUnsupportedQuantizationParameters:
compatibility_failure->set_failure_type(
proto::CompatibilityFailureType::
DCC_UNSUPPORTED_QUANTIZATION_PARAMETERS);
break;
default:
compatibility_failure->set_failure_type(
proto::CompatibilityFailureType::DCC_INTERNAL_ERROR);
compatibility_failure->set_description(
"Unknown validation failure type.");
}
}
return absl::OkStatus();
}
}
absl::Status
tools::NnapiDelegateCompatibilityChecker::checkOpCompatibilityOnline(
TfLiteContext* context, const TfLiteNode* node,
const TfLiteRegistration* registration,
std::unordered_map<std::string, std::string> dcc_configs,
tflite::proto::OpCompatibilityResult* op_result) {
std::vector<delegate::nnapi::NNAPIValidationFailure> map_failures;
int runtime_feature_level;
RETURN_IF_ERROR(
extractRuntimeFeatureLevel(dcc_configs, runtime_feature_level));
getCanonicalFeatureLevel(runtime_feature_level, runtime_feature_level);
if (NNAPIDelegateKernel::Validate(
context, registration, runtime_feature_level, node,
          /*is_accelerator_specified=*/true,
          /*vendor_plugin=*/nullptr, &map_failures)) {
op_result->set_is_supported(true);
} else {
RETURN_IF_ERROR(convertToCompatibilityFailureType(map_failures, op_result));
op_result->set_is_supported(false);
}
return absl::OkStatus();
}
std::unordered_map<std::string, std::string>
tools::NnapiDelegateCompatibilityChecker::getDccConfigurations() {
std::unordered_map<std::string, std::string> dcc_configs;
dcc_configs["nnapi-runtime_feature_level"] =
std::to_string(runtime_feature_level_);
return dcc_configs;
}
absl::Status tools::NnapiDelegateCompatibilityChecker::setDccConfigurations(
const std::unordered_map<std::string, std::string>& dcc_configs) {
RETURN_IF_ERROR(
extractRuntimeFeatureLevel(dcc_configs, runtime_feature_level_));
return absl::OkStatus();
}
absl::Status
tools::NnapiDelegateCompatibilityChecker::checkModelCompatibilityOnline(
tflite::FlatBufferModel* model_buffer,
tflite::proto::CompatibilityResult* result) {
std::unique_ptr<tflite::Interpreter> interpreter;
tflite::ops::builtin::BuiltinOpResolver resolver;
tflite::InterpreterBuilder interpreter_builder(*model_buffer, resolver);
auto dcc_configs = getDccConfigurations();
std::function<absl::Status(TfLiteContext*, const TfLiteNode*,
const TfLiteRegistration*,
std::unordered_map<std::string, std::string>,
proto::OpCompatibilityResult*)>
check_op_func_ptr = &checkOpCompatibilityOnline;
OnlineHelperDelegate delegate(dcc_configs, check_op_func_ptr, result);
interpreter_builder.AddDelegate(&delegate);
interpreter_builder(&interpreter);
return absl::OkStatus();
}
absl::Status tools::NnapiDelegateCompatibilityChecker::checkOpSigCompatibility(
const OpSignature& op_sig,
tflite::proto::OpCompatibilityResult* op_result) {
return absl::UnimplementedError(
"Offline mode is not yet supported on NNAPI delegate compatibility "
"checker.");
}
}
} | #include "tensorflow/lite/tools/delegates/compatibility/nnapi/nnapi_delegate_compatibility_checker.h"
#include <cstdint>
#include <limits>
#include <string>
#include <unordered_map>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "tensorflow/core/platform/resource_loader.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/model_builder.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/tools/delegates/compatibility/protos/compatibility_result.pb.h"
namespace tflite {
namespace tools {
#ifndef EXPECT_OK
#define EXPECT_OK(x) EXPECT_TRUE(x.ok())
#endif
namespace {
class AddOpModel : public SingleOpModel {
public:
AddOpModel(const TensorData& input1, const TensorData& input2,
const TensorData& output, ActivationFunctionType activation_type) {
input1_ = AddInput(input1);
input2_ = AddInput(input2);
output_ = AddOutput(output);
SetBuiltinOp(BuiltinOperator_ADD, BuiltinOptions_AddOptions,
CreateAddOptions(builder_, activation_type).Union());
BuildInterpreter({GetShape(input1_), GetShape(input2_)});
}
protected:
int input1_;
int input2_;
int output_;
};
class NnapiDccTest : public ::testing::Test {
protected:
void SetUp() override {}
void TearDown() override { compatibility_result_.Clear(); }
NnapiDelegateCompatibilityChecker nnapi_dcc_;
proto::CompatibilityResult compatibility_result_;
};
}
TEST_F(NnapiDccTest, ValidRuntimeFeatureLevel) {
std::unordered_map dcc_configs = nnapi_dcc_.getDccConfigurations();
EXPECT_EQ(dcc_configs["nnapi-runtime_feature_level"], "8");
EXPECT_OK(nnapi_dcc_.setDccConfigurations(dcc_configs));
dcc_configs["nnapi-runtime_feature_level"] = "1";
EXPECT_OK(nnapi_dcc_.setDccConfigurations(dcc_configs));
dcc_configs["nnapi-runtime_feature_level"] = "8";
EXPECT_OK(nnapi_dcc_.setDccConfigurations(dcc_configs));
dcc_configs.clear();
EXPECT_OK(nnapi_dcc_.setDccConfigurations(dcc_configs));
EXPECT_EQ(nnapi_dcc_.getDccConfigurations()["nnapi-runtime_feature_level"],
"8");
}
TEST_F(NnapiDccTest, InvalidRuntimeFeatureLevel) {
std::unordered_map dcc_configs = nnapi_dcc_.getDccConfigurations();
dcc_configs["nnapi-runtime_feature_level"] = "03";
EXPECT_EQ(nnapi_dcc_.setDccConfigurations(dcc_configs).code(),
absl::StatusCode::kInvalidArgument);
dcc_configs["nnapi-runtime_feature_level"] = "a";
EXPECT_EQ(nnapi_dcc_.setDccConfigurations(dcc_configs).code(),
absl::StatusCode::kInvalidArgument);
dcc_configs["nnapi-runtime_feature_level"] = "28123497123489123841212344516";
EXPECT_EQ(nnapi_dcc_.setDccConfigurations(dcc_configs).code(),
absl::StatusCode::kInvalidArgument);
dcc_configs["nnapi-runtime_feature_level"] = "30.0";
EXPECT_EQ(nnapi_dcc_.setDccConfigurations(dcc_configs).code(),
absl::StatusCode::kInvalidArgument);
dcc_configs["nnapi-runtime_feature_level"] = "-30";
EXPECT_EQ(nnapi_dcc_.setDccConfigurations(dcc_configs).code(),
absl::StatusCode::kInvalidArgument);
dcc_configs["nnapi-runtime_feature_level"] = "9";
EXPECT_EQ(nnapi_dcc_.setDccConfigurations(dcc_configs).code(),
absl::StatusCode::kInvalidArgument);
dcc_configs.clear();
dcc_configs["nnapi-runtim_feature_level"] = "8";
EXPECT_EQ(nnapi_dcc_.setDccConfigurations(dcc_configs).code(),
absl::StatusCode::kInvalidArgument);
}
TEST_F(NnapiDccTest, CompatibleModelOnlineMode) {
const std::string& full_path =
tensorflow::GetDataDependencyFilepath("tensorflow/lite/testdata/add.bin");
auto fb_model = FlatBufferModel::BuildFromFile(full_path.data());
ASSERT_TRUE(fb_model);
auto model = fb_model->GetModel();
EXPECT_EQ(model->subgraphs()->size(), 1);
EXPECT_EQ(model->subgraphs()->Get(0)->operators()->size(), 2);
EXPECT_OK(nnapi_dcc_.checkModelCompatibilityOnline(fb_model.get(),
&compatibility_result_));
  for (const auto& op_compatibility_result :
compatibility_result_.compatibility_results()) {
EXPECT_TRUE(op_compatibility_result.is_supported());
}
EXPECT_EQ(compatibility_result_.compatibility_results_size(), 2);
}
TEST_F(NnapiDccTest, IncompatibleModelOperation) {
AddOpModel add_op_model(
{TensorType_INT32, {1, 2, 2, 1}}, {TensorType_INT32, {1, 2, 2, 1}},
{TensorType_INT32, {}}, ActivationFunctionType_RELU_N1_TO_1);
auto fb_model = tflite::FlatBufferModel::BuildFromModel(
tflite::GetModel(add_op_model.GetModelBuffer()));
ASSERT_TRUE(fb_model);
EXPECT_OK(nnapi_dcc_.checkModelCompatibilityOnline(fb_model.get(),
&compatibility_result_));
  for (const auto& op_compatibility_result :
compatibility_result_.compatibility_results()) {
EXPECT_FALSE(op_compatibility_result.is_supported());
}
EXPECT_EQ(compatibility_result_.compatibility_results_size(), 1);
}
TEST_F(NnapiDccTest, IncompatibleModelFeatureLevel) {
AddOpModel add_op_model({TensorType_INT32, {1, 2, 2, 1}},
{TensorType_INT32, {1, 2, 2, 1}},
{TensorType_INT32, {}}, ActivationFunctionType_NONE);
auto fb_model = tflite::FlatBufferModel::BuildFromModel(
tflite::GetModel(add_op_model.GetModelBuffer()));
ASSERT_TRUE(fb_model);
auto nnapi_configs = nnapi_dcc_.getDccConfigurations();
nnapi_configs["nnapi-runtime_feature_level"] = "2";
EXPECT_OK(nnapi_dcc_.setDccConfigurations(nnapi_configs));
EXPECT_OK(nnapi_dcc_.checkModelCompatibilityOnline(fb_model.get(),
&compatibility_result_));
  for (const auto& op_compatibility_result :
compatibility_result_.compatibility_results()) {
EXPECT_FALSE(op_compatibility_result.is_supported());
}
EXPECT_EQ(compatibility_result_.compatibility_results_size(), 1);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/tools/delegates/compatibility/nnapi/nnapi_delegate_compatibility_checker.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/tools/delegates/compatibility/nnapi/nnapi_delegate_compatibility_checker_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
4d5bf531-0d43-45fa-a7f4-cfa7237f717c | cpp | tensorflow/tensorflow | remove_nodes | tensorflow/tools/graph_transforms/remove_nodes.cc | tensorflow/tools/graph_transforms/remove_nodes_test.cc | #include "tensorflow/core/common_runtime/constant_folding.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/graph/subgraph.h"
#include "tensorflow/core/platform/init_main.h"
#include "tensorflow/core/public/session.h"
#include "tensorflow/tools/graph_transforms/fold_constants_lib.h"
#include "tensorflow/tools/graph_transforms/transform_utils.h"
namespace tensorflow {
namespace graph_transforms {
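// Removes nodes of the op types given via the "op" parameter (e.g.
// Identity), rewiring each removed node's consumers to its first input.
// Nodes named as graph inputs or outputs are left in place.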
Status RemoveNodes(const GraphDef& input_graph_def,
const TransformFuncContext& context,
GraphDef* output_graph_def) {
if (!context.params.count("op")) {
return errors::InvalidArgument(
"remove_nodes expects at least one 'op'"
"argument, e.g. remove_nodes(op=Identity)");
}
int32_t max_inputs;
TF_RETURN_IF_ERROR(
context.GetOneInt32Parameter("max_inputs", 1, &max_inputs));
std::set<string> required_nodes;
for (const string& input : context.input_names) {
required_nodes.insert(NodeNameFromInput(input));
}
for (const string& output : context.output_names) {
required_nodes.insert(NodeNameFromInput(output));
}
std::vector<string> ops_to_remove = context.params.at("op");
GraphDef current_graph_def = input_graph_def;
for (const string& op : ops_to_remove) {
for (int num_inputs = 1; num_inputs <= max_inputs; ++num_inputs) {
OpTypePattern pattern = {op};
pattern.inputs.resize(num_inputs);
for (int i = 0; i < num_inputs; ++i) {
pattern.inputs[i] = {"*"};
}
bool any_nodes_removed;
do {
any_nodes_removed = false;
std::map<string, string> inputs_to_rename;
GraphDef replaced_graph_def;
TF_RETURN_IF_ERROR(ReplaceMatchingOpTypes(
current_graph_def, pattern,
[&inputs_to_rename, &required_nodes, &any_nodes_removed](
const NodeMatch& match, const std::set<string>& input_nodes,
const std::set<string>& output_nodes,
std::vector<NodeDef>* new_nodes) {
const NodeDef& replace_node = match.node;
if (required_nodes.count(replace_node.name())) {
LOG(INFO) << "Skipping replacement for " << replace_node.name();
CopyOriginalMatch(match, new_nodes);
return OkStatus();
}
const NodeDef& input_node = match.inputs[0].node;
string target_name = input_node.name();
for (const string& input : replace_node.input()) {
if (!input.compare(0, target_name.size(), target_name)) {
if (input.size() == target_name.size() ||
input[target_name.size()] == ':') {
target_name = input;
break;
}
}
}
inputs_to_rename[replace_node.name()] = target_name;
inputs_to_rename["^" + replace_node.name()] =
"^" + input_node.name();
new_nodes->push_back(input_node);
any_nodes_removed = true;
return OkStatus();
},
{true}, &replaced_graph_def));
TF_RETURN_IF_ERROR(
RenameNodeInputs(replaced_graph_def, inputs_to_rename,
std::unordered_set<string>(), ¤t_graph_def));
} while (any_nodes_removed);
}
}
*output_graph_def = current_graph_def;
return OkStatus();
}
REGISTER_GRAPH_TRANSFORM("remove_nodes", RemoveNodes);
}
} | #include "tensorflow/cc/ops/const_op.h"
#include "tensorflow/cc/ops/image_ops.h"
#include "tensorflow/cc/ops/nn_ops.h"
#include "tensorflow/cc/ops/sendrecv_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
#include "tensorflow/core/public/session.h"
#include "tensorflow/tools/graph_transforms/transform_utils.h"
namespace tensorflow {
namespace graph_transforms {
Status RemoveNodes(const GraphDef& input_graph_def,
const TransformFuncContext& context,
GraphDef* output_graph_def);
class RemoveNodesTest : public ::testing::Test {
protected:
void TestRemoveNodes() {
GraphDef graph_def;
NodeDef* add_node1 = graph_def.add_node();
add_node1->set_name("add_node1");
add_node1->set_op("Add");
add_node1->add_input("add_node2");
add_node1->add_input("add_node3");
NodeDef* add_node2 = graph_def.add_node();
add_node2->set_name("add_node2");
add_node2->set_op("Add");
add_node2->add_input("identity_node1");
add_node2->add_input("identity_node2");
NodeDef* add_node3 = graph_def.add_node();
add_node3->set_name("add_node3");
add_node3->set_op("Add");
add_node3->add_input("identity_node1");
add_node3->add_input("const_node3");
NodeDef* identity_node1 = graph_def.add_node();
identity_node1->set_name("identity_node1");
identity_node1->set_op("Identity");
identity_node1->add_input("const_node1");
NodeDef* identity_node2 = graph_def.add_node();
identity_node2->set_name("identity_node2");
identity_node2->set_op("Identity");
identity_node2->add_input("const_node2");
NodeDef* identity_node3 = graph_def.add_node();
identity_node3->set_name("identity_node3");
identity_node3->set_op("Identity");
identity_node3->add_input("const_node3");
NodeDef* const_node1 = graph_def.add_node();
const_node1->set_name("const_node1");
const_node1->set_op("Const");
NodeDef* const_node2 = graph_def.add_node();
const_node2->set_name("const_node2");
const_node2->set_op("Const");
NodeDef* const_node3 = graph_def.add_node();
const_node3->set_name("const_node3");
const_node3->set_op("Const");
NodeDef* add_node4 = graph_def.add_node();
add_node4->set_name("add_node4");
add_node4->set_op("Add");
add_node4->add_input("add_node2");
add_node4->add_input("add_node3");
GraphDef result;
TransformFuncContext context;
context.input_names = {};
context.output_names = {"add_node1"};
context.params.insert(
std::pair<string, std::vector<string>>({"op", {string("Identity")}}));
TF_ASSERT_OK(RemoveNodes(graph_def, context, &result));
std::map<string, const NodeDef*> node_lookup;
MapNamesToNodes(result, &node_lookup);
EXPECT_EQ(1, node_lookup.count("add_node1"));
EXPECT_EQ("add_node2", node_lookup.at("add_node1")->input(0));
EXPECT_EQ("add_node3", node_lookup.at("add_node1")->input(1));
EXPECT_EQ(1, node_lookup.count("add_node2"));
EXPECT_EQ("const_node1", node_lookup.at("add_node2")->input(0));
EXPECT_EQ("const_node2", node_lookup.at("add_node2")->input(1));
EXPECT_EQ(1, node_lookup.count("add_node3"));
EXPECT_EQ("const_node1", node_lookup.at("add_node3")->input(0));
EXPECT_EQ("const_node3", node_lookup.at("add_node3")->input(1));
EXPECT_EQ(1, node_lookup.count("add_node4"));
EXPECT_EQ("add_node2", node_lookup.at("add_node4")->input(0));
EXPECT_EQ("add_node3", node_lookup.at("add_node4")->input(1));
EXPECT_EQ(0, node_lookup.count("identity_node1"));
EXPECT_EQ(0, node_lookup.count("identity_node2"));
EXPECT_EQ(0, node_lookup.count("identity_node3"));
EXPECT_EQ(1, node_lookup.count("const_node1"));
EXPECT_EQ("Const", node_lookup.at("const_node1")->op());
EXPECT_EQ(1, node_lookup.count("const_node2"));
EXPECT_EQ("Const", node_lookup.at("const_node2")->op());
EXPECT_EQ(1, node_lookup.count("const_node3"));
EXPECT_EQ("Const", node_lookup.at("const_node3")->op());
}
void TestRemoveOutputNodes() {
GraphDef graph_def;
NodeDef* const_node1 = graph_def.add_node();
const_node1->set_name("const_node1");
const_node1->set_op("Const");
NodeDef* const_node2 = graph_def.add_node();
const_node2->set_name("const_node2");
const_node2->set_op("Const");
NodeDef* add_node = graph_def.add_node();
add_node->set_name("add_node");
add_node->set_op("Add");
add_node->add_input("const_node1");
add_node->add_input("const_node2");
NodeDef* identity_node = graph_def.add_node();
identity_node->set_name("identity_node");
identity_node->set_op("Identity");
identity_node->add_input("add_node");
GraphDef result;
TransformFuncContext context;
context.input_names = {};
context.output_names = {"identity_node"};
context.params.insert(
std::pair<string, std::vector<string>>({"op", {string("Identity")}}));
TF_ASSERT_OK(RemoveNodes(graph_def, context, &result));
std::map<string, const NodeDef*> node_lookup;
MapNamesToNodes(result, &node_lookup);
EXPECT_EQ(1, node_lookup.count("add_node"));
EXPECT_EQ("const_node1", node_lookup.at("add_node")->input(0));
EXPECT_EQ("const_node2", node_lookup.at("add_node")->input(1));
EXPECT_EQ(1, node_lookup.count("identity_node"));
EXPECT_EQ("add_node", node_lookup.at("identity_node")->input(0));
}
void TestRemoveChainedNodes() {
GraphDef graph_def;
NodeDef* const_node1 = graph_def.add_node();
const_node1->set_name("const_node1");
const_node1->set_op("Const");
NodeDef* identity_node1 = graph_def.add_node();
identity_node1->set_name("identity_node1");
identity_node1->set_op("Identity");
identity_node1->add_input("const_node1");
NodeDef* identity_node2 = graph_def.add_node();
identity_node2->set_name("identity_node2");
identity_node2->set_op("Identity");
identity_node2->add_input("identity_node1");
NodeDef* identity_node3 = graph_def.add_node();
identity_node3->set_name("identity_node3");
identity_node3->set_op("Identity");
identity_node3->add_input("identity_node2");
NodeDef* const_node2 = graph_def.add_node();
const_node2->set_name("const_node2");
const_node2->set_op("Const");
NodeDef* add_node = graph_def.add_node();
add_node->set_name("add_node");
add_node->set_op("Add");
add_node->add_input("identity_node3");
add_node->add_input("const_node2");
GraphDef result;
TransformFuncContext context;
context.input_names = {};
context.output_names = {"identity_node"};
context.params.insert(
std::pair<string, std::vector<string>>({"op", {string("Identity")}}));
TF_ASSERT_OK(RemoveNodes(graph_def, context, &result));
std::map<string, const NodeDef*> node_lookup;
MapNamesToNodes(result, &node_lookup);
EXPECT_EQ(1, node_lookup.count("add_node"));
EXPECT_EQ("const_node1", node_lookup.at("add_node")->input(0));
EXPECT_EQ("const_node2", node_lookup.at("add_node")->input(1));
EXPECT_EQ(0, node_lookup.count("identity_node1"));
EXPECT_EQ(0, node_lookup.count("identity_node2"));
EXPECT_EQ(0, node_lookup.count("identity_node3"));
}
void TestRemoveMultipleInputs() {
GraphDef graph_def;
NodeDef* const_node1 = graph_def.add_node();
const_node1->set_name("const_node1");
const_node1->set_op("Const");
NodeDef* const_node2 = graph_def.add_node();
const_node2->set_name("const_node2");
const_node2->set_op("Const");
NodeDef* const_node3 = graph_def.add_node();
const_node3->set_name("const_node3");
const_node3->set_op("Const");
NodeDef* const_node4 = graph_def.add_node();
const_node4->set_name("const_node4");
const_node4->set_op("Const");
NodeDef* fake_quant_node = graph_def.add_node();
fake_quant_node->set_name("fake_quant_node");
fake_quant_node->set_op("FakeQuantWithMinMaxVars");
fake_quant_node->add_input("const_node1");
fake_quant_node->add_input("const_node2");
fake_quant_node->add_input("const_node3");
NodeDef* add_node = graph_def.add_node();
add_node->set_name("add_node");
add_node->set_op("Add");
add_node->add_input("fake_quant_node");
add_node->add_input("const_node4");
GraphDef result;
TransformFuncContext context;
context.input_names = {};
context.output_names = {"add_node"};
context.params.insert(std::pair<string, std::vector<string>>(
{"op", {string("FakeQuantWithMinMaxVars")}}));
context.params.insert(
std::pair<string, std::vector<string>>({"max_inputs", {string("3")}}));
TF_ASSERT_OK(RemoveNodes(graph_def, context, &result));
std::map<string, const NodeDef*> node_lookup;
MapNamesToNodes(result, &node_lookup);
ASSERT_EQ(1, node_lookup.count("const_node1"));
ASSERT_EQ(1, node_lookup.count("const_node4"));
ASSERT_EQ(0, node_lookup.count("fake_quant_node"));
ASSERT_EQ(1, node_lookup.count("add_node"));
EXPECT_EQ("const_node1", node_lookup.at("add_node")->input(0));
EXPECT_EQ("const_node4", node_lookup.at("add_node")->input(1));
}
};
TEST_F(RemoveNodesTest, TestRemoveNodes) { TestRemoveNodes(); }
TEST_F(RemoveNodesTest, TestRemoveOutputNodes) { TestRemoveOutputNodes(); }
TEST_F(RemoveNodesTest, TestRemoveChainedNodes) { TestRemoveChainedNodes(); }
TEST_F(RemoveNodesTest, TestRemoveMultipleInputs) {
TestRemoveMultipleInputs();
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/tools/graph_transforms/remove_nodes.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/tools/graph_transforms/remove_nodes_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
aa86dfbf-cb18-4df6-841b-8d5d3aa12dbf | cpp | tensorflow/tensorflow | tf2xla_rewriter | tensorflow/compiler/mlir/tf2xla/transforms/tf2xla_rewriter.cc | tensorflow/compiler/mlir/tf2xla/transforms/tf2xla_rewriter_test.cc | #include "tensorflow/compiler/mlir/tf2xla/transforms/tf2xla_rewriter.h"
#include <cstdint>
#include <memory>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>
#include "absl/container/inlined_vector.h"
#include "absl/memory/memory.h"
#include "absl/strings/str_replace.h"
#include "absl/strings/string_view.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/raw_ostream.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/SparseTensor/IR/SparseTensor.h"
#include "mlir/Dialect/Tensor/IR/Tensor.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/Diagnostics.h"
#include "mlir/IR/IRMapping.h"
#include "mlir/IR/Location.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/IR/Types.h"
#include "mlir/IR/Value.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#include "mlir/Transforms/DialectConversion.h"
#include "tensorflow/compiler/mlir/op_or_arg_name_mapper.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tpu_embedding_ops_registry.h"
#include "tensorflow/compiler/mlir/tensorflow/translate/export_tf_dialect_op.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/convert_tensor.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/convert_type.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/translate_utils.h"
#include "tensorflow/compiler/mlir/tf2xla/transforms/passes.h"
#include "tensorflow/compiler/tf2xla/xla_compilation_device.h"
#include "tensorflow/compiler/tf2xla/xla_context.h"
#include "tensorflow/compiler/tf2xla/xla_expression.h"
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "xla/client/xla_builder.h"
#include "xla/client/xla_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/translate/hlo_to_mhlo/hlo_function_importer.h"
#include "xla/hlo/translate/hlo_to_mhlo/hlo_to_mlir_hlo.h"
#include "xla/hlo/translate/mhlo_to_hlo/type_to_shape.h"
#include "xla/mlir_hlo/mhlo/IR/hlo_ops.h"
#include "xla/service/hlo.pb.h"
#include "xla/xla_data.pb.h"
#include "tensorflow/core/common_runtime/process_function_library_runtime.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/resource_mgr.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/protobuf/config.pb.h"
#include "tensorflow/core/public/session_options.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
namespace mlir {
namespace mhlo {
namespace {
using ::mlir::ModuleOp;
using ::tensorflow::Tensor;
using ::tsl::StatusOr;
using ::xla::XlaComputation;
class OpOrArgLocNameMapperWithoutInvalidCharacters
: public tensorflow::OpOrArgLocNameMapper {
public:
OpOrArgLocNameMapperWithoutInvalidCharacters() = default;
~OpOrArgLocNameMapperWithoutInvalidCharacters() override = default;
protected:
std::string GetName(tensorflow::OpOrVal op_or_val) override {
std::string name = OpOrArgLocNameMapper::GetName(op_or_val);
return absl::StrReplaceAll(name, {{";", "."}});
}
};
static std::unique_ptr<tensorflow::StaticDeviceMgr> CreateDeviceMgr(
const std::string& device_type) {
tensorflow::XlaOpRegistry::RegisterCompilationKernels();
auto device = std::make_unique<tensorflow::XlaCompilationDevice>(
tensorflow::SessionOptions(), tensorflow::DeviceType(device_type));
return std::make_unique<tensorflow::StaticDeviceMgr>(std::move(device));
}
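// The rewriter expects the legalized computation to return all of its
// results through a single tuple at the root.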
bool RootInstructionIsTuple(const xla::HloModule& hlo_module) {
xla::HloInstruction* root_instruction =
hlo_module.entry_computation()->root_instruction();
return root_instruction->opcode() == xla::HloOpcode::kTuple;
}
};
LogicalResult Tf2XlaRewriter::RewriteOp(Operation* op,
PatternRewriter& rewriter,
const std::string& device_type) {
Tf2XlaRewriter tf2xla_rewriter(op, rewriter, device_type);
return tf2xla_rewriter.LegalizeOp();
}
Tf2XlaRewriter::Tf2XlaRewriter(Operation* op, PatternRewriter& rewriter,
const std::string& device_type)
: op_(op),
device_type_(device_type),
rewriter_(rewriter),
name_mapper_(
std::make_unique<OpOrArgLocNameMapperWithoutInvalidCharacters>()),
context_(nullptr),
xla_builder_(op_->getName().getStringRef().str()) {}
Tf2XlaRewriter::~Tf2XlaRewriter() {
if (context_) context_->Unref();
}
absl::StatusOr<mhlo::TupleOp> Tf2XlaRewriter::ImportXlaComputation(
XlaComputation& computation) {
xla::DebugOptions debug_options;
TF_ASSIGN_OR_RETURN(auto hlo_module_config,
xla::HloModule::CreateModuleConfigFromProto(
computation.proto(), debug_options));
TF_ASSIGN_OR_RETURN(
std::unique_ptr<xla::HloModule> hlo_module,
xla::HloModule::CreateFromProto(computation.proto(), hlo_module_config));
if (!RootInstructionIsTuple(*hlo_module)) {
return tsl::errors::InvalidArgument("Imported XLA Root is not a tuple op");
}
if (op_->getNumOperands() !=
hlo_module->entry_computation()->num_parameters()) {
return tsl::errors::InvalidArgument(
"Entry computation does not have equal number of parameters to op "
"operands");
}
ModuleOp mlir_module = op_->getParentOfType<ModuleOp>();
mlir::OpBuilder builder(op_);
mlir::SymbolTable symbol_table(mlir_module);
llvm::SmallVector<mlir::Value> arguments;
for (int i = 0; i < op_->getNumOperands(); i++) {
arguments.push_back(op_->getOperand(i));
}
TF_ASSIGN_OR_RETURN(
mlir::Value root_value,
xla::HloFunctionImporter::ImportInstructions(
*hlo_module->entry_computation(), arguments, symbol_table, &builder));
mhlo::TupleOp root_tuple =
mlir::dyn_cast_or_null<mhlo::TupleOp>(root_value.getDefiningOp());
if (!root_tuple) {
return tsl::errors::InvalidArgument(
"Imported XLA Root Value is not a tuple op");
}
return root_tuple;
}
LogicalResult Tf2XlaRewriter::PrepareParams() {
  context_ = new tensorflow::XlaContext(/*compiler=*/nullptr, &xla_builder_,
                                        /*graph=*/nullptr);
context_->Ref();
device_mgr_ = CreateDeviceMgr(device_type_);
if (!device_mgr_) return failure();
device_ = device_mgr_->ListDevices().front();
params_.device = device_;
params_.resource_manager = device_->resource_manager();
auto cleanup = [](const std::string& name) {};
step_container_ = std::make_unique<tensorflow::ScopedStepContainer>(
      /*step_id=*/0, cleanup);
absl::Status status = step_container_->Create(
device_->resource_manager(),
tensorflow::XlaContext::kXlaContextResourceName, context_);
if (!status.ok()) {
return emitRemark(op_->getLoc())
<< "failed to create XlaContext resource: " << status.ToString();
}
params_.step_container = step_container_.get();
absl::StatusOr<int64_t> version_or = tensorflow::GetTfGraphProducerVersion(
op_->getParentOfType<mlir::ModuleOp>());
if (!version_or.ok()) {
return emitError(op_->getLoc()) << version_or.status().ToString();
}
flib_def_ = std::make_unique<tensorflow::FunctionLibraryDefinition>(
tensorflow::OpRegistry::Global(), tensorflow::FunctionDefLibrary());
pflr_ = std::make_unique<tensorflow::ProcessFunctionLibraryRuntime>(
      device_mgr_.get(), tensorflow::Env::Default(), /*config=*/nullptr,
version_or.value(), flib_def_.get(), tensorflow::OptimizerOptions());
params_.function_library = pflr_->GetFLR(device_->name());
return success();
}
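// A type is bounded if it is ranked and every dynamic dimension carries an
// explicit bound in the type's encoding.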
bool IsBounded(Type ty) {
auto ranked_ty = mlir::dyn_cast<RankedTensorType>(ty);
if (!ranked_ty) return false;
if (ranked_ty.hasStaticShape()) return true;
auto encoding =
mlir::dyn_cast_or_null<TypeExtensionsAttr>(ranked_ty.getEncoding());
if (!encoding) return false;
for (int i = 0; i < ranked_ty.getRank(); ++i) {
if (ranked_ty.isDynamicDim(i) &&
encoding.getBounds()[i] == ShapedType::kDynamic) {
return false;
}
}
return true;
}
bool HasSymbolRefAttr(Operation* op) {
for (const auto& attr : op->getAttrs()) {
Attribute attr_value = attr.getValue();
if (mlir::isa<SymbolRefAttr>(attr_value)) {
return true;
} else if (auto array_attr = mlir::dyn_cast<ArrayAttr>(attr_value)) {
if (!array_attr.empty() &&
mlir::isa<SymbolRefAttr>(*array_attr.begin())) {
return true;
}
}
}
return false;
}
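// Converts each MLIR operand into an XlaExpression and wraps it in a
// placeholder Tensor so the tf2xla kernel can consume it as a regular input.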
LogicalResult Tf2XlaRewriter::PrepareKernelInputs(
const llvm::SmallDenseSet<int>& required_consts,
std::vector<tensorflow::XlaExpression>& expressions,
std::vector<tensorflow::Tensor>& tensors,
std::vector<tensorflow::TensorValue>& inputs) {
for (auto it : llvm::enumerate(op_->getOperands())) {
Value operand = it.value();
size_t idx = it.index();
tensorflow::XlaExpression expr = GetExprForOperand(operand, op_, idx);
tensorflow::XlaExpression::Kind kind = expr.kind();
if (kind == tensorflow::XlaExpression::Kind::kInvalid) return failure();
expressions.push_back(expr);
if (!tensorflow::DataTypeCanUseMemcpy(expr.dtype())) {
return op_->emitRemark()
<< "skipping legalization due to unsupported type "
<< operand.getType();
}
auto shape_or = expr.GetShape();
if (!shape_or.ok()) {
return op_->emitRemark()
<< "failed to get shape for expression. " << expr.HumanString();
}
tensors.emplace_back(
device_->GetAllocator(tensorflow::AllocatorAttributes()), expr.dtype(),
shape_or.value());
tensorflow::Tensor& tensor = tensors.back();
tensorflow::XlaExpression::AssignExpressionToTensor(expr, &tensor);
inputs.emplace_back(&tensor);
}
return success();
}
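// Legalizes the wrapped TF op by converting it to a NodeDef, running the
// matching tf2xla kernel to emit HLO, importing the result back as MHLO, and
// replacing the op with the imported values.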
LogicalResult Tf2XlaRewriter::LegalizeOp() {
for (Type ty : op_->getOperandTypes()) {
auto ranked_ty = mlir::dyn_cast<ShapedType>(ty);
if (!IsBounded(ranked_ty)) {
return op_->emitRemark()
<< "lowering requires bounded tensor operands " << ranked_ty;
}
}
if (HasSymbolRefAttr(op_)) {
return op_->emitRemark() << "ops with symbol references are not supported";
}
  auto nodedef_or = tensorflow::ConvertTFDialectOpToNodeDef(
      op_, name_mapper_->GetUniqueName(op_),
      /*ignore_unregistered_attrs=*/true);
if (!nodedef_or.ok()) {
return op_->emitRemark() << "failed to convert op to NodeDef: "
<< nodedef_or.status().ToString();
}
if (failed(PrepareParams())) return failure();
std::shared_ptr<const tensorflow::NodeProperties> props;
absl::Status status = tensorflow::NodeProperties::CreateFromNodeDef(
*nodedef_or.value(),
params_.function_library->GetFunctionLibraryDefinition(), &props);
if (!status.ok()) {
return op_->emitRemark()
<< "failed to create NodeProperties: " << status.ToString();
}
tensorflow::OpKernel* op_kernel_raw;
status = params_.function_library->CreateKernel(props, &op_kernel_raw);
if (!status.ok()) {
return op_->emitRemark()
<< "failed to create tf2xla kernel: " << status.ToString();
}
auto op_kernel = absl::WrapUnique(op_kernel_raw);
std::vector<int> required_constants;
status = tensorflow::XlaOpRegistry::CompileTimeConstantInputs(
*op_kernel, &required_constants);
if (!status.ok()) {
return op_->emitRemark()
<< "failed to compute required constants: " << status.ToString();
}
llvm::SmallDenseSet<int> required_consts;
required_consts.insert(required_constants.begin(), required_constants.end());
std::vector<tensorflow::XlaExpression> expressions;
std::vector<tensorflow::Tensor> tensors;
std::vector<tensorflow::TensorValue> inputs;
expressions.reserve(op_->getNumOperands());
tensors.reserve(op_->getNumOperands());
inputs.reserve(op_->getNumOperands());
if (failed(
PrepareKernelInputs(required_consts, expressions, tensors, inputs)))
return failure();
params_.inputs = inputs;
params_.op_kernel = op_kernel.get();
llvm::SmallVector<tensorflow::AllocatorAttributes, 4> output_attr(
op_->getNumResults());
params_.output_attr_array = output_attr.data();
tensorflow::OpKernelContext op_context(¶ms_, op_->getNumResults());
device_->Compute(params_.op_kernel, &op_context);
status = op_context.status();
if (!status.ok()) {
return op_->emitRemark()
<< "compilation to HLO failed: " << status.ToString();
}
if (failed(VerifyOpResults(op_context))) return failure();
absl::StatusOr<mhlo::TupleOp> tuple_result_or_status =
CompileWithHloImporter(op_context);
if (!tuple_result_or_status.ok()) {
return op_->emitRemark() << tuple_result_or_status.status().ToString();
}
mhlo::TupleOp tuple_result = tuple_result_or_status.value();
llvm::SmallVector<Value> output_values;
if (failed(GetKernelOutputs(op_context, tuple_result, output_values))) {
return failure();
}
rewriter_.replaceOp(op_, output_values);
return success();
}
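// Collects the kernel's output expressions into a tuple, builds the XLA
// computation, and imports it back into the module as MHLO.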
absl::StatusOr<mhlo::TupleOp> Tf2XlaRewriter::CompileWithHloImporter(
tensorflow::OpKernelContext& op_context) {
std::vector<xla::XlaOp> output_values;
for (int i = 0, e = op_->getNumResults(); i < e; i++) {
tensorflow::Tensor* output = op_context.mutable_output(i);
const tensorflow::XlaExpression* expr =
tensorflow::XlaExpression::CastExpressionFromTensor(*output);
output_values.push_back(expr->AsXlaOp(&xla_builder_));
}
absl::Span<const xla::XlaOp> return_values(output_values);
xla::XlaOp root_value = xla::Tuple(&xla_builder_, return_values);
  TF_ASSIGN_OR_RETURN(
      XlaComputation computation,
      xla_builder_.Build(root_value, /*remove_dynamic_dimensions=*/false));
return ImportXlaComputation(computation);
}
mlir::LogicalResult Tf2XlaRewriter::VerifyOpResults(
tensorflow::OpKernelContext& op_context) {
for (int i = 0, e = op_->getNumResults(); i < e; i++) {
tensorflow::Tensor* output = op_context.mutable_output(i);
const tensorflow::XlaExpression* expr =
tensorflow::XlaExpression::CastExpressionFromTensor(*output);
if (expr->kind() != tensorflow::XlaExpression::Kind::kXlaOp &&
expr->kind() != tensorflow::XlaExpression::Kind::kConstant) {
return op_->emitRemark(absl::StrCat(
"expects XlaExpression of kind kXlaOp or kConstant in compiled "
"output index ",
i));
}
}
return success();
}
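// Replaces the imported root tuple with its individual operands so the
// original op's results can be rewired directly.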
mlir::LogicalResult Tf2XlaRewriter::UnpackTupleResults(
mhlo::TupleOp tuple_result, llvm::SmallVector<Value>& outputs) {
if (tuple_result->getNumOperands() != op_->getNumResults()) {
    return op_->emitRemark() << "Translated TF2XLA tuple has a different "
                                "number of results than the original op";
}
for (int i = 0; i < tuple_result->getNumOperands(); i++) {
outputs.push_back(tuple_result->getOperand(i));
}
tuple_result.getOperation()->erase();
return success();
}
mlir::LogicalResult Tf2XlaRewriter::GetKernelOutputs(
tensorflow::OpKernelContext& op_context, mhlo::TupleOp tuple_results,
llvm::SmallVector<Value>& outputs) {
outputs.reserve(op_->getNumResults());
return UnpackTupleResults(tuple_results, outputs);
}
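// Returns a constant XlaExpression when the operand is defined by a constant
// op; otherwise binds the operand to a fresh XLA parameter.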
tensorflow::XlaExpression Tf2XlaRewriter::GetExprForOperand(
Value operand, Operation* op, int64_t operand_index) {
ElementsAttr const_attr;
auto defining_op = operand.getDefiningOp();
::xla::XlaOp xla_op = xla::Parameter(&xla_builder_, operand_index,
xla::TypeToShape(operand.getType()),
std::to_string(operand_index));
if (defining_op && matchPattern(defining_op, m_Constant(&const_attr))) {
tensorflow::Tensor tensor;
auto status = tensorflow::ConvertToTensor(const_attr, &tensor);
if (!status.ok()) {
      op->emitRemark() << "skipping legalization due to failed const "
                          "conversion: "
                       << status.ToString();
return tensorflow::XlaExpression::Invalid();
}
return tensorflow::XlaExpression::Constant(tensor);
}
tensorflow::DataType dtype;
auto status = tensorflow::ConvertToDataType(operand.getType(), &dtype);
if (!status.ok()) {
op->emitRemark() << "skipping legalization due to " << status.ToString();
return tensorflow::XlaExpression::Invalid();
}
return tensorflow::XlaExpression::XlaOp(xla_op, dtype);
}
}
} | #include "tensorflow/compiler/mlir/tf2xla/transforms/tf2xla_rewriter.h"
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/memory/memory.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/raw_ostream.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/IR/Visitors.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#include "mlir/Transforms/DialectConversion.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_dialect.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
#include "tensorflow/compiler/mlir/tf2xla/transforms/test_utils.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "xla/client/xla_builder.h"
#include "xla/client/xla_computation.h"
#include "xla/mlir_hlo/mhlo/IR/hlo_ops.h"
#include "xla/shape_util.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/xla_data.pb.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
namespace mlir {
namespace mhlo {
using ::mlir::LogicalResult;
using ::mlir::ModuleOp;
using ::mlir::OpBuilder;
using ::mlir::Operation;
using ::mlir::func::FuncOp;
using ::tsl::Status;
using ::tsl::StatusOr;
using ::xla::ReplicaGroup;
using ::xla::ShapeUtil;
using ::xla::XlaBuilder;
using ::xla::XlaComputation;
using ::xla::XlaOp;
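// Default test input: a module containing a single tf.Unpack op.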
static constexpr char kMlirModuleStr[] = R"(
module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 1442 : i32}} {
func.func @main(%arg0: tensor<3xi64> {tf._user_specified_name = "resource", tf.aliasing_output = 3 : i64}) -> () attributes {tf.entry_function = {control_outputs = "stateful_normal/RngReadAndSkip,stateful_uniform/RngReadAndSkip,stateful_uniform_full_int/RngReadAndSkip", inputs = "stateful_normal_rngreadandskip_resource", outputs = "identity_RetVal,identity_1_RetVal,identity_2_RetVal"}} {
%0:3 = "tf.Unpack"(%arg0) {axis = 0 : i64} : (tensor<3xi64>) -> (tensor<i64>, tensor<i64>, tensor<i64>)
return
}
})";
XlaComputation GetTestXlaComputation() {
XlaBuilder xla_builder("test");
auto param =
Parameter(&xla_builder, 0, ShapeUtil::MakeScalarShape(xla::F32), "a");
XlaOp add = xla::Add(param, xla::ConstantR0<float>(&xla_builder, 2.0));
std::vector<XlaOp> tuple_values;
tuple_values.push_back(add);
xla::Tuple(&xla_builder, tuple_values);
return xla_builder.Build().value();
}
class EmptyPatternRewriter : public mlir::PatternRewriter {
public:
explicit EmptyPatternRewriter(const OpBuilder& other_builder)
: mlir::PatternRewriter(other_builder) {}
~EmptyPatternRewriter() override = default;
};
class Tf2XlaRewriterTestPeer {
public:
explicit Tf2XlaRewriterTestPeer() = delete;
explicit Tf2XlaRewriterTestPeer(mlir::Operation* op)
: op_builder_(op),
empty_rewriter_(op_builder_),
        tf2xla_rewriter_(op, empty_rewriter_,
                         /*device_type=*/"XLA_CPU_JIT") {}
absl::StatusOr<TupleOp> ImportXlaComputationIntoModule(
XlaComputation& computation) {
return tf2xla_rewriter_.ImportXlaComputation(computation);
}
private:
OpBuilder op_builder_;
EmptyPatternRewriter empty_rewriter_;
Tf2XlaRewriter tf2xla_rewriter_;
};
class Tf2XlaRewriterTest : public ::testing::Test {
public:
void SetUp() override {
tensorflow::XlaOpRegistry::RegisterCompilationKernels();
}
Status CreateMlirModule(std::string module_string = kMlirModuleStr) {
TF_ASSIGN_OR_RETURN(
module_, test::GetMlirModuleFromString(module_string, &context_));
context_.loadAllAvailableDialects();
return absl::OkStatus();
}
Status LegalizeSingleOp(Operation& op) {
SourceMgrDiagnosticHandler sourceMgrHandler(source_manager_, &context_);
OpBuilder op_builder(&op);
EmptyPatternRewriter pattern_rewriter(op_builder);
LogicalResult result =
        Tf2XlaRewriter::RewriteOp(&op, pattern_rewriter,
                                  /*device_type=*/"XLA_CPU_JIT");
if (!result.succeeded()) {
return tsl::errors::Internal("Failed to rewrite op");
}
return absl::OkStatus();
}
Status LegalizeModule(std::string module_string = kMlirModuleStr) {
TF_EXPECT_OK(CreateMlirModule(module_string));
FuncOp main = module_->lookupSymbol<mlir::func::FuncOp>("main");
if (!main) {
return tsl::errors::InvalidArgument("Could not find a main function");
}
WalkResult walk_result = main.walk([&](Operation* op) {
if (op->getDialect()->getNamespace() !=
TF::TensorFlowDialect::getDialectNamespace()) {
return WalkResult::advance();
}
if (!LegalizeSingleOp(*op).ok()) {
return WalkResult::interrupt();
}
return WalkResult::advance();
});
if (walk_result.wasInterrupted()) {
return tsl::errors::Internal("Could not legalize all ops");
}
return absl::OkStatus();
}
mlir::func::FuncOp GetMainFunc() {
func::FuncOp main_func = module_->lookupSymbol<mlir::func::FuncOp>("main");
EXPECT_TRUE(main_func);
return main_func;
}
mlir::Operation& GetFirstOpFromMain() {
mlir::func::FuncOp main_func = GetMainFunc();
return main_func.getBody().front().front();
}
absl::StatusOr<TupleOp> ImportXlaComputationIntoModule(
XlaComputation& computation) {
SourceMgrDiagnosticHandler sourceMgrHandler(source_manager_, &context_);
mlir::Operation& first_op = GetFirstOpFromMain();
Tf2XlaRewriterTestPeer test_peer(&first_op);
return test_peer.ImportXlaComputationIntoModule(computation);
}
protected:
MLIRContext context_;
OwningOpRef<ModuleOp> module_;
llvm::SourceMgr source_manager_;
};
TEST_F(Tf2XlaRewriterTest, LegalizesOpWithTf2xlaHloImporter) {
TF_EXPECT_OK(LegalizeModule());
int num_tuple_ops = 0;
module_->walk([&num_tuple_ops](TupleOp tuple_op) { num_tuple_ops += 1; });
EXPECT_EQ(num_tuple_ops, 0);
}
TEST_F(Tf2XlaRewriterTest, ImportsXlaComputationIntoModule) {
TF_ASSERT_OK(CreateMlirModule());
XlaComputation computation = GetTestXlaComputation();
TF_ASSERT_OK_AND_ASSIGN(TupleOp root_tuple,
ImportXlaComputationIntoModule(computation));
ModuleOp parent_module =
root_tuple.getOperation()->getParentOfType<ModuleOp>();
EXPECT_EQ(parent_module, *module_);
}
TEST_F(Tf2XlaRewriterTest, FailsWithoutRootTuple) {
TF_ASSERT_OK(CreateMlirModule());
XlaBuilder xla_builder("test_fail");
xla::Add(xla::ConstantR0<float>(&xla_builder, 1.0),
xla::ConstantR0<float>(&xla_builder, 2.0));
XlaComputation bad_computation = xla_builder.Build().value();
EXPECT_FALSE(ImportXlaComputationIntoModule(bad_computation).ok());
}
TEST_F(Tf2XlaRewriterTest, ImportsSingleComputation) {
XlaBuilder builder("test_builder");
XlaComputation to_apply;
{
auto sub_builder = builder.CreateSubBuilder("add");
auto arg0 = Parameter(sub_builder.get(), 0,
ShapeUtil::MakeScalarShape(xla::F32), "x");
auto arg1 = Parameter(sub_builder.get(), 1,
ShapeUtil::MakeScalarShape(xla::F32), "y");
Add(arg0, arg1);
TF_ASSERT_OK_AND_ASSIGN(to_apply, sub_builder->Build());
}
auto x = Parameter(&builder, 0, ShapeUtil::MakeShape(xla::F32, {4, 16}), "x");
ReplicaGroup group;
group.add_replica_ids(0);
group.add_replica_ids(1);
XlaOp reduce_scatter =
      ReduceScatter(x, to_apply, /*scatter_dimension=*/1, /*shard_count=*/2,
                    {group});
std::vector<XlaOp> tuple_values;
tuple_values.push_back(reduce_scatter);
xla::Tuple(&builder, tuple_values);
TF_ASSERT_OK_AND_ASSIGN(XlaComputation computation, builder.Build());
EXPECT_EQ(computation.proto().computations_size(), 2);
TF_ASSERT_OK(CreateMlirModule());
TF_ASSERT_OK_AND_ASSIGN(TupleOp root_tuple,
ImportXlaComputationIntoModule(computation));
EXPECT_TRUE(root_tuple);
int num_func_ops = 0;
module_->walk([&num_func_ops](func::FuncOp func_op) { num_func_ops++; });
EXPECT_EQ(num_func_ops, 1);
}
TEST_F(Tf2XlaRewriterTest, InsertsConstantParameters) {
static constexpr char kModuleWithConstParam[] = R"(
module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 1442 : i32}} {
func.func @main(%arg0: tensor<2xf32>) -> tensor<2xf32> {
%0 = "tf.Const"() {value = dense<1.42> : tensor<2xf32>} : () -> tensor<2xf32>
%1 = "tf.Atan2"(%arg0, %0) : (tensor<2xf32>, tensor<2xf32>) -> tensor<2xf32>
func.return %0 : tensor<2xf32>
}
})";
TF_ASSERT_OK(LegalizeModule(kModuleWithConstParam));
}
TEST_F(Tf2XlaRewriterTest, DoesntEnforceCompileTimeConstantCheck) {
static constexpr char kModuleWithNonConstParam[] = R"(
module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 1610 : i32}} {
func.func @main(%arg0: tensor<3x3x10xbf16>, %arg1: tensor<3xi32>) -> tensor<1x?x4xbf16> attributes {allow_soft_placement = false, tf.entry_function = {control_outputs = "", inputs = "_arg0,_arg1,_arg2", outputs = "_retval0"}} {
%cst = "tf.Const"() {value = dense<[1, -1, 4]> : tensor<3xi32>} : () -> tensor<3xi32>
%0 = "tf.Slice"(%arg0, %arg1, %cst) {_XlaHasReferenceVars = false, _xla_inferred_shapes = [#tf_type.shape<1x?x4>], device = "/job:localhost/replica:0/task:0/device:TPU:0"} : (tensor<3x3x10xbf16>, tensor<3xi32>, tensor<3xi32>) -> tensor<1x?x4xbf16>
return %0 : tensor<1x?x4xbf16>
}
})";
TF_ASSERT_OK(LegalizeModule(kModuleWithNonConstParam));
}
TEST_F(Tf2XlaRewriterTest, CreatesDefaultValues) {
static constexpr char kModuleWithOpWithoutValuesThatShouldBeDefaulted[] = R"(
module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 1610 : i32}} {
func.func @main() -> tensor<1x2x3x4xf32> attributes {allow_soft_placement = false, tf.entry_function = {control_outputs = "", inputs = "_arg0,_arg1,_arg2", outputs = "_retval0"}} {
%cst = "tf.Const"() {value = dense<[1, 2, 3, 4]> : tensor<4xi32>} : () -> tensor<4xi32>
%0 = "tf.RandomUniform"(%cst) : (tensor<4xi32>) -> tensor<1x2x3x4xf32>
return %0 : tensor<1x2x3x4xf32>
}
})";
TF_ASSERT_OK(LegalizeModule(kModuleWithOpWithoutValuesThatShouldBeDefaulted));
}
TEST_F(Tf2XlaRewriterTest, OpWithLocationDoesntBreakNodeDefName) {
static constexpr char kModuleWithOpWithoutValuesThatShouldBeDefaulted[] =
R"mlir(
module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 1610 : i32}} {
func.func @main(%arg0: tensor<2xf32>) -> tensor<2xf32> {
%0 = "tf.Exp"(%arg0) : (tensor<2xf32>) -> tensor<2xf32> loc(fused["exp"("exp"), "exp"])
func.return %0 : tensor<2xf32>
}
})mlir";
TF_ASSERT_OK(LegalizeModule(kModuleWithOpWithoutValuesThatShouldBeDefaulted));
}
TEST_F(Tf2XlaRewriterTest, ErrorsWithInvalidNumberOfParametersToArgs) {
XlaBuilder builder("test_builder");
XlaComputation to_apply;
{
auto sub_builder = builder.CreateSubBuilder("add");
auto arg0 = Parameter(sub_builder.get(), 0,
ShapeUtil::MakeScalarShape(xla::F32), "x");
auto arg1 = Parameter(sub_builder.get(), 1,
ShapeUtil::MakeScalarShape(xla::F32), "y");
Add(arg0, arg1);
TF_ASSERT_OK_AND_ASSIGN(to_apply, sub_builder->Build());
}
auto a = Parameter(&builder, 0, ShapeUtil::MakeScalarShape(xla::F32), "a");
auto b = Parameter(&builder, 1, ShapeUtil::MakeScalarShape(xla::F32), "b");
XlaOp call_op = xla::Call(&builder, to_apply, {a, b});
std::vector<XlaOp> tuple_values;
tuple_values.push_back(call_op);
xla::Tuple(&builder, tuple_values);
TF_ASSERT_OK_AND_ASSIGN(XlaComputation computation, builder.Build());
EXPECT_EQ(computation.proto().computations_size(), 2);
TF_ASSERT_OK(CreateMlirModule());
absl::StatusOr<TupleOp> status_or_tuple_op =
ImportXlaComputationIntoModule(computation);
EXPECT_FALSE(status_or_tuple_op.ok());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tf2xla/transforms/tf2xla_rewriter.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tf2xla/transforms/tf2xla_rewriter_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
824e9a92-d73e-40d9-bb47-186cbd4b9da5 | cpp | tensorflow/tensorflow | host_memory_transfer_asyncifier | third_party/xla/xla/service/host_memory_transfer_asyncifier.cc | third_party/xla/xla/service/host_memory_transfer_asyncifier_test.cc | #include "xla/service/host_memory_transfer_asyncifier.h"
#include <cstdint>
#include "absl/container/flat_hash_set.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
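// Rewrites host<->device memory transfers (dynamic-slice, dynamic-update-slice,
// and copy) into asynchronous start/done instruction pairs so they can overlap
// with other work.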
class HostMemoryTransferAsyncifierVisitor : public DfsHloVisitorWithDefault {
public:
explicit HostMemoryTransferAsyncifierVisitor(int64_t host_memory_space_color)
: kHostMemorySpaceColor(host_memory_space_color) {}
bool Changed() const { return changed_; }
absl::Status DefaultAction(HloInstruction* hlo_instruction) override {
return absl::OkStatus();
}
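  // Converts a dynamic-slice that reads from host memory into device memory
  // into an async-start/async-done pair.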
absl::Status HandleDynamicSlice(HloInstruction* dynamic_slice) override {
HloInstruction* dynamic_slice_operand = dynamic_slice->mutable_operand(0);
if (!dynamic_slice->shape().has_layout()) {
return InternalStrCat(dynamic_slice->name(), " does not have a layout.");
}
if (!dynamic_slice_operand->shape().has_layout()) {
return InternalStrCat(dynamic_slice->name(), "'s operand, ",
dynamic_slice_operand->name(),
", does not have a layout.");
}
VLOG(3) << absl::StreamFormat(
"\"%s\" from S(%d) to S(%d)", dynamic_slice->name(),
dynamic_slice_operand->shape().layout().memory_space(),
dynamic_slice->shape().layout().memory_space());
if (dynamic_slice_operand->shape().layout().memory_space() !=
kHostMemorySpaceColor) {
return absl::OkStatus();
}
if (dynamic_slice->shape().layout().memory_space() !=
xla::Layout::kDefaultMemorySpace) {
return absl::OkStatus();
}
const Shape context_shape = ShapeUtil::MakeScalarShape(U32);
const Shape transfer_bytes_shape = ShapeUtil::MakeScalarShape(S32);
TF_ASSIGN_OR_RETURN(
HloInstruction * async_done,
dynamic_slice->parent()->CreateAsyncInstructions(
dynamic_slice, {context_shape, transfer_bytes_shape}));
VLOG(1) << "DynamicSlice \"" << dynamic_slice->ToString()
<< "\" is slicing from host memory. Converting to async "
<< async_done->ToString();
MarkAsChanged();
return absl::OkStatus();
}
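  // Converts a dynamic-update-slice that writes device-memory data into a
  // host-memory buffer into an async-start/async-done pair.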
absl::Status HandleDynamicUpdateSlice(
HloInstruction* dynamic_update_slice) override {
HloInstruction* dynamic_update_slice_operand =
dynamic_update_slice->mutable_operand(0);
HloInstruction* dynamic_update_slice_update =
dynamic_update_slice->mutable_operand(1);
if (!dynamic_update_slice->shape().has_layout()) {
return InternalStrCat(dynamic_update_slice->name(),
" does not have a layout.");
}
if (!dynamic_update_slice_operand->shape().has_layout()) {
return InternalStrCat(dynamic_update_slice->name(), "'s operand, ",
dynamic_update_slice_operand->name(),
", does not have a layout.");
}
if (!dynamic_update_slice_update->shape().has_layout()) {
return InternalStrCat(dynamic_update_slice->name(), "'s update, ",
dynamic_update_slice_update->name(),
", does not have a layout.");
}
if (dynamic_update_slice_update->shape().layout().memory_space() !=
xla::Layout::kDefaultMemorySpace) {
return absl::OkStatus();
}
if (dynamic_update_slice->shape().layout().memory_space() !=
kHostMemorySpaceColor) {
return absl::OkStatus();
}
if (dynamic_update_slice_operand->shape().layout().memory_space() !=
dynamic_update_slice->shape().layout().memory_space()) {
      return InternalStrCat(
          "Expected ", dynamic_update_slice_operand->name(),
          " to be in the same memory space as the dynamic-update-slice.");
}
const Shape context_shape = ShapeUtil::MakeScalarShape(U32);
TF_ASSIGN_OR_RETURN(HloInstruction * async_done,
dynamic_update_slice->parent()->CreateAsyncInstructions(
dynamic_update_slice, {context_shape}));
VLOG(1) << "DynamicUpdateSlice \"" << dynamic_update_slice->ToString()
<< "\" is slicing into host memory space. Converting to async "
<< async_done->ToString();
MarkAsChanged();
return absl::OkStatus();
}
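  // Converts a copy between device memory and host memory (in either
  // direction) into an async-start/async-done pair.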
absl::Status HandleCopy(HloInstruction* copy) override {
HloInstruction* operand = copy->mutable_operand(0);
if (!operand->shape().has_layout()) {
return InternalStrCat(operand->name(), " does not have a layout.");
}
if (!copy->shape().has_layout()) {
return InternalStrCat(copy->name(), " does not have a layout.");
}
const auto copy_src_memory_space = operand->shape().layout().memory_space();
const auto copy_dst_memory_space = copy->shape().layout().memory_space();
if (!((copy_src_memory_space == kHostMemorySpaceColor &&
copy_dst_memory_space == xla::Layout::kDefaultMemorySpace) ||
(copy_src_memory_space == xla::Layout::kDefaultMemorySpace &&
copy_dst_memory_space == kHostMemorySpaceColor))) {
VLOG(2)
<< "Skipping copy because it is not a copy between device memory and "
"host memory: "
<< copy->ToString();
return absl::OkStatus();
}
const Shape context_shape = ShapeUtil::MakeScalarShape(U32);
TF_ASSIGN_OR_RETURN(
HloInstruction * async_done,
copy->parent()->CreateAsyncInstructions(copy, {context_shape}));
VLOG(1)
<< "Copy \"" << copy->name()
<< "\" is between device and host memory space. Converting to async "
<< async_done->ToString();
MarkAsChanged();
return absl::OkStatus();
}
private:
const int64_t kHostMemorySpaceColor;
bool changed_ = false;
void MarkAsChanged() { changed_ = true; }
};
}
absl::StatusOr<bool> HostMemoryTransferAsyncifier::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
HostMemoryTransferAsyncifierVisitor visitor(kHostMemorySpaceColor);
for (HloComputation* computation : module->MakeNonfusionComputations()) {
TF_RETURN_IF_ERROR(computation->Accept(&visitor));
}
return visitor.Changed();
}
} | #include "xla/service/host_memory_transfer_asyncifier.h"
#include <cstdint>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/util.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
namespace m = ::xla::match;
class HostMemoryTransferAsyncifierTest : public HloTestBase {
protected:
absl::StatusOr<bool> RunAsyncifier(absl::string_view hlo_string) {
TF_ASSIGN_OR_RETURN(auto module, ParseAndReturnVerifiedModule(hlo_string));
TF_ASSIGN_OR_RETURN(bool changed, RunAsyncifier(module.get()));
return changed;
}
absl::StatusOr<bool> RunAsyncifier(HloModule* module) {
TF_EXPECT_OK(verifier().Run(module).status());
if (module->has_schedule()) {
return absl::InternalError("Expected a non-scheduled module");
}
HostMemoryTransferAsyncifier asyncifier(kHostMemorySpaceColor);
return asyncifier.Run(module);
}
private:
static constexpr int64_t kHostMemorySpaceColor{5};
};
TEST_F(HostMemoryTransferAsyncifierTest, DynamicUpdateSliceFromHostToHost) {
const std::string& hlo_string = R"(
HloModule MyModule
ENTRY main {
host_operand = f32[32,1,1]{2,1,0:T(2,128)S(5)} parameter(0)
host_update = f32[1,1,1]{2,1,0:T(2,128)S(5)} parameter(1)
constant_0 = s32[] constant(0)
ROOT dynamic-update-slice = f32[32,1,1]{2,1,0:T(2,128)S(5)} dynamic-update-slice(host_operand, host_update, constant_0, constant_0, constant_0)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunAsyncifier(module.get()));
EXPECT_FALSE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::DynamicUpdateSlice()));
}
TEST_F(HostMemoryTransferAsyncifierTest, DynamicUpdateSliceFromDeviceToDevice) {
const std::string& hlo_string = R"(
HloModule MyModule
ENTRY main {
operand = f32[32,1,1]{2,1,0:T(2,128)} parameter(0)
update = f32[1,1,1]{2,1,0:T(2,128)} parameter(1)
constant_0 = s32[] constant(0)
ROOT dynamic-update-slice = f32[32,1,1]{2,1,0:T(2,128)} dynamic-update-slice(operand, update, constant_0, constant_0, constant_0)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunAsyncifier(module.get()));
EXPECT_FALSE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::DynamicUpdateSlice()));
}
TEST_F(HostMemoryTransferAsyncifierTest, DynamicUpdateSliceFromHostToDevice) {
const std::string& hlo_string = R"(
HloModule MyModule
ENTRY main {
operand = f32[32,1,1]{2,1,0:T(2,128)} parameter(0)
host_update = f32[1,1,1]{2,1,0:T(2,128)S(5)} parameter(1)
constant_0 = s32[] constant(0)
ROOT dynamic-update-slice = f32[32,1,1]{2,1,0:T(2,128)} dynamic-update-slice(operand, host_update, constant_0, constant_0, constant_0)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunAsyncifier(module.get()));
EXPECT_FALSE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::DynamicUpdateSlice()));
}
TEST_F(HostMemoryTransferAsyncifierTest, DynamicUpdateSliceFromDeviceToHost) {
const std::string& hlo_string = R"(
HloModule MyModule
ENTRY main {
host_operand = f32[32,1,1]{2,1,0:T(2,128)S(5)} parameter(0)
update = f32[1,1,1]{2,1,0:T(2,128)} parameter(1)
constant_0 = s32[] constant(0)
ROOT dynamic-update-slice = f32[32,1,1]{2,1,0:T(2,128)S(5)} dynamic-update-slice(host_operand, update, constant_0, constant_0, constant_0)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunAsyncifier(module.get()));
EXPECT_TRUE(changed);
HloInstruction* dynamic_update_slice_start;
EXPECT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::Op()
.WithOpcode(HloOpcode::kAsyncDone)
.WithOperand(0, m::Op(&dynamic_update_slice_start)
.WithOpcode(HloOpcode::kAsyncStart))));
ASSERT_EQ(dynamic_update_slice_start->called_computations().size(), 1);
HloComputation* async_dynamic_slice_computation =
dynamic_update_slice_start->called_computations().at(0);
EXPECT_THAT(async_dynamic_slice_computation->root_instruction(),
GmockMatch(m::DynamicUpdateSlice()));
}
TEST_F(HostMemoryTransferAsyncifierTest, DynamicSliceFromHostToHost) {
const std::string& hlo_string = R"(
HloModule MyModule
ENTRY main {
host_memory = f32[32,1,1]{2,1,0:T(2,128)S(5)} parameter(0)
constant_0 = s32[] constant(0)
ROOT dynamic-slice = f32[1,1,1]{2,1,0:T(2,128)S(5)} dynamic-slice(host_memory, constant_0, constant_0, constant_0), dynamic_slice_sizes={1,1,1}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunAsyncifier(module.get()));
EXPECT_FALSE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::DynamicSlice()));
}
TEST_F(HostMemoryTransferAsyncifierTest, DynamicSliceFromDeviceToDevice) {
const std::string& hlo_string = R"(
HloModule MyModule
ENTRY main {
device = f32[32,1,1]{2,1,0:T(2,128)} parameter(0)
constant_0 = s32[] constant(0)
ROOT dynamic-slice = f32[1,1,1]{2,1,0:T(2,128)} dynamic-slice(device, constant_0, constant_0, constant_0), dynamic_slice_sizes={1,1,1}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunAsyncifier(module.get()));
EXPECT_FALSE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::DynamicSlice()));
}
TEST_F(HostMemoryTransferAsyncifierTest, DynamicSliceFromDeviceToHost) {
const std::string& hlo_string = R"(
HloModule MyModule
ENTRY main {
device = f32[32,1,1]{2,1,0:T(2,128)} parameter(0)
constant_0 = s32[] constant(0)
ROOT dynamic-slice = f32[1,1,1]{2,1,0:T(2,128)S(5)} dynamic-slice(device, constant_0, constant_0, constant_0), dynamic_slice_sizes={1,1,1}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunAsyncifier(module.get()));
EXPECT_FALSE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::DynamicSlice()));
}
TEST_F(HostMemoryTransferAsyncifierTest, DynamicSliceFromHostToDevice) {
const std::string& hlo_string = R"(
HloModule MyModule
ENTRY main {
host_memory = f32[32,1,1]{2,1,0:T(2,128)S(5)} parameter(0)
constant_0 = s32[] constant(0)
ROOT dynamic-slice = f32[1,1,1]{2,1,0:T(2,128)} dynamic-slice(host_memory, constant_0, constant_0, constant_0), dynamic_slice_sizes={1,1,1}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunAsyncifier(module.get()));
EXPECT_TRUE(changed);
HloInstruction* dynamic_slice_start;
EXPECT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::Op()
.WithOpcode(HloOpcode::kAsyncDone)
.WithOperand(0, m::Op(&dynamic_slice_start)
.WithOpcode(HloOpcode::kAsyncStart))));
ASSERT_EQ(dynamic_slice_start->called_computations().size(), 1);
HloComputation* async_dynamic_slice_computation =
dynamic_slice_start->called_computations().at(0);
EXPECT_THAT(async_dynamic_slice_computation->root_instruction(),
GmockMatch(m::DynamicSlice()));
}
TEST_F(HostMemoryTransferAsyncifierTest, CopyFromHostToHost) {
const std::string& hlo_string = R"(
HloModule MyModule
ENTRY main {
host_memory = f32[32,1,1]{2,1,0:T(2,128)S(5)} parameter(0)
ROOT copy = f32[32,1,1]{2,1,0:T(2,128)S(5)} copy(host_memory)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunAsyncifier(module.get()));
EXPECT_FALSE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Copy()));
}
TEST_F(HostMemoryTransferAsyncifierTest, CopyFromDeviceToDevice) {
const std::string& hlo_string = R"(
HloModule MyModule
ENTRY main {
device = f32[32,1,1]{2,1,0:T(2,128)} parameter(0)
ROOT copy = f32[32,1,1]{2,1,0:T(2,128)} copy(device)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunAsyncifier(module.get()));
EXPECT_FALSE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Copy()));
}
TEST_F(HostMemoryTransferAsyncifierTest, DISABLED_CopyFromDeviceToHost) {
const std::string& hlo_string = R"(
HloModule MyModule
ENTRY main {
device = f32[32,1,1]{2,1,0:T(2,128)} parameter(0)
ROOT copy = f32[32,1,1]{2,1,0:T(2,128)S(5)} copy(device)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunAsyncifier(module.get()));
EXPECT_TRUE(changed);
HloInstruction* copy_start;
EXPECT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(
m::Op()
.WithOpcode(HloOpcode::kAsyncDone)
.WithOperand(
0, m::Op(©_start).WithOpcode(HloOpcode::kAsyncStart))));
ASSERT_EQ(copy_start->called_computations().size(), 1);
HloComputation* async_copy_computation =
copy_start->called_computations().at(0);
EXPECT_THAT(async_copy_computation->root_instruction(),
GmockMatch(m::Copy()));
}
TEST_F(HostMemoryTransferAsyncifierTest, OldCopyFromDeviceToHost) {
const std::string& hlo_string = R"(
HloModule MyModule
ENTRY main {
device = f32[32,1,1]{2,1,0:T(2,128)} parameter(0)
ROOT copy = f32[32,1,1]{2,1,0:T(2,128)S(5)} copy(device)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunAsyncifier(module.get()));
EXPECT_TRUE(changed);
HloInstruction* copy_start;
EXPECT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(
m::Op()
.WithOpcode(HloOpcode::kCopyDone)
.WithOperand(
0, m::Op(©_start).WithOpcode(HloOpcode::kCopyStart))));
}
TEST_F(HostMemoryTransferAsyncifierTest, DISABLED_CopyFromHostToDevice) {
const std::string& hlo_string = R"(
HloModule MyModule
ENTRY main {
host_memory = f32[32,1,1]{2,1,0:T(2,128)S(5)} parameter(0)
ROOT copy = f32[32,1,1]{2,1,0:T(2,128)} copy(host_memory)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunAsyncifier(module.get()));
EXPECT_TRUE(changed);
HloInstruction* copy_start;
EXPECT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(
m::Op()
.WithOpcode(HloOpcode::kAsyncDone)
.WithOperand(
0, m::Op(©_start).WithOpcode(HloOpcode::kAsyncStart))));
ASSERT_EQ(copy_start->called_computations().size(), 1);
HloComputation* async_copy_computation =
copy_start->called_computations().at(0);
EXPECT_THAT(async_copy_computation->root_instruction(),
GmockMatch(m::Copy()));
}
TEST_F(HostMemoryTransferAsyncifierTest, OldCopyFromHostToDevice) {
const std::string& hlo_string = R"(
HloModule MyModule
ENTRY main {
host_memory = f32[32,1,1]{2,1,0:T(2,128)S(5)} parameter(0)
ROOT copy = f32[32,1,1]{2,1,0:T(2,128)} copy(host_memory)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunAsyncifier(module.get()));
EXPECT_TRUE(changed);
HloInstruction* copy_start;
EXPECT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(
m::Op()
.WithOpcode(HloOpcode::kCopyDone)
.WithOperand(
0, m::Op(©_start).WithOpcode(HloOpcode::kCopyStart))));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/host_memory_transfer_asyncifier.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/host_memory_transfer_asyncifier_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
c4a404f6-3ce8-42e1-9890-5427ba37c72f | cpp | google/cel-cpp | reference_resolver | runtime/reference_resolver.cc | runtime/reference_resolver_test.cc | #include "runtime/reference_resolver.h"
#include "absl/base/macros.h"
#include "absl/log/absl_log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "common/native_type.h"
#include "eval/compiler/qualified_reference_resolver.h"
#include "internal/casts.h"
#include "internal/status_macros.h"
#include "runtime/internal/runtime_friend_access.h"
#include "runtime/internal/runtime_impl.h"
#include "runtime/runtime.h"
#include "runtime/runtime_builder.h"
namespace cel {
namespace {
using ::cel::internal::down_cast;
using ::cel::runtime_internal::RuntimeFriendAccess;
using ::cel::runtime_internal::RuntimeImpl;
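// Extracts the default RuntimeImpl from the builder, failing if a custom
// cel::Runtime implementation is in use.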
absl::StatusOr<RuntimeImpl*> RuntimeImplFromBuilder(RuntimeBuilder& builder) {
Runtime& runtime = RuntimeFriendAccess::GetMutableRuntime(builder);
if (RuntimeFriendAccess::RuntimeTypeId(runtime) !=
NativeTypeId::For<RuntimeImpl>()) {
    return absl::UnimplementedError(
        "reference resolution is only supported on the default cel::Runtime "
        "implementation.");
}
RuntimeImpl& runtime_impl = down_cast<RuntimeImpl&>(runtime);
return &runtime_impl;
}
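// Maps the public ReferenceResolverEnabled enum onto the internal
// ReferenceResolverOption used by the AST transform.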
google::api::expr::runtime::ReferenceResolverOption Convert(
ReferenceResolverEnabled enabled) {
switch (enabled) {
case ReferenceResolverEnabled::kCheckedExpressionOnly:
return google::api::expr::runtime::ReferenceResolverOption::kCheckedOnly;
case ReferenceResolverEnabled::kAlways:
return google::api::expr::runtime::ReferenceResolverOption::kAlways;
}
ABSL_LOG(FATAL) << "unsupported ReferenceResolverEnabled enumerator: "
<< static_cast<int>(enabled);
}
}
absl::Status EnableReferenceResolver(RuntimeBuilder& builder,
ReferenceResolverEnabled enabled) {
CEL_ASSIGN_OR_RETURN(RuntimeImpl * runtime_impl,
RuntimeImplFromBuilder(builder));
ABSL_ASSERT(runtime_impl != nullptr);
runtime_impl->expr_builder().AddAstTransform(
NewReferenceResolverExtension(Convert(enabled)));
return absl::OkStatus();
}
} | #include "runtime/reference_resolver.h"
#include <cstdint>
#include <utility>
#include "google/api/expr/v1alpha1/checked.pb.h"
#include "google/api/expr/v1alpha1/syntax.pb.h"
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "base/function_adapter.h"
#include "common/value.h"
#include "extensions/protobuf/runtime_adapter.h"
#include "internal/testing.h"
#include "parser/parser.h"
#include "runtime/activation.h"
#include "runtime/managed_value_factory.h"
#include "runtime/register_function_helper.h"
#include "runtime/runtime_builder.h"
#include "runtime/runtime_options.h"
#include "runtime/standard_runtime_builder_factory.h"
#include "google/protobuf/text_format.h"
namespace cel {
namespace {
using ::cel::extensions::ProtobufRuntimeAdapter;
using ::google::api::expr::v1alpha1::CheckedExpr;
using ::google::api::expr::v1alpha1::Expr;
using ::google::api::expr::v1alpha1::ParsedExpr;
using ::google::api::expr::parser::Parse;
using ::absl_testing::StatusIs;
using ::testing::HasSubstr;
TEST(ReferenceResolver, ResolveQualifiedFunctions) {
RuntimeOptions options;
ASSERT_OK_AND_ASSIGN(RuntimeBuilder builder,
CreateStandardRuntimeBuilder(options));
ASSERT_OK(
EnableReferenceResolver(builder, ReferenceResolverEnabled::kAlways));
absl::Status status =
RegisterHelper<BinaryFunctionAdapter<int64_t, int64_t, int64_t>>::
RegisterGlobalOverload(
"com.example.Exp",
[](ValueManager& value_factory, int64_t base,
int64_t exp) -> int64_t {
int64_t result = 1;
for (int64_t i = 0; i < exp; ++i) {
result *= base;
}
return result;
},
builder.function_registry());
ASSERT_OK(status);
ASSERT_OK_AND_ASSIGN(auto runtime, std::move(builder).Build());
ASSERT_OK_AND_ASSIGN(ParsedExpr parsed_expr,
Parse("com.example.Exp(2, 3) == 8"));
ASSERT_OK_AND_ASSIGN(auto program, ProtobufRuntimeAdapter::CreateProgram(
*runtime, parsed_expr));
ManagedValueFactory value_factory(program->GetTypeProvider(),
MemoryManagerRef::ReferenceCounting());
Activation activation;
ASSERT_OK_AND_ASSIGN(Value value,
program->Evaluate(activation, value_factory.get()));
ASSERT_TRUE(value->Is<BoolValue>());
EXPECT_TRUE(value.GetBool().NativeValue());
}
TEST(ReferenceResolver, ResolveQualifiedFunctionsCheckedOnly) {
RuntimeOptions options;
ASSERT_OK_AND_ASSIGN(RuntimeBuilder builder,
CreateStandardRuntimeBuilder(options));
ASSERT_OK(EnableReferenceResolver(
builder, ReferenceResolverEnabled::kCheckedExpressionOnly));
absl::Status status =
RegisterHelper<BinaryFunctionAdapter<int64_t, int64_t, int64_t>>::
RegisterGlobalOverload(
"com.example.Exp",
[](ValueManager& value_factory, int64_t base,
int64_t exp) -> int64_t {
int64_t result = 1;
for (int64_t i = 0; i < exp; ++i) {
result *= base;
}
return result;
},
builder.function_registry());
ASSERT_OK(status);
ASSERT_OK_AND_ASSIGN(auto runtime, std::move(builder).Build());
ASSERT_OK_AND_ASSIGN(ParsedExpr parsed_expr,
Parse("com.example.Exp(2, 3) == 8"));
EXPECT_THAT(ProtobufRuntimeAdapter::CreateProgram(*runtime, parsed_expr),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("No overloads provided")));
}
constexpr absl::string_view kIdentifierExpression = R"pb(
reference_map: {
key: 3
value: { name: "com.example.x" }
}
reference_map: {
key: 4
value: { overload_id: "add_int64" }
}
reference_map: {
key: 7
value: { name: "com.example.y" }
}
type_map: {
key: 3
value: { primitive: INT64 }
}
type_map: {
key: 4
value: { primitive: INT64 }
}
type_map: {
key: 7
value: { primitive: INT64 }
}
source_info: {
location: "<input>"
line_offsets: 30
positions: { key: 1 value: 0 }
positions: { key: 2 value: 3 }
positions: { key: 3 value: 11 }
positions: { key: 4 value: 14 }
positions: { key: 5 value: 16 }
positions: { key: 6 value: 19 }
positions: { key: 7 value: 27 }
}
expr: {
id: 4
call_expr: {
function: "_+_"
args: {
id: 3
# compilers typically already apply this rewrite, but older saved
# expressions might preserve the original parse.
select_expr {
operand {
id: 8
select_expr {
operand: {
id: 9
ident_expr { name: "com" }
}
field: "example"
}
}
field: "x"
}
}
args: {
id: 7
ident_expr: { name: "com.example.y" }
}
}
})pb";
TEST(ReferenceResolver, ResolveQualifiedIdentifiers) {
RuntimeOptions options;
ASSERT_OK_AND_ASSIGN(RuntimeBuilder builder,
CreateStandardRuntimeBuilder(options));
ASSERT_OK(EnableReferenceResolver(
builder, ReferenceResolverEnabled::kCheckedExpressionOnly));
ASSERT_OK_AND_ASSIGN(auto runtime, std::move(builder).Build());
CheckedExpr checked_expr;
ASSERT_TRUE(google::protobuf::TextFormat::ParseFromString(kIdentifierExpression,
&checked_expr));
ASSERT_OK_AND_ASSIGN(auto program, ProtobufRuntimeAdapter::CreateProgram(
*runtime, checked_expr));
ManagedValueFactory value_factory(program->GetTypeProvider(),
MemoryManagerRef::ReferenceCounting());
Activation activation;
activation.InsertOrAssignValue("com.example.x",
value_factory.get().CreateIntValue(3));
activation.InsertOrAssignValue("com.example.y",
value_factory.get().CreateIntValue(4));
ASSERT_OK_AND_ASSIGN(Value value,
program->Evaluate(activation, value_factory.get()));
ASSERT_TRUE(value->Is<IntValue>());
EXPECT_EQ(value.GetInt().NativeValue(), 7);
}
TEST(ReferenceResolver, ResolveQualifiedIdentifiersSkipParseOnly) {
RuntimeOptions options;
ASSERT_OK_AND_ASSIGN(RuntimeBuilder builder,
CreateStandardRuntimeBuilder(options));
ASSERT_OK(EnableReferenceResolver(
builder, ReferenceResolverEnabled::kCheckedExpressionOnly));
ASSERT_OK_AND_ASSIGN(auto runtime, std::move(builder).Build());
CheckedExpr checked_expr;
ASSERT_TRUE(google::protobuf::TextFormat::ParseFromString(kIdentifierExpression,
&checked_expr));
Expr unchecked_expr = checked_expr.expr();
ASSERT_OK_AND_ASSIGN(auto program, ProtobufRuntimeAdapter::CreateProgram(
*runtime, checked_expr.expr()));
ManagedValueFactory value_factory(program->GetTypeProvider(),
MemoryManagerRef::ReferenceCounting());
Activation activation;
activation.InsertOrAssignValue("com.example.x",
value_factory.get().CreateIntValue(3));
activation.InsertOrAssignValue("com.example.y",
value_factory.get().CreateIntValue(4));
ASSERT_OK_AND_ASSIGN(Value value,
program->Evaluate(activation, value_factory.get()));
ASSERT_TRUE(value->Is<ErrorValue>());
EXPECT_THAT(value.GetError().NativeValue(),
StatusIs(absl::StatusCode::kUnknown, HasSubstr("\"com\"")));
}
constexpr absl::string_view kEnumExpr = R"pb(
reference_map: {
key: 8
value: {
name: "google.api.expr.test.v1.proto2.GlobalEnum.GAZ"
value: { int64_value: 2 }
}
}
reference_map: {
key: 9
value: { overload_id: "equals" }
}
type_map: {
key: 8
value: { primitive: INT64 }
}
type_map: {
key: 9
value: { primitive: BOOL }
}
type_map: {
key: 10
value: { primitive: INT64 }
}
source_info: {
location: "<input>"
line_offsets: 1
line_offsets: 64
line_offsets: 77
positions: { key: 1 value: 13 }
positions: { key: 2 value: 19 }
positions: { key: 3 value: 23 }
positions: { key: 4 value: 28 }
positions: { key: 5 value: 33 }
positions: { key: 6 value: 36 }
positions: { key: 7 value: 43 }
positions: { key: 8 value: 54 }
positions: { key: 9 value: 59 }
positions: { key: 10 value: 62 }
}
expr: {
id: 9
call_expr: {
function: "_==_"
args: {
id: 8
ident_expr: { name: "google.api.expr.test.v1.proto2.GlobalEnum.GAZ" }
}
args: {
id: 10
const_expr: { int64_value: 2 }
}
}
})pb";
TEST(ReferenceResolver, ResolveEnumConstants) {
RuntimeOptions options;
ASSERT_OK_AND_ASSIGN(RuntimeBuilder builder,
CreateStandardRuntimeBuilder(options));
ASSERT_OK(EnableReferenceResolver(
builder, ReferenceResolverEnabled::kCheckedExpressionOnly));
ASSERT_OK_AND_ASSIGN(auto runtime, std::move(builder).Build());
CheckedExpr checked_expr;
ASSERT_TRUE(google::protobuf::TextFormat::ParseFromString(kEnumExpr, &checked_expr));
ASSERT_OK_AND_ASSIGN(auto program, ProtobufRuntimeAdapter::CreateProgram(
*runtime, checked_expr));
ManagedValueFactory value_factory(program->GetTypeProvider(),
MemoryManagerRef::ReferenceCounting());
Activation activation;
ASSERT_OK_AND_ASSIGN(Value value,
program->Evaluate(activation, value_factory.get()));
ASSERT_TRUE(value->Is<BoolValue>());
EXPECT_TRUE(value.GetBool().NativeValue());
}
TEST(ReferenceResolver, ResolveEnumConstantsSkipParseOnly) {
RuntimeOptions options;
ASSERT_OK_AND_ASSIGN(RuntimeBuilder builder,
CreateStandardRuntimeBuilder(options));
ASSERT_OK(EnableReferenceResolver(
builder, ReferenceResolverEnabled::kCheckedExpressionOnly));
ASSERT_OK_AND_ASSIGN(auto runtime, std::move(builder).Build());
CheckedExpr checked_expr;
ASSERT_TRUE(google::protobuf::TextFormat::ParseFromString(kEnumExpr, &checked_expr));
Expr unchecked_expr = checked_expr.expr();
ASSERT_OK_AND_ASSIGN(auto program, ProtobufRuntimeAdapter::CreateProgram(
*runtime, unchecked_expr));
ManagedValueFactory value_factory(program->GetTypeProvider(),
MemoryManagerRef::ReferenceCounting());
Activation activation;
ASSERT_OK_AND_ASSIGN(Value value,
program->Evaluate(activation, value_factory.get()));
ASSERT_TRUE(value->Is<ErrorValue>());
EXPECT_THAT(
value.GetError().NativeValue(),
StatusIs(absl::StatusCode::kUnknown,
HasSubstr("\"google.api.expr.test.v1.proto2.GlobalEnum.GAZ\"")));
}
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/runtime/reference_resolver.cc | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/runtime/reference_resolver_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
167d38bf-d218-48de-bf28-3f9bb553562d | cpp | tensorflow/tensorflow | stringpiece | tensorflow/core/lib/core/stringpiece.h | third_party/xla/third_party/tsl/tsl/platform/stringpiece_test.cc | #ifndef TENSORFLOW_CORE_LIB_CORE_STRINGPIECE_H_
#define TENSORFLOW_CORE_LIB_CORE_STRINGPIECE_H_
#include "tensorflow/core/platform/stringpiece.h"
#endif | #include "tsl/platform/stringpiece.h"
#include <unordered_map>
#include "tsl/platform/test.h"
namespace tsl {
TEST(StringPiece, Ctor) {
{
const char* hello = "hello";
absl::string_view s20(hello);
EXPECT_TRUE(s20.data() == hello);
EXPECT_EQ(5, s20.size());
absl::string_view s21(hello, 4);
EXPECT_TRUE(s21.data() == hello);
EXPECT_EQ(4, s21.size());
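    // Deliberately constructs a view that includes the trailing NUL;
    // a string_view's length is not required to stop at the terminator.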
absl::string_view s22(hello, 6);
EXPECT_TRUE(s22.data() == hello);
EXPECT_EQ(6, s22.size());
}
{
string hola = "hola";
absl::string_view s30(hola);
EXPECT_TRUE(s30.data() == hola.data());
EXPECT_EQ(4, s30.size());
hola.push_back('\0');
hola.append("h2");
hola.push_back('\0');
absl::string_view s31(hola);
EXPECT_TRUE(s31.data() == hola.data());
EXPECT_EQ(8, s31.size());
}
}
TEST(StringPiece, ConversionToString) {
EXPECT_EQ("", string(absl::string_view("")));
EXPECT_EQ("foo", string(absl::string_view("foo")));
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/lib/core/stringpiece.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/stringpiece_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
e49e8816-3d1e-41ac-87bf-ca044879aeaf | cpp | tensorflow/tensorflow | fold_batch_norms | tensorflow/tools/graph_transforms/fold_batch_norms.cc | tensorflow/tools/graph_transforms/fold_batch_norms_test.cc | #include "tensorflow/core/common_runtime/constant_folding.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/graph/subgraph.h"
#include "tensorflow/core/platform/init_main.h"
#include "tensorflow/core/public/session.h"
#include "tensorflow/tools/graph_transforms/fold_constants_lib.h"
#include "tensorflow/tools/graph_transforms/transform_utils.h"
namespace tensorflow {
namespace graph_transforms {
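// Matches Mul(Conv2D|MatMul|DepthwiseConv2dNative(input, const_weights),
// const_scale) patterns left over from batch normalization and folds the
// per-channel scale directly into the weights, eliminating the Mul.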
Status FoldBatchNorms(const GraphDef& input_graph_def,
const TransformFuncContext& context,
GraphDef* output_graph_def) {
GraphDef replaced_graph_def;
TF_RETURN_IF_ERROR(ReplaceMatchingOpTypes(
input_graph_def,
{"Mul",
{
{"Conv2D|MatMul|DepthwiseConv2dNative",
{
{"*"},
{"Const"},
}
},
{"Const"},
}
},
[](const NodeMatch& match, const std::set<string>& input_nodes,
const std::set<string>& output_nodes,
std::vector<NodeDef>* new_nodes) {
const NodeDef& mul_node = match.node;
const NodeDef& conv_node = match.inputs[0].node;
const NodeDef& input_node = match.inputs[0].inputs[0].node;
const NodeDef& weights_node = match.inputs[0].inputs[1].node;
const NodeDef& mul_values_node = match.inputs[1].node;
for (const auto& node : {conv_node, weights_node, mul_values_node}) {
if (output_nodes.count(node.name())) {
new_nodes->insert(new_nodes->end(),
{mul_node, conv_node, input_node, weights_node,
mul_values_node});
return OkStatus();
}
}
Tensor weights = GetNodeTensorAttr(weights_node, "value");
Tensor mul_values = GetNodeTensorAttr(mul_values_node, "value");
int64_t weights_cols;
if (conv_node.op() == "Conv2D") {
weights_cols = weights.shape().dim_size(3);
} else if (conv_node.op() == "DepthwiseConv2dNative") {
weights_cols =
weights.shape().dim_size(2) * weights.shape().dim_size(3);
} else {
weights_cols = weights.shape().dim_size(1);
}
if ((mul_values.shape().dims() != 1) ||
(mul_values.shape().dim_size(0) != weights_cols)) {
return errors::InvalidArgument(
"Mul constant input to batch norm has bad shape: ",
mul_values.shape().DebugString());
}
auto weights_vector = weights.flat<float>();
Tensor scaled_weights(DT_FLOAT, weights.shape());
auto scaled_weights_vector = scaled_weights.flat<float>();
for (int64_t row = 0; row < weights_vector.dimension(0); ++row) {
scaled_weights_vector(row) =
weights_vector(row) *
mul_values.flat<float>()(row % weights_cols);
}
NodeDef scaled_weights_node;
scaled_weights_node.set_op("Const");
scaled_weights_node.set_name(weights_node.name());
SetNodeAttr("dtype", DT_FLOAT, &scaled_weights_node);
SetNodeTensorAttr<float>("value", scaled_weights, &scaled_weights_node);
new_nodes->push_back(scaled_weights_node);
new_nodes->push_back(input_node);
NodeDef new_conv_node;
new_conv_node = conv_node;
new_conv_node.set_name(mul_node.name());
new_nodes->push_back(new_conv_node);
return OkStatus();
},
{}, &replaced_graph_def));
*output_graph_def = replaced_graph_def;
return OkStatus();
}
REGISTER_GRAPH_TRANSFORM("fold_batch_norms", FoldBatchNorms);
}
} | #include "tensorflow/cc/ops/const_op.h"
#include "tensorflow/cc/ops/image_ops.h"
#include "tensorflow/cc/ops/math_ops.h"
#include "tensorflow/cc/ops/nn_ops.h"
#include "tensorflow/cc/ops/sendrecv_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
#include "tensorflow/core/public/session.h"
#include "tensorflow/tools/graph_transforms/transform_utils.h"
namespace tensorflow {
namespace graph_transforms {
Status FoldBatchNorms(const GraphDef& input_graph_def,
const TransformFuncContext& context,
GraphDef* output_graph_def);
class FoldBatchNormsTest : public ::testing::Test {
protected:
void TestFoldBatchNormsConv2D() {
auto root = tensorflow::Scope::NewRootScope();
using namespace ::tensorflow::ops;
Tensor input_data(DT_FLOAT, TensorShape({1, 1, 6, 2}));
test::FillValues<float>(
&input_data, {1.0f, 4.0f, 2.0f, 5.0f, 3.0f, 6.0f, -1.0f, -4.0f, -2.0f,
-5.0f, -3.0f, -6.0f});
Output input_op =
Const(root.WithOpName("input_op"), Input::Initializer(input_data));
Tensor weights_data(DT_FLOAT, TensorShape({1, 2, 2, 2}));
test::FillValues<float>(&weights_data,
{1.0f, 2.0f, 3.0f, 4.0f, 0.1f, 0.2f, 0.3f, 0.4f});
Output weights_op =
Const(root.WithOpName("weights_op"), Input::Initializer(weights_data));
Output conv_op = Conv2D(root.WithOpName("conv_op"), input_op, weights_op,
{1, 1, 1, 1}, "VALID");
Tensor mul_values_data(DT_FLOAT, TensorShape({2}));
test::FillValues<float>(&mul_values_data, {2.0f, 3.0f});
Output mul_values_op = Const(root.WithOpName("mul_values"),
Input::Initializer(mul_values_data));
Output mul_op = Mul(root.WithOpName("output"), conv_op, mul_values_op);
GraphDef original_graph_def;
TF_ASSERT_OK(root.ToGraphDef(&original_graph_def));
std::unique_ptr<Session> original_session(NewSession(SessionOptions()));
TF_ASSERT_OK(original_session->Create(original_graph_def));
std::vector<Tensor> original_outputs;
TF_ASSERT_OK(original_session->Run({}, {"output"}, {}, &original_outputs));
GraphDef fused_graph_def;
TF_ASSERT_OK(
FoldBatchNorms(original_graph_def, {{}, {"output"}}, &fused_graph_def));
std::unique_ptr<Session> fused_session(NewSession(SessionOptions()));
TF_ASSERT_OK(fused_session->Create(fused_graph_def));
std::vector<Tensor> fused_outputs;
TF_ASSERT_OK(fused_session->Run({}, {"output"}, {}, &fused_outputs));
test::ExpectTensorNear<float>(original_outputs[0], fused_outputs[0], 1e-5);
for (const NodeDef& node : fused_graph_def.node()) {
EXPECT_NE("Mul", node.op());
}
}
void TestFoldBatchNormsDepthwiseConv2dNative() {
auto root = tensorflow::Scope::NewRootScope();
using namespace ::tensorflow::ops;
Tensor input_data(DT_FLOAT, TensorShape({1, 1, 6, 2}));
test::FillValues<float>(
&input_data, {1.0f, 4.0f, 2.0f, 5.0f, 3.0f, 6.0f, -1.0f, -4.0f, -2.0f,
-5.0f, -3.0f, -6.0f});
Output input_op =
Const(root.WithOpName("input_op"), Input::Initializer(input_data));
Tensor weights_data(DT_FLOAT, TensorShape({1, 2, 2, 2}));
test::FillValues<float>(&weights_data,
{1.0f, 2.0f, 3.0f, 4.0f, 0.1f, 0.2f, 0.3f, 0.4f});
Output weights_op =
Const(root.WithOpName("weights_op"), Input::Initializer(weights_data));
Output conv_op = DepthwiseConv2dNative(root.WithOpName("conv_op"), input_op,
weights_op, {1, 1, 1, 1}, "VALID");
Tensor mul_values_data(DT_FLOAT, TensorShape({4}));
test::FillValues<float>(&mul_values_data, {2.0f, 3.0f, 4.0f, 5.0f});
Output mul_values_op = Const(root.WithOpName("mul_values"),
Input::Initializer(mul_values_data));
Output mul_op = Mul(root.WithOpName("output"), conv_op, mul_values_op);
GraphDef original_graph_def;
TF_ASSERT_OK(root.ToGraphDef(&original_graph_def));
std::unique_ptr<Session> original_session(NewSession(SessionOptions()));
TF_ASSERT_OK(original_session->Create(original_graph_def));
std::vector<Tensor> original_outputs;
TF_ASSERT_OK(original_session->Run({}, {"output"}, {}, &original_outputs));
GraphDef fused_graph_def;
TF_ASSERT_OK(
FoldBatchNorms(original_graph_def, {{}, {"output"}}, &fused_graph_def));
std::unique_ptr<Session> fused_session(NewSession(SessionOptions()));
TF_ASSERT_OK(fused_session->Create(fused_graph_def));
std::vector<Tensor> fused_outputs;
TF_ASSERT_OK(fused_session->Run({}, {"output"}, {}, &fused_outputs));
test::ExpectTensorNear<float>(original_outputs[0], fused_outputs[0], 1e-5);
for (const NodeDef& node : fused_graph_def.node()) {
EXPECT_NE("Mul", node.op());
}
}
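  // Here the Conv2D output feeds two separate Mul nodes; with a shared
  // convolution the scales cannot, in general, be folded into the weights, so
  // this test only verifies that both outputs stay numerically identical.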
void TestFoldBatchNormsConv2DShared() {
auto root = tensorflow::Scope::NewRootScope();
using namespace ::tensorflow::ops;
Tensor input_data(DT_FLOAT, TensorShape({1, 1, 6, 2}));
test::FillValues<float>(
&input_data, {1.0f, 4.0f, 2.0f, 5.0f, 3.0f, 6.0f, -1.0f, -4.0f, -2.0f,
-5.0f, -3.0f, -6.0f});
Output input_op =
Const(root.WithOpName("input_op"), Input::Initializer(input_data));
Tensor weights_data(DT_FLOAT, TensorShape({1, 2, 2, 2}));
test::FillValues<float>(&weights_data,
{1.0f, 2.0f, 3.0f, 4.0f, 0.1f, 0.2f, 0.3f, 0.4f});
Output weights_op =
Const(root.WithOpName("weights_op"), Input::Initializer(weights_data));
Output conv_op = Conv2D(root.WithOpName("conv_op"), input_op, weights_op,
{1, 1, 1, 1}, "VALID");
Tensor mul_values_data(DT_FLOAT, TensorShape({2}));
test::FillValues<float>(&mul_values_data, {2.0f, 3.0f});
Output mul_values_op = Const(root.WithOpName("mul_values"),
Input::Initializer(mul_values_data));
Output mul_op = Mul(root.WithOpName("output"), conv_op, mul_values_op);
Tensor mul_values_data_2(DT_FLOAT, TensorShape({2}));
test::FillValues<float>(&mul_values_data_2, {1.0f, 2.0f});
    Output mul_values_op_2 = Const(root.WithOpName("mul_values_2"),
                                   Input::Initializer(mul_values_data_2));
Output mul_op_2 =
Mul(root.WithOpName("output_2"), conv_op, mul_values_op_2);
GraphDef original_graph_def;
TF_ASSERT_OK(root.ToGraphDef(&original_graph_def));
std::unique_ptr<Session> original_session(NewSession(SessionOptions()));
TF_ASSERT_OK(original_session->Create(original_graph_def));
std::vector<Tensor> original_outputs;
TF_ASSERT_OK(original_session->Run({}, {"output", "output_2"}, {},
&original_outputs));
GraphDef fused_graph_def;
TF_ASSERT_OK(FoldBatchNorms(
original_graph_def, {{}, {"output", "output_2"}}, &fused_graph_def));
std::unique_ptr<Session> fused_session(NewSession(SessionOptions()));
TF_ASSERT_OK(fused_session->Create(fused_graph_def));
std::vector<Tensor> fused_outputs;
TF_ASSERT_OK(
fused_session->Run({}, {"output", "output_2"}, {}, &fused_outputs));
test::ExpectTensorNear<float>(original_outputs[0], fused_outputs[0], 1e-5);
test::ExpectTensorNear<float>(original_outputs[1], fused_outputs[1], 1e-5);
}
void TestFoldBatchNormsMatMul() {
auto root = tensorflow::Scope::NewRootScope();
using namespace ::tensorflow::ops;
Tensor input_data(DT_FLOAT, TensorShape({6, 2}));
test::FillValues<float>(
&input_data, {1.0f, 4.0f, 2.0f, 5.0f, 3.0f, 6.0f, -1.0f, -4.0f, -2.0f,
-5.0f, -3.0f, -6.0f});
Output input_op =
Const(root.WithOpName("input_op"), Input::Initializer(input_data));
Tensor weights_data(DT_FLOAT, TensorShape({2, 2}));
test::FillValues<float>(&weights_data, {1.0f, 2.0f, 0.3f, 0.4f});
Output weights_op =
Const(root.WithOpName("weights_op"), Input::Initializer(weights_data));
Output matmul_op =
MatMul(root.WithOpName("matmul_op"), input_op, weights_op);
Tensor mul_values_data(DT_FLOAT, TensorShape({2}));
test::FillValues<float>(&mul_values_data, {2.0f, 3.0f});
Output mul_values_op = Const(root.WithOpName("mul_values"),
Input::Initializer(mul_values_data));
Output mul_op = Mul(root.WithOpName("output"), matmul_op, mul_values_op);
GraphDef original_graph_def;
TF_ASSERT_OK(root.ToGraphDef(&original_graph_def));
std::unique_ptr<Session> original_session(NewSession(SessionOptions()));
TF_ASSERT_OK(original_session->Create(original_graph_def));
std::vector<Tensor> original_outputs;
TF_ASSERT_OK(original_session->Run({}, {"output"}, {}, &original_outputs));
GraphDef fused_graph_def;
TF_ASSERT_OK(
FoldBatchNorms(original_graph_def, {{}, {"output"}}, &fused_graph_def));
std::unique_ptr<Session> fused_session(NewSession(SessionOptions()));
TF_ASSERT_OK(fused_session->Create(fused_graph_def));
std::vector<Tensor> fused_outputs;
TF_ASSERT_OK(fused_session->Run({}, {"output"}, {}, &fused_outputs));
test::ExpectTensorNear<float>(original_outputs[0], fused_outputs[0], 1e-5);
for (const NodeDef& node : fused_graph_def.node()) {
EXPECT_NE("Mul", node.op());
}
}
};
TEST_F(FoldBatchNormsTest, TestFoldBatchNormsConv2D) {
TestFoldBatchNormsConv2D();
}
TEST_F(FoldBatchNormsTest, TestFoldBatchNormsMatMul) {
TestFoldBatchNormsMatMul();
}
TEST_F(FoldBatchNormsTest, TestFoldBatchNormsDepthwiseConv2dNative) {
TestFoldBatchNormsDepthwiseConv2dNative();
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/tools/graph_transforms/fold_batch_norms.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/tools/graph_transforms/fold_batch_norms_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
23361921-0139-49e4-8795-9f4dd027c06f | cpp | tensorflow/tensorflow | conv_pointwise | tensorflow/lite/delegates/gpu/common/tasks/special/conv_pointwise.cc | tensorflow/lite/delegates/gpu/common/tasks/special/conv_pointwise_test.cc | #include "tensorflow/lite/delegates/gpu/common/tasks/special/conv_pointwise.h"
#include <cstdint>
#include <map>
#include <memory>
#include <set>
#include <string>
#include <utility>
#include <vector>
#include "absl/strings/str_cat.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/util.h"
namespace tflite {
namespace gpu {
namespace {
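// Builds the shader source for the fused pointwise convolution: each output
// slice reads two int4 entries from `args.offsets` (four (x, y) weight
// offsets), gathers the corresponding weight vectors, and accumulates dot
// products against the source slice, masking out channels beyond the real
// channel count on the last slice.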
std::string GenerateCode(const ConvPointwiseAttributes& attr) {
std::string c = R"(
MAIN_FUNCTION($0) {
int linear_id = GLOBAL_ID_0;
int X = linear_id / args.dst_tensor.Batch();
int B = linear_id % args.dst_tensor.Batch();
args.weights_tensor.SetBatchRef(B);
args.src_tensor.SetBatchRef(B);
args.dst_tensor.SetBatchRef(B);
int Y = GLOBAL_ID_1;
int S = GLOBAL_ID_2;
if (X >= args.dst_tensor.Width() ||
Y >= args.dst_tensor.Height() ||
S >= args.dst_tensor.Slices()) return;
int4 offset0 = args.offsets.Read(S * 2 + 0, 0);
int4 offset1 = args.offsets.Read(S * 2 + 1, 0);
ACCUM_FLT4 res = INIT_ACCUM_FLT4(0.0f);
FLT4 last_mask;
int last_src_ch = (args.src_tensor.Slices() - 1) * 4;
last_mask.x = INIT_FLT(1.0f);
last_mask.y = last_src_ch + 1 < args.src_tensor.Channels() ? INIT_FLT(1.0f) : INIT_FLT(0.0f);
last_mask.z = last_src_ch + 2 < args.src_tensor.Channels() ? INIT_FLT(1.0f) : INIT_FLT(0.0f);
last_mask.w = last_src_ch + 3 < args.src_tensor.Channels() ? INIT_FLT(1.0f) : INIT_FLT(0.0f);
for (int s = 0; s < args.src_tensor.Slices(); ++s) {
FLT4 src = args.src_tensor.Read(X, Y, s);
FLT4 w0 = args.weights_tensor.Read(X + offset0.x, Y + offset0.y, s);
FLT4 w1 = args.weights_tensor.Read(X + offset0.z, Y + offset0.w, s);
FLT4 w2 = args.weights_tensor.Read(X + offset1.x, Y + offset1.y, s);
FLT4 w3 = args.weights_tensor.Read(X + offset1.z, Y + offset1.w, s);
FLT4 mask = INIT_FLT4(1.0f);
if (s == (args.src_tensor.Slices() - 1)) {
mask = last_mask;
}
src *= mask;
res.x += dot(src, w0);
res.y += dot(src, w1);
res.z += dot(src, w2);
res.w += dot(src, w3);
}
FLT4 result = TO_FLT4(res);
)";
if (attr.mean) {
c += " result = result / INIT_FLT(args.src_tensor.Channels());\n";
}
c += " args.dst_tensor.Write(result, X, Y, S);\n";
c += "}\n";
return c;
}
struct NodeContext {
Node* node;
std::vector<Value*> inputs;
std::vector<Value*> outputs;
};
absl::Status IsNode(const GraphFloat32& graph, OperationType op_type,
int inputs_count, int outputs_count, Node* node,
NodeContext* node_context) {
const std::string op_desc = ToString(op_type);
node_context->node = node;
if (node_context->node == nullptr) {
return absl::NotFoundError(absl::StrCat("Invalid ", op_desc, " node."));
}
if (OperationTypeFromString(node_context->node->operation.type) != op_type) {
return absl::InternalError(
absl::StrCat("Not correct node type. Expected ", op_desc, ", received ",
node_context->node->operation.type));
}
node_context->inputs = graph.FindInputs(node_context->node->id);
node_context->outputs = graph.FindOutputs(node_context->node->id);
if (inputs_count != -1) {
if (node_context->inputs.size() != inputs_count) {
return absl::InternalError(
absl::StrCat("Expected ", inputs_count, " input in a ", op_desc,
" node. Node has ", node_context->inputs.size()));
}
}
if (node_context->outputs.size() != outputs_count) {
return absl::InternalError(
absl::StrCat("Expected ", outputs_count, " output in a ", op_desc,
" node. Node has ", node_context->outputs.size()));
}
return absl::OkStatus();
}
absl::Status IsMeanNode(const GraphFloat32& graph, Node* node,
NodeContext* node_context) {
RETURN_IF_ERROR(IsNode(graph, OperationType::MEAN, 1, 1, node, node_context));
auto mean_attr =
absl::any_cast<MeanAttributes>(node_context->node->operation.attributes);
if (mean_attr.dims != std::set<Axis>{Axis::CHANNELS}) {
return absl::InternalError("Expected mean node with channels reduction.");
}
return absl::OkStatus();
}
absl::Status IsReduceSumNode(const GraphFloat32& graph, Node* node,
NodeContext* node_context) {
RETURN_IF_ERROR(
IsNode(graph, OperationType::REDUCE_SUM, 1, 1, node, node_context));
auto reduce_attr =
      absl::any_cast<ReduceAttributes>(node_context->node->operation.attributes);
if (reduce_attr.dims != std::set<Axis>{Axis::CHANNELS}) {
return absl::InternalError(
"Expected reduce_sum node with channels reduction.");
}
return absl::OkStatus();
}
absl::Status IsMulNode(const GraphFloat32& graph, Node* node,
NodeContext* node_context) {
RETURN_IF_ERROR(IsNode(graph, OperationType::MUL, 2, 1, node, node_context));
if (node_context->inputs[0]->tensor.shape !=
node_context->inputs[1]->tensor.shape) {
return absl::InternalError("Expected mul node with 2 equal tensors.");
}
return absl::OkStatus();
}
absl::Status IsSliceNode(const GraphFloat32& graph, Node* node,
NodeContext* node_context) {
RETURN_IF_ERROR(
IsNode(graph, OperationType::SLICE, 1, 1, node, node_context));
auto slice_attr =
absl::any_cast<SliceAttributes>(node_context->node->operation.attributes);
if (slice_attr.strides != BHWC(1, 1, 1, 1)) {
return absl::InternalError("Not valid attributes in slice node.");
}
return absl::OkStatus();
}
absl::Status IsConcatNode(const GraphFloat32& graph, Node* node,
NodeContext* node_context) {
RETURN_IF_ERROR(
IsNode(graph, OperationType::CONCAT, -1, 1, node, node_context));
auto concat_attr = absl::any_cast<ConcatAttributes>(
node_context->node->operation.attributes);
if (concat_attr.axis != Axis::CHANNELS) {
return absl::InternalError("Not valid attributes in concat node.");
}
return absl::OkStatus();
}
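// Walks one concat input back through the expected
// reduce(mean/sum) <- mul <- slice chain, extracts the spatial offset from the
// slice's `starts` attribute, and records the matched nodes in
// `consumed_nodes`.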
absl::Status GetOffset(const GraphFloat32& graph, NodeId concat_input_node,
                       NodeId second_common_input_id, int* offset_x,
int* offset_y, std::set<NodeId>* consumed_nodes) {
NodeContext reduce_node, mul_node, slice_node;
absl::Status status =
IsMeanNode(graph, graph.FindProducer(concat_input_node), &reduce_node);
if (!status.ok()) {
RETURN_IF_ERROR(IsReduceSumNode(
graph, graph.FindProducer(concat_input_node), &reduce_node));
}
RETURN_IF_ERROR(IsMulNode(
graph, graph.FindProducer(reduce_node.inputs[0]->id), &mul_node));
const ValueId slice_output_id =
      mul_node.inputs[0]->id == second_common_input_id ? mul_node.inputs[1]->id
: mul_node.inputs[0]->id;
RETURN_IF_ERROR(
IsSliceNode(graph, graph.FindProducer(slice_output_id), &slice_node));
auto slice_attr =
absl::any_cast<SliceAttributes>(slice_node.node->operation.attributes);
*offset_x = slice_attr.starts.w;
*offset_y = slice_attr.starts.h;
consumed_nodes->insert(reduce_node.node->id);
consumed_nodes->insert(mul_node.node->id);
consumed_nodes->insert(slice_node.node->id);
return absl::OkStatus();
}
}
GPUOperation CreateConvPointwise(const OperationDef& definition,
const ConvPointwiseAttributes& attr) {
const int dst_channels = attr.offsets.size();
const int dst_depth = DivideRoundUp(dst_channels, 4);
std::vector<int32_t> offsets_data(dst_depth * 2 * 4, 0);
for (int i = 0; i < attr.offsets.size(); ++i) {
offsets_data[i * 2 + 0] = attr.offsets[i].x;
offsets_data[i * 2 + 1] = attr.offsets[i].y;
}
for (int i = attr.offsets.size(); i < offsets_data.size() / 2; ++i) {
offsets_data[i * 2 + 0] = attr.offsets.back().x;
offsets_data[i * 2 + 1] = attr.offsets.back().y;
}
GPUOperation op(definition);
op.AddSrcTensor("src_tensor", definition.src_tensors[0]);
op.AddSrcTensor("weights_tensor", definition.src_tensors[1]);
op.AddDstTensor("dst_tensor", definition.dst_tensors[0]);
op.code_ = GenerateCode(attr);
op.tensor_to_grid_ = TensorToGrid::kWBToX_HDToY_SToZ;
TensorDescriptor desc = CreateConstantHWVec4TensorDescriptor(
DataType::INT32, TensorStorageType::TEXTURE_2D, dst_depth * 2, 1,
reinterpret_cast<uint8_t*>(offsets_data.data()));
op.args_.AddObject("offsets", std::make_unique<TensorDescriptor>(desc));
return op;
}
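// Tries to match the slice -> mul -> reduce(mean/sum) -> concat subgraph
// rooted at `first_node_id`; on success the whole pattern is replaced with a
// single ConvPointwise operation in `gpu_subgraph`.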
absl::Status TryFusedPointwiseConv(
const GraphFloat32& graph, NodeId first_node_id,
CalculationsPrecision precision,
const std::map<ValueId, TensorDescriptor>& tensor_descriptors,
std::set<NodeId>* consumed_nodes, GPUOperationsSubgraph* gpu_subgraph) {
NodeContext slice_node;
RETURN_IF_ERROR(
IsSliceNode(graph, graph.GetNode(first_node_id), &slice_node));
  const auto& first_common_input = slice_node.inputs[0];
auto slice_consumers = graph.FindConsumers(slice_node.outputs[0]->id);
if (slice_consumers.size() != 1) {
return absl::NotFoundError("FusedPointwiseConv not suitable.");
}
NodeContext mul_node;
RETURN_IF_ERROR(IsMulNode(graph, slice_consumers[0], &mul_node));
  const auto& second_common_input =
mul_node.inputs[0]->id == slice_node.outputs[0]->id ? mul_node.inputs[1]
: mul_node.inputs[0];
auto mul_consumers = graph.FindConsumers(mul_node.outputs[0]->id);
if (mul_consumers.size() != 1) {
return absl::NotFoundError("FusedPointwiseConv not suitable.");
}
NodeContext reduce_node;
bool mean = true;
absl::Status status = IsMeanNode(graph, mul_consumers[0], &reduce_node);
if (!status.ok()) {
RETURN_IF_ERROR(IsReduceSumNode(graph, mul_consumers[0], &reduce_node));
mean = false;
}
auto reduce_consumers = graph.FindConsumers(reduce_node.outputs[0]->id);
if (reduce_consumers.size() != 1) {
return absl::NotFoundError("FusedPointwiseConv not suitable.");
}
NodeContext concat_node;
RETURN_IF_ERROR(IsConcatNode(graph, reduce_consumers[0], &concat_node));
ConvPointwiseAttributes op_attr;
op_attr.mean = mean;
std::set<NodeId> temp_consumed_nodes;
for (const auto& concat_input : concat_node.inputs) {
int offset_x, offset_y;
    RETURN_IF_ERROR(GetOffset(graph, concat_input->id, second_common_input->id,
&offset_x, &offset_y, &temp_consumed_nodes));
op_attr.offsets.push_back(int2(offset_x, offset_y));
}
consumed_nodes->insert(temp_consumed_nodes.begin(),
temp_consumed_nodes.end());
consumed_nodes->insert(concat_node.node->id);
OperationDef op_def;
op_def.precision = precision;
  auto it = tensor_descriptors.find(second_common_input->id);
if (it != tensor_descriptors.end()) {
op_def.src_tensors.push_back(it->second);
}
  it = tensor_descriptors.find(first_common_input->id);
if (it != tensor_descriptors.end()) {
op_def.src_tensors.push_back(it->second);
}
it = tensor_descriptors.find(concat_node.outputs[0]->id);
if (it != tensor_descriptors.end()) {
op_def.dst_tensors.push_back(it->second);
}
std::unique_ptr<GPUOperation>* gpu_op =
      InitSingleOpSubgraph({second_common_input, first_common_input},
{concat_node.outputs[0]}, gpu_subgraph);
auto operation = CreateConvPointwise(op_def, op_attr);
*gpu_op = std::make_unique<GPUOperation>(std::move(operation));
return absl::OkStatus();
}
}
} | #include "tensorflow/lite/delegates/gpu/common/tasks/special/conv_pointwise.h"
#include <memory>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/gpu/cl/kernels/cl_test.h"
#include "tensorflow/lite/delegates/gpu/common/precision.h"
#include "tensorflow/lite/delegates/gpu/common/shape.h"
#include "tensorflow/lite/delegates/gpu/common/task/gpu_operation.h"
#include "tensorflow/lite/delegates/gpu/common/task/testing_util.h"
#include "tensorflow/lite/delegates/gpu/common/tensor.h"
#include "tensorflow/lite/delegates/gpu/common/types.h"
namespace tflite {
namespace gpu {
namespace cl {
TEST_F(OpenCLOperationTest, SliceMulMeanConcat) {
TestExecutionEnvironment* env = &exec_env_;
TensorFloat32 src_tensor;
src_tensor.shape = BHWC(1, 2, 1, 2);
src_tensor.data = {3.0f, 4.0f, 5.0f, 6.0f};
TensorFloat32 weights_tensor;
weights_tensor.shape = BHWC(1, 2, 1, 2);
weights_tensor.data = {1.0f, 2.0f, 1.0f, 2.0f};
ConvPointwiseAttributes op_attr;
op_attr.mean = true;
op_attr.offsets.push_back(int2(0, 0));
for (auto precision : env->GetSupportedPrecisions()) {
auto data_type = DeduceDataTypeFromPrecision(precision);
for (auto storage : env->GetSupportedStorages(data_type)) {
const float eps = precision == CalculationsPrecision::F32 ? 1e-6f : 1e-2f;
OperationDef op_def;
op_def.precision = precision;
op_def.src_tensors.push_back({data_type, storage, Layout::HWC});
op_def.src_tensors.push_back({data_type, storage, Layout::HWC});
op_def.dst_tensors.push_back({data_type, storage, Layout::HWC});
TensorFloat32 dst_tensor;
GPUOperation operation = CreateConvPointwise(op_def, op_attr);
ASSERT_OK(env->ExecuteGPUOperation(
{src_tensor, weights_tensor},
std::make_unique<GPUOperation>(std::move(operation)),
BHWC(1, 2, 1, 2), &dst_tensor));
ASSERT_OK(PointWiseNear({5.5f, 5.5f, 8.5f, 8.5f}, dst_tensor.data, eps));
}
}
}
TEST_F(OpenCLOperationTest, SliceMulSumConcat) {
TestExecutionEnvironment* env = &exec_env_;
TensorFloat32 src_tensor;
src_tensor.shape = BHWC(1, 2, 1, 2);
src_tensor.data = {3.0f, 4.0f, 5.0f, 6.0f};
TensorFloat32 weights_tensor;
weights_tensor.shape = BHWC(1, 2, 1, 2);
weights_tensor.data = {1.0f, 2.0f, 1.0f, 2.0f};
ConvPointwiseAttributes op_attr;
op_attr.mean = false;
op_attr.offsets.push_back(int2(0, 0));
for (auto precision : env->GetSupportedPrecisions()) {
auto data_type = DeduceDataTypeFromPrecision(precision);
for (auto storage : env->GetSupportedStorages(data_type)) {
const float eps = precision == CalculationsPrecision::F32 ? 1e-6f : 1e-2f;
OperationDef op_def;
op_def.precision = precision;
op_def.src_tensors.push_back({data_type, storage, Layout::HWC});
op_def.src_tensors.push_back({data_type, storage, Layout::HWC});
op_def.dst_tensors.push_back({data_type, storage, Layout::HWC});
TensorFloat32 dst_tensor;
GPUOperation operation = CreateConvPointwise(op_def, op_attr);
ASSERT_OK(env->ExecuteGPUOperation(
{src_tensor, weights_tensor},
std::make_unique<GPUOperation>(std::move(operation)),
BHWC(1, 2, 1, 2), &dst_tensor));
ASSERT_OK(
PointWiseNear({11.0f, 11.0f, 17.0f, 17.0f}, dst_tensor.data, eps));
}
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/common/tasks/special/conv_pointwise.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/common/tasks/special/conv_pointwise_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
3394c0a7-db8c-4a2f-8b1d-18e30532a010 | cpp | tensorflow/tensorflow | sparse_to_dense | tensorflow/lite/kernels/sparse_to_dense.cc | tensorflow/lite/kernels/sparse_to_dense_test.cc | #include <stdint.h>
#include <vector>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/reference/reference_ops.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/kernels/op_macros.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace sparse_to_dense {
constexpr int kIndicesTensor = 0;
constexpr int kOutputShapeTensor = 1;
constexpr int kValueInputTensor = 2;
constexpr int kDefaultValueTensor = 3;
constexpr int kOutputTensor = 0;
constexpr int kMaxDimensions = 4;
template <typename T>
TfLiteStatus Resize(TfLiteContext* context, const TfLiteTensor* output_shape,
TfLiteTensor* output) {
const int output_dimensions = NumElements(output_shape);
TfLiteIntArray* output_shape_array = TfLiteIntArrayCreate(output_dimensions);
for (int i = 0; i < output_dimensions; ++i) {
output_shape_array->data[i] = GetTensorData<T>(output_shape)[i];
}
return context->ResizeTensor(context, output, output_shape_array);
}
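// Verifies that the shapes of `indices`, `output_shape`, and `values` are
// mutually consistent, for both the scalar/vector (rank 0-1) and matrix
// (rank 2) forms of the indices input.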
TfLiteStatus CheckDimensionsMatch(TfLiteContext* context,
const TfLiteTensor* indices,
const TfLiteTensor* output_shape,
const TfLiteTensor* values) {
switch (NumDimensions(indices)) {
case 0:
case 1: {
if (NumDimensions(values) == 0) {
TF_LITE_ENSURE_EQ(context, NumElements(indices), NumElements(values));
}
TF_LITE_ENSURE_EQ(context, NumElements(output_shape), 1);
break;
}
case 2: {
TF_LITE_ENSURE_EQ(context, SizeOfDimension(indices, 1),
NumElements(output_shape));
if (NumDimensions(values) == 0)
TF_LITE_ENSURE_EQ(context, SizeOfDimension(indices, 0),
NumElements(values));
break;
}
default:
TF_LITE_KERNEL_LOG(context,
"Wrong indices dimensions %d, should be less than 3.",
NumDimensions(indices));
return kTfLiteError;
}
return kTfLiteOk;
}
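// Expands the indices tensor into a list of rank-4 coordinates, left-padding
// each index with zeros so the reference SparseToDense implementation, which
// operates on 4-D coordinates, can handle outputs of any supported rank.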
template <typename T>
TfLiteStatus GetIndicesVector(TfLiteContext* context,
const TfLiteTensor* indices,
const int num_indices,
std::vector<std::vector<T>>* indices_vector) {
switch (NumDimensions(indices)) {
case 0:
case 1: {
const auto indices_data = GetTensorData<T>(indices);
for (int i = 0; i < num_indices; ++i) {
std::vector<T> index({0, 0, 0, indices_data[i]});
indices_vector->push_back(index);
}
break;
}
case 2: {
const int true_dimensions = SizeOfDimension(indices, 1);
TF_LITE_ENSURE(context, true_dimensions <= kMaxDimensions);
for (int i = 0; i < num_indices; ++i) {
std::vector<T> index;
index.reserve(kMaxDimensions);
for (int j = 0; j < kMaxDimensions - true_dimensions; ++j) {
index.push_back(0);
}
for (int j = 0; j < true_dimensions; ++j) {
index.push_back(GetTensorData<T>(indices)[i * true_dimensions + j]);
}
indices_vector->push_back(index);
}
break;
}
default:
TF_LITE_KERNEL_LOG(context,
"Indices dimensions problem, got %d dimensions",
NumDimensions(indices));
return kTfLiteError;
}
return kTfLiteOk;
}
TfLiteStatus ResizeOutputShape(TfLiteContext* context,
const TfLiteTensor* output_shape,
TfLiteTensor* output) {
if (output_shape->type == kTfLiteInt32) {
return Resize<int32_t>(context, output_shape, output);
} else if (output_shape->type == kTfLiteInt64) {
return Resize<int64_t>(context, output_shape, output);
} else {
TF_LITE_KERNEL_LOG(context, "Dense shape type %d not supported.",
output_shape->type);
return kTfLiteError;
}
}
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 4);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* indices;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kIndicesTensor, &indices));
const TfLiteTensor* output_shape;
TF_LITE_ENSURE_OK(
context, GetInputSafe(context, node, kOutputShapeTensor, &output_shape));
const TfLiteTensor* values;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kValueInputTensor, &values));
const TfLiteTensor* default_value;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kDefaultValueTensor,
&default_value));
TF_LITE_ASSERT(NumDimensions(indices) >= 0);
TF_LITE_ENSURE(context, NumDimensions(indices) < 3);
TF_LITE_ASSERT(NumDimensions(output_shape) >= 0);
TF_LITE_ENSURE_EQ(context, NumDimensions(output_shape), 1);
TF_LITE_ASSERT(NumDimensions(values) >= 0);
TF_LITE_ENSURE(context, NumDimensions(values) < 2);
TF_LITE_ENSURE_EQ(context, NumElements(default_value), 1);
TF_LITE_ENSURE(
context, indices->type == kTfLiteInt32 || indices->type == kTfLiteInt64);
TF_LITE_ENSURE(context, output_shape->type == kTfLiteInt32 ||
output_shape->type == kTfLiteInt64);
TF_LITE_ENSURE(context, values->type == kTfLiteInt32 ||
values->type == kTfLiteInt64 ||
values->type == kTfLiteInt8 ||
values->type == kTfLiteUInt8 ||
values->type == kTfLiteFloat32);
TF_LITE_ENSURE_TYPES_EQ(context, values->type, default_value->type);
TF_LITE_ENSURE_OK(
context, CheckDimensionsMatch(context, indices, output_shape, values));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
output->type = values->type;
TF_LITE_ENSURE_EQ(context, NumDimensions(output_shape), 1);
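  // The output can only be resized now if the shape tensor's contents are
  // known at prepare time; otherwise mark the output dynamic and resize it in
  // Eval once the shape values are available.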
if (!IsConstantOrPersistentTensor(output_shape)) {
SetTensorToDynamic(output);
return kTfLiteOk;
}
return ResizeOutputShape(context, output_shape, output);
}
template <typename T, typename TI>
TfLiteStatus SparseToDenseImpl(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* indices;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kIndicesTensor, &indices));
const TfLiteTensor* output_shape;
TF_LITE_ENSURE_OK(
context, GetInputSafe(context, node, kOutputShapeTensor, &output_shape));
const TfLiteTensor* values;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kValueInputTensor, &values));
const TfLiteTensor* default_value;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kDefaultValueTensor,
&default_value));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
if (IsDynamicTensor(output)) {
TF_LITE_ENSURE_OK(context,
ResizeOutputShape(context, output_shape, output));
}
const int num_indices = SizeOfDimension(indices, 0);
const bool value_is_scalar = NumDimensions(values) == 0;
std::vector<std::vector<TI>> indices_vector;
indices_vector.reserve(num_indices);
TF_LITE_ENSURE_OK(context, GetIndicesVector<TI>(context, indices, num_indices,
&indices_vector));
reference_ops::SparseToDense(indices_vector, GetTensorData<T>(values),
*GetTensorData<T>(default_value),
value_is_scalar, GetTensorShape(output),
GetTensorData<T>(output));
return kTfLiteOk;
}
template <typename T>
TfLiteStatus EvalForIndexType(TfLiteContext* context, TfLiteNode* node,
const TfLiteTensor* indices) {
switch (indices->type) {
case kTfLiteInt32: {
return SparseToDenseImpl<T, int32_t>(context, node);
}
case kTfLiteInt64: {
return SparseToDenseImpl<T, int64_t>(context, node);
}
default:
TF_LITE_KERNEL_LOG(
context,
"Indice type %s is currently not supported by sparse to dense.",
TfLiteTypeGetName(indices->type));
return kTfLiteError;
}
}
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* indices;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kIndicesTensor, &indices));
const TfLiteTensor* values;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kValueInputTensor, &values));
switch (values->type) {
case kTfLiteFloat32:
return EvalForIndexType<float>(context, node, indices);
case kTfLiteInt32:
return EvalForIndexType<int32_t>(context, node, indices);
case kTfLiteInt64:
return EvalForIndexType<int64_t>(context, node, indices);
case kTfLiteInt8:
return EvalForIndexType<int8_t>(context, node, indices);
case kTfLiteUInt8:
return EvalForIndexType<uint8_t>(context, node, indices);
default:
TF_LITE_KERNEL_LOG(
context,
"Value type %s is currently not supported by sparse to dense.",
TfLiteTypeGetName(values->type));
return kTfLiteError;
}
}
}
TfLiteRegistration* Register_SPARSE_TO_DENSE() {
static TfLiteRegistration r = {nullptr, nullptr, sparse_to_dense::Prepare,
sparse_to_dense::Eval};
return &r;
}
}
}
} | #include <stdint.h>
#include <initializer_list>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace {
using ::testing::ElementsAreArray;
enum class TestType {
kPersistentRo = 0,
kConstant = 1,
kDynamic = 2,
};
template <typename T>
class SparseToDenseOpModel : public SingleOpModel {
public:
SparseToDenseOpModel(std::initializer_list<int> indices_shape,
std::initializer_list<int> output_shape_shape,
std::initializer_list<int> values_shape, T default_value,
TensorType tensor_index_type,
TensorType tensor_input_type,
std::initializer_list<int> output_shape_data,
TestType test_type)
: test_type_(test_type) {
indices_ = AddInput(tensor_index_type);
output_shape_ = test_type == TestType::kConstant
? AddConstInput(TensorType_INT32, output_shape_data,
output_shape_shape)
: AddInput(TensorType_INT32);
values_ = AddInput(tensor_input_type);
default_value_ = AddInput(tensor_input_type);
output_ = AddOutput(tensor_input_type);
SetBuiltinOp(BuiltinOperator_SPARSE_TO_DENSE,
BuiltinOptions_SparseToDenseOptions,
                 CreateSparseToDenseOptions(builder_, /*validate_indices=*/false)
                     .Union());
    BuildInterpreter({indices_shape, output_shape_shape, values_shape, {1}},
                     /*num_threads=*/-1, /*allow_fp32_relax_to_fp16=*/false,
                     /*apply_delegate=*/true, /*allocate_and_delegate=*/false);
if (test_type == TestType::kPersistentRo) {
interpreter_->tensor(output_shape_)->allocation_type =
kTfLitePersistentRo;
interpreter_->ResizeInputTensorStrict(output_shape_, output_shape_shape);
PopulateTensor<int32_t>(output_shape_, output_shape_data);
}
AllocateAndDelegate(true);
PopulateTensor<T>(default_value_, {default_value});
}
int indices() { return indices_; }
int output_shape() { return output_shape_; }
int values() { return values_; }
bool IsDynamicOutput() {
const TfLiteTensor* tensor = interpreter_->tensor(output_);
return tensor->allocation_type == kTfLiteDynamic;
}
std::vector<T> GetOutput() { return ExtractVector<T>(output_); }
std::vector<int> GetOutputShape() { return GetTensorShape(output_); }
private:
int indices_;
int output_shape_;
int values_;
int default_value_;
int output_;
TestType test_type_;
};
class SparseToDenseOpModelTest : public ::testing::TestWithParam<TestType> {};
TEST_P(SparseToDenseOpModelTest, ZeroDimensionTest) {
SparseToDenseOpModel<float> m({1}, {1}, {1}, 0, TensorType_INT32,
TensorType_FLOAT32, {5}, GetParam());
m.PopulateTensor<int32_t>(m.indices(), {3});
if (GetParam() != TestType::kConstant) {
m.PopulateTensor<int32_t>(m.output_shape(), {5});
}
m.PopulateTensor<float>(m.values(), {7});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
ASSERT_EQ(m.IsDynamicOutput(), GetParam() == TestType::kDynamic);
EXPECT_THAT(m.GetOutput(), ElementsAreArray({0, 0, 0, 7, 0}));
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({5}));
}
TEST_P(SparseToDenseOpModelTest, OneDimensionTest) {
SparseToDenseOpModel<float> m({3}, {1}, {3}, 0, TensorType_INT32,
TensorType_FLOAT32, {7}, GetParam());
m.PopulateTensor<int32_t>(m.indices(), {1, 3, 5});
if (GetParam() != TestType::kConstant) {
m.PopulateTensor<int32_t>(m.output_shape(), {7});
}
m.PopulateTensor<float>(m.values(), {2, 4, 6});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
ASSERT_EQ(m.IsDynamicOutput(), GetParam() == TestType::kDynamic);
EXPECT_THAT(m.GetOutput(), ElementsAreArray({0, 2, 0, 4, 0, 6, 0}));
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({7}));
}
TEST_P(SparseToDenseOpModelTest, TwoDimensionsTest) {
SparseToDenseOpModel<float> m({3, 3}, {3}, {3}, 0, TensorType_INT32,
TensorType_FLOAT32, {3, 3, 3}, GetParam());
m.PopulateTensor<int32_t>(m.indices(), {0, 0, 0, 1, 2, 1, 2, 0, 1});
if (GetParam() != TestType::kConstant) {
m.PopulateTensor<int32_t>(m.output_shape(), {3, 3, 3});
}
m.PopulateTensor<float>(m.values(), {2, 4, 6});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
ASSERT_EQ(m.IsDynamicOutput(), GetParam() == TestType::kDynamic);
EXPECT_THAT(m.GetOutput(),
ElementsAreArray({2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 4, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0}));
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({3, 3, 3}));
}
TEST_P(SparseToDenseOpModelTest, Int64IndexTest) {
SparseToDenseOpModel<float> m({3, 3}, {3}, {3}, -1, TensorType_INT64,
TensorType_FLOAT32, {3, 3, 3}, GetParam());
m.PopulateTensor<int64_t>(m.indices(), {0, 0, 0, 1, 2, 1, 2, 0, 1});
if (GetParam() != TestType::kConstant) {
m.PopulateTensor<int32_t>(m.output_shape(), {3, 3, 3});
}
m.PopulateTensor<float>(m.values(), {2, 4, 6});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
ASSERT_EQ(m.IsDynamicOutput(), GetParam() == TestType::kDynamic);
EXPECT_THAT(
m.GetOutput(),
ElementsAreArray({2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, 4, -1, -1, 6, -1, -1, -1, -1, -1, -1, -1}));
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({3, 3, 3}));
}
TEST_P(SparseToDenseOpModelTest, DefaultValueTest) {
SparseToDenseOpModel<float> m({3, 3}, {3}, {3}, -1, TensorType_INT32,
TensorType_FLOAT32, {3, 3, 3}, GetParam());
m.PopulateTensor<int32_t>(m.indices(), {0, 0, 0, 1, 2, 1, 2, 0, 1});
if (GetParam() != TestType::kConstant) {
m.PopulateTensor<int32_t>(m.output_shape(), {3, 3, 3});
}
m.PopulateTensor<float>(m.values(), {2, 4, 6});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
ASSERT_EQ(m.IsDynamicOutput(), GetParam() == TestType::kDynamic);
EXPECT_THAT(
m.GetOutput(),
ElementsAreArray({2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, 4, -1, -1, 6, -1, -1, -1, -1, -1, -1, -1}));
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({3, 3, 3}));
}
TEST_P(SparseToDenseOpModelTest, Int32ValueTest) {
SparseToDenseOpModel<int32_t> m({3, 3}, {3}, {3}, -1, TensorType_INT32,
TensorType_INT32, {3, 3, 3}, GetParam());
m.PopulateTensor<int32_t>(m.indices(), {0, 0, 0, 1, 2, 1, 2, 0, 1});
if (GetParam() != TestType::kConstant) {
m.PopulateTensor<int32_t>(m.output_shape(), {3, 3, 3});
}
m.PopulateTensor<int32_t>(m.values(), {2, 4, 6});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
ASSERT_EQ(m.IsDynamicOutput(), GetParam() == TestType::kDynamic);
EXPECT_THAT(
m.GetOutput(),
ElementsAreArray({2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, 4, -1, -1, 6, -1, -1, -1, -1, -1, -1, -1}));
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({3, 3, 3}));
}
TEST_P(SparseToDenseOpModelTest, Int64ValueTest) {
SparseToDenseOpModel<int64_t> m({3, 3}, {3}, {3}, -1, TensorType_INT32,
TensorType_INT64, {3, 3, 3}, GetParam());
m.PopulateTensor<int32_t>(m.indices(), {0, 0, 0, 1, 2, 1, 2, 0, 1});
if (GetParam() != TestType::kConstant) {
m.PopulateTensor<int32_t>(m.output_shape(), {3, 3, 3});
}
m.PopulateTensor<int64_t>(m.values(), {2, 4, 6});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
ASSERT_EQ(m.IsDynamicOutput(), GetParam() == TestType::kDynamic);
EXPECT_THAT(
m.GetOutput(),
ElementsAreArray({2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, 4, -1, -1, 6, -1, -1, -1, -1, -1, -1, -1}));
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({3, 3, 3}));
}
TEST_P(SparseToDenseOpModelTest, Int8ValueTest) {
SparseToDenseOpModel<int8_t> m({3, 3}, {3}, {3}, -1, TensorType_INT32,
TensorType_INT8, {3, 3, 3}, GetParam());
m.PopulateTensor<int32_t>(m.indices(), {0, 0, 0, 1, 2, 1, 2, 0, 1});
if (GetParam() != TestType::kConstant) {
m.PopulateTensor<int32_t>(m.output_shape(), {3, 3, 3});
}
m.PopulateTensor<int8_t>(m.values(), {2, 4, 6});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
ASSERT_EQ(m.IsDynamicOutput(), GetParam() == TestType::kDynamic);
EXPECT_THAT(
m.GetOutput(),
ElementsAreArray({2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, 4, -1, -1, 6, -1, -1, -1, -1, -1, -1, -1}));
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({3, 3, 3}));
}
TEST_P(SparseToDenseOpModelTest, UInt8ValueTest) {
SparseToDenseOpModel<uint8_t> m({3, 3}, {3}, {3}, 1, TensorType_INT32,
TensorType_UINT8, {3, 3, 3}, GetParam());
m.PopulateTensor<int32_t>(m.indices(), {0, 0, 0, 1, 2, 1, 2, 0, 1});
if (GetParam() != TestType::kConstant) {
m.PopulateTensor<int32_t>(m.output_shape(), {3, 3, 3});
}
m.PopulateTensor<uint8_t>(m.values(), {2, 4, 6});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
ASSERT_EQ(m.IsDynamicOutput(), GetParam() == TestType::kDynamic);
EXPECT_THAT(m.GetOutput(),
ElementsAreArray({2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 4, 1, 1, 6, 1, 1, 1, 1, 1, 1, 1}));
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({3, 3, 3}));
}
INSTANTIATE_TEST_SUITE_P(SparseToDenseOpModelTest, SparseToDenseOpModelTest,
::testing::Values(TestType::kPersistentRo,
TestType::kConstant,
TestType::kDynamic));
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/sparse_to_dense.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/sparse_to_dense_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b007e8c5-3217-4f91-b2a8-1b4f0950743e | cpp | tensorflow/tensorflow | pooling | tensorflow/lite/delegates/gpu/gl/kernels/pooling.cc | tensorflow/lite/delegates/gpu/cl/kernels/pooling_test.cc | #include "tensorflow/lite/delegates/gpu/gl/kernels/pooling.h"
#include <algorithm>
#include <any>
#include <cstdint>
#include <cstring>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/memory/memory.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/types.h"
#include "tensorflow/lite/delegates/gpu/gl/variable.h"
namespace tflite {
namespace gpu {
namespace gl {
namespace {
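// Generates GLSL for max pooling. When `attr.output_indices` is set, the
// shader additionally records, per channel, the flattened in-window index of
// the maximum element.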
absl::Status GenerateMaxPoolingCode(const Pooling2DAttributes& attr,
const NodeShader::GenerationContext& ctx,
GeneratedCode* generated_code) {
if (attr.padding.prepended.h > attr.kernel.h ||
attr.padding.prepended.w > attr.kernel.w) {
return absl::InvalidArgumentError("Padding is bigger than kernel.");
}
std::vector<Variable> parameters = {
{"input_data_0_h", static_cast<int>(ctx.input_shapes[0][1])},
{"input_data_0_w", static_cast<int>(ctx.input_shapes[0][2])},
{"stride", int2(attr.strides.w, attr.strides.h)},
{"offset", int2(attr.padding.prepended.w, attr.padding.prepended.h)},
{"window_h", attr.kernel.h},
{"window_w", attr.kernel.w},
};
std::string source = R"(
const highp float inf = -(1.0f / 0.0f);
value_0 = vec4(inf);)";
if (attr.output_indices) {
source += R"(
ivec4 value_1;
)";
}
source += R"(
ivec2 base_coord = gid.xy * $stride$ - $offset$;
for (int a = 0; a < $window_h$; ++a) {
for (int b = 0; b < $window_w$; ++b) {
ivec2 coord = base_coord + ivec2(b, a);
if (coord.x < 0 || coord.y < 0 || coord.x >= $input_data_0_w$ || coord.y >= $input_data_0_h$) {
continue;
}
vec4 input_ = $input_data_0[coord.x, coord.y, gid.z]$;)";
if (attr.output_indices) {
source += R"(
int window_index = a * $window_w$ + b;
if (input_.x > value_0.x) value_1.x = window_index;
if (input_.y > value_0.y) value_1.y = window_index;
if (input_.z > value_0.z) value_1.z = window_index;
if (input_.w > value_0.w) value_1.w = window_index;)";
}
source += R"(
value_0 = max(value_0, input_);
}
}
)";
*generated_code = {
std::move(parameters),
{},
{},
uint3(),
uint3(),
std::move(source),
IOStructure::ONLY_DEFINITIONS,
IOStructure::AUTO,
};
return absl::OkStatus();
}
absl::Status GenerateAveragePoolingCode(
const Pooling2DAttributes& attr, const NodeShader::GenerationContext& ctx,
GeneratedCode* generated_code) {
std::vector<Variable> parameters = {
{"input_data_0_h", static_cast<int>(ctx.input_shapes[0][1])},
{"input_data_0_w", static_cast<int>(ctx.input_shapes[0][2])},
{"stride", int2(attr.strides.w, attr.strides.h)},
{"offset", int2(attr.padding.prepended.w, attr.padding.prepended.h)},
{"window_h", attr.kernel.h},
{"window_w", attr.kernel.w},
};
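  // If every window position reachable from the output grid is guaranteed to
  // lie inside the input, emit the cheaper shader without per-tap bounds
  // checks and divide by the full window size instead of counting valid taps.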
auto x_in_bounds = [input_width = ctx.input_shapes[0][2],
kernel_width = attr.kernel.w](int64_t x) -> bool {
return 0 <= x && x + kernel_width <= input_width;
};
auto y_in_bounds = [input_height = ctx.input_shapes[0][1],
kernel_height = attr.kernel.h](int64_t y) -> bool {
return 0 <= y && y + kernel_height <= input_height;
};
const int64_t output_shape_max_y = ctx.output_shapes[0][1] - 1;
const int64_t output_shape_max_x = ctx.output_shapes[0][2] - 1;
const int64_t base_x = -attr.padding.prepended.w;
const int64_t base_y = -attr.padding.prepended.h;
const bool bounds_check_necessary =
!(x_in_bounds(base_x) &&
x_in_bounds(base_x + output_shape_max_x * attr.strides.w) &&
y_in_bounds(base_y) &&
y_in_bounds(base_y + output_shape_max_y * attr.strides.h));
std::string source = bounds_check_necessary ?
R"(
int window_size = 0;
for (int a = 0; a < $window_h$; ++a) {
for (int b = 0; b < $window_w$; ++b) {
ivec2 coord = gid.xy * $stride$ - $offset$ + ivec2(b, a);
if (coord.x >= 0 && coord.y >= 0 && coord.x < $input_data_0_w$ && coord.y < $input_data_0_h$) {
value_0 += $input_data_0[coord.x, coord.y, gid.z]$;
window_size++;
}
}
}
value_0 /= float(window_size);
)"
:
R"(
for (int a = 0; a < $window_h$; ++a) {
for (int b = 0; b < $window_w$; ++b) {
ivec2 coord = gid.xy * $stride$ - $offset$ + ivec2(b, a);
value_0 += $input_data_0[coord.x, coord.y, gid.z]$;
}
}
value_0 /= float($window_h$ * $window_w$);
)";
*generated_code = {
std::move(parameters),
{},
{},
uint3(),
uint3(),
std::move(source),
IOStructure::ONLY_DEFINITIONS,
IOStructure::AUTO,
};
return absl::OkStatus();
}
class Pooling : public NodeShader {
public:
absl::Status GenerateCode(const GenerationContext& ctx,
GeneratedCode* generated_code) const final {
const auto& attr = std::any_cast<const Pooling2DAttributes&>(ctx.op_attr);
switch (attr.type) {
case PoolingType::AVERAGE:
return GenerateAveragePoolingCode(attr, ctx, generated_code);
case PoolingType::MAX:
return GenerateMaxPoolingCode(attr, ctx, generated_code);
default:
return absl::InvalidArgumentError("Incorrect attributes' type.");
}
}
};
}
std::unique_ptr<NodeShader> NewPoolingNodeShader() {
return std::make_unique<Pooling>();
}
}
}
} | #include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/gpu/cl/kernels/cl_test.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/tasks/pooling_test_util.h"
namespace tflite {
namespace gpu {
namespace cl {
namespace {
TEST_F(OpenCLOperationTest, AveragePooling) {
auto status = AveragePoolingTest(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, AveragePoolingNonEmptyPadding) {
auto status = AveragePoolingNonEmptyPaddingTest(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, MaxPooling) {
auto status = MaxPoolingTest(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, MaxPoolingIndices) {
auto status = MaxPoolingIndicesTest(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/gl/kernels/pooling.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/cl/kernels/pooling_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
986d4332-9393-4e1e-9d83-25ea2e3f208e | cpp | tensorflow/tensorflow | cpu_info | third_party/xla/third_party/tsl/tsl/platform/cpu_info.cc | third_party/xla/third_party/tsl/tsl/platform/cpu_info_test.cc | #include "tsl/platform/cpu_info.h"
#include "absl/base/call_once.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/platform.h"
#include "tsl/platform/types.h"
#if defined(PLATFORM_IS_X86)
#include <mutex>
#endif
#if defined(PLATFORM_IS_ARM64) && !defined(__APPLE__) && !defined(__OpenBSD__)
#include <sys/auxv.h>
#ifndef HWCAP_CPUID
#define HWCAP_CPUID (1 << 11)
#endif
#include <fstream>
#endif
#ifdef PLATFORM_IS_X86
#ifdef PLATFORM_WINDOWS
#define GETCPUID(a, b, c, d, a_inp, c_inp) \
{ \
int cpu_info[4] = {-1}; \
__cpuidex(cpu_info, a_inp, c_inp); \
a = cpu_info[0]; \
b = cpu_info[1]; \
c = cpu_info[2]; \
d = cpu_info[3]; \
}
#else
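// Inline-assembly cpuid. %rbx is reserved as the PIC register on x86-64, so
// it is parked in %rdi around the instruction instead of being clobbered
// directly.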
#define GETCPUID(a, b, c, d, a_inp, c_inp) \
asm("mov %%rbx, %%rdi\n" \
"cpuid\n" \
"xchg %%rdi, %%rbx\n" \
: "=a"(a), "=D"(b), "=c"(c), "=d"(d) \
: "a"(a_inp), "2"(c_inp))
#endif
#endif
namespace tsl {
namespace port {
namespace {
#ifdef PLATFORM_IS_X86
class CPUIDInfo;
void InitCPUIDInfo();
CPUIDInfo *cpuid = nullptr;
#ifdef PLATFORM_WINDOWS
int GetXCR0EAX() { return _xgetbv(0); }
#else
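// Reads XCR0 via XGETBV to learn which register states (XMM/YMM/ZMM and mask
// registers) the OS saves and restores; CPUID feature bits alone do not
// guarantee OS support for AVX/AVX-512.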
int GetXCR0EAX() {
int eax, edx;
asm("XGETBV" : "=a"(eax), "=d"(edx) : "c"(0));
return eax;
}
#endif
class CPUIDInfo {
public:
CPUIDInfo()
: have_adx_(0),
have_aes_(0),
have_amx_bf16_(0),
have_amx_fp16_(0),
have_amx_int8_(0),
have_amx_tile_(0),
have_avx_(0),
have_avx2_(0),
have_avx512f_(0),
have_avx512cd_(0),
have_avx512er_(0),
have_avx512pf_(0),
have_avx512vl_(0),
have_avx512bw_(0),
have_avx512dq_(0),
have_avx512vbmi_(0),
have_avx512ifma_(0),
have_avx512_4vnniw_(0),
have_avx512_4fmaps_(0),
have_avx512_bf16_(0),
have_avx512_fp16_(0),
have_avx512_vnni_(0),
have_avx_vnni_(0),
have_avx_vnni_int8_(0),
have_avx_ne_convert_(0),
have_bmi1_(0),
have_bmi2_(0),
have_cmov_(0),
have_cmpxchg16b_(0),
have_cmpxchg8b_(0),
have_f16c_(0),
have_fma_(0),
have_mmx_(0),
have_pclmulqdq_(0),
have_popcnt_(0),
have_prefetchw_(0),
have_prefetchwt1_(0),
have_rdrand_(0),
have_rdseed_(0),
have_smap_(0),
have_sse_(0),
have_sse2_(0),
have_sse3_(0),
have_sse4_1_(0),
have_sse4_2_(0),
have_ssse3_(0),
have_hypervisor_(0) {}
static void Initialize() {
CHECK(cpuid == nullptr) << __func__ << " ran more than once";
cpuid = new CPUIDInfo;
uint32 eax, ebx, ecx, edx;
GETCPUID(eax, ebx, ecx, edx, 0, 0);
cpuid->vendor_str_.append(reinterpret_cast<char *>(&ebx), 4);
cpuid->vendor_str_.append(reinterpret_cast<char *>(&edx), 4);
cpuid->vendor_str_.append(reinterpret_cast<char *>(&ecx), 4);
GETCPUID(eax, ebx, ecx, edx, 1, 0);
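    // Only the base model and family fields are decoded here; the extended
    // model/family bits in eax[27:16] are ignored.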
cpuid->model_num_ = static_cast<int>((eax >> 4) & 0xf);
cpuid->family_ = static_cast<int>((eax >> 8) & 0xf);
cpuid->have_aes_ = (ecx >> 25) & 0x1;
cpuid->have_cmov_ = (edx >> 15) & 0x1;
cpuid->have_cmpxchg16b_ = (ecx >> 13) & 0x1;
cpuid->have_cmpxchg8b_ = (edx >> 8) & 0x1;
cpuid->have_mmx_ = (edx >> 23) & 0x1;
cpuid->have_pclmulqdq_ = (ecx >> 1) & 0x1;
cpuid->have_popcnt_ = (ecx >> 23) & 0x1;
cpuid->have_rdrand_ = (ecx >> 30) & 0x1;
cpuid->have_sse2_ = (edx >> 26) & 0x1;
cpuid->have_sse3_ = ecx & 0x1;
cpuid->have_sse4_1_ = (ecx >> 19) & 0x1;
cpuid->have_sse4_2_ = (ecx >> 20) & 0x1;
cpuid->have_sse_ = (edx >> 25) & 0x1;
cpuid->have_ssse3_ = (ecx >> 9) & 0x1;
cpuid->have_hypervisor_ = (ecx >> 31) & 1;
const uint64 xcr0_xmm_mask = 0x2;
const uint64 xcr0_ymm_mask = 0x4;
const uint64 xcr0_maskreg_mask = 0x20;
const uint64 xcr0_zmm0_15_mask = 0x40;
const uint64 xcr0_zmm16_31_mask = 0x80;
const uint64 xcr0_avx_mask = xcr0_xmm_mask | xcr0_ymm_mask;
const uint64 xcr0_avx512_mask = xcr0_avx_mask | xcr0_maskreg_mask |
xcr0_zmm0_15_mask | xcr0_zmm16_31_mask;
const bool have_avx =
((ecx >> 27) & 0x1) &&
((GetXCR0EAX() & xcr0_avx_mask) == xcr0_avx_mask) &&
((ecx >> 28) & 0x1);
const bool have_avx512 =
((ecx >> 27) & 0x1) &&
((GetXCR0EAX() & xcr0_avx512_mask) == xcr0_avx512_mask);
cpuid->have_avx_ = have_avx;
cpuid->have_fma_ = have_avx && ((ecx >> 12) & 0x1);
cpuid->have_f16c_ = have_avx && ((ecx >> 29) & 0x1);
GETCPUID(eax, ebx, ecx, edx, 7, 0);
const uint32 kMaxNumSubLeaves = eax;
cpuid->have_adx_ = (ebx >> 19) & 0x1;
cpuid->have_avx2_ = have_avx && ((ebx >> 5) & 0x1);
cpuid->have_bmi1_ = (ebx >> 3) & 0x1;
cpuid->have_bmi2_ = (ebx >> 8) & 0x1;
cpuid->have_prefetchwt1_ = ecx & 0x1;
cpuid->have_rdseed_ = (ebx >> 18) & 0x1;
cpuid->have_smap_ = (ebx >> 20) & 0x1;
cpuid->have_avx512f_ = have_avx512 && ((ebx >> 16) & 0x1);
cpuid->have_avx512cd_ = have_avx512 && ((ebx >> 28) & 0x1);
cpuid->have_avx512er_ = have_avx512 && ((ebx >> 27) & 0x1);
cpuid->have_avx512pf_ = have_avx512 && ((ebx >> 26) & 0x1);
cpuid->have_avx512vl_ = have_avx512 && ((ebx >> 31) & 0x1);
cpuid->have_avx512bw_ = have_avx512 && ((ebx >> 30) & 0x1);
cpuid->have_avx512dq_ = have_avx512 && ((ebx >> 17) & 0x1);
cpuid->have_avx512vbmi_ = have_avx512 && ((ecx >> 1) & 0x1);
cpuid->have_avx512ifma_ = have_avx512 && ((ebx >> 21) & 0x1);
cpuid->have_avx512_4vnniw_ = have_avx512 && ((edx >> 2) & 0x1);
cpuid->have_avx512_4fmaps_ = have_avx512 && ((edx >> 3) & 0x1);
cpuid->have_avx512_vnni_ = have_avx512 && ((ecx >> 11) & 0x1);
cpuid->have_amx_tile_ = (edx >> 24) & 0x1;
cpuid->have_amx_int8_ = (edx >> 25) & 0x1;
cpuid->have_amx_bf16_ = (edx >> 22) & 0x1;
cpuid->have_avx512_fp16_ = have_avx512 && ((edx >> 23) & 0x1);
if (kMaxNumSubLeaves >= 1) {
GETCPUID(eax, ebx, ecx, edx, 7, 1);
cpuid->have_avx_vnni_ = (eax >> 4) & 0x1;
cpuid->have_avx512_bf16_ = have_avx512 && ((eax >> 5) & 0x1);
cpuid->have_amx_fp16_ = (eax >> 21) & 0x1;
cpuid->have_avx_vnni_int8_ = (edx >> 4) & 0x1;
cpuid->have_avx_ne_convert_ = (edx >> 5) & 0x1;
}
}
static bool TestFeature(CPUFeature feature) {
InitCPUIDInfo();
switch (feature) {
case ADX: return cpuid->have_adx_;
case AES: return cpuid->have_aes_;
case AMX_BF16: return cpuid->have_amx_bf16_;
case AMX_FP16: return cpuid->have_amx_fp16_;
case AMX_INT8: return cpuid->have_amx_int8_;
case AMX_TILE: return cpuid->have_amx_tile_;
case AVX2: return cpuid->have_avx2_;
case AVX: return cpuid->have_avx_;
case AVX512F: return cpuid->have_avx512f_;
case AVX512CD: return cpuid->have_avx512cd_;
case AVX512PF: return cpuid->have_avx512pf_;
case AVX512ER: return cpuid->have_avx512er_;
case AVX512VL: return cpuid->have_avx512vl_;
case AVX512BW: return cpuid->have_avx512bw_;
case AVX512DQ: return cpuid->have_avx512dq_;
case AVX512VBMI: return cpuid->have_avx512vbmi_;
case AVX512IFMA: return cpuid->have_avx512ifma_;
case AVX512_4VNNIW: return cpuid->have_avx512_4vnniw_;
case AVX512_4FMAPS: return cpuid->have_avx512_4fmaps_;
case AVX512_BF16: return cpuid->have_avx512_bf16_;
case AVX512_FP16: return cpuid->have_avx512_fp16_;
case AVX512_VNNI: return cpuid->have_avx512_vnni_;
case AVX_VNNI: return cpuid->have_avx_vnni_;
case AVX_VNNI_INT8: return cpuid->have_avx_vnni_int8_;
case AVX_NE_CONVERT: return cpuid->have_avx_ne_convert_;
case BMI1: return cpuid->have_bmi1_;
case BMI2: return cpuid->have_bmi2_;
case CMOV: return cpuid->have_cmov_;
case CMPXCHG16B: return cpuid->have_cmpxchg16b_;
case CMPXCHG8B: return cpuid->have_cmpxchg8b_;
case F16C: return cpuid->have_f16c_;
case FMA: return cpuid->have_fma_;
case MMX: return cpuid->have_mmx_;
case PCLMULQDQ: return cpuid->have_pclmulqdq_;
case POPCNT: return cpuid->have_popcnt_;
case PREFETCHW: return cpuid->have_prefetchw_;
case PREFETCHWT1: return cpuid->have_prefetchwt1_;
case RDRAND: return cpuid->have_rdrand_;
case RDSEED: return cpuid->have_rdseed_;
case SMAP: return cpuid->have_smap_;
case SSE2: return cpuid->have_sse2_;
case SSE3: return cpuid->have_sse3_;
case SSE4_1: return cpuid->have_sse4_1_;
case SSE4_2: return cpuid->have_sse4_2_;
case SSE: return cpuid->have_sse_;
case SSSE3: return cpuid->have_ssse3_;
case HYPERVISOR: return cpuid->have_hypervisor_;
default:
break;
}
return false;
}
string vendor_str() const { return vendor_str_; }
int family() const { return family_; }
int model_num() { return model_num_; }
private:
int have_adx_ : 1;
int have_aes_ : 1;
int have_amx_bf16_ : 1;
int have_amx_fp16_ : 1;
int have_amx_int8_ : 1;
int have_amx_tile_ : 1;
int have_avx_ : 1;
int have_avx2_ : 1;
int have_avx512f_ : 1;
int have_avx512cd_ : 1;
int have_avx512er_ : 1;
int have_avx512pf_ : 1;
int have_avx512vl_ : 1;
int have_avx512bw_ : 1;
int have_avx512dq_ : 1;
int have_avx512vbmi_ : 1;
int have_avx512ifma_ : 1;
int have_avx512_4vnniw_ : 1;
int have_avx512_4fmaps_ : 1;
int have_avx512_bf16_ : 1;
int have_avx512_fp16_ : 1;
int have_avx512_vnni_ : 1;
int have_avx_vnni_ : 1;
int have_avx_vnni_int8_ : 1;
int have_avx_ne_convert_ : 1;
int have_bmi1_ : 1;
int have_bmi2_ : 1;
int have_cmov_ : 1;
int have_cmpxchg16b_ : 1;
int have_cmpxchg8b_ : 1;
int have_f16c_ : 1;
int have_fma_ : 1;
int have_mmx_ : 1;
int have_pclmulqdq_ : 1;
int have_popcnt_ : 1;
int have_prefetchw_ : 1;
int have_prefetchwt1_ : 1;
int have_rdrand_ : 1;
int have_rdseed_ : 1;
int have_smap_ : 1;
int have_sse_ : 1;
int have_sse2_ : 1;
int have_sse3_ : 1;
int have_sse4_1_ : 1;
int have_sse4_2_ : 1;
int have_ssse3_ : 1;
int have_hypervisor_ : 1;
string vendor_str_;
int family_;
int model_num_;
};
absl::once_flag cpuid_once_flag;
void InitCPUIDInfo() {
absl::call_once(cpuid_once_flag, CPUIDInfo::Initialize);
}
#endif
#if defined(PLATFORM_IS_ARM64) && !defined(__APPLE__) && !defined(__OpenBSD__)
class CPUIDInfo;
void InitCPUIDInfo();
CPUIDInfo *cpuid = nullptr;
class CPUIDInfo {
public:
CPUIDInfo()
: implementer_(0),
variant_(0),
cpunum_(0),
is_arm_neoverse_v1_(0),
is_arm_neoverse_n1_(0) {}
static void Initialize() {
if (cpuid != nullptr) return;
cpuid = new CPUIDInfo;
if (!(getauxval(AT_HWCAP) & HWCAP_CPUID)) {
return;
}
int present_cpu = -1;
#ifndef PLATFORM_WINDOWS
std::ifstream CPUspresent;
CPUspresent.open("/sys/devices/system/cpu/present", std::ios::in);
if (CPUspresent.is_open()) {
std::string line;
if (static_cast<bool>(getline(CPUspresent, line))) {
auto ending = line.end();
for (auto i = line.begin(); i < line.end(); ++i) {
if (*i == '-' || *i == ',') {
ending = i;
break;
}
}
line.erase(ending, line.end());
present_cpu = std::stoi(line);
}
}
#endif
if (present_cpu == -1) {
return;
}
#ifndef PLATFORM_WINDOWS
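    // Decode MIDR_EL1 as exposed by the kernel: bits [31:24] hold the
    // implementer, [23:20] the variant, and [15:4] the primary part number.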
std::stringstream str;
str << "/sys/devices/system/cpu/cpu" << present_cpu
<< "/regs/identification/midr_el1";
std::ifstream midr_el1_file(str.str(), std::ios::in);
if (midr_el1_file.is_open()) {
std::string line;
if (static_cast<bool>(getline(midr_el1_file, line))) {
uint32 midr_el1 = std::stoul(line, nullptr, 16);
cpuid->implementer_ = (midr_el1 >> 24) & 0xFF;
cpuid->variant_ = (midr_el1 >> 20) & 0xF;
cpuid->cpunum_ = (midr_el1 >> 4) & 0xFFF;
if (cpuid->implementer_ == 0x41) {
switch (cpuid->cpunum_) {
case 0xd40:
cpuid->is_arm_neoverse_v1_ = 1;
break;
case 0xd0c:
cpuid->is_arm_neoverse_n1_ = 1;
break;
default:
break;
}
}
}
}
#endif
}
int implementer() const { return implementer_; }
int cpunum() const { return cpunum_; }
static bool TestAarch64CPU(Aarch64CPU cpu) {
InitCPUIDInfo();
switch (cpu) {
case ARM_NEOVERSE_V1:
return cpuid->is_arm_neoverse_v1_;
default:
        return false;
}
}
private:
int implementer_;
int variant_;
int cpunum_;
int is_arm_neoverse_v1_;
int is_arm_neoverse_n1_;
};
absl::once_flag cpuid_once_flag;
void InitCPUIDInfo() {
absl::call_once(cpuid_once_flag, CPUIDInfo::Initialize);
}
#endif
}
bool TestCPUFeature(CPUFeature feature) {
#ifdef PLATFORM_IS_X86
return CPUIDInfo::TestFeature(feature);
#else
return false;
#endif
}
bool TestAarch64CPU(Aarch64CPU cpu) {
#if defined(PLATFORM_IS_ARM64) && !defined(__APPLE__) && !defined(__OpenBSD__)
return CPUIDInfo::TestAarch64CPU(cpu);
#else
return false;
#endif
}
std::string CPUVendorIDString() {
#ifdef PLATFORM_IS_X86
InitCPUIDInfo();
return cpuid->vendor_str();
#else
return "";
#endif
}
int CPUFamily() {
#ifdef PLATFORM_IS_X86
InitCPUIDInfo();
return cpuid->family();
#elif defined(PLATFORM_IS_ARM64) && !defined(__APPLE__) && !defined(__OpenBSD__)
InitCPUIDInfo();
return cpuid->implementer();
#else
return 0;
#endif
}
int CPUModelNum() {
#ifdef PLATFORM_IS_X86
InitCPUIDInfo();
return cpuid->model_num();
#elif defined(PLATFORM_IS_ARM64) && !defined(__APPLE__) && !defined(__OpenBSD__)
InitCPUIDInfo();
return cpuid->cpunum();
#else
return 0;
#endif
}
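// Returns the number of hyperthreads per physical core, derived from CPUID
// leaf 11 (the x2APIC ID shift reported for the SMT level), or 0 if the
// information is unavailable.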
int CPUIDNumSMT() {
#ifdef PLATFORM_IS_X86
uint32 eax, ebx, ecx, edx;
GETCPUID(eax, ebx, ecx, edx, 0, 0);
if (eax >= 11) {
GETCPUID(eax, ebx, ecx, edx, 11, 0);
if (ebx != 0 && ((ecx & 0xff00) >> 8) == 1) {
return 1 << (eax & 0x1f);
}
}
#endif
return 0;
}
}
} | #include "tsl/platform/cpu_info.h"
#include "tsl/platform/test.h"
namespace tsl {
TEST(CPUInfo, CommonX86CPU) {
if (port::TestCPUFeature(port::CPUFeature::SSE)) {
EXPECT_TRUE(port::IsX86CPU());
}
}
TEST(CPUInfo, Aarch64NeoverseV1CPU) {
if (port::TestAarch64CPU(port::Aarch64CPU::ARM_NEOVERSE_V1)) {
EXPECT_TRUE(port::IsAarch64CPU());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/cpu_info.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/cpu_info_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
fba84131-3d0e-4f17-933c-2161333e37ff | cpp | tensorflow/tensorflow | dynamic_stitch_op | tensorflow/compiler/tf2xla/kernels/dynamic_stitch_op.cc | tensorflow/core/kernels/dynamic_stitch_op_test.cc | #include <algorithm>
#include <vector>
#include "tensorflow/compiler/tf2xla/shape_util.h"
#include "tensorflow/compiler/tf2xla/type_util.h"
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "xla/hlo/builder/xla_builder.h"
#include "xla/literal_util.h"
#include "tensorflow/core/framework/bounds_check.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/tensor.h"
namespace tensorflow {
namespace {
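// XLA lowering of DynamicStitch. All index inputs must be compile-time
// constants (see ConstantInputList below) so the merge can be resolved
// statically during compilation.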
class DynamicStitchOp : public XlaOpKernel {
public:
explicit DynamicStitchOp(OpKernelConstruction* ctx) : XlaOpKernel(ctx) {
OP_REQUIRES(
ctx, ctx->num_inputs() > 0,
errors::InvalidArgument("DynamicStitchOp: Must have some inputs"));
OP_REQUIRES(ctx, ctx->num_inputs() % 2 == 0,
errors::InvalidArgument(
"DynamicStitchOp: Must have even number of arguments"));
const int n = ctx->num_inputs() / 2;
const DataType dt = ctx->input_type(n);
DataTypeVector expected;
for (int i = 0; i < n; i++) {
expected.push_back(DT_INT32);
}
for (int i = 0; i < n; i++) {
expected.push_back(dt);
}
OP_REQUIRES_OK(ctx, ctx->MatchSignature(expected, {dt}));
}
void Compile(XlaOpKernelContext* ctx) override {
std::vector<xla::Literal> indices_input;
OP_REQUIRES_OK(ctx, ctx->ConstantInputList("indices", &indices_input));
std::vector<xla::XlaOp> data;
std::vector<TensorShape> data_shapes;
OP_REQUIRES_OK(ctx, ctx->InputList("data", &data, &data_shapes));
std::vector<xla::Literal> indices(indices_input.size());
const TensorShape& data0_shape = data_shapes[0];
TensorShape indices0_shape;
OP_REQUIRES_OK(
ctx, XLAShapeToTensorShape(indices_input[0].shape(), &indices0_shape));
for (int input_num = 0; input_num < indices_input.size(); input_num++) {
TensorShape indices_shape;
OP_REQUIRES_OK(ctx,
XLAShapeToTensorShape(indices_input[input_num].shape(),
&indices_shape));
TensorShape& data_shape = data_shapes[input_num];
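      // If the static data shape does not already start with the constant
      // indices shape, slice each leading dimension of the data down to the
      // indices extent so that the two shapes agree before stitching.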
if (!TensorShapeUtils::StartsWith(data_shape, indices_shape)) {
for (int64_t i = 0; i < indices_shape.dims(); ++i) {
data_shape.set_dim(i, indices_shape.dim_size(i));
data[input_num] = xla::SliceInDim(data[input_num], 0,
indices_shape.dim_size(i), 1, i);
}
}
OP_REQUIRES(
ctx, TensorShapeUtils::StartsWith(data_shape, indices_shape),
errors::InvalidArgument("data[", input_num,
"].shape = ", data_shape.DebugString(),
" does not start with indices[", input_num,
"].shape = ", indices_shape.DebugString()));
OP_REQUIRES(
ctx,
input_num == 0 || SameExtraShape(data0_shape, indices0_shape,
data_shape, indices_shape),
errors::InvalidArgument(
"Need data[0].shape[", indices0_shape.dims(), ":] = data[",
input_num, "].shape[", indices_shape.dims(),
":], got data[0].shape = ", data0_shape.DebugString(), ", data[",
input_num, "].shape = ", data_shape.DebugString(),
", indices[0].shape = ", indices0_shape.DebugString(),
", indices[", input_num,
"].shape = ", indices_shape.DebugString()));
OP_REQUIRES_OK(ctx,
XlaHelpers::ReshapeLiteral(indices_input[input_num],
{indices_shape.num_elements()},
&indices[input_num]));
}
int max_index = -1;
for (int input_num = 0; input_num < indices.size(); input_num++) {
for (int i = 0; i < indices[input_num].shape().dimensions(0); ++i) {
max_index = std::max(max_index, indices[input_num].Get<int>({i}));
}
}
int number_of_indices = max_index + 1;
int64_t result_rank = 1 + data0_shape.dims() - indices0_shape.dims();
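    // With no indices at all the stitched dimension is empty, so emit an
    // empty constant of shape [0, <extra data dims>].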
if (number_of_indices == 0) {
std::vector<int64_t> result_shape(result_rank);
for (int d = indices0_shape.dims(); d < data0_shape.dims(); d++) {
result_shape[d - indices0_shape.dims() + 1] = data0_shape.dim_size(d);
}
xla::PrimitiveType element_type =
ctx->input_xla_type(ctx->num_inputs() - 1);
xla::Literal empty_literal = xla::Literal::CreateFromShape(
xla::ShapeUtil::MakeShape(element_type, result_shape));
ctx->SetOutput(0, xla::ConstantLiteral(ctx->builder(), empty_literal));
return;
}
std::vector<int32> src_input_vector(number_of_indices);
std::vector<int32> src_slice_vector(number_of_indices);
std::vector<bool> src_index_used(number_of_indices);
int index_used_count = 0;
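    // Record, for each stitched output row, which input and which row within
    // that input supplies it; every index in [0, max_index] must be covered.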
for (int input_num = 0; input_num < indices.size(); input_num++) {
for (int i = 0; i < indices[input_num].shape().dimensions(0); ++i) {
int index = indices[input_num].Get<int>({i});
OP_REQUIRES(
ctx, index >= 0,
errors::InvalidArgument("indices[", index, "] is out of range"));
src_input_vector[index] = input_num;
src_slice_vector[index] = i;
if (!src_index_used[index]) {
src_index_used[index] = true;
++index_used_count;
}
}
}
OP_REQUIRES(ctx, index_used_count == number_of_indices,
errors::InvalidArgument("not all indices are used"));
std::vector<xla::XlaOp> input(indices.size());
for (int input_num = 0; input_num < indices.size(); input_num++) {
TensorShape new_shape;
new_shape.AddDim(indices[input_num].shape().dimensions(0));
for (int d = indices0_shape.dims(); d < data0_shape.dims(); d++) {
new_shape.AddDim(data0_shape.dim_size(d));
}
auto handle = data[input_num];
if (new_shape == data_shapes[input_num]) {
input[input_num] = handle;
} else {
input[input_num] = xla::Reshape(handle, new_shape.dim_sizes());
}
}
std::vector<int64_t> slice_start(result_rank);
std::vector<int64_t> slice_limit(result_rank);
std::vector<int64_t> stride(result_rank, 1);
for (int d = indices0_shape.dims(); d < data0_shape.dims(); d++) {
slice_limit[1 + d - indices0_shape.dims()] = data0_shape.dim_size(d);
}
std::vector<xla::XlaOp> to_concat(number_of_indices);
for (int index_num = 0; index_num < number_of_indices; index_num++) {
const auto& expression = input[src_input_vector[index_num]];
slice_start[0] = src_slice_vector[index_num];
slice_limit[0] = src_slice_vector[index_num] + 1;
to_concat[index_num] =
xla::Slice(expression, slice_start, slice_limit, stride);
}
ctx->SetOutput(0, xla::ConcatInDim(ctx->builder(), to_concat, 0));
}
private:
static bool SameExtraShape(const TensorShape& data0_shape,
const TensorShape& indices0,
const TensorShape& data1_shape,
const TensorShape& indices1) {
const int extra0 = data0_shape.dims() - indices0.dims();
const int extra1 = data1_shape.dims() - indices1.dims();
if (extra0 != extra1) return false;
for (int i = 0; i < extra0; i++) {
if (data0_shape.dim_size(indices0.dims() + i) !=
data1_shape.dim_size(indices1.dims() + i)) {
return false;
}
}
return true;
}
};
REGISTER_XLA_OP(Name("DynamicStitch").CompileTimeConstantInput("indices"),
DynamicStitchOp);
REGISTER_XLA_OP(
Name("ParallelDynamicStitch").CompileTimeConstantInput("indices"),
DynamicStitchOp);
}
} | #include <functional>
#include <memory>
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
class DynamicStitchOpTest : public OpsTestBase {
protected:
void MakeOp(int n, DataType dt) {
TF_ASSERT_OK(NodeDefBuilder("myop", "DynamicStitch")
.Input(FakeInput(n, DT_INT32))
.Input(FakeInput(n, dt))
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
}
};
TEST_F(DynamicStitchOpTest, Simple_OneD) {
MakeOp(2, DT_FLOAT);
AddInputFromArray<int32>(TensorShape({3}), {0, 4, 7});
AddInputFromArray<int32>(TensorShape({5}), {1, 6, 2, 3, 5});
AddInputFromArray<float>(TensorShape({3}), {0, 40, 70});
AddInputFromArray<float>(TensorShape({5}), {10, 60, 20, 30, 50});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({8}));
test::FillValues<float>(&expected, {0, 10, 20, 30, 40, 50, 60, 70});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_F(DynamicStitchOpTest, Simple_TwoD) {
MakeOp(3, DT_FLOAT);
AddInputFromArray<int32>(TensorShape({3}), {0, 4, 7});
AddInputFromArray<int32>(TensorShape({2}), {1, 6});
AddInputFromArray<int32>(TensorShape({3}), {2, 3, 5});
AddInputFromArray<float>(TensorShape({3, 2}), {0, 1, 40, 41, 70, 71});
AddInputFromArray<float>(TensorShape({2, 2}), {10, 11, 60, 61});
AddInputFromArray<float>(TensorShape({3, 2}), {20, 21, 30, 31, 50, 51});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({8, 2}));
test::FillValues<float>(&expected, {0, 1, 10, 11, 20, 21, 30, 31, 40, 41, 50,
51, 60, 61, 70, 71});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_F(DynamicStitchOpTest, IndicesNotCoverAllPortions) {
MakeOp(1, DT_FLOAT);
AddInputFromArray<int32>(TensorShape({1}), {2});
AddInputFromArray<float>(TensorShape({1}), {1});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({3}));
test::FillValues<float>(&expected, {0, 0, 1});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_F(DynamicStitchOpTest, Error_IndicesMultiDimensional) {
MakeOp(2, DT_FLOAT);
AddInputFromArray<int32>(TensorShape({3}), {0, 4, 7});
AddInputFromArray<int32>(TensorShape({1, 5}), {1, 6, 2, 3, 5});
AddInputFromArray<float>(TensorShape({3}), {0, 40, 70});
AddInputFromArray<float>(TensorShape({5}), {10, 60, 20, 30, 50});
Status s = RunOpKernel();
EXPECT_TRUE(absl::StrContains(
s.ToString(),
"data[1].shape = [5] does not start with indices[1].shape = [1,5]"))
<< s;
}
TEST_F(DynamicStitchOpTest, Error_DataNumDimsMismatch) {
MakeOp(2, DT_FLOAT);
AddInputFromArray<int32>(TensorShape({3}), {0, 4, 7});
AddInputFromArray<int32>(TensorShape({5}), {1, 6, 2, 3, 5});
AddInputFromArray<float>(TensorShape({3}), {0, 40, 70});
AddInputFromArray<float>(TensorShape({1, 5}), {10, 60, 20, 30, 50});
Status s = RunOpKernel();
EXPECT_TRUE(absl::StrContains(
s.ToString(),
"data[1].shape = [1,5] does not start with indices[1].shape = [5]"))
<< s;
}
TEST_F(DynamicStitchOpTest, Error_DataDimSizeMismatch) {
MakeOp(2, DT_FLOAT);
AddInputFromArray<int32>(TensorShape({3}), {0, 4, 5});
AddInputFromArray<int32>(TensorShape({4}), {1, 6, 2, 3});
AddInputFromArray<float>(TensorShape({3, 1}), {0, 40, 70});
AddInputFromArray<float>(TensorShape({4, 2}),
{10, 11, 60, 61, 20, 21, 30, 31});
Status s = RunOpKernel();
EXPECT_TRUE(
absl::StrContains(s.ToString(),
"Need data[0].shape[1:] = data[1].shape[1:], got "
"data[0].shape = [3,1], data[1].shape = [4,2]"))
<< s;
}
TEST_F(DynamicStitchOpTest, Error_DataAndIndicesSizeMismatch) {
MakeOp(2, DT_FLOAT);
AddInputFromArray<int32>(TensorShape({3}), {0, 4, 7});
AddInputFromArray<int32>(TensorShape({5}), {1, 6, 2, 3, 5});
AddInputFromArray<float>(TensorShape({3}), {0, 40, 70});
AddInputFromArray<float>(TensorShape({4}), {10, 60, 20, 30});
Status s = RunOpKernel();
EXPECT_TRUE(absl::StrContains(
s.ToString(),
"data[1].shape = [4] does not start with indices[1].shape = [5]"))
<< s;
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/kernels/dynamic_stitch_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/dynamic_stitch_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
626a0766-a0f3-40f1-ae8f-10783946c2f8 | cpp | tensorflow/tensorflow | audio_microfrontend | tensorflow/lite/experimental/microfrontend/audio_microfrontend.cc | tensorflow/lite/experimental/microfrontend/audio_microfrontend_test.cc | #include "flatbuffers/flexbuffers.h"
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/experimental/microfrontend/lib/frontend.h"
#include "tensorflow/lite/experimental/microfrontend/lib/frontend_util.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace custom {
namespace audio_microfrontend {
constexpr int kInputTensor = 0;
constexpr int kOutputTensor = 0;
typedef struct {
int sample_rate;
FrontendState* state;
int left_context;
int right_context;
int frame_stride;
bool zero_padding;
int out_scale;
bool out_float;
} TfLiteAudioMicrofrontendParams;
void* Init(TfLiteContext* context, const char* buffer, size_t length) {
auto* data = new TfLiteAudioMicrofrontendParams;
const uint8_t* buffer_t = reinterpret_cast<const uint8_t*>(buffer);
const flexbuffers::Map& m = flexbuffers::GetRoot(buffer_t, length).AsMap();
data->sample_rate = m["sample_rate"].AsInt32();
struct FrontendConfig config;
config.window.size_ms = m["window_size"].AsInt32();
config.window.step_size_ms = m["window_step"].AsInt32();
config.filterbank.num_channels = m["num_channels"].AsInt32();
config.filterbank.upper_band_limit = m["upper_band_limit"].AsFloat();
config.filterbank.lower_band_limit = m["lower_band_limit"].AsFloat();
config.noise_reduction.smoothing_bits = m["smoothing_bits"].AsInt32();
config.noise_reduction.even_smoothing = m["even_smoothing"].AsFloat();
config.noise_reduction.odd_smoothing = m["odd_smoothing"].AsFloat();
config.noise_reduction.min_signal_remaining =
m["min_signal_remaining"].AsFloat();
config.pcan_gain_control.enable_pcan = m["enable_pcan"].AsBool();
config.pcan_gain_control.strength = m["pcan_strength"].AsFloat();
config.pcan_gain_control.offset = m["pcan_offset"].AsFloat();
config.pcan_gain_control.gain_bits = m["gain_bits"].AsInt32();
config.log_scale.enable_log = m["enable_log"].AsBool();
config.log_scale.scale_shift = m["scale_shift"].AsInt32();
data->state = new FrontendState;
FrontendPopulateState(&config, data->state, data->sample_rate);
data->left_context = m["left_context"].AsInt32();
data->right_context = m["right_context"].AsInt32();
data->frame_stride = m["frame_stride"].AsInt32();
data->zero_padding = m["zero_padding"].AsBool();
data->out_scale = m["out_scale"].AsInt32();
data->out_float = m["out_float"].AsBool();
return data;
}
void Free(TfLiteContext* context, void* buffer) {
auto* data = reinterpret_cast<TfLiteAudioMicrofrontendParams*>(buffer);
FrontendFreeStateContents(data->state);
delete data->state;
delete data;
}
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
auto* data =
reinterpret_cast<TfLiteAudioMicrofrontendParams*>(node->user_data);
TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
TF_LITE_ENSURE_EQ(context, NumDimensions(input), 1);
TF_LITE_ENSURE_EQ(context, input->type, kTfLiteInt16);
output->type = kTfLiteInt32;
if (data->out_float) {
output->type = kTfLiteFloat32;
}
TfLiteIntArray* output_size = TfLiteIntArrayCreate(2);
int num_frames = 0;
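  // One frame per full window that fits in the input, stepping by
  // window.step and subsampled by frame_stride.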
if (input->dims->data[0] >= data->state->window.size) {
num_frames = (input->dims->data[0] - data->state->window.size) /
data->state->window.step / data->frame_stride +
1;
}
output_size->data[0] = num_frames;
output_size->data[1] = data->state->filterbank.num_channels *
(1 + data->left_context + data->right_context);
return context->ResizeTensor(context, output, output_size);
}
template <typename T>
void GenerateFeatures(TfLiteAudioMicrofrontendParams* data,
const TfLiteTensor* input, TfLiteTensor* output) {
const int16_t* audio_data = GetTensorData<int16_t>(input);
int64_t audio_size = input->dims->data[0];
T* filterbanks_flat = GetTensorData<T>(output);
int num_frames = 0;
if (audio_size >= data->state->window.size) {
num_frames = (input->dims->data[0] - data->state->window.size) /
data->state->window.step +
1;
}
std::vector<std::vector<T>> frame_buffer(num_frames);
int frame_index = 0;
while (audio_size > 0) {
size_t num_samples_read;
struct FrontendOutput output = FrontendProcessSamples(
data->state, audio_data, audio_size, &num_samples_read);
audio_data += num_samples_read;
audio_size -= num_samples_read;
if (output.values != nullptr) {
frame_buffer[frame_index].reserve(output.size);
int i;
for (i = 0; i < output.size; ++i) {
frame_buffer[frame_index].push_back(static_cast<T>(output.values[i]) /
data->out_scale);
}
++frame_index;
}
}
int index = 0;
std::vector<T> pad(data->state->filterbank.num_channels, 0);
int anchor;
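  // For each anchor frame (stepping by frame_stride), emit the frames in
  // [anchor - left_context, anchor + right_context]. Out-of-range frames are
  // zero-padded when zero_padding is set and replicated from the first/last
  // frame otherwise.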
for (anchor = 0; anchor < frame_buffer.size(); anchor += data->frame_stride) {
int frame;
for (frame = anchor - data->left_context;
frame <= anchor + data->right_context; ++frame) {
std::vector<T>* feature;
if (data->zero_padding && (frame < 0 || frame >= frame_buffer.size())) {
feature = &pad;
} else if (frame < 0) {
feature = &frame_buffer[0];
} else if (frame >= frame_buffer.size()) {
feature = &frame_buffer[frame_buffer.size() - 1];
} else {
feature = &frame_buffer[frame];
}
for (auto f : *feature) {
filterbanks_flat[index++] = f;
}
}
}
}
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
auto* data =
reinterpret_cast<TfLiteAudioMicrofrontendParams*>(node->user_data);
FrontendReset(data->state);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
if (data->out_float) {
GenerateFeatures<float>(data, input, output);
} else {
GenerateFeatures<int32>(data, input, output);
}
return kTfLiteOk;
}
}
TfLiteRegistration* Register_AUDIO_MICROFRONTEND() {
static TfLiteRegistration r = {
audio_microfrontend::Init, audio_microfrontend::Free,
audio_microfrontend::Prepare, audio_microfrontend::Eval};
return &r;
}
}
}
} | #include "tensorflow/lite/experimental/microfrontend/audio_microfrontend.h"
#include <memory>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "flatbuffers/flexbuffers.h"
#include "tensorflow/lite/core/interpreter.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace ops {
namespace custom {
namespace {
using ::testing::ElementsAreArray;
class MicroFrontendOpModel : public SingleOpModel {
public:
MicroFrontendOpModel(int n_input, int n_frame, int n_frequency_per_frame,
int n_left_context, int n_right_context,
int n_frame_stride,
const std::vector<std::vector<int>>& input_shapes)
: n_input_(n_input),
n_frame_(n_frame),
n_frequency_per_frame_(n_frequency_per_frame),
n_left_context_(n_left_context),
n_right_context_(n_right_context),
n_frame_stride_(n_frame_stride) {
input_ = AddInput(TensorType_INT16);
output_ = AddOutput(TensorType_INT32);
flexbuffers::Builder fbb;
fbb.Map([&]() {
fbb.Int("sample_rate", 1000);
fbb.Int("window_size", 25);
fbb.Int("window_step", 10);
fbb.Int("num_channels", 2);
fbb.Float("upper_band_limit", 450.0);
fbb.Float("lower_band_limit", 8.0);
fbb.Int("smoothing_bits", 10);
fbb.Float("even_smoothing", 0.025);
fbb.Float("odd_smoothing", 0.06);
fbb.Float("min_signal_remaining", 0.05);
fbb.Bool("enable_pcan", true);
fbb.Float("pcan_strength", 0.95);
fbb.Float("pcan_offset", 80.0);
fbb.Int("gain_bits", 21);
fbb.Bool("enable_log", true);
fbb.Int("scale_shift", 6);
fbb.Int("left_context", n_left_context);
fbb.Int("right_context", n_right_context);
fbb.Int("frame_stride", n_frame_stride);
fbb.Bool("zero_padding", true);
fbb.Int("out_scale", 1);
fbb.Bool("out_float", false);
});
fbb.Finish();
SetCustomOp("MICRO_FRONTEND", fbb.GetBuffer(),
Register_AUDIO_MICROFRONTEND);
BuildInterpreter(input_shapes);
}
void SetInput(const std::vector<int16_t>& data) {
PopulateTensor(input_, data);
}
std::vector<int> GetOutput() { return ExtractVector<int>(output_); }
int num_inputs() { return n_input_; }
int num_frames() { return n_frame_; }
int num_frequency_per_frame() { return n_frequency_per_frame_; }
int num_left_context() { return n_left_context_; }
int num_right_context() { return n_right_context_; }
int num_frame_stride() { return n_frame_stride_; }
protected:
int input_;
int output_;
int n_input_;
int n_frame_;
int n_frequency_per_frame_;
int n_left_context_;
int n_right_context_;
int n_frame_stride_;
};
class BaseMicroFrontendTest : public ::testing::Test {
protected:
std::vector<int16_t> micro_frontend_input_;
void VerifyGoldens(const std::vector<int16_t>& input,
const std::vector<std::vector<int>>& output,
MicroFrontendOpModel* micro_frontend,
float tolerance = 1e-5) {
const int num_inputs = micro_frontend->num_inputs();
EXPECT_GT(num_inputs, 0);
const int num_frames = micro_frontend->num_frames();
EXPECT_GT(num_frames, 0);
EXPECT_EQ(num_frames, output.size());
const int num_frequency_per_frame =
micro_frontend->num_frequency_per_frame();
EXPECT_GT(num_frequency_per_frame, 0);
EXPECT_EQ(num_frequency_per_frame, output[0].size());
micro_frontend->SetInput(input);
ASSERT_EQ(micro_frontend->Invoke(), kTfLiteOk);
std::vector<int> output_flattened;
int anchor;
for (anchor = 0; anchor < output.size();
anchor += micro_frontend->num_frame_stride()) {
int frame;
for (frame = anchor - micro_frontend->num_left_context();
frame <= anchor + micro_frontend->num_right_context(); ++frame) {
if (frame < 0 || frame >= output.size()) {
int j;
for (j = 0; j < num_frequency_per_frame; ++j) {
output_flattened.push_back(0.0);
}
} else {
for (auto data_point : output[frame]) {
output_flattened.push_back(data_point);
}
}
}
}
EXPECT_THAT(micro_frontend->GetOutput(),
ElementsAreArray(output_flattened));
}
};
class TwoConsecutive36InputsMicroFrontendTest : public BaseMicroFrontendTest {
void SetUp() override {
micro_frontend_input_ = {
0, 32767, 0, -32768, 0, 32767, 0, -32768, 0, 32767, 0, -32768,
0, 32767, 0, -32768, 0, 32767, 0, -32768, 0, 32767, 0, -32768,
0, 32767, 0, -32768, 0, 32767, 0, -32768, 0, 32767, 0, -32768};
}
};
TEST_F(TwoConsecutive36InputsMicroFrontendTest, MicroFrontendBlackBoxTest) {
const int n_input = 36;
const int n_frame = 2;
const int n_frequency_per_frame = 2;
MicroFrontendOpModel micro_frontend(n_input, n_frame, n_frequency_per_frame,
1, 1, 1,
{
{n_input},
});
const std::vector<std::vector<int>> micro_frontend_golden_output = {
{479, 425}, {436, 378}};
VerifyGoldens(micro_frontend_input_, micro_frontend_golden_output,
µ_frontend);
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/microfrontend/audio_microfrontend.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/microfrontend/audio_microfrontend_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
0389467c-ba24-47c6-a136-10cfbb920e4a | cpp | google/arolla | batched_forest_evaluator | arolla/decision_forest/batched_evaluation/batched_forest_evaluator.cc | arolla/decision_forest/batched_evaluation/batched_forest_evaluator_test.cc | #include "arolla/decision_forest/batched_evaluation/batched_forest_evaluator.h"
#include <algorithm>
#include <cstdint>
#include <memory>
#include <optional>
#include <utility>
#include <vector>
#include "absl/base/no_destructor.h"
#include "absl/log/check.h"
#include "absl/memory/memory.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "absl/types/span.h"
#include "arolla/array/array.h"
#include "arolla/array/qtype/types.h"
#include "arolla/decision_forest/decision_forest.h"
#include "arolla/decision_forest/pointwise_evaluation/forest_evaluator.h"
#include "arolla/dense_array/dense_array.h"
#include "arolla/dense_array/qtype/types.h"
#include "arolla/memory/buffer.h"
#include "arolla/memory/frame.h"
#include "arolla/memory/raw_buffer_factory.h"
#include "arolla/qtype/array_like/array_like_qtype.h"
#include "arolla/qtype/array_like/frame_iter.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/typed_ref.h"
#include "arolla/qtype/typed_slot.h"
#include "arolla/qtype/typed_value.h"
#include "arolla/util/threading.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla {
namespace {
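// Elementwise sum of two float arrays that are known to have equal size and
// no missing values; both DenseArray<float> and Array<float> are supported.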
absl::StatusOr<TypedValue> AddFullFloatArrays(TypedRef a, TypedRef b) {
if (a.GetType() == GetDenseArrayQType<float>() &&
b.GetType() == GetDenseArrayQType<float>()) {
const auto& va = a.UnsafeAs<DenseArray<float>>();
const auto& vb = b.UnsafeAs<DenseArray<float>>();
DCHECK_EQ(va.size(), vb.size());
DCHECK(va.IsFull() && vb.IsFull());
Buffer<float>::Builder bldr(va.size());
auto sa = va.values.span();
auto sb = vb.values.span();
auto sr = bldr.GetMutableSpan();
for (int64_t i = 0; i < va.size(); ++i) {
sr[i] = sa[i] + sb[i];
}
return TypedValue::FromValue(DenseArray<float>{std::move(bldr).Build()});
} else if (a.GetType() == GetArrayQType<float>() &&
b.GetType() == GetArrayQType<float>()) {
const auto& va = a.UnsafeAs<Array<float>>();
const auto& vb = b.UnsafeAs<Array<float>>();
DCHECK_EQ(va.size(), vb.size());
DCHECK(va.IsFullForm() && vb.IsFullForm());
Buffer<float>::Builder bldr(va.size());
auto sa = va.dense_data().values.span();
auto sb = vb.dense_data().values.span();
auto sr = bldr.GetMutableSpan();
for (int64_t i = 0; i < va.size(); ++i) {
sr[i] = sa[i] + sb[i];
}
return TypedValue::FromValue(Array<float>{std::move(bldr).Build()});
} else {
return absl::InternalError("Invalid type in BatchedForestEvaluator/Add");
}
}
absl::StatusOr<std::vector<ForestEvaluator>> CreatePointwiseEvaluators(
const BatchedForestEvaluator::CompilationParams& params,
const DecisionForest& decision_forest, const std::vector<TypedSlot>& inputs,
const std::vector<ForestEvaluator::Output>& outputs) {
int64_t split_count = 0;
for (const auto& tree : decision_forest.GetTrees()) {
split_count += tree.split_nodes.size();
}
int64_t evaluator_count = std::max<int64_t>(
1, (split_count + params.optimal_splits_per_evaluator - 1) /
params.optimal_splits_per_evaluator);
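  // Split the forest into chunks of roughly optimal_splits_per_evaluator
  // split nodes each and compile one pointwise evaluator per chunk; the
  // per-chunk results are summed during batch evaluation.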
std::vector<ForestEvaluator> evaluators;
evaluators.reserve(evaluator_count);
if (evaluator_count == 1) {
ASSIGN_OR_RETURN(auto evaluator, ForestEvaluator::Compile(decision_forest,
inputs, outputs));
evaluators.push_back(std::move(evaluator));
return evaluators;
}
int64_t splits_per_evaluator =
(split_count + evaluator_count - 1) / evaluator_count;
int64_t estimated_trees_per_evaluator =
(decision_forest.GetTrees().size() + evaluator_count - 1) /
evaluator_count;
std::vector<DecisionTree> trees;
trees.reserve(estimated_trees_per_evaluator);
int64_t current_split_count = 0;
for (const auto& tree : decision_forest.GetTrees()) {
trees.push_back(tree);
current_split_count += tree.split_nodes.size();
if (current_split_count >= splits_per_evaluator) {
ASSIGN_OR_RETURN(auto partial_forest,
DecisionForest::FromTrees(std::move(trees)));
ASSIGN_OR_RETURN(auto evaluator, ForestEvaluator::Compile(
*partial_forest, inputs, outputs));
evaluators.push_back(std::move(evaluator));
trees.clear();
trees.reserve(estimated_trees_per_evaluator);
current_split_count = 0;
}
}
if (!trees.empty()) {
ASSIGN_OR_RETURN(auto partial_forest,
DecisionForest::FromTrees(std::move(trees)));
ASSIGN_OR_RETURN(auto evaluator, ForestEvaluator::Compile(*partial_forest,
inputs, outputs));
evaluators.push_back(std::move(evaluator));
}
return evaluators;
}
}
absl::NoDestructor<std::unique_ptr<ThreadingInterface>>
BatchedForestEvaluator::threading_;
int64_t BatchedForestEvaluator::min_rows_per_thread_;
absl::StatusOr<std::unique_ptr<BatchedForestEvaluator>>
BatchedForestEvaluator::Compile(const DecisionForest& decision_forest,
absl::Span<const TreeFilter> groups,
const CompilationParams& params) {
FrameLayout::Builder bldr;
std::vector<SlotMapping> input_slots_mapping;
TypedSlot placeholder =
TypedSlot::FromSlot(FrameLayout::Slot<float>::UnsafeUninitializedSlot());
std::vector<TypedSlot> input_pointwise_slots;
for (const auto& kv : decision_forest.GetRequiredQTypes()) {
TypedSlot pointwise_slot = AddSlot(kv.second, &bldr);
while (input_pointwise_slots.size() <= kv.first) {
input_pointwise_slots.push_back(placeholder);
}
input_pointwise_slots[kv.first] = pointwise_slot;
input_slots_mapping.push_back({kv.first, pointwise_slot});
}
std::vector<ForestEvaluator::Output> pointwise_outputs;
std::vector<TypedSlot> output_pointwise_slots;
pointwise_outputs.reserve(groups.size());
output_pointwise_slots.reserve(groups.size());
for (const TreeFilter& filter : groups) {
auto slot = bldr.AddSlot<float>();
pointwise_outputs.push_back({filter, slot});
output_pointwise_slots.push_back(TypedSlot::FromSlot(slot));
}
auto pointwise_layout = std::move(bldr).Build();
ASSIGN_OR_RETURN(
std::vector<ForestEvaluator> pointwise_evaluators,
CreatePointwiseEvaluators(params, decision_forest, input_pointwise_slots,
pointwise_outputs));
return absl::WrapUnique(new BatchedForestEvaluator(
std::move(pointwise_layout), std::move(input_slots_mapping),
std::move(output_pointwise_slots), std::move(pointwise_evaluators)));
}
absl::Status BatchedForestEvaluator::GetInputsFromSlots(
absl::Span<const TypedSlot> input_slots, ConstFramePtr frame,
std::vector<TypedRef>* input_arrays) const {
if (input_slots.size() < input_count_) {
return absl::InvalidArgumentError(
absl::StrFormat("not enough inputs: at least %d expected, %d found",
input_count_, input_slots.size()));
}
for (auto m : input_mapping_) {
input_arrays->push_back(
TypedRef::FromSlot(input_slots[m.input_index], frame));
}
return absl::OkStatus();
}
absl::Status BatchedForestEvaluator::EvalBatch(
absl::Span<const TypedSlot> input_slots,
absl::Span<const TypedSlot> output_slots, FramePtr frame,
RawBufferFactory* buffer_factory, std::optional<int64_t> row_count) const {
std::vector<TypedRef> input_arrays;
input_arrays.reserve(input_mapping_.size());
RETURN_IF_ERROR(GetInputsFromSlots(input_slots, frame, &input_arrays));
if (!row_count.has_value()) {
if (!input_arrays.empty()) {
ASSIGN_OR_RETURN(row_count, GetArraySize(input_arrays[0]));
} else if (!input_slots.empty()) {
ASSIGN_OR_RETURN(row_count,
GetArraySize(TypedRef::FromSlot(input_slots[0], frame)));
}
}
int thread_count = 1;
auto run_evaluator = [&](const ForestEvaluator& eval) -> absl::Status {
ASSIGN_OR_RETURN(
auto frame_iterator,
FrameIterator::Create(
input_arrays, {input_pointwise_slots_.data(), input_arrays.size()},
output_slots, output_pointwise_slots_, &pointwise_layout_,
FrameIterator::Options{.row_count = row_count,
.frame_buffer_count = 64 * thread_count,
.buffer_factory = buffer_factory}));
if (thread_count > 1) {
frame_iterator.ForEachFrame([&eval](FramePtr f) { eval.Eval(f, f); },
**threading_, thread_count);
} else {
frame_iterator.ForEachFrame([&eval](FramePtr f) { eval.Eval(f, f); });
}
return frame_iterator.StoreOutput(frame);
};
if (pointwise_evaluators_.size() == 1) {
return run_evaluator(pointwise_evaluators_.front());
} else {
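    // Several partial evaluators: run each one into the output slots and
    // keep a running elementwise sum, copying the final sum back into the
    // output slots at the end.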
std::vector<TypedValue> res_sum;
res_sum.reserve(output_slots.size());
RETURN_IF_ERROR(run_evaluator(pointwise_evaluators_.front()));
for (const auto& s : output_slots) {
res_sum.push_back(TypedValue::FromSlot(s, frame));
}
for (int eval_id = 1; eval_id < pointwise_evaluators_.size() - 1;
++eval_id) {
RETURN_IF_ERROR(run_evaluator(pointwise_evaluators_[eval_id]));
for (int i = 0; i < output_slots.size(); ++i) {
ASSIGN_OR_RETURN(
res_sum[i],
AddFullFloatArrays(res_sum[i].AsRef(),
TypedRef::FromSlot(output_slots[i], frame)));
}
}
RETURN_IF_ERROR(run_evaluator(pointwise_evaluators_.back()));
for (int i = 0; i < output_slots.size(); ++i) {
ASSIGN_OR_RETURN(
TypedValue full_sum,
AddFullFloatArrays(res_sum[i].AsRef(),
TypedRef::FromSlot(output_slots[i], frame)));
RETURN_IF_ERROR(full_sum.CopyToSlot(output_slots[i], frame));
}
return absl::OkStatus();
}
}
} | #include "arolla/decision_forest/batched_evaluation/batched_forest_evaluator.h"
#include <cmath>
#include <cstdint>
#include <limits>
#include <memory>
#include <utility>
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/random/distributions.h"
#include "absl/random/random.h"
#include "absl/status/statusor.h"
#include "arolla/array/array.h"
#include "arolla/array/qtype/types.h"
#include "arolla/decision_forest/decision_forest.h"
#include "arolla/decision_forest/split_conditions/interval_split_condition.h"
#include "arolla/decision_forest/split_conditions/set_of_values_split_condition.h"
#include "arolla/decision_forest/testing/test_util.h"
#include "arolla/dense_array/dense_array.h"
#include "arolla/dense_array/qtype/types.h"
#include "arolla/memory/frame.h"
#include "arolla/memory/memory_allocation.h"
#include "arolla/qtype/typed_slot.h"
#include "arolla/util/threading.h"
namespace arolla {
namespace {
absl::StatusOr<DecisionForestPtr> CreateTestForest() {
constexpr float kInf = std::numeric_limits<float>::infinity();
constexpr auto S = DecisionTreeNodeId::SplitNodeId;
constexpr auto A = DecisionTreeNodeId::AdjustmentId;
std::vector<DecisionTree> trees(2);
trees[0].tag = {.submodel_id = 0};
trees[0].adjustments = {0.5, 1.5, 2.5, 3.5};
trees[0].split_nodes = {
{S(1), S(2), IntervalSplit(0, 1.5, kInf)},
{A(0), A(2), SetOfValuesSplit<int64_t>(1, {1, 2}, false)},
{A(1), A(3), IntervalSplit(0, -kInf, 10)}};
trees[1].tag = {.submodel_id = 1};
trees[1].adjustments = {-1.0, 1.0};
trees[1].split_nodes = {{A(0), A(1), IntervalSplit(0, 1, 5)}};
return DecisionForest::FromTrees(std::move(trees));
}
TEST(BatchedForestEvaluator, EvalBatch) {
ASSERT_OK_AND_ASSIGN(auto forest, CreateTestForest());
std::vector<TreeFilter> groups{{.submodels = {0}}, {.submodels = {1}}};
ASSERT_OK_AND_ASSIGN(auto eval,
BatchedForestEvaluator::Compile(*forest, groups));
FrameLayout::Builder bldr;
auto in1_slot = bldr.AddSlot<DenseArray<float>>();
auto in2_slot = bldr.AddSlot<DenseArray<int64_t>>();
auto out1_slot = bldr.AddSlot<DenseArray<float>>();
auto out2_slot = bldr.AddSlot<DenseArray<float>>();
FrameLayout layout = std::move(bldr).Build();
MemoryAllocation alloc(&layout);
FramePtr frame = alloc.frame();
frame.Set(in1_slot,
CreateDenseArray<float>({0, 0, 1.2, 1.6, 7.0, 13.5, NAN}));
frame.Set(in2_slot, CreateDenseArray<int64_t>({3, 1, 1, 1, 1, 1, {}}));
{
ASSERT_OK(eval->EvalBatch(
{TypedSlot::FromSlot(in1_slot), TypedSlot::FromSlot(in2_slot)},
{TypedSlot::FromSlot(out1_slot), TypedSlot::FromSlot(out2_slot)},
frame));
EXPECT_THAT(frame.Get(out1_slot),
::testing::ElementsAre(0.5, 2.5, 2.5, 3.5, 3.5, 1.5, 0.5));
EXPECT_THAT(frame.Get(out2_slot),
::testing::ElementsAre(-1, -1, 1, 1, -1, -1, -1));
}
frame.Set(out1_slot, DenseArray<float>());
frame.Set(out2_slot, DenseArray<float>());
{
BatchedForestEvaluator::SetThreading(std::make_unique<StdThreading>(2));
ASSERT_OK(eval->EvalBatch(
{TypedSlot::FromSlot(in1_slot), TypedSlot::FromSlot(in2_slot)},
{TypedSlot::FromSlot(out1_slot), TypedSlot::FromSlot(out2_slot)},
frame));
EXPECT_THAT(frame.Get(out1_slot),
::testing::ElementsAre(0.5, 2.5, 2.5, 3.5, 3.5, 1.5, 0.5));
EXPECT_THAT(frame.Get(out2_slot),
::testing::ElementsAre(-1, -1, 1, 1, -1, -1, -1));
BatchedForestEvaluator::SetThreading(nullptr);
}
frame.Set(out1_slot, DenseArray<float>());
frame.Set(out2_slot, DenseArray<float>());
{
BatchedForestEvaluator::SetThreading(std::make_unique<StdThreading>(2),
1);
ASSERT_OK(eval->EvalBatch(
{TypedSlot::FromSlot(in1_slot), TypedSlot::FromSlot(in2_slot)},
{TypedSlot::FromSlot(out1_slot), TypedSlot::FromSlot(out2_slot)},
frame));
EXPECT_THAT(frame.Get(out1_slot),
::testing::ElementsAre(0.5, 2.5, 2.5, 3.5, 3.5, 1.5, 0.5));
EXPECT_THAT(frame.Get(out2_slot),
::testing::ElementsAre(-1, -1, 1, 1, -1, -1, -1));
BatchedForestEvaluator::SetThreading(nullptr);
}
}
TEST(BatchedForestEvaluator, UnusedInputs) {
constexpr auto A = DecisionTreeNodeId::AdjustmentId;
DecisionTree tree;
tree.adjustments = {-1, 1};
tree.split_nodes = {{A(0), A(1), IntervalSplit(2, 0, 1)}};
ASSERT_OK_AND_ASSIGN(auto forest, DecisionForest::FromTrees({tree}));
ASSERT_OK_AND_ASSIGN(auto eval, BatchedForestEvaluator::Compile(*forest));
FrameLayout::Builder bldr;
auto unused1_slot = bldr.AddSlot<DenseArray<int64_t>>();
auto unused2_slot = bldr.AddSlot<DenseArray<int64_t>>();
auto in_slot = bldr.AddSlot<DenseArray<float>>();
auto out_slot = bldr.AddSlot<DenseArray<float>>();
FrameLayout layout = std::move(bldr).Build();
MemoryAllocation alloc(&layout);
FramePtr frame = alloc.frame();
frame.Set(in_slot, CreateDenseArray<float>({-1, 0.5, 2}));
ASSERT_OK(eval->EvalBatch(
{TypedSlot::FromSlot(unused1_slot), TypedSlot::FromSlot(unused2_slot),
TypedSlot::FromSlot(in_slot)},
{TypedSlot::FromSlot(out_slot)}, frame));
EXPECT_THAT(frame.Get(out_slot), ::testing::ElementsAre(-1, 1, -1));
}
TEST(BatchedForestEvaluator, AllInputUnused) {
std::vector<DecisionTree> trees(1);
trees[0].adjustments = {1.5};
ASSERT_OK_AND_ASSIGN(DecisionForestPtr forest,
DecisionForest::FromTrees(std::move(trees)));
std::vector<TreeFilter> groups{{.submodels = {0}}};
ASSERT_OK_AND_ASSIGN(auto eval,
BatchedForestEvaluator::Compile(*forest, groups));
FrameLayout::Builder bldr;
auto in1_slot = bldr.AddSlot<DenseArray<float>>();
auto in2_slot = bldr.AddSlot<DenseArray<int64_t>>();
auto out_slot = bldr.AddSlot<DenseArray<float>>();
FrameLayout layout = std::move(bldr).Build();
MemoryAllocation alloc(&layout);
FramePtr frame = alloc.frame();
frame.Set(in1_slot,
CreateDenseArray<float>({0, 0, 1.2, 1.6, 7.0, 13.5, NAN}));
frame.Set(in2_slot, CreateDenseArray<int64_t>({3, 1, 1, 1, 1, 1, {}}));
ASSERT_OK(eval->EvalBatch(
{TypedSlot::FromSlot(in1_slot), TypedSlot::FromSlot(in2_slot)},
{TypedSlot::FromSlot(out_slot)}, frame));
EXPECT_THAT(frame.Get(out_slot),
::testing::ElementsAre(1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5));
}
TEST(BatchedForestEvaluator, SplitCountPerEvaluator) {
constexpr int64_t min_num_splits = 10;
constexpr int64_t max_num_splits = 30;
constexpr int64_t num_trees = 100;
constexpr int64_t batch_size = 10;
absl::BitGen rnd;
constexpr int64_t min_total_split_count = num_trees * min_num_splits;
int64_t split_count_per_evaluator = absl::Uniform<int64_t>(
rnd, min_total_split_count / 5, min_total_split_count * 4 / 5);
auto forest =
CreateRandomFloatForest(&rnd, 10, true,
min_num_splits, max_num_splits, num_trees);
ASSERT_OK_AND_ASSIGN(auto evaluator,
BatchedForestEvaluator::Compile(*forest));
ASSERT_OK_AND_ASSIGN(
auto subdivided_evaluator,
BatchedForestEvaluator::Compile(*forest, {TreeFilter()},
{split_count_per_evaluator}));
std::vector<TypedSlot> slots;
FrameLayout::Builder layout_builder;
ASSERT_OK(CreateArraySlotsForForest(*forest, &layout_builder, &slots));
auto dense_array_output_slot = layout_builder.AddSlot<DenseArray<float>>();
auto array_output_slot = layout_builder.AddSlot<Array<float>>();
FrameLayout layout = std::move(layout_builder).Build();
MemoryAllocation ctx(&layout);
FramePtr frame = ctx.frame();
for (auto slot : slots) {
ASSERT_OK(FillArrayWithRandomValues(batch_size, slot, frame, &rnd));
}
ASSERT_OK(evaluator->EvalBatch(slots,
{TypedSlot::FromSlot(dense_array_output_slot)},
frame, nullptr, batch_size));
ASSERT_OK(evaluator->EvalBatch(slots,
{TypedSlot::FromSlot(array_output_slot)},
frame, nullptr, batch_size));
DenseArray<float> dense_array1 = frame.Get(dense_array_output_slot);
Array<float> array1 = frame.Get(array_output_slot);
frame.Set(dense_array_output_slot, DenseArray<float>());
frame.Set(array_output_slot, Array<float>());
ASSERT_OK(subdivided_evaluator->EvalBatch(
slots, {TypedSlot::FromSlot(dense_array_output_slot)}, frame, nullptr,
batch_size));
ASSERT_OK(subdivided_evaluator->EvalBatch(
slots, {TypedSlot::FromSlot(array_output_slot)}, frame, nullptr,
batch_size));
DenseArray<float> dense_array2 = frame.Get(dense_array_output_slot);
Array<float> array2 = frame.Get(array_output_slot);
ASSERT_EQ(dense_array1.size(), batch_size);
ASSERT_EQ(array1.size(), batch_size);
ASSERT_EQ(dense_array2.size(), batch_size);
ASSERT_EQ(array2.size(), batch_size);
for (int64_t i = 0; i < batch_size; ++i) {
bool present = array1[i].present;
EXPECT_EQ(array2[i].present, present);
EXPECT_EQ(dense_array1[i].present, present);
EXPECT_EQ(dense_array2[i].present, present);
if (present) {
float value = array1[i].value;
EXPECT_FLOAT_EQ(array2[i].value, value);
EXPECT_FLOAT_EQ(dense_array1[i].value, value);
EXPECT_FLOAT_EQ(dense_array2[i].value, value);
}
}
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/decision_forest/batched_evaluation/batched_forest_evaluator.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/decision_forest/batched_evaluation/batched_forest_evaluator_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
ead10a6f-65f0-4f43-9abf-756f28550ec5 | cpp | tensorflow/tensorflow | ceil | tensorflow/lite/experimental/shlo/ops/ceil.cc | tensorflow/lite/delegates/xnnpack/ceil_test.cc | #include "tensorflow/lite/experimental/shlo/ops/ceil.h"
#include <cmath>
#include "absl/status/status.h"
#include "tensorflow/lite/experimental/shlo/bf16.h"
#include "tensorflow/lite/experimental/shlo/dispatch.h"
#include "tensorflow/lite/experimental/shlo/f16.h"
#include "tensorflow/lite/experimental/shlo/ops/unary_elementwise.h"
#include "tensorflow/lite/experimental/shlo/ops/util.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
namespace shlo_ref {
struct Ceil {
template <class T>
T operator()(T v) const {
return std::ceil(v);
}
};
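// F16 and BF16 are evaluated through float because std::ceil has no
// overloads for these half-precision types.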
template <>
F16 Ceil::operator()<F16>(F16 val) const {
return F16(operator()(static_cast<float>(val)));
}
template <>
BF16 Ceil::operator()<BF16>(BF16 val) const {
return BF16(operator()(static_cast<float>(val)));
}
CeilOp Create(CeilOp::Attributes) { return {}; }
absl::Status Prepare(CeilOp& op, const Tensor& input, Tensor& output) {
SHLO_REF_RETURN_ON_ERROR(Propagate(input.shape(), output.shape()));
SHLO_REF_RETURN_ON_ERROR(CheckSupportedTypes(
CheckCtx("ceil"), input, IsFloatTensor, IsQuantizedPerTensorTensor));
SHLO_REF_RETURN_ON_ERROR(
CheckSameBaselineType(CheckCtx("ceil"), input, output));
return absl::OkStatus();
}
absl::Status Evaluate(CeilOp& op, const Tensor& input, Tensor& output) {
Ceil ceil;
if (input.IsPerTensorQuantized()) {
DISPATCH_QUANTIZED(
detail::DequantizeOpQuantizePerTensor,
input.quantized_per_tensor_element_type().StorageType(),
input.quantized_per_tensor_element_type().ExpressedType(), ceil, input,
output)
} else if (IsFloatTensor(input)) {
DISPATCH_FLOAT(detail::EvaluateNoQuantization, input.tensor_element_type(),
ceil, input, output);
}
return absl::FailedPreconditionError("Unsupported tensor type.");
}
}; | #include <cstdint>
#include <functional>
#include <memory>
#include <random>
#include <gtest/gtest.h>
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/delegates/xnnpack/unary_elementwise_tester.h"
#include "tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace xnnpack {
TEST(Ceil, 4D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
UnaryElementwiseTester()
.Shape({batch, height, width, channels})
.Test(BuiltinOperator_CEIL, xnnpack_delegate.get());
}
TEST(Ceil, 3D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
UnaryElementwiseTester()
.Shape({batch, width, channels})
.Test(BuiltinOperator_CEIL, xnnpack_delegate.get());
}
TEST(Ceil, 2D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto channels = shape_rng();
UnaryElementwiseTester()
.Shape({batch, channels})
.Test(BuiltinOperator_CEIL, xnnpack_delegate.get());
}
TEST(Ceil, 1D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
UnaryElementwiseTester().Shape({batch}).Test(BuiltinOperator_CEIL,
xnnpack_delegate.get());
}
TEST(Ceil, MultiThreading) {
TfLiteXNNPackDelegateOptions delegate_options =
TfLiteXNNPackDelegateOptionsDefault();
delegate_options.num_threads = 2;
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(&delegate_options),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
UnaryElementwiseTester()
.Shape({batch, height, width, channels})
.Test(BuiltinOperator_CEIL, xnnpack_delegate.get());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/ops/ceil.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/xnnpack/ceil_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ebea579e-d2e7-430c-8d2e-95b124dbf235 | cpp | abseil/abseil-cpp | damerau_levenshtein_distance | absl/strings/internal/damerau_levenshtein_distance.cc | absl/strings/internal/damerau_levenshtein_distance_test.cc | #include "absl/strings/internal/damerau_levenshtein_distance.h"
#include <algorithm>
#include <array>
#include <numeric>
#include "absl/strings/string_view.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace strings_internal {
uint8_t CappedDamerauLevenshteinDistance(absl::string_view s1,
absl::string_view s2, uint8_t cutoff) {
const uint8_t MAX_SIZE = 100;
const uint8_t _cutoff = std::min(MAX_SIZE, cutoff);
const uint8_t cutoff_plus_1 = static_cast<uint8_t>(_cutoff + 1);
if (s1.size() > s2.size()) std::swap(s1, s2);
if (s1.size() + _cutoff < s2.size() || s2.size() > MAX_SIZE)
return cutoff_plus_1;
if (s1.empty())
return static_cast<uint8_t>(s2.size());
const uint8_t lower_diag =
_cutoff - static_cast<uint8_t>(s2.size() - s1.size());
const uint8_t upper_diag = _cutoff;
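  // Banded dynamic programming for the restricted (adjacent-transposition)
  // Damerau-Levenshtein distance: cells farther than `_cutoff` from the
  // diagonal already exceed the cutoff, so row i only computes columns in
  // [i - lower_diag, i + upper_diag].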
std::array<std::array<uint8_t, MAX_SIZE + 2>, MAX_SIZE + 2> d;
std::iota(d[0].begin(), d[0].begin() + upper_diag + 1, 0);
d[0][cutoff_plus_1] = cutoff_plus_1;
for (size_t i = 1; i <= s1.size(); ++i) {
size_t j_begin = 1;
if (i > lower_diag) {
j_begin = i - lower_diag;
d[i][j_begin - 1] = cutoff_plus_1;
} else {
d[i][0] = static_cast<uint8_t>(i);
}
size_t j_end = i + upper_diag;
if (j_end > s2.size()) {
j_end = s2.size();
} else {
d[i][j_end + 1] = cutoff_plus_1;
}
for (size_t j = j_begin; j <= j_end; ++j) {
const uint8_t deletion_distance = d[i - 1][j] + 1;
const uint8_t insertion_distance = d[i][j - 1] + 1;
const uint8_t mismatched_tail_cost = s1[i - 1] == s2[j - 1] ? 0 : 1;
const uint8_t mismatch_distance = d[i - 1][j - 1] + mismatched_tail_cost;
uint8_t transposition_distance = _cutoff + 1;
if (i > 1 && j > 1 && s1[i - 1] == s2[j - 2] && s1[i - 2] == s2[j - 1])
transposition_distance = d[i - 2][j - 2] + 1;
d[i][j] = std::min({cutoff_plus_1, deletion_distance, insertion_distance,
mismatch_distance, transposition_distance});
}
}
return d[s1.size()][s2.size()];
}
}
ABSL_NAMESPACE_END
} | #include "absl/strings/internal/damerau_levenshtein_distance.h"
#include <cstdint>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
namespace {
using absl::strings_internal::CappedDamerauLevenshteinDistance;
TEST(Distance, TestDistances) {
EXPECT_THAT(CappedDamerauLevenshteinDistance("ab", "ab", 6), uint8_t{0});
EXPECT_THAT(CappedDamerauLevenshteinDistance("a", "b", 6), uint8_t{1});
EXPECT_THAT(CappedDamerauLevenshteinDistance("ca", "abc", 6), uint8_t{3});
EXPECT_THAT(CappedDamerauLevenshteinDistance("abcd", "ad", 6), uint8_t{2});
EXPECT_THAT(CappedDamerauLevenshteinDistance("abcd", "cadb", 6), uint8_t{4});
EXPECT_THAT(CappedDamerauLevenshteinDistance("abcd", "bdac", 6), uint8_t{4});
EXPECT_THAT(CappedDamerauLevenshteinDistance("ab", "ab", 0), uint8_t{0});
EXPECT_THAT(CappedDamerauLevenshteinDistance("", "", 0), uint8_t{0});
EXPECT_THAT(CappedDamerauLevenshteinDistance("abc", "abc", 6), uint8_t{0});
for (auto res :
{"", "ca", "efg", "ea", "ce", "ceb", "eca", "cae", "cea", "bea"}) {
EXPECT_THAT(CappedDamerauLevenshteinDistance("abc", res, 6), uint8_t{3});
EXPECT_THAT(CappedDamerauLevenshteinDistance(res, "abc", 6), uint8_t{3});
}
for (auto res :
{"a", "b", "c", "ba", "cb", "bca", "cab", "cba", "ace",
"efc", "ebf", "aef", "ae", "be", "eb", "ec", "ecb", "bec",
"bce", "cbe", "ace", "eac", "aeb", "bae", "eab", "eba"}) {
EXPECT_THAT(CappedDamerauLevenshteinDistance("abc", res, 6), uint8_t{2});
EXPECT_THAT(CappedDamerauLevenshteinDistance(res, "abc", 6), uint8_t{2});
}
for (auto res : {"ab", "ac", "bc", "acb", "bac", "ebc", "aec", "abe"}) {
EXPECT_THAT(CappedDamerauLevenshteinDistance("abc", res, 6), uint8_t{1});
EXPECT_THAT(CappedDamerauLevenshteinDistance(res, "abc", 6), uint8_t{1});
}
}
TEST(Distance, TestCutoff) {
EXPECT_THAT(CappedDamerauLevenshteinDistance("abcd", "a", 3), uint8_t{3});
EXPECT_THAT(CappedDamerauLevenshteinDistance("abcd", "a", 2), uint8_t{3});
EXPECT_THAT(CappedDamerauLevenshteinDistance("abcd", "a", 1), uint8_t{2});
EXPECT_THAT(CappedDamerauLevenshteinDistance("abcdefg", "a", 2), uint8_t{3});
EXPECT_THAT(CappedDamerauLevenshteinDistance("a", "abcde", 2), uint8_t{3});
EXPECT_THAT(CappedDamerauLevenshteinDistance(std::string(102, 'a'),
std::string(102, 'a'), 105),
uint8_t{101});
EXPECT_THAT(CappedDamerauLevenshteinDistance(std::string(100, 'a'),
std::string(100, 'a'), 100),
uint8_t{0});
EXPECT_THAT(CappedDamerauLevenshteinDistance(std::string(100, 'a'),
std::string(100, 'b'), 100),
uint8_t{100});
EXPECT_THAT(CappedDamerauLevenshteinDistance(std::string(100, 'a'),
std::string(99, 'a'), 2),
uint8_t{1});
EXPECT_THAT(CappedDamerauLevenshteinDistance(std::string(100, 'a'),
std::string(101, 'a'), 2),
uint8_t{3});
EXPECT_THAT(CappedDamerauLevenshteinDistance(std::string(100, 'a'),
std::string(101, 'a'), 2),
uint8_t{3});
EXPECT_THAT(CappedDamerauLevenshteinDistance(std::string(UINT8_MAX + 1, 'a'),
std::string(UINT8_MAX + 1, 'b'),
UINT8_MAX),
uint8_t{101});
EXPECT_THAT(CappedDamerauLevenshteinDistance(std::string(UINT8_MAX - 1, 'a'),
std::string(UINT8_MAX - 1, 'b'),
UINT8_MAX),
uint8_t{101});
EXPECT_THAT(
CappedDamerauLevenshteinDistance(std::string(UINT8_MAX, 'a'),
std::string(UINT8_MAX, 'b'), UINT8_MAX),
uint8_t{101});
EXPECT_THAT(CappedDamerauLevenshteinDistance(std::string(UINT8_MAX - 1, 'a'),
std::string(UINT8_MAX - 1, 'a'),
UINT8_MAX),
uint8_t{101});
}
} | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/strings/internal/damerau_levenshtein_distance.cc | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/strings/internal/damerau_levenshtein_distance_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4 |
5fe37cdb-4572-49d7-8a2a-00bf0f3af8ef | cpp | google/quiche | quic_client_session_cache | quiche/quic/core/crypto/quic_client_session_cache.cc | quiche/quic/core/crypto/quic_client_session_cache_test.cc | #include "quiche/quic/core/crypto/quic_client_session_cache.h"
#include <memory>
#include <string>
#include <utility>
#include "quiche/quic/core/quic_clock.h"
namespace quic {
namespace {
const size_t kDefaultMaxEntries = 1024;
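// A session is usable when `now` lies in [time - 1, time + timeout); the
// one-second allowance tolerates a small clock skew between when the session
// was issued and when it is looked up.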
bool IsValid(SSL_SESSION* session, uint64_t now) {
if (!session) return false;
return !(now + 1 < SSL_SESSION_get_time(session) ||
now >= SSL_SESSION_get_time(session) +
SSL_SESSION_get_timeout(session));
}
bool DoApplicationStatesMatch(const ApplicationState* state,
ApplicationState* other) {
if ((state && !other) || (!state && other)) return false;
if ((!state && !other) || *state == *other) return true;
return false;
}
}
QuicClientSessionCache::QuicClientSessionCache()
: QuicClientSessionCache(kDefaultMaxEntries) {}
QuicClientSessionCache::QuicClientSessionCache(size_t max_entries)
: cache_(max_entries) {}
QuicClientSessionCache::~QuicClientSessionCache() { Clear(); }
void QuicClientSessionCache::Insert(const QuicServerId& server_id,
bssl::UniquePtr<SSL_SESSION> session,
const TransportParameters& params,
const ApplicationState* application_state) {
QUICHE_DCHECK(session) << "TLS session is not inserted into client cache.";
auto iter = cache_.Lookup(server_id);
if (iter == cache_.end()) {
CreateAndInsertEntry(server_id, std::move(session), params,
application_state);
return;
}
QUICHE_DCHECK(iter->second->params);
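  // Reuse the existing entry only when both the transport parameters and the
  // application state match; otherwise drop it and insert a fresh entry.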
if (params == *iter->second->params &&
DoApplicationStatesMatch(application_state,
iter->second->application_state.get())) {
iter->second->PushSession(std::move(session));
return;
}
cache_.Erase(iter);
CreateAndInsertEntry(server_id, std::move(session), params,
application_state);
}
std::unique_ptr<QuicResumptionState> QuicClientSessionCache::Lookup(
const QuicServerId& server_id, QuicWallTime now, const SSL_CTX*) {
auto iter = cache_.Lookup(server_id);
if (iter == cache_.end()) return nullptr;
if (!IsValid(iter->second->PeekSession(), now.ToUNIXSeconds())) {
QUIC_DLOG(INFO) << "TLS Session expired for host:" << server_id.host();
cache_.Erase(iter);
return nullptr;
}
auto state = std::make_unique<QuicResumptionState>();
state->tls_session = iter->second->PopSession();
if (iter->second->params != nullptr) {
state->transport_params =
std::make_unique<TransportParameters>(*iter->second->params);
}
if (iter->second->application_state != nullptr) {
state->application_state =
std::make_unique<ApplicationState>(*iter->second->application_state);
}
if (!iter->second->token.empty()) {
state->token = iter->second->token;
iter->second->token.clear();
}
return state;
}
void QuicClientSessionCache::ClearEarlyData(const QuicServerId& server_id) {
auto iter = cache_.Lookup(server_id);
if (iter == cache_.end()) return;
for (auto& session : iter->second->sessions) {
if (session) {
QUIC_DLOG(INFO) << "Clear early data for for host: " << server_id.host();
session.reset(SSL_SESSION_copy_without_early_data(session.get()));
}
}
}
void QuicClientSessionCache::OnNewTokenReceived(const QuicServerId& server_id,
absl::string_view token) {
if (token.empty()) {
return;
}
auto iter = cache_.Lookup(server_id);
if (iter == cache_.end()) {
return;
}
iter->second->token = std::string(token);
}
void QuicClientSessionCache::RemoveExpiredEntries(QuicWallTime now) {
auto iter = cache_.begin();
while (iter != cache_.end()) {
if (!IsValid(iter->second->PeekSession(), now.ToUNIXSeconds())) {
iter = cache_.Erase(iter);
} else {
++iter;
}
}
}
void QuicClientSessionCache::Clear() { cache_.Clear(); }
void QuicClientSessionCache::CreateAndInsertEntry(
const QuicServerId& server_id, bssl::UniquePtr<SSL_SESSION> session,
const TransportParameters& params,
const ApplicationState* application_state) {
auto entry = std::make_unique<Entry>();
entry->PushSession(std::move(session));
entry->params = std::make_unique<TransportParameters>(params);
if (application_state) {
entry->application_state =
std::make_unique<ApplicationState>(*application_state);
}
cache_.Insert(server_id, std::move(entry));
}
QuicClientSessionCache::Entry::Entry() = default;
QuicClientSessionCache::Entry::Entry(Entry&&) = default;
QuicClientSessionCache::Entry::~Entry() = default;
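// An entry keeps at most two sessions, most recent first; pushing a third
// session evicts the oldest one.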
void QuicClientSessionCache::Entry::PushSession(
bssl::UniquePtr<SSL_SESSION> session) {
if (sessions[0] != nullptr) {
sessions[1] = std::move(sessions[0]);
}
sessions[0] = std::move(session);
}
bssl::UniquePtr<SSL_SESSION> QuicClientSessionCache::Entry::PopSession() {
if (sessions[0] == nullptr) return nullptr;
bssl::UniquePtr<SSL_SESSION> session = std::move(sessions[0]);
sessions[0] = std::move(sessions[1]);
sessions[1] = nullptr;
return session;
}
SSL_SESSION* QuicClientSessionCache::Entry::PeekSession() {
return sessions[0].get();
}
} | #include "quiche/quic/core/crypto/quic_client_session_cache.h"
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "quiche/quic/platform/api/quic_test.h"
#include "quiche/quic/test_tools/mock_clock.h"
#include "quiche/common/quiche_text_utils.h"
namespace quic {
namespace test {
namespace {
const QuicTime::Delta kTimeout = QuicTime::Delta::FromSeconds(1000);
const QuicVersionLabel kFakeVersionLabel = 0x01234567;
const QuicVersionLabel kFakeVersionLabel2 = 0x89ABCDEF;
const uint64_t kFakeIdleTimeoutMilliseconds = 12012;
const uint8_t kFakeStatelessResetTokenData[16] = {
0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
0x98, 0x99, 0x9A, 0x9B, 0x9C, 0x9D, 0x9E, 0x9F};
const uint64_t kFakeMaxPacketSize = 9001;
const uint64_t kFakeInitialMaxData = 101;
const bool kFakeDisableMigration = true;
const auto kCustomParameter1 =
static_cast<TransportParameters::TransportParameterId>(0xffcd);
const char* kCustomParameter1Value = "foo";
const auto kCustomParameter2 =
static_cast<TransportParameters::TransportParameterId>(0xff34);
const char* kCustomParameter2Value = "bar";
std::vector<uint8_t> CreateFakeStatelessResetToken() {
return std::vector<uint8_t>(
kFakeStatelessResetTokenData,
kFakeStatelessResetTokenData + sizeof(kFakeStatelessResetTokenData));
}
TransportParameters::LegacyVersionInformation
CreateFakeLegacyVersionInformation() {
TransportParameters::LegacyVersionInformation legacy_version_information;
legacy_version_information.version = kFakeVersionLabel;
legacy_version_information.supported_versions.push_back(kFakeVersionLabel);
legacy_version_information.supported_versions.push_back(kFakeVersionLabel2);
return legacy_version_information;
}
TransportParameters::VersionInformation CreateFakeVersionInformation() {
TransportParameters::VersionInformation version_information;
version_information.chosen_version = kFakeVersionLabel;
version_information.other_versions.push_back(kFakeVersionLabel);
return version_information;
}
std::unique_ptr<TransportParameters> MakeFakeTransportParams() {
auto params = std::make_unique<TransportParameters>();
params->perspective = Perspective::IS_CLIENT;
params->legacy_version_information = CreateFakeLegacyVersionInformation();
params->version_information = CreateFakeVersionInformation();
params->max_idle_timeout_ms.set_value(kFakeIdleTimeoutMilliseconds);
params->stateless_reset_token = CreateFakeStatelessResetToken();
params->max_udp_payload_size.set_value(kFakeMaxPacketSize);
params->initial_max_data.set_value(kFakeInitialMaxData);
params->disable_active_migration = kFakeDisableMigration;
params->custom_parameters[kCustomParameter1] = kCustomParameter1Value;
params->custom_parameters[kCustomParameter2] = kCustomParameter2Value;
return params;
}
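// Hex-encoded serialized SSL_SESSION (with an embedded test certificate
// chain); decoded via SSL_SESSION_from_bytes below to fabricate resumable
// sessions without running a handshake.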
static const char kCachedSession[] =
"30820ad7020101020203040402130104206594ce84e61a866b56163c4ba09079aebf1d4f"
"6cbcbd38dc9d7066a38a76c9cf0420ec9062063582a4cc0a44f9ff93256a195153ba6032"
"0cf3c9189990932d838adaa10602046196f7b9a205020302a300a382039f3082039b3082"
"0183a00302010202021001300d06092a864886f70d010105050030623111300f06035504"
"030c08426f677573204941310b300906035504080c024d41310b30090603550406130255"
"533121301f06092a864886f70d0109011612626f67757340626f6775732d69612e636f6d"
"3110300e060355040a0c07426f6775734941301e170d3231303132383136323030315a17"
"0d3331303132363136323030315a3069311d301b06035504030c14746573745f6563632e"
"6578616d706c652e636f6d310b300906035504080c024d41310b30090603550406130255"
"53311e301c06092a864886f70d010901160f626f67757340626f6775732e636f6d310e30"
"0c060355040a0c05426f6775733059301306072a8648ce3d020106082a8648ce3d030107"
"034200041ba5e2b6f24e64990b9f24ae6d23473d8c77fbcfb7f554f36559529a69a57170"
"a10a81b7fe4a36ebf37b0a8c5e467a8443d8b8c002892aa5c1194bd843f42c9aa31f301d"
"301b0603551d11041430128210746573742e6578616d706c652e636f6d300d06092a8648"
"86f70d0101050500038202010019921d54ac06948763d609215f64f5d6540e3da886c6c9"
"61bc737a437719b4621416ef1229f39282d7d3234e1a5d57535473066233bd246eec8e96"
"1e0633cf4fe014c800e62599981820ec33d92e74ded0fa2953db1d81e19cb6890b6305b6"
"3ede8d3e9fcf3c09f3f57283acf08aa57be4ee9a68d00bb3e2ded5920c619b5d83e5194a"
"adb77ae5d61ed3e0a5670f0ae61cc3197329f0e71e3364dcab0405e9e4a6646adef8f022"
"6415ec16c8046307b1769029fe780bd576114dde2fa9b4a32aa70bc436549a24ee4907a9"
"045f6457ce8dfd8d62cc65315afe798ae1a948eefd70b035d415e73569c48fb20085de1a"
"87de039e6b0b9a5fcb4069df27f3a7a1409e72d1ac739c72f29ef786134207e61c79855f"
"c22e3ee5f6ad59a7b1ff0f18d79776f1c95efaebbebe381664132a58a1e7ff689945b7e0"
"88634b0872feeefbf6be020884b994c6a7ff435f2b3f609077ff97cb509cfa17ff479b34"
"e633e4b5bc46b20c5f27c80a2e2943f795a928acd5a3fc43c3af8425ad600c048b41d87e"
"6361bc72fc4e5e44680a3d325674ba6ffa760d2fc7d9e4847a8e0dd9d35a543324e18b94"
"2d42af6391ed1dd54a39e3f4a4c6b32486eb4ba72815dbd89c56fc053743a0b0483ce676"
"15defce6800c629b99d0cbc56da162487f475b7c246099eaf1e6d10a022b2f49c6af1da3"
"e8ed66096f267c4a76976b9572db7456ef90278330a4020400aa81b60481b3494e534543"
"55524500f3439e548c21d2ad6e5634cc1cc0045730819702010102020304040213010400"
"0420ec9062063582a4cc0a44f9ff93256a195153ba60320cf3c9189990932d838adaa106"
"02046196f7b9a205020302a300a4020400b20302011db5060404130800cdb807020500ff"
"ffffffb9050203093a80ba0404026833bb030101ffbc23042100d27d985bfce04833f02d"
"38366b219f4def42bc4ba1b01844d1778db11731487dbd020400be020400b20302011db3"
"8205da308205d6308203bea00302010202021000300d06092a864886f70d010105050030"
"62310b3009060355040613025553310b300906035504080c024d413110300e060355040a"
"0c07426f67757343413111300f06035504030c08426f6775732043413121301f06092a86"
"4886f70d0109011612626f67757340626f6775732d63612e636f6d3020170d3231303132"
"383136313935385a180f32303730303531313136313935385a30623111300f0603550403"
"0c08426f677573204941310b300906035504080c024d41310b3009060355040613025553"
"3121301f06092a864886f70d0109011612626f67757340626f6775732d69612e636f6d31"
"10300e060355040a0c07426f677573494130820222300d06092a864886f70d0101010500"
"0382020f003082020a028202010096c03a0ffc61bcedcd5ec9bf6f848b8a066b43f08377"
"3af518a6a0044f22e666e24d2ae741954e344302c4be04612185bd53bcd848eb322bf900"
"724eb0848047d647033ffbddb00f01d1de7c1cdb684f83c9bf5fd18ff60afad5a53b0d7d"
"2c2a50abc38df019cd7f50194d05bc4597a1ef8570ea04069a2c36d74496af126573ca18"
"8e470009b56250fadf2a04e837ee3837b36b1f08b7a0cfe2533d05f26484ce4e30203d01"
"517fffd3da63d0341079ddce16e9ab4dbf9d4049e5cc52326031e645dd682fe6220d9e0e"
"95451f5a82f3e1720dc13e8499466426a0bdbea9f6a76b3c9228dd3c79ab4dcc4c145ef0"
"e78d1ee8bfd4650692d7e28a54bed809d8f7b37fe24c586be59cc46638531cb291c8c156"
"8f08d67e768e51563e95a639c1f138b275ffad6a6a2a042ba9e26ad63c2ce63b600013f0"
"a6f0703ee51c4f457f7bab0391c2fc4c5bb3213742c9cf9941bff68cc2e1cc96139d35ed"
"1885244ddde0bf658416c486701841b81f7b17503d08c59a4db08a2a80755e007aa3b6c7"
"eadcaa9e07c8325f3689f100de23970b12c9d9f6d0a8fb35ba0fd75c64410318db4a13ac"
"3972ad16cdf6408af37013c7bcd7c42f20d6d04c3e39436c7531e8dafa219dd04b784ef0"
"3c70ee5a4782b33cafa925aa3deca62a14aed704f179b932efabc2b0c5c15a8a99bfc9e6"
"189dce7da50ea303594b6af9c933dd54b6e9d17c472d0203010001a38193308190300f06"
"03551d130101ff040530030101ff301d0603551d0e041604141a98e80029a80992b7e5e0"
"068ab9b3486cd839d6301f0603551d23041830168014780beeefe2fa419c48a438bdb30b"
"e37ef0b7a94e300b0603551d0f0404030202a430130603551d25040c300a06082b060105"
"05070301301b0603551d11041430128207426f67757343418207426f6775734941300d06"
"092a864886f70d010105050003820201009e822ed8064b1aabaddf1340010ea147f68c06"
"5a5a599ea305349f1b0e545a00817d6e55c7bf85560fab429ca72186c4d520b52f5cc121"
"abd068b06f3111494431d2522efa54642f907059e7db80b73bb5ecf621377195b8700bba"
"df798cece8c67a9571548d0e6592e81ae5d934877cb170aef18d3b97f635600fe0890d98"
"f88b33fe3d1fd34c1c915beae4e5c0b133f476c40b21d220f16ce9cdd9e8f97a36a31723"
"68875f052c9271648d9cb54687c6fdc3ea96f2908003bc5e5e79de00a21da7b8429f8b08"
"af4c4d34641e386d72eabf5f01f106363f2ffd18969bf0bb9a4d17627c6427ff772c4308"
"83c276feef5fc6dba9582c22fdbe9df7e8dfca375695f028ed588df54f3c86462dbf4c07"
"91d80ca738988a1419c86bb4dd8d738b746921f01f39422e5ffd488b6f00195b996e6392"
"3a820a32cd78b5989f339c0fcf4f269103964a30a16347d0ffdc8df1f3653ddc1515fa09"
"22c7aef1af1fbcb23e93ae7622ab1ee11fcfa98319bad4c37c091cad46bd0337b3cc78b5"
"5b9f1ea7994acc1f89c49a0b4cb540d2137e266fd43e56a9b5b778217b6f77df530e1eaf"
"b3417262b5ddb86d3c6c5ac51e3f326c650dcc2434473973b7182c66220d1f3871bde7ee"
"47d3f359d3d4c5bdd61baa684c03db4c75f9d6690c9e6e3abe6eaf5fa2c33c4daf26b373"
"d85a1e8a7d671ac4a0a97b14e36e81280de4593bbb12da7695b5060404130800cdb60301"
"0100b70402020403b807020500ffffffffb9050203093a80ba0404026833bb030101ffbd"
"020400be020400";
class QuicClientSessionCacheTest : public QuicTest {
public:
QuicClientSessionCacheTest() : ssl_ctx_(SSL_CTX_new(TLS_method())) {
clock_.AdvanceTime(QuicTime::Delta::FromSeconds(1));
}
protected:
bssl::UniquePtr<SSL_SESSION> NewSSLSession() {
std::string cached_session;
EXPECT_TRUE(absl::HexStringToBytes(kCachedSession, &cached_session));
SSL_SESSION* session = SSL_SESSION_from_bytes(
reinterpret_cast<const uint8_t*>(cached_session.data()),
cached_session.size(), ssl_ctx_.get());
QUICHE_DCHECK(session);
return bssl::UniquePtr<SSL_SESSION>(session);
}
bssl::UniquePtr<SSL_SESSION> MakeTestSession(
QuicTime::Delta timeout = kTimeout) {
bssl::UniquePtr<SSL_SESSION> session = NewSSLSession();
SSL_SESSION_set_time(session.get(), clock_.WallNow().ToUNIXSeconds());
SSL_SESSION_set_timeout(session.get(), timeout.ToSeconds());
return session;
}
bssl::UniquePtr<SSL_CTX> ssl_ctx_;
MockClock clock_;
};
TEST_F(QuicClientSessionCacheTest, SingleSession) {
QuicClientSessionCache cache;
auto params = MakeFakeTransportParams();
auto session = MakeTestSession();
QuicServerId id1("a.com", 443);
auto params2 = MakeFakeTransportParams();
auto session2 = MakeTestSession();
SSL_SESSION* unowned2 = session2.get();
QuicServerId id2("b.com", 443);
EXPECT_EQ(nullptr, cache.Lookup(id1, clock_.WallNow(), ssl_ctx_.get()));
EXPECT_EQ(nullptr, cache.Lookup(id2, clock_.WallNow(), ssl_ctx_.get()));
EXPECT_EQ(0u, cache.size());
cache.Insert(id1, std::move(session), *params, nullptr);
EXPECT_EQ(1u, cache.size());
EXPECT_EQ(
*params,
*(cache.Lookup(id1, clock_.WallNow(), ssl_ctx_.get())->transport_params));
EXPECT_EQ(nullptr, cache.Lookup(id2, clock_.WallNow(), ssl_ctx_.get()));
EXPECT_EQ(1u, cache.size());
EXPECT_EQ(nullptr, cache.Lookup(id1, clock_.WallNow(), ssl_ctx_.get()));
EXPECT_EQ(0u, cache.size());
auto session3 = MakeTestSession();
SSL_SESSION* unowned3 = session3.get();
QuicServerId id3("c.com", 443);
cache.Insert(id3, std::move(session3), *params, nullptr);
cache.Insert(id2, std::move(session2), *params2, nullptr);
EXPECT_EQ(2u, cache.size());
EXPECT_EQ(
unowned2,
cache.Lookup(id2, clock_.WallNow(), ssl_ctx_.get())->tls_session.get());
EXPECT_EQ(
unowned3,
cache.Lookup(id3, clock_.WallNow(), ssl_ctx_.get())->tls_session.get());
EXPECT_EQ(nullptr, cache.Lookup(id1, clock_.WallNow(), ssl_ctx_.get()));
EXPECT_EQ(nullptr, cache.Lookup(id2, clock_.WallNow(), ssl_ctx_.get()));
EXPECT_EQ(nullptr, cache.Lookup(id3, clock_.WallNow(), ssl_ctx_.get()));
EXPECT_EQ(0u, cache.size());
}
TEST_F(QuicClientSessionCacheTest, MultipleSessions) {
QuicClientSessionCache cache;
auto params = MakeFakeTransportParams();
auto session = MakeTestSession();
QuicServerId id1("a.com", 443);
auto session2 = MakeTestSession();
SSL_SESSION* unowned2 = session2.get();
auto session3 = MakeTestSession();
SSL_SESSION* unowned3 = session3.get();
cache.Insert(id1, std::move(session), *params, nullptr);
cache.Insert(id1, std::move(session2), *params, nullptr);
cache.Insert(id1, std::move(session3), *params, nullptr);
EXPECT_EQ(
unowned3,
cache.Lookup(id1, clock_.WallNow(), ssl_ctx_.get())->tls_session.get());
EXPECT_EQ(
unowned2,
cache.Lookup(id1, clock_.WallNow(), ssl_ctx_.get())->tls_session.get());
EXPECT_EQ(nullptr, cache.Lookup(id1, clock_.WallNow(), ssl_ctx_.get()));
}
TEST_F(QuicClientSessionCacheTest, DifferentTransportParams) {
QuicClientSessionCache cache;
auto params = MakeFakeTransportParams();
auto session = MakeTestSession();
QuicServerId id1("a.com", 443);
auto session2 = MakeTestSession();
auto session3 = MakeTestSession();
SSL_SESSION* unowned3 = session3.get();
cache.Insert(id1, std::move(session), *params, nullptr);
cache.Insert(id1, std::move(session2), *params, nullptr);
params->perspective = Perspective::IS_SERVER;
cache.Insert(id1, std::move(session3), *params, nullptr);
auto resumption_state = cache.Lookup(id1, clock_.WallNow(), ssl_ctx_.get());
EXPECT_EQ(unowned3, resumption_state->tls_session.get());
EXPECT_EQ(*params.get(), *resumption_state->transport_params);
EXPECT_EQ(nullptr, cache.Lookup(id1, clock_.WallNow(), ssl_ctx_.get()));
}
TEST_F(QuicClientSessionCacheTest, DifferentApplicationState) {
QuicClientSessionCache cache;
auto params = MakeFakeTransportParams();
auto session = MakeTestSession();
QuicServerId id1("a.com", 443);
auto session2 = MakeTestSession();
auto session3 = MakeTestSession();
SSL_SESSION* unowned3 = session3.get();
ApplicationState state;
state.push_back('a');
cache.Insert(id1, std::move(session), *params, &state);
cache.Insert(id1, std::move(session2), *params, &state);
cache.Insert(id1, std::move(session3), *params, nullptr);
auto resumption_state = cache.Lookup(id1, clock_.WallNow(), ssl_ctx_.get());
EXPECT_EQ(unowned3, resumption_state->tls_session.get());
EXPECT_EQ(nullptr, resumption_state->application_state);
EXPECT_EQ(nullptr, cache.Lookup(id1, clock_.WallNow(), ssl_ctx_.get()));
}
TEST_F(QuicClientSessionCacheTest, BothStatesDifferent) {
QuicClientSessionCache cache;
auto params = MakeFakeTransportParams();
auto session = MakeTestSession();
QuicServerId id1("a.com", 443);
auto session2 = MakeTestSession();
auto session3 = MakeTestSession();
SSL_SESSION* unowned3 = session3.get();
ApplicationState state;
state.push_back('a');
cache.Insert(id1, std::move(session), *params, &state);
cache.Insert(id1, std::move(session2), *params, &state);
params->perspective = Perspective::IS_SERVER;
cache.Insert(id1, std::move(session3), *params, nullptr);
auto resumption_state = cache.Lookup(id1, clock_.WallNow(), ssl_ctx_.get());
EXPECT_EQ(unowned3, resumption_state->tls_session.get());
EXPECT_EQ(*params.get(), *resumption_state->transport_params);
EXPECT_EQ(nullptr, resumption_state->application_state);
EXPECT_EQ(nullptr, cache.Lookup(id1, clock_.WallNow(), ssl_ctx_.get()));
}
TEST_F(QuicClientSessionCacheTest, SizeLimit) {
QuicClientSessionCache cache(2);
auto params = MakeFakeTransportParams();
auto session = MakeTestSession();
QuicServerId id1("a.com", 443);
auto session2 = MakeTestSession();
SSL_SESSION* unowned2 = session2.get();
QuicServerId id2("b.com", 443);
auto session3 = MakeTestSession();
SSL_SESSION* unowned3 = session3.get();
QuicServerId id3("c.com", 443);
cache.Insert(id1, std::move(session), *params, nullptr);
cache.Insert(id2, std::move(session2), *params, nullptr);
cache.Insert(id3, std::move(session3), *params, nullptr);
EXPECT_EQ(2u, cache.size());
EXPECT_EQ(
unowned2,
cache.Lookup(id2, clock_.WallNow(), ssl_ctx_.get())->tls_session.get());
EXPECT_EQ(
unowned3,
cache.Lookup(id3, clock_.WallNow(), ssl_ctx_.get())->tls_session.get());
EXPECT_EQ(nullptr, cache.Lookup(id1, clock_.WallNow(), ssl_ctx_.get()));
}
TEST_F(QuicClientSessionCacheTest, ClearEarlyData) {
QuicClientSessionCache cache;
SSL_CTX_set_early_data_enabled(ssl_ctx_.get(), 1);
auto params = MakeFakeTransportParams();
auto session = MakeTestSession();
QuicServerId id1("a.com", 443);
auto session2 = MakeTestSession();
EXPECT_TRUE(SSL_SESSION_early_data_capable(session.get()));
EXPECT_TRUE(SSL_SESSION_early_data_capable(session2.get()));
cache.Insert(id1, std::move(session), *params, nullptr);
cache.Insert(id1, std::move(session2), *params, nullptr);
cache.ClearEarlyData(id1);
auto resumption_state = cache.Lookup(id1, clock_.WallNow(), ssl_ctx_.get());
EXPECT_FALSE(
SSL_SESSION_early_data_capable(resumption_state->tls_session.get()));
resumption_state = cache.Lookup(id1, clock_.WallNow(), ssl_ctx_.get());
EXPECT_FALSE(
SSL_SESSION_early_data_capable(resumption_state->tls_session.get()));
EXPECT_EQ(nullptr, cache.Lookup(id1, clock_.WallNow(), ssl_ctx_.get()));
}
TEST_F(QuicClientSessionCacheTest, Expiration) {
QuicClientSessionCache cache;
auto params = MakeFakeTransportParams();
auto session = MakeTestSession();
QuicServerId id1("a.com", 443);
auto session2 = MakeTestSession(3 * kTimeout);
SSL_SESSION* unowned2 = session2.get();
QuicServerId id2("b.com", 443);
cache.Insert(id1, std::move(session), *params, nullptr);
cache.Insert(id2, std::move(session2), *params, nullptr);
EXPECT_EQ(2u, cache.size());
clock_.AdvanceTime(kTimeout * 2);
EXPECT_EQ(2u, cache.size());
EXPECT_EQ(nullptr, cache.Lookup(id1, clock_.WallNow(), ssl_ctx_.get()));
EXPECT_EQ(1u, cache.size());
EXPECT_EQ(
unowned2,
cache.Lookup(id2, clock_.WallNow(), ssl_ctx_.get())->tls_session.get());
EXPECT_EQ(1u, cache.size());
}
TEST_F(QuicClientSessionCacheTest, RemoveExpiredEntriesAndClear) {
QuicClientSessionCache cache;
auto params = MakeFakeTransportParams();
auto session = MakeTestSession();
quic::QuicServerId id1("a.com", 443);
auto session2 = MakeTestSession(3 * kTimeout);
quic::QuicServerId id2("b.com", 443);
cache.Insert(id1, std::move(session), *params, nullptr);
cache.Insert(id2, std::move(session2), *params, nullptr);
EXPECT_EQ(2u, cache.size());
clock_.AdvanceTime(kTimeout * 2);
EXPECT_EQ(2u, cache.size());
cache.RemoveExpiredEntries(clock_.WallNow());
EXPECT_EQ(nullptr, cache.Lookup(id1, clock_.WallNow(), ssl_ctx_.get()));
EXPECT_EQ(1u, cache.size());
cache.Clear();
EXPECT_EQ(0u, cache.size());
}
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/crypto/quic_client_session_cache.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/crypto/quic_client_session_cache_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
fc6b01ea-f0a3-4da6-bd55-c32d499105a4 | cpp | tensorflow/tensorflow | curl_http_request | third_party/xla/third_party/tsl/tsl/platform/cloud/curl_http_request.cc | third_party/xla/third_party/tsl/tsl/platform/cloud/curl_http_request_test.cc | #include "tsl/platform/cloud/curl_http_request.h"
#include <algorithm>
#include "xla/tsl/lib/gtl/map_util.h"
#include "xla/tsl/util/env_var.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/macros.h"
#include "tsl/platform/scanner.h"
#include "tsl/platform/str_util.h"
#include "tsl/platform/types.h"
#define CHECK_CURL_OK(expr) CHECK_EQ(expr, CURLE_OK)
namespace tsl {
namespace {
constexpr uint64 kVerboseOutput = 0;
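// Pass-through implementation of the LibCurl interface backed by the real
// libcurl. Load() runs curl_global_init(CURL_GLOBAL_ALL) exactly once and
// hands out a process-wide singleton.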
class LibCurlProxy : public LibCurl {
public:
static LibCurlProxy* Load() {
static LibCurlProxy* libcurl = []() -> LibCurlProxy* {
curl_global_init(CURL_GLOBAL_ALL);
return new LibCurlProxy;
}();
return libcurl;
}
CURL* curl_easy_init() override { return ::curl_easy_init(); }
CURLcode curl_easy_setopt(CURL* curl, CURLoption option,
uint64 param) override {
return ::curl_easy_setopt(curl, option, param);
}
CURLcode curl_easy_setopt(CURL* curl, CURLoption option,
const char* param) override {
return ::curl_easy_setopt(curl, option, param);
}
CURLcode curl_easy_setopt(CURL* curl, CURLoption option,
void* param) override {
return ::curl_easy_setopt(curl, option, param);
}
CURLcode curl_easy_setopt(CURL* curl, CURLoption option,
size_t (*param)(void*, size_t, size_t,
FILE*)) override {
return ::curl_easy_setopt(curl, option, param);
}
CURLcode curl_easy_setopt(CURL* curl, CURLoption option,
size_t (*param)(const void*, size_t, size_t,
void*)) override {
return ::curl_easy_setopt(curl, option, param);
}
CURLcode curl_easy_setopt(CURL* curl, CURLoption option,
int (*param)(void* clientp, curl_off_t dltotal,
curl_off_t dlnow, curl_off_t ultotal,
curl_off_t ulnow)) override {
return ::curl_easy_setopt(curl, option, param);
}
CURLcode curl_easy_perform(CURL* curl) override {
return ::curl_easy_perform(curl);
}
CURLcode curl_easy_getinfo(CURL* curl, CURLINFO info,
uint64* value) override {
return ::curl_easy_getinfo(curl, info, value);
}
CURLcode curl_easy_getinfo(CURL* curl, CURLINFO info,
double* value) override {
return ::curl_easy_getinfo(curl, info, value);
}
void curl_easy_cleanup(CURL* curl) override {
return ::curl_easy_cleanup(curl);
}
char* curl_easy_escape(CURL* curl, const char* str, int length) override {
return ::curl_easy_escape(curl, str, length);
}
curl_slist* curl_slist_append(curl_slist* list, const char* str) override {
return ::curl_slist_append(list, str);
}
void curl_slist_free_all(curl_slist* list) override {
return ::curl_slist_free_all(list);
}
void curl_free(void* p) override { ::curl_free(p); }
};
}
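// The constructor pins library-wide defaults on the easy handle: an optional
// CA bundle override read from the CURL_CA_BUNDLE environment variable,
// HTTP/1.1, signal-free operation (recommended for multi-threaded use), and
// a transfer-info callback used below to abort stalled transfers.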
CurlHttpRequest::CurlHttpRequest() : CurlHttpRequest(LibCurlProxy::Load()) {}
CurlHttpRequest::CurlHttpRequest(LibCurl* libcurl, Env* env)
: libcurl_(libcurl), env_(env) {
default_response_buffer_.reserve(CURL_MAX_WRITE_SIZE);
curl_ = libcurl_->curl_easy_init();
CHECK(curl_ != nullptr) << "Couldn't initialize a curl session.";
std::string value = "";
TF_CHECK_OK(ReadStringFromEnvVar("CURL_CA_BUNDLE", "", &value));
if (!value.empty()) {
CHECK_CURL_OK(
libcurl_->curl_easy_setopt(curl_, CURLOPT_CAINFO, value.c_str()));
}
CHECK_CURL_OK(
libcurl_->curl_easy_setopt(curl_, CURLOPT_VERBOSE, kVerboseOutput));
CHECK_CURL_OK(libcurl_->curl_easy_setopt(curl_, CURLOPT_USERAGENT, "TSL"));
CHECK_CURL_OK(libcurl_->curl_easy_setopt(curl_, CURLOPT_NOSIGNAL, 1L));
CHECK_CURL_OK(libcurl_->curl_easy_setopt(curl_, CURLOPT_HTTP_VERSION,
CURL_HTTP_VERSION_1_1));
CHECK_CURL_OK(
libcurl_->curl_easy_setopt(curl_, CURLOPT_NOPROGRESS, uint64{0}));
CHECK_CURL_OK(libcurl_->curl_easy_setopt(curl_, CURLOPT_XFERINFODATA, this));
CHECK_CURL_OK(libcurl_->curl_easy_setopt(curl_, CURLOPT_XFERINFOFUNCTION,
&CurlHttpRequest::ProgressCallback));
SetResultBuffer(&default_response_buffer_);
}
CurlHttpRequest::~CurlHttpRequest() {
if (curl_headers_) {
libcurl_->curl_slist_free_all(curl_headers_);
}
if (resolve_list_) {
libcurl_->curl_slist_free_all(resolve_list_);
}
if (put_body_) {
if (fclose(put_body_) != 0) {
LOG(ERROR) << "fclose() failed: " << strerror(errno);
}
}
if (curl_) {
libcurl_->curl_easy_cleanup(curl_);
}
}
string CurlHttpRequest::EscapeString(const string& str) {
char* out_char_str = libcurl_->curl_easy_escape(curl_, str.c_str(), 0);
string out_str(out_char_str);
libcurl_->curl_free(out_char_str);
return out_str;
}
void CurlHttpRequest::SetUri(const string& uri) {
CheckNotSent();
is_uri_set_ = true;
uri_ = uri;
CHECK_CURL_OK(libcurl_->curl_easy_setopt(curl_, CURLOPT_URL, uri.c_str()));
}
void CurlHttpRequest::SetRange(uint64 start, uint64 end) {
CheckNotSent();
CHECK_CURL_OK(libcurl_->curl_easy_setopt(
curl_, CURLOPT_RANGE, strings::StrCat(start, "-", end).c_str()));
}
void CurlHttpRequest::AddHeader(const string& name, const string& value) {
CheckNotSent();
curl_headers_ = libcurl_->curl_slist_append(
curl_headers_, strings::StrCat(name, ": ", value).c_str());
}
void CurlHttpRequest::AddResolveOverride(const string& hostname, int64_t port,
const string& ip_addr) {
CheckNotSent();
resolve_list_ = libcurl_->curl_slist_append(
resolve_list_,
strings::StrCat(hostname, ":", port, ":", ip_addr).c_str());
}
void CurlHttpRequest::AddAuthBearerHeader(const string& auth_token) {
CheckNotSent();
if (!auth_token.empty()) {
AddHeader("Authorization", strings::StrCat("Bearer ", auth_token));
}
}
void CurlHttpRequest::SetRequestStats(RequestStats* stats) {
CheckNotSent();
CHECK(stats_ == nullptr) << "SetRequestStats already called";
stats_ = stats;
}
void CurlHttpRequest::SetDeleteRequest() {
CheckNotSent();
CheckMethodNotSet();
is_method_set_ = true;
method_ = RequestMethod::kDelete;
CHECK_CURL_OK(
libcurl_->curl_easy_setopt(curl_, CURLOPT_CUSTOMREQUEST, "DELETE"));
}
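// Streams the PUT body straight from a FILE*, starting at `offset`: the
// remaining byte count is advertised via Content-Length, and libcurl's
// default read callback (fread) pulls from the handle set as
// CURLOPT_READDATA.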
absl::Status CurlHttpRequest::SetPutFromFile(const string& body_filepath,
size_t offset) {
CheckNotSent();
CheckMethodNotSet();
is_method_set_ = true;
method_ = RequestMethod::kPut;
if (put_body_) {
if (fclose(put_body_) != 0) {
LOG(ERROR) << "fclose() failed: " << strerror(errno);
}
}
put_body_ = fopen(body_filepath.c_str(), "r");
if (!put_body_) {
return errors::InvalidArgument("Couldn't open the specified file: " +
body_filepath);
}
fseek(put_body_, 0, SEEK_END);
const auto size = ftell(put_body_) - offset;
fseek(put_body_, offset, SEEK_SET);
curl_headers_ = libcurl_->curl_slist_append(
curl_headers_, strings::StrCat("Content-Length: ", size).c_str());
CHECK_CURL_OK(libcurl_->curl_easy_setopt(curl_, CURLOPT_PUT, 1));
CHECK_CURL_OK(libcurl_->curl_easy_setopt(curl_, CURLOPT_READDATA,
reinterpret_cast<void*>(put_body_)));
return absl::OkStatus();
}
void CurlHttpRequest::SetPutEmptyBody() {
CheckNotSent();
CheckMethodNotSet();
is_method_set_ = true;
method_ = RequestMethod::kPut;
CHECK_CURL_OK(libcurl_->curl_easy_setopt(curl_, CURLOPT_PUT, 1));
AddHeader("Content-Length", "0");
AddHeader("Transfer-Encoding", "identity");
CHECK_CURL_OK(libcurl_->curl_easy_setopt(curl_, CURLOPT_READDATA,
reinterpret_cast<void*>(this)));
CHECK_CURL_OK(libcurl_->curl_easy_setopt(curl_, CURLOPT_READFUNCTION,
&CurlHttpRequest::ReadCallback));
}
void CurlHttpRequest::SetPostFromBuffer(const char* buffer, size_t size) {
CheckNotSent();
CheckMethodNotSet();
is_method_set_ = true;
method_ = RequestMethod::kPost;
curl_headers_ = libcurl_->curl_slist_append(
curl_headers_, strings::StrCat("Content-Length: ", size).c_str());
CHECK_CURL_OK(libcurl_->curl_easy_setopt(curl_, CURLOPT_POST, 1));
CHECK_CURL_OK(libcurl_->curl_easy_setopt(curl_, CURLOPT_READDATA,
reinterpret_cast<void*>(this)));
CHECK_CURL_OK(libcurl_->curl_easy_setopt(curl_, CURLOPT_READFUNCTION,
&CurlHttpRequest::ReadCallback));
post_body_buffer_ = absl::string_view(buffer, size);
}
void CurlHttpRequest::SetPostEmptyBody() {
CheckNotSent();
CheckMethodNotSet();
is_method_set_ = true;
method_ = RequestMethod::kPost;
CHECK_CURL_OK(libcurl_->curl_easy_setopt(curl_, CURLOPT_POST, 1));
AddHeader("Content-Length", "0");
AddHeader("Transfer-Encoding", "identity");
CHECK_CURL_OK(libcurl_->curl_easy_setopt(curl_, CURLOPT_READDATA,
reinterpret_cast<void*>(this)));
CHECK_CURL_OK(libcurl_->curl_easy_setopt(curl_, CURLOPT_READFUNCTION,
&CurlHttpRequest::ReadCallback));
}
void CurlHttpRequest::SetResultBuffer(std::vector<char>* out_buffer) {
CheckNotSent();
CHECK(out_buffer != nullptr);
out_buffer->clear();
response_buffer_ = out_buffer;
CHECK_CURL_OK(libcurl_->curl_easy_setopt(curl_, CURLOPT_WRITEDATA,
reinterpret_cast<void*>(this)));
CHECK_CURL_OK(libcurl_->curl_easy_setopt(curl_, CURLOPT_WRITEFUNCTION,
&CurlHttpRequest::WriteCallback));
}
void CurlHttpRequest::SetResultBufferDirect(char* buffer, size_t size) {
CHECK(buffer != nullptr);
CheckNotSent();
direct_response_ = DirectResponseState{buffer, size, 0, 0};
CHECK_CURL_OK(libcurl_->curl_easy_setopt(curl_, CURLOPT_WRITEDATA,
reinterpret_cast<void*>(this)));
CHECK_CURL_OK(libcurl_->curl_easy_setopt(
curl_, CURLOPT_WRITEFUNCTION, &CurlHttpRequest::WriteCallbackDirect));
}
bool CurlHttpRequest::IsDirectResponse() const {
return direct_response_.buffer_ != nullptr;
}
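// Copies as much of the payload as fits into the caller-supplied buffer.
// bytes_received_ keeps counting past the buffer's end, and returning fewer
// bytes than were delivered makes libcurl fail with CURLE_WRITE_ERROR, which
// CURLcodeToStatus() later turns into a diagnosable overflow error.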
size_t CurlHttpRequest::WriteCallbackDirect(const void* ptr, size_t size,
size_t nmemb, void* userdata) {
CHECK(ptr != nullptr);
auto that = reinterpret_cast<CurlHttpRequest*>(userdata);
DirectResponseState* state = &that->direct_response_;
CHECK(state->buffer_ != nullptr);
CHECK(state->bytes_transferred_ <= state->buffer_size_);
size_t curl_bytes_received = size * nmemb;
size_t user_buffer_bytes_available =
state->buffer_size_ - state->bytes_transferred_;
size_t bytes_to_copy =
std::min<size_t>(curl_bytes_received, user_buffer_bytes_available);
memcpy(&state->buffer_[state->bytes_transferred_], ptr, bytes_to_copy);
state->bytes_transferred_ += bytes_to_copy;
state->bytes_received_ += curl_bytes_received;
return bytes_to_copy;
}
size_t CurlHttpRequest::GetResultBufferDirectBytesTransferred() {
CHECK(direct_response_.buffer_ != nullptr);
return direct_response_.bytes_transferred_;
}
void CurlHttpRequest::SetTimeouts(uint32 connection, uint32 inactivity,
uint32 total) {
CheckNotSent();
connect_timeout_secs_ = connection;
inactivity_timeout_secs_ = inactivity;
request_timeout_secs_ = total;
}
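// Default sink for buffered responses: appends the received bytes to the
// growable vector installed by SetResultBuffer().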
size_t CurlHttpRequest::WriteCallback(const void* ptr, size_t size,
size_t nmemb, void* this_object) {
CHECK(ptr);
auto that = reinterpret_cast<CurlHttpRequest*>(this_object);
CHECK(that->response_buffer_);
const size_t bytes_to_copy = size * nmemb;
that->response_buffer_->insert(
that->response_buffer_->end(), reinterpret_cast<const char*>(ptr),
reinterpret_cast<const char*>(ptr) + bytes_to_copy);
return bytes_to_copy;
}
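// Serves the in-memory request body (post_body_buffer_) to libcurl in
// chunks; for the empty-body variants it simply reports zero bytes.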
size_t CurlHttpRequest::ReadCallback(void* ptr, size_t size, size_t nmemb,
FILE* this_object) {
CHECK(ptr);
auto that = reinterpret_cast<CurlHttpRequest*>(this_object);
CHECK(that->post_body_read_ <= that->post_body_buffer_.size());
const size_t bytes_to_copy = std::min(
size * nmemb, that->post_body_buffer_.size() - that->post_body_read_);
memcpy(ptr, that->post_body_buffer_.data() + that->post_body_read_,
bytes_to_copy);
that->post_body_read_ += bytes_to_copy;
return bytes_to_copy;
}
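// Parses one "Name: value" response header per invocation, stripping
// trailing whitespace from the value; lines without a ": " separator (such
// as the status line) are ignored.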
size_t CurlHttpRequest::HeaderCallback(const void* ptr, size_t size,
size_t nmemb, void* this_object) {
CHECK(ptr);
auto that = reinterpret_cast<CurlHttpRequest*>(this_object);
absl::string_view header(reinterpret_cast<const char*>(ptr), size * nmemb);
absl::string_view name, value;
if (strings::Scanner(header)
.ScanEscapedUntil(':')
.StopCapture()
.OneLiteral(": ")
.GetResult(&value, &name)) {
string str_value(value);
absl::StripTrailingAsciiWhitespace(&str_value);
that->response_headers_[string(name)] = str_value;
}
return size * nmemb;
}
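// Performs the request and translates failures in two stages: transport
// errors via CURLcodeToStatus(), then the HTTP status code into a canonical
// absl::Status. 416 (range not satisfiable) is deliberately treated as
// success with an empty payload, and the buffered response is cleared on any
// error so callers never observe a partial error body.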
absl::Status CurlHttpRequest::Send() {
CheckNotSent();
CHECK(is_uri_set_) << "URI has not been set.";
is_sent_ = true;
if (curl_headers_) {
CHECK_CURL_OK(
libcurl_->curl_easy_setopt(curl_, CURLOPT_HTTPHEADER, curl_headers_));
}
if (resolve_list_) {
CHECK_CURL_OK(
libcurl_->curl_easy_setopt(curl_, CURLOPT_RESOLVE, resolve_list_));
}
CHECK_CURL_OK(libcurl_->curl_easy_setopt(curl_, CURLOPT_HEADERDATA,
reinterpret_cast<void*>(this)));
CHECK_CURL_OK(libcurl_->curl_easy_setopt(curl_, CURLOPT_HEADERFUNCTION,
&CurlHttpRequest::HeaderCallback));
CHECK_CURL_OK(libcurl_->curl_easy_setopt(curl_, CURLOPT_TIMEOUT,
request_timeout_secs_));
CHECK_CURL_OK(libcurl_->curl_easy_setopt(curl_, CURLOPT_CONNECTTIMEOUT,
connect_timeout_secs_));
char error_buffer[CURL_ERROR_SIZE] = {0};
CHECK_CURL_OK(
libcurl_->curl_easy_setopt(curl_, CURLOPT_ERRORBUFFER, error_buffer));
if (stats_ != nullptr) {
stats_->RecordRequest(this, uri_, method_);
}
const CURLcode curl_result = libcurl_->curl_easy_perform(curl_);
TF_RETURN_IF_ERROR(CURLcodeToStatus(curl_result, error_buffer));
double written_size = 0;
CHECK_CURL_OK(libcurl_->curl_easy_getinfo(curl_, CURLINFO_SIZE_DOWNLOAD,
&written_size));
CHECK_CURL_OK(libcurl_->curl_easy_getinfo(curl_, CURLINFO_RESPONSE_CODE,
&response_code_));
auto get_error_message = [this]() -> string {
string error_message = strings::StrCat(
"Error executing an HTTP request: HTTP response code ", response_code_);
absl::string_view body = GetResponse();
if (!body.empty()) {
return strings::StrCat(
error_message, " with body '",
body.substr(0, std::min(body.size(), response_to_error_limit_)), "'");
}
return error_message;
};
absl::Status result;
switch (response_code_) {
case 200:
case 201:
case 204:
case 206:
result = absl::OkStatus();
break;
case 416:
response_buffer_->clear();
if (IsDirectResponse()) {
direct_response_.bytes_transferred_ = 0;
}
result = absl::OkStatus();
break;
case 400:
case 406:
case 411:
case 414:
result = errors::InvalidArgument(get_error_message());
break;
case 401:
case 403:
case 407:
result = errors::PermissionDenied(get_error_message());
break;
case 404:
case 410:
result = errors::NotFound(get_error_message());
break;
case 302:
case 303:
case 304:
case 307:
case 412:
case 413:
result = errors::FailedPrecondition(get_error_message());
break;
case 308:
case 409:
case 429:
case 500:
case 502:
case 503:
default:
result = errors::Unavailable(get_error_message());
break;
}
if (!result.ok()) {
response_buffer_->clear();
}
if (stats_ != nullptr) {
stats_->RecordResponse(this, uri_, method_, result);
}
return result;
}
void CurlHttpRequest::CheckMethodNotSet() const {
CHECK(!is_method_set_) << "HTTP method has been already set.";
}
void CurlHttpRequest::CheckNotSent() const {
CHECK(!is_sent_) << "The request has already been sent.";
}
absl::string_view CurlHttpRequest::GetResponse() const {
absl::string_view response;
if (IsDirectResponse()) {
response = absl::string_view(direct_response_.buffer_,
direct_response_.bytes_transferred_);
} else {
response =
absl::string_view(response_buffer_->data(), response_buffer_->size());
}
return response;
}
string CurlHttpRequest::GetResponseHeader(const string& name) const {
const auto& header = response_headers_.find(name);
return header != response_headers_.end() ? header->second : "";
}
uint64 CurlHttpRequest::GetResponseCode() const { return response_code_; }
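// Invoked periodically by libcurl during the transfer. Returning a non-zero
// value aborts the request; that happens once no byte has moved in either
// direction for longer than inactivity_timeout_secs_, after logging curl's
// timing breakdown for the stuck connection.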
int CurlHttpRequest::ProgressCallback(void* this_object, curl_off_t dltotal,
curl_off_t dlnow, curl_off_t ultotal,
curl_off_t ulnow) {
auto that = reinterpret_cast<CurlHttpRequest*>(this_object);
const auto now = that->env_->NowSeconds();
const auto current_progress = dlnow + ulnow;
if (that->last_progress_timestamp_ == 0 ||
current_progress > that->last_progress_bytes_) {
that->last_progress_timestamp_ = now;
that->last_progress_bytes_ = current_progress;
return 0;
}
if (now - that->last_progress_timestamp_ > that->inactivity_timeout_secs_) {
double lookup_time = -1;
const auto lookup_time_status = that->libcurl_->curl_easy_getinfo(
that->curl_, CURLINFO_NAMELOOKUP_TIME, &lookup_time);
double connect_time = -1;
const auto connect_time_status = that->libcurl_->curl_easy_getinfo(
that->curl_, CURLINFO_CONNECT_TIME, &connect_time);
double pretransfer_time = -1;
const auto pretransfer_time_status = that->libcurl_->curl_easy_getinfo(
that->curl_, CURLINFO_PRETRANSFER_TIME, &pretransfer_time);
double starttransfer_time = -1;
const auto starttransfer_time_status = that->libcurl_->curl_easy_getinfo(
that->curl_, CURLINFO_STARTTRANSFER_TIME, &starttransfer_time);
LOG(ERROR) << "The transmission of request " << this_object
<< " (URI: " << that->uri_ << ") has been stuck at "
<< current_progress << " of " << dltotal + ultotal
<< " bytes for " << now - that->last_progress_timestamp_
<< " seconds and will be aborted. CURL timing information: "
<< "lookup time: " << lookup_time << " ("
<< curl_easy_strerror(lookup_time_status)
<< "), connect time: " << connect_time << " ("
<< curl_easy_strerror(connect_time_status)
<< "), pre-transfer time: " << pretransfer_time << " ("
<< curl_easy_strerror(pretransfer_time_status)
<< "), start-transfer time: " << starttransfer_time << " ("
<< curl_easy_strerror(starttransfer_time_status) << ")";
return 1;
}
return 0;
}
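// Maps a CURLcode to a canonical status. A write error caused by overflowing
// a direct-response buffer becomes FailedPrecondition, unless the server
// answered 416, which is success; unresolvable hosts and bad CA files are
// FailedPrecondition as well, and everything else maps to the retryable
// Unavailable.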
absl::Status CurlHttpRequest::CURLcodeToStatus(CURLcode code,
const char* error_buffer) {
if (code == CURLE_OK) {
return absl::OkStatus();
}
string error_message = strings::StrCat(
"Error executing an HTTP request: libcurl code ", code, " meaning '",
curl_easy_strerror(code), "', error details: ");
if (code == CURLE_WRITE_ERROR && IsDirectResponse() &&
direct_response_.bytes_received_ > direct_response_.buffer_size_) {
string overflow_message = strings::StrCat(
"Received ", direct_response_.bytes_received_, " response bytes ",
"for a ", direct_response_.buffer_size_, "-byte buffer");
uint64 response_code = 0;
const CURLcode get_response_result = libcurl_->curl_easy_getinfo(
curl_, CURLINFO_RESPONSE_CODE, &response_code);
if (get_response_result == CURLE_OK && response_code == 416) {
return absl::OkStatus();
}
return errors::FailedPrecondition(
strings::StrCat(error_message, overflow_message));
}
if (code == CURLE_COULDNT_RESOLVE_HOST || code == CURLE_SSL_CACERT_BADFILE) {
return errors::FailedPrecondition(
strings::StrCat(error_message, error_buffer));
}
return errors::Unavailable(
strings::StrCat(error_message, *error_buffer ? error_buffer : "(none)"));
}
} | #include "tsl/platform/cloud/curl_http_request.h"
#include <fstream>
#include <string>
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tsl/platform/mem.h"
#include "tsl/platform/path.h"
#include "tsl/platform/platform.h"
#include "tsl/platform/test.h"
namespace tsl {
namespace {
const string kTestContent = "random original scratch content";
class FakeEnv : public EnvWrapper {
public:
FakeEnv() : EnvWrapper(Env::Default()) {}
uint64 NowSeconds() const override { return now_; }
uint64 now_ = 10000;
};
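// In-memory libcurl double: curl_easy_setopt calls are recorded into public
// fields, and curl_easy_perform() replays the canned response body, headers,
// and optional (timestamp, bytes-downloaded) progress ticks instead of
// touching the network.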
class FakeLibCurl : public LibCurl {
public:
FakeLibCurl(const string& response_content, uint64 response_code)
: response_content_(response_content), response_code_(response_code) {}
FakeLibCurl(const string& response_content, uint64 response_code,
std::vector<std::tuple<uint64, curl_off_t>> progress_ticks,
FakeEnv* env)
: response_content_(response_content),
response_code_(response_code),
progress_ticks_(std::move(progress_ticks)),
env_(env) {}
FakeLibCurl(const string& response_content, uint64 response_code,
const std::vector<string>& response_headers)
: response_content_(response_content),
response_code_(response_code),
response_headers_(response_headers) {}
CURL* curl_easy_init() override {
is_initialized_ = true;
return reinterpret_cast<CURL*>(this);
}
CURLcode curl_easy_setopt(CURL* curl, CURLoption option,
uint64 param) override {
switch (option) {
case CURLOPT_POST:
is_post_ = param;
break;
case CURLOPT_PUT:
is_put_ = param;
break;
default:
break;
}
return CURLE_OK;
}
CURLcode curl_easy_setopt(CURL* curl, CURLoption option,
const char* param) override {
return curl_easy_setopt(curl, option,
reinterpret_cast<void*>(const_cast<char*>(param)));
}
CURLcode curl_easy_setopt(CURL* curl, CURLoption option,
void* param) override {
switch (option) {
case CURLOPT_URL:
url_ = reinterpret_cast<char*>(param);
break;
case CURLOPT_RANGE:
range_ = reinterpret_cast<char*>(param);
break;
case CURLOPT_CUSTOMREQUEST:
custom_request_ = reinterpret_cast<char*>(param);
break;
case CURLOPT_HTTPHEADER:
headers_ = reinterpret_cast<std::vector<string>*>(param);
break;
case CURLOPT_ERRORBUFFER:
error_buffer_ = reinterpret_cast<char*>(param);
break;
case CURLOPT_CAINFO:
ca_info_ = reinterpret_cast<char*>(param);
break;
case CURLOPT_WRITEDATA:
write_data_ = reinterpret_cast<FILE*>(param);
break;
case CURLOPT_HEADERDATA:
header_data_ = reinterpret_cast<FILE*>(param);
break;
case CURLOPT_READDATA:
read_data_ = reinterpret_cast<FILE*>(param);
break;
case CURLOPT_XFERINFODATA:
progress_data_ = param;
break;
default:
break;
}
return CURLE_OK;
}
CURLcode curl_easy_setopt(CURL* curl, CURLoption option,
size_t (*param)(void*, size_t, size_t,
FILE*)) override {
read_callback_ = param;
return CURLE_OK;
}
CURLcode curl_easy_setopt(CURL* curl, CURLoption option,
size_t (*param)(const void*, size_t, size_t,
void*)) override {
switch (option) {
case CURLOPT_WRITEFUNCTION:
write_callback_ = param;
break;
case CURLOPT_HEADERFUNCTION:
header_callback_ = param;
break;
default:
break;
}
return CURLE_OK;
}
CURLcode curl_easy_setopt(CURL* curl, CURLoption option,
int (*param)(void* clientp, curl_off_t dltotal,
curl_off_t dlnow, curl_off_t ultotal,
curl_off_t ulnow)) override {
progress_callback_ = param;
return CURLE_OK;
}
CURLcode curl_easy_perform(CURL* curl) override {
if (is_post_ || is_put_) {
char buffer[3];
int bytes_read;
posted_content_ = "";
do {
bytes_read = read_callback_(buffer, 1, sizeof(buffer), read_data_);
posted_content_ = strings::StrCat(
posted_content_, absl::string_view(buffer, bytes_read));
} while (bytes_read > 0);
}
if (write_data_ || write_callback_) {
size_t bytes_handled = write_callback_(
response_content_.c_str(), 1, response_content_.size(), write_data_);
if (bytes_handled != response_content_.size()) {
curl_easy_perform_result_ = CURLE_WRITE_ERROR;
}
}
for (const auto& header : response_headers_) {
header_callback_(header.c_str(), 1, header.size(), header_data_);
}
if (error_buffer_) {
strncpy(error_buffer_, curl_easy_perform_error_message_.c_str(),
curl_easy_perform_error_message_.size() + 1);
}
for (const auto& tick : progress_ticks_) {
env_->now_ = std::get<0>(tick);
if (progress_callback_(progress_data_, 0, std::get<1>(tick), 0, 0)) {
return CURLE_ABORTED_BY_CALLBACK;
}
}
return curl_easy_perform_result_;
}
CURLcode curl_easy_getinfo(CURL* curl, CURLINFO info,
uint64* value) override {
switch (info) {
case CURLINFO_RESPONSE_CODE:
*value = response_code_;
break;
default:
break;
}
return CURLE_OK;
}
CURLcode curl_easy_getinfo(CURL* curl, CURLINFO info,
double* value) override {
switch (info) {
case CURLINFO_SIZE_DOWNLOAD:
*value = response_content_.size();
break;
default:
break;
}
return CURLE_OK;
}
void curl_easy_cleanup(CURL* curl) override { is_cleaned_up_ = true; }
curl_slist* curl_slist_append(curl_slist* list, const char* str) override {
std::vector<string>* v = list ? reinterpret_cast<std::vector<string>*>(list)
: new std::vector<string>();
v->push_back(str);
return reinterpret_cast<curl_slist*>(v);
}
char* curl_easy_escape(CURL* curl, const char* str, int length) override {
const string victim = "/";
const string encoded = "%2F";
string temp_str = str;
std::string::size_type n = 0;
while ((n = temp_str.find(victim, n)) != std::string::npos) {
temp_str.replace(n, victim.size(), encoded);
n += encoded.size();
}
char* out_char_str = reinterpret_cast<char*>(
port::Malloc(sizeof(char) * temp_str.size() + 1));
std::copy(temp_str.begin(), temp_str.end(), out_char_str);
out_char_str[temp_str.size()] = '\0';
return out_char_str;
}
void curl_slist_free_all(curl_slist* list) override {
delete reinterpret_cast<std::vector<string>*>(list);
}
void curl_free(void* p) override { port::Free(p); }
string response_content_;
uint64 response_code_;
std::vector<string> response_headers_;
string url_;
string range_;
string custom_request_;
string ca_info_;
char* error_buffer_ = nullptr;
bool is_initialized_ = false;
bool is_cleaned_up_ = false;
std::vector<string>* headers_ = nullptr;
bool is_post_ = false;
bool is_put_ = false;
void* write_data_ = nullptr;
size_t (*write_callback_)(const void* ptr, size_t size, size_t nmemb,
void* userdata) = nullptr;
void* header_data_ = nullptr;
size_t (*header_callback_)(const void* ptr, size_t size, size_t nmemb,
void* userdata) = nullptr;
FILE* read_data_ = nullptr;
size_t (*read_callback_)(void* ptr, size_t size, size_t nmemb,
FILE* userdata) = &fread;
int (*progress_callback_)(void* clientp, curl_off_t dltotal, curl_off_t dlnow,
curl_off_t ultotal, curl_off_t ulnow) = nullptr;
void* progress_data_ = nullptr;
string posted_content_;
CURLcode curl_easy_perform_result_ = CURLE_OK;
string curl_easy_perform_error_message_;
std::vector<std::tuple<uint64, curl_off_t>> progress_ticks_;
FakeEnv* env_ = nullptr;
};
TEST(CurlHttpRequestTest, GetRequest) {
FakeLibCurl libcurl("get response", 200);
CurlHttpRequest http_request(&libcurl);
std::vector<char> scratch;
scratch.insert(scratch.begin(), kTestContent.begin(), kTestContent.end());
scratch.reserve(100);
http_request.SetUri("http:
http_request.AddAuthBearerHeader("fake-bearer");
http_request.SetRange(100, 199);
http_request.SetResultBuffer(&scratch);
TF_EXPECT_OK(http_request.Send());
EXPECT_EQ("get response", string(scratch.begin(), scratch.end()));
EXPECT_TRUE(libcurl.is_initialized_);
EXPECT_EQ("http:
EXPECT_EQ("100-199", libcurl.range_);
EXPECT_EQ("", libcurl.custom_request_);
EXPECT_EQ("", libcurl.ca_info_);
EXPECT_EQ(1, libcurl.headers_->size());
EXPECT_EQ("Authorization: Bearer fake-bearer", (*libcurl.headers_)[0]);
EXPECT_FALSE(libcurl.is_post_);
EXPECT_EQ(200, http_request.GetResponseCode());
}
TEST(CurlHttpRequestTest, GetRequest_Direct) {
FakeLibCurl libcurl("get response", 200);
CurlHttpRequest http_request(&libcurl);
std::vector<char> scratch(100, 0);
http_request.SetUri("http:
http_request.AddAuthBearerHeader("fake-bearer");
http_request.SetRange(100, 199);
http_request.SetResultBufferDirect(scratch.data(), scratch.capacity());
TF_EXPECT_OK(http_request.Send());
string expected_response = "get response";
size_t response_bytes_transferred =
http_request.GetResultBufferDirectBytesTransferred();
EXPECT_EQ(expected_response.size(), response_bytes_transferred);
EXPECT_EQ(
"get response",
string(scratch.begin(), scratch.begin() + response_bytes_transferred));
EXPECT_TRUE(libcurl.is_initialized_);
EXPECT_EQ("http:
EXPECT_EQ("100-199", libcurl.range_);
EXPECT_EQ("", libcurl.custom_request_);
EXPECT_EQ("", libcurl.ca_info_);
EXPECT_EQ(1, libcurl.headers_->size());
EXPECT_EQ("Authorization: Bearer fake-bearer", (*libcurl.headers_)[0]);
EXPECT_FALSE(libcurl.is_post_);
EXPECT_EQ(200, http_request.GetResponseCode());
}
TEST(CurlHttpRequestTest, GetRequest_CustomCaInfoFlag) {
static char set_var[] = "CURL_CA_BUNDLE=test";
putenv(set_var);
FakeLibCurl libcurl("get response", 200);
CurlHttpRequest http_request(&libcurl);
std::vector<char> scratch;
scratch.insert(scratch.begin(), kTestContent.begin(), kTestContent.end());
scratch.reserve(100);
http_request.SetUri("http:
http_request.AddAuthBearerHeader("fake-bearer");
http_request.SetRange(100, 199);
http_request.SetResultBuffer(&scratch);
TF_EXPECT_OK(http_request.Send());
EXPECT_EQ("get response", string(scratch.begin(), scratch.end()));
EXPECT_TRUE(libcurl.is_initialized_);
EXPECT_EQ("http:
EXPECT_EQ("100-199", libcurl.range_);
EXPECT_EQ("", libcurl.custom_request_);
EXPECT_EQ("test", libcurl.ca_info_);
EXPECT_EQ(1, libcurl.headers_->size());
EXPECT_EQ("Authorization: Bearer fake-bearer", (*libcurl.headers_)[0]);
EXPECT_FALSE(libcurl.is_post_);
EXPECT_EQ(200, http_request.GetResponseCode());
}
TEST(CurlHttpRequestTest, GetRequest_Direct_ResponseTooLarge) {
FakeLibCurl libcurl("get response", 200);
CurlHttpRequest http_request(&libcurl);
std::vector<char> scratch(5, 0);
http_request.SetUri("http:
http_request.SetResultBufferDirect(scratch.data(), scratch.size());
const absl::Status& status = http_request.Send();
EXPECT_EQ(error::FAILED_PRECONDITION, status.code());
EXPECT_EQ(
"Error executing an HTTP request: libcurl code 23 meaning "
"'Failed writing received data to disk/application', error details: "
"Received 12 response bytes for a 5-byte buffer",
status.message());
EXPECT_EQ(5, http_request.GetResultBufferDirectBytesTransferred());
EXPECT_EQ("get r", string(scratch.begin(), scratch.begin() + 5));
}
TEST(CurlHttpRequestTest, GetRequest_Direct_RangeOutOfBound) {
FakeLibCurl libcurl("get response", 416);
CurlHttpRequest http_request(&libcurl);
const string initialScratch = "abcde";
std::vector<char> scratch;
scratch.insert(scratch.end(), initialScratch.begin(), initialScratch.end());
http_request.SetUri("http:
http_request.SetRange(0, 4);
http_request.SetResultBufferDirect(scratch.data(), scratch.size());
TF_EXPECT_OK(http_request.Send());
EXPECT_EQ(416, http_request.GetResponseCode());
EXPECT_EQ(0, http_request.GetResultBufferDirectBytesTransferred());
EXPECT_EQ("get r", string(scratch.begin(), scratch.end()));
}
TEST(CurlHttpRequestTest, GetRequest_Empty) {
FakeLibCurl libcurl("", 200);
CurlHttpRequest http_request(&libcurl);
std::vector<char> scratch;
scratch.resize(0);
http_request.SetUri("http:
http_request.AddAuthBearerHeader("fake-bearer");
http_request.SetRange(100, 199);
http_request.SetResultBuffer(&scratch);
TF_EXPECT_OK(http_request.Send());
EXPECT_TRUE(scratch.empty());
EXPECT_TRUE(libcurl.is_initialized_);
EXPECT_EQ("http:
EXPECT_EQ("100-199", libcurl.range_);
EXPECT_EQ("", libcurl.custom_request_);
EXPECT_EQ(1, libcurl.headers_->size());
EXPECT_EQ("Authorization: Bearer fake-bearer", (*libcurl.headers_)[0]);
EXPECT_FALSE(libcurl.is_post_);
EXPECT_EQ(200, http_request.GetResponseCode());
}
TEST(CurlHttpRequestTest, GetRequest_RangeOutOfBound) {
FakeLibCurl libcurl("get response", 416);
CurlHttpRequest http_request(&libcurl);
std::vector<char> scratch;
scratch.insert(scratch.end(), kTestContent.begin(), kTestContent.end());
http_request.SetUri("http:
http_request.AddAuthBearerHeader("fake-bearer");
http_request.SetRange(100, 199);
http_request.SetResultBuffer(&scratch);
TF_EXPECT_OK(http_request.Send());
EXPECT_TRUE(scratch.empty());
EXPECT_EQ(416, http_request.GetResponseCode());
}
TEST(CurlHttpRequestTest, GetRequest_503) {
FakeLibCurl libcurl("get response", 503);
CurlHttpRequest http_request(&libcurl);
std::vector<char> scratch;
scratch.insert(scratch.end(), kTestContent.begin(), kTestContent.end());
http_request.SetUri("http:
http_request.SetResultBuffer(&scratch);
const auto& status = http_request.Send();
EXPECT_EQ(error::UNAVAILABLE, status.code());
EXPECT_EQ(
"Error executing an HTTP request: HTTP response code 503 with body "
"'get response'",
status.message());
}
TEST(CurlHttpRequestTest, GetRequest_HttpCode0) {
FakeLibCurl libcurl("get response", 0);
libcurl.curl_easy_perform_result_ = CURLE_OPERATION_TIMEDOUT;
libcurl.curl_easy_perform_error_message_ = "Operation timed out";
CurlHttpRequest http_request(&libcurl);
std::vector<char> scratch;
scratch.insert(scratch.end(), kTestContent.begin(), kTestContent.end());
http_request.SetUri("http:
const auto& status = http_request.Send();
EXPECT_EQ(error::UNAVAILABLE, status.code());
EXPECT_EQ(
"Error executing an HTTP request: libcurl code 28 meaning "
"'Timeout was reached', error details: Operation timed out",
status.message());
EXPECT_EQ(0, http_request.GetResponseCode());
}
TEST(CurlHttpRequestTest, GetRequest_CouldntResolveHost) {
FakeLibCurl libcurl("get response", 0);
libcurl.curl_easy_perform_result_ = CURLE_COULDNT_RESOLVE_HOST;
libcurl.curl_easy_perform_error_message_ =
"Could not resolve host 'metadata'";
CurlHttpRequest http_request(&libcurl);
std::vector<char> scratch;
scratch.insert(scratch.end(), kTestContent.begin(), kTestContent.end());
http_request.SetUri("http:
const auto& status = http_request.Send();
EXPECT_EQ(error::FAILED_PRECONDITION, status.code());
EXPECT_EQ(
absl::StrCat(
"Error executing an HTTP request: libcurl code 6 meaning ",
(kIsOpenSource ? "'Couldn't resolve host name', error details: "
: "'Could not resolve hostname', error details: "),
"Could not resolve host ", "'metadata'"),
status.message());
EXPECT_EQ(0, http_request.GetResponseCode());
}
TEST(CurlHttpRequestTest, GetRequest_SslBadCertfile) {
FakeLibCurl libcurl("get response", 0);
libcurl.curl_easy_perform_result_ = CURLE_SSL_CACERT_BADFILE;
libcurl.curl_easy_perform_error_message_ =
"error setting certificate verify locations:";
CurlHttpRequest http_request(&libcurl);
std::vector<char> scratch;
scratch.insert(scratch.end(), kTestContent.begin(), kTestContent.end());
http_request.SetUri("http:
const auto& status = http_request.Send();
EXPECT_EQ(error::FAILED_PRECONDITION, status.code());
EXPECT_EQ(
"Error executing an HTTP request: libcurl code 77 meaning "
"'Problem with the SSL CA cert (path? access rights?)', error details: "
"error setting certificate verify locations:",
status.message());
EXPECT_EQ(0, http_request.GetResponseCode());
}
TEST(CurlHttpRequestTest, ResponseHeaders) {
FakeLibCurl libcurl(
"get response", 200,
{"Location: abcd", "Content-Type: text", "unparsable header"});
CurlHttpRequest http_request(&libcurl);
http_request.SetUri("http:
TF_EXPECT_OK(http_request.Send());
EXPECT_EQ("abcd", http_request.GetResponseHeader("Location"));
EXPECT_EQ("text", http_request.GetResponseHeader("Content-Type"));
EXPECT_EQ("", http_request.GetResponseHeader("Not-Seen-Header"));
}
TEST(CurlHttpRequestTest, PutRequest_WithBody_FromFile) {
FakeLibCurl libcurl("", 200);
CurlHttpRequest http_request(&libcurl);
auto content_filename = io::JoinPath(testing::TmpDir(), "content");
std::ofstream content(content_filename, std::ofstream::binary);
content << "post body content";
content.close();
http_request.SetUri("http:
http_request.AddAuthBearerHeader("fake-bearer");
TF_EXPECT_OK(http_request.SetPutFromFile(content_filename, 0));
TF_EXPECT_OK(http_request.Send());
EXPECT_TRUE(libcurl.is_initialized_);
EXPECT_EQ("http:
EXPECT_EQ("", libcurl.custom_request_);
EXPECT_EQ(2, libcurl.headers_->size());
EXPECT_EQ("Authorization: Bearer fake-bearer", (*libcurl.headers_)[0]);
EXPECT_EQ("Content-Length: 17", (*libcurl.headers_)[1]);
EXPECT_TRUE(libcurl.is_put_);
EXPECT_EQ("post body content", libcurl.posted_content_);
std::remove(content_filename.c_str());
}
TEST(CurlHttpRequestTest, PutRequest_WithBody_FromFile_NonZeroOffset) {
FakeLibCurl libcurl("", 200);
CurlHttpRequest http_request(&libcurl);
auto content_filename = io::JoinPath(testing::TmpDir(), "content");
std::ofstream content(content_filename, std::ofstream::binary);
content << "post body content";
content.close();
http_request.SetUri("http:
http_request.AddAuthBearerHeader("fake-bearer");
TF_EXPECT_OK(http_request.SetPutFromFile(content_filename, 7));
TF_EXPECT_OK(http_request.Send());
EXPECT_EQ("dy content", libcurl.posted_content_);
std::remove(content_filename.c_str());
}
TEST(CurlHttpRequestTest, PutRequest_WithoutBody) {
FakeLibCurl libcurl("", 200);
CurlHttpRequest http_request(&libcurl);
http_request.SetUri("http:
http_request.AddAuthBearerHeader("fake-bearer");
http_request.SetPutEmptyBody();
TF_EXPECT_OK(http_request.Send());
EXPECT_TRUE(libcurl.is_initialized_);
EXPECT_EQ("http:
EXPECT_EQ("", libcurl.custom_request_);
EXPECT_EQ(3, libcurl.headers_->size());
EXPECT_EQ("Authorization: Bearer fake-bearer", (*libcurl.headers_)[0]);
EXPECT_EQ("Content-Length: 0", (*libcurl.headers_)[1]);
EXPECT_EQ("Transfer-Encoding: identity", (*libcurl.headers_)[2]);
EXPECT_TRUE(libcurl.is_put_);
EXPECT_EQ("", libcurl.posted_content_);
}
TEST(CurlHttpRequestTest, PostRequest_WithBody_FromMemory) {
FakeLibCurl libcurl("", 200);
CurlHttpRequest http_request(&libcurl);
string content = "post body content";
http_request.SetUri("http:
http_request.AddAuthBearerHeader("fake-bearer");
http_request.SetPostFromBuffer(content.c_str(), content.size());
TF_EXPECT_OK(http_request.Send());
EXPECT_TRUE(libcurl.is_initialized_);
EXPECT_EQ("http:
EXPECT_EQ("", libcurl.custom_request_);
EXPECT_EQ(2, libcurl.headers_->size());
EXPECT_EQ("Authorization: Bearer fake-bearer", (*libcurl.headers_)[0]);
EXPECT_EQ("Content-Length: 17", (*libcurl.headers_)[1]);
EXPECT_TRUE(libcurl.is_post_);
EXPECT_EQ("post body content", libcurl.posted_content_);
}
TEST(CurlHttpRequestTest, PostRequest_WithoutBody) {
FakeLibCurl libcurl("", 200);
CurlHttpRequest http_request(&libcurl);
http_request.SetUri("http:
http_request.AddAuthBearerHeader("fake-bearer");
http_request.SetPostEmptyBody();
TF_EXPECT_OK(http_request.Send());
EXPECT_TRUE(libcurl.is_initialized_);
EXPECT_EQ("http:
EXPECT_EQ("", libcurl.custom_request_);
EXPECT_EQ(3, libcurl.headers_->size());
EXPECT_EQ("Authorization: Bearer fake-bearer", (*libcurl.headers_)[0]);
EXPECT_EQ("Content-Length: 0", (*libcurl.headers_)[1]);
EXPECT_EQ("Transfer-Encoding: identity", (*libcurl.headers_)[2]);
EXPECT_TRUE(libcurl.is_post_);
EXPECT_EQ("", libcurl.posted_content_);
}
TEST(CurlHttpRequestTest, DeleteRequest) {
FakeLibCurl libcurl("", 200);
CurlHttpRequest http_request(&libcurl);
http_request.SetUri("http:
http_request.AddAuthBearerHeader("fake-bearer");
http_request.SetDeleteRequest();
TF_EXPECT_OK(http_request.Send());
EXPECT_TRUE(libcurl.is_initialized_);
EXPECT_EQ("http:
EXPECT_EQ("DELETE", libcurl.custom_request_);
EXPECT_EQ(1, libcurl.headers_->size());
EXPECT_EQ("Authorization: Bearer fake-bearer", (*libcurl.headers_)[0]);
EXPECT_FALSE(libcurl.is_post_);
}
TEST(CurlHttpRequestTest, WrongSequenceOfCalls_NoUri) {
FakeLibCurl libcurl("", 200);
CurlHttpRequest http_request(&libcurl);
ASSERT_DEATH((void)http_request.Send(), "URI has not been set");
}
TEST(CurlHttpRequestTest, WrongSequenceOfCalls_TwoSends) {
FakeLibCurl libcurl("", 200);
CurlHttpRequest http_request(&libcurl);
http_request.SetUri("http:
TF_EXPECT_OK(http_request.Send());
ASSERT_DEATH((void)http_request.Send(), "The request has already been sent");
}
TEST(CurlHttpRequestTest, WrongSequenceOfCalls_ReusingAfterSend) {
FakeLibCurl libcurl("", 200);
CurlHttpRequest http_request(&libcurl);
http_request.SetUri("http:
TF_EXPECT_OK(http_request.Send());
ASSERT_DEATH(http_request.SetUri("http:
"The request has already been sent");
}
TEST(CurlHttpRequestTest, WrongSequenceOfCalls_SettingMethodTwice) {
FakeLibCurl libcurl("", 200);
CurlHttpRequest http_request(&libcurl);
http_request.SetDeleteRequest();
ASSERT_DEATH(http_request.SetPostEmptyBody(),
"HTTP method has been already set");
}
TEST(CurlHttpRequestTest, EscapeString) {
FakeLibCurl libcurl("get response", 200);
CurlHttpRequest http_request(&libcurl);
const string test_string = "a/b/c";
EXPECT_EQ("a%2Fb%2Fc", http_request.EscapeString(test_string));
}
TEST(CurlHttpRequestTest, ErrorReturnsNoResponse) {
FakeLibCurl libcurl("get response", 500);
CurlHttpRequest http_request(&libcurl);
std::vector<char> scratch;
scratch.insert(scratch.begin(), kTestContent.begin(), kTestContent.end());
scratch.reserve(100);
http_request.SetUri("http:
http_request.AddAuthBearerHeader("fake-bearer");
http_request.SetRange(100, 199);
http_request.SetResultBuffer(&scratch);
EXPECT_EQ(error::UNAVAILABLE, http_request.Send().code());
EXPECT_EQ("", string(scratch.begin(), scratch.end()));
}
TEST(CurlHttpRequestTest, ProgressIsOk) {
FakeEnv env;
FakeLibCurl libcurl(
"test", 200,
{
          std::make_tuple(100, 0),
          std::make_tuple(110, 0),
          std::make_tuple(200, 100)
},
&env);
CurlHttpRequest http_request(&libcurl, &env);
http_request.SetUri("http:
TF_EXPECT_OK(http_request.Send());
}
TEST(CurlHttpRequestTest, ProgressIsStuck) {
FakeEnv env;
FakeLibCurl libcurl(
"test", 200,
{
          std::make_tuple(100, 10),
          std::make_tuple(130, 10),
          std::make_tuple(170, 10)
},
&env);
CurlHttpRequest http_request(&libcurl, &env);
http_request.SetUri("http:
auto status = http_request.Send();
EXPECT_EQ(error::UNAVAILABLE, status.code());
EXPECT_EQ(
"Error executing an HTTP request: libcurl code 42 meaning 'Operation "
"was aborted by an application callback', error details: (none)",
status.message());
}
class TestStats : public HttpRequest::RequestStats {
public:
~TestStats() override = default;
void RecordRequest(const HttpRequest* request, const string& uri,
HttpRequest::RequestMethod method) override {
has_recorded_request_ = true;
record_request_request_ = request;
record_request_uri_ = uri;
record_request_method_ = method;
}
void RecordResponse(const HttpRequest* request, const string& uri,
HttpRequest::RequestMethod method,
const absl::Status& result) override {
has_recorded_response_ = true;
record_response_request_ = request;
record_response_uri_ = uri;
record_response_method_ = method;
record_response_result_ = result;
}
const HttpRequest* record_request_request_ = nullptr;
string record_request_uri_ = "http:
HttpRequest::RequestMethod record_request_method_ =
HttpRequest::RequestMethod::kGet;
const HttpRequest* record_response_request_ = nullptr;
string record_response_uri_ = "http:
HttpRequest::RequestMethod record_response_method_ =
HttpRequest::RequestMethod::kGet;
absl::Status record_response_result_;
bool has_recorded_request_ = false;
bool has_recorded_response_ = false;
};
class StatsTestFakeLibCurl : public FakeLibCurl {
public:
StatsTestFakeLibCurl(TestStats* stats, const string& response_content,
uint64 response_code)
: FakeLibCurl(response_content, response_code), stats_(stats) {}
CURLcode curl_easy_perform(CURL* curl) override {
CHECK(!performed_request_);
performed_request_ = true;
stats_had_recorded_request_ = stats_->has_recorded_request_;
stats_had_recorded_response_ = stats_->has_recorded_response_;
return FakeLibCurl::curl_easy_perform(curl);
};
TestStats* stats_;
bool performed_request_ = false;
bool stats_had_recorded_request_;
bool stats_had_recorded_response_;
};
TEST(CurlHttpRequestTest, StatsGetSuccessful) {
TestStats stats;
StatsTestFakeLibCurl libcurl(&stats, "get response", 200);
CurlHttpRequest http_request(&libcurl);
std::vector<char> scratch;
scratch.insert(scratch.begin(), kTestContent.begin(), kTestContent.end());
scratch.reserve(100);
http_request.SetRequestStats(&stats);
http_request.SetUri("http:
http_request.AddAuthBearerHeader("fake-bearer");
http_request.SetRange(100, 199);
http_request.SetResultBuffer(&scratch);
TF_EXPECT_OK(http_request.Send());
EXPECT_EQ("get response", string(scratch.begin(), scratch.end()));
ASSERT_TRUE(stats.has_recorded_request_);
EXPECT_EQ(&http_request, stats.record_request_request_);
EXPECT_EQ("http:
EXPECT_EQ(HttpRequest::RequestMethod::kGet, stats.record_request_method_);
ASSERT_TRUE(stats.has_recorded_response_);
EXPECT_EQ(&http_request, stats.record_response_request_);
EXPECT_EQ("http:
EXPECT_EQ(HttpRequest::RequestMethod::kGet, stats.record_response_method_);
TF_EXPECT_OK(stats.record_response_result_);
EXPECT_TRUE(libcurl.performed_request_);
EXPECT_TRUE(libcurl.stats_had_recorded_request_);
EXPECT_FALSE(libcurl.stats_had_recorded_response_);
}
TEST(CurlHttpRequestTest, StatsGetNotFound) {
TestStats stats;
StatsTestFakeLibCurl libcurl(&stats, "get other response", 404);
CurlHttpRequest http_request(&libcurl);
std::vector<char> scratch;
scratch.insert(scratch.begin(), kTestContent.begin(), kTestContent.end());
scratch.reserve(100);
http_request.SetRequestStats(&stats);
http_request.SetUri("http:
http_request.AddAuthBearerHeader("fake-bearer");
http_request.SetRange(100, 199);
http_request.SetResultBuffer(&scratch);
absl::Status s = http_request.Send();
ASSERT_TRUE(stats.has_recorded_request_);
EXPECT_EQ(&http_request, stats.record_request_request_);
EXPECT_EQ("http:
EXPECT_EQ(HttpRequest::RequestMethod::kGet, stats.record_request_method_);
ASSERT_TRUE(stats.has_recorded_response_);
EXPECT_EQ(&http_request, stats.record_response_request_);
EXPECT_EQ("http:
EXPECT_EQ(HttpRequest::RequestMethod::kGet, stats.record_response_method_);
EXPECT_TRUE(absl::IsNotFound(stats.record_response_result_));
EXPECT_EQ(s, stats.record_response_result_);
EXPECT_TRUE(libcurl.performed_request_);
EXPECT_TRUE(libcurl.stats_had_recorded_request_);
EXPECT_FALSE(libcurl.stats_had_recorded_response_);
}
TEST(CurlHttpRequestTest, StatsPost) {
TestStats stats;
FakeLibCurl libcurl("", 200);
CurlHttpRequest http_request(&libcurl);
http_request.SetRequestStats(&stats);
string content = "post body content";
http_request.SetUri("http:
http_request.SetPostFromBuffer(content.c_str(), content.size());
TF_EXPECT_OK(http_request.Send());
ASSERT_TRUE(stats.has_recorded_request_);
EXPECT_EQ(&http_request, stats.record_request_request_);
EXPECT_EQ("http:
EXPECT_EQ(HttpRequest::RequestMethod::kPost, stats.record_request_method_);
ASSERT_TRUE(stats.has_recorded_response_);
EXPECT_EQ(&http_request, stats.record_response_request_);
EXPECT_EQ("http:
EXPECT_EQ(HttpRequest::RequestMethod::kPost, stats.record_response_method_);
TF_EXPECT_OK(stats.record_response_result_);
}
TEST(CurlHttpRequestTest, StatsDelete) {
TestStats stats;
FakeLibCurl libcurl("", 200);
CurlHttpRequest http_request(&libcurl);
http_request.SetRequestStats(&stats);
http_request.SetUri("http:
http_request.SetDeleteRequest();
TF_EXPECT_OK(http_request.Send());
ASSERT_TRUE(stats.has_recorded_request_);
EXPECT_EQ(&http_request, stats.record_request_request_);
EXPECT_EQ("http:
EXPECT_EQ(HttpRequest::RequestMethod::kDelete, stats.record_request_method_);
ASSERT_TRUE(stats.has_recorded_response_);
EXPECT_EQ(&http_request, stats.record_response_request_);
EXPECT_EQ("http:
EXPECT_EQ(HttpRequest::RequestMethod::kDelete, stats.record_response_method_);
TF_EXPECT_OK(stats.record_response_result_);
}
TEST(CurlHttpRequestTest, StatsPut) {
TestStats stats;
FakeLibCurl libcurl("", 200);
CurlHttpRequest http_request(&libcurl);
http_request.SetRequestStats(&stats);
http_request.SetUri("http:
http_request.AddAuthBearerHeader("fake-bearer");
http_request.SetPutEmptyBody();
TF_EXPECT_OK(http_request.Send());
ASSERT_TRUE(stats.has_recorded_request_);
EXPECT_EQ(&http_request, stats.record_request_request_);
EXPECT_EQ("http:
EXPECT_EQ(HttpRequest::RequestMethod::kPut, stats.record_request_method_);
ASSERT_TRUE(stats.has_recorded_response_);
EXPECT_EQ(&http_request, stats.record_response_request_);
EXPECT_EQ("http:
EXPECT_EQ(HttpRequest::RequestMethod::kPut, stats.record_response_method_);
TF_EXPECT_OK(stats.record_response_result_);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/cloud/curl_http_request.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/cloud/curl_http_request_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
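An aside on the aborted-transfer behavior exercised by ProgressIsStuck above: libcurl maps a non-zero return from the progress callback to CURLE_ABORTED_BY_CALLBACK (code 42), which is exactly the error string the test asserts. The following is a minimal, hypothetical sketch of an inactivity-based abort using libcurl's public xferinfo API; the Progress struct, the XferInfo name, and the 60-second threshold are illustrative assumptions, not part of the file above.

#include <ctime>

#include <curl/curl.h>

struct Progress {
  time_t last_change = 0;     // wall-clock time of the last byte-count change
  curl_off_t last_dlnow = 0;  // bytes downloaded at that time
};

// Returning non-zero from an xferinfo callback makes curl_easy_perform()
// fail with CURLE_ABORTED_BY_CALLBACK (42), matching the message the test
// checks.
static int XferInfo(void* clientp, curl_off_t /*dltotal*/, curl_off_t dlnow,
                    curl_off_t /*ultotal*/, curl_off_t /*ulnow*/) {
  auto* p = static_cast<Progress*>(clientp);
  const time_t now = time(nullptr);
  if (dlnow != p->last_dlnow) {
    p->last_dlnow = dlnow;
    p->last_change = now;
  } else if (p->last_change != 0 && now - p->last_change > 60) {
    return 1;  // assumed inactivity budget: abort after 60 stuck seconds
  }
  if (p->last_change == 0) p->last_change = now;
  return 0;
}

// Wiring (a CURL* handle is assumed to already exist):
//   Progress progress;
//   curl_easy_setopt(handle, CURLOPT_XFERINFOFUNCTION, XferInfo);
//   curl_easy_setopt(handle, CURLOPT_XFERINFODATA, &progress);
//   curl_easy_setopt(handle, CURLOPT_NOPROGRESS, 0L);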
62bcdf42-4fcb-4005-9282-b66430d5d200 | cpp | tensorflow/tensorflow | fold_old_batch_norms | tensorflow/tools/graph_transforms/fold_old_batch_norms.cc | tensorflow/tools/graph_transforms/fold_old_batch_norms_test.cc | #include "tensorflow/core/common_runtime/constant_folding.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/graph/subgraph.h"
#include "tensorflow/core/platform/init_main.h"
#include "tensorflow/core/public/session.h"
#include "tensorflow/tools/graph_transforms/fold_constants_lib.h"
#include "tensorflow/tools/graph_transforms/transform_utils.h"
namespace tensorflow {
namespace graph_transforms {
namespace {
Status ErrorIfNotVector(const Tensor& input, const string& input_name,
int expected_width) {
if ((input.shape().dims() != 1) ||
(input.shape().dim_size(0) != expected_width)) {
return errors::InvalidArgument(
input_name,
" input to batch norm has bad shape: ", input.shape().DebugString());
}
return OkStatus();
}
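// The fold below rewrites batch normalization into a per-channel affine
// transform. With scale_after_normalization (or a fused op), the math is:
//   scale[c]  = gamma[c] / sqrt(variance[c] + epsilon)
//   offset[c] = beta[c] - mean[c] * scale[c]
// so that y = scale * x + offset reproduces
//   y = gamma * (x - mean) / sqrt(variance + epsilon) + beta.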
Status GetScaleAndOffsetValues(const NodeMatch& match,
std::vector<float>* scale_values,
std::vector<float>* offset_values) {
const NodeDef& batch_norm_node = match.node;
CHECK(batch_norm_node.op() == "BatchNormWithGlobalNormalization" ||
batch_norm_node.op() == "FusedBatchNorm");
const bool is_fused = batch_norm_node.op() == "FusedBatchNorm";
const int mean_idx = is_fused ? 3 : 1;
const int var_idx = is_fused ? 4 : 2;
const int beta_idx = is_fused ? 2 : 3;
const int gamma_idx = is_fused ? 1 : 4;
const string epsilon_attr = is_fused ? "epsilon" : "variance_epsilon";
const bool scale_after_normalization =
is_fused || batch_norm_node.attr().at("scale_after_normalization").b();
const NodeDef& mean_node = match.inputs[mean_idx].node;
CHECK_EQ("Const", mean_node.op());
const NodeDef& variance_node = match.inputs[var_idx].node;
CHECK_EQ("Const", variance_node.op());
const NodeDef& beta_node = match.inputs[beta_idx].node;
CHECK_EQ("Const", beta_node.op());
const NodeDef& gamma_node = match.inputs[gamma_idx].node;
CHECK_EQ("Const", gamma_node.op());
Tensor mean = GetNodeTensorAttr(mean_node, "value");
Tensor variance = GetNodeTensorAttr(variance_node, "value");
Tensor beta = GetNodeTensorAttr(beta_node, "value");
Tensor gamma = GetNodeTensorAttr(gamma_node, "value");
const float variance_epsilon = batch_norm_node.attr().at(epsilon_attr).f();
const int64_t num_cols = mean.shape().dim_size(0);
TF_RETURN_IF_ERROR(ErrorIfNotVector(variance, "Variance", num_cols));
TF_RETURN_IF_ERROR(ErrorIfNotVector(beta, "Beta", num_cols));
TF_RETURN_IF_ERROR(ErrorIfNotVector(gamma, "gamma", num_cols));
scale_values->resize(num_cols);
offset_values->resize(num_cols);
if (scale_after_normalization) {
for (int i = 0; i < num_cols; ++i) {
(*scale_values)[i] =
(1.0f / sqrtf(variance.flat<float>()(i) + variance_epsilon)) *
gamma.flat<float>()(i);
}
} else {
for (int i = 0; i < num_cols; ++i) {
(*scale_values)[i] =
(1.0f / sqrtf(variance.flat<float>()(i) + variance_epsilon));
}
}
for (int i = 0; i < num_cols; ++i) {
(*offset_values)[i] =
(-mean.flat<float>()(i) * (*scale_values)[i]) + beta.flat<float>()(i);
}
return OkStatus();
}
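// Folds the per-channel scale into the convolution weights (each output
// channel's filter taps are multiplied by scale[c]) and materializes the
// per-channel offset as a new Const feeding a BiasAdd appended after the
// convolution. The weight index is taken modulo the number of output
// channels because the filter's innermost dimension is the channel axis.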
Status FuseScaleOffsetToConvWeights(const std::vector<float>& scale_values,
const std::vector<float>& offset_values,
const NodeMatch& conv_node_match,
const string& conv_output_name,
std::vector<NodeDef>* new_nodes) {
const NodeDef& conv_node = conv_node_match.node;
const NodeDef& input_node = conv_node_match.inputs[0].node;
const NodeDef& weights_node = conv_node_match.inputs[1].node;
CHECK_EQ("Const", weights_node.op());
Tensor weights = GetNodeTensorAttr(weights_node, "value");
int64_t weights_cols;
if (conv_node.op() == "Conv2D") {
weights_cols = weights.shape().dim_size(3);
} else if (conv_node.op() == "DepthwiseConv2dNative") {
weights_cols = weights.shape().dim_size(2) * weights.shape().dim_size(3);
} else {
weights_cols = weights.shape().dim_size(1);
}
CHECK_EQ(weights_cols, scale_values.size());
auto weights_vector = weights.flat<float>();
Tensor scaled_weights(DT_FLOAT, weights.shape());
auto scaled_weights_vector = scaled_weights.flat<float>();
for (int64_t row = 0; row < weights_vector.dimension(0); ++row) {
scaled_weights_vector(row) =
weights_vector(row) * scale_values[row % weights_cols];
}
Tensor bias_offset(DT_FLOAT, {weights_cols});
auto bias_offset_vector = bias_offset.flat<float>();
for (int64_t col = 0; col < weights_cols; ++col) {
bias_offset_vector(col) = offset_values[col];
}
NodeDef scaled_weights_node;
scaled_weights_node.set_op("Const");
scaled_weights_node.set_name(weights_node.name());
SetNodeAttr("dtype", DT_FLOAT, &scaled_weights_node);
SetNodeTensorAttr<float>("value", scaled_weights, &scaled_weights_node);
new_nodes->push_back(scaled_weights_node);
new_nodes->push_back(input_node);
new_nodes->push_back(conv_node);
NodeDef bias_offset_node;
bias_offset_node.set_op("Const");
bias_offset_node.set_name(conv_node.name() + "_bn_offset");
SetNodeAttr("dtype", DT_FLOAT, &bias_offset_node);
SetNodeTensorAttr<float>("value", bias_offset, &bias_offset_node);
new_nodes->push_back(bias_offset_node);
NodeDef bias_add_node;
bias_add_node.set_op("BiasAdd");
bias_add_node.set_name(conv_output_name);
if (conv_node.attr().count("data_format")) {
CopyNodeAttr(conv_node, "data_format", "data_format", &bias_add_node);
}
CopyNodeAttr(conv_node, "T", "T", &bias_add_node);
AddNodeInput(conv_node.name(), &bias_add_node);
AddNodeInput(bias_offset_node.name(), &bias_add_node);
new_nodes->push_back(bias_add_node);
return OkStatus();
}
Status FuseBatchNormWithConv(const NodeMatch& match,
std::vector<NodeDef>* new_nodes) {
std::vector<float> scale_values;
std::vector<float> offset_values;
TF_RETURN_IF_ERROR(
GetScaleAndOffsetValues(match, &scale_values, &offset_values));
const NodeDef& batch_norm_node = match.node;
TF_RETURN_IF_ERROR(
FuseScaleOffsetToConvWeights(scale_values, offset_values, match.inputs[0],
batch_norm_node.name(), new_nodes));
return OkStatus();
}
Status FuseBatchNormWithBatchToSpace(const NodeMatch& match,
std::vector<NodeDef>* new_nodes) {
std::vector<float> scale_values;
std::vector<float> offset_values;
TF_RETURN_IF_ERROR(
GetScaleAndOffsetValues(match, &scale_values, &offset_values));
const NodeDef& batch_norm_node = match.node;
const NodeMatch& batch_to_space_node_match = match.inputs[0];
const NodeMatch& conv_node_match = batch_to_space_node_match.inputs[0];
const NodeDef& batch_to_space_node = batch_to_space_node_match.node;
const NodeDef& conv_node = conv_node_match.node;
string biasadd_name = conv_node.name() + "/biasadd";
TF_RETURN_IF_ERROR(FuseScaleOffsetToConvWeights(
scale_values, offset_values, conv_node_match, biasadd_name, new_nodes));
NodeDef new_batch_to_space_node = batch_to_space_node;
new_batch_to_space_node.set_name(batch_norm_node.name());
new_batch_to_space_node.set_input(0, biasadd_name);
new_nodes->push_back(batch_to_space_node_match.inputs[1].node);
new_nodes->push_back(batch_to_space_node_match.inputs[2].node);
new_nodes->push_back(new_batch_to_space_node);
return OkStatus();
}
Status FuseBatchNormWithConvConcat(const NodeMatch& match,
std::vector<NodeDef>* new_nodes) {
std::vector<float> scale_values;
std::vector<float> offset_values;
TF_RETURN_IF_ERROR(
GetScaleAndOffsetValues(match, &scale_values, &offset_values));
const NodeDef& batch_norm_node = match.node;
const NodeMatch& concat_node_match = match.inputs[0];
NodeDef concat_node = concat_node_match.node;
CHECK_EQ("ConcatV2", concat_node.op());
NodeDef axis_node = concat_node_match.inputs[2].node;
CHECK_EQ("Const", axis_node.op());
Tensor axis = GetNodeTensorAttr(axis_node, "value");
int32_t axis_scalar = (axis.scalar<int32>())();
std::vector<float> scale0(scale_values);
std::vector<float> offset0(offset_values);
std::vector<float> scale1(scale_values);
std::vector<float> offset1(offset_values);
if (axis_scalar == 3) {
const NodeDef& weights0_node = concat_node_match.inputs[0].inputs[1].node;
Tensor weights0 = GetNodeTensorAttr(weights0_node, "value");
const int64_t split_cols = weights0.shape().dim_size(3);
scale0.erase(scale0.begin() + split_cols, scale0.end());
offset0.erase(offset0.begin() + split_cols, offset0.end());
scale1.erase(scale1.begin(), scale1.begin() + split_cols);
offset1.erase(offset1.begin(), offset1.begin() + split_cols);
}
const string concat0_output_name = concat_node.name() + "_bn_in0";
TF_RETURN_IF_ERROR(
FuseScaleOffsetToConvWeights(scale0, offset0, concat_node_match.inputs[0],
concat0_output_name, new_nodes));
const string concat1_output_name = concat_node.name() + "_bn_in1";
TF_RETURN_IF_ERROR(
FuseScaleOffsetToConvWeights(scale1, offset1, concat_node_match.inputs[1],
concat1_output_name, new_nodes));
new_nodes->push_back(concat_node_match.inputs[2].node);
concat_node.set_name(batch_norm_node.name());
concat_node.set_input(0, concat0_output_name);
concat_node.set_input(1, concat1_output_name);
new_nodes->push_back(concat_node);
return OkStatus();
}
}
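// Runs three pattern rewrites, each to a fixed point: (1) batch norm
// directly after a Conv2D/DepthwiseConv2dNative, (2) batch norm after a
// BatchToSpaceND that follows a convolution, and (3) batch norm after a
// two-input Concat of convolutions. Each loop repeats until a pass makes no
// further changes to the graph.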
Status FoldOldBatchNorms(const GraphDef& input_graph_def,
const TransformFuncContext& context,
GraphDef* output_graph_def) {
GraphDef current_graph_def = input_graph_def;
bool did_graph_change;
do {
did_graph_change = false;
GraphDef replaced_graph_def;
TF_RETURN_IF_ERROR(ReplaceMatchingOpTypes(
current_graph_def,
{"BatchNormWithGlobalNormalization|FusedBatchNorm",
{
{"Conv2D|DepthwiseConv2dNative",
{
{"*"},
{"Const"},
}
},
{"Const"},
{"Const"},
{"Const"},
{"Const"},
}
},
[&did_graph_change](const NodeMatch& match,
const std::set<string>& input_nodes,
const std::set<string>& output_nodes,
std::vector<NodeDef>* new_nodes) {
TF_RETURN_IF_ERROR(FuseBatchNormWithConv(match, new_nodes));
did_graph_change = true;
return OkStatus();
},
{}, &replaced_graph_def));
current_graph_def = replaced_graph_def;
} while (did_graph_change);
do {
did_graph_change = false;
GraphDef replaced_graph_def;
TF_RETURN_IF_ERROR(ReplaceMatchingOpTypes(
current_graph_def,
{"BatchNormWithGlobalNormalization|FusedBatchNorm",
{
{"BatchToSpaceND",
{
{"Conv2D|DepthwiseConv2dNative",
{
{"*"},
{"Const"},
}
},
{"Const"},
{"Const"},
}
},
{"Const"},
{"Const"},
{"Const"},
{"Const"},
}
},
[&did_graph_change](const NodeMatch& match,
const std::set<string>& input_nodes,
const std::set<string>& output_nodes,
std::vector<NodeDef>* new_nodes) {
TF_RETURN_IF_ERROR(FuseBatchNormWithBatchToSpace(match, new_nodes));
did_graph_change = true;
return OkStatus();
},
{}, &replaced_graph_def));
current_graph_def = replaced_graph_def;
} while (did_graph_change);
do {
did_graph_change = false;
GraphDef replaced_graph_def;
TF_RETURN_IF_ERROR(ReplaceMatchingOpTypes(
current_graph_def,
{"BatchNormWithGlobalNormalization|FusedBatchNorm",
{
{"ConcatV2|Concat",
{
{"Conv2D|DepthwiseConv2dNative",
{
{"*"},
{"Const"},
}
},
{"Conv2D|DepthwiseConv2dNative",
{
{"*"},
{"Const"},
}
},
{"Const"},
},
},
{"Const"},
{"Const"},
{"Const"},
{"Const"},
}
},
[&did_graph_change](const NodeMatch& match,
const std::set<string>& input_nodes,
const std::set<string>& output_nodes,
std::vector<NodeDef>* new_nodes) {
TF_RETURN_IF_ERROR(FuseBatchNormWithConvConcat(match, new_nodes));
did_graph_change = true;
return OkStatus();
},
{}, &replaced_graph_def));
current_graph_def = replaced_graph_def;
} while (did_graph_change);
*output_graph_def = current_graph_def;
return OkStatus();
}
REGISTER_GRAPH_TRANSFORM("fold_old_batch_norms", FoldOldBatchNorms);
}
} | #include "tensorflow/cc/ops/const_op.h"
#include "tensorflow/cc/ops/image_ops.h"
#include "tensorflow/cc/ops/nn_ops.h"
#include "tensorflow/cc/ops/array_ops.h"
#include "tensorflow/cc/ops/sendrecv_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/versions.pb.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
#include "tensorflow/core/public/session.h"
#include "tensorflow/tools/graph_transforms/transform_utils.h"
namespace tensorflow {
namespace graph_transforms {
Status FoldOldBatchNorms(const GraphDef& input_graph_def,
const TransformFuncContext& context,
GraphDef* output_graph_def);
class FoldOldBatchNormsTest : public ::testing::Test {
protected:
void TestFoldOldBatchNorms() {
auto root = tensorflow::Scope::NewRootScope();
using namespace ::tensorflow::ops;
Tensor input_data(DT_FLOAT, TensorShape({1, 1, 6, 2}));
test::FillValues<float>(
&input_data, {1.0f, 4.0f, 2.0f, 5.0f, 3.0f, 6.0f, -1.0f, -4.0f, -2.0f,
-5.0f, -3.0f, -6.0f});
Output input_op =
Const(root.WithOpName("input_op"), Input::Initializer(input_data));
Tensor weights_data(DT_FLOAT, TensorShape({1, 2, 2, 2}));
test::FillValues<float>(&weights_data,
{1.0f, 2.0f, 3.0f, 4.0f, 0.1f, 0.2f, 0.3f, 0.4f});
Output weights_op =
Const(root.WithOpName("weights_op"), Input::Initializer(weights_data));
Output conv_op = Conv2D(root.WithOpName("conv_op"), input_op, weights_op,
{1, 1, 1, 1}, "VALID");
Tensor mean_data(DT_FLOAT, TensorShape({2}));
test::FillValues<float>(&mean_data, {10.0f, 20.0f});
Output mean_op =
Const(root.WithOpName("mean_op"), Input::Initializer(mean_data));
Tensor variance_data(DT_FLOAT, TensorShape({2}));
test::FillValues<float>(&variance_data, {0.25f, 0.5f});
Output variance_op = Const(root.WithOpName("variance_op"),
Input::Initializer(variance_data));
Tensor beta_data(DT_FLOAT, TensorShape({2}));
test::FillValues<float>(&beta_data, {0.1f, 0.6f});
Output beta_op =
Const(root.WithOpName("beta_op"), Input::Initializer(beta_data));
Tensor gamma_data(DT_FLOAT, TensorShape({2}));
test::FillValues<float>(&gamma_data, {1.0f, 2.0f});
Output gamma_op =
Const(root.WithOpName("gamma_op"), Input::Initializer(gamma_data));
GraphDef original_graph_def;
TF_ASSERT_OK(root.ToGraphDef(&original_graph_def));
NodeDef batch_norm_node;
batch_norm_node.set_op("BatchNormWithGlobalNormalization");
batch_norm_node.set_name("output");
AddNodeInput("conv_op", &batch_norm_node);
AddNodeInput("mean_op", &batch_norm_node);
AddNodeInput("variance_op", &batch_norm_node);
AddNodeInput("beta_op", &batch_norm_node);
AddNodeInput("gamma_op", &batch_norm_node);
SetNodeAttr("T", DT_FLOAT, &batch_norm_node);
SetNodeAttr("variance_epsilon", 0.00001f, &batch_norm_node);
SetNodeAttr("scale_after_normalization", false, &batch_norm_node);
*(original_graph_def.mutable_node()->Add()) = batch_norm_node;
original_graph_def.mutable_versions()->set_producer(8);
std::unique_ptr<Session> original_session(NewSession(SessionOptions()));
TF_ASSERT_OK(original_session->Create(original_graph_def));
std::vector<Tensor> original_outputs;
TF_ASSERT_OK(original_session->Run({}, {"output"}, {}, &original_outputs));
GraphDef fused_graph_def;
TF_ASSERT_OK(FoldOldBatchNorms(original_graph_def, {{}, {"output"}},
&fused_graph_def));
std::unique_ptr<Session> fused_session(NewSession(SessionOptions()));
TF_ASSERT_OK(fused_session->Create(fused_graph_def));
std::vector<Tensor> fused_outputs;
TF_ASSERT_OK(fused_session->Run({}, {"output"}, {}, &fused_outputs));
test::ExpectTensorNear<float>(original_outputs[0], fused_outputs[0], 1e-5);
for (const NodeDef& node : fused_graph_def.node()) {
EXPECT_NE("BatchNormWithGlobalNormalization", node.op());
}
}
void TestFoldOldBatchNormsAfterDepthwiseConv2dNative() {
auto root = tensorflow::Scope::NewRootScope();
using namespace ::tensorflow::ops;
Tensor input_data(DT_FLOAT, TensorShape({1, 1, 6, 2}));
test::FillValues<float>(
&input_data, {1.0f, 4.0f, 2.0f, 5.0f, 3.0f, 6.0f, -1.0f, -4.0f, -2.0f,
-5.0f, -3.0f, -6.0f});
Output input_op =
Const(root.WithOpName("input_op"), Input::Initializer(input_data));
Tensor weights_data(DT_FLOAT, TensorShape({1, 2, 2, 2}));
test::FillValues<float>(&weights_data,
{1.0f, 2.0f, 3.0f, 4.0f, 0.1f, 0.2f, 0.3f, 0.4f});
Output weights_op =
Const(root.WithOpName("weights_op"), Input::Initializer(weights_data));
Output conv_op = DepthwiseConv2dNative(root.WithOpName("conv_op"), input_op,
weights_op, {1, 1, 1, 1}, "VALID");
Tensor mean_data(DT_FLOAT, TensorShape({4}));
test::FillValues<float>(&mean_data, {10.0f, 20.0f, 30.0f, 40.0f});
Output mean_op =
Const(root.WithOpName("mean_op"), Input::Initializer(mean_data));
Tensor variance_data(DT_FLOAT, TensorShape({4}));
test::FillValues<float>(&variance_data, {0.25f, 0.5f, 0.75f, 1.0f});
Output variance_op = Const(root.WithOpName("variance_op"),
Input::Initializer(variance_data));
Tensor beta_data(DT_FLOAT, TensorShape({4}));
test::FillValues<float>(&beta_data, {0.1f, 0.6f, 1.1f, 1.6f});
Output beta_op =
Const(root.WithOpName("beta_op"), Input::Initializer(beta_data));
Tensor gamma_data(DT_FLOAT, TensorShape({4}));
test::FillValues<float>(&gamma_data, {1.0f, 2.0f, 3.0f, 4.0f});
Output gamma_op =
Const(root.WithOpName("gamma_op"), Input::Initializer(gamma_data));
GraphDef original_graph_def;
TF_ASSERT_OK(root.ToGraphDef(&original_graph_def));
NodeDef batch_norm_node;
batch_norm_node.set_op("BatchNormWithGlobalNormalization");
batch_norm_node.set_name("output");
AddNodeInput("conv_op", &batch_norm_node);
AddNodeInput("mean_op", &batch_norm_node);
AddNodeInput("variance_op", &batch_norm_node);
AddNodeInput("beta_op", &batch_norm_node);
AddNodeInput("gamma_op", &batch_norm_node);
SetNodeAttr("T", DT_FLOAT, &batch_norm_node);
SetNodeAttr("variance_epsilon", 0.00001f, &batch_norm_node);
SetNodeAttr("scale_after_normalization", false, &batch_norm_node);
*(original_graph_def.mutable_node()->Add()) = batch_norm_node;
original_graph_def.mutable_versions()->set_producer(8);
std::unique_ptr<Session> original_session(NewSession(SessionOptions()));
TF_ASSERT_OK(original_session->Create(original_graph_def));
std::vector<Tensor> original_outputs;
TF_ASSERT_OK(original_session->Run({}, {"output"}, {}, &original_outputs));
GraphDef fused_graph_def;
TF_ASSERT_OK(FoldOldBatchNorms(original_graph_def, {{}, {"output"}},
&fused_graph_def));
std::unique_ptr<Session> fused_session(NewSession(SessionOptions()));
TF_ASSERT_OK(fused_session->Create(fused_graph_def));
std::vector<Tensor> fused_outputs;
TF_ASSERT_OK(fused_session->Run({}, {"output"}, {}, &fused_outputs));
test::ExpectTensorNear<float>(original_outputs[0], fused_outputs[0], 1e-5);
for (const NodeDef& node : fused_graph_def.node()) {
EXPECT_NE("BatchNormWithGlobalNormalization", node.op());
}
}
void TestFoldFusedBatchNorms() {
auto root = tensorflow::Scope::NewRootScope();
using namespace ::tensorflow::ops;
Tensor input_data(DT_FLOAT, TensorShape({1, 1, 6, 2}));
test::FillValues<float>(
&input_data, {1.0f, 4.0f, 2.0f, 5.0f, 3.0f, 6.0f, -1.0f, -4.0f, -2.0f,
-5.0f, -3.0f, -6.0f});
Output input_op =
Const(root.WithOpName("input_op"), Input::Initializer(input_data));
Tensor weights_data(DT_FLOAT, TensorShape({1, 2, 2, 2}));
test::FillValues<float>(&weights_data,
{1.0f, 2.0f, 3.0f, 4.0f, 0.1f, 0.2f, 0.3f, 0.4f});
Output weights_op =
Const(root.WithOpName("weights_op"), Input::Initializer(weights_data));
Output conv_op = Conv2D(root.WithOpName("conv_op"), input_op, weights_op,
{1, 1, 1, 1}, "VALID");
Tensor mean_data(DT_FLOAT, TensorShape({2}));
test::FillValues<float>(&mean_data, {10.0f, 20.0f});
Output mean_op =
Const(root.WithOpName("mean_op"), Input::Initializer(mean_data));
Tensor variance_data(DT_FLOAT, TensorShape({2}));
test::FillValues<float>(&variance_data, {0.25f, 0.5f});
Output variance_op = Const(root.WithOpName("variance_op"),
Input::Initializer(variance_data));
Tensor beta_data(DT_FLOAT, TensorShape({2}));
test::FillValues<float>(&beta_data, {0.1f, 0.6f});
Output beta_op =
Const(root.WithOpName("beta_op"), Input::Initializer(beta_data));
Tensor gamma_data(DT_FLOAT, TensorShape({2}));
test::FillValues<float>(&gamma_data, {1.0f, 2.0f});
Output gamma_op =
Const(root.WithOpName("gamma_op"), Input::Initializer(gamma_data));
GraphDef original_graph_def;
TF_ASSERT_OK(root.ToGraphDef(&original_graph_def));
NodeDef batch_norm_node;
batch_norm_node.set_op("FusedBatchNorm");
batch_norm_node.set_name("output");
AddNodeInput("conv_op", &batch_norm_node);
AddNodeInput("gamma_op", &batch_norm_node);
AddNodeInput("beta_op", &batch_norm_node);
AddNodeInput("mean_op", &batch_norm_node);
AddNodeInput("variance_op", &batch_norm_node);
SetNodeAttr("T", DT_FLOAT, &batch_norm_node);
SetNodeAttr("epsilon", 0.00001f, &batch_norm_node);
SetNodeAttr("is_training", false, &batch_norm_node);
*(original_graph_def.mutable_node()->Add()) = batch_norm_node;
std::unique_ptr<Session> original_session(NewSession(SessionOptions()));
TF_ASSERT_OK(original_session->Create(original_graph_def));
std::vector<Tensor> original_outputs;
TF_ASSERT_OK(original_session->Run({}, {"output"}, {}, &original_outputs));
GraphDef fused_graph_def;
TF_ASSERT_OK(FoldOldBatchNorms(original_graph_def, {{}, {"output"}},
&fused_graph_def));
std::unique_ptr<Session> fused_session(NewSession(SessionOptions()));
TF_ASSERT_OK(fused_session->Create(fused_graph_def));
std::vector<Tensor> fused_outputs;
TF_ASSERT_OK(fused_session->Run({}, {"output"}, {}, &fused_outputs));
test::ExpectTensorNear<float>(original_outputs[0], fused_outputs[0], 2e-5);
for (const NodeDef& node : fused_graph_def.node()) {
EXPECT_NE("FusedBatchNorm", node.op());
}
}
void TestFoldFusedBatchNormsAfterDepthwiseConv2dNative() {
auto root = tensorflow::Scope::NewRootScope();
using namespace ::tensorflow::ops;
Tensor input_data(DT_FLOAT, TensorShape({1, 1, 6, 2}));
test::FillValues<float>(
&input_data, {1.0f, 4.0f, 2.0f, 5.0f, 3.0f, 6.0f, -1.0f, -4.0f, -2.0f,
-5.0f, -3.0f, -6.0f});
Output input_op =
Const(root.WithOpName("input_op"), Input::Initializer(input_data));
Tensor weights_data(DT_FLOAT, TensorShape({1, 2, 2, 2}));
test::FillValues<float>(&weights_data,
{1.0f, 2.0f, 3.0f, 4.0f, 0.1f, 0.2f, 0.3f, 0.4f});
Output weights_op =
Const(root.WithOpName("weights_op"), Input::Initializer(weights_data));
Output conv_op = DepthwiseConv2dNative(root.WithOpName("conv_op"), input_op,
weights_op, {1, 1, 1, 1}, "VALID");
Tensor mean_data(DT_FLOAT, TensorShape({4}));
test::FillValues<float>(&mean_data, {10.0f, 20.0f, 30.0f, 40.0f});
Output mean_op =
Const(root.WithOpName("mean_op"), Input::Initializer(mean_data));
Tensor variance_data(DT_FLOAT, TensorShape({4}));
test::FillValues<float>(&variance_data, {0.25f, 0.5f, 0.75f, 1.0f});
Output variance_op = Const(root.WithOpName("variance_op"),
Input::Initializer(variance_data));
Tensor beta_data(DT_FLOAT, TensorShape({4}));
test::FillValues<float>(&beta_data, {0.1f, 0.6f, 1.1f, 1.6f});
Output beta_op =
Const(root.WithOpName("beta_op"), Input::Initializer(beta_data));
Tensor gamma_data(DT_FLOAT, TensorShape({4}));
test::FillValues<float>(&gamma_data, {1.0f, 2.0f, 3.0f, 4.0f});
Output gamma_op =
Const(root.WithOpName("gamma_op"), Input::Initializer(gamma_data));
GraphDef original_graph_def;
TF_ASSERT_OK(root.ToGraphDef(&original_graph_def));
NodeDef batch_norm_node;
batch_norm_node.set_op("FusedBatchNorm");
batch_norm_node.set_name("output");
AddNodeInput("conv_op", &batch_norm_node);
AddNodeInput("gamma_op", &batch_norm_node);
AddNodeInput("beta_op", &batch_norm_node);
AddNodeInput("mean_op", &batch_norm_node);
AddNodeInput("variance_op", &batch_norm_node);
SetNodeAttr("T", DT_FLOAT, &batch_norm_node);
SetNodeAttr("epsilon", 0.00001f, &batch_norm_node);
SetNodeAttr("is_training", false, &batch_norm_node);
*(original_graph_def.mutable_node()->Add()) = batch_norm_node;
std::unique_ptr<Session> original_session(NewSession(SessionOptions()));
TF_ASSERT_OK(original_session->Create(original_graph_def));
std::vector<Tensor> original_outputs;
TF_ASSERT_OK(original_session->Run({}, {"output"}, {}, &original_outputs));
GraphDef fused_graph_def;
TF_ASSERT_OK(FoldOldBatchNorms(original_graph_def, {{}, {"output"}},
&fused_graph_def));
std::unique_ptr<Session> fused_session(NewSession(SessionOptions()));
TF_ASSERT_OK(fused_session->Create(fused_graph_def));
std::vector<Tensor> fused_outputs;
TF_ASSERT_OK(fused_session->Run({}, {"output"}, {}, &fused_outputs));
    test::ExpectClose(original_outputs[0], fused_outputs[0], 2e-5, 2e-5);
for (const NodeDef& node : fused_graph_def.node()) {
EXPECT_NE("FusedBatchNorm", node.op());
}
}
void TestFoldFusedBatchNormsWithConcat(const bool split) {
auto root = tensorflow::Scope::NewRootScope();
using namespace ::tensorflow::ops;
auto input_shape =
split ? TensorShape({1, 1, 6, 2}) : TensorShape({1, 1, 12, 1});
Tensor input_data(DT_FLOAT, input_shape);
test::FillValues<float>(
&input_data, {1.0f, 4.0f, 2.0f, 5.0f, 3.0f, 6.0f, -1.0f, -4.0f, -2.0f,
-5.0f, -3.0f, -6.0f});
Output input0_op =
Const(root.WithOpName("input_op0"), Input::Initializer(input_data));
auto weight_shape =
split ? TensorShape({1, 2, 2, 1}) : TensorShape({1, 2, 1, 2});
Tensor weights0_data(DT_FLOAT, weight_shape);
test::FillValues<float>(&weights0_data, {1.0f, 2.0f, 3.0f, 4.0f});
Output weights0_op = Const(root.WithOpName("weights1_op"),
Input::Initializer(weights0_data));
Output conv0_op = Conv2D(root.WithOpName("conv1_op"), input0_op,
weights0_op, {1, 1, 1, 1}, "VALID");
Output input1_op =
Const(root.WithOpName("input1_op"), Input::Initializer(input_data));
Tensor weights1_data(DT_FLOAT, weight_shape);
test::FillValues<float>(&weights1_data, {1.0f, 2.0f, 3.0f, 4.0f});
Output weights1_op = Const(root.WithOpName("weights1_op"),
Input::Initializer(weights1_data));
Output conv1_op = Conv2D(root.WithOpName("conv1_op"), input1_op,
weights1_op, {1, 1, 1, 1}, "VALID");
Tensor shape_tensor(DT_INT32, TensorShape({}));
int32_t concat_axis = split ? 3 : 2;
test::FillValues<int32>(&shape_tensor, {concat_axis});
Output shape_op =
Const(root.WithOpName("shape_op"), Input::Initializer(shape_tensor));
Output concat_op =
Concat(root.WithOpName("concat_op"), {conv0_op, conv1_op}, shape_op);
Tensor mean_data(DT_FLOAT, TensorShape({2}));
test::FillValues<float>(&mean_data, {10.0f, 20.0f});
Output mean_op =
Const(root.WithOpName("mean_op"), Input::Initializer(mean_data));
Tensor variance_data(DT_FLOAT, TensorShape({2}));
test::FillValues<float>(&variance_data, {0.25f, 0.5f});
Output variance_op = Const(root.WithOpName("variance_op"),
Input::Initializer(variance_data));
Tensor beta_data(DT_FLOAT, TensorShape({2}));
test::FillValues<float>(&beta_data, {0.1f, 0.6f});
Output beta_op =
Const(root.WithOpName("beta_op"), Input::Initializer(beta_data));
Tensor gamma_data(DT_FLOAT, TensorShape({2}));
test::FillValues<float>(&gamma_data, {1.0f, 2.0f});
Output gamma_op =
Const(root.WithOpName("gamma_op"), Input::Initializer(gamma_data));
GraphDef original_graph_def;
TF_ASSERT_OK(root.ToGraphDef(&original_graph_def));
NodeDef batch_norm_node;
batch_norm_node.set_op("FusedBatchNorm");
batch_norm_node.set_name("output");
AddNodeInput("concat_op", &batch_norm_node);
AddNodeInput("gamma_op", &batch_norm_node);
AddNodeInput("beta_op", &batch_norm_node);
AddNodeInput("mean_op", &batch_norm_node);
AddNodeInput("variance_op", &batch_norm_node);
SetNodeAttr("T", DT_FLOAT, &batch_norm_node);
SetNodeAttr("epsilon", 0.00001f, &batch_norm_node);
SetNodeAttr("is_training", false, &batch_norm_node);
*(original_graph_def.mutable_node()->Add()) = batch_norm_node;
std::unique_ptr<Session> original_session(NewSession(SessionOptions()));
TF_ASSERT_OK(original_session->Create(original_graph_def));
std::vector<Tensor> original_outputs;
TF_ASSERT_OK(original_session->Run({}, {"output"}, {}, &original_outputs));
GraphDef fused_graph_def;
TF_ASSERT_OK(FoldOldBatchNorms(original_graph_def, {{}, {"output"}},
&fused_graph_def));
std::unique_ptr<Session> fused_session(NewSession(SessionOptions()));
TF_ASSERT_OK(fused_session->Create(fused_graph_def));
std::vector<Tensor> fused_outputs;
TF_ASSERT_OK(fused_session->Run({}, {"output"}, {}, &fused_outputs));
test::ExpectClose(original_outputs[0], fused_outputs[0]);
for (const NodeDef& node : fused_graph_def.node()) {
EXPECT_NE("FusedBatchNorm", node.op());
}
}
};
void TestFoldFusedBatchNormsWithBatchToSpace() {
auto root = tensorflow::Scope::NewRootScope();
using namespace ::tensorflow::ops;
Tensor input_data(DT_FLOAT, TensorShape({2, 1, 3, 2}));
test::FillValues<float>(
&input_data, {1.0f, 4.0f, 2.0f, 5.0f, 3.0f, 6.0f, -1.0f, -4.0f, -2.0f,
-5.0f, -3.0f, -6.0f});
Output input_op =
Const(root.WithOpName("input_op"), Input::Initializer(input_data));
Tensor weights_data(DT_FLOAT, TensorShape({1, 2, 2, 2}));
test::FillValues<float>(&weights_data,
{1.0f, 2.0f, 3.0f, 4.0f, 0.1f, 0.2f, 0.3f, 0.4f});
Output weights_op =
Const(root.WithOpName("weights_op"), Input::Initializer(weights_data));
Output conv_op = Conv2D(root.WithOpName("conv_op"), input_op, weights_op,
{1, 1, 1, 1}, "VALID");
Tensor block_shape_data(DT_INT32, TensorShape({2}));
test::FillValues<int32>(&block_shape_data, {1, 2});
Output block_shape_op = Const(root.WithOpName("block_shape_op"),
Input::Initializer(block_shape_data));
Tensor crops_data(DT_INT32, TensorShape({2, 2}));
test::FillValues<int32>(&crops_data, {0, 0, 0, 1});
Output crops_op =
Const(root.WithOpName("crops_op"), Input::Initializer(crops_data));
Output batch_to_space_op =
BatchToSpaceND(root.WithOpName("batch_to_space_op"), conv_op,
block_shape_op, crops_data);
Tensor mean_data(DT_FLOAT, TensorShape({2}));
test::FillValues<float>(&mean_data, {10.0f, 20.0f});
Output mean_op =
Const(root.WithOpName("mean_op"), Input::Initializer(mean_data));
Tensor variance_data(DT_FLOAT, TensorShape({2}));
test::FillValues<float>(&variance_data, {0.25f, 0.5f});
Output variance_op =
Const(root.WithOpName("variance_op"), Input::Initializer(variance_data));
Tensor beta_data(DT_FLOAT, TensorShape({2}));
test::FillValues<float>(&beta_data, {0.1f, 0.6f});
Output beta_op =
Const(root.WithOpName("beta_op"), Input::Initializer(beta_data));
Tensor gamma_data(DT_FLOAT, TensorShape({2}));
test::FillValues<float>(&gamma_data, {1.0f, 2.0f});
Output gamma_op =
Const(root.WithOpName("gamma_op"), Input::Initializer(gamma_data));
GraphDef original_graph_def;
TF_ASSERT_OK(root.ToGraphDef(&original_graph_def));
NodeDef batch_norm_node;
batch_norm_node.set_op("FusedBatchNorm");
batch_norm_node.set_name("output");
AddNodeInput("batch_to_space_op", &batch_norm_node);
AddNodeInput("gamma_op", &batch_norm_node);
AddNodeInput("beta_op", &batch_norm_node);
AddNodeInput("mean_op", &batch_norm_node);
AddNodeInput("variance_op", &batch_norm_node);
SetNodeAttr("T", DT_FLOAT, &batch_norm_node);
SetNodeAttr("epsilon", 0.00001f, &batch_norm_node);
SetNodeAttr("is_training", false, &batch_norm_node);
*(original_graph_def.mutable_node()->Add()) = batch_norm_node;
std::unique_ptr<Session> original_session(NewSession(SessionOptions()));
TF_ASSERT_OK(original_session->Create(original_graph_def));
std::vector<Tensor> original_outputs;
TF_ASSERT_OK(original_session->Run({}, {"output"}, {}, &original_outputs));
GraphDef fused_graph_def;
TF_ASSERT_OK(FoldOldBatchNorms(original_graph_def, {{}, {"output"}},
&fused_graph_def));
std::unique_ptr<Session> fused_session(NewSession(SessionOptions()));
TF_ASSERT_OK(fused_session->Create(fused_graph_def));
std::vector<Tensor> fused_outputs;
TF_ASSERT_OK(fused_session->Run({}, {"output"}, {}, &fused_outputs));
test::ExpectTensorNear<float>(original_outputs[0], fused_outputs[0], 1e-5);
for (const NodeDef& node : fused_graph_def.node()) {
EXPECT_NE("FusedBatchNormWithBatchToSpace", node.op());
}
}
TEST_F(FoldOldBatchNormsTest, TestFoldOldBatchNorms) {
TestFoldOldBatchNorms();
}
TEST_F(FoldOldBatchNormsTest, TestFoldFusedBatchNorms) {
TestFoldFusedBatchNorms();
}
TEST_F(FoldOldBatchNormsTest, TestFoldFusedBatchNormsWithConcat) {
TestFoldFusedBatchNormsWithConcat(true);
TestFoldFusedBatchNormsWithConcat(false);
}
TEST_F(FoldOldBatchNormsTest, TestFoldFusedBatchNormsWithBatchToSpace) {
TestFoldFusedBatchNormsWithBatchToSpace();
}
TEST_F(FoldOldBatchNormsTest, TestFoldOldBatchNormsAfterDepthwiseConv2dNative) {
TestFoldOldBatchNormsAfterDepthwiseConv2dNative();
}
TEST_F(FoldOldBatchNormsTest,
TestFoldFusedBatchNormsAfterDepthwiseConv2dNative) {
TestFoldFusedBatchNormsAfterDepthwiseConv2dNative();
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/tools/graph_transforms/fold_old_batch_norms.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/tools/graph_transforms/fold_old_batch_norms_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
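A quick standalone arithmetic check of the fold, using the channel-0 constants from TestFoldFusedBatchNorms above (mean 10, variance 0.25, beta 0.1, gamma 1, epsilon 1e-5) and the same formulas as GetScaleAndOffsetValues. This is an illustrative sketch, not part of the repository.

#include <cmath>
#include <cstdio>

int main() {
  const float mean = 10.0f, variance = 0.25f, beta = 0.1f, gamma = 1.0f;
  const float epsilon = 0.00001f;
  // scale = gamma / sqrt(variance + epsilon); offset = beta - mean * scale.
  const float scale = (1.0f / std::sqrt(variance + epsilon)) * gamma;
  const float offset = -mean * scale + beta;
  // y = gamma * (x - mean) / sqrt(variance + epsilon) + beta collapses to
  // y = scale * x + offset, which is what the Conv2D + BiasAdd rewrite
  // implements.
  std::printf("scale=%f offset=%f\n", scale, offset);
  return 0;
}

For these constants scale is roughly 2.0 and offset roughly -19.9, so the folded Conv2D + BiasAdd reproduces the batch-normalized output to within the 2e-5 tolerance the test uses.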
d6649578-7be7-42e1-8e40-fd3691a3885d | cpp | tensorflow/tensorflow | mapped_ptr_container_sorter | third_party/xla/xla/service/mapped_ptr_container_sorter.h | third_party/xla/xla/service/mapped_ptr_container_sorter_test.cc | #ifndef XLA_SERVICE_MAPPED_PTR_CONTAINER_SORTER_H_
#define XLA_SERVICE_MAPPED_PTR_CONTAINER_SORTER_H_
#include <array>
#include <cstddef>
#include <functional>
#include <limits>
#include <list>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/functional/function_ref.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
namespace xla {
template <typename PointedToTy>
class MappedPtrContainerSorter {
public:
using MapPtrFn = absl::FunctionRef<const PointedToTy*(const PointedToTy*)>;
using UnmappedPtrIndexFn = absl::FunctionRef<size_t(const PointedToTy*)>;
static UnmappedPtrIndexFn IndexBeforeMappedElementsFn();
static UnmappedPtrIndexFn IndexAfterMappedElementsFn();
static UnmappedPtrIndexFn InvalidIndexFn();
template <typename OrderedTy, typename UnorderedTy>
static absl::Status Sort(MapPtrFn map_ptr, UnmappedPtrIndexFn unmapped_index,
const OrderedTy& ordered_container,
UnorderedTy& unordered_container);
private:
class SortedIndices {
public:
SortedIndices(size_t max_partial_order_exclusive,
size_t unordered_container_size)
: max_partial_order_exclusive_(max_partial_order_exclusive),
unordered_container_size_(unordered_container_size),
mapped_element_indices_by_partial_order_(
max_partial_order_exclusive) {}
absl::Status AddMappedElement(size_t unordered_container_index,
size_t partial_order);
void AddUnmappedElement(size_t unordered_container_index,
size_t target_index_amongst_mapped_elements);
std::string ToString() const;
absl::StatusOr<std::vector<size_t>> Flatten() const;
private:
SortedIndices() = delete;
size_t max_partial_order_exclusive_;
size_t unordered_container_size_;
std::vector<std::vector<size_t>> mapped_element_indices_by_partial_order_;
absl::flat_hash_map<size_t, std::vector<size_t>>
target_index_to_unmapped_element_index_;
};
static size_t IndexBeforeMappedElements() {
return std::numeric_limits<size_t>::max() - 2;
}
static size_t IndexAfterMappedElements() {
return std::numeric_limits<size_t>::max() - 1;
}
static size_t InvalidIndex() { return std::numeric_limits<size_t>::max(); }
template <typename OrderedTy, typename UnorderedTy>
static absl::StatusOr<std::vector<size_t>> ComputeNewIndices(
MapPtrFn map_ptr, UnmappedPtrIndexFn unmapped_index,
const OrderedTy& ordered_container,
const UnorderedTy& unordered_container);
template <typename UnorderedTy>
static void Reorder(std::vector<size_t> new_indices,
UnorderedTy& unordered_container);
};
namespace mapped_ptr_container_sorter_internal {
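// PtrGetter<I, O> normalizes element access across container types: each
// specialization below extracts a raw pointer (O) from a container element
// reference (I), whether the element is stored as T*, const T*, or
// std::unique_ptr<T>. ComputeNewIndices uses it to treat the ordered and
// unordered containers uniformly.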
template <typename I, typename O>
struct PtrGetter {
static O Get(I i);
};
template <typename T>
struct PtrGetter<T* const&, const T*> {
static const T* Get(T* const& p) { return p; }
};
template <typename T>
struct PtrGetter<T const* const&, const T*> {
static const T* Get(T const* const& p) { return p; }
};
template <typename T>
struct PtrGetter<T*&, T*> {
static T* Get(T*& p) { return p; }
};
template <typename T>
struct PtrGetter<const std::unique_ptr<T>&, const T*> {
static const T* Get(const std::unique_ptr<T>& p) { return p.get(); }
};
template <typename T>
struct PtrGetter<std::unique_ptr<T>&, T*> {
static T* Get(std::unique_ptr<T>& p) { return p.get(); }
};
}
template <typename PointedToTy>
typename MappedPtrContainerSorter<PointedToTy>::UnmappedPtrIndexFn
MappedPtrContainerSorter<PointedToTy>::IndexBeforeMappedElementsFn() {
static const auto fn = [](const PointedToTy*) {
return IndexBeforeMappedElements();
};
return fn;
}
template <typename PointedToTy>
typename MappedPtrContainerSorter<PointedToTy>::UnmappedPtrIndexFn
MappedPtrContainerSorter<PointedToTy>::IndexAfterMappedElementsFn() {
static const auto fn = [](const PointedToTy*) {
return IndexAfterMappedElements();
};
return fn;
}
template <typename PointedToTy>
typename MappedPtrContainerSorter<PointedToTy>::UnmappedPtrIndexFn
MappedPtrContainerSorter<PointedToTy>::InvalidIndexFn() {
static const auto fn = [](const PointedToTy*) { return InvalidIndex(); };
return fn;
}
template <typename PointedToTy>
absl::Status
MappedPtrContainerSorter<PointedToTy>::SortedIndices::AddMappedElement(
size_t unordered_container_index, size_t partial_order) {
if (partial_order >= mapped_element_indices_by_partial_order_.size()) {
return InternalStrCat("invalid partial order: ", partial_order, " v max(",
mapped_element_indices_by_partial_order_.size(), ")");
}
mapped_element_indices_by_partial_order_[partial_order].push_back(
unordered_container_index);
return absl::OkStatus();
}
template <typename PointedToTy>
void MappedPtrContainerSorter<PointedToTy>::SortedIndices::AddUnmappedElement(
size_t unordered_container_index,
size_t target_index_amongst_mapped_elements) {
target_index_to_unmapped_element_index_[target_index_amongst_mapped_elements]
.push_back(unordered_container_index);
}
template <typename PointedToTy>
std::string MappedPtrContainerSorter<PointedToTy>::SortedIndices::ToString()
const {
std::vector<std::string> mapped_element_strs;
mapped_element_strs.reserve(mapped_element_indices_by_partial_order_.size());
for (const auto& indices : mapped_element_indices_by_partial_order_) {
mapped_element_strs.push_back(
absl::StrCat("[", absl::StrJoin(indices, ", "), "]"));
}
std::vector<std::string> unmapped_element_strs;
unmapped_element_strs.reserve(target_index_to_unmapped_element_index_.size());
for (const auto& kv : target_index_to_unmapped_element_index_) {
std::string key = absl::StrCat(kv.first);
if (kv.first == IndexBeforeMappedElements()) {
key = "before_mapped";
}
if (kv.first == IndexAfterMappedElements()) {
key = "after_mapped";
}
if (kv.first == InvalidIndex()) {
key = "invalid";
}
unmapped_element_strs.push_back(
absl::StrCat(key, ": [", absl::StrJoin(kv.second, ", "), "]"));
}
return absl::StrCat(
"max_partial_order_exclusive_: ", max_partial_order_exclusive_, "\n",
"unordered_container_size_: ", unordered_container_size_, "\n",
"mapped_element_indices_by_partial_order_: [",
absl::StrJoin(mapped_element_strs, ", "), "]\n",
"target_index_to_unmapped_element_index_: {",
absl::StrJoin(unmapped_element_strs, ", "), "}\n");
}
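// Flattens the bucketed bookkeeping into one dense permutation over the
// unordered container: unmapped elements targeted before all mapped ones
// come first, then the mapped elements in partial order, with unmapped
// elements interleaved right after the mapped position they targeted, and
// unmapped elements targeted after all mapped ones come last. The result is
// verified to be a valid permutation (no duplicate or out-of-range
// destination index).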
template <typename PointedToTy>
absl::StatusOr<std::vector<size_t>>
MappedPtrContainerSorter<PointedToTy>::SortedIndices::Flatten() const {
std::vector<size_t> result(unordered_container_size_, InvalidIndex());
size_t next_available_index = 0;
auto next_index_fn = [&]() -> absl::StatusOr<size_t> {
if (next_available_index >= unordered_container_size_) {
return InternalStrCat(
"invalid unordered_container index: ", next_available_index,
" v size(", unordered_container_size_, ")");
}
return next_available_index++;
};
if (target_index_to_unmapped_element_index_.contains(
IndexBeforeMappedElements())) {
const auto& indices =
target_index_to_unmapped_element_index_.at(IndexBeforeMappedElements());
for (size_t index : indices) {
TF_ASSIGN_OR_RETURN(result[index], next_index_fn());
}
}
size_t num_inserted_mapped_elements = 0;
for (const auto& mapped_element_indices :
mapped_element_indices_by_partial_order_) {
for (size_t mapped_element_index : mapped_element_indices) {
TF_ASSIGN_OR_RETURN(result[mapped_element_index], next_index_fn());
++num_inserted_mapped_elements;
if (target_index_to_unmapped_element_index_.contains(
num_inserted_mapped_elements - 1)) {
const auto& unmapped_element_indices =
target_index_to_unmapped_element_index_.at(
num_inserted_mapped_elements - 1);
for (size_t unmapped_element_index : unmapped_element_indices) {
TF_ASSIGN_OR_RETURN(result[unmapped_element_index], next_index_fn());
}
}
}
}
if (target_index_to_unmapped_element_index_.contains(
IndexAfterMappedElements())) {
const auto& indices =
target_index_to_unmapped_element_index_.at(IndexAfterMappedElements());
for (size_t index : indices) {
TF_ASSIGN_OR_RETURN(result[index], next_index_fn());
}
}
absl::flat_hash_set<size_t> used_indices;
for (size_t index : result) {
if (used_indices.contains(index)) {
return InternalStrCat(
"2 elements in unordered_container are destined for the same "
"index: ",
index);
}
if (index >= unordered_container_size_) {
return InvalidArgumentStrCat("invalid unordered_container index: ", index,
" v size(", unordered_container_size_, ")");
}
}
return result;
}
template <typename PointedToTy>
template <typename OrderedTy, typename UnorderedTy>
absl::StatusOr<std::vector<size_t>>
MappedPtrContainerSorter<PointedToTy>::ComputeNewIndices(
MapPtrFn map_ptr, UnmappedPtrIndexFn unmapped_index,
const OrderedTy& ordered_container,
const UnorderedTy& unordered_container) {
using UnorderedPtrGetter = mapped_ptr_container_sorter_internal::PtrGetter<
typename UnorderedTy::const_reference, const PointedToTy*>;
using OrderedPtrGetter = mapped_ptr_container_sorter_internal::PtrGetter<
typename OrderedTy::const_reference, const PointedToTy*>;
if (unordered_container.size() >= IndexBeforeMappedElements()) {
return InvalidArgumentStrCat("Unordered container is too large to sort.");
}
absl::flat_hash_set<const PointedToTy*> unordered_ptrs;
for (const auto& unordered_element : unordered_container) {
const PointedToTy* ptr = UnorderedPtrGetter::Get(unordered_element);
unordered_ptrs.insert(ptr);
}
absl::flat_hash_map<const PointedToTy*, std::list<size_t>>
mapped_ptr_to_partial_order;
size_t next_partial_order_value = 0;
for (const auto& ordered_element : ordered_container) {
const PointedToTy* ordered_ptr = OrderedPtrGetter::Get(ordered_element);
const PointedToTy* unordered_ptr = map_ptr(ordered_ptr);
if (!unordered_ptr) {
continue;
}
if (!unordered_ptrs.contains(unordered_ptr)) {
continue;
}
mapped_ptr_to_partial_order[unordered_ptr].push_back(
next_partial_order_value);
++next_partial_order_value;
}
SortedIndices result(next_partial_order_value, unordered_container.size());
for (size_t i = 0; i < unordered_container.size(); ++i) {
const PointedToTy* ptr = UnorderedPtrGetter::Get(unordered_container[i]);
if (!mapped_ptr_to_partial_order.contains(ptr)) {
result.AddUnmappedElement(i, unmapped_index(ptr));
continue;
}
auto& index_list = mapped_ptr_to_partial_order[ptr];
TF_RETURN_IF_ERROR(result.AddMappedElement(i, index_list.front()));
if (index_list.size() > 1) {
index_list.pop_front();
}
}
VLOG(5) << "Pre flatten unordered_container result:\n" << result.ToString();
return result.Flatten();
}
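// Reorder applies the permutation in place by chasing swap cycles: while the
// element at old_pos is not yet at its destination, swap it (and its target
// index) toward new_indices[old_pos]; once the position is a fixed point,
// advance. Each element is swapped into place at most once, so this is O(n).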
template <typename PointedToTy>
template <typename UnorderedTy>
void MappedPtrContainerSorter<PointedToTy>::Reorder(
std::vector<size_t> new_indices, UnorderedTy& unordered_container) {
size_t old_pos = 0;
while (old_pos < new_indices.size()) {
size_t new_pos = new_indices[old_pos];
if (old_pos == new_pos) {
++old_pos;
continue;
}
std::swap(new_indices[old_pos], new_indices[new_pos]);
std::swap(unordered_container[old_pos], unordered_container[new_pos]);
}
}
template <typename PointedToTy>
template <typename OrderedTy, typename UnorderedTy>
absl::Status MappedPtrContainerSorter<PointedToTy>::Sort(
MapPtrFn map_ptr, UnmappedPtrIndexFn unmapped_index,
const OrderedTy& ordered_container, UnorderedTy& unordered_container) {
std::vector<size_t> indices;
TF_ASSIGN_OR_RETURN(
indices, ComputeNewIndices(map_ptr, unmapped_index, ordered_container,
unordered_container));
Reorder(std::move(indices), unordered_container);
return absl::OkStatus();
}
}
#endif | #include "xla/service/mapped_ptr_container_sorter.h"
#include <cstddef>
#include <list>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/functional/bind_front.h"
#include "absl/log/log.h"
#include "xla/test.h"
#include "xla/tsl/lib/core/status_test_util.h"
namespace xla {
namespace {
using ::testing::ElementsAre;
using ::testing::Pointee;
std::vector<std::unique_ptr<std::string>> CreateUniquePtrContainer(
const std::vector<std::string>& values) {
std::vector<std::unique_ptr<std::string>> container;
for (auto value : values) {
container.push_back(std::make_unique<std::string>(value));
}
return container;
}
class MappedPtrContainerSorterTest : public ::testing::Test {
public:
using Sorter = MappedPtrContainerSorter<std::string>;
MappedPtrContainerSorterTest()
: ordered_unique_ptrs_(CreateUniquePtrContainer(
{"m0", "m1", "m2", "m3", "not_in_unordered"})),
unordered_unique_ptrs_(
CreateUniquePtrContainer({"m3", "m1", "m0", "m2"})) {
for (auto& unique : ordered_unique_ptrs_) {
ordered_raw_ptrs_.push_back(unique.get());
ordered_const_raw_ptrs_.push_back(unique.get());
}
for (auto& unique : unordered_unique_ptrs_) {
unordered_raw_ptrs_.push_back(unique.get());
unordered_const_raw_ptrs_.push_back(unique.get());
}
}
protected:
const std::string* MapPtr(const std::string* ordered) const {
for (size_t i = 0; i < unordered_unique_ptrs_.size(); ++i) {
if (*ordered == *unordered_unique_ptrs_[i]) {
return unordered_unique_ptrs_[i].get();
}
}
return nullptr;
}
auto MapPtrFn() const {
return absl::bind_front(&MappedPtrContainerSorterTest::MapPtr, this);
}
void AddUnmappedElementsToUnorderedUniquePtrs() {
unordered_unique_ptrs_.insert(unordered_unique_ptrs_.begin(),
std::make_unique<std::string>("u0"));
unordered_unique_ptrs_.insert(unordered_unique_ptrs_.begin() + 2,
std::make_unique<std::string>("u1"));
unordered_unique_ptrs_.insert(unordered_unique_ptrs_.begin() + 3,
std::make_unique<std::string>("u2"));
unordered_unique_ptrs_.insert(unordered_unique_ptrs_.end(),
std::make_unique<std::string>("u3"));
}
std::vector<std::unique_ptr<std::string>> ordered_unique_ptrs_;
std::vector<std::unique_ptr<std::string>> unordered_unique_ptrs_;
std::vector<std::string*> ordered_raw_ptrs_;
std::vector<std::string*> unordered_raw_ptrs_;
std::vector<const std::string*> ordered_const_raw_ptrs_;
std::vector<const std::string*> unordered_const_raw_ptrs_;
};
TEST_F(MappedPtrContainerSorterTest, SortUniquePtrs) {
TF_EXPECT_OK(Sorter::Sort(MapPtrFn(), Sorter::InvalidIndexFn(),
ordered_unique_ptrs_, unordered_unique_ptrs_));
EXPECT_THAT(
unordered_unique_ptrs_,
ElementsAre(Pointee(std::string("m0")), Pointee(std::string("m1")),
Pointee(std::string("m2")), Pointee(std::string("m3"))));
}
TEST_F(MappedPtrContainerSorterTest, RawPtrs) {
TF_EXPECT_OK(Sorter::Sort(MapPtrFn(), Sorter::InvalidIndexFn(),
ordered_raw_ptrs_, unordered_raw_ptrs_));
EXPECT_THAT(
unordered_raw_ptrs_,
ElementsAre(Pointee(std::string("m0")), Pointee(std::string("m1")),
Pointee(std::string("m2")), Pointee(std::string("m3"))));
}
TEST_F(MappedPtrContainerSorterTest, ConstRawPtrs) {
TF_EXPECT_OK(Sorter::Sort(MapPtrFn(), Sorter::InvalidIndexFn(),
ordered_const_raw_ptrs_,
unordered_const_raw_ptrs_));
EXPECT_THAT(
unordered_const_raw_ptrs_,
ElementsAre(Pointee(std::string("m0")), Pointee(std::string("m1")),
Pointee(std::string("m2")), Pointee(std::string("m3"))));
}
TEST_F(MappedPtrContainerSorterTest, DifferentContainerTypes) {
std::list<std::unique_ptr<std::string>> ordered_ptrs;
for (auto& ptr : ordered_unique_ptrs_) {
ordered_ptrs.push_back(std::move(ptr));
}
TF_EXPECT_OK(Sorter::Sort(MapPtrFn(), Sorter::InvalidIndexFn(), ordered_ptrs,
unordered_unique_ptrs_));
EXPECT_THAT(
unordered_unique_ptrs_,
ElementsAre(Pointee(std::string("m0")), Pointee(std::string("m1")),
Pointee(std::string("m2")), Pointee(std::string("m3"))));
}
TEST_F(MappedPtrContainerSorterTest, WithUnmappedPtrsAfterMappedPtrs) {
AddUnmappedElementsToUnorderedUniquePtrs();
TF_EXPECT_OK(Sorter::Sort(MapPtrFn(), Sorter::IndexAfterMappedElementsFn(),
ordered_unique_ptrs_, unordered_unique_ptrs_));
EXPECT_THAT(
unordered_unique_ptrs_,
ElementsAre(Pointee(std::string("m0")), Pointee(std::string("m1")),
Pointee(std::string("m2")), Pointee(std::string("m3")),
Pointee(std::string("u0")), Pointee(std::string("u1")),
Pointee(std::string("u2")), Pointee(std::string("u3"))));
}
TEST_F(MappedPtrContainerSorterTest, WithUnmappedPtrsBeforeMappedPtrs) {
AddUnmappedElementsToUnorderedUniquePtrs();
TF_EXPECT_OK(Sorter::Sort(MapPtrFn(), Sorter::IndexBeforeMappedElementsFn(),
ordered_unique_ptrs_, unordered_unique_ptrs_));
EXPECT_THAT(unordered_unique_ptrs_,
ElementsAre(
Pointee(std::string("u0")), Pointee(std::string("u1")),
Pointee(std::string("u2")), Pointee(std::string("u3")),
Pointee(std::string("m0")), Pointee(std::string("m1")),
Pointee(std::string("m2")), Pointee(std::string("m3"))));
}
TEST_F(MappedPtrContainerSorterTest, WithUnmappedPtrsInCustomLocations) {
auto unmapped_ptr_index = [](const std::string* s) -> size_t {
if (*s == "u0") {
return Sorter::IndexAfterMappedElementsFn()(s);
}
if (*s == "u1") {
return 2;
}
if (*s == "u2") {
return 2;
}
if (*s == "u3") {
return Sorter::IndexBeforeMappedElementsFn()(s);
}
LOG(FATAL) << "We should not be getting an unmapped ptr index for " << *s;
};
AddUnmappedElementsToUnorderedUniquePtrs();
TF_EXPECT_OK(Sorter::Sort(MapPtrFn(), unmapped_ptr_index,
ordered_unique_ptrs_, unordered_unique_ptrs_));
  EXPECT_THAT(
      unordered_unique_ptrs_,
      ElementsAre(Pointee(std::string("u3")), Pointee(std::string("m0")),
                  Pointee(std::string("m1")), Pointee(std::string("m2")),
                  Pointee(std::string("u1")), Pointee(std::string("u2")),
                  Pointee(std::string("m3")), Pointee(std::string("u0"))));
}
TEST_F(MappedPtrContainerSorterTest,
ManyOrderedElementsMapToFewUnorderedElements) {
std::string* ordered_m1 = nullptr;
for (auto ptr : ordered_raw_ptrs_) {
if (*ptr == "m1") {
ordered_m1 = ptr;
break;
}
}
ASSERT_NE(ordered_m1, nullptr);
std::string* unordered_m1 = nullptr;
for (auto ptr : unordered_raw_ptrs_) {
if (*ptr == "m1") {
unordered_m1 = ptr;
break;
}
}
ASSERT_NE(unordered_m1, nullptr);
ordered_raw_ptrs_.insert(ordered_raw_ptrs_.begin(), ordered_m1);
ordered_raw_ptrs_.push_back(ordered_m1);
unordered_raw_ptrs_.push_back(unordered_m1);
TF_EXPECT_OK(Sorter::Sort(MapPtrFn(), Sorter::IndexBeforeMappedElementsFn(),
ordered_raw_ptrs_, unordered_raw_ptrs_));
EXPECT_THAT(
unordered_raw_ptrs_,
ElementsAre(
Pointee(std::string("m1")),
Pointee(std::string("m0")),
Pointee(std::string("m1")),
Pointee(std::string("m2")), Pointee(std::string("m3"))));
}
TEST_F(MappedPtrContainerSorterTest,
FewOrderedElementsMapToManyUnorderedElements) {
std::string* ordered_m1 = nullptr;
for (auto ptr : ordered_raw_ptrs_) {
if (*ptr == "m1") {
ordered_m1 = ptr;
break;
}
}
ASSERT_NE(ordered_m1, nullptr);
std::string* unordered_m1 = nullptr;
for (auto ptr : unordered_raw_ptrs_) {
if (*ptr == "m1") {
unordered_m1 = ptr;
break;
}
}
ASSERT_NE(unordered_m1, nullptr);
ordered_raw_ptrs_.insert(ordered_raw_ptrs_.begin(), ordered_m1);
unordered_raw_ptrs_.push_back(unordered_m1);
unordered_raw_ptrs_.push_back(unordered_m1);
TF_EXPECT_OK(Sorter::Sort(MapPtrFn(), Sorter::IndexBeforeMappedElementsFn(),
ordered_raw_ptrs_, unordered_raw_ptrs_));
EXPECT_THAT(
unordered_raw_ptrs_,
ElementsAre(
Pointee(std::string("m1")),
Pointee(std::string("m0")),
Pointee(std::string("m1")),
Pointee(std::string("m1")),
Pointee(std::string("m2")), Pointee(std::string("m3"))));
}
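// An unmapped-index callback returning a position past the number of mapped
// elements is invalid; Sort must report an error rather than clamp.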
TEST_F(MappedPtrContainerSorterTest, InvalidUnmappedIndex) {
unordered_unique_ptrs_.push_back(std::make_unique<std::string>("u0"));
auto unmapped_index_fn = [](const std::string* unmapped) -> size_t {
if (*unmapped == "u0") {
return 4;
}
return Sorter::IndexBeforeMappedElementsFn()(unmapped);
};
EXPECT_FALSE(Sorter::Sort(MapPtrFn(), unmapped_index_fn, ordered_unique_ptrs_,
unordered_unique_ptrs_)
.ok());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/mapped_ptr_container_sorter.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/mapped_ptr_container_sorter_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
49b48a75-9413-4452-9358-84307792442c | cpp | tensorflow/tensorflow | gpu_debug_allocator | tensorflow/core/common_runtime/gpu/gpu_debug_allocator.cc | tensorflow/core/common_runtime/gpu/gpu_debug_allocator_test.cc | #include "tensorflow/core/common_runtime/gpu/gpu_debug_allocator.h"
#include <cmath>
#include <cstddef>
#include <cstdint>
#include <optional>
#include <vector>
#include "xla/stream_executor/gpu/gpu_init.h"
#include "xla/stream_executor/stream_executor.h"
#include "xla/tsl/framework/device_id.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/status.h"
#define MASK_WORDS 2
#define MASK_BYTES (MASK_WORDS * sizeof(int64_t))
namespace tensorflow {
namespace {
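// The debug allocator brackets every allocation with MASK_BYTES of guard data
// on each side. before_mask/after_mask hold the expected patterns; InitMask
// writes a pattern to device memory, and CheckMask copies it back to the host
// and compares, flagging out-of-bounds writes.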
int64_t* NewMask(int64_t word) {
int64_t* m = new int64_t[MASK_WORDS];
for (int i = 0; i < MASK_WORDS; ++i) {
m[i] = word;
}
return m;
}
int64_t* before_mask = NewMask(0xabababababababab);
int64_t* after_mask = NewMask(0xcdcdcdcdcdcdcdcd);
bool CheckMask(se::StreamExecutor* exec, void* ptr, int64_t* mask) {
se::DeviceMemory<int64_t> gpu_ptr{se::DeviceMemoryBase{ptr, MASK_BYTES}};
int64_t tmp[MASK_WORDS];
absl::Status result = exec->SynchronousMemcpyD2H(gpu_ptr, MASK_BYTES, tmp);
if (!result.ok()) {
LOG(FATAL) << "Could not copy debug mask, " << result;
}
bool ok = true;
for (int i = 0; i < MASK_WORDS; ++i) {
ok &= (mask[i] == tmp[i]);
if (!ok) {
LOG(ERROR) << "i=" << i
<< " mask=" << reinterpret_cast<const void*>(mask[i])
<< " field=" << reinterpret_cast<const void*>(tmp[i]);
}
}
return ok;
}
void InitMask(se::StreamExecutor* exec, void* ptr, int64_t* mask) {
se::DeviceMemory<int64_t> gpu_ptr{se::DeviceMemoryBase{ptr, MASK_BYTES}};
absl::Status result = exec->SynchronousMemcpyH2D(mask, MASK_BYTES, &gpu_ptr);
if (!result.ok()) {
LOG(FATAL) << "Could not copy debug mask, " << result;
}
}
}
GPUDebugAllocator::GPUDebugAllocator(Allocator* allocator,
tsl::PlatformDeviceId platform_device_id)
: base_allocator_(allocator) {
stream_exec_ = se::GPUMachineManager()
->ExecutorForDevice(platform_device_id.value())
.value();
}
GPUDebugAllocator::~GPUDebugAllocator() { delete base_allocator_; }
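// Pads the request by 2 * MASK_BYTES, writes the header mask at the front of
// the underlying allocation and the footer mask at its tail, and returns a
// pointer just past the header.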
void* GPUDebugAllocator::AllocateRaw(size_t alignment, size_t num_bytes) {
num_bytes += (2 * MASK_BYTES);
void* allocated_ptr = base_allocator_->AllocateRaw(alignment, num_bytes);
if (allocated_ptr == nullptr) return allocated_ptr;
void* rv = static_cast<char*>(allocated_ptr) + MASK_BYTES;
InitMask(stream_exec_, allocated_ptr, before_mask);
size_t req_size = base_allocator_->RequestedSize(allocated_ptr);
InitMask(stream_exec_,
static_cast<char*>(allocated_ptr) + req_size - MASK_BYTES,
after_mask);
return rv;
}
void GPUDebugAllocator::DeallocateRaw(void* ptr) {
if (ptr != nullptr) {
CHECK(CheckHeader(ptr)) << "before_mask has been overwritten";
CHECK(CheckFooter(ptr)) << "after_mask has been overwritten";
ptr = static_cast<void*>(static_cast<char*>(ptr) - MASK_BYTES);
}
base_allocator_->DeallocateRaw(ptr);
}
bool GPUDebugAllocator::TracksAllocationSizes() const { return true; }
size_t GPUDebugAllocator::RequestedSize(const void* ptr) const {
auto req_size = base_allocator_->RequestedSize(static_cast<const char*>(ptr) -
MASK_BYTES);
return req_size - 2 * MASK_BYTES;
}
size_t GPUDebugAllocator::AllocatedSize(const void* ptr) const {
return base_allocator_->AllocatedSize(static_cast<const char*>(ptr) -
MASK_BYTES);
}
int64_t GPUDebugAllocator::AllocationId(const void* ptr) const {
return base_allocator_->AllocationId(static_cast<const char*>(ptr) -
MASK_BYTES);
}
std::optional<tsl::AllocatorStats> GPUDebugAllocator::GetStats() {
return base_allocator_->GetStats();
}
bool GPUDebugAllocator::ClearStats() { return base_allocator_->ClearStats(); }
bool GPUDebugAllocator::CheckHeader(void* ptr) {
return CheckMask(stream_exec_, static_cast<char*>(ptr) - MASK_BYTES,
before_mask);
}
bool GPUDebugAllocator::CheckFooter(void* ptr) {
char* original_ptr = static_cast<char*>(ptr) - MASK_BYTES;
size_t req_size = base_allocator_->RequestedSize(original_ptr);
return CheckMask(stream_exec_, original_ptr + req_size - MASK_BYTES,
after_mask);
}
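// GPUNanResetAllocator fills buffers with NaNs both on allocation and on
// deallocation, so reads of uninitialized or freed memory surface as NaNs in
// downstream results.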
GPUNanResetAllocator::GPUNanResetAllocator(
Allocator* allocator, tsl::PlatformDeviceId platform_device_id)
: base_allocator_(allocator) {
stream_exec_ = se::GPUMachineManager()
->ExecutorForDevice(platform_device_id.value())
.value();
}
GPUNanResetAllocator::~GPUNanResetAllocator() { delete base_allocator_; }
void* GPUNanResetAllocator::AllocateRaw(size_t alignment, size_t num_bytes) {
void* allocated_ptr = base_allocator_->AllocateRaw(alignment, num_bytes);
if (allocated_ptr == nullptr) return allocated_ptr;
size_t req_size = base_allocator_->RequestedSize(allocated_ptr);
std::vector<float> nans((req_size + sizeof(float) - 1) / sizeof(float),
std::nanf(""));
se::DeviceMemory<float> nan_ptr{
se::DeviceMemoryBase{static_cast<float*>(allocated_ptr), req_size}};
absl::Status result =
stream_exec_->SynchronousMemcpyH2D(&nans[0], req_size, &nan_ptr);
if (!result.ok()) {
LOG(ERROR) << "Could not initialize to NaNs, " << result;
}
return allocated_ptr;
}
void GPUNanResetAllocator::DeallocateRaw(void* ptr) {
if (ptr != nullptr) {
size_t req_size = base_allocator_->RequestedSize(ptr);
std::vector<float> nans((req_size + sizeof(float) - 1) / sizeof(float),
std::nanf(""));
se::DeviceMemory<float> nan_ptr{
se::DeviceMemoryBase{static_cast<float*>(ptr), req_size}};
absl::Status result =
stream_exec_->SynchronousMemcpyH2D(&nans[0], req_size, &nan_ptr);
if (!result.ok()) {
LOG(ERROR) << "Could not initialize to NaNs, " << result;
}
}
base_allocator_->DeallocateRaw(ptr);
}
size_t GPUNanResetAllocator::RequestedSize(const void* ptr) const {
return base_allocator_->RequestedSize(ptr);
}
size_t GPUNanResetAllocator::AllocatedSize(const void* ptr) const {
return base_allocator_->AllocatedSize(ptr);
}
std::optional<tsl::AllocatorStats> GPUNanResetAllocator::GetStats() {
return base_allocator_->GetStats();
}
bool GPUNanResetAllocator::ClearStats() {
return base_allocator_->ClearStats();
}
} | #if (defined(GOOGLE_CUDA) && GOOGLE_CUDA) || \
(defined(TENSORFLOW_USE_ROCM) && TENSORFLOW_USE_ROCM)
#include "tensorflow/core/common_runtime/gpu/gpu_debug_allocator.h"
#include <algorithm>
#include <cmath>
#include <cstring>
#include <vector>
#include "absl/memory/memory.h"
#include "xla/stream_executor/gpu/gpu_init.h"
#include "xla/stream_executor/platform.h"
#include "xla/stream_executor/stream_executor.h"
#include "xla/tsl/framework/device_id.h"
#include "xla/tsl/lib/gtl/inlined_vector.h"
#include "tensorflow/core/common_runtime/device/device_mem_allocator.h"
#include "tensorflow/core/common_runtime/gpu/gpu_bfc_allocator.h"
#include "tensorflow/core/framework/typed_allocator.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/test.h"
#include "tsl/platform/types.h"
namespace tensorflow {
namespace {
se::StreamExecutor* ExecutorForPlatformDeviceId(
tsl::PlatformDeviceId platform_device_id) {
return se::GPUMachineManager()
->ExecutorForDevice(platform_device_id.value())
.value();
}
TEST(GPUDebugAllocatorTest, OverwriteDetection_None) {
const tsl::PlatformDeviceId platform_device_id(0);
auto stream_exec = ExecutorForPlatformDeviceId(platform_device_id);
GPUDebugAllocator a(
new GPUBFCAllocator(absl::WrapUnique(new DeviceMemAllocator(
stream_exec, platform_device_id,
stream_executor::MemoryType::kDevice, {}, {})),
1 << 30, "", {}),
platform_device_id);
for (int s : {8}) {
std::vector<int64_t> cpu_array(s);
memset(&cpu_array[0], 0, cpu_array.size() * sizeof(int64_t));
int64_t* gpu_array =
TypedAllocator::Allocate<int64_t>(&a, cpu_array.size(), {});
se::DeviceMemory<int64_t> gpu_array_ptr{se::DeviceMemoryBase{gpu_array}};
TF_CHECK_OK(stream_exec->SynchronousMemcpyH2D(
&cpu_array[0], s * sizeof(int64_t), &gpu_array_ptr));
EXPECT_TRUE(a.CheckHeader(gpu_array));
EXPECT_TRUE(a.CheckFooter(gpu_array));
a.DeallocateRaw(gpu_array);
}
}
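// Writing just before the returned pointer corrupts the header mask, so the
// CHECK in DeallocateRaw must abort (hence the death test).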
TEST(GPUDebugAllocatorTest, OverwriteDetection_Header) {
for (int s : {8, 211}) {
EXPECT_DEATH(
{
const tsl::PlatformDeviceId platform_device_id(0);
auto stream_exec = ExecutorForPlatformDeviceId(platform_device_id);
GPUDebugAllocator a(
new GPUBFCAllocator(
absl::WrapUnique(new DeviceMemAllocator(
stream_exec, platform_device_id,
stream_executor::MemoryType::kDevice, {}, {})),
1 << 30, "", {}),
platform_device_id);
std::vector<int64_t> cpu_array(s);
memset(&cpu_array[0], 0, cpu_array.size() * sizeof(int64_t));
int64_t* gpu_array =
TypedAllocator::Allocate<int64_t>(&a, cpu_array.size(), {});
se::DeviceMemory<int64_t> gpu_array_ptr{
se::DeviceMemoryBase{gpu_array}};
TF_CHECK_OK(stream_exec->SynchronousMemcpyH2D(
&cpu_array[0], cpu_array.size() * sizeof(int64_t),
&gpu_array_ptr));
se::DeviceMemory<int64_t> gpu_hdr_ptr{
se::DeviceMemoryBase{gpu_array - 1}};
float pi = 3.1417;
TF_CHECK_OK(stream_exec->SynchronousMemcpyH2D(&pi, sizeof(float),
&gpu_hdr_ptr));
a.DeallocateRaw(gpu_array);
},
"");
}
}
TEST(GPUDebugAllocatorTest, OverwriteDetection_Footer) {
for (int s : {8, 22}) {
EXPECT_DEATH(
{
const tsl::PlatformDeviceId platform_device_id(0);
auto stream_exec = ExecutorForPlatformDeviceId(platform_device_id);
GPUDebugAllocator a(
new GPUBFCAllocator(
absl::WrapUnique(new DeviceMemAllocator(
stream_exec, platform_device_id,
stream_executor::MemoryType::kDevice, {}, {})),
1 << 30, "", {}),
platform_device_id);
std::vector<int64_t> cpu_array(s);
memset(&cpu_array[0], 0, cpu_array.size() * sizeof(int64_t));
int64_t* gpu_array =
TypedAllocator::Allocate<int64_t>(&a, cpu_array.size(), {});
se::DeviceMemory<int64_t> gpu_array_ptr{
se::DeviceMemoryBase{gpu_array}};
TF_CHECK_OK(stream_exec->SynchronousMemcpyH2D(
&cpu_array[0], cpu_array.size() * sizeof(int64_t),
&gpu_array_ptr));
se::DeviceMemory<int64_t> gpu_ftr_ptr{
se::DeviceMemoryBase{gpu_array + s}};
float pi = 3.1417;
TF_CHECK_OK(stream_exec->SynchronousMemcpyH2D(&pi, sizeof(float),
&gpu_ftr_ptr));
a.DeallocateRaw(gpu_array);
},
"");
}
}
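// Freshly allocated memory must read back as NaNs, a written value must be
// preserved, and deallocation must re-poison the buffer with NaNs.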
TEST(GPUDebugAllocatorTest, ResetToNan) {
const tsl::PlatformDeviceId platform_device_id(0);
auto stream_exec = ExecutorForPlatformDeviceId(platform_device_id);
GPUNanResetAllocator a(
new GPUBFCAllocator(absl::WrapUnique(new DeviceMemAllocator(
stream_exec, platform_device_id,
stream_executor::MemoryType::kDevice, {}, {})),
1 << 30, "", {}),
platform_device_id);
std::vector<float> cpu_array(1024);
std::vector<float> cpu_array_result(1024);
float* gpu_array = TypedAllocator::Allocate<float>(&a, cpu_array.size(), {});
se::DeviceMemory<float> gpu_array_ptr{se::DeviceMemoryBase{gpu_array}};
TF_CHECK_OK(stream_exec->SynchronousMemcpyD2H(
gpu_array_ptr, cpu_array.size() * sizeof(float), &cpu_array[0]));
for (float f : cpu_array) {
ASSERT_FALSE(std::isfinite(f));
}
cpu_array[0] = 1.0;
TF_CHECK_OK(stream_exec->SynchronousMemcpyH2D(
&cpu_array[0], cpu_array.size() * sizeof(float), &gpu_array_ptr));
TF_CHECK_OK(stream_exec->SynchronousMemcpyD2H(
gpu_array_ptr, cpu_array_result.size() * sizeof(float),
&cpu_array_result[0]));
ASSERT_EQ(1.0, cpu_array_result[0]);
a.DeallocateRaw(gpu_array);
TF_CHECK_OK(stream_exec->SynchronousMemcpyD2H(
gpu_array_ptr, cpu_array_result.size() * sizeof(float),
&cpu_array_result[0]));
for (float f : cpu_array_result) {
ASSERT_FALSE(std::isfinite(f));
}
}
TEST(GPUDebugAllocatorTest, ResetToNanWithHeaderFooter) {
const tsl::PlatformDeviceId platform_device_id(0);
auto stream_exec = ExecutorForPlatformDeviceId(platform_device_id);
GPUNanResetAllocator a(
new GPUBFCAllocator(absl::WrapUnique(new DeviceMemAllocator(
stream_exec, platform_device_id,
stream_executor::MemoryType::kDevice, {}, {})),
1 << 30, "", {}),
platform_device_id);
std::vector<float> cpu_array(1024);
std::vector<float> cpu_array_result(1024);
float* gpu_array = TypedAllocator::Allocate<float>(&a, cpu_array.size(), {});
se::DeviceMemory<float> gpu_array_ptr{se::DeviceMemoryBase{gpu_array}};
TF_CHECK_OK(stream_exec->SynchronousMemcpyD2H(
gpu_array_ptr, cpu_array.size() * sizeof(float), &cpu_array[0]));
for (float f : cpu_array) {
ASSERT_FALSE(std::isfinite(f));
}
cpu_array[0] = 1.0;
TF_CHECK_OK(stream_exec->SynchronousMemcpyH2D(
&cpu_array[0], cpu_array.size() * sizeof(float), &gpu_array_ptr));
TF_CHECK_OK(stream_exec->SynchronousMemcpyD2H(
gpu_array_ptr, cpu_array_result.size() * sizeof(float),
&cpu_array_result[0]));
ASSERT_EQ(1.0, cpu_array_result[0]);
a.DeallocateRaw(gpu_array);
TF_CHECK_OK(stream_exec->SynchronousMemcpyD2H(
gpu_array_ptr, cpu_array_result.size() * sizeof(float),
&cpu_array_result[0]));
for (float f : cpu_array_result) {
ASSERT_FALSE(std::isfinite(f));
}
}
TEST(GPUDebugAllocatorTest, TracksSizes) {
const tsl::PlatformDeviceId platform_device_id(0);
auto stream_exec = ExecutorForPlatformDeviceId(platform_device_id);
GPUDebugAllocator a(
new GPUBFCAllocator(absl::WrapUnique(new DeviceMemAllocator(
stream_exec, platform_device_id,
stream_executor::MemoryType::kDevice, {}, {})),
1 << 30, "", {}),
platform_device_id);
  EXPECT_TRUE(a.TracksAllocationSizes());
}
TEST(GPUDebugAllocatorTest, AllocatedVsRequested) {
const tsl::PlatformDeviceId platform_device_id(0);
auto stream_exec = ExecutorForPlatformDeviceId(platform_device_id);
GPUDebugAllocator a(
new GPUBFCAllocator(absl::WrapUnique(new DeviceMemAllocator(
stream_exec, platform_device_id,
stream_executor::MemoryType::kDevice, {}, {})),
1 << 30, "", {}),
platform_device_id);
float* t1 = TypedAllocator::Allocate<float>(&a, 1, {});
EXPECT_EQ(4, a.RequestedSize(t1));
EXPECT_EQ(256, a.AllocatedSize(t1));
a.DeallocateRaw(t1);
}
}
}
#endif | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/gpu/gpu_debug_allocator.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/gpu/gpu_debug_allocator_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ad0c7e00-9ae2-4443-ae99-f6f4b58dc22e | cpp | tensorflow/tensorflow | scc | tensorflow/core/grappler/utils/scc.cc | tensorflow/core/grappler/utils/scc_test.cc | #include "tensorflow/core/grappler/utils/scc.h"
#include <algorithm>
#include <stack>
#include <unordered_map>
#include <unordered_set>
#include <vector>
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/utils.h"
namespace tensorflow {
namespace grappler {
struct SCCNodeData {
SCCNodeData()
: node(nullptr),
index(-1),
lowlink(-1),
onstack(false),
caller(nullptr),
caller_loop_location(-1) {}
void ResetStack(int new_index, SCCNodeData* new_caller) {
index = new_index;
lowlink = new_index;
onstack = true;
caller = new_caller;
caller_loop_location = 0;
}
const NodeDef* node;
int index;
int lowlink;
bool onstack;
std::vector<SCCNodeData*> children;
SCCNodeData* caller;
int caller_loop_location;
};
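// Iterative formulation of Tarjan's strongly connected components algorithm.
// The usual recursion stack is replaced by the caller/caller_loop_location
// fields in SCCNodeData, which record where to resume in each node's child
// list.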
void StrongConnect(SCCNodeData* v, std::stack<SCCNodeData*>* stack, int* index,
std::unordered_map<const NodeDef*, int>* components,
int* scc_index) {
  v->ResetStack(*index, nullptr);
++*index;
stack->push(v);
v->caller = nullptr;
v->caller_loop_location = 0;
SCCNodeData* last = v;
while (true) {
if (last->caller_loop_location < last->children.size()) {
SCCNodeData* w = last->children[last->caller_loop_location];
++(last->caller_loop_location);
if (w->index == -1) {
        w->ResetStack(*index, last);
++*index;
stack->push(w);
last = w;
      } else if (w->onstack) {
last->lowlink = std::min(last->lowlink, w->index);
}
} else {
if (last->lowlink == last->index) {
SCCNodeData* top;
while (true) {
top = stack->top();
stack->pop();
top->onstack = false;
(*components)[top->node] = *scc_index;
if (top == last) {
break;
}
}
++*scc_index;
}
SCCNodeData* next_last = last->caller;
if (next_last == nullptr) {
break;
} else {
next_last->lowlink = std::min(next_last->lowlink, last->lowlink);
last = next_last;
}
}
}
}
void StronglyConnectedComponents(
const GraphDef& graph, std::unordered_map<const NodeDef*, int>* components,
int* num_components) {
std::stack<SCCNodeData*> stack;
std::unordered_map<string, SCCNodeData*> name_to_data;
std::vector<SCCNodeData> node_data_container;
node_data_container.reserve(graph.node_size());
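  // The reserve above is load-bearing: raw pointers into node_data_container
  // are taken below, so the vector must never reallocate while nodes are
  // being added.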
std::unordered_map<const NodeDef*, SCCNodeData*> node_to_data;
for (const NodeDef& node : graph.node()) {
SCCNodeData node_data;
node_data.node = &node;
node_data_container.push_back(node_data);
name_to_data[node.name()] = &(*node_data_container.rbegin());
node_to_data[&node] = &(*node_data_container.rbegin());
}
for (const NodeDef& node : graph.node()) {
for (const string& input : node.input()) {
auto it = name_to_data.find(NodeName(input));
if (it != name_to_data.end()) {
it->second->children.push_back(node_to_data[&node]);
}
}
}
components->clear();
*num_components = 0;
int index = 0;
for (auto& v : node_data_container) {
if (v.index == -1) {
StrongConnect(&v, &stack, &index, components, num_components);
}
}
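  // Single-node components are not interesting as cycles: remap them all to
  // the sentinel id -1, counting them collectively as at most one component.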
std::vector<int> counts_per_component(*num_components, 0);
for (auto& component : *components) {
DCHECK(component.second >= 0);
DCHECK(component.second < *num_components);
counts_per_component[component.second]++;
}
bool has_single_element_component = false;
for (auto& component : *components) {
if (counts_per_component[component.second] == 1) {
component.second = -1;
(*num_components)--;
has_single_element_component = true;
}
}
if (has_single_element_component) {
(*num_components) += 1;
}
}
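// Assigns loop ids via SCC membership. An SCC containing several
// NextIteration nodes represents multiple/nested loops, so it is re-analyzed
// once per NextIteration node, keeping only that back edge each time.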
int IdentifyLoops(const GraphDef& graph,
std::unordered_map<const NodeDef*, std::vector<int>>* loops) {
int num_components = 0;
std::unordered_map<const NodeDef*, int> components;
StronglyConnectedComponents(graph, &components, &num_components);
if (num_components <= 1) {
if (!components.empty() && components.begin()->second == -1) {
return 0;
}
}
std::unordered_map<int, std::vector<const NodeDef*>> component_ids;
  for (const auto& it : components) {
int id = it.second;
if (id < 0) {
continue;
}
component_ids[id].push_back(it.first);
}
int loop_id = 0;
for (const auto& component : component_ids) {
const std::vector<const NodeDef*>& component_nodes = component.second;
std::vector<std::pair<NodeDef*, string>> next_iter_nodes;
GraphDef subgraph;
std::unordered_map<const NodeDef*, const NodeDef*> subgraph_mapping;
for (const auto& component_node : component_nodes) {
NodeDef* node = subgraph.add_node();
*node = *component_node;
subgraph_mapping[node] = component_node;
if (IsNextIteration(*node)) {
CHECK_EQ(1, node->input_size());
next_iter_nodes.emplace_back(node, node->input(0));
}
}
if (next_iter_nodes.size() == 1) {
for (const auto& component_node : component_nodes) {
(*loops)[component_node].push_back(loop_id);
}
++loop_id;
} else {
for (int i = 0; i < next_iter_nodes.size(); ++i) {
for (int j = 0; j < next_iter_nodes.size(); ++j) {
next_iter_nodes[j].first->clear_input();
if (i == j) {
*next_iter_nodes[j].first->add_input() = next_iter_nodes[j].second;
}
}
int num_components = 0;
std::unordered_map<const NodeDef*, int> components;
StronglyConnectedComponents(subgraph, &components, &num_components);
CHECK_GE(num_components, 1);
      for (const auto& it : components) {
int id = it.second;
if (id < 0) {
continue;
}
(*loops)[subgraph_mapping[it.first]].push_back(loop_id);
}
++loop_id;
}
}
}
return loop_id;
}
}
} | #include "tensorflow/core/grappler/utils/scc.h"
#include <iostream>
#include <memory>
#include "absl/types/span.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/grappler/clusters/virtual_cluster.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/inputs/trivial_test_graph_input_yielder.h"
#include "tensorflow/core/grappler/inputs/utils.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace {
class SCCTest : public ::testing::Test {
public:
void SetUp() override {
std::unordered_map<string, DeviceProperties> devices;
DeviceProperties unknown_device;
devices["MY_DEVICE"] = unknown_device;
cluster_ = std::make_unique<VirtualCluster>(devices);
TF_CHECK_OK(cluster_->Provision());
}
void TearDown() override { cluster_.reset(); }
protected:
static NodeDef CreateNode(const string& name,
absl::Span<const string> inputs) {
NodeDef node;
node.set_name(name);
for (const string& input : inputs) {
node.add_input(input);
}
return node;
}
std::unique_ptr<VirtualCluster> cluster_;
};
TEST_F(SCCTest, NoLoops) {
TrivialTestGraphInputYielder fake_input(4, 1, 10, false,
cluster_->GetDeviceNames());
GrapplerItem item;
CHECK(fake_input.NextItem(&item));
std::unordered_map<const NodeDef*, int> components;
int num_components;
StronglyConnectedComponents(item.graph, &components, &num_components);
EXPECT_EQ(num_components, 1);
for (const auto& node : item.graph.node()) {
EXPECT_EQ(-1, components[&node]);
}
}
TEST_F(SCCTest, DisjointCycleAndPath) {
GraphDef graph;
*graph.add_node() = CreateNode("a", {"d"});
*graph.add_node() = CreateNode("b", {"a"});
*graph.add_node() = CreateNode("c", {"b"});
*graph.add_node() = CreateNode("d", {"c"});
*graph.add_node() = CreateNode("e", {});
*graph.add_node() = CreateNode("f", {"e"});
*graph.add_node() = CreateNode("g", {"f"});
*graph.add_node() = CreateNode("h", {"g"});
std::vector<const NodeDef*> nodes;
std::unordered_map<string, const NodeDef*> name_to_node;
for (const auto& n : graph.node()) {
nodes.push_back(&n);
name_to_node[n.name()] = &n;
}
int num_components;
std::unordered_map<const NodeDef*, int> components;
StronglyConnectedComponents(graph, &components, &num_components);
EXPECT_EQ(num_components, 2);
for (const auto& pair : {std::make_pair("a", "b"), std::make_pair("a", "c"),
std::make_pair("a", "d")}) {
EXPECT_EQ(components[name_to_node[pair.first]],
components[name_to_node[pair.second]]);
}
for (const auto& node : {"e", "f", "g", "h"})
EXPECT_EQ(-1, components[name_to_node[node]]);
}
}
TEST_F(SCCTest, WikipediaExample) {
GraphDef graph;
*graph.add_node() = CreateNode("a", {"c"});
*graph.add_node() = CreateNode("b", {"a", "d"});
*graph.add_node() = CreateNode("c", {"b", "d", "f"});
*graph.add_node() = CreateNode("d", {"e"});
*graph.add_node() = CreateNode("e", {"d"});
*graph.add_node() = CreateNode("f", {"e", "g"});
*graph.add_node() = CreateNode("g", {"f", "h"});
*graph.add_node() = CreateNode("h", {"h"});
std::vector<const NodeDef*> nodes;
std::unordered_map<string, const NodeDef*> name_to_node;
for (const auto& n : graph.node()) {
nodes.push_back(&n);
name_to_node[n.name()] = &n;
}
int num_components;
std::unordered_map<const NodeDef*, int> components;
StronglyConnectedComponents(graph, &components, &num_components);
EXPECT_EQ(num_components, 4);
for (const auto& pair :
{std::make_pair("a", "b"), std::make_pair("a", "c"),
std::make_pair("d", "e"), std::make_pair("f", "g")}) {
EXPECT_EQ(components[name_to_node[pair.first]],
components[name_to_node[pair.second]]);
}
for (const auto& pair :
{std::make_pair("a", "d"), std::make_pair("a", "f"),
std::make_pair("a", "h"), std::make_pair("d", "f"),
std::make_pair("d", "h"), std::make_pair("f", "h")}) {
EXPECT_NE(components[name_to_node[pair.first]],
components[name_to_node[pair.second]]);
}
}
TEST_F(SCCTest, TensorFlowLoop) {
const string gdef_ascii = R"EOF(
node {
name: "Const"
op: "Const"
attr {
key: "dtype"
value {
type: DT_INT32
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT32
tensor_shape {
}
int_val: 0
}
}
}
}
node {
name: "while/Enter"
op: "Enter"
input: "Const"
attr {
key: "T"
value {
type: DT_INT32
}
}
attr {
key: "frame_name"
value {
s: "while/while/"
}
}
attr {
key: "is_constant"
value {
b: false
}
}
attr {
key: "parallel_iterations"
value {
i: 10
}
}
}
node {
name: "while/Merge"
op: "Merge"
input: "while/Enter"
input: "while/NextIteration"
attr {
key: "N"
value {
i: 2
}
}
attr {
key: "T"
value {
type: DT_INT32
}
}
}
node {
name: "while/Less/y"
op: "Const"
input: "^while/Merge"
attr {
key: "dtype"
value {
type: DT_INT32
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT32
tensor_shape {
}
int_val: 10
}
}
}
}
node {
name: "while/Less"
op: "Less"
input: "while/Merge"
input: "while/Less/y"
attr {
key: "T"
value {
type: DT_INT32
}
}
}
node {
name: "while/LoopCond"
op: "LoopCond"
input: "while/Less"
}
node {
name: "while/Switch"
op: "Switch"
input: "while/Merge"
input: "while/LoopCond"
attr {
key: "T"
value {
type: DT_INT32
}
}
attr {
key: "_class"
value {
list {
s: "loc:@while/Merge"
}
}
}
}
node {
name: "while/Identity"
op: "Identity"
input: "while/Switch:1"
attr {
key: "T"
value {
type: DT_INT32
}
}
}
node {
name: "while/Add/y"
op: "Const"
input: "^while/Identity"
attr {
key: "dtype"
value {
type: DT_INT32
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT32
tensor_shape {
}
int_val: 1
}
}
}
}
node {
name: "while/Add"
op: "Add"
input: "while/Identity"
input: "while/Add/y"
attr {
key: "T"
value {
type: DT_INT32
}
}
}
node {
name: "while/NextIteration"
op: "NextIteration"
input: "while/Add"
attr {
key: "T"
value {
type: DT_INT32
}
}
}
node {
name: "while/Exit"
op: "Exit"
input: "while/Switch"
attr {
key: "T"
value {
type: DT_INT32
}
}
}
versions {
producer: 11
}
)EOF";
GrapplerItem item;
CHECK(protobuf::TextFormat::ParseFromString(gdef_ascii, &item.graph));
std::unordered_map<const NodeDef*, int> components;
int num_components;
StronglyConnectedComponents(item.graph, &components, &num_components);
EXPECT_EQ(num_components, 2);
for (const auto& node : item.graph.node()) {
if (node.name() == "Const" || node.name() == "while/Enter" ||
node.name() == "while/Exit") {
EXPECT_EQ(-1, components[&node]);
} else {
EXPECT_LE(0, components[&node]);
}
}
}
TEST_F(SCCTest, NestedLoops) {
GrapplerItem item;
string filename = io::JoinPath(
testing::TensorFlowSrcRoot(),
"core/grappler/costs/graph_properties_testdata/nested_loop.pbtxt");
TF_CHECK_OK(ReadGraphDefFromFile(filename, &item.graph));
for (const auto& node : item.graph.node()) {
std::cout << node.DebugString() << std::endl;
}
std::unordered_map<const NodeDef*, std::vector<int>> loops;
int num_loops = IdentifyLoops(item.graph, &loops);
EXPECT_EQ(4, num_loops);
for (const auto& node_info : loops) {
std::cout << node_info.first->name() << " [";
for (int i : node_info.second) {
std::cout << " " << i;
}
std::cout << "]" << std::endl;
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/utils/scc.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/utils/scc_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
5c50ee19-4508-40ba-8d7f-ca5923c7ad9a | cpp | tensorflow/tensorflow | latency_hiding_scheduler | third_party/xla/xla/service/latency_hiding_scheduler.cc | third_party/xla/xla/service/latency_hiding_scheduler_test.cc | #include "xla/service/latency_hiding_scheduler.h"
#include <algorithm>
#include <cstdint>
#include <cstdlib>
#include <functional>
#include <limits>
#include <memory>
#include <optional>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/debug_options_flags.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_reachability.h"
#include "xla/hlo/ir/hlo_schedule.h"
#include "xla/map_util.h"
#include "xla/service/dump.h"
#include "xla/service/hlo_alias_analysis.h"
#include "xla/service/hlo_buffer.h"
#include "xla/service/hlo_cost_analysis.h"
#include "xla/service/hlo_value.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "xla/xla.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
const int64_t kDefaultMemorySpace = 0;
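// Opcodes treated as no-ops for scheduling purposes: pure data-movement and
// metadata ops, plus tuples whose only user is a while.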
bool IsNopInstruction(const HloInstruction& hlo) {
HloOpcode op = hlo.opcode();
return op == HloOpcode::kGetTupleElement || op == HloOpcode::kBitcast ||
op == HloOpcode::kConstant || op == HloOpcode::kParameter ||
op == HloOpcode::kBroadcast || op == HloOpcode::kIota ||
hlo.IsEffectiveBitcast() ||
(op == HloOpcode::kTuple && hlo.user_count() == 1 &&
hlo.users().front()->opcode() == HloOpcode::kWhile);
}
bool InstructionDefinesValue(const HloInstruction* instruction,
const HloValue* value) {
if (value->defining_instruction() == instruction) {
return true;
}
if (value->shape().has_layout() &&
value->shape().layout().memory_space() != kDefaultMemorySpace) {
return false;
}
if (instruction->opcode() == HloOpcode::kAsyncStart) {
if (instruction->async_wrapped_opcode() == HloOpcode::kCall) {
return instruction->async_wrapped_instruction()
->called_computations()[0]
->root_instruction() == value->defining_instruction();
}
return instruction->async_wrapped_instruction() ==
value->defining_instruction();
}
return false;
}
bool InstructionFirstDefinesBuffer(
const HloInstruction* instruction,
const BufferInfoTracker::ValueInfo& buffer_value_info) {
if (buffer_value_info.first_definition == instruction) {
return true;
}
if (buffer_value_info.value->values()[0]->shape().has_layout() &&
buffer_value_info.value->values()[0]->shape().layout().memory_space() !=
kDefaultMemorySpace) {
return false;
}
if (instruction->opcode() == HloOpcode::kAsyncStart) {
if (instruction->async_wrapped_opcode() == HloOpcode::kCall) {
return instruction->async_wrapped_instruction()
->called_computations()[0]
->root_instruction() == buffer_value_info.first_definition;
}
return instruction->async_wrapped_instruction() ==
buffer_value_info.first_definition;
}
return false;
}
}
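// Normalizes the various spellings of async ops (kAllReduceStart, kCopyStart,
// wrapped kAsyncStart, ...) into a canonical (outer, inner) opcode pair so
// that start/done pairs can be matched uniformly.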
CanonicalAsyncOp DefaultGetCanonicalAsyncOp(const HloInstruction& hlo) {
switch (hlo.opcode()) {
case HloOpcode::kAsyncStart:
case HloOpcode::kAsyncDone:
if (hlo.async_wrapped_opcode() == HloOpcode::kCall) {
return {hlo.opcode(), hlo.async_wrapped_instruction()
->called_computations()[0]
->root_instruction()
->opcode()};
}
return {hlo.opcode(), hlo.async_wrapped_opcode()};
case HloOpcode::kAllReduceStart:
return {HloOpcode::kAsyncStart, HloOpcode::kAllReduce};
case HloOpcode::kAllGatherStart:
return {HloOpcode::kAsyncStart, HloOpcode::kAllGather};
case HloOpcode::kCollectivePermuteStart:
return {HloOpcode::kAsyncStart, HloOpcode::kCollectivePermute};
case HloOpcode::kCopyStart:
return {HloOpcode::kAsyncStart, HloOpcode::kCopy};
case HloOpcode::kCopyDone:
return {HloOpcode::kAsyncDone, HloOpcode::kCopy};
case HloOpcode::kAllReduceDone:
return {HloOpcode::kAsyncDone, HloOpcode::kAllReduce};
case HloOpcode::kAllGatherDone:
return {HloOpcode::kAsyncDone, HloOpcode::kAllGather};
case HloOpcode::kCollectivePermuteDone:
return {HloOpcode::kAsyncDone, HloOpcode::kCollectivePermute};
default:
return {hlo.opcode(), hlo.opcode()};
}
}
bool LatencyEstimator::IsAsyncPair(const HloGraphNode& from,
const HloGraphNode& target) const {
CanonicalAsyncOp from_op = GetCanonicalAsyncOp(from.GetInstr());
CanonicalAsyncOp target_op = GetCanonicalAsyncOp(target.GetInstr());
return from_op.outer == HloOpcode::kAsyncStart &&
target_op.outer == HloOpcode::kAsyncDone &&
from_op.inner == target_op.inner;
}
bool LatencyEstimator::IsP2pPair(const HloGraphNode& from,
const HloGraphNode& target) const {
return (from.GetInstr().opcode() == HloOpcode::kSend &&
target.GetInstr().opcode() == HloOpcode::kSendDone) ||
(from.GetInstr().opcode() == HloOpcode::kRecv &&
target.GetInstr().opcode() == HloOpcode::kRecvDone);
}
LatencyEstimator::TimeCost ApproximateLatencyEstimator::GetLatencyBetween(
const HloGraphNode& from, const HloGraphNode& target) const {
if (IsAsyncPair(from, target)) {
return kHighLatency;
}
return kLowLatency;
}
LatencyEstimator::TimeCost ApproximateLatencyEstimator::NodeCost(
const HloInstruction* instr) const {
if (instr->IsLoopFusion()) {
return kMediumCost;
}
if (instr->IsOutputFusion() || instr->opcode() == HloOpcode::kConvolution) {
return kHighCost;
}
return kLowCost;
}
bool AsyncTracker::IsSupportedAsyncDone(const HloInstruction& hlo) const {
CanonicalAsyncOp op = GetCanonicalAsyncOp(hlo);
if (op.outer == HloOpcode::kSendDone || op.outer == HloOpcode::kRecvDone) {
return config_.schedule_send_recvs;
}
if (op.outer == HloOpcode::kAsyncDone) {
if (hlo.IsAsynchronous() &&
hlo.async_execution_thread() != hlo.parent()->execution_thread()) {
return true;
}
switch (op.inner) {
case HloOpcode::kAllToAll:
case HloOpcode::kAllGather:
case HloOpcode::kAllReduce:
case HloOpcode::kCollectiveBroadcast:
case HloOpcode::kCollectivePermute:
case HloOpcode::kCopy:
case HloOpcode::kReduceScatter:
return true;
default:
return false;
}
}
return false;
}
bool AsyncTracker::IsSupportedAsyncStart(const HloInstruction& hlo) const {
CanonicalAsyncOp op = GetCanonicalAsyncOp(hlo);
if (op.outer == HloOpcode::kSend || op.outer == HloOpcode::kRecv) {
return config_.schedule_send_recvs;
}
if (op.outer == HloOpcode::kAsyncStart) {
if (hlo.IsAsynchronous() &&
hlo.async_execution_thread() != hlo.parent()->execution_thread()) {
return true;
}
switch (op.inner) {
case HloOpcode::kAllToAll:
case HloOpcode::kAllGather:
case HloOpcode::kAllReduce:
case HloOpcode::kCollectiveBroadcast:
case HloOpcode::kCollectivePermute:
case HloOpcode::kCopy:
case HloOpcode::kReduceScatter:
return true;
default:
return false;
}
}
return false;
}
ResourcesVector AsyncTracker::GetResourcesFromInstructionImpl(
const HloInstruction& hlo) const {
CanonicalAsyncOp op = GetCanonicalAsyncOp(hlo);
auto get_resource_for_op = [](HloOpcode op) -> ResourceType {
switch (op) {
case HloOpcode::kAllReduce:
return ResourceType::kAllReduce;
case HloOpcode::kAllGather:
return ResourceType::kAllGather;
case HloOpcode::kAllToAll:
return ResourceType::kAllToAll;
case HloOpcode::kCollectiveBroadcast:
return ResourceType::kCollectiveBroadcast;
case HloOpcode::kCollectivePermute:
return ResourceType::kCollectivePermute;
case HloOpcode::kCopy:
return ResourceType::kCopy;
case HloOpcode::kReduceScatter:
return ResourceType::kReduceScatter;
default:
return ResourceType::kNoResource;
}
};
if (op.outer == HloOpcode::kAsyncStart || op.outer == HloOpcode::kAsyncDone) {
ResourceType type = get_resource_for_op(op.inner);
if (type == ResourceType::kNoResource) {
return {};
}
ResourceUsageType usage = op.outer == HloOpcode::kAsyncStart
? ResourceUsageType::kResourceRelease
: ResourceUsageType::kResourceOccupy;
return {std::make_pair(ResourceTypeToIndex(type), usage)};
}
switch (hlo.opcode()) {
case HloOpcode::kAfterAll:
return ResourcesVector{
std::make_pair(ResourceTypeToIndex(ResourceType::kSendHost),
ResourceUsageType::kNoResource)};
case HloOpcode::kRecv:
return ResourcesVector{
static_cast<const HloSendRecvInstruction*>(&hlo)->is_host_transfer()
? std::make_pair(
config_.force_send_recv_to_use_same_resource
? ResourceTypeToIndex(ResourceType::kSendHost)
: ResourceTypeToIndex(ResourceType::kRecvHost),
ResourceUsageType::kResourceRelease)
: std::make_pair(ResourceTypeToIndex(ResourceType::kSendRecv),
ResourceUsageType::kResourceRelease)};
case HloOpcode::kSend:
return ResourcesVector{
static_cast<const HloSendRecvInstruction*>(&hlo)->is_host_transfer()
? std::make_pair(ResourceTypeToIndex(ResourceType::kSendHost),
ResourceUsageType::kResourceRelease)
: std::make_pair(ResourceTypeToIndex(ResourceType::kSendRecv),
ResourceUsageType::kResourceRelease)};
case HloOpcode::kRecvDone:
return ResourcesVector{
static_cast<const HloSendRecvInstruction*>(hlo.operand(0))
->is_host_transfer()
? std::make_pair(
config_.force_send_recv_to_use_same_resource
? ResourceTypeToIndex(ResourceType::kSendHost)
: ResourceTypeToIndex(ResourceType::kRecvHost),
ResourceUsageType::kResourceOccupy)
: std::make_pair(ResourceTypeToIndex(ResourceType::kSendRecv),
ResourceUsageType::kResourceOccupy)};
case HloOpcode::kSendDone:
return ResourcesVector{
static_cast<const HloSendRecvInstruction*>(hlo.operand(0))
->is_host_transfer()
? std::make_pair(ResourceTypeToIndex(ResourceType::kSendHost),
ResourceUsageType::kResourceOccupy)
: std::make_pair(ResourceTypeToIndex(ResourceType::kSendRecv),
ResourceUsageType::kResourceOccupy)};
default:
return ResourcesVector{};
}
}
ResourcesVector AsyncTracker::GetResourcesFromInstruction(
const HloInstruction& hlo) const {
if (!resources_cache_.contains(&hlo)) {
resources_cache_.insert({&hlo, GetResourcesFromInstructionImpl(hlo)});
}
return resources_cache_.at(&hlo);
}
int64_t AsyncTracker::GetNumResourcesPerInstruction(
ResourceType resource_type, const HloInstruction& instr) const {
return GetNumResourcesPerInstruction(ResourceTypeToIndex(resource_type),
instr);
}
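// For ops with called computations (while/call/...), counts occupied
// resources of the given type recursively, memoizing one count map per
// computation in async_in_computation_cache_.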
int64_t AsyncTracker::GetNumResourcesPerInstruction(
int64_t resource_type, const HloInstruction& instr) const {
if (instr.called_computations().empty() ||
instr.opcode() == HloOpcode::kAsyncStart ||
instr.opcode() == HloOpcode::kAsyncDone) {
return absl::c_any_of(GetResourcesFromInstruction(instr),
[resource_type](const ResourcePair& resource) {
return resource.second ==
ResourceUsageType::kResourceOccupy &&
(resource_type == resource.first);
})
? 1
: 0;
}
std::function<void(const HloComputation*)> recursively_compute_resource_map =
[this,
&recursively_compute_resource_map](const HloComputation* computation) {
absl::flat_hash_map<int64_t, int64_t> per_opcode_map;
for (HloInstruction* instr : computation->instructions()) {
if (IsSupportedAsyncDone(*instr)) {
for (auto& resource : GetResourcesFromInstruction(*instr)) {
++per_opcode_map[resource.first];
}
}
for (const HloComputation* called_comp :
instr->called_computations()) {
auto it = async_in_computation_cache_.find(called_comp);
if (it == async_in_computation_cache_.end()) {
recursively_compute_resource_map(called_comp);
it = async_in_computation_cache_.find(called_comp);
CHECK(it != async_in_computation_cache_.end());
}
for (auto& called_per_opcode_pair : it->second) {
per_opcode_map[called_per_opcode_pair.first] +=
called_per_opcode_pair.second;
}
}
}
async_in_computation_cache_[computation] = std::move(per_opcode_map);
};
int64_t num_resources = 0;
for (const HloComputation* computation : instr.called_computations()) {
auto it = async_in_computation_cache_.find(computation);
if (it == async_in_computation_cache_.end()) {
recursively_compute_resource_map(computation);
it = async_in_computation_cache_.find(computation);
CHECK(it != async_in_computation_cache_.end());
}
auto opcode_it = it->second.find(resource_type);
if (opcode_it == it->second.end()) {
continue;
}
num_resources += opcode_it->second;
}
return num_resources;
}
void AsyncTracker::SetConcurrentResourceLimits(
absl::flat_hash_map<int64_t, int64_t>& max_concurrent_resource) const {
max_concurrent_resource[ResourceTypeToIndex(
ResourceType::kCollectiveBroadcast)] =
config_.collective_broadcast_overlap_limit;
max_concurrent_resource[ResourceTypeToIndex(
ResourceType::kCollectivePermute)] =
config_.collective_permute_overlap_limit;
max_concurrent_resource[ResourceTypeToIndex(ResourceType::kCopy)] =
config_.copy_overlap_limit;
max_concurrent_resource[ResourceTypeToIndex(ResourceType::kAllToAll)] =
config_.all_to_all_overlap_limit;
max_concurrent_resource[ResourceTypeToIndex(ResourceType::kAllGather)] =
config_.all_gather_overlap_limit;
max_concurrent_resource[ResourceTypeToIndex(ResourceType::kAllReduce)] =
config_.all_reduce_overlap_limit;
max_concurrent_resource[ResourceTypeToIndex(ResourceType::kReduceScatter)] =
config_.reduce_scatter_overlap_limit;
max_concurrent_resource[ResourceTypeToIndex(ResourceType::kSendRecv)] =
config_.send_recv_overlap_limit;
max_concurrent_resource[ResourceTypeToIndex(ResourceType::kSendHost)] =
config_.send_recv_host_overlap_limit;
max_concurrent_resource[ResourceTypeToIndex(ResourceType::kRecvHost)] =
config_.send_recv_host_overlap_limit;
const int64_t first_target_resource =
AsyncTracker::GetFirstTargetDefinedResource();
for (int64_t i = 0; i < GetNumTargetDefinedResources(); ++i) {
max_concurrent_resource[first_target_resource + i] =
GetNumAvailableResources(first_target_resource + i);
}
}
absl::string_view AsyncTracker::GetResourceName(int64_t resource_type) const {
switch (resource_type) {
case ResourceTypeToIndex(ResourceType::kNoResource):
return "kNoResource";
case ResourceTypeToIndex(ResourceType::kAllToAll):
return "kAllToAll";
case ResourceTypeToIndex(ResourceType::kAllGather):
return "kAllGather";
case ResourceTypeToIndex(ResourceType::kAllReduce):
return "kAllReduce";
case ResourceTypeToIndex(ResourceType::kCollectiveBroadcast):
return "kCollectiveBroadcast";
case ResourceTypeToIndex(ResourceType::kCollectivePermute):
return "kCollectivePermute";
case ResourceTypeToIndex(ResourceType::kCopy):
return "kCopy";
case ResourceTypeToIndex(ResourceType::kSendRecv):
return "kSendRecv";
case ResourceTypeToIndex(ResourceType::kSendHost):
return "kSendHost";
case ResourceTypeToIndex(ResourceType::kRecvHost):
return "kRecvHost";
case ResourceTypeToIndex(ResourceType::kReduceScatter):
return "kReduceScatter";
default:
return "Not a valid default resource";
}
}
absl::string_view AsyncTracker::GetResourceUsageName(
ResourceUsageType resource_usage_type) const {
return GetResourceUsageName(ResourceUsageTypeToIndex(resource_usage_type));
}
ResourceHazardType AsyncTracker::GetResourceHazardType(
int64_t resource_type) const {
return ResourceHazardType::kUnshareable;
}
absl::string_view AsyncTracker::GetResourceUsageName(
int64_t resource_usage_type) const {
switch (resource_usage_type) {
case ResourceUsageTypeToIndex(ResourceUsageType::kNoResource):
return "kNoResource";
case ResourceUsageTypeToIndex(ResourceUsageType::kResourceOccupy):
return "kResourceOccupy";
case ResourceUsageTypeToIndex(ResourceUsageType::kResourceRelease):
return "kResourceRelease";
default:
return "Not a valid resource usage type";
}
}
int64_t AsyncTracker::GetNumTargetDefinedResources() const { return 0; }
int64_t AsyncTracker::GetNumAvailableResources(int64_t resource_type) const {
return 0;
}
absl::InlinedVector<int64_t, 1>
AsyncTracker::GetReleasedShareableResourcesFromVector(
const ResourcesVector& resources) const {
return {};
}
absl::InlinedVector<int64_t, 1>
AsyncTracker::GetOccupiedShareableResourcesFromVector(
const ResourcesVector& resources) const {
return {};
}
absl::InlinedVector<int64_t, 1>
AsyncTracker::GetOccupiedSerialResourcesFromVector(
const ResourcesVector& resources) const {
return {};
}
absl::InlinedVector<int64_t, 1>
AsyncTracker::GetReleasedNonextendableResourcesFromVector(
const ResourcesVector& resources) const {
return {};
}
bool AsyncTracker::ReleasesSelectiveResource(const HloGraphNode* node) const {
return absl::c_any_of(
node->GetResources(), [&](const ResourcePair& resource) {
return resource.second == ResourceUsageType::kResourceRelease &&
GetResourceHazardType(resource.first) ==
ResourceHazardType::kSelective;
});
}
bool AsyncTracker::OccupiesSelectiveResource(const HloGraphNode* node) const {
return absl::c_any_of(
node->GetResources(), [&](const ResourcePair& resource) {
return resource.second == ResourceUsageType::kResourceOccupy &&
GetResourceHazardType(resource.first) ==
ResourceHazardType::kSelective;
});
}
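// Walks every scheduled computation once and records, per HloBuffer, its
// size and the instruction that first defines it.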
BufferInfoTracker::BufferInfoTracker(
const HloModule* module, const HloAliasAnalysis* alias_analysis,
const HloCostAnalysis::ShapeSizeFunction& shape_size_bytes) {
buffer_infos_.resize(alias_analysis->buffers().back().id() + 1);
std::function<void(const HloComputation*)> process_computation =
[&process_computation, module, alias_analysis, this,
&shape_size_bytes](const HloComputation* computation) {
const HloInstructionSequence& sequence =
module->schedule().sequence(computation);
for (int idx = 0; idx < sequence.size(); ++idx) {
const HloInstruction* instruction = sequence.instructions()[idx];
for (auto* called_computation : instruction->called_computations()) {
if (called_computation->IsFusionComputation()) {
continue;
}
process_computation(called_computation);
}
ShapeUtil::ForEachSubshape(
instruction->shape(),
[&](const Shape& subshape, const ShapeIndex& index) {
for (const HloBuffer* buffer :
alias_analysis->ComputeBuffersAt(instruction, index)) {
if (buffer_infos_[buffer->id()].value == nullptr) {
buffer_infos_[buffer->id()] =
CreateBufferInfo(buffer, instruction, shape_size_bytes);
}
}
});
}
};
process_computation(module->entry_computation());
}
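// Replays each computation's schedule in reverse, threading the caller's
// live-buffer set into called computations, and caches the resulting memory
// pressure state per computation.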
void ModulePressureState::InitializePressureStates() {
memory_pressure_states_.clear();
std::function<void(HloComputation*,
const MemoryPressureTracker::LiveBufferSet&)>
process_computation = [this, &process_computation](
HloComputation* computation,
const MemoryPressureTracker::LiveBufferSet&
initial_live_buffers) {
const HloInstructionSequence& sequence =
module_->schedule().sequence(computation);
MemoryPressureTracker tracker(hlo_alias_analysis_, buffer_tracker_,
memory_pressure_states_);
tracker.Initialize(computation, initial_live_buffers);
VLOG(6) << "Pressure at bottom for " << computation->name() << ": "
<< tracker.memory_usage();
for (int idx = sequence.size() - 1; idx >= 0; --idx) {
const HloInstruction* instruction = sequence.instructions()[idx];
if (!instruction->called_computations().empty()) {
for (auto* called_computation :
instruction->called_computations()) {
if (called_computation->IsFusionComputation()) {
continue;
}
process_computation(called_computation, tracker.live_buffers());
}
}
VLOG(10) << "Instruction: " << instruction->ToString();
VLOG(10) << "Pressure change: "
<< tracker.MemoryPressureDifference(instruction).first;
VLOG(10) << "Current usage: " << tracker.memory_usage();
tracker.UpdateBuffers(instruction);
VLOG(10) << "Current usage after update: " << tracker.memory_usage();
VLOG(10) << "Current peak after update: "
<< tracker.pressure_state().memory_peak;
}
VLOG(6) << "Pressure peak for " << computation->name() << ": "
<< tracker.pressure_state().memory_peak;
UpdatePressureStateForComputation(computation,
tracker.pressure_state());
};
process_computation(module_->entry_computation(), {});
}
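// Precomputes, for every instruction in the computation, the buffers it
// outputs and the buffers it first defines, then seeds the live set (and the
// initial pressure) from the buffers already live in the caller.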
void MemoryPressureTracker::Initialize(
const HloComputation* computation,
const LiveBufferSet& initial_live_buffers) {
live_memory_usage_ = 0;
initial_memory_pressure_ = 0;
pressure_state_ = MemoryPressureState{};
output_buffers_.clear();
defined_buffers_.clear();
live_buffers_set_.clear();
for (auto* instruction : computation->instructions()) {
auto& output_values = this->output_buffers_[instruction];
auto& defined_values = this->defined_buffers_[instruction];
ShapeUtil::ForEachSubshape(
instruction->shape(),
[&](const Shape& subshape, const ShapeIndex& index) {
for (const HloBuffer* buffer :
hlo_alias_analysis_->ComputeBuffersAt(instruction, index)) {
output_values.push_back(std::make_pair(
buffer_tracker_.GetBufferInfo(buffer->id()), index));
if (absl::c_any_of(buffer->values(), [&](const HloValue* value) {
return InstructionDefinesValue(instruction, value);
})) {
defined_values.push_back(
buffer_tracker_.GetBufferInfo(buffer->id()));
}
}
});
}
if (!initial_live_buffers.empty()) {
for (HloBuffer::Id id : initial_live_buffers) {
auto& buffer = buffer_tracker_.GetBufferInfo(id);
      if (buffer.value->values()[0]->shape().has_layout() &&
          buffer.value->values()[0]->shape().layout().memory_space() !=
              kDefaultMemorySpace) {
continue;
}
live_buffers_[buffer.value->id()] = 1;
initial_memory_pressure_ += buffer.buffer_size;
}
live_buffers_set_ = initial_live_buffers;
} else {
absl::c_fill(live_buffers_, 0);
}
pressure_state_.live_ids_at_bottom = live_buffers_set_;
}
void MemoryPressureTracker::UpdateBuffers(const HloInstruction* instruction) {
int64_t computations_peak = 0;
for (auto* called_comp : instruction->called_computations()) {
if (called_comp->IsFusionComputation()) {
continue;
}
auto it = pressure_state_cache_.find(called_comp);
CHECK(it != pressure_state_cache_.end());
computations_peak = std::max(computations_peak, it->second.memory_peak);
}
if (pressure_state_.memory_peak < live_memory_usage_ + computations_peak) {
pressure_state_.memory_peak = live_memory_usage_ + computations_peak;
}
for (auto* op : instruction->operands()) {
auto& output_values = output_buffers_[op];
for (auto& info : output_values) {
if (ShouldSkipBufferAllocations(instruction, info.second,
info.first.first_definition) ||
(info.first.value->values()[0]->shape().has_layout() &&
info.first.value->values()[0]->shape().layout().memory_space() !=
kDefaultMemorySpace)) {
continue;
}
if (live_buffers_[info.first.value->id()] == 0) {
live_buffers_[info.first.value->id()] = 1;
live_buffers_set_.insert(info.first.value->id());
live_memory_usage_ += info.first.buffer_size;
}
}
}
pressure_state_.memory_peak =
std::max(live_memory_usage_, pressure_state_.memory_peak);
auto it = defined_buffers_.find(instruction);
CHECK(it != defined_buffers_.end());
if (!ShouldSkipBufferReleases(instruction)) {
for (auto& b : it->second) {
if (b.value->values()[0]->shape().has_layout() &&
b.value->values()[0]->shape().layout().memory_space() !=
kDefaultMemorySpace) {
continue;
}
if (live_buffers_[b.value->id()] != 0) {
if (InstructionFirstDefinesBuffer(instruction, b)) {
live_memory_usage_ -= b.buffer_size;
live_buffers_set_.erase(b.value->id());
}
}
}
}
}
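// Computes the (net memory delta, local peak) that UpdateBuffers(instruction)
// would produce, without mutating the tracker.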
std::pair<int64_t, int64_t> MemoryPressureTracker::MemoryPressureDifference(
const HloInstruction* instruction) const {
int64_t increase = 0;
int64_t peak = 0;
if (!instruction->called_computations().empty()) {
int64_t called_comp_peak = 0;
for (auto* called_comp : instruction->called_computations()) {
if (called_comp->IsFusionComputation()) {
continue;
}
auto it = pressure_state_cache_.find(called_comp);
CHECK(it != pressure_state_cache_.end());
peak = called_comp_peak =
std::max(called_comp_peak, it->second.memory_peak);
}
}
for (auto* op : instruction->operands()) {
auto it = output_buffers_.find(op);
CHECK(it != output_buffers_.end());
for (auto& b : it->second) {
if (ShouldSkipBufferAllocations(instruction, b.second,
b.first.first_definition) ||
(b.first.value->values()[0]->shape().has_layout() &&
b.first.value->values()[0]->shape().layout().memory_space() !=
kDefaultMemorySpace)) {
continue;
}
if (!live_buffers_[b.first.value->id()]) {
increase += b.first.buffer_size;
}
}
}
peak = std::max(increase, peak);
auto it = defined_buffers_.find(instruction);
CHECK(it != defined_buffers_.end());
if (!ShouldSkipBufferReleases(instruction)) {
for (auto& b : it->second) {
if (b.value->values()[0]->shape().has_layout() &&
b.value->values()[0]->shape().layout().memory_space() !=
kDefaultMemorySpace) {
continue;
}
if (live_buffers_[b.value->id()]) {
if (InstructionFirstDefinesBuffer(instruction, b)) {
increase -= b.buffer_size;
}
}
}
}
return std::make_pair(increase, peak);
}
DefaultSchedulerCore::ScheduleCandidate InitializeCandidate(
HloGraphNode* node,
const DefaultSchedulerCore::SchedulingState& sched_state) {
DefaultSchedulerCore::ScheduleCandidate cand;
cand.node = node;
return cand;
}
namespace {
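// Minimum, over the other ready nodes, of their distance in graph hops to the
// closest occupier of a selective resource; used to judge whether delaying a
// node for selective overlap is worthwhile.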
int64_t GetNumHopsToClosestSelectiveOverlap(
const DefaultSchedulerCore::ReadyQueueSet& ready_set,
const HloGraphNode* node) {
int64_t num_hops_to_closest_selective_resource_occupier =
std::numeric_limits<int64_t>::max();
for (const HloGraphNode* n : ready_set) {
if (n == node) {
continue;
}
num_hops_to_closest_selective_resource_occupier =
std::min(num_hops_to_closest_selective_resource_occupier,
n->GetNumHopsToClosestSelectiveResourceOccupier());
}
return num_hops_to_closest_selective_resource_occupier;
}
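// Comparator over ready-set candidates. Each ChooseBestCandidate call below
// implements one tie-breaking heuristic; rules are tried strictly in priority
// order, and the string labels the deciding rule.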
class ReadySetLt {
public:
explicit ReadySetLt(
const DefaultSchedulerCore::SchedulingState* sched_state,
DefaultSchedulerCore::TargetSchedulingRule target_scheduling_rule,
DefaultSchedulerCore::TargetSchedulingRule early_target_scheduling_rule)
: sched_state_(*sched_state),
target_scheduling_rule_(target_scheduling_rule),
early_target_scheduling_rule_(early_target_scheduling_rule) {}
DefaultSchedulerCore::CandidateResult operator()(
DefaultSchedulerCore::ScheduleCandidate& a,
DefaultSchedulerCore::ScheduleCandidate& b) const {
if (auto value = DefaultSchedulerCore::ChooseBestCandidate(
a.node->GetForceEarly(), a, b.node->GetForceEarly(), b,
"kForceEarly")) {
return *value;
}
if (auto value = DefaultSchedulerCore::ChooseBestCandidate(
!a.node->GetForceDelay(), a, !b.node->GetForceDelay(), b,
"kForceDelay")) {
return *value;
}
if (auto value = DefaultSchedulerCore::ChooseBestCandidate(
IsNop(*a.node), a, IsNop(*b.node), b, "kIsNop")) {
return *value;
}
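    // Memory-pressure rules engage only once usage exceeds half the
    // configured limit; once over the limit, candidates that reduce pressure
    // (or, optionally, deeper nodes) win.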
std::pair<int64_t, int64_t> a_increase = std::make_pair(0LL, 0LL);
std::pair<int64_t, int64_t> b_increase = std::make_pair(0LL, 0LL);
if (sched_state_.config.memory_limit != UINT64_MAX &&
sched_state_.memory_pressure_tracker->memory_usage() >
(sched_state_.config.memory_limit / 2)) {
a_increase = GetMemoryPressureChanges(a);
b_increase = GetMemoryPressureChanges(b);
if (sched_state_.memory_pressure_tracker->memory_usage() >=
sched_state_.config.memory_limit) {
if (sched_state_.config.depth_based_memory_pressure_reduction) {
if (auto value = DefaultSchedulerCore::ChooseBestCandidate(
a_increase.first < 0 && a_increase.first < b_increase.first,
a,
b_increase.first < 0 && b_increase.first < a_increase.first,
b, "kOnlyDecreaseMemoryOverLimit")) {
return *value;
}
if (auto value = DefaultSchedulerCore::ChooseBestCandidate(
a.node->GetGraphDepth() > b.node->GetGraphDepth(), a,
b.node->GetGraphDepth() > a.node->GetGraphDepth(), b,
"kDepthOverLimit")) {
return *value;
}
}
if (auto value = DefaultSchedulerCore::ChooseBestCandidate(
a_increase.first < b_increase.first, a,
b_increase.first < a_increase.first, b,
"kDecreaseMemoryOverLimit")) {
return *value;
}
}
if (auto value = DefaultSchedulerCore::ChooseBestCandidate(
a_increase.second +
sched_state_.memory_pressure_tracker->memory_usage() <=
sched_state_.config.memory_limit,
a,
b_increase.second +
sched_state_.memory_pressure_tracker->memory_usage() <=
sched_state_.config.memory_limit,
b, "kMemoryPeakOverLimit")) {
return *value;
}
}
if (early_target_scheduling_rule_) {
if (auto value = early_target_scheduling_rule_(a, b)) {
return *value;
}
}
if (auto value = DefaultSchedulerCore::ChooseBestCandidate(
ShouldScheduleAsyncDone(a), a, ShouldScheduleAsyncDone(b), b,
"kScheduleDone")) {
return *value;
}
if (auto value = DefaultSchedulerCore::ChooseBestCandidate(
PastDueCyclesForNonextendableResource(a) >
PastDueCyclesForNonextendableResource(b),
a,
PastDueCyclesForNonextendableResource(b) >
PastDueCyclesForNonextendableResource(a),
b, "kReleaseNonextendable")) {
return *value;
}
if (sched_state_.config.enable_release_start_policy) {
const ApproximateLatencyEstimator::TimeCost a_ready_interval =
a.node->GetReadyTime() - sched_state_.current_time;
const ApproximateLatencyEstimator::TimeCost b_ready_interval =
b.node->GetReadyTime() - sched_state_.current_time;
bool a_ready_and_release =
a_ready_interval <= 0 &&
a.node->DoesReleaseResource(ResourceType::kCollectivePermute);
bool b_ready_and_release =
b_ready_interval <= 0 &&
b.node->DoesReleaseResource(ResourceType::kCollectivePermute);
if (auto value = DefaultSchedulerCore::ChooseBestCandidate(
a_ready_and_release, a, b_ready_and_release, b,
"kScheduleStart")) {
return *value;
}
if (a_ready_and_release && b_ready_and_release) {
if (auto value = DefaultSchedulerCore::ChooseBestCandidate(
a_ready_interval < b_ready_interval, a,
b_ready_interval < a_ready_interval, b, "kScheduleStart")) {
return *value;
}
}
}
auto async_depth_0_candidate =
[this](DefaultSchedulerCore::ScheduleCandidate& a,
DefaultSchedulerCore::ScheduleCandidate& b)
-> std::optional<DefaultSchedulerCore::CandidateResult> {
if (auto value = DefaultSchedulerCore::ChooseBestCandidate(
!(a.node->DoesReleaseAnyResource() &&
a.node->GetAsyncDepth() == 0 &&
!IsResourceConstrained(a)),
a,
!(b.node->DoesReleaseAnyResource() &&
b.node->GetAsyncDepth() == 0 && !IsResourceConstrained(b)),
b, "kStartAtZeroDepth")) {
return value;
}
return std::nullopt;
};
if (sched_state_.config.aggressive_scheduling_policies &&
sched_state_.config.prioritize_async_depth_over_stall) {
if (auto value = async_depth_0_candidate(a, b)) {
return *value;
}
}
const ApproximateLatencyEstimator::TimeCost a_ready_interval =
std::max(a.node->GetReadyTime() - sched_state_.current_time, 0.0);
const ApproximateLatencyEstimator::TimeCost b_ready_interval =
std::max(b.node->GetReadyTime() - sched_state_.current_time, 0.0);
if (auto value = DefaultSchedulerCore::ChooseBestCandidate(
a_ready_interval < b_ready_interval, a,
b_ready_interval < a_ready_interval, b, "kLessStall")) {
return *value;
}
if (sched_state_.config.resource_serializing) {
const int64_t a_num_conflicting_resources =
GetNumConflictingSerialResources(a);
const int64_t b_num_conflicting_resources =
GetNumConflictingSerialResources(b);
if (auto value = DefaultSchedulerCore::ChooseBestCandidate(
a_num_conflicting_resources < b_num_conflicting_resources, a,
b_num_conflicting_resources < a_num_conflicting_resources, b,
"kLessSerialResourceConflict")) {
return *value;
}
}
if (sched_state_.config.aggressive_scheduling_policies &&
!sched_state_.config.prioritize_async_depth_over_stall) {
if (auto value = async_depth_0_candidate(a, b)) {
return *value;
}
}
if (auto value = DefaultSchedulerCore::ChooseBestCandidate(
a.node->DoesReleaseAnyResource() && IsResourceConstrained(a), a,
b.node->DoesReleaseAnyResource() && IsResourceConstrained(b), b,
"kFreeBackedupResource")) {
return *value;
}
if (sched_state_.config.aggressive_scheduling_policies) {
if (auto value = DefaultSchedulerCore::ChooseBestCandidate(
a.node->GetAsyncDepth() > b.node->GetAsyncDepth(), a,
b.node->GetAsyncDepth() > a.node->GetAsyncDepth(), b,
"kAsyncDepth")) {
return *value;
}
if (!sched_state_.next_ready_stack.empty()) {
HloGraphNode::TimeCost latest_ready =
sched_state_.next_ready_stack.front()->GetReadyTime();
HloGraphNode::TimeCost a_cost_diff = std::abs(
latest_ready - sched_state_.current_time - a.node->GetCost());
HloGraphNode::TimeCost b_cost_diff = std::abs(
latest_ready - sched_state_.current_time - b.node->GetCost());
if (auto value = DefaultSchedulerCore::ChooseBestCandidate(
!a.node->DoesReleaseAnyResource() && a_cost_diff < b_cost_diff,
a,
!b.node->DoesReleaseAnyResource() && b_cost_diff < a_cost_diff,
b, "kAvoidWaste")) {
return *value;
}
}
}
bool a_operands = absl::c_any_of(
a.node->GetInstr().operands(),
[async_tracker = sched_state_.async_tracker](const HloInstruction* i) {
return async_tracker->IsSupportedAsyncDone(*i);
});
bool b_operands = absl::c_any_of(
b.node->GetInstr().operands(),
[async_tracker = sched_state_.async_tracker](const HloInstruction* i) {
return async_tracker->IsSupportedAsyncDone(*i);
});
if (auto value = DefaultSchedulerCore::ChooseBestCandidate(
a_operands, a, b_operands, b, "kUnlockDone")) {
return *value;
}
if (target_scheduling_rule_) {
if (auto value = target_scheduling_rule_(a, b)) {
return *value;
}
}
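  // Note the deliberate a/b swap in the rule below: if `a` is valuable for
  // selective overlap and within max_distance hops of a selective-resource
  // occupier, `b` wins (and vice versa), keeping such nodes available to
  // overlap with the selective resource.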
if (sched_state_.config.enable_selective_resources &&
sched_state_.selective_resource_releasers.empty()) {
int64_t distance_to_selective_overlap_for_a =
GetNumHopsToClosestSelectiveOverlap(sched_state_.ready_set, a.node);
int64_t distance_to_selective_overlap_for_b =
GetNumHopsToClosestSelectiveOverlap(sched_state_.ready_set, b.node);
int64_t max_distance =
sched_state_.config.max_hops_to_closest_selective_overlap;
if (auto value = DefaultSchedulerCore::ChooseBestCandidate(
(a.node->GetValuableForSelectiveOverlap() &&
distance_to_selective_overlap_for_a <= max_distance),
b,
(b.node->GetValuableForSelectiveOverlap() &&
distance_to_selective_overlap_for_b <= max_distance),
a, "kNotValuableForSelectiveOverlap")) {
return *value;
}
}
if (sched_state_.config.aggressive_scheduling_policies) {
int ready_if_a_scheduled = ReadyIfScheduled(*a.node);
int ready_if_b_scheduled = ReadyIfScheduled(*b.node);
if (auto value = DefaultSchedulerCore::ChooseBestCandidate(
ready_if_a_scheduled > ready_if_b_scheduled, a,
ready_if_b_scheduled > ready_if_a_scheduled, b,
"kCreatesMoreReadyNodes")) {
return *value;
}
}
if (auto value = DefaultSchedulerCore::ChooseBestCandidate(
a_increase.first < 0, a, b_increase.first < 0, b,
"kDecreaseMemory")) {
return *value;
}
if (sched_state_.sched_graph.OriginalInstructionPosition(
&a.node->GetInstr()) >
sched_state_.sched_graph.OriginalInstructionPosition(
&b.node->GetInstr())) {
return {a, "kOriginalOrder"};
}
return {b, "kOriginalOrder"};
}
private:
const DefaultSchedulerCore::SchedulingState& sched_state_;
DefaultSchedulerCore::TargetSchedulingRule target_scheduling_rule_;
DefaultSchedulerCore::TargetSchedulingRule early_target_scheduling_rule_;
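  // Counts how many predecessors of `gn` would become ready if `gn` were
  // scheduled, i.e. predecessors for which `gn` is their only unscheduled
  // successor left (outdegree == 1).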
int ReadyIfScheduled(const HloGraphNode& gn) const {
int ready_nodes_if_scheduled = 0;
for (auto& pred : gn.GetPredecessors()) {
if (pred.Target().GetOutdegree() == 1) {
++ready_nodes_if_scheduled;
}
}
return ready_nodes_if_scheduled;
}
static bool IsNop(const HloGraphNode& gn) {
return IsNopInstruction(gn.GetInstr());
}
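  // Memoized: a candidate is resource constrained if, for any resource it
  // touches, the remaining concurrent-resource budget is exhausted
  // (max_concurrent_resource == 0) while other users of that resource are
  // still waiting in the queue.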
bool IsResourceConstrained(
DefaultSchedulerCore::ScheduleCandidate& cand) const {
if (cand.resource_constrained) {
return *cand.resource_constrained;
}
if (cand.node->GetResources().empty()) {
cand.resource_constrained = false;
return *(cand.resource_constrained);
}
cand.resource_constrained = false;
for (const auto& [resource_type, usage_type] : cand.node->GetResources()) {
auto max_it = sched_state_.max_concurrent_resource.find(resource_type);
auto res_it = sched_state_.resource_users_in_queue.find(resource_type);
cand.resource_constrained =
max_it != sched_state_.max_concurrent_resource.end() &&
max_it->second == 0 &&
res_it != sched_state_.resource_users_in_queue.end() &&
res_it->second > 0;
if (*cand.resource_constrained) {
return *cand.resource_constrained;
}
}
return *cand.resource_constrained;
}
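  // An async-done candidate is scheduled eagerly only if it occupies some
  // resource; host send-dones may additionally be delayed (see
  // ShouldDelaySendHostDone below).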
bool ShouldScheduleAsyncDone(
DefaultSchedulerCore::ScheduleCandidate& gn_cand) const {
if (!gn_cand.node->DoesOccupyAnyResource()) {
return false;
}
return !ShouldDelaySendHostDone(gn_cand);
}
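  // How many cycles a candidate releasing a non-extendable resource has been
  // sitting past its ready time; used above to prioritize draining such
  // resources.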
HloGraphNode::TimeCost PastDueCyclesForNonextendableResource(
DefaultSchedulerCore::ScheduleCandidate& cand) const {
if (sched_state_.async_tracker
->GetReleasedNonextendableResourcesFromVector(
cand.node->GetResources())
.empty()) {
return 0.0;
}
return std::max(sched_state_.current_time - cand.node->GetReadyTime(), 0.0);
}
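  // Heuristic for host sends: delay a send-done when the estimated ready time
  // of its matching send-start is further from the current time than the
  // start-to-done latency, since scheduling the done now would expose latency
  // that cannot be hidden. (Scheduling runs bottom-up, so ready times here
  // are derived from successors.)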
bool ShouldDelaySendHostDone(
DefaultSchedulerCore::ScheduleCandidate& gn_cand) const {
const HloGraphNode& gn = *gn_cand.node;
if (!gn.UsesResourceType(ResourceType::kSendHost).has_value() ||
gn.GetInstr().opcode() != HloOpcode::kSendDone) {
return false;
}
const HloGraphNode& start =
sched_state_.sched_graph.GetNode(gn.GetInstr().operand(0));
const LatencyEstimator::TimeCost latency =
sched_state_.latency_estimator->GetLatencyBetween(start, gn);
if (!gn_cand.estimated_connected_send_ready_time.has_value()) {
HloGraphNode::TimeCost start_ready_time = 0;
for (const auto& succ : start.GetSuccessors()) {
if (succ.Target().GetReadyTime() >=
std::numeric_limits<HloGraphNode::TimeCost>::max()) {
return false;
}
start_ready_time = std::max(
start_ready_time, succ.Latency() + succ.Target().GetReadyTime());
}
gn_cand.estimated_connected_send_ready_time = start_ready_time;
}
if (*gn_cand.estimated_connected_send_ready_time -
sched_state_.current_time <=
latency) {
return false;
}
return true;
}
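  // Returns the cached (net memory delta, peak memory use) pair for
  // scheduling this candidate. For a supported async-done, the matching
  // async-start's effect is folded in (min of the deltas, max of the peaks),
  // since the two are effectively scheduled as a pair.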
std::pair<int64_t, int64_t> GetMemoryPressureChanges(
DefaultSchedulerCore::ScheduleCandidate& cand) const {
if (cand.pressure_change) {
return *cand.pressure_change;
}
std::optional<std::pair<int64_t, int64_t>> start_result;
if (this->sched_state_.async_tracker->IsSupportedAsyncDone(
cand.node->GetInstr())) {
const HloInstruction* start = cand.node->GetInstr().operand_count() > 0
? cand.node->GetInstr().operand(0)
: nullptr;
if (start != nullptr &&
this->sched_state_.async_tracker->IsSupportedAsyncStart(*start)) {
start_result =
sched_state_.memory_pressure_tracker->MemoryPressureDifference(
start);
}
}
cand.pressure_change =
sched_state_.memory_pressure_tracker->MemoryPressureDifference(
&cand.node->GetInstr());
if (start_result.has_value()) {
cand.pressure_change->first =
std::min(start_result->first, cand.pressure_change->first);
cand.pressure_change->second =
std::max(start_result->second, cand.pressure_change->second);
}
return *cand.pressure_change;
}
int64_t GetNumConflictingSerialResources(
DefaultSchedulerCore::ScheduleCandidate& cand) const {
auto resources =
sched_state_.async_tracker->GetOccupiedSerialResourcesFromVector(
cand.node->GetResources());
int64_t num_conflicting_resources = 0;
for (int64_t resource : resources) {
if (!sched_state_.resources_in_flight.contains(resource)) continue;
num_conflicting_resources +=
sched_state_.resources_in_flight.at(resource);
}
return num_conflicting_resources;
}
};
enum SkipNodeReason {
kShouldSkipNodeFunction,
kExceedsOverlapLimit,
};
absl::string_view SkipNodeReasonString(SkipNodeReason reason) {
switch (reason) {
case SkipNodeReason::kShouldSkipNodeFunction:
return "Skipped due to kShouldSkipNodeFunction.";
case SkipNodeReason::kExceedsOverlapLimit:
return "Skipped due to kExceedsOverlapLimit.";
}
}
}
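// Scans the ready set linearly, skipping nodes rejected by should_skip_node
// or whose resource needs would exceed the configured concurrent-overlap
// limits, and keeps the best candidate according to ReadySetLt. The winner is
// removed from the ready set with a swap-and-pop; if nothing is schedulable,
// an InternalError listing the skipped nodes is returned.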
absl::StatusOr<HloGraphNode*>
DefaultSchedulerCore::FindAndExtractBestNodeAvailable(
DefaultSchedulerCore::SchedulingState& sched_state,
DefaultSchedulerCore::ShouldSkipNodeFunction should_skip_node) {
absl::InlinedVector<std::pair<HloGraphNode*, SkipNodeReason>, 2>
skipped_nodes_and_reasons;
auto scheduling_instruction_crosses_overlap_limit =
[&sched_state](const HloInstruction& instr) {
for (const auto& [resource, limit] :
sched_state.max_concurrent_resource) {
auto it = sched_state.resources_in_flight.find(resource);
if (it == sched_state.resources_in_flight.end() || it->second == 0) {
continue;
}
const int64_t num_resources_needed =
sched_state.async_tracker->GetNumResourcesPerInstruction(resource,
instr);
if (limit < num_resources_needed) {
return true;
}
}
return false;
};
VLOG(2) << "Current time: " << sched_state.current_time;
ReadySetLt ready_lt{&sched_state, target_scheduling_rule_,
early_target_scheduling_rule_};
ScheduleCandidate ready_chosen;
auto chosen_it = sched_state.ready_set.end();
for (auto ready_node_it = sched_state.ready_set.begin(),
e = sched_state.ready_set.end();
ready_node_it != e; ++ready_node_it) {
if (should_skip_node && should_skip_node(*ready_node_it)) {
if (ready_chosen.node == nullptr) {
skipped_nodes_and_reasons.push_back(
{*ready_node_it, SkipNodeReason::kShouldSkipNodeFunction});
}
continue;
}
if (scheduling_instruction_crosses_overlap_limit(
(*ready_node_it)->GetInstr())) {
if (ready_chosen.node == nullptr) {
skipped_nodes_and_reasons.push_back(
{*ready_node_it, SkipNodeReason::kExceedsOverlapLimit});
}
continue;
}
ScheduleCandidate ready_candidate =
InitializeCandidate(*ready_node_it, sched_state);
if (ready_chosen.node == nullptr) {
ready_chosen = ready_candidate;
chosen_it = ready_node_it;
VLOG(2) << "Choosing from ready (" << ready_chosen.node->GetInstr().name()
<< ") Reason: First Candidate";
continue;
}
CandidateResult cand_result = ready_lt(ready_candidate, ready_chosen);
const bool new_candidate_selected =
cand_result.result.node == *ready_node_it;
auto print_pressure_change =
[](const std::optional<std::pair<int64_t, int64_t>>& p) {
if (p.has_value()) {
return std::to_string(p.value().first);
}
return std::string("N/A");
};
VLOG(2) << "Choosing from ready ("
<< (new_candidate_selected ? ready_candidate.node->GetInstr().name()
: ready_chosen.node->GetInstr().name())
<< ") vs ("
<< (new_candidate_selected
? ready_chosen.node->GetInstr().name()
: ready_candidate.node->GetInstr().name())
<< ") Reason: " << cand_result.reason << " mem pressure chosen "
<< print_pressure_change(
(new_candidate_selected ? ready_candidate : ready_chosen)
.pressure_change)
<< " mem pressure other "
<< print_pressure_change(
(new_candidate_selected ? ready_chosen : ready_candidate)
.pressure_change);
if (new_candidate_selected) {
ready_chosen = cand_result.result;
chosen_it = ready_node_it;
}
}
if (ready_chosen.node == nullptr) {
return absl::InternalError(absl::StrCat(
"FindAndExtractBestNodeAvailable failed to find a node to "
"schedule, skipped nodes: ",
absl::StrJoin(skipped_nodes_and_reasons, "; ",
[](std::string* out, const auto& pair) {
absl::StrAppend(out, pair.first->GetInstr().name(),
": ",
SkipNodeReasonString(pair.second));
})));
}
CHECK(chosen_it != sched_state.ready_set.end());
std::swap(*chosen_it, sched_state.ready_set.back());
sched_state.ready_set.pop_back();
return ready_chosen.node;
}
void DefaultSchedulerCore::LogInstruction(const HloInstruction* instr) const {
VLOG(5) << instr->ToString();
}
void PrintOccupierList(
std::vector<std::pair<HloEdge*, HloGraphNode::TimeCost>>& occupiers) {
for (int64_t i = 0; i < occupiers.size(); i++) {
VLOG(3) << "\tOccupier " << i << ": "
<< occupiers[i].first->Target().GetInstr().name()
<< ", projected finish time: " << occupiers[i].second
<< " original latency: " << occupiers[i].first->OriginalLatency()
<< " latency: " << occupiers[i].first->Latency();
}
}
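// The occupier lists model a shareable resource as processor sharing: each of
// the k live occupiers progresses at rate 1/k, and every entry stores the
// edge's projected finish time (PFT) under that assumption.
// DeleteOccupierFromResource removes `edge` and moves the remaining PFTs
// earlier to account for the freed bandwidth.
//
// Illustrative example (assumed values): with current_time = 0 and
// occupiers = {(e1, 3), (e2, 6)}, deleting e1 frees its half of the bandwidth
// over [0, 3), saving 3 / 2 = 1.5 time units, so e2's PFT becomes
// 6 - 1.5 = 4.5, exactly when e2 would finish running alone.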
bool DefaultSchedulerCore::DeleteOccupierFromResource(
HloGraphNode::TimeCost current_time, HloEdge& edge,
std::vector<std::pair<HloEdge*, HloGraphNode::TimeCost>>& occupiers) {
  if (!absl::c_any_of(
          occupiers,
          [&edge](const std::pair<HloEdge*, HloGraphNode::TimeCost>& element) {
            return element.first == &edge;
          })) {
    return false;
  }
std::vector<std::pair<HloEdge*, HloGraphNode::TimeCost>>::iterator it =
occupiers.begin();
int64_t num_occupiers = occupiers.size();
HloGraphNode::TimeCost prev_time = current_time;
HloGraphNode::TimeCost accumulated_saved_time = 0;
while (it != occupiers.end() && it->first != &edge) {
if (it->second <= current_time) {
num_occupiers--;
it++;
continue;
}
HloGraphNode::TimeCost remaining_time_of_edge = it->second - prev_time;
prev_time = it->second;
CHECK_GT(num_occupiers, 0);
HloGraphNode::TimeCost current_saved_time =
remaining_time_of_edge / num_occupiers;
accumulated_saved_time += current_saved_time;
CHECK_GE(it->second, accumulated_saved_time);
it->second -= accumulated_saved_time;
num_occupiers--;
it++;
}
CHECK(it != occupiers.end());
if (it->second > current_time) {
HloGraphNode::TimeCost remaining_time_of_edge = it->second - prev_time;
HloGraphNode::TimeCost current_saved_time =
remaining_time_of_edge / num_occupiers;
accumulated_saved_time += current_saved_time;
}
it = occupiers.erase(it);
for (; it != occupiers.end(); it++) {
it->second -= accumulated_saved_time;
}
return true;
}
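// Inserts `new_edge` into the occupier list, delaying the PFTs of the
// occupiers it now shares bandwidth with.
//
// Illustrative example (assumed values): with current_time = 0,
// occupiers = {(e1, 3)} and new_edge.OriginalLatency() == 4, sharing halves
// both rates, so e1's PFT moves from 3 to 6; by then new_edge has 4 - 3 = 1
// unit of work left and finishes alone at 7. The final list is
// {(e1, 6), (new_edge, 7)}, and the CHECK below verifies that the accumulated
// delay equals new_edge's original latency (within a small tolerance).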
bool DefaultSchedulerCore::AddOccupierToResource(
HloGraphNode::TimeCost current_time, HloEdge& new_edge,
std::vector<std::pair<HloEdge*, HloGraphNode::TimeCost>>& occupiers) {
CHECK(new_edge.OriginalLatency() > 0 && current_time >= 0);
auto new_edge_remaining = new_edge.OriginalLatency();
std::vector<std::pair<HloEdge*, HloGraphNode::TimeCost>>::iterator it =
occupiers.begin();
int64_t num_occupiers = occupiers.size();
HloGraphNode::TimeCost prev_time = current_time;
HloGraphNode::TimeCost accumulated_delay = 0;
while (it != occupiers.end() &&
it->second - prev_time <= new_edge_remaining * num_occupiers) {
if (it->second <= current_time) {
num_occupiers--;
it++;
continue;
}
HloGraphNode::TimeCost remaining_time_of_edge = it->second - prev_time;
prev_time = it->second;
CHECK_GT(num_occupiers, 0);
HloGraphNode::TimeCost current_delay =
remaining_time_of_edge / num_occupiers;
new_edge_remaining -= current_delay;
accumulated_delay += current_delay;
it->second += accumulated_delay;
num_occupiers--;
it++;
}
num_occupiers++;
HloGraphNode::TimeCost adjusted_remaining_time =
new_edge_remaining * num_occupiers;
it = occupiers.insert(
it, std::make_pair(&new_edge, prev_time + accumulated_delay +
adjusted_remaining_time));
it++;
accumulated_delay += new_edge_remaining;
CHECK(new_edge.OriginalLatency() - 0.0001 < accumulated_delay &&
accumulated_delay < new_edge.OriginalLatency() + 0.0001);
for (; it != occupiers.end(); it++) {
it->second += accumulated_delay;
}
return true;
}
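// Schedules `n` by appending it to the reversed sequence (scheduling runs
// bottom-up, so a node's "successors" are consumers that were already
// scheduled). The node's ready time is computed from its successors (and from
// shared-resource occupancy when resource sharing is enabled); resource
// counters and occupier lists are updated, predecessor outdegrees are
// decremented, and newly ready predecessors are pushed onto the ready set.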
absl::StatusOr<HloGraphNode::TimeCost> DefaultSchedulerCore::ScheduleNode(
HloGraphNode* n, DefaultSchedulerCore::SchedulingState* sched_state) const {
sched_state->new_sequence_reversed.push_back(
const_cast<HloInstruction*>(&n->GetInstr()));
n->SetScheduled();
if (sched_state->config.enable_selective_resources &&
n->ReleasesSelectiveResource()) {
auto it = std::find(sched_state->selective_resource_releasers.begin(),
sched_state->selective_resource_releasers.end(), n);
if (it == sched_state->selective_resource_releasers.end()) {
LOG(WARNING) << "Selective resource releasers list does not contain node "
"that releases a selective resource: "
<< n->ToString();
} else {
sched_state->selective_resource_releasers.erase(it);
}
}
if (sched_state->config.enable_selective_resources &&
!n->GetValuableForSelectiveOverlap()) {
for (HloGraphNode* node : sched_state->selective_resource_releasers) {
node->SetReadyTime(node->GetReadyTime() + n->GetCost());
}
}
for (auto& resource : n->GetResources()) {
if (resource.second == ResourceUsageType::kResourceRelease) {
++(sched_state->max_concurrent_resource[resource.first]);
} else if (resource.second == ResourceUsageType::kResourceOccupy) {
--(sched_state->max_concurrent_resource[resource.first]);
--(sched_state->resource_users_in_queue[resource.first]);
}
}
HloGraphNode::TimeCost schedule_time = sched_state->current_time;
for (const HloEdge& pred : n->GetSuccessors()) {
const HloGraphNode::TimeCost time_from_edge =
pred.Target().GetReadyTime() + pred.Latency();
schedule_time = std::max(schedule_time, time_from_edge);
if (sched_state->config.resource_sharing) {
auto occupied_resources = n->GetShareableResourcesOnEdge(pred);
for (const int64_t resource : occupied_resources) {
        const auto& occupiers =
            sched_state->shareable_resource_occupiers[resource];
for (auto [occupier_edge, edge_pft] : occupiers) {
if (occupier_edge == &pred) {
VLOG(3) << "Ready time of scheduled node " << n->GetInstr().name()
<< " before update with pft: " << edge_pft
<< ", ready_time: " << schedule_time;
schedule_time = std::max(schedule_time, edge_pft);
VLOG(3) << "Ready time of scheduled node " << n->GetInstr().name()
<< " after update with pft: " << edge_pft
<< ", ready_time: " << schedule_time;
}
}
}
}
}
n->SetReadyTime(schedule_time);
HloGraphNode::TimeCost current_time = schedule_time + n->GetCost();
if (sched_state->config.resource_sharing) {
for (HloEdge& edge : n->GetSuccessors()) {
auto released_resources = n->GetShareableResourcesOnEdge(edge);
for (const int64_t resource : released_resources) {
CHECK(DeleteOccupierFromResource(
schedule_time, edge,
sched_state->shareable_resource_occupiers[resource]));
if (VLOG_IS_ON(2)) {
VLOG(3) << "Occupier list for "
<< sched_state->async_tracker->GetResourceName(resource)
<< ": ";
PrintOccupierList(
sched_state->shareable_resource_occupiers[resource]);
}
}
}
for (HloEdge& edge : n->GetPredecessors()) {
for (HloEdge& inverse_edge : edge.Target().GetSuccessors()) {
if (&(inverse_edge.Target()) == n) {
auto occupied_resources =
edge.Target().GetShareableResourcesOnEdge(inverse_edge);
for (const int64_t resource : occupied_resources) {
CHECK(AddOccupierToResource(
current_time, inverse_edge,
sched_state->shareable_resource_occupiers[resource]));
if (VLOG_IS_ON(2)) {
VLOG(3) << "Occupier list for "
<< sched_state->async_tracker->GetResourceName(resource)
<< ": ";
PrintOccupierList(
sched_state->shareable_resource_occupiers[resource]);
}
}
break;
}
}
}
}
auto ready_time_cmp = [](const HloGraphNode* a, const HloGraphNode* b) {
return a->GetReadyTime() > b->GetReadyTime();
};
while (!sched_state->next_ready_stack.empty()) {
const HloGraphNode* node = sched_state->next_ready_stack.front();
if (node->GetReadyTime() < current_time) {
std::pop_heap(sched_state->next_ready_stack.begin(),
sched_state->next_ready_stack.end(), ready_time_cmp);
sched_state->next_ready_stack.pop_back();
continue;
}
break;
}
for (HloEdge& edge : n->GetPredecessors()) {
const int64_t current_outdegree = edge.Target().GetOutdegree();
if (current_outdegree != 1) {
edge.Target().SetOutdegree(current_outdegree - 1);
continue;
}
edge.Target().SetOutdegree(0);
LatencyEstimator::TimeCost ready_time = current_time;
for (const HloEdge& pred : edge.Target().GetSuccessors()) {
const LatencyEstimator::TimeCost edge_time =
pred.Target().GetReadyTime() + pred.Latency();
ready_time = std::max(ready_time, edge_time);
if (sched_state->config.resource_sharing) {
auto occupied_resources =
edge.Target().GetShareableResourcesOnEdge(pred);
for (const int64_t resource : occupied_resources) {
          const auto& occupiers =
              sched_state->shareable_resource_occupiers[resource];
for (auto [occupier_edge, edge_pft] : occupiers) {
if (occupier_edge == &pred) {
VLOG(3) << "Ready time of predecessor "
<< edge.Target().GetInstr().name()
<< " before update with pft: " << edge_pft
<< ", ready_time: " << ready_time;
ready_time = std::max(ready_time, edge_pft);
VLOG(3) << "Ready time of predecessor "
<< edge.Target().GetInstr().name()
<< " after update with pft: " << edge_pft
<< ", ready_time: " << ready_time;
}
}
}
}
}
for (auto& resource : edge.Target().GetResources()) {
if (resource.second == ResourceUsageType::kResourceOccupy) {
++(sched_state->resource_users_in_queue[resource.first]);
}
}
edge.Target().SetReadyTime(ready_time);
sched_state->ready_set.push_back(&edge.Target());
if (edge.Target().GetReadyTime() > current_time) {
sched_state->next_ready_stack.push_back(&edge.Target());
std::push_heap(sched_state->next_ready_stack.begin(),
sched_state->next_ready_stack.end(), ready_time_cmp);
}
if (sched_state->config.enable_selective_resources &&
edge.Target().ReleasesSelectiveResource()) {
sched_state->selective_resource_releasers.push_back(&edge.Target());
}
}
++sched_state->scheduled_count;
for (auto& resource : n->GetResources()) {
if (resource.second == ResourceUsageType::kResourceRelease) {
--sched_state->resources_in_flight[resource.first];
} else if (resource.second == ResourceUsageType::kResourceOccupy) {
++sched_state->resources_in_flight[resource.first];
}
}
VLOG(10) << "Memory pressure before schedule: "
<< sched_state->memory_pressure_tracker->memory_usage();
VLOG(10)
<< "Memory peak before schedule: "
<< sched_state->memory_pressure_tracker->pressure_state().memory_peak;
sched_state->memory_pressure_tracker->UpdateBuffers(&n->GetInstr());
VLOG(10) << "Memory pressure after schedule: "
<< sched_state->memory_pressure_tracker->memory_usage();
VLOG(10)
<< "Memory peak after schedule: "
<< sched_state->memory_pressure_tracker->pressure_state().memory_peak;
return current_time;
}
std::string HloEdge::ToString() const {
return absl::StrCat("\tEdge: ", target_->GetInstr().name(),
" latency: ", Latency(), "\n");
}
bool HloScheduleGraph::IsPredecessorTransitively(
const HloGraphNode* node, const HloGraphNode* possible_predecessor) {
absl::flat_hash_set<const HloGraphNode*> visited = {possible_predecessor};
std::vector<const HloGraphNode*> to_visit_queue = {node};
while (!to_visit_queue.empty()) {
const HloGraphNode* curr = to_visit_queue.back();
to_visit_queue.pop_back();
if (curr == possible_predecessor) {
return true;
}
if (visited.contains(curr)) {
continue;
}
visited.insert(curr);
for (const auto& edge : curr->GetPredecessors()) {
auto user_node_it = nodes_.find(&edge.Target().GetInstr());
to_visit_queue.push_back(user_node_it->second.get());
}
}
return false;
}
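// Builds the scheduling graph: one node per instruction in post order, with
// edges for data and control dependencies. Two kinds of extra edges are also
// added: (a) for supported async-done ops, edges ordering other users of
// aliasing buffers before the matching async-start, apparently to keep the
// async op from overlapping conflicting users; and (b) for a send-done with a
// recv-done control predecessor, an edge to the earliest while loop reachable
// from that recv-done.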
HloScheduleGraph::HloScheduleGraph(
const std::vector<HloInstruction*>* post_order_instructions,
HloAliasAnalysis* alias_analysis, const LatencyEstimator* latency_estimator,
const AsyncTracker* async_tracker)
: original_order_(post_order_instructions->begin(),
post_order_instructions->end()) {
HloComputation* comp = (*post_order_instructions)[0]->parent();
auto reachability = HloReachabilityMap::Build(comp);
int64_t current_pos = 0;
std::vector<const HloInstruction*> while_instrs;
for (HloInstruction* instr : *post_order_instructions) {
auto [new_node_it, inserted] = nodes_.try_emplace(
instr, std::make_unique<HloGraphNode>(instr, current_pos));
CHECK(inserted) << "Expected the value to not be already in the map";
instr_order_map_[instr] = current_pos++;
new_node_it->second->predecessors_.reserve(instr->operand_count());
new_node_it->second->successors_.reserve(instr->user_count());
new_node_it->second->cost_ = latency_estimator->NodeCost(instr);
new_node_it->second->resources_ =
async_tracker->GetResourcesFromInstruction(*instr);
new_node_it->second->released_shareable_resources_ =
async_tracker->GetReleasedShareableResourcesFromVector(
new_node_it->second->GetResources());
new_node_it->second->occupied_shareable_resources_ =
async_tracker->GetOccupiedShareableResourcesFromVector(
new_node_it->second->GetResources());
new_node_it->second->releases_selective_resource_ =
async_tracker->ReleasesSelectiveResource(new_node_it->second.get());
new_node_it->second->occupies_selective_resource_ =
async_tracker->OccupiesSelectiveResource(new_node_it->second.get());
if (instr->opcode() == HloOpcode::kWhile) {
while_instrs.push_back(instr);
}
}
auto add_dependency_helper = [latency_estimator](HloGraphNode* from,
HloGraphNode* to) {
const LatencyEstimator::TimeCost latency =
latency_estimator->GetLatencyBetween(*from, *to);
from->successors_.push_back(HloEdge(latency, to));
to->predecessors_.push_back(HloEdge(latency, from));
++to->indegree_;
++from->outdegree_;
};
for (const HloInstruction* instr : *post_order_instructions) {
auto node_it = nodes_.find(instr);
CHECK(node_it != nodes_.end()) << "We should have just allocated a node";
HloGraphNode* instr_node = node_it->second.get();
VLOG(10) << "Adding users for " << instr_node->GetInstr().ToString();
for (const HloInstruction* user : instr->users()) {
VLOG(10) << "\tUser: " << user->ToString();
auto user_node_it = nodes_.find(user);
CHECK(user_node_it != nodes_.end());
HloGraphNode* user_node = user_node_it->second.get();
add_dependency_helper(instr_node, user_node);
}
for (const HloInstruction* ctrl_succ : instr->control_successors()) {
VLOG(10) << "\tCtrl Successor: " << ctrl_succ->ToString();
auto ctrl_succ_node_it = nodes_.find(ctrl_succ);
CHECK(ctrl_succ_node_it != nodes_.end());
HloGraphNode* ctrl_succ_node = ctrl_succ_node_it->second.get();
add_dependency_helper(instr_node, ctrl_succ_node);
}
if (async_tracker->IsSupportedAsyncDone(*instr)) {
const HloInstruction* async_start = instr->operand(0);
if (alias_analysis != nullptr) {
for (const HloBuffer* buffer :
alias_analysis->ComputeBuffersAt(instr, {})) {
for (const HloValue* value : buffer->values()) {
if (value->defining_instruction() == instr) {
continue;
}
for (const HloUse& use : value->GetUses()) {
if (ContainsKey(instr_order_map_, use.instruction)) {
if (use.instruction == async_start ||
reachability->IsReachable(instr, use.instruction)) {
continue;
}
auto it = nodes_.find(use.instruction);
CHECK(it != nodes_.end());
HloGraphNode* pred_node = it->second.get();
it = nodes_.find(async_start);
CHECK(it != nodes_.end());
HloGraphNode* start_node = it->second.get();
if (IsPredecessorTransitively(pred_node, start_node)) {
continue;
}
pred_node->successors_.push_back(HloEdge(1, start_node));
start_node->predecessors_.push_back(HloEdge(1, pred_node));
++pred_node->outdegree_;
++start_node->indegree_;
}
}
}
}
}
}
if (instr->opcode() == HloOpcode::kSendDone) {
for (const auto* ctrl_pred : instr->control_predecessors()) {
if (ctrl_pred->opcode() != HloOpcode::kRecvDone) {
continue;
}
const HloInstruction* dependent_while_instr = nullptr;
for (const auto* while_hlo : while_instrs) {
if (!reachability->IsReachable(ctrl_pred, while_hlo)) {
continue;
}
if (dependent_while_instr == nullptr) {
dependent_while_instr = while_hlo;
continue;
}
if (OriginalInstructionPosition(while_hlo) <
OriginalInstructionPosition(dependent_while_instr)) {
dependent_while_instr = while_hlo;
}
}
if (dependent_while_instr != nullptr) {
auto send_done_it = nodes_.find(instr);
CHECK(send_done_it != nodes_.end());
HloGraphNode* send_done_node = send_done_it->second.get();
auto while_it = nodes_.find(dependent_while_instr);
CHECK(while_it != nodes_.end());
HloGraphNode* while_node = while_it->second.get();
send_done_node->successors_.push_back(HloEdge(1, while_node));
while_node->predecessors_.push_back(HloEdge(1, send_done_node));
++send_done_node->outdegree_;
++while_node->indegree_;
}
break;
}
}
}
}
std::string HloScheduleGraph::ToString(
const AsyncTracker* async_tracker) const {
std::string result;
std::vector<std::pair<const HloGraphNode*, int>> stack;
for (const auto& node : nodes_) {
if (node.second->predecessors_.empty()) {
stack.push_back(std::make_pair(node.second.get(), 0));
}
}
std::vector<const HloGraphNode*> order;
absl::flat_hash_set<const HloGraphNode*> visited;
while (!stack.empty()) {
auto& val = stack.back();
if (val.second == val.first->successors_.size()) {
order.push_back(val.first);
stack.pop_back();
continue;
}
const int64_t next_child = val.second++;
if (visited.insert(&val.first->successors_[next_child].Target()).second) {
stack.push_back(
std::make_pair(&val.first->successors_[next_child].Target(), 0));
}
}
for (auto it = order.rbegin(), e = order.rend(); it != e; ++it) {
absl::StrAppend(&result, (*it)->ToString(async_tracker));
}
return result;
}
HloGraphNode& HloScheduleGraph::GetNode(const HloInstruction* instr) const {
auto it = nodes_.find(instr);
CHECK(it != nodes_.end());
return *it->second;
}
std::vector<HloGraphNode*> HloScheduleGraph::FindBottomRoots() const {
std::vector<HloGraphNode*> roots;
for (const HloInstruction* instr : original_order_) {
HloGraphNode& node = GetNode(instr);
if (node.GetOutdegree() == 0) {
roots.push_back(&node);
}
}
return roots;
}
std::vector<HloGraphNode*> HloScheduleGraph::FindTopRoots() const {
std::vector<HloGraphNode*> roots;
for (const HloInstruction* instr : original_order_) {
HloGraphNode& node = GetNode(instr);
if (node.GetIndegree() == 0) {
roots.push_back(&node);
}
}
return roots;
}
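// Single topological pass from the indegree-0 roots computing, per node:
// async depth (latency accumulated at supported async-done nodes), depth
// (accumulated cost plus latency), graph depth (hop count from the top), and
// the hop distance to the closest selective-resource occupier.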
void HloScheduleGraph::InitializeGraphAnalysis(
const AsyncTracker* async_tracker) {
absl::flat_hash_map<HloGraphNode*, int> current_rank;
std::vector<HloGraphNode*> stack;
for (const HloInstruction* instr : original_order_) {
HloGraphNode& node = GetNode(instr);
current_rank[&node] = node.GetIndegree();
node.SetAsyncDepth(0.0);
node.SetDepth(0.0);
node.SetGraphDepth(0);
if (node.GetIndegree() == 0) {
stack.push_back(&node);
}
}
while (!stack.empty()) {
auto* node = stack.back();
stack.pop_back();
if (async_tracker->OccupiesSelectiveResource(node)) {
node->num_hops_to_closest_selective_resource_occupier_ = 0;
} else {
int64_t closest_predecessor_distance =
std::numeric_limits<int64_t>::max();
for (auto& pred : node->GetPredecessors()) {
closest_predecessor_distance = std::min(
closest_predecessor_distance,
pred.Target().num_hops_to_closest_selective_resource_occupier_);
}
if (closest_predecessor_distance != std::numeric_limits<int64_t>::max()) {
node->num_hops_to_closest_selective_resource_occupier_ =
closest_predecessor_distance + 1;
}
}
if (async_tracker->IsSupportedAsyncDone(node->GetInstr())) {
for (auto& pred : node->GetPredecessors()) {
node->SetAsyncDepth(
std::max(pred.Target().GetAsyncDepth() + pred.Latency(),
node->GetAsyncDepth()));
node->SetDepth(std::max(
pred.Target().GetDepth() + pred.Target().GetCost() + pred.Latency(),
node->GetDepth()));
node->SetGraphDepth(
std::max(pred.Target().GetGraphDepth() + 1, node->GetGraphDepth()));
}
} else {
for (auto& pred : node->GetPredecessors()) {
node->SetAsyncDepth(
std::max(pred.Target().GetAsyncDepth(), node->GetAsyncDepth()));
node->SetDepth(std::max(
pred.Target().GetDepth() + pred.Target().GetCost() + pred.Latency(),
node->GetDepth()));
node->SetGraphDepth(
std::max(pred.Target().GetGraphDepth() + 1, node->GetGraphDepth()));
}
}
for (auto& succ : node->GetSuccessors()) {
if (--current_rank[&succ.Target()] == 0) {
stack.push_back(&succ.Target());
}
}
}
}
absl::Status DefaultSchedulerCore::InitializeScheduler(
const HloModule* module) {
TF_ASSIGN_OR_RETURN(alias_analysis_, HloAliasAnalysis::Run(module));
module_pressure_state_ = std::make_unique<ModulePressureState>(
module, alias_analysis_.get(), shape_size_bytes_);
module_pressure_state_->InitializePressureStates();
module_pressure_state_->SetMemoryPeak(0);
return absl::OkStatus();
}
absl::Status DefaultSchedulerCore::SchedulingStep(
SchedulingState* sched_state) {
  TF_ASSIGN_OR_RETURN(HloGraphNode * node,
                      FindAndExtractBestNodeAvailable(
                          *sched_state, /*should_skip_node=*/nullptr));
CHECK(node != nullptr);
TF_ASSIGN_OR_RETURN(sched_state->current_time,
ScheduleNode(node, sched_state));
VLOG(2) << "Scheduled: " << node->GetInstr().name();
XLA_VLOG_LINES(5, node->ToString());
return absl::OkStatus();
}
absl::StatusOr<std::vector<HloInstruction*>>
DefaultSchedulerCore::ScheduleComputation(const HloComputation* computation) {
const HloSchedule& module_schedule = computation->parent()->schedule();
MemoryPressureTracker memory_pressure_tracker(
alias_analysis_.get(), module_pressure_state_->buffer_tracker(),
module_pressure_state_->pressure_state_cache());
memory_pressure_tracker.Initialize(
computation,
module_pressure_state_->GetPressureStateForComputation(computation)
.live_ids_at_bottom);
SchedulingState sched_state(
&module_schedule.sequence(computation), alias_analysis_.get(),
latency_estimator_, async_tracker_, &memory_pressure_tracker, config_);
async_tracker_->PostProcessScheduleGraph(&sched_state.sched_graph,
latency_estimator_);
sched_state.sched_graph.InitializeGraphAnalysis(async_tracker_);
VLOG(5) << "Just built graph:";
XLA_VLOG_LINES(5, sched_state.sched_graph.ToString(async_tracker_));
async_tracker_->SetConcurrentResourceLimits(
sched_state.max_concurrent_resource);
auto roots = sched_state.sched_graph.FindBottomRoots();
for (HloGraphNode* root : roots) {
root->SetReadyTime(0.0);
}
VLOG(5) << "Initial memory pressure for " << computation->name() << ": "
<< memory_pressure_tracker.memory_usage();
sched_state.ready_set.insert(sched_state.ready_set.end(), roots.begin(),
roots.end());
while (!sched_state.ready_set.empty()) {
VLOG(10) << "Current ready time: " << sched_state.current_time;
VLOG(2) << "Current ready queue:";
XLA_VLOG_LINES(2, [&sched_state]() {
struct LogFormatter {
void operator()(std::string* out, const HloGraphNode* n) const {
out->append(absl::StrCat("\t", n->GetInstr().name(),
" Ready time: ", n->GetReadyTime(),
" Depth: ", n->GetGraphDepth()));
}
};
return absl::StrJoin(sched_state.ready_set, "\n", LogFormatter());
}());
TF_RETURN_IF_ERROR(SchedulingStep(&sched_state));
}
if (VLOG_IS_ON(5)) {
VLOG(5) << "New order";
for (auto r_it = sched_state.new_sequence_reversed.rbegin(),
e_it = sched_state.new_sequence_reversed.rend();
r_it != e_it; ++r_it) {
LogInstruction(*r_it);
}
}
module_pressure_state_->UpdatePressureStateForComputation(
computation, memory_pressure_tracker.pressure_state());
absl::c_reverse(sched_state.new_sequence_reversed);
if (post_processing_fn_) {
post_processing_fn_(sched_state);
}
CHECK_EQ(sched_state.new_sequence_reversed.size(),
sched_state.sched_graph.GetOriginalInstrList().size())
<< "Not all instructions have been scheduled "
<< sched_state.new_sequence_reversed.size() << " vs "
<< sched_state.sched_graph.GetOriginalInstrList().size();
VLOG(2) << "Total time: "
<< sched_state.sched_graph
.GetNode(sched_state.new_sequence_reversed.front())
.GetReadyTime();
const auto& debug_options = xla::GetDebugOptionsFromFlags();
if (debug_options.xla_dump_latency_hiding_schedule() &&
computation->IsEntryComputation()) {
int core_freq = latency_estimator_->CyclesPerMicrosecond();
DumpLatencyHidingSchedule(computation, sched_state.sched_graph,
sched_state.new_sequence_reversed, core_freq,
debug_options);
}
return std::move(sched_state.new_sequence_reversed);
}
void DefaultSchedulerCore::DumpLatencyHidingSchedule(
const HloComputation* computation, const HloScheduleGraph& schedule_graph,
const std::vector<HloInstruction*>& instructions,
const int cycles_per_microsecond, const DebugOptions& debug_options) {
ScheduleProto proto;
proto.set_computation_id(computation->unique_id());
proto.set_cycles_per_microsecond(cycles_per_microsecond);
const HloGraphNode& first_node = schedule_graph.GetNode(instructions.front());
const double total_time = first_node.GetReadyTime() + first_node.GetCost();
for (const HloInstruction* instr : instructions) {
const HloGraphNode& instr_node = schedule_graph.GetNode(instr);
const double start_time =
total_time - (instr_node.GetReadyTime() + instr_node.GetCost());
const double end_time = start_time + instr_node.GetCost();
ScheduleProto::Instruction* instr_msg = proto.add_instructions();
instr_msg->set_id(instr->unique_id());
instr_msg->set_start_timestamp_cycles(start_time);
instr_msg->set_end_timestamp_cycles(end_time);
}
*proto.mutable_hlo_module() = computation->parent()->ToProto();
const std::string fn = absl::StrFormat("%s.schedule", computation->name());
DumpProtobufToFile(proto, debug_options, fn);
}
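// Walks the existing schedule, tracking in-flight async collectives, and
// attributes to each collective kind the cycles whose latency was not hidden:
// for a (start, done) pair separated by `elapsed` cycles of scheduled work,
// wasted = max(0, latency - elapsed). The returned statistics also include
// the total cycles and the memory-pressure peak.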
LatencyHidingScheduler::SchedulerStatistics
LatencyHidingScheduler::LatencyHidingStatistics(
const HloComputation* computation,
const LatencyEstimator* latency_estimator,
const AsyncTracker* async_tracker,
const HloCostAnalysis::ShapeSizeFunction& shape_size_bytes) {
const HloModule* module = computation->parent();
absl::flat_hash_map<
HloOpcode,
std::vector<std::tuple<const HloInstruction*, int64_t, int64_t>>>
outstanding_collectives;
double current_time = 0;
enum class AsyncKind {
kNotAsync,
kAllGather,
kAllReduce,
kCollectivePermute,
kAllToAll,
kReduceScatter,
kSend,
kRecv,
kCollectiveBroadcast,
};
auto opcode_to_async_kind = [](HloOpcode opcode) {
switch (opcode) {
case HloOpcode::kAllGather:
return AsyncKind::kAllGather;
case HloOpcode::kAllReduce:
return AsyncKind::kAllReduce;
case HloOpcode::kCollectiveBroadcast:
return AsyncKind::kCollectiveBroadcast;
case HloOpcode::kCollectivePermute:
return AsyncKind::kCollectivePermute;
case HloOpcode::kAllToAll:
return AsyncKind::kAllToAll;
case HloOpcode::kReduceScatter:
return AsyncKind::kReduceScatter;
case HloOpcode::kSend:
return AsyncKind::kSend;
case HloOpcode::kRecv:
return AsyncKind::kRecv;
default:
return AsyncKind::kNotAsync;
}
};
auto find_node_successor_edge = [](const HloGraphNode& graph_node,
const HloGraphNode& successor_node) {
auto edge_it = std::find_if(graph_node.GetSuccessors().begin(),
graph_node.GetSuccessors().end(),
[&successor_node](const HloEdge& edge) {
return &edge.Target() == &successor_node;
});
CHECK(edge_it != graph_node.GetSuccessors().end());
return edge_it;
};
auto find_outstanding_async = [&outstanding_collectives,
async_tracker](const HloInstruction* instr) {
const auto& collective_vec =
outstanding_collectives[async_tracker->GetCanonicalAsyncOp(*instr)
.inner];
auto it = absl::c_find_if(
collective_vec,
[instr](const std::tuple<const HloInstruction*, int64_t, int64_t>& p) {
return instr == std::get<0>(p);
});
CHECK(it != collective_vec.end());
return it;
};
absl::flat_hash_map<AsyncKind, double> wasted_time_per_collective;
SchedulerConfig config;
config.schedule_send_recvs = true;
config.use_real_cost_model = true;
std::unique_ptr<HloAliasAnalysis> hlo_alias_analysis =
HloAliasAnalysis::Run(module).value();
auto instructions_post_order = computation->MakeInstructionPostOrder();
  HloScheduleGraph schedule_graph(&instructions_post_order,
                                  /*alias_analysis=*/nullptr,
                                  latency_estimator, async_tracker);
async_tracker->PostProcessScheduleGraph(&schedule_graph, latency_estimator);
int64_t curr_pos = 0;
for (const HloInstruction* instr :
module->schedule().sequence(computation).instructions()) {
const HloGraphNode& instr_node = schedule_graph.GetNode(instr);
current_time += instr_node.GetCost();
if (async_tracker->IsSupportedAsyncStart(*instr)) {
outstanding_collectives[async_tracker->GetCanonicalAsyncOp(*instr).inner]
.push_back({instr, current_time, curr_pos});
} else if (async_tracker->IsSupportedAsyncDone(*instr)) {
const HloInstruction* start_instr = instr->operand(0);
if (async_tracker->IsSupportedAsyncStart(*start_instr)) {
auto it = find_outstanding_async(start_instr);
const HloGraphNode& start_node =
schedule_graph.GetNode(std::get<0>(*it));
auto edge_it = find_node_successor_edge(start_node, instr_node);
const double async_wasted_cycles = std::max(
0.0, edge_it->Latency() - (current_time - std::get<1>(*it)));
AsyncKind kind = opcode_to_async_kind(
async_tracker->GetCanonicalAsyncOp(*start_instr).inner);
wasted_time_per_collective[kind] += async_wasted_cycles;
current_time += async_wasted_cycles;
}
}
curr_pos++;
}
ModulePressureState module_pressure_state(module, hlo_alias_analysis.get(),
shape_size_bytes);
module_pressure_state.InitializePressureStates();
const MemoryPressureTracker::MemoryPressureState* memory_pressure_state =
module_pressure_state.ComputationIsMemoryTracked(computation)
? &module_pressure_state.GetPressureStateForComputation(computation)
: nullptr;
MemoryPressureTracker mem_pressure_tracker(
hlo_alias_analysis.get(), module_pressure_state.buffer_tracker(),
module_pressure_state.pressure_state_cache());
if (memory_pressure_state != nullptr) {
mem_pressure_tracker.Initialize(computation,
memory_pressure_state->live_ids_at_bottom);
}
return LatencyHidingScheduler::SchedulerStatistics{
computation,
wasted_time_per_collective[AsyncKind::kAllGather],
wasted_time_per_collective[AsyncKind::kAllReduce],
wasted_time_per_collective[AsyncKind::kCollectiveBroadcast],
wasted_time_per_collective[AsyncKind::kCollectivePermute],
wasted_time_per_collective[AsyncKind::kAllToAll],
wasted_time_per_collective[AsyncKind::kReduceScatter],
wasted_time_per_collective[AsyncKind::kSend],
wasted_time_per_collective[AsyncKind::kRecv],
current_time,
memory_pressure_state ? mem_pressure_tracker.initial_memory_pressure() +
memory_pressure_state->memory_peak
: 0};
}
std::string LatencyHidingScheduler::SchedulerStatisticsString(
const SchedulerStatistics& sched_stats) {
std::string result;
if (const HloComputation* comp = sched_stats.computation) {
absl::StrAppend(&result, "For computation: ", comp->name(), ", module ",
comp->parent()->name(), "(", comp->parent()->unique_id(),
")\n");
}
absl::StrAppend(&result, "Total wasted cycles: ",
sched_stats.all_gather_wasted_cycles +
sched_stats.all_reduce_wasted_cycles +
sched_stats.collective_broadcast_wasted_cycles +
sched_stats.collective_permute_wasted_cycles +
sched_stats.all_to_all_wasted_cycles +
sched_stats.reduce_scatter_wasted_cycles +
sched_stats.send_wasted_cycles +
sched_stats.recv_wasted_cycles,
"\n");
absl::StrAppend(&result, "Wasted cycles for all-reduce: ",
sched_stats.all_reduce_wasted_cycles, "\n");
absl::StrAppend(&result, "Wasted cycles for all-gather: ",
sched_stats.all_gather_wasted_cycles, "\n");
absl::StrAppend(&result, "Wasted cycles for collective-broadcast: ",
sched_stats.collective_broadcast_wasted_cycles, "\n");
absl::StrAppend(&result, "Wasted cycles for collective-permute: ",
sched_stats.collective_permute_wasted_cycles, "\n");
absl::StrAppend(&result, "Wasted cycles for all-to-all: ",
sched_stats.all_to_all_wasted_cycles, "\n");
absl::StrAppend(&result, "Wasted cycles for reduce-scatter: ",
sched_stats.reduce_scatter_wasted_cycles, "\n");
absl::StrAppend(&result,
"Wasted cycles for send: ", sched_stats.send_wasted_cycles,
"\n");
absl::StrAppend(&result,
"Wasted cycles for recv: ", sched_stats.recv_wasted_cycles,
"\n");
absl::StrAppend(&result, "Total cycles: ", sched_stats.total_cycles, "\n");
absl::StrAppend(&result, "Memory pressure peak (bytes): ",
sched_stats.memory_pressure_peak, "\n");
return result;
}
void LatencyHidingScheduler::LogScheduleStatistics(
const HloComputation* computation) {
XLA_VLOG_LINES(1, SchedulerStatisticsString(LatencyHidingStatistics(
computation, latency_estimator_.get(),
async_tracker_.get(), shape_size_bytes_)));
}
absl::StatusOr<bool> LatencyHidingScheduler::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
VLOG(5) << "Original module:";
XLA_VLOG_LINES(5, module->ToString());
std::vector<HloComputation*> computations_to_schedule;
computations_to_schedule.reserve(module->computation_count());
for (HloComputation* computation :
module->MakeNonfusionComputations(execution_threads)) {
for (auto* instr : computation->instructions()) {
if (async_tracker_->IsSupportedAsyncStart(*instr) ||
async_tracker_->IsSupportedAsyncDone(*instr)) {
computations_to_schedule.push_back(computation);
break;
}
}
}
if (computations_to_schedule.empty()) {
return false;
}
absl::flat_hash_map<HloComputation*, std::vector<HloInstruction*>>
saved_schedules;
TF_RETURN_IF_ERROR(scheduler_core_->InitializeScheduler(module));
for (HloComputation* computation : computations_to_schedule) {
TF_ASSIGN_OR_RETURN(std::vector<HloInstruction*> new_schedule,
scheduler_core_->ScheduleComputation(computation));
saved_schedules[computation] = std::move(new_schedule);
}
uint64_t initial_memory_limit = scheduler_core_->GetMemoryLimit();
for (int64_t iter = 0;
iter < scheduler_core_->GetRerunTimes() &&
scheduler_core_->GetMemoryPeak() > initial_memory_limit;
iter++) {
LOG(INFO) << "LatencyHidingScheduler current memory usage: "
<< scheduler_core_->GetMemoryPeak()
<< " bytes, does not fit in limit: "
<< scheduler_core_->GetMemoryLimit()
<< ". Setting the new limit to "
<< scheduler_core_->GetMemoryLimit() * 0.9;
TF_RETURN_IF_ERROR(scheduler_core_->InitializeScheduler(module));
scheduler_core_->SetMemoryLimit(scheduler_core_->GetMemoryLimit() * 0.9);
for (HloComputation* computation : computations_to_schedule) {
TF_ASSIGN_OR_RETURN(std::vector<HloInstruction*> new_schedule,
scheduler_core_->ScheduleComputation(computation));
saved_schedules[computation] = std::move(new_schedule);
}
}
LOG(INFO) << "LatencyHidingScheduler current memory usage: "
<< scheduler_core_->GetMemoryPeak()
<< " bytes. Current limit: " << scheduler_core_->GetMemoryLimit();
for (HloComputation* computation : computations_to_schedule) {
VLOG(1) << "Statistics before scheduling:";
LogScheduleStatistics(computation);
module->schedule().set_sequence(
computation, absl::MakeConstSpan(saved_schedules[computation]));
VLOG(1) << "Statistics after scheduling:";
LogScheduleStatistics(computation);
}
return true;
}
}
#include "xla/service/latency_hiding_scheduler.h"
#include <algorithm>
#include <cstdint>
#include <cstdlib>
#include <functional>
#include <iterator>
#include <list>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include <gtest/gtest.h>
#include "absl/algorithm/container.h"
#include "absl/container/inlined_vector.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_schedule.h"
#include "xla/service/async_collective_creator.h"
#include "xla/service/hlo_cost_analysis.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/util.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
constexpr int kMaxConcurrentAsyncCollectivePermutes = 5;
int PositionInVector(absl::Span<HloInstruction* const> vec,
const HloInstruction* element) {
return std::distance(vec.begin(), std::find(vec.begin(), vec.end(), element));
}
bool MaxConcurrentCollectivePermutesBelowThreshold(
absl::Span<HloInstruction* const> instruction_sequence) {
int max_concurrent_collective_permutes = 0;
int num_concurrent_collective_permutes = 0;
for (HloInstruction* instruction : instruction_sequence) {
if (instruction->opcode() == HloOpcode::kCollectivePermuteStart) {
num_concurrent_collective_permutes += 1;
max_concurrent_collective_permutes =
std::max(max_concurrent_collective_permutes,
num_concurrent_collective_permutes);
}
if (instruction->opcode() == HloOpcode::kCollectivePermuteDone) {
num_concurrent_collective_permutes -= 1;
}
}
int max_num_collective_permutes_threshold =
kMaxConcurrentAsyncCollectivePermutes;
return max_concurrent_collective_permutes <=
max_num_collective_permutes_threshold;
}
int GetIndex(absl::Span<HloInstruction* const> instruction_sequence,
absl::string_view hlo_name) {
return absl::c_find_if(instruction_sequence,
[hlo_name](HloInstruction* instruction) {
return instruction->name() == hlo_name;
}) -
instruction_sequence.begin();
}
int GetOpcodeIndexUsingMetaData(
HloOpcode opcode, absl::Span<HloInstruction* const> instruction_sequence,
absl::string_view metadata_name) {
return absl::c_find_if(instruction_sequence,
[metadata_name, opcode](HloInstruction* instruction) {
return instruction->metadata().op_name() ==
metadata_name &&
instruction->opcode() == opcode;
}) -
instruction_sequence.begin();
}
SchedulerConfig GetDefaultSchedConfig() {
SchedulerConfig sched_cfg;
sched_cfg.collective_permute_overlap_limit =
kMaxConcurrentAsyncCollectivePermutes;
sched_cfg.send_recv_overlap_limit = INT32_MAX;
return sched_cfg;
}
class TestLatencyEstimator : public LatencyEstimator {
public:
TimeCost GetLatencyBetween(const HloGraphNode& from,
const HloGraphNode& target) const override {
static constexpr TimeCost kLowLatency = 1.0;
if (from.GetInstr().opcode() == HloOpcode::kCollectivePermuteStart &&
target.GetInstr().opcode() == HloOpcode::kCollectivePermuteDone) {
return kLowLatency *
ShapeUtil::ElementsIn(from.GetInstr().operand(0)->shape());
}
return kLowLatency;
}
TimeCost NodeCost(const HloInstruction* instr) const override {
if (instr->IsLoopFusion()) {
return instr->shape().IsTuple()
? kMediumCost
: kLowCost * ShapeUtil::ElementsIn(instr->shape());
}
if (instr->IsOutputFusion() || instr->opcode() == HloOpcode::kConvolution) {
return instr->shape().IsTuple()
? kHighCost
: kMediumCost * ShapeUtil::ElementsIn(instr->shape());
}
return kLowCost;
}
int CyclesPerMicrosecond() const override { return 1; }
public:
static constexpr TimeCost kLowCost = 1.0;
static constexpr TimeCost kMediumCost = 1000.0;
static constexpr TimeCost kHighCost = 5000.0;
};
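// Test harness: converts synchronous collectives into start/done pairs with
// AsyncCollectiveCreator, then runs LatencyHidingScheduler with the given
// latency estimator and async tracker and a tuple-aware byte-size function.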
absl::StatusOr<bool> RunScheduler(
HloModule* module, SchedulerConfig sched_config = GetDefaultSchedConfig(),
std::unique_ptr<LatencyEstimator> latency_estimator =
std::make_unique<ApproximateLatencyEstimator>(),
std::unique_ptr<AsyncTracker> async_tracker = nullptr) {
AsyncCollectiveCreator::CollectiveCreatorConfig config{
HloPredicateTrue,
HloPredicateTrue,
HloPredicateTrue,
HloPredicateTrue};
TF_ASSIGN_OR_RETURN(bool value,
AsyncCollectiveCreator(std::move(config)).Run(module));
HloCostAnalysis::ShapeSizeFunction shape_size_bytes =
[&shape_size_bytes](const Shape& shape) -> int64_t {
int64_t shape_size = 0;
if (shape.IsTuple()) {
for (auto& sub_shape : shape.tuple_shapes()) {
shape_size += shape_size_bytes(sub_shape);
}
return shape_size;
}
return ShapeUtil::ByteSizeOfElements(shape);
};
if (!async_tracker) {
async_tracker = std::make_unique<AsyncTracker>(sched_config);
}
auto scheduler_core = std::make_unique<DefaultSchedulerCore>(
shape_size_bytes, async_tracker.get(), latency_estimator.get(),
sched_config);
TF_ASSIGN_OR_RETURN(
value, LatencyHidingScheduler(std::move(latency_estimator),
std::move(async_tracker),
std::move(scheduler_core), shape_size_bytes)
.Run(module));
return value;
}
}
class LatencyHidingSchedulerTest : public HloTestBase {
public:
absl::StatusOr<std::unique_ptr<HloModule>> ParseHloText(
absl::string_view hlo_string) {
TF_ASSIGN_OR_RETURN(
auto hlo_module,
ParseAndReturnVerifiedModule(hlo_string, GetModuleConfigForTest()));
return absl::StatusOr<std::unique_ptr<HloModule>>(std::move(hlo_module));
}
};
TEST_F(LatencyHidingSchedulerTest, AllGatherAsyncSimple) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
ENTRY %module {
%constant.19 = u32[] constant(0)
%replica_id = u32[]{:T(128)} replica-id()
%convert = f32[]{:T(128)} convert(u32[]{:T(128)} %replica_id)
%color_operand.1 = f32[8,256,256]{2,1,0:T(8,128)} broadcast(
f32[]{:T(128)} %convert), dimensions={}
%ag-start = (f32[8,256,256], f32[16,256,256]) all-gather-start(
f32[8,256,256] %color_operand.1), replica_groups={{0,1}}, dimensions={0},
metadata={op_type="AllGather" op_name="ag0"}
%ag-done = f32[16,256,256] all-gather-done(
(f32[8,256,256], f32[16,256,256]) %ag-start),
metadata={op_type="AllGather" op_name="ag0"}
p0 = f32[16,64,256]{2,1,0} parameter(0)
p1 = f32[16,64,256]{2,1,0} parameter(1)
p2 = f32[16,256,256]{2,1,0} parameter(2)
p3 = f32[16,256,256]{2,1,0} parameter(3)
c0 = f32[16,256,256]{2,1,0} convolution(p0, p1),
window={size=16 stride=15 lhs_dilate=16}, dim_labels=0fb_0io->0fb
c1 = f32[16,256,256]{2,1,0} convolution(p0, p1),
window={size=16 stride=15 lhs_dilate=16}, dim_labels=0fb_0io->0fb
ROOT a2 = f32[16,256,256]{2,1,0} add(%ag-done, c0)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module, ParseHloText(hlo_string));
HloSchedule& module_schedule = hlo_module->schedule();
EXPECT_TRUE(hlo_module->has_entry_computation());
HloComputation* entry_computation = hlo_module->entry_computation();
std::vector<HloInstruction*> original_instruction_sequence =
module_schedule.sequence(entry_computation).instructions();
EXPECT_TRUE(RunScheduler(hlo_module.get()).ok());
std::vector<HloInstruction*> new_instruction_sequence =
module_schedule.sequence(entry_computation).instructions();
if (VLOG_IS_ON(1)) {
for (auto* new_i : new_instruction_sequence) {
VLOG(1) << new_i->ToString();
}
}
EXPECT_EQ(GetOpcodeIndexUsingMetaData(HloOpcode::kAllGatherDone,
new_instruction_sequence, "ag0"),
GetIndex(new_instruction_sequence, "a2") - 1);
}
TEST_F(LatencyHidingSchedulerTest, AllGatherAsyncBalance) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
ENTRY %module {
%constant.19 = u32[] constant(0)
%replica_id = u32[]{:T(128)} replica-id()
%convert = f32[]{:T(128)} convert(u32[]{:T(128)} %replica_id)
%color_operand.1 = f32[1,8,256,256]{3,2,1,0:T(8,128)} broadcast(
f32[]{:T(128)} %convert), dimensions={}
%ag-start = (f32[1,8,256,256], f32[2,8,256,256]) all-gather-start(
f32[1,8,256,256] %color_operand.1), replica_groups={{0,1}}, dimensions={0},
metadata={op_type="AllGather" op_name="ag0"}
%ag-done = f32[2,8,256,256] all-gather-done(
(f32[1,8,256,256], f32[2,8,256,256]) %ag-start),
metadata={op_type="AllGather" op_name="ag0"}
%ag-done-bc = f32[16,256,256] bitcast(f32[2,8,256,256] %ag-done),
metadata={op_type="Bitcast" op_name="ag0"}
%ag-start.2 = (f32[1,8,256,256], f32[2,8,256,256]) all-gather-start(
f32[1,8,256,256] %color_operand.1), replica_groups={{0,1}}, dimensions={0},
metadata={op_type="AllGather" op_name="ag1"}
%ag-done.2 = f32[2,8,256,256] all-gather-done(
(f32[1,8,256,256], f32[2,8,256,256]) %ag-start.2),
metadata={op_type="AllGather" op_name="ag1"}
%ag-done-bc.2 = f32[16,256,256] bitcast(f32[2,8,256,256] %ag-done.2),
metadata={op_type="Bitcast" op_name="ag1"}
p0 = f32[16,64,256]{2,1,0} parameter(0)
p1 = f32[16,64,256]{2,1,0} parameter(1)
p2 = f32[16,256,256]{2,1,0} parameter(2)
p3 = f32[16,256,256]{2,1,0} parameter(3)
c0 = f32[16,256,256]{2,1,0} convolution(p0, p1),
window={size=16 stride=15 lhs_dilate=16}, dim_labels=0fb_0io->0fb,
metadata={op_type="AllGather" op_name="c0"}
c1 = f32[16,256,256]{2,1,0} convolution(p0, p1),
window={size=16 stride=15 lhs_dilate=16}, dim_labels=0fb_0io->0fb,
metadata={op_type="AllGather" op_name="c1"}
a2 = f32[16,256,256]{2,1,0} add(c1, c0)
ROOT t = (f32[16,256,256], f32[16,256,256], f32[16,256,256]) tuple(a2, %ag-done-bc.2, %ag-done-bc)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module, ParseHloText(hlo_string));
HloSchedule& module_schedule = hlo_module->schedule();
EXPECT_TRUE(hlo_module->has_entry_computation());
HloComputation* entry_computation = hlo_module->entry_computation();
std::vector<HloInstruction*> original_instruction_sequence =
module_schedule.sequence(entry_computation).instructions();
EXPECT_TRUE(RunScheduler(hlo_module.get()).ok());
std::vector<HloInstruction*> new_instruction_sequence =
module_schedule.sequence(entry_computation).instructions();
if (VLOG_IS_ON(1)) {
for (auto* new_i : new_instruction_sequence) {
VLOG(1) << new_i->ToString();
}
}
EXPECT_LT(GetOpcodeIndexUsingMetaData(HloOpcode::kConvolution,
new_instruction_sequence, "c0"),
GetOpcodeIndexUsingMetaData(HloOpcode::kAllGatherDone,
new_instruction_sequence, "ag0"));
EXPECT_GT(GetOpcodeIndexUsingMetaData(HloOpcode::kConvolution,
new_instruction_sequence, "c0"),
GetOpcodeIndexUsingMetaData(HloOpcode::kAllGatherStart,
new_instruction_sequence, "ag0"));
EXPECT_LT(GetOpcodeIndexUsingMetaData(HloOpcode::kConvolution,
new_instruction_sequence, "c1"),
GetOpcodeIndexUsingMetaData(HloOpcode::kAllGatherDone,
new_instruction_sequence, "ag1"));
EXPECT_GT(GetOpcodeIndexUsingMetaData(HloOpcode::kConvolution,
new_instruction_sequence, "c1"),
GetOpcodeIndexUsingMetaData(HloOpcode::kAllGatherStart,
new_instruction_sequence, "ag1"));
}
TEST_F(LatencyHidingSchedulerTest, AllGatherAsyncReshaped) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
ENTRY %module {
%constant.19 = u32[] constant(0)
%replica_id = u32[]{:T(128)} replica-id()
%convert = f32[]{:T(128)} convert(u32[]{:T(128)} %replica_id)
%color_operand.1 = f32[1,8,256,256]{3,2,1,0:T(8,128)} broadcast(
f32[]{:T(128)} %convert), dimensions={}
%ag-start = (f32[1,8,256,256], f32[2,8,256,256]) all-gather-start(
f32[1,8,256,256] %color_operand.1), replica_groups={{0,1}}, dimensions={0},
metadata={op_type="AllGather" op_name="ag0"}
%ag-done = f32[2,8,256,256] all-gather-done(
(f32[1,8,256,256], f32[2,8,256,256]) %ag-start),
metadata={op_type="AllGather" op_name="ag0"}
%ag-done-bc = f32[16,256,256] bitcast(f32[2,8,256,256] %ag-done),
metadata={op_type="Bitcast" op_name="ag0"}
p0 = f32[16,64,256]{2,1,0} parameter(0)
p1 = f32[16,64,256]{2,1,0} parameter(1)
p2 = f32[16,256,256]{2,1,0} parameter(2)
p3 = f32[16,256,256]{2,1,0} parameter(3)
c0 = f32[16,256,256]{2,1,0} convolution(p0, p1),
window={size=16 stride=15 lhs_dilate=16}, dim_labels=0fb_0io->0fb
c1 = f32[16,256,256]{2,1,0} convolution(p0, p1),
window={size=16 stride=15 lhs_dilate=16}, dim_labels=0fb_0io->0fb
ROOT a2 = f32[16,256,256]{2,1,0} add(%ag-done-bc, c0)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module, ParseHloText(hlo_string));
HloSchedule& module_schedule = hlo_module->schedule();
EXPECT_TRUE(hlo_module->has_entry_computation());
HloComputation* entry_computation = hlo_module->entry_computation();
std::vector<HloInstruction*> original_instruction_sequence =
module_schedule.sequence(entry_computation).instructions();
EXPECT_TRUE(RunScheduler(hlo_module.get()).ok());
std::vector<HloInstruction*> new_instruction_sequence =
module_schedule.sequence(entry_computation).instructions();
if (VLOG_IS_ON(1)) {
for (auto* new_i : new_instruction_sequence) {
VLOG(1) << new_i->ToString();
}
}
EXPECT_EQ(GetOpcodeIndexUsingMetaData(HloOpcode::kAllGatherDone,
new_instruction_sequence, "ag0"),
GetIndex(new_instruction_sequence, "a2") - 2);
EXPECT_EQ(GetOpcodeIndexUsingMetaData(HloOpcode::kBitcast,
new_instruction_sequence, "ag0"),
GetIndex(new_instruction_sequence, "a2") - 1);
}
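
// Two independent all-gathers: each start must precede its own done, and
// ag0 must be fully done before ag1 starts, so the two collectives do not
// overlap each other.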
TEST_F(LatencyHidingSchedulerTest, AllGatherAsyncOverlapped) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
ENTRY %module {
%constant.19 = u32[] constant(1)
%replica_id = u32[]{:T(128)} replica-id()
%add.1 = u32[]{:T(128)} add(replica_id, constant.19)
%convert = f32[]{:T(128)} convert(u32[]{:T(128)} %replica_id)
%convert.1 = f32[]{:T(128)} convert(u32[]{:T(128)} %add.1)
%color_operand.1 = f32[8,256,256]{2,1,0:T(8,128)} broadcast(f32[]{:T(128)} %convert), dimensions={}
%color_operand.2 = f32[8,256,256]{2,1,0:T(8,128)} broadcast(f32[]{:T(128)} %convert.1), dimensions={}
%ag-start = (f32[8,256,256], f32[16,256,256]) all-gather-start(f32[8,256,256] %color_operand.1), replica_groups={{0,1}}, dimensions={0},
metadata={op_type="AllGather" op_name="ag0"}
%ag-start.2 = (f32[8,256,256], f32[16,256,256]) all-gather-start(f32[8,256,256] %color_operand.2), replica_groups={{0,1}}, dimensions={0},
metadata={op_type="AllGather" op_name="ag1"}
%ag-done = f32[16,256,256] all-gather-done((f32[8,256,256], f32[16,256,256]) %ag-start),
metadata={op_type="AllGather" op_name="ag0"}
%ag-done.2 = f32[16,256,256] all-gather-done((f32[8,256,256], f32[16,256,256]) %ag-start.2),
metadata={op_type="AllGather" op_name="ag1"}
p0 = f32[16,64,256]{2,1,0} parameter(0)
p1 = f32[16,64,256]{2,1,0} parameter(1)
p2 = f32[16,256,256]{2,1,0} parameter(2)
p3 = f32[16,256,256]{2,1,0} parameter(3)
c0 = f32[16,256,256]{2,1,0} convolution(p0, p1),
window={size=16 stride=15 lhs_dilate=16}, dim_labels=0fb_0io->0fb
c1 = f32[16,256,256]{2,1,0} convolution(p0, p1),
window={size=16 stride=15 lhs_dilate=16}, dim_labels=0fb_0io->0fb
ROOT a2 = f32[16,256,256]{2,1,0} add(%ag-done, %ag-done.2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module, ParseHloText(hlo_string));
HloSchedule& module_schedule = hlo_module->schedule();
EXPECT_TRUE(hlo_module->has_entry_computation());
HloComputation* entry_computation = hlo_module->entry_computation();
std::vector<HloInstruction*> original_instruction_sequence =
module_schedule.sequence(entry_computation).instructions();
EXPECT_TRUE(RunScheduler(hlo_module.get()).ok());
std::vector<HloInstruction*> new_instruction_sequence =
module_schedule.sequence(entry_computation).instructions();
if (VLOG_IS_ON(1)) {
for (auto* new_i : new_instruction_sequence) {
VLOG(1) << new_i->ToString();
}
}
EXPECT_LT(GetOpcodeIndexUsingMetaData(HloOpcode::kAllGatherDone,
new_instruction_sequence, "ag0"),
GetOpcodeIndexUsingMetaData(HloOpcode::kAllGatherStart,
new_instruction_sequence, "ag1"));
EXPECT_LT(GetOpcodeIndexUsingMetaData(HloOpcode::kAllGatherStart,
new_instruction_sequence, "ag0"),
GetOpcodeIndexUsingMetaData(HloOpcode::kAllGatherDone,
new_instruction_sequence, "ag0"));
EXPECT_LT(GetOpcodeIndexUsingMetaData(HloOpcode::kAllGatherStart,
new_instruction_sequence, "ag1"),
GetOpcodeIndexUsingMetaData(HloOpcode::kAllGatherDone,
new_instruction_sequence, "ag1"));
}
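
// Same ordering requirements as above, but here the convolution c0 consumes
// both all-gather results, leaving only the independent c1 as compute that
// can hide the collectives' latency.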
TEST_F(LatencyHidingSchedulerTest, AllGatherAsyncOverlapped2) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
ENTRY %module {
%constant.19 = u32[] constant(1)
%replica_id = u32[]{:T(128)} replica-id()
%add.1 = u32[]{:T(128)} add(replica_id, constant.19)
%convert = f32[]{:T(128)} convert(u32[]{:T(128)} %replica_id)
%convert.1 = f32[]{:T(128)} convert(u32[]{:T(128)} %add.1)
%color_operand.1 = f32[8,256,256]{2,1,0:T(8,128)} broadcast(f32[]{:T(128)} %convert), dimensions={}
%color_operand.2 = f32[8,256,256]{2,1,0:T(8,128)} broadcast(f32[]{:T(128)} %convert.1), dimensions={}
%ag-start = (f32[8,256,256], f32[16,256,256]) all-gather-start(f32[8,256,256] %color_operand.1), replica_groups={{0,1}}, dimensions={0},
metadata={op_type="AllGather" op_name="ag0"}
%ag-start.2 = (f32[8,256,256], f32[16,256,256]) all-gather-start(f32[8,256,256] %color_operand.2), replica_groups={{0,1}}, dimensions={0},
metadata={op_type="AllGather" op_name="ag1"}
%ag-done = f32[16,256,256] all-gather-done((f32[8,256,256], f32[16,256,256]) %ag-start),
metadata={op_type="AllGather" op_name="ag0"}
%ag-done.2 = f32[16,256,256] all-gather-done((f32[8,256,256], f32[16,256,256]) %ag-start.2),
metadata={op_type="AllGather" op_name="ag1"}
p0 = f32[16,64,256]{2,1,0} parameter(0)
p1 = f32[16,64,256]{2,1,0} parameter(1)
c0 = f32[16,256,256]{2,1,0} convolution(ag-done, ag-done.2),
window={size=16 stride=15 lhs_dilate=16}, dim_labels=0fb_0io->0fb
c1 = f32[16,256,256]{2,1,0} convolution(p0, p1),
window={size=16 stride=15 lhs_dilate=16}, dim_labels=0fb_0io->0fb
ROOT a2 = f32[16,256,256]{2,1,0} add(%c0, %c1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module, ParseHloText(hlo_string));
HloSchedule& module_schedule = hlo_module->schedule();
EXPECT_TRUE(hlo_module->has_entry_computation());
HloComputation* entry_computation = hlo_module->entry_computation();
std::vector<HloInstruction*> original_instruction_sequence =
module_schedule.sequence(entry_computation).instructions();
EXPECT_TRUE(RunScheduler(hlo_module.get()).ok());
std::vector<HloInstruction*> new_instruction_sequence =
module_schedule.sequence(entry_computation).instructions();
if (VLOG_IS_ON(1)) {
for (auto* new_i : new_instruction_sequence) {
VLOG(1) << new_i->ToString();
}
}
EXPECT_LT(GetOpcodeIndexUsingMetaData(HloOpcode::kAllGatherDone,
new_instruction_sequence, "ag0"),
GetOpcodeIndexUsingMetaData(HloOpcode::kAllGatherStart,
new_instruction_sequence, "ag1"));
EXPECT_LT(GetOpcodeIndexUsingMetaData(HloOpcode::kAllGatherStart,
new_instruction_sequence, "ag0"),
GetOpcodeIndexUsingMetaData(HloOpcode::kAllGatherDone,
new_instruction_sequence, "ag0"));
EXPECT_LT(GetOpcodeIndexUsingMetaData(HloOpcode::kAllGatherStart,
new_instruction_sequence, "ag1"),
GetOpcodeIndexUsingMetaData(HloOpcode::kAllGatherDone,
new_instruction_sequence, "ag1"));
}
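
// Variant with the independent convolution c1 defined before the
// collectives; the expected serialization of ag0 and ag1 is unchanged.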
TEST_F(LatencyHidingSchedulerTest, AllGatherAsyncOverlapped3) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
ENTRY %module {
%constant.19 = u32[] constant(1)
%replica_id = u32[]{:T(128)} replica-id()
%add.1 = u32[]{:T(128)} add(replica_id, constant.19)
%convert = f32[]{:T(128)} convert(u32[]{:T(128)} %replica_id)
%convert.1 = f32[]{:T(128)} convert(u32[]{:T(128)} %add.1)
p0 = f32[16,64,256]{2,1,0} parameter(0)
p1 = f32[16,64,256]{2,1,0} parameter(1)
p2 = f32[16,256,256]{2,1,0} parameter(2)
p3 = f32[16,256,256]{2,1,0} parameter(3)
c1 = f32[16,256,256]{2,1,0} convolution(p0, p1),
window={size=16 stride=15 lhs_dilate=16}, dim_labels=0fb_0io->0fb
%color_operand.1 = f32[8,256,256]{2,1,0:T(8,128)} broadcast(f32[]{:T(128)} %convert), dimensions={}
%color_operand.2 = f32[8,256,256]{2,1,0:T(8,128)} broadcast(f32[]{:T(128)} %convert.1), dimensions={}
%ag-start = (f32[8,256,256], f32[16,256,256]) all-gather-start(f32[8,256,256] %color_operand.1), replica_groups={{0,1}}, dimensions={0},
metadata={op_type="AllGather" op_name="ag0"}
%ag-start.2 = (f32[8,256,256], f32[16,256,256]) all-gather-start(f32[8,256,256] %color_operand.2), replica_groups={{0,1}}, dimensions={0},
metadata={op_type="AllGather" op_name="ag1"}
%ag-done = f32[16,256,256] all-gather-done((f32[8,256,256], f32[16,256,256]) %ag-start),
metadata={op_type="AllGather" op_name="ag0"}
%ag-done.2 = f32[16,256,256] all-gather-done((f32[8,256,256], f32[16,256,256]) %ag-start.2),
metadata={op_type="AllGather" op_name="ag1"}
c0 = f32[16,256,256]{2,1,0} convolution(ag-done, ag-done.2),
window={size=16 stride=15 lhs_dilate=16}, dim_labels=0fb_0io->0fb
ROOT a2 = f32[16,256,256]{2,1,0} add(%ag-done, %ag-done.2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module, ParseHloText(hlo_string));
HloSchedule& module_schedule = hlo_module->schedule();
EXPECT_TRUE(hlo_module->has_entry_computation());
HloComputation* entry_computation = hlo_module->entry_computation();
std::vector<HloInstruction*> original_instruction_sequence =
module_schedule.sequence(entry_computation).instructions();
EXPECT_TRUE(RunScheduler(hlo_module.get()).ok());
std::vector<HloInstruction*> new_instruction_sequence =
module_schedule.sequence(entry_computation).instructions();
if (VLOG_IS_ON(1)) {
for (auto* new_i : new_instruction_sequence) {
VLOG(1) << new_i->ToString();
}
}
EXPECT_LT(GetOpcodeIndexUsingMetaData(HloOpcode::kAllGatherDone,
new_instruction_sequence, "ag0"),
GetOpcodeIndexUsingMetaData(HloOpcode::kAllGatherStart,
new_instruction_sequence, "ag1"));
EXPECT_LT(GetOpcodeIndexUsingMetaData(HloOpcode::kAllGatherStart,
new_instruction_sequence, "ag0"),
GetOpcodeIndexUsingMetaData(HloOpcode::kAllGatherDone,
new_instruction_sequence, "ag0"));
EXPECT_LT(GetOpcodeIndexUsingMetaData(HloOpcode::kAllGatherStart,
new_instruction_sequence, "ag1"),
GetOpcodeIndexUsingMetaData(HloOpcode::kAllGatherDone,
new_instruction_sequence, "ag1"));
}
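
// All-reduce counterpart of the balanced all-gather test: each convolution
// must land strictly between the start and the done of the all-reduce it is
// paired with through metadata (c0 with ar0, c1 with ar1).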
TEST_F(LatencyHidingSchedulerTest, AllReduceAsyncBalance) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
%add {
%p0 = f32[] parameter(0)
%p1 = f32[] parameter(1)
ROOT %a = f32[] add(p0, p1)
}
ENTRY %module {
%constant.19 = u32[] constant(0)
%replica_id = u32[]{:T(128)} replica-id()
%convert = f32[]{:T(128)} convert(u32[]{:T(128)} %replica_id)
%color_operand.1 = f32[2,8,256,256]{3,2,1,0:T(8,128)} broadcast(
f32[]{:T(128)} %convert), dimensions={}
%color_operand.2 = f32[2,8,256,256]{3,2,1,0:T(8,128)} broadcast(
f32[]{:T(128)} %convert), dimensions={}
%ar-start = f32[2,8,256,256] all-reduce-start(
f32[2,8,256,256] %color_operand.1), replica_groups={{0,1}}, to_apply=%add,
metadata={op_type="AllReduce" op_name="ar0"}
%ar-start.2 = f32[2,8,256,256] all-reduce-start(
f32[2,8,256,256] %color_operand.2), replica_groups={{0,1}}, to_apply=%add,
metadata={op_type="AllReduce" op_name="ar1"}
%ar-done = f32[2,8,256,256] all-reduce-done(
f32[2,8,256,256] %ar-start),
metadata={op_type="AllReduce" op_name="ar0"}
%ar-done-bc = f32[16,256,256] bitcast(f32[2,8,256,256] %ar-done),
metadata={op_type="Bitcast" op_name="ar0"}
%ar-done.2 = f32[2,8,256,256] all-reduce-done(
f32[2,8,256,256] %ar-start.2),
metadata={op_type="AllReduce" op_name="ar1"}
%ar-done-bc.2 = f32[16,256,256] bitcast(f32[2,8,256,256] %ar-done.2),
metadata={op_type="Bitcast" op_name="ar1"}
p0 = f32[16,64,256]{2,1,0} parameter(0)
p1 = f32[16,64,256]{2,1,0} parameter(1)
p2 = f32[16,256,256]{2,1,0} parameter(2)
p3 = f32[16,256,256]{2,1,0} parameter(3)
c0 = f32[16,256,256]{2,1,0} convolution(p0, p1),
window={size=16 stride=15 lhs_dilate=16}, dim_labels=0fb_0io->0fb,
metadata={op_type="AllReduce" op_name="c0"}
c1 = f32[16,256,256]{2,1,0} convolution(p0, p1),
window={size=16 stride=15 lhs_dilate=16}, dim_labels=0fb_0io->0fb,
metadata={op_type="AllReduce" op_name="c1"}
a2 = f32[16,256,256]{2,1,0} add(c1, c0)
ROOT t = (f32[16,256,256], f32[16,256,256], f32[16,256,256]) tuple(a2, %ar-done-bc.2, %ar-done-bc)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module, ParseHloText(hlo_string));
HloSchedule& module_schedule = hlo_module->schedule();
EXPECT_TRUE(hlo_module->has_entry_computation());
HloComputation* entry_computation = hlo_module->entry_computation();
std::vector<HloInstruction*> original_instruction_sequence =
module_schedule.sequence(entry_computation).instructions();
EXPECT_TRUE(RunScheduler(hlo_module.get()).ok());
std::vector<HloInstruction*> new_instruction_sequence =
module_schedule.sequence(entry_computation).instructions();
if (VLOG_IS_ON(1)) {
for (auto* new_i : new_instruction_sequence) {
VLOG(1) << new_i->ToString();
}
}
EXPECT_LT(GetOpcodeIndexUsingMetaData(HloOpcode::kConvolution,
new_instruction_sequence, "c0"),
GetOpcodeIndexUsingMetaData(HloOpcode::kAllReduceDone,
new_instruction_sequence, "ar0"));
EXPECT_GT(GetOpcodeIndexUsingMetaData(HloOpcode::kConvolution,
new_instruction_sequence, "c0"),
GetOpcodeIndexUsingMetaData(HloOpcode::kAllReduceStart,
new_instruction_sequence, "ar0"));
EXPECT_LT(GetOpcodeIndexUsingMetaData(HloOpcode::kConvolution,
new_instruction_sequence, "c1"),
GetOpcodeIndexUsingMetaData(HloOpcode::kAllReduceDone,
new_instruction_sequence, "ar1"));
EXPECT_GT(GetOpcodeIndexUsingMetaData(HloOpcode::kConvolution,
new_instruction_sequence, "c1"),
GetOpcodeIndexUsingMetaData(HloOpcode::kAllReduceStart,
new_instruction_sequence, "ar1"));
}
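
// Regression test for a while-loop aliasing issue: inside the loop body,
// add0 also reads the result of collective-permute.1, so it must be
// scheduled before the start of collective-permute.2, which feeds the
// aliased loop output.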
TEST_F(LatencyHidingSchedulerTest, WhileLoopAliasingBug) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
while_cond {
param = (bf16[8]{0}, bf16[8]{0}, pred[]) parameter(0)
ROOT gte = pred[] get-tuple-element(param), index=2
}
while_body {
param = (bf16[8]{0}, bf16[8]{0}, pred[]) parameter(0)
gte0 = bf16[8]{0} get-tuple-element(param), index=0
gte1 = pred[] get-tuple-element(param), index=2
bitcast = bf16[8]{0} bitcast(gte0)
collective-permute.1 = bf16[8]{0} collective-permute(gte0), source_target_pairs={{0,1},{1,2},{2,3}}
add0 = bf16[8]{0} add(collective-permute.1, bitcast)
negate = bf16[8]{0} negate(add0)
collective-permute.2 = bf16[8]{0} collective-permute(collective-permute.1), source_target_pairs={{1,0},{0,3},{3,2}}
ROOT tuple = (bf16[8]{0}, bf16[8]{0}, pred[]) tuple(collective-permute.2, negate, gte1)
}
ENTRY entry {
p0 = bf16[8]{0} parameter(0)
p1 = bf16[8]{0} parameter(1)
p2 = pred[] parameter(2)
tuple = (bf16[8]{0}, bf16[8]{0}, pred[]) tuple(p0, p1, p2)
while = (bf16[8]{0}, bf16[8]{0}, pred[]) while(tuple), condition=while_cond, body=while_body
gte0 = bf16[8]{0} get-tuple-element(while), index=0
gte1 = bf16[8]{0} get-tuple-element(while), index=1
ROOT add = bf16[8]{0} add(gte0, gte1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module, ParseHloText(hlo_string));
HloSchedule& module_schedule = hlo_module->schedule();
EXPECT_TRUE(hlo_module->has_entry_computation());
EXPECT_TRUE(RunScheduler(hlo_module.get()).ok());
EXPECT_TRUE(hlo_module->has_entry_computation());
HloComputation* while_body = hlo_module->GetComputationWithName("while_body");
std::vector<HloInstruction*> new_instruction_sequence =
module_schedule.sequence(while_body).instructions();
if (VLOG_IS_ON(1)) {
for (auto* new_i : new_instruction_sequence) {
VLOG(1) << new_i->ToString();
}
}
const HloInstruction* cp_start =
while_body->root_instruction()->operand(0)->operand(0);
EXPECT_EQ(cp_start->opcode(), HloOpcode::kCollectivePermuteStart);
EXPECT_LT(GetIndex(new_instruction_sequence, "add0"),
GetIndex(new_instruction_sequence, cp_start->name()));
}
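
// Second aliasing regression: the done of collective-permute.1 must come
// before the start of collective-permute.2, even though the two permutes
// read different loop values.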
TEST_F(LatencyHidingSchedulerTest, WhileLoopAliasingBug2) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
while_cond {
param = (bf16[8]{0}, bf16[8]{0}, pred[]) parameter(0)
ROOT gte = pred[] get-tuple-element(param), index=2
}
while_body {
param = (bf16[8]{0}, bf16[8]{0}, pred[]) parameter(0)
gte0 = bf16[8]{0} get-tuple-element(param), index=0
gte1 = bf16[8]{0} get-tuple-element(param), index=1
gte2 = pred[] get-tuple-element(param), index=2
negate1 = bf16[8]{0} negate(gte1)
collective-permute.1 = bf16[8]{0} collective-permute(gte0), source_target_pairs={{0,1},{1,2},{2,3}}
negate0 = bf16[8]{0} negate(collective-permute.1)
collective-permute.2 = bf16[8]{0} collective-permute(negate1), source_target_pairs={{1,0},{0,3},{3,2}}
ROOT tuple = (bf16[8]{0}, bf16[8]{0}, pred[]) tuple(collective-permute.2, negate0, gte2)
}
ENTRY entry {
p0 = bf16[8]{0} parameter(0)
p1 = bf16[8]{0} parameter(1)
p2 = pred[] parameter(2)
tuple = (bf16[8]{0}, bf16[8]{0}, pred[]) tuple(p0, p1, p2)
while = (bf16[8]{0}, bf16[8]{0}, pred[]) while(tuple), condition=while_cond, body=while_body
gte0 = bf16[8]{0} get-tuple-element(while), index=0
gte1 = bf16[8]{0} get-tuple-element(while), index=1
ROOT add = bf16[8]{0} add(gte0, gte1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module, ParseHloText(hlo_string));
HloSchedule& module_schedule = hlo_module->schedule();
EXPECT_TRUE(hlo_module->has_entry_computation());
EXPECT_TRUE(RunScheduler(hlo_module.get()).ok());
EXPECT_TRUE(hlo_module->has_entry_computation());
HloComputation* while_body = hlo_module->GetComputationWithName("while_body");
std::vector<HloInstruction*> new_instruction_sequence =
module_schedule.sequence(while_body).instructions();
if (VLOG_IS_ON(1)) {
for (auto* new_i : new_instruction_sequence) {
VLOG(1) << new_i->ToString();
}
}
const HloInstruction* cp_start_2 =
while_body->root_instruction()->operand(0)->operand(0);
EXPECT_EQ(cp_start_2->opcode(), HloOpcode::kCollectivePermuteStart);
const HloInstruction* cp_done_1 =
while_body->root_instruction()->operand(1)->operand(0);
EXPECT_EQ(cp_done_1->opcode(), HloOpcode::kCollectivePermuteDone);
EXPECT_LT(GetIndex(new_instruction_sequence, cp_done_1->name()),
GetIndex(new_instruction_sequence, cp_start_2->name()));
}
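
// A single synchronous collective-permute is rewritten into a start/done
// pair, yielding a three-instruction schedule: parameter, start, done.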
TEST_F(LatencyHidingSchedulerTest, SingleCollectivePermuteTest) {
absl::string_view hlo_string = R"(
HloModule single_collective_permute_test, is_scheduled=true
ENTRY after_optimizations_test {
%parameter.1 = bf16[8]{0} parameter(0), sharding={replicated}
ROOT %collective-permute.1 = bf16[8]{0} collective-permute(bf16[8]{0} parameter.1), source_target_pairs={{0,1},{1,2},{2,3}}, channel_id=1
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module, ParseHloText(hlo_string));
HloSchedule& module_schedule = hlo_module->schedule();
EXPECT_TRUE(hlo_module->has_entry_computation());
HloComputation* entry_computation = hlo_module->entry_computation();
EXPECT_TRUE(RunScheduler(hlo_module.get()).ok());
std::vector<HloInstruction*> new_instruction_sequence =
module_schedule.sequence(entry_computation).instructions();
if (VLOG_IS_ON(1)) {
for (auto* new_i : new_instruction_sequence) {
VLOG(1) << new_i->ToString();
}
}
EXPECT_EQ(new_instruction_sequence.size(), 3);
EXPECT_EQ(new_instruction_sequence[1]->opcode(),
HloOpcode::kCollectivePermuteStart);
EXPECT_EQ(new_instruction_sequence[2]->opcode(),
HloOpcode::kCollectivePermuteDone);
}
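
// A collective-permute that updates its destination buffer in place (paired
// input/output operands plus offset tuples and slice_sizes): the split into
// start/done should grow the schedule by exactly one instruction.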
TEST_F(LatencyHidingSchedulerTest, InplaceUpdateCPTest) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
%fused_computation.1 (param_0.1: f32[4,4,128], param_1.2: u32[]) -> f32[4,4,128] {
%param_0.1 = f32[4,4,128]{2,1,0:T(4,128)} parameter(0)
%constant.15 = f32[]{:T(128)} constant(1)
%broadcast.2 = f32[2,4,128]{2,1,0:T(4,128)} broadcast(f32[]{:T(128)} %constant.15), dimensions={}
%param_1.2 = u32[] parameter(1)
%constant.14 = u32[] constant(0)
ROOT %dynamic-update-slice.1 = f32[4,4,128]{2,1,0:T(4,128)} dynamic-update-slice(f32[4,4,128]{2,1,0:T(4,128)} %param_0.1, f32[2,4,128]{2,1,0:T(4,128)} %broadcast.2, u32[] %param_1.2, u32[] %constant.14, u32[] %constant.14)
}
ENTRY %module_spmd () -> f32[4,4,128] {
%constant.8 = u32[] constant(0)
%constant.5 = u32[] constant(2)
%tuple.1 = (u32[], u32[], u32[]) tuple(u32[] %constant.8, u32[] %constant.8, u32[] %constant.8)
%tuple = (u32[], u32[], u32[]) tuple(u32[] %constant.5, u32[] %constant.8, u32[] %constant.8)
%custom-call = f32[4,4,128]{2,1,0:T(4,128)} custom-call(), custom_call_target="AllocateBuffer"
%fusion.1 = f32[4,4,128]{2,1,0:T(4,128)} fusion(f32[4,4,128]{2,1,0:T(4,128)} %custom-call, u32[] %constant.5), kind=kLoop, calls=%fused_computation.1
%collective-permute = f32[4,4,128]{2,1,0:T(4,128)} collective-permute(f32[4,4,128]{2,1,0:T(4,128)} %fusion.1, f32[4,4,128]{2,1,0:T(4,128)} %fusion.1, (u32[], u32[], u32[]) %tuple, (u32[], u32[], u32[]) %tuple.1), channel_id=958, source_target_pairs={{0,4},{4,0},{1,5},{5,1},{2,6},{6,2},{3,7},{7,3}}, slice_sizes={{2,4,128}}, backend_config="{\"flag_configs\":[],\"barrier_config\":{\"barrier_type\":\"CUSTOM\",\"id\":\"0\"},\"scoped_memory_configs\":[]}"
ROOT %copy.3 = f32[4,4,128]{2,1,0:T(4,128)} copy(f32[4,4,128]{2,1,0:T(4,128)} %collective-permute)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module, ParseHloText(hlo_string));
HloSchedule& module_schedule = hlo_module->schedule();
EXPECT_TRUE(hlo_module->has_entry_computation());
HloComputation* entry_computation = hlo_module->entry_computation();
std::vector<HloInstruction*> original_instruction_sequence =
module_schedule.sequence(entry_computation).instructions();
EXPECT_TRUE(RunScheduler(hlo_module.get()).ok());
std::vector<HloInstruction*> new_instruction_sequence =
module_schedule.sequence(entry_computation).instructions();
if (VLOG_IS_ON(1)) {
for (auto* new_i : new_instruction_sequence) {
VLOG(1) << new_i->ToString();
}
}
EXPECT_EQ(new_instruction_sequence.size(),
original_instruction_sequence.size() + 1);
}
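
// A chain of three in-place collective-permutes preceded by an all-gather:
// the schedule grows by four, consistent with each of the four synchronous
// collectives being split into a start/done pair.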
TEST_F(LatencyHidingSchedulerTest, InplaceUpdateCPTest2) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
%sum (x.336: f32[], y.336: f32[]) -> f32[] {
%x.336 = f32[]{:T(128)} parameter(0)
%y.336 = f32[]{:T(128)} parameter(1)
ROOT %add.5252 = f32[]{:T(128)} add(f32[]{:T(128)} %x.336, f32[]{:T(128)} %y.336)
}
ENTRY %module () -> f32[33708,1024] {
%constant.19 = u32[] constant(0)
%replica_id = u32[]{:T(128)} replica-id()
%convert = f32[]{:T(128)} convert(u32[]{:T(128)} %replica_id)
%color_operand.1 = f32[2128,8,128]{2,1,0:T(8,128)} broadcast(f32[]{:T(128)} %convert), dimensions={}
%all-gather.1 = f32[4256,8,128]{2,1,0:T(8,128)} all-gather(f32[2128,8,128]{2,1,0:T(8,128)} %color_operand.1), replica_groups={{0,6},{2,4},{3,5},{1,7}}, dimensions={0}
%custom-call = f32[33712,8,128]{2,1,0:T(8,128)} custom-call(), custom_call_target="AllocateBuffer"
%dynamic-update-slice = f32[33712,8,128]{2,1,0:T(8,128)} dynamic-update-slice(f32[33712,8,128]{2,1,0:T(8,128)} %custom-call, f32[4256,8,128]{2,1,0:T(8,128)} %all-gather.1, u32[] %constant.19, u32[] %constant.19, u32[] %constant.19)
%tuple.7 = (u32[], u32[], u32[]) tuple(u32[] %constant.19, u32[] %constant.19, u32[] %constant.19)
%constant.20 = u32[] constant(4256)
%tuple.8 = (u32[], u32[], u32[]) tuple(u32[] %constant.20, u32[] %constant.19, u32[] %constant.19)
%collective-permute.3 = f32[33712,8,128]{2,1,0:T(8,128)} collective-permute(f32[33712,8,128]{2,1,0:T(8,128)} %dynamic-update-slice, f32[33712,8,128]{2,1,0:T(8,128)} %dynamic-update-slice, (u32[], u32[], u32[]) %tuple.7, (u32[], u32[], u32[]) %tuple.8), source_target_pairs={{0,2},{2,3},{3,1},{1,0},{6,4},{4,5},{5,7},{7,6}}, slice_sizes={{4256,8,128}}
%tuple.9 = (u32[], u32[], u32[]) tuple(u32[] %constant.20, u32[] %constant.19, u32[] %constant.19)
%constant.21 = u32[] constant(8512)
%tuple.10 = (u32[], u32[], u32[]) tuple(u32[] %constant.21, u32[] %constant.19, u32[] %constant.19)
%collective-permute.4 = f32[33712,8,128]{2,1,0:T(8,128)} collective-permute(f32[33712,8,128]{2,1,0:T(8,128)} %collective-permute.3, f32[33712,8,128]{2,1,0:T(8,128)} %collective-permute.3, (u32[], u32[], u32[]) %tuple.9, (u32[], u32[], u32[]) %tuple.10), source_target_pairs={{0,2},{2,3},{3,1},{1,0},{6,4},{4,5},{5,7},{7,6}}, slice_sizes={{4256,8,128}}
%tuple.11 = (u32[], u32[], u32[]) tuple(u32[] %constant.21, u32[] %constant.19, u32[] %constant.19)
%constant.22 = u32[] constant(12768)
%tuple.12 = (u32[], u32[], u32[]) tuple(u32[] %constant.22, u32[] %constant.19, u32[] %constant.19)
%collective-permute.5 = f32[33712,8,128]{2,1,0:T(8,128)} collective-permute(f32[33712,8,128]{2,1,0:T(8,128)} %collective-permute.4, f32[33712,8,128]{2,1,0:T(8,128)} %collective-permute.4, (u32[], u32[], u32[]) %tuple.11, (u32[], u32[], u32[]) %tuple.12), source_target_pairs={{0,2},{2,3},{3,1},{1,0},{6,4},{4,5},{5,7},{7,6}}, slice_sizes={{4256,8,128}}
ROOT %bitcast.16 = f32[33708,1024]{1,0:T(8,128)} bitcast(f32[33712,8,128]{2,1,0:T(8,128)} %collective-permute.5)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module, ParseHloText(hlo_string));
HloSchedule& module_schedule = hlo_module->schedule();
EXPECT_TRUE(hlo_module->has_entry_computation());
HloComputation* entry_computation = hlo_module->entry_computation();
std::vector<HloInstruction*> original_instruction_sequence =
module_schedule.sequence(entry_computation).instructions();
EXPECT_TRUE(RunScheduler(hlo_module.get()).ok());
std::vector<HloInstruction*> new_instruction_sequence =
module_schedule.sequence(entry_computation).instructions();
if (VLOG_IS_ON(1)) {
for (auto* new_i : new_instruction_sequence) {
VLOG(1) << new_i->ToString();
}
}
EXPECT_EQ(new_instruction_sequence.size(),
original_instruction_sequence.size() + 4);
}
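
// Mixes plain and in-place (sliced) collective-permutes. All six are split
// (the schedule grows by six), and each group of three must straddle the
// convolution it overlaps: cp0/cp2/cp4 around c0, cp1/cp3/cp5 around c1.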
TEST_F(LatencyHidingSchedulerTest, TwoCollectivePermuteTypesOverlap) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
ENTRY entry {
param = (f32[16,64,256]{2,1,0}, f32[16,64,256]{2,1,0}, f32[16,128,256]{2,1,0}) parameter(0)
gte0 = f32[16,64,256]{2,1,0} get-tuple-element(param), index=0
gte1 = f32[16,64,256]{2,1,0} get-tuple-element(param), index=1
cp0 = f32[16,64,256]{2,1,0} collective-permute(gte0),
source_target_pairs={{0,1},{1,0}},
metadata={op_type="CollectivePermute" op_name="cp0"}
cp1 = f32[16,64,256]{2,1,0} collective-permute(cp0),
source_target_pairs={{0,1},{1,0}},
metadata={op_type="CollectivePermute" op_name="cp1"}
c0 = f32[16,256,256]{2,1,0} convolution(gte0, gte1),
window={size=16 stride=15 lhs_dilate=16}, dim_labels=0fb_0io->0fb
cp2 = f32[16,64,256]{2,1,0} collective-permute(gte1),
source_target_pairs={{0,1},{1,0}},
metadata={op_type="CollectivePermute" op_name="cp2"}
c1 = f32[16,256,256]{2,1,0} convolution(cp0, gte1),
window={size=16 stride=15 lhs_dilate=16}, dim_labels=0fb_0io->0fb
cp3 = f32[16,64,256]{2,1,0} collective-permute(cp2),
source_target_pairs={{0,1},{1,0}},
metadata={op_type="CollectivePermute" op_name="cp3"}
gte2 = f32[16,128,256]{2,1,0} get-tuple-element(param), index=2
const0 = u32[] constant(0)
const1 = u32[] constant(8)
tuple0 = (u32[], u32[], u32[]) tuple(u32[] const0, u32[] const0, u32[] const0)
tuple1 = (u32[], u32[], u32[]) tuple(u32[] const1, u32[] const0, u32[] const0)
cp4 = f32[16,128,256]{2,1,0} collective-permute(gte2, gte2, tuple0, tuple1),
source_target_pairs={{2,3},{3,2}},
slice_sizes={{8,128,256}},
metadata={op_type="CollectivePermute" op_name="cp4"}
cp5 = f32[16,128,256]{2,1,0} collective-permute(cp4, cp4, tuple0, tuple1),
source_target_pairs={{2,3},{3,2}},
slice_sizes={{8,128,256}},
metadata={op_type="CollectivePermute" op_name="cp5"}
ROOT tuple = (f32[16,256,256]{2,1,0}, f32[16,64,256]{2,1,0}, f32[16,128,256]{2,1,0}) tuple(c1, cp3, cp5)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module, ParseHloText(hlo_string));
HloSchedule& module_schedule = hlo_module->schedule();
EXPECT_TRUE(hlo_module->has_entry_computation());
HloComputation* entry_computation = hlo_module->entry_computation();
std::vector<HloInstruction*> original_instruction_sequence =
module_schedule.sequence(entry_computation).instructions();
EXPECT_TRUE(RunScheduler(hlo_module.get()).ok());
std::vector<HloInstruction*> new_instruction_sequence =
module_schedule.sequence(entry_computation).instructions();
if (VLOG_IS_ON(1)) {
for (auto* new_i : new_instruction_sequence) {
VLOG(1) << new_i->ToString();
}
}
EXPECT_EQ(new_instruction_sequence.size(),
original_instruction_sequence.size() + 6);
EXPECT_LT(GetOpcodeIndexUsingMetaData(HloOpcode::kCollectivePermuteStart,
new_instruction_sequence, "cp0"),
GetIndex(new_instruction_sequence, "c0"));
EXPECT_LT(GetOpcodeIndexUsingMetaData(HloOpcode::kCollectivePermuteStart,
new_instruction_sequence, "cp2"),
GetIndex(new_instruction_sequence, "c0"));
EXPECT_LT(GetOpcodeIndexUsingMetaData(HloOpcode::kCollectivePermuteStart,
new_instruction_sequence, "cp4"),
GetIndex(new_instruction_sequence, "c0"));
EXPECT_GT(GetOpcodeIndexUsingMetaData(HloOpcode::kCollectivePermuteDone,
new_instruction_sequence, "cp0"),
GetIndex(new_instruction_sequence, "c0"));
EXPECT_GT(GetOpcodeIndexUsingMetaData(HloOpcode::kCollectivePermuteDone,
new_instruction_sequence, "cp2"),
GetIndex(new_instruction_sequence, "c0"));
EXPECT_GT(GetOpcodeIndexUsingMetaData(HloOpcode::kCollectivePermuteDone,
new_instruction_sequence, "cp4"),
GetIndex(new_instruction_sequence, "c0"));
EXPECT_LT(GetOpcodeIndexUsingMetaData(HloOpcode::kCollectivePermuteStart,
new_instruction_sequence, "cp1"),
GetIndex(new_instruction_sequence, "c1"));
EXPECT_LT(GetOpcodeIndexUsingMetaData(HloOpcode::kCollectivePermuteStart,
new_instruction_sequence, "cp3"),
GetIndex(new_instruction_sequence, "c1"));
EXPECT_LT(GetOpcodeIndexUsingMetaData(HloOpcode::kCollectivePermuteStart,
new_instruction_sequence, "cp5"),
GetIndex(new_instruction_sequence, "c1"));
EXPECT_GT(GetOpcodeIndexUsingMetaData(HloOpcode::kCollectivePermuteDone,
new_instruction_sequence, "cp1"),
GetIndex(new_instruction_sequence, "c1"));
EXPECT_GT(GetOpcodeIndexUsingMetaData(HloOpcode::kCollectivePermuteDone,
new_instruction_sequence, "cp3"),
GetIndex(new_instruction_sequence, "c1"));
EXPECT_GT(GetOpcodeIndexUsingMetaData(HloOpcode::kCollectivePermuteDone,
new_instruction_sequence, "cp5"),
GetIndex(new_instruction_sequence, "c1"));
}
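
// Serially dependent collective-permutes: after splitting (6 -> 8
// instructions) the original adds keep their relative order, the first
// permute's start immediately follows the parameter, and the second
// permute's start is placed after the first permute's done.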
TEST_F(LatencyHidingSchedulerTest, SerialCollectivePermutesTest) {
absl::string_view hlo_string = R"(
HloModule serial_collective_permute_test, is_scheduled=true
ENTRY after_optimizations_test {
%parameter.1 = bf16[8]{0} parameter(0)
%collective-permute.2 = bf16[8]{0} collective-permute(bf16[8]{0} parameter.1), source_target_pairs={{0,1},{1,2},{2,3}}
%add.3 = bf16[8]{0} add(%parameter.1, %parameter.1)
%add.4 = bf16[8]{0} add(%add.3, parameter.1)
%add.5 = bf16[8]{0} add(%collective-permute.2, %add.4)
%collective-permute.6 = bf16[8]{0} collective-permute(bf16[8]{0} add.5), source_target_pairs={{1,0},{0,3},{3,2}}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module, ParseHloText(hlo_string));
HloSchedule& module_schedule = hlo_module->schedule();
EXPECT_TRUE(hlo_module->has_entry_computation());
HloComputation* entry_computation = hlo_module->entry_computation();
std::vector<HloInstruction*> original_instruction_sequence =
module_schedule.sequence(entry_computation).instructions();
EXPECT_TRUE(RunScheduler(hlo_module.get()).ok());
std::vector<HloInstruction*> new_instruction_sequence =
module_schedule.sequence(entry_computation).instructions();
if (VLOG_IS_ON(1)) {
for (auto* new_i : new_instruction_sequence) {
VLOG(1) << new_i->ToString();
}
}
EXPECT_EQ(original_instruction_sequence.size(), 6);
EXPECT_EQ(new_instruction_sequence.size(), 8);
EXPECT_LT(PositionInVector(new_instruction_sequence,
original_instruction_sequence[0]),
PositionInVector(new_instruction_sequence,
original_instruction_sequence[2]));
EXPECT_LT(PositionInVector(new_instruction_sequence,
original_instruction_sequence[2]),
PositionInVector(new_instruction_sequence,
original_instruction_sequence[3]));
EXPECT_LT(PositionInVector(new_instruction_sequence,
original_instruction_sequence[3]),
PositionInVector(new_instruction_sequence,
original_instruction_sequence[4]));
EXPECT_EQ(original_instruction_sequence[0]->user_count(), 3);
EXPECT_EQ(original_instruction_sequence[0]->users()[0]->opcode(),
HloOpcode::kCollectivePermuteStart);
HloInstruction* collective_permute_start_1 =
original_instruction_sequence[0]->users()[0];
EXPECT_EQ(
PositionInVector(new_instruction_sequence,
original_instruction_sequence[0]) +
1,
PositionInVector(new_instruction_sequence, collective_permute_start_1));
EXPECT_EQ(collective_permute_start_1->user_count(), 1);
EXPECT_EQ(collective_permute_start_1->users()[0]->opcode(),
HloOpcode::kCollectivePermuteDone);
HloInstruction* collective_permute_done_1 =
collective_permute_start_1->users()[0];
EXPECT_TRUE(
(PositionInVector(new_instruction_sequence, collective_permute_done_1) +
1 ==
PositionInVector(new_instruction_sequence,
collective_permute_done_1->users()[0])) ||
(PositionInVector(new_instruction_sequence, collective_permute_done_1) +
1 ==
PositionInVector(new_instruction_sequence,
collective_permute_done_1->users()[1])));
EXPECT_TRUE(
(PositionInVector(new_instruction_sequence, collective_permute_done_1) <
PositionInVector(new_instruction_sequence,
collective_permute_done_1->users()[0])));
EXPECT_EQ(new_instruction_sequence[7]->opcode(),
HloOpcode::kCollectivePermuteDone);
EXPECT_GT(
PositionInVector(new_instruction_sequence,
new_instruction_sequence[7]->operand(0)),
PositionInVector(new_instruction_sequence, collective_permute_done_1));
}
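
// Directly chained collective-permutes with no compute in between: both are
// still split (3 -> 5 instructions) and the second start must follow the
// first done, so no overlap is possible.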
TEST_F(LatencyHidingSchedulerTest, BackToBackCollectivePermutesTest) {
absl::string_view hlo_string = R"(
HloModule back_to_back_collective_permute_test, is_scheduled=true
ENTRY after_optimizations_test {
%parameter.1 = bf16[8]{0} parameter(0)
%collective-permute.2 = bf16[8]{0} collective-permute(bf16[8]{0} parameter.1), source_target_pairs={{0,1},{1,2},{2,3}}
%collective-permute.6 = bf16[8]{0} collective-permute(bf16[8]{0} collective-permute.2), source_target_pairs={{1,0},{0,3},{3,2}}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module, ParseHloText(hlo_string));
HloSchedule& module_schedule = hlo_module->schedule();
EXPECT_TRUE(hlo_module->has_entry_computation());
HloComputation* entry_computation = hlo_module->entry_computation();
std::vector<HloInstruction*> original_instruction_sequence =
module_schedule.sequence(entry_computation).instructions();
EXPECT_TRUE(RunScheduler(hlo_module.get()).ok());
std::vector<HloInstruction*> new_instruction_sequence =
module_schedule.sequence(entry_computation).instructions();
if (VLOG_IS_ON(1)) {
for (auto* new_i : new_instruction_sequence) {
VLOG(1) << new_i->ToString();
}
}
EXPECT_EQ(original_instruction_sequence.size(), 3);
EXPECT_EQ(new_instruction_sequence.size(), 5);
EXPECT_EQ(original_instruction_sequence[0]->user_count(), 1);
EXPECT_EQ(original_instruction_sequence[0]->users()[0]->opcode(),
HloOpcode::kCollectivePermuteStart);
HloInstruction* collective_permute_start_1 =
original_instruction_sequence[0]->users()[0];
EXPECT_EQ(
PositionInVector(new_instruction_sequence,
original_instruction_sequence[0]) +
1,
PositionInVector(new_instruction_sequence, collective_permute_start_1));
EXPECT_EQ(collective_permute_start_1->user_count(), 1);
EXPECT_EQ(collective_permute_start_1->users()[0]->opcode(),
HloOpcode::kCollectivePermuteDone);
HloInstruction* collective_permute_done_1 =
collective_permute_start_1->users()[0];
EXPECT_TRUE(
(PositionInVector(new_instruction_sequence, collective_permute_done_1) +
1 ==
PositionInVector(new_instruction_sequence,
collective_permute_done_1->users()[0])) ||
(PositionInVector(new_instruction_sequence, collective_permute_done_1) +
1 ==
PositionInVector(new_instruction_sequence,
collective_permute_done_1->users()[1])));
EXPECT_TRUE(
(PositionInVector(new_instruction_sequence, collective_permute_done_1) <
PositionInVector(new_instruction_sequence,
collective_permute_done_1->users()[0])));
EXPECT_EQ(new_instruction_sequence[4]->opcode(),
HloOpcode::kCollectivePermuteDone);
EXPECT_GT(
PositionInVector(new_instruction_sequence,
new_instruction_sequence[4]->operand(0)),
PositionInVector(new_instruction_sequence, collective_permute_done_1));
}
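
// Two independent collective-permutes of the same parameter: their starts
// are expected back to back (in either order), so both transfers are in
// flight at the same time.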
TEST_F(LatencyHidingSchedulerTest, ParallelCollectivePermutesTest) {
absl::string_view hlo_string = R"(
HloModule single_collective_permute_test, is_scheduled=true
ENTRY after_optimizations_test {
%parameter.1 = bf16[8]{0} parameter(0)
%collective-permute.2 = bf16[8]{0} collective-permute(bf16[8]{0} parameter.1), source_target_pairs={{0,1},{1,2},{2,3}}
%constant.3 = bf16[] constant(1)
%broadcast.4 = bf16[8]{0} broadcast(bf16[] %constant.3), dimensions={}
%add.5 = bf16[8]{0} add(bf16[8]{0} %collective-permute.2, bf16[8]{0} %broadcast.4)
%collective-permute.6 = bf16[8]{0} collective-permute(bf16[8]{0} parameter.1), source_target_pairs={{1,0},{0,3},{3,2}}
%add.6 = bf16[8]{0} add(bf16[8]{0} %collective-permute.6, bf16[8]{0} %add.5)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module, ParseHloText(hlo_string));
HloSchedule& module_schedule = hlo_module->schedule();
EXPECT_TRUE(hlo_module->has_entry_computation());
HloComputation* entry_computation = hlo_module->entry_computation();
std::vector<HloInstruction*> original_instruction_sequence =
module_schedule.sequence(entry_computation).instructions();
EXPECT_TRUE(RunScheduler(hlo_module.get()).ok());
std::vector<HloInstruction*> new_instruction_sequence =
module_schedule.sequence(entry_computation).instructions();
if (VLOG_IS_ON(1)) {
for (auto* new_i : new_instruction_sequence) {
VLOG(1) << new_i->ToString();
}
}
EXPECT_LT(PositionInVector(new_instruction_sequence,
original_instruction_sequence[0]),
PositionInVector(new_instruction_sequence,
original_instruction_sequence[2]));
EXPECT_LT(PositionInVector(new_instruction_sequence,
original_instruction_sequence[2]),
PositionInVector(new_instruction_sequence,
original_instruction_sequence[3]));
EXPECT_LT(PositionInVector(new_instruction_sequence,
original_instruction_sequence[3]),
PositionInVector(new_instruction_sequence,
original_instruction_sequence[4]));
EXPECT_LT(PositionInVector(new_instruction_sequence,
original_instruction_sequence[4]),
PositionInVector(new_instruction_sequence,
original_instruction_sequence[6]));
EXPECT_EQ(original_instruction_sequence[0]->user_count(), 2);
EXPECT_EQ(original_instruction_sequence[0]->users()[0]->opcode(),
HloOpcode::kCollectivePermuteStart);
EXPECT_EQ(original_instruction_sequence[0]->users()[1]->opcode(),
HloOpcode::kCollectivePermuteStart);
int collective_permute_1_pos = PositionInVector(
new_instruction_sequence, original_instruction_sequence[0]->users()[0]);
int collective_permute_2_pos = PositionInVector(
new_instruction_sequence, original_instruction_sequence[0]->users()[1]);
EXPECT_TRUE((collective_permute_1_pos == collective_permute_2_pos + 1) ||
(collective_permute_1_pos + 1 == collective_permute_2_pos));
}
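
// Six independent collective-permutes: the resulting schedule must keep the
// number of simultaneously in-flight permutes below the threshold checked
// by MaxConcurrentCollectivePermutesBelowThreshold.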
TEST_F(LatencyHidingSchedulerTest, MaxConcurrentCollectivePermutesTest) {
absl::string_view hlo_string = R"(
HloModule single_collective_permute_test, is_scheduled=true
ENTRY after_optimizations_test {
%parameter.1 = bf16[8]{0} parameter(0)
%parameter.2 = bf16[8]{0} parameter(1)
%parameter.3 = bf16[8]{0} parameter(2)
%collective-permute.4 = bf16[8]{0} collective-permute(bf16[8]{0} parameter.1), source_target_pairs={{0,1},{1,2},{2,3}}
%collective-permute.5 = bf16[8]{0} collective-permute(bf16[8]{0} parameter.1), source_target_pairs={{1,0},{0,3},{3,2}}
%collective-permute.6 = bf16[8]{0} collective-permute(bf16[8]{0} parameter.2), source_target_pairs={{0,1},{1,2},{2,3}}
%collective-permute.7 = bf16[8]{0} collective-permute(bf16[8]{0} parameter.2), source_target_pairs={{1,0},{0,3},{3,2}}
%collective-permute.8 = bf16[8]{0} collective-permute(bf16[8]{0} parameter.3), source_target_pairs={{0,1},{1,2},{2,3}}
%collective-permute.9 = bf16[8]{0} collective-permute(bf16[8]{0} parameter.3), source_target_pairs={{1,0},{0,3},{3,2}}
%add.10 = bf16[8]{0} add(bf16[8]{0} %collective-permute.8, bf16[8]{0} %collective-permute.9)
%add.11 = bf16[8]{0} add(bf16[8]{0} %collective-permute.7, bf16[8]{0} %add.10)
%add.12 = bf16[8]{0} add(bf16[8]{0} %collective-permute.6, bf16[8]{0} %add.11)
%add.13 = bf16[8]{0} add(bf16[8]{0} %collective-permute.5, bf16[8]{0} %add.12)
ROOT %add.14 = bf16[8]{0} add(bf16[8]{0} %collective-permute.4, bf16[8]{0} %add.13)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module, ParseHloText(hlo_string));
HloSchedule& module_schedule = hlo_module->schedule();
EXPECT_TRUE(hlo_module->has_entry_computation());
HloComputation* entry_computation = hlo_module->entry_computation();
EXPECT_TRUE(RunScheduler(hlo_module.get()).ok());
std::vector<HloInstruction*> new_instruction_sequence =
module_schedule.sequence(entry_computation).instructions();
if (VLOG_IS_ON(1)) {
for (auto* new_i : new_instruction_sequence) {
VLOG(1) << new_i->ToString();
}
}
EXPECT_TRUE(
MaxConcurrentCollectivePermutesBelowThreshold(new_instruction_sequence));
}
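
// Smoke test: a permute -> copy -> permute chain offers nothing to overlap
// with, so the test only checks that scheduling succeeds (and optionally
// logs the resulting sequence).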
TEST_F(LatencyHidingSchedulerTest, BalanceChainedCollectivePermutesNoOverlap) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
ENTRY entry {
param = bf16[8]{0} parameter(0)
collective-permute.1 = bf16[8]{0} collective-permute(param), source_target_pairs={{0,1},{1,2},{2,3}}
copy.2 = bf16[8]{0} copy(collective-permute.1)
ROOT collective-permute.2 = bf16[8]{0} collective-permute(copy.2), source_target_pairs={{1,0},{0,3},{3,2}}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module, ParseHloText(hlo_string));
HloSchedule& module_schedule = hlo_module->schedule();
EXPECT_TRUE(hlo_module->has_entry_computation());
HloComputation* entry_computation = hlo_module->entry_computation();
EXPECT_TRUE(RunScheduler(hlo_module.get()).ok());
std::vector<HloInstruction*> new_instruction_sequence =
module_schedule.sequence(entry_computation).instructions();
if (VLOG_IS_ON(1)) {
for (auto* new_i : new_instruction_sequence) {
VLOG(1) << new_i->ToString();
}
}
}
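
// The input HLO already contains an explicit start/done pair; the scheduler
// should hoist the start above the convolution c0 and keep the done at or
// after c0.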
TEST_F(LatencyHidingSchedulerTest, ExistingSingleCollectivePermuteAsyncTest) {
absl::string_view hlo_string = R"(
HloModule single_collective_permute_test, is_scheduled=true
ENTRY after_optimizations_test {
p0 = f32[16,64,256]{2,1,0} parameter(0)
p1 = f32[16,64,256]{2,1,0} parameter(1)
p2 = f32[16,256,256]{2,1,0} parameter(2)
c0 = f32[16,256,256]{2,1,0} convolution(p0, p1),
window={size=16 stride=15 lhs_dilate=16}, dim_labels=0fb_0io->0fb
%collective-permute-start.1 = (f32[16,256,256]{2,1,0},
f32[16,256,256]{2,1,0}, u32[], u32[]) collective-permute-start(
f32[16,256,256]{2,1,0} p2), source_target_pairs={{0,1},{1,2},{2,3}},
channel_id=1, metadata={op_type="CollectivePermute" op_name="cp0"}
%collective-permute-done.1 = f32[16,256,256]{2,1,0} collective-permute-done(
(f32[16,256,256]{2,1,0}, f32[16,256,256]{2,1,0},
u32[], u32[]) collective-permute-start.1),
metadata={op_type="CollectivePermute" op_name="cp0"}
ROOT a = f32[16,256,256]{2,1,0} add(c0, collective-permute-done.1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module, ParseHloText(hlo_string));
HloSchedule& module_schedule = hlo_module->schedule();
EXPECT_TRUE(hlo_module->has_entry_computation());
HloComputation* entry_computation = hlo_module->entry_computation();
EXPECT_TRUE(RunScheduler(hlo_module.get()).ok());
std::vector<HloInstruction*> new_instruction_sequence =
module_schedule.sequence(entry_computation).instructions();
if (VLOG_IS_ON(1)) {
for (auto* new_i : new_instruction_sequence) {
VLOG(1) << new_i->ToString();
}
}
EXPECT_LT(GetOpcodeIndexUsingMetaData(HloOpcode::kCollectivePermuteStart,
new_instruction_sequence, "cp0"),
GetIndex(new_instruction_sequence, "c0"));
EXPECT_GE(GetOpcodeIndexUsingMetaData(HloOpcode::kCollectivePermuteDone,
new_instruction_sequence, "cp0"),
GetIndex(new_instruction_sequence, "c0"));
}
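
// Two waves of collective-permutes routed through a tuple: cp0/cp1 must
// straddle c0, and cp2/cp3 must straddle c1, balancing the chain across
// both convolutions.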
TEST_F(LatencyHidingSchedulerTest, BalanceChainExtended) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
ENTRY entry {
p0 = f32[16,64,256]{2,1,0} parameter(0)
p1 = f32[16,64,256]{2,1,0} parameter(1)
p2 = f32[16,256,256]{2,1,0} parameter(2)
p3 = f32[16,256,256]{2,1,0} parameter(3)
cp0 = f32[16,256,256]{2,1,0} collective-permute(p2),
source_target_pairs={{0,1},{1,0}},
metadata={op_type="CollectivePermute" op_name="cp0"}
cp1 = f32[16,256,256]{2,1,0} collective-permute(p3),
source_target_pairs={{0,1},{1,0}},
metadata={op_type="CollectivePermute" op_name="cp1"}
c0 = f32[16,256,256]{2,1,0} convolution(p0, p1),
window={size=16 stride=15 lhs_dilate=16}, dim_labels=0fb_0io->0fb
c1 = f32[16,256,256]{2,1,0} convolution(p0, p1),
window={size=16 stride=15 lhs_dilate=16}, dim_labels=0fb_0io->0fb
t0 = (f32[16,256,256]{2,1,0}, f32[16,256,256]{2,1,0}) tuple(cp0, cp1)
gte0 = f32[16,256,256]{2,1,0} get-tuple-element(t0), index=0
gte1 = f32[16,256,256]{2,1,0} get-tuple-element(t0), index=1
cp2 = f32[16,256,256]{2,1,0} collective-permute(gte0),
source_target_pairs={{0,1},{1,0}},
metadata={op_type="CollectivePermute" op_name="cp2"}
a2 = f32[16,256,256]{2,1,0} add(cp2, c0)
cp3 = f32[16,256,256]{2,1,0} collective-permute(gte1),
source_target_pairs={{0,1},{1,0}},
metadata={op_type="CollectivePermute" op_name="cp3"}
ROOT tuple = (f32[16,256,256]{2,1,0}, f32[16,256,256]{2,1,0}) tuple(a2, cp3)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module, ParseHloText(hlo_string));
HloSchedule& module_schedule = hlo_module->schedule();
EXPECT_TRUE(hlo_module->has_entry_computation());
HloComputation* entry_computation = hlo_module->entry_computation();
EXPECT_TRUE(RunScheduler(hlo_module.get()).ok());
std::vector<HloInstruction*> new_instruction_sequence =
module_schedule.sequence(entry_computation).instructions();
if (VLOG_IS_ON(1)) {
for (auto* new_i : new_instruction_sequence) {
VLOG(1) << new_i->ToString();
}
}
EXPECT_LT(GetOpcodeIndexUsingMetaData(HloOpcode::kCollectivePermuteStart,
new_instruction_sequence, "cp0"),
GetIndex(new_instruction_sequence, "c0"));
EXPECT_LT(GetOpcodeIndexUsingMetaData(HloOpcode::kCollectivePermuteStart,
new_instruction_sequence, "cp1"),
GetIndex(new_instruction_sequence, "c0"));
EXPECT_GT(GetOpcodeIndexUsingMetaData(HloOpcode::kCollectivePermuteDone,
new_instruction_sequence, "cp0"),
GetIndex(new_instruction_sequence, "c0"));
EXPECT_GT(GetOpcodeIndexUsingMetaData(HloOpcode::kCollectivePermuteDone,
new_instruction_sequence, "cp1"),
GetIndex(new_instruction_sequence, "c0"));
EXPECT_LT(GetOpcodeIndexUsingMetaData(HloOpcode::kCollectivePermuteStart,
new_instruction_sequence, "cp2"),
GetIndex(new_instruction_sequence, "c1"));
EXPECT_LT(GetOpcodeIndexUsingMetaData(HloOpcode::kCollectivePermuteStart,
new_instruction_sequence, "cp3"),
GetIndex(new_instruction_sequence, "c1"));
EXPECT_GT(GetOpcodeIndexUsingMetaData(HloOpcode::kCollectivePermuteDone,
new_instruction_sequence, "cp2"),
GetIndex(new_instruction_sequence, "c1"));
EXPECT_GT(GetOpcodeIndexUsingMetaData(HloOpcode::kCollectivePermuteDone,
new_instruction_sequence, "cp3"),
GetIndex(new_instruction_sequence, "c1"));
}
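
// A realistic looped-einsum pipeline: large loop fusions feeding chained
// collective-permutes. The HLO text is built from two raw-string pieces
// (hence std::string with += below) because of its size.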
TEST_F(LatencyHidingSchedulerTest,
BalanceChainedCollectivePermutesLoopedEinsum) {
std::string hlo_string = R"(
HloModule module, is_scheduled=true
%fused_computation.1793 (param_0.4944: s32[16], param_1.5648: u32[], param_2.3959: u32[], param_3.3338: u32[], param_4.2302: u32[]) -> (s32[1], s32[1], s32[1], s32[1]) {
%param_0.4944 = s32[16]{0:T(128)} parameter(0)
%param_1.5648 = u32[]{:T(128)} parameter(1)
%dynamic-slice.1806 = s32[1]{0:T(128)} dynamic-slice(s32[16]{0:T(128)} %param_0.4944, u32[]{:T(128)} %param_1.5648), dynamic_slice_sizes={1}
%param_2.3959 = u32[]{:T(128)} parameter(2)
%dynamic-slice.1807 = s32[1]{0:T(128)} dynamic-slice(s32[16]{0:T(128)} %param_0.4944, u32[]{:T(128)} %param_2.3959), dynamic_slice_sizes={1}
%param_3.3338 = u32[]{:T(128)} parameter(3)
%dynamic-slice.1808 = s32[1]{0:T(128)} dynamic-slice(s32[16]{0:T(128)} %param_0.4944, u32[]{:T(128)} %param_3.3338), dynamic_slice_sizes={1}
%param_4.2302 = u32[]{:T(128)} parameter(4)
%dynamic-slice.1809 = s32[1]{0:T(128)} dynamic-slice(s32[16]{0:T(128)} %param_0.4944, u32[]{:T(128)} %param_4.2302), dynamic_slice_sizes={1}
ROOT %tuple.1384 = (s32[1]{0:T(128)}, s32[1]{0:T(128)}, s32[1]{0:T(128)}, s32[1]{0:T(128)}) tuple(s32[1]{0:T(128)} %dynamic-slice.1806, s32[1]{0:T(128)} %dynamic-slice.1807, s32[1]{0:T(128)} %dynamic-slice.1808, s32[1]{0:T(128)} %dynamic-slice.1809)
}
%fused_computation.109 (param_0.225: bf16[8,1024,1,20,256,1,1]) -> bf16[8,1024,1,20,256,1,1,1] {
%param_0.225 = bf16[8,1024,1,20,256,1,1]{4,1,3,0,6,5,2:T(8,128)(2,1)} parameter(0)
ROOT %bitcast.713 = bf16[8,1024,1,20,256,1,1,1]{4,1,7,3,2,0,6,5:T(8,128)(2,1)} bitcast(bf16[8,1024,1,20,256,1,1]{4,1,3,0,6,5,2:T(8,128)(2,1)} %param_0.225)
}
%fused_computation.110.clone (param_0.251: s32[], param_1.277: bf16[1,20,256,1,16,4,288,1], param_2.190: s32[]) -> bf16[1,20,256,2,1,4,288,1] {
%param_1.277 = bf16[1,20,256,1,16,4,288,1]{2,6,3,1,0,7,5,4:T(8,128)(2,1)} parameter(1)
%constant.6014 = bf16[]{:T(256)} constant(-inf)
%pad.370 = bf16[1,20,256,2,16,4,288,1]{2,6,3,1,0,7,5,4:T(8,128)(2,1)} pad(bf16[1,20,256,1,16,4,288,1]{2,6,3,1,0,7,5,4:T(8,128)(2,1)} %param_1.277, bf16[]{:T(256)} %constant.6014), padding=0_0x0_0x0_0x0_1x0_0x0_0x0_0x0_0
%constant.6004 = s32[]{:T(128)} constant(0)
%param_0.251 = s32[]{:T(128)} parameter(0)
%dynamic-slice.1503 = bf16[1,20,256,2,1,4,288,1]{2,6,3,1,0,7,5,4:T(8,128)(2,1)} dynamic-slice(bf16[1,20,256,2,16,4,288,1]{2,6,3,1,0,7,5,4:T(8,128)(2,1)} %pad.370, s32[]{:T(128)} %constant.6004, s32[]{:T(128)} %constant.6004, s32[]{:T(128)} %constant.6004, s32[]{:T(128)} %constant.6004, s32[]{:T(128)} %param_0.251, s32[]{:T(128)} %constant.6004, s32[]{:T(128)} %constant.6004, s32[]{:T(128)} %constant.6004), dynamic_slice_sizes={1,20,256,2,1,4,288,1}
%pad.369 = bf16[1,20,256,2,16,4,288,1]{2,6,3,1,0,7,5,4:T(8,128)(2,1)} pad(bf16[1,20,256,1,16,4,288,1]{2,6,3,1,0,7,5,4:T(8,128)(2,1)} %param_1.277, bf16[]{:T(256)} %constant.6014), padding=0_0x0_0x0_0x1_0x0_0x0_0x0_0x0_0
%param_2.190 = s32[]{:T(128)} parameter(2)
%dynamic-slice.1502 = bf16[1,20,256,2,1,4,288,1]{2,6,3,1,0,7,5,4:T(8,128)(2,1)} dynamic-slice(bf16[1,20,256,2,16,4,288,1]{2,6,3,1,0,7,5,4:T(8,128)(2,1)} %pad.369, s32[]{:T(128)} %constant.6004, s32[]{:T(128)} %constant.6004, s32[]{:T(128)} %constant.6004, s32[]{:T(128)} %constant.6004, s32[]{:T(128)} %param_2.190, s32[]{:T(128)} %constant.6004, s32[]{:T(128)} %constant.6004, s32[]{:T(128)} %constant.6004), dynamic_slice_sizes={1,20,256,2,1,4,288,1}
ROOT %maximum.513 = bf16[1,20,256,2,1,4,288,1]{2,6,3,1,0,7,5,4:T(8,128)(2,1)} maximum(bf16[1,20,256,2,1,4,288,1]{2,6,3,1,0,7,5,4:T(8,128)(2,1)} %dynamic-slice.1503, bf16[1,20,256,2,1,4,288,1]{2,6,3,1,0,7,5,4:T(8,128)(2,1)} %dynamic-slice.1502)
}
%fused_computation.108 (param_0.235: bf16[8,1024,1,20,256,1,1], param_1.276: s32[], param_2.187: bf16[1,20,256,1,16,4,288,1], param_3.145: s32[]) -> bf16[2,1,4,288,8,1024,1,1] {
%param_1.276 = s32[]{:T(128)} parameter(1)
%param_2.187 = bf16[1,20,256,1,16,4,288,1]{2,6,3,1,0,7,5,4:T(8,128)(2,1)} parameter(2)
%param_3.145 = s32[]{:T(128)} parameter(3)
%fusion.132 = bf16[1,20,256,2,1,4,288,1]{2,6,3,1,0,7,5,4:T(8,128)(2,1)} fusion(s32[]{:T(128)} %param_1.276, bf16[1,20,256,1,16,4,288,1]{2,6,3,1,0,7,5,4:T(8,128)(2,1)} %param_2.187, s32[]{:T(128)} %param_3.145), kind=kLoop, calls=%fused_computation.110.clone
%param_0.235 = bf16[8,1024,1,20,256,1,1]{4,1,3,0,6,5,2:T(8,128)(2,1)} parameter(0)
%fusion.129 = bf16[8,1024,1,20,256,1,1,1]{4,1,7,3,2,0,6,5:T(8,128)(2,1)} fusion(bf16[8,1024,1,20,256,1,1]{4,1,3,0,6,5,2:T(8,128)(2,1)} %param_0.235), kind=kLoop, calls=%fused_computation.109
ROOT %convolution.170 = bf16[2,1,4,288,8,1024,1,1]{5,3,0,7,6,4,2,1:T(8,128)(2,1)} convolution(bf16[1,20,256,2,1,4,288,1]{2,6,3,1,0,7,5,4:T(8,128)(2,1)} %fusion.132, bf16[8,1024,1,20,256,1,1,1]{4,1,7,3,2,0,6,5:T(8,128)(2,1)} %fusion.129), window={size=1x1x8x1x20x1 pad=0_0x0_0x7_7x0_0x0_0x0_0 rhs_reversal=0x0x1x0x0x0}, dim_labels=34f501b2_2o34i015->501b2f34
}
%fused_computation.117 (param_0.248: bf16[1,4,288,8,1024,1,1], param_1.273: bf16[2,1,4,288,8,1024,1,1]) -> bf16[1,4,288,8,1024,1,1] {
%param_0.248 = bf16[1,4,288,8,1024,1,1]{4,2,3,1,6,5,0:T(8,128)(2,1)} parameter(0)
%param_1.273 = bf16[2,1,4,288,8,1024,1,1]{5,3,0,7,6,4,2,1:T(8,128)(2,1)} parameter(1)
%slice.1252 = bf16[1,1,4,288,8,1024,1,1]{5,3,0,7,6,4,2,1:T(8,128)(2,1)} slice(bf16[2,1,4,288,8,1024,1,1]{5,3,0,7,6,4,2,1:T(8,128)(2,1)} %param_1.273), slice={[0:1], [0:1], [0:4], [0:288], [0:8], [0:1024], [0:1], [0:1]}
%bitcast.719 = bf16[1,4,288,8,1024,1,1]{4,2,3,1,6,5,0:T(8,128)(2,1)} bitcast(bf16[1,1,4,288,8,1024,1,1]{5,3,0,7,6,4,2,1:T(8,128)(2,1)} %slice.1252)
ROOT %add.3083 = bf16[1,4,288,8,1024,1,1]{4,2,3,1,6,5,0:T(8,128)(2,1)} add(bf16[1,4,288,8,1024,1,1]{4,2,3,1,6,5,0:T(8,128)(2,1)} %param_0.248, bf16[1,4,288,8,1024,1,1]{4,2,3,1,6,5,0:T(8,128)(2,1)} %bitcast.719)
}
%fused_computation.107 (param_0.223: bf16[8,1024,1,20,256,1,1]) -> bf16[8,1024,1,20,256,1,1,1] {
%param_0.223 = bf16[8,1024,1,20,256,1,1]{4,1,3,0,6,5,2:T(8,128)(2,1)} parameter(0)
ROOT %bitcast.711 = bf16[8,1024,1,20,256,1,1,1]{4,1,7,3,2,0,6,5:T(8,128)(2,1)} bitcast(bf16[8,1024,1,20,256,1,1]{4,1,3,0,6,5,2:T(8,128)(2,1)} %param_0.223)
}
%fused_computation.111.clone (param_0.250: s32[], param_1.275: bf16[1,20,256,1,16,4,288,1], param_2.189: s32[]) -> bf16[1,20,256,2,1,4,288,1] {
%param_1.275 = bf16[1,20,256,1,16,4,288,1]{2,6,3,1,0,7,5,4:T(8,128)(2,1)} parameter(1)
%constant.6009 = bf16[]{:T(256)} constant(-inf)
%pad.374 = bf16[1,20,256,2,16,4,288,1]{2,6,3,1,0,7,5,4:T(8,128)(2,1)} pad(bf16[1,20,256,1,16,4,288,1]{2,6,3,1,0,7,5,4:T(8,128)(2,1)} %param_1.275, bf16[]{:T(256)} %constant.6009), padding=0_0x0_0x0_0x0_1x0_0x0_0x0_0x0_0
%constant.5999 = s32[]{:T(128)} constant(0)
%param_0.250 = s32[]{:T(128)} parameter(0)
%dynamic-slice.1507 = bf16[1,20,256,2,1,4,288,1]{2,6,3,1,0,7,5,4:T(8,128)(2,1)} dynamic-slice(bf16[1,20,256,2,16,4,288,1]{2,6,3,1,0,7,5,4:T(8,128)(2,1)} %pad.374, s32[]{:T(128)} %constant.5999, s32[]{:T(128)} %constant.5999, s32[]{:T(128)} %constant.5999, s32[]{:T(128)} %constant.5999, s32[]{:T(128)} %param_0.250, s32[]{:T(128)} %constant.5999, s32[]{:T(128)} %constant.5999, s32[]{:T(128)} %constant.5999), dynamic_slice_sizes={1,20,256,2,1,4,288,1}
%pad.373 = bf16[1,20,256,2,16,4,288,1]{2,6,3,1,0,7,5,4:T(8,128)(2,1)} pad(bf16[1,20,256,1,16,4,288,1]{2,6,3,1,0,7,5,4:T(8,128)(2,1)} %param_1.275, bf16[]{:T(256)} %constant.6009), padding=0_0x0_0x0_0x1_0x0_0x0_0x0_0x0_0
%param_2.189 = s32[]{:T(128)} parameter(2)
%dynamic-slice.1506 = bf16[1,20,256,2,1,4,288,1]{2,6,3,1,0,7,5,4:T(8,128)(2,1)} dynamic-slice(bf16[1,20,256,2,16,4,288,1]{2,6,3,1,0,7,5,4:T(8,128)(2,1)} %pad.373, s32[]{:T(128)} %constant.5999, s32[]{:T(128)} %constant.5999, s32[]{:T(128)} %constant.5999, s32[]{:T(128)} %constant.5999, s32[]{:T(128)} %param_2.189, s32[]{:T(128)} %constant.5999, s32[]{:T(128)} %constant.5999, s32[]{:T(128)} %constant.5999), dynamic_slice_sizes={1,20,256,2,1,4,288,1}
ROOT %maximum.514 = bf16[1,20,256,2,1,4,288,1]{2,6,3,1,0,7,5,4:T(8,128)(2,1)} maximum(bf16[1,20,256,2,1,4,288,1]{2,6,3,1,0,7,5,4:T(8,128)(2,1)} %dynamic-slice.1507, bf16[1,20,256,2,1,4,288,1]{2,6,3,1,0,7,5,4:T(8,128)(2,1)} %dynamic-slice.1506)
}
%fused_computation.106 (param_0.239: bf16[8,1024,1,20,256,1,1], param_1.274: s32[], param_2.185: bf16[1,20,256,1,16,4,288,1], param_3.144: s32[]) -> bf16[2,1,4,288,8,1024,1,1] {
%param_1.274 = s32[]{:T(128)} parameter(1)
%param_2.185 = bf16[1,20,256,1,16,4,288,1]{2,6,3,1,0,7,5,4:T(8,128)(2,1)} parameter(2)
%param_3.144 = s32[]{:T(128)} parameter(3)
%fusion.133 = bf16[1,20,256,2,1,4,288,1]{2,6,3,1,0,7,5,4:T(8,128)(2,1)} fusion(s32[]{:T(128)} %param_1.274, bf16[1,20,256,1,16,4,288,1]{2,6,3,1,0,7,5,4:T(8,128)(2,1)} %param_2.185, s32[]{:T(128)} %param_3.144), kind=kLoop, calls=%fused_computation.111.clone
%param_0.239 = bf16[8,1024,1,20,256,1,1]{4,1,3,0,6,5,2:T(8,128)(2,1)} parameter(0)
%fusion.127 = bf16[8,1024,1,20,256,1,1,1]{4,1,7,3,2,0,6,5:T(8,128)(2,1)} fusion(bf16[8,1024,1,20,256,1,1]{4,1,3,0,6,5,2:T(8,128)(2,1)} %param_0.239), kind=kLoop, calls=%fused_computation.107
ROOT %convolution.169 = bf16[2,1,4,288,8,1024,1,1]{5,3,0,7,6,4,2,1:T(8,128)(2,1)} convolution(bf16[1,20,256,2,1,4,288,1]{2,6,3,1,0,7,5,4:T(8,128)(2,1)} %fusion.133, bf16[8,1024,1,20,256,1,1,1]{4,1,7,3,2,0,6,5:T(8,128)(2,1)} %fusion.127), window={size=1x1x8x1x20x1 pad=0_0x0_0x7_7x0_0x0_0x0_0 rhs_reversal=0x0x1x0x0x0}, dim_labels=34f501b2_2o34i015->501b2f34
}
%fused_computation.115 (param_0.244: bf16[1,4,288,8,1024,1,1], param_1.270: bf16[2,1,4,288,8,1024,1,1]) -> bf16[1,4,288,8,1024,1,1] {
%param_0.244 = bf16[1,4,288,8,1024,1,1]{4,2,3,1,6,5,0:T(8,128)(2,1)} parameter(0)
%param_1.270 = bf16[2,1,4,288,8,1024,1,1]{5,3,0,7,6,4,2,1:T(8,128)(2,1)} parameter(1)
%slice.1249 = bf16[1,1,4,288,8,1024,1,1]{5,3,0,7,6,4,2,1:T(8,128)(2,1)} slice(bf16[2,1,4,288,8,1024,1,1]{5,3,0,7,6,4,2,1:T(8,128)(2,1)} %param_1.270), slice={[0:1], [0:1], [0:4], [0:288], [0:8], [0:1024], [0:1], [0:1]}
%bitcast.716 = bf16[1,4,288,8,1024,1,1]{4,2,3,1,6,5,0:T(8,128)(2,1)} bitcast(bf16[1,1,4,288,8,1024,1,1]{5,3,0,7,6,4,2,1:T(8,128)(2,1)} %slice.1249)
ROOT %add.3082 = bf16[1,4,288,8,1024,1,1]{4,2,3,1,6,5,0:T(8,128)(2,1)} add(bf16[1,4,288,8,1024,1,1]{4,2,3,1,6,5,0:T(8,128)(2,1)} %param_0.244, bf16[1,4,288,8,1024,1,1]{4,2,3,1,6,5,0:T(8,128)(2,1)} %bitcast.716)
}
%fused_computation.113 (param_0.241: bf16[1,4,288,8,1024,1,1], param_1.267: bf16[2,4,288,8,1024,1,1]) -> bf16[1,4,288,8,1024,1,1] {
%param_0.241 = bf16[1,4,288,8,1024,1,1]{4,2,0,3,1,6,5:T(8,128)(2,1)} parameter(0)
%param_1.267 = bf16[2,4,288,8,1024,1,1]{4,2,0,3,1,6,5:T(8,128)(2,1)} parameter(1)
%slice.1246 = bf16[1,4,288,8,1024,1,1]{4,2,0,3,1,6,5:T(8,128)(2,1)} slice(bf16[2,4,288,8,1024,1,1]{4,2,0,3,1,6,5:T(8,128)(2,1)} %param_1.267), slice={[1:2], [0:4], [0:288], [0:8], [0:1024], [0:1], [0:1]}
ROOT %add.3081 = bf16[1,4,288,8,1024,1,1]{4,2,0,3,1,6,5:T(8,128)(2,1)} add(bf16[1,4,288,8,1024,1,1]{4,2,0,3,1,6,5:T(8,128)(2,1)} %param_0.241, bf16[1,4,288,8,1024,1,1]{4,2,0,3,1,6,5:T(8,128)(2,1)} %slice.1246)
}
%fused_computation.112 (param_0.240: bf16[1,4,288,8,1024,1,1], param_1.265: bf16[2,4,288,8,1024,1,1]) -> bf16[1,4,288,8,1024,1,1] {
%param_0.240 = bf16[1,4,288,8,1024,1,1]{4,2,0,3,1,6,5:T(8,128)(2,1)} parameter(0)
%param_1.265 = bf16[2,4,288,8,1024,1,1]{4,2,0,3,1,6,5:T(8,128)(2,1)} parameter(1)
%slice.1245 = bf16[1,4,288,8,1024,1,1]{4,2,0,3,1,6,5:T(8,128)(2,1)} slice(bf16[2,4,288,8,1024,1,1]{4,2,0,3,1,6,5:T(8,128)(2,1)} %param_1.265), slice={[1:2], [0:4], [0:288], [0:8], [0:1024], [0:1], [0:1]}
ROOT %add.3080 = bf16[1,4,288,8,1024,1,1]{4,2,0,3,1,6,5:T(8,128)(2,1)} add(bf16[1,4,288,8,1024,1,1]{4,2,0,3,1,6,5:T(8,128)(2,1)} %param_0.240, bf16[1,4,288,8,1024,1,1]{4,2,0,3,1,6,5:T(8,128)(2,1)} %slice.1245)
}
)";
hlo_string += R"(
ENTRY entry {
%param.163 = (bf16[1,20,256,16,4,288,1]{2,5,1,4,3,6,0:T(8,128)(2,1)}, bf16[8,1024,1,20,256,1,1]{4,1,3,0,6,5,2:T(8,128)(2,1)}, bf16[1,4,288,8,1024,1,1]{4,2,3,1,6,5,0:T(8,128)(2,1)}, bf16[1,4,288,8,1024,1,1]{4,2,0,3,1,6,5:T(8,128)(2,1)}, u32[]{:T(128)}) parameter(0)
%get-tuple-element.20289 = bf16[1,20,256,16,4,288,1]{2,5,1,4,3,6,0:T(8,128)(2,1)} get-tuple-element((bf16[1,20,256,16,4,288,1]{2,5,1,4,3,6,0:T(8,128)(2,1)}, bf16[8,1024,1,20,256,1,1]{4,1,3,0,6,5,2:T(8,128)(2,1)}, bf16[1,4,288,8,1024,1,1]{4,2,3,1,6,5,0:T(8,128)(2,1)}, bf16[1,4,288,8,1024,1,1]{4,2,0,3,1,6,5:T(8,128)(2,1)}, u32[]{:T(128)}) %param.163), index=0
%get-tuple-element.20290 = bf16[8,1024,1,20,256,1,1]{4,1,3,0,6,5,2:T(8,128)(2,1)} get-tuple-element((bf16[1,20,256,16,4,288,1]{2,5,1,4,3,6,0:T(8,128)(2,1)}, bf16[8,1024,1,20,256,1,1]{4,1,3,0,6,5,2:T(8,128)(2,1)}, bf16[1,4,288,8,1024,1,1]{4,2,3,1,6,5,0:T(8,128)(2,1)}, bf16[1,4,288,8,1024,1,1]{4,2,0,3,1,6,5:T(8,128)(2,1)}, u32[]{:T(128)}) %param.163), index=1
%get-tuple-element.20291 = bf16[1,4,288,8,1024,1,1]{4,2,3,1,6,5,0:T(8,128)(2,1)} get-tuple-element((bf16[1,20,256,16,4,288,1]{2,5,1,4,3,6,0:T(8,128)(2,1)}, bf16[8,1024,1,20,256,1,1]{4,1,3,0,6,5,2:T(8,128)(2,1)}, bf16[1,4,288,8,1024,1,1]{4,2,3,1,6,5,0:T(8,128)(2,1)}, bf16[1,4,288,8,1024,1,1]{4,2,0,3,1,6,5:T(8,128)(2,1)}, u32[]{:T(128)}) %param.163), index=2
%collective-permute.8 = bf16[1,4,288,8,1024,1,1]{4,2,3,1,6,5,0:T(8,128)(2,1)} collective-permute(bf16[1,4,288,8,1024,1,1]{4,2,3,1,6,5,0:T(8,128)(2,1)} %get-tuple-element.20291), channel_id=22, source_target_pairs={{0,15},{1,0},{2,1},{3,2},{4,3},{5,4},{6,5},{7,6},{8,7},{9,8},{10,9},{11,10},{12,11},{13,12},{14,13},{15,14}}, backend_config="{\"flag_configs\":[],\"barrier_config\":{\"barrier_type\":\"CUSTOM\",\"id\":\"0\"}}"
%iota.36 = s32[16]{0:T(128)} iota(), iota_dimension=0
%constant.3283 = u32[1024]{0:T(1024)} constant({...})
%partition-id.6 = u32[]{:T(128)} partition-id()
%dynamic-slice.254 = u32[1]{0:T(128)} dynamic-slice(u32[1024]{0:T(1024)} %constant.3283, u32[]{:T(128)} %partition-id.6), dynamic_slice_sizes={1}
%bitcast.55 = u32[]{:T(128)} bitcast(u32[1]{0:T(128)} %dynamic-slice.254)
%constant.5148 = u32[]{:T(128)} constant(8)
%add.2615 = u32[]{:T(128)} add(u32[]{:T(128)} %bitcast.55, u32[]{:T(128)} %constant.5148)
%get-tuple-element.20293 = u32[]{:T(128)} get-tuple-element((bf16[1,20,256,16,4,288,1]{2,5,1,4,3,6,0:T(8,128)(2,1)}, bf16[8,1024,1,20,256,1,1]{4,1,3,0,6,5,2:T(8,128)(2,1)}, bf16[1,4,288,8,1024,1,1]{4,2,3,1,6,5,0:T(8,128)(2,1)}, bf16[1,4,288,8,1024,1,1]{4,2,0,3,1,6,5:T(8,128)(2,1)}, u32[]{:T(128)}) %param.163), index=4
%copy.2385 = u32[]{:T(128)} copy(u32[]{:T(128)} %get-tuple-element.20293)
%constant.3305 = u32[]{:T(128)} constant(1)
%add.1503 = u32[]{:T(128)} add(u32[]{:T(128)} %copy.2385, u32[]{:T(128)} %constant.3305)
%subtract.200 = u32[]{:T(128)} subtract(u32[]{:T(128)} %add.2615, u32[]{:T(128)} %add.1503)
%constant.4875 = u32[]{:T(128)} constant(15)
%and.29 = u32[]{:T(128)} and(u32[]{:T(128)} %subtract.200, u32[]{:T(128)} %constant.4875)
%add.1504 = u32[]{:T(128)} add(u32[]{:T(128)} %add.1503, u32[]{:T(128)} %bitcast.55)
%constant.3285 = u32[]{:T(128)} constant(9)
%add.1506 = u32[]{:T(128)} add(u32[]{:T(128)} %add.1504, u32[]{:T(128)} %constant.3285)
%and.28 = u32[]{:T(128)} and(u32[]{:T(128)} %add.1506, u32[]{:T(128)} %constant.4875)
%subtract.198 = u32[]{:T(128)} subtract(u32[]{:T(128)} %add.2615, u32[]{:T(128)} %copy.2385)
%and.27 = u32[]{:T(128)} and(u32[]{:T(128)} %subtract.198, u32[]{:T(128)} %constant.4875)
%add.1498 = u32[]{:T(128)} add(u32[]{:T(128)} %copy.2385, u32[]{:T(128)} %bitcast.55)
%add.1500 = u32[]{:T(128)} add(u32[]{:T(128)} %add.1498, u32[]{:T(128)} %constant.3285)
%and.26 = u32[]{:T(128)} and(u32[]{:T(128)} %add.1500, u32[]{:T(128)} %constant.4875)
%fusion.1987 = (s32[1]{0:T(128)}, s32[1]{0:T(128)}, s32[1]{0:T(128)}, s32[1]{0:T(128)}) fusion(s32[16]{0:T(128)} %iota.36, u32[]{:T(128)} %and.29, u32[]{:T(128)} %and.28, u32[]{:T(128)} %and.27, u32[]{:T(128)} %and.26), kind=kLoop, calls=%fused_computation.1793
%get-tuple-element.19793 = s32[1]{0:T(128)} get-tuple-element((s32[1]{0:T(128)}, s32[1]{0:T(128)}, s32[1]{0:T(128)}, s32[1]{0:T(128)}) %fusion.1987), index=3
%bitcast.56 = s32[]{:T(128)} bitcast(s32[1]{0:T(128)} %get-tuple-element.19793)
%bitcast.54 = bf16[1,20,256,1,16,4,288,1]{2,6,3,1,0,7,5,4:T(8,128)(2,1)} bitcast(bf16[1,20,256,16,4,288,1]{2,5,1,4,3,6,0:T(8,128)(2,1)} %get-tuple-element.20289)
%get-tuple-element.19792 = s32[1]{0:T(128)} get-tuple-element((s32[1]{0:T(128)}, s32[1]{0:T(128)}, s32[1]{0:T(128)}, s32[1]{0:T(128)}) %fusion.1987), index=2
%bitcast.57 = s32[]{:T(128)} bitcast(s32[1]{0:T(128)} %get-tuple-element.19792)
%fusion.128 = bf16[2,1,4,288,8,1024,1,1]{5,3,0,7,6,4,2,1:T(8,128)(2,1)} fusion(bf16[8,1024,1,20,256,1,1]{4,1,3,0,6,5,2:T(8,128)(2,1)} %get-tuple-element.20290, s32[]{:T(128)} %bitcast.56, bf16[1,20,256,1,16,4,288,1]{2,6,3,1,0,7,5,4:T(8,128)(2,1)} %bitcast.54, s32[]{:T(128)} %bitcast.57), kind=kOutput, calls=%fused_computation.108
%fusion.139 = bf16[1,4,288,8,1024,1,1]{4,2,3,1,6,5,0:T(8,128)(2,1)} fusion(bf16[1,4,288,8,1024,1,1]{4,2,3,1,6,5,0:T(8,128)(2,1)} %collective-permute.8, bf16[2,1,4,288,8,1024,1,1]{5,3,0,7,6,4,2,1:T(8,128)(2,1)} %fusion.128), kind=kLoop, calls=%fused_computation.117
%collective-permute.10 = bf16[1,4,288,8,1024,1,1]{4,2,3,1,6,5,0:T(8,128)(2,1)} collective-permute(bf16[1,4,288,8,1024,1,1]{4,2,3,1,6,5,0:T(8,128)(2,1)} %fusion.139), channel_id=24, source_target_pairs={{0,15},{1,0},{2,1},{3,2},{4,3},{5,4},{6,5},{7,6},{8,7},{9,8},{10,9},{11,10},{12,11},{13,12},{14,13},{15,14}}, backend_config="{\"flag_configs\":[],\"barrier_config\":{\"barrier_type\":\"CUSTOM\",\"id\":\"0\"}}"
%get-tuple-element.19791 = s32[1]{0:T(128)} get-tuple-element((s32[1]{0:T(128)}, s32[1]{0:T(128)}, s32[1]{0:T(128)}, s32[1]{0:T(128)}) %fusion.1987), index=1
%bitcast.60 = s32[]{:T(128)} bitcast(s32[1]{0:T(128)} %get-tuple-element.19791)
%get-tuple-element.19790 = s32[1]{0:T(128)} get-tuple-element((s32[1]{0:T(128)}, s32[1]{0:T(128)}, s32[1]{0:T(128)}, s32[1]{0:T(128)}) %fusion.1987), index=0
%bitcast.61 = s32[]{:T(128)} bitcast(s32[1]{0:T(128)} %get-tuple-element.19790)
%fusion.126 = bf16[2,1,4,288,8,1024,1,1]{5,3,0,7,6,4,2,1:T(8,128)(2,1)} fusion(bf16[8,1024,1,20,256,1,1]{4,1,3,0,6,5,2:T(8,128)(2,1)} %get-tuple-element.20290, s32[]{:T(128)} %bitcast.60, bf16[1,20,256,1,16,4,288,1]{2,6,3,1,0,7,5,4:T(8,128)(2,1)} %bitcast.54, s32[]{:T(128)} %bitcast.61), kind=kOutput, calls=%fused_computation.106
%fusion.137 = bf16[1,4,288,8,1024,1,1]{4,2,3,1,6,5,0:T(8,128)(2,1)} fusion(bf16[1,4,288,8,1024,1,1]{4,2,3,1,6,5,0:T(8,128)(2,1)} %collective-permute.10, bf16[2,1,4,288,8,1024,1,1]{5,3,0,7,6,4,2,1:T(8,128)(2,1)} %fusion.126), kind=kLoop, calls=%fused_computation.115
%get-tuple-element.20292 = bf16[1,4,288,8,1024,1,1]{4,2,0,3,1,6,5:T(8,128)(2,1)} get-tuple-element((bf16[1,20,256,16,4,288,1]{2,5,1,4,3,6,0:T(8,128)(2,1)}, bf16[8,1024,1,20,256,1,1]{4,1,3,0,6,5,2:T(8,128)(2,1)}, bf16[1,4,288,8,1024,1,1]{4,2,3,1,6,5,0:T(8,128)(2,1)}, bf16[1,4,288,8,1024,1,1]{4,2,0,3,1,6,5:T(8,128)(2,1)}, u32[]{:T(128)}) %param.163), index=3
%collective-permute.9 = bf16[1,4,288,8,1024,1,1]{4,2,0,3,1,6,5:T(8,128)(2,1)} collective-permute(bf16[1,4,288,8,1024,1,1]{4,2,0,3,1,6,5:T(8,128)(2,1)} %get-tuple-element.20292), channel_id=23, source_target_pairs={{0,1},{1,2},{2,3},{3,4},{4,5},{5,6},{6,7},{7,8},{8,9},{9,10},{10,11},{11,12},{12,13},{13,14},{14,15},{15,0}}, backend_config="{\"flag_configs\":[],\"barrier_config\":{\"barrier_type\":\"CUSTOM\",\"id\":\"1\"}}"
%bitcast.63 = bf16[2,4,288,8,1024,1,1]{4,2,0,3,1,6,5:T(8,128)(2,1)} bitcast(bf16[2,1,4,288,8,1024,1,1]{5,3,0,7,6,4,2,1:T(8,128)(2,1)} %fusion.128)
%fusion.135 = bf16[1,4,288,8,1024,1,1]{4,2,0,3,1,6,5:T(8,128)(2,1)} fusion(bf16[1,4,288,8,1024,1,1]{4,2,0,3,1,6,5:T(8,128)(2,1)} %collective-permute.9, bf16[2,4,288,8,1024,1,1]{4,2,0,3,1,6,5:T(8,128)(2,1)} %bitcast.63), kind=kLoop, calls=%fused_computation.113
%collective-permute.11 = bf16[1,4,288,8,1024,1,1]{4,2,0,3,1,6,5:T(8,128)(2,1)} collective-permute(bf16[1,4,288,8,1024,1,1]{4,2,0,3,1,6,5:T(8,128)(2,1)} %fusion.135), channel_id=25, source_target_pairs={{0,1},{1,2},{2,3},{3,4},{4,5},{5,6},{6,7},{7,8},{8,9},{9,10},{10,11},{11,12},{12,13},{13,14},{14,15},{15,0}}, backend_config="{\"flag_configs\":[],\"barrier_config\":{\"barrier_type\":\"CUSTOM\",\"id\":\"1\"}}"
%bitcast.64 = bf16[2,4,288,8,1024,1,1]{4,2,0,3,1,6,5:T(8,128)(2,1)} bitcast(bf16[2,1,4,288,8,1024,1,1]{5,3,0,7,6,4,2,1:T(8,128)(2,1)} %fusion.126)
%fusion.134 = bf16[1,4,288,8,1024,1,1]{4,2,0,3,1,6,5:T(8,128)(2,1)} fusion(bf16[1,4,288,8,1024,1,1]{4,2,0,3,1,6,5:T(8,128)(2,1)} %collective-permute.11, bf16[2,4,288,8,1024,1,1]{4,2,0,3,1,6,5:T(8,128)(2,1)} %bitcast.64), kind=kLoop, calls=%fused_computation.112
%constant.5023 = u32[]{:T(128)} constant(2)
%add.1925 = u32[]{:T(128)} add(u32[]{:T(128)} %copy.2385, u32[]{:T(128)} %constant.5023)
ROOT %tuple.1457 = (bf16[1,20,256,16,4,288,1]{2,5,1,4,3,6,0:T(8,128)(2,1)}, bf16[8,1024,1,20,256,1,1]{4,1,3,0,6,5,2:T(8,128)(2,1)}, bf16[1,4,288,8,1024,1,1]{4,2,3,1,6,5,0:T(8,128)(2,1)}, bf16[1,4,288,8,1024,1,1]{4,2,0,3,1,6,5:T(8,128)(2,1)}, u32[]{:T(128)}) tuple(bf16[1,20,256,16,4,288,1]{2,5,1,4,3,6,0:T(8,128)(2,1)} %get-tuple-element.20289, bf16[8,1024,1,20,256,1,1]{4,1,3,0,6,5,2:T(8,128)(2,1)} %get-tuple-element.20290, bf16[1,4,288,8,1024,1,1]{4,2,3,1,6,5,0:T(8,128)(2,1)} %fusion.137, bf16[1,4,288,8,1024,1,1]{4,2,0,3,1,6,5:T(8,128)(2,1)} %fusion.134, u32[]{:T(128)} %add.1925)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module, ParseHloText(hlo_string));
HloSchedule& module_schedule = hlo_module->schedule();
EXPECT_TRUE(hlo_module->has_entry_computation());
HloComputation* entry_computation = hlo_module->entry_computation();
EXPECT_TRUE(RunScheduler(hlo_module.get()).ok());
std::vector<HloInstruction*> new_instruction_sequence =
module_schedule.sequence(entry_computation).instructions();
if (VLOG_IS_ON(1)) {
for (auto* new_i : new_instruction_sequence) {
VLOG(1) << new_i->ToString();
}
}
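// The chained collective-permute pairs should be in flight while the einsum
// output fusions run: each pair's starts are scheduled before fusion.128
// (resp. fusion.126) and the matching dones only after it.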
EXPECT_LT(GetIndex(new_instruction_sequence, "collective-permute-start"),
GetIndex(new_instruction_sequence, "fusion.128"));
EXPECT_LT(GetIndex(new_instruction_sequence, "collective-permute-start.2"),
GetIndex(new_instruction_sequence, "fusion.128"));
EXPECT_GT(GetIndex(new_instruction_sequence, "collective-permute-done"),
GetIndex(new_instruction_sequence, "fusion.128"));
EXPECT_GT(GetIndex(new_instruction_sequence, "collective-permute-done.2"),
GetIndex(new_instruction_sequence, "fusion.128"));
EXPECT_LT(GetIndex(new_instruction_sequence, "collective-permute-start.1"),
GetIndex(new_instruction_sequence, "fusion.126"));
EXPECT_LT(GetIndex(new_instruction_sequence, "collective-permute-start.3"),
GetIndex(new_instruction_sequence, "fusion.126"));
EXPECT_GT(GetIndex(new_instruction_sequence, "collective-permute-done.1"),
GetIndex(new_instruction_sequence, "fusion.126"));
EXPECT_GT(GetIndex(new_instruction_sequence, "collective-permute-done.3"),
GetIndex(new_instruction_sequence, "fusion.126"));
}
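// The scheduler should place the convolutions between the collective-permute
// start/done pairs so that the communication is hidden behind compute.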
TEST_F(LatencyHidingSchedulerTest, MoveCertainConv) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
ENTRY entry {
p0 = f32[16,64,256]{2,1,0} parameter(0)
p1 = f32[16,64,256]{2,1,0} parameter(1)
p2 = f32[16,256,256]{2,1,0} parameter(2)
p3 = f32[16,256,256]{2,1,0} parameter(3)
cp0 = f32[16,256,256]{2,1,0} collective-permute(p2),
source_target_pairs={{0,1},{1,0}}
cp1 = f32[16,256,256]{2,1,0} collective-permute(p3),
source_target_pairs={{0,1},{1,0}}
c0 = f32[16,256,256]{2,1,0} convolution(p0, p1),
window={size=16 stride=15 lhs_dilate=16}, dim_labels=0fb_0io->0fb
c1 = f32[16,256,256]{2,1,0} convolution(p0, p1),
window={size=16 stride=15 lhs_dilate=16}, dim_labels=0fb_0io->0fb
a0 = f32[16,256,256]{2,1,0} add(cp0, c1)
cp2 = f32[16,256,256]{2,1,0} collective-permute(a0),
source_target_pairs={{0,1},{1,0}}
a2 = f32[16,256,256]{2,1,0} add(cp2, c0)
a1 = f32[16,256,256]{2,1,0} add(cp1, c1)
cp3 = f32[16,256,256]{2,1,0} collective-permute(a1),
source_target_pairs={{0,1},{1,0}}
ROOT tuple = (f32[16,256,256]{2,1,0}, f32[16,256,256]{2,1,0}) tuple(a2, cp3)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module, ParseHloText(hlo_string));
HloSchedule& module_schedule = hlo_module->schedule();
EXPECT_TRUE(hlo_module->has_entry_computation());
HloComputation* entry_computation = hlo_module->entry_computation();
EXPECT_TRUE(RunScheduler(hlo_module.get()).ok());
std::vector<HloInstruction*> new_instruction_sequence =
module_schedule.sequence(entry_computation).instructions();
if (VLOG_IS_ON(1)) {
for (auto* new_i : new_instruction_sequence) {
VLOG(1) << new_i->ToString();
}
}
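// cp0 and cp1 should overlap c1; cp2 and cp3, which consume results derived
// from the first pair, should overlap c0.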
EXPECT_LT(GetIndex(new_instruction_sequence, "collective-permute-start"),
GetIndex(new_instruction_sequence, "c1"));
EXPECT_LT(GetIndex(new_instruction_sequence, "collective-permute-start.1"),
GetIndex(new_instruction_sequence, "c1"));
EXPECT_GT(GetIndex(new_instruction_sequence, "collective-permute-done"),
GetIndex(new_instruction_sequence, "c1"));
EXPECT_GT(GetIndex(new_instruction_sequence, "collective-permute-done.1"),
GetIndex(new_instruction_sequence, "c1"));
EXPECT_LT(GetIndex(new_instruction_sequence, "collective-permute-start.2"),
GetIndex(new_instruction_sequence, "c0"));
EXPECT_LT(GetIndex(new_instruction_sequence, "collective-permute-start.3"),
GetIndex(new_instruction_sequence, "c0"));
EXPECT_GT(GetIndex(new_instruction_sequence, "collective-permute-done.2"),
GetIndex(new_instruction_sequence, "c0"));
EXPECT_GT(GetIndex(new_instruction_sequence, "collective-permute-done.3"),
GetIndex(new_instruction_sequence, "c0"));
}
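// A 32-way variant of the chained collective-permute looped einsum: the
// permute pairs should again straddle the kOutput fusions.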
TEST_F(LatencyHidingSchedulerTest,
BalanceChainedCollectivePermutesLoopedEinsum2) {
std::string hlo_string = R"(
HloModule module, is_scheduled=true
%fused_computation.1851 (param_0.5170: s32[32], param_1.5848: u32[], param_2.4103: u32[], param_3.3513: u32[], param_4.2356: u32[]) -> (s32[1], s32[1], s32[1], s32[1]) {
%param_0.5170 = s32[32]{0:T(128)} parameter(0)
%param_1.5848 = u32[]{:T(128)} parameter(1)
%dynamic-slice.1636 = s32[1]{0:T(128)} dynamic-slice(s32[32]{0:T(128)} %param_0.5170, u32[]{:T(128)} %param_1.5848), dynamic_slice_sizes={1}
%param_2.4103 = u32[]{:T(128)} parameter(2)
%dynamic-slice.1637 = s32[1]{0:T(128)} dynamic-slice(s32[32]{0:T(128)} %param_0.5170, u32[]{:T(128)} %param_2.4103), dynamic_slice_sizes={1}
%param_3.3513 = u32[]{:T(128)} parameter(3)
%dynamic-slice.1638 = s32[1]{0:T(128)} dynamic-slice(s32[32]{0:T(128)} %param_0.5170, u32[]{:T(128)} %param_3.3513), dynamic_slice_sizes={1}
%param_4.2356 = u32[]{:T(128)} parameter(4)
%dynamic-slice.1639 = s32[1]{0:T(128)} dynamic-slice(s32[32]{0:T(128)} %param_0.5170, u32[]{:T(128)} %param_4.2356), dynamic_slice_sizes={1}
ROOT %tuple.1297 = (s32[1]{0:T(128)}, s32[1]{0:T(128)}, s32[1]{0:T(128)}, s32[1]{0:T(128)}) tuple(s32[1]{0:T(128)} %dynamic-slice.1636, s32[1]{0:T(128)} %dynamic-slice.1637, s32[1]{0:T(128)} %dynamic-slice.1638, s32[1]{0:T(128)} %dynamic-slice.1639)
}
%fused_computation.117 (param_0.249: bf16[16,1024,1,10,256,1]) -> bf16[16,1024,1,10,256,1,1] {
%param_0.249 = bf16[16,1024,1,10,256,1]{4,1,3,0,5,2:T(8,128)(2,1)} parameter(0)
ROOT %bitcast.672 = bf16[16,1024,1,10,256,1,1]{4,1,6,3,2,0,5:T(8,128)(2,1)} bitcast(bf16[16,1024,1,10,256,1]{4,1,3,0,5,2:T(8,128)(2,1)} %param_0.249)
}
%fused_computation.124.clone (param_0.277: s32[], param_1.330: bf16[1,10,256,1,32,576,1], param_2.233: s32[]) -> bf16[1,10,256,2,1,576,1] {
%param_1.330 = bf16[1,10,256,1,32,576,1]{2,5,3,1,0,6,4:T(8,128)(2,1)} parameter(1)
%constant.5658 = bf16[]{:T(256)} constant(-inf)
%pad.357 = bf16[1,10,256,2,32,576,1]{2,5,3,1,0,6,4:T(8,128)(2,1)} pad(bf16[1,10,256,1,32,576,1]{2,5,3,1,0,6,4:T(8,128)(2,1)} %param_1.330, bf16[]{:T(256)} %constant.5658), padding=0_0x0_0x0_0x0_1x0_0x0_0x0_0
%constant.5648 = s32[]{:T(128)} constant(0)
%param_0.277 = s32[]{:T(128)} parameter(0)
%dynamic-slice.1327 = bf16[1,10,256,2,1,576,1]{2,5,3,1,0,6,4:T(8,128)(2,1)} dynamic-slice(bf16[1,10,256,2,32,576,1]{2,5,3,1,0,6,4:T(8,128)(2,1)} %pad.357, s32[]{:T(128)} %constant.5648, s32[]{:T(128)} %constant.5648, s32[]{:T(128)} %constant.5648, s32[]{:T(128)} %constant.5648, s32[]{:T(128)} %param_0.277, s32[]{:T(128)} %constant.5648, s32[]{:T(128)} %constant.5648), dynamic_slice_sizes={1,10,256,2,1,576,1}
%pad.363 = bf16[1,10,256,2,32,576,1]{2,5,3,1,0,6,4:T(8,128)(2,1)} pad(bf16[1,10,256,1,32,576,1]{2,5,3,1,0,6,4:T(8,128)(2,1)} %param_1.330, bf16[]{:T(256)} %constant.5658), padding=0_0x0_0x0_0x1_0x0_0x0_0x0_0
%param_2.233 = s32[]{:T(128)} parameter(2)
%dynamic-slice.1333 = bf16[1,10,256,2,1,576,1]{2,5,3,1,0,6,4:T(8,128)(2,1)} dynamic-slice(bf16[1,10,256,2,32,576,1]{2,5,3,1,0,6,4:T(8,128)(2,1)} %pad.363, s32[]{:T(128)} %constant.5648, s32[]{:T(128)} %constant.5648, s32[]{:T(128)} %constant.5648, s32[]{:T(128)} %constant.5648, s32[]{:T(128)} %param_2.233, s32[]{:T(128)} %constant.5648, s32[]{:T(128)} %constant.5648), dynamic_slice_sizes={1,10,256,2,1,576,1}
ROOT %maximum.510 = bf16[1,10,256,2,1,576,1]{2,5,3,1,0,6,4:T(8,128)(2,1)} maximum(bf16[1,10,256,2,1,576,1]{2,5,3,1,0,6,4:T(8,128)(2,1)} %dynamic-slice.1327, bf16[1,10,256,2,1,576,1]{2,5,3,1,0,6,4:T(8,128)(2,1)} %dynamic-slice.1333)
}
%fused_computation.116 (param_0.264: bf16[16,1024,1,10,256,1], param_1.329: s32[], param_2.230: bf16[1,10,256,1,32,576,1], param_3.197: s32[]) -> bf16[2,1,576,16,1024,1,1] {
%param_1.329 = s32[]{:T(128)} parameter(1)
%param_2.230 = bf16[1,10,256,1,32,576,1]{2,5,3,1,0,6,4:T(8,128)(2,1)} parameter(2)
%param_3.197 = s32[]{:T(128)} parameter(3)
%fusion.155 = bf16[1,10,256,2,1,576,1]{2,5,3,1,0,6,4:T(8,128)(2,1)} fusion(s32[]{:T(128)} %param_1.329, bf16[1,10,256,1,32,576,1]{2,5,3,1,0,6,4:T(8,128)(2,1)} %param_2.230, s32[]{:T(128)} %param_3.197), kind=kLoop, calls=%fused_computation.124.clone
%param_0.264 = bf16[16,1024,1,10,256,1]{4,1,3,0,5,2:T(8,128)(2,1)} parameter(0)
%fusion.147 = bf16[16,1024,1,10,256,1,1]{4,1,6,3,2,0,5:T(8,128)(2,1)} fusion(bf16[16,1024,1,10,256,1]{4,1,3,0,5,2:T(8,128)(2,1)} %param_0.264), kind=kLoop, calls=%fused_computation.117
ROOT %convolution.168 = bf16[2,1,576,16,1024,1,1]{4,2,0,6,5,3,1:T(8,128)(2,1)} convolution(bf16[1,10,256,2,1,576,1]{2,5,3,1,0,6,4:T(8,128)(2,1)} %fusion.155, bf16[16,1024,1,10,256,1,1]{4,1,6,3,2,0,5:T(8,128)(2,1)} %fusion.147), window={size=1x16x1x10x1 pad=0_0x15_15x0_0x0_0x0_0 rhs_reversal=0x1x0x0x0}, dim_labels=23f40b1_1o23i04->40b1f23
}
%fused_computation.123 (param_0.258: bf16[1,576,16,1024,1,1], param_1.306: bf16[2,1,576,16,1024,1,1]) -> bf16[1,576,16,1024,1,1] {
%param_0.258 = bf16[1,576,16,1024,1,1]{3,1,2,5,4,0:T(8,128)(2,1)} parameter(0)
%param_1.306 = bf16[2,1,576,16,1024,1,1]{4,2,0,6,5,3,1:T(8,128)(2,1)} parameter(1)
%slice.1132 = bf16[1,1,576,16,1024,1,1]{4,2,0,6,5,3,1:T(8,128)(2,1)} slice(bf16[2,1,576,16,1024,1,1]{4,2,0,6,5,3,1:T(8,128)(2,1)} %param_1.306), slice={[0:1], [0:1], [0:576], [0:16], [0:1024], [0:1], [0:1]}
%bitcast.678 = bf16[1,576,16,1024,1,1]{3,1,2,5,4,0:T(8,128)(2,1)} bitcast(bf16[1,1,576,16,1024,1,1]{4,2,0,6,5,3,1:T(8,128)(2,1)} %slice.1132)
ROOT %add.3125 = bf16[1,576,16,1024,1,1]{3,1,2,5,4,0:T(8,128)(2,1)} add(bf16[1,576,16,1024,1,1]{3,1,2,5,4,0:T(8,128)(2,1)} %param_0.258, bf16[1,576,16,1024,1,1]{3,1,2,5,4,0:T(8,128)(2,1)} %bitcast.678)
}
%fused_computation.115 (param_0.247: bf16[16,1024,1,10,256,1]) -> bf16[16,1024,1,10,256,1,1] {
%param_0.247 = bf16[16,1024,1,10,256,1]{4,1,3,0,5,2:T(8,128)(2,1)} parameter(0)
ROOT %bitcast.670 = bf16[16,1024,1,10,256,1,1]{4,1,6,3,2,0,5:T(8,128)(2,1)} bitcast(bf16[16,1024,1,10,256,1]{4,1,3,0,5,2:T(8,128)(2,1)} %param_0.247)
}
%fused_computation.125.clone (param_0.276: s32[], param_1.328: bf16[1,10,256,1,32,576,1], param_2.232: s32[]) -> bf16[1,10,256,2,1,576,1] {
%param_1.328 = bf16[1,10,256,1,32,576,1]{2,5,3,1,0,6,4:T(8,128)(2,1)} parameter(1)
%constant.5653 = bf16[]{:T(256)} constant(-inf)
%pad.360 = bf16[1,10,256,2,32,576,1]{2,5,3,1,0,6,4:T(8,128)(2,1)} pad(bf16[1,10,256,1,32,576,1]{2,5,3,1,0,6,4:T(8,128)(2,1)} %param_1.328, bf16[]{:T(256)} %constant.5653), padding=0_0x0_0x0_0x0_1x0_0x0_0x0_0
%constant.5643 = s32[]{:T(128)} constant(0)
%param_0.276 = s32[]{:T(128)} parameter(0)
%dynamic-slice.1330 = bf16[1,10,256,2,1,576,1]{2,5,3,1,0,6,4:T(8,128)(2,1)} dynamic-slice(bf16[1,10,256,2,32,576,1]{2,5,3,1,0,6,4:T(8,128)(2,1)} %pad.360, s32[]{:T(128)} %constant.5643, s32[]{:T(128)} %constant.5643, s32[]{:T(128)} %constant.5643, s32[]{:T(128)} %constant.5643, s32[]{:T(128)} %param_0.276, s32[]{:T(128)} %constant.5643, s32[]{:T(128)} %constant.5643), dynamic_slice_sizes={1,10,256,2,1,576,1}
%pad.366 = bf16[1,10,256,2,32,576,1]{2,5,3,1,0,6,4:T(8,128)(2,1)} pad(bf16[1,10,256,1,32,576,1]{2,5,3,1,0,6,4:T(8,128)(2,1)} %param_1.328, bf16[]{:T(256)} %constant.5653), padding=0_0x0_0x0_0x1_0x0_0x0_0x0_0
%param_2.232 = s32[]{:T(128)} parameter(2)
%dynamic-slice.1336 = bf16[1,10,256,2,1,576,1]{2,5,3,1,0,6,4:T(8,128)(2,1)} dynamic-slice(bf16[1,10,256,2,32,576,1]{2,5,3,1,0,6,4:T(8,128)(2,1)} %pad.366, s32[]{:T(128)} %constant.5643, s32[]{:T(128)} %constant.5643, s32[]{:T(128)} %constant.5643, s32[]{:T(128)} %constant.5643, s32[]{:T(128)} %param_2.232, s32[]{:T(128)} %constant.5643, s32[]{:T(128)} %constant.5643), dynamic_slice_sizes={1,10,256,2,1,576,1}
ROOT %maximum.512 = bf16[1,10,256,2,1,576,1]{2,5,3,1,0,6,4:T(8,128)(2,1)} maximum(bf16[1,10,256,2,1,576,1]{2,5,3,1,0,6,4:T(8,128)(2,1)} %dynamic-slice.1330, bf16[1,10,256,2,1,576,1]{2,5,3,1,0,6,4:T(8,128)(2,1)} %dynamic-slice.1336)
}
%fused_computation.114 (param_0.269: bf16[16,1024,1,10,256,1], param_1.327: s32[], param_2.228: bf16[1,10,256,1,32,576,1], param_3.196: s32[]) -> bf16[2,1,576,16,1024,1,1] {
%param_1.327 = s32[]{:T(128)} parameter(1)
%param_2.228 = bf16[1,10,256,1,32,576,1]{2,5,3,1,0,6,4:T(8,128)(2,1)} parameter(2)
%param_3.196 = s32[]{:T(128)} parameter(3)
%fusion.157 = bf16[1,10,256,2,1,576,1]{2,5,3,1,0,6,4:T(8,128)(2,1)} fusion(s32[]{:T(128)} %param_1.327, bf16[1,10,256,1,32,576,1]{2,5,3,1,0,6,4:T(8,128)(2,1)} %param_2.228, s32[]{:T(128)} %param_3.196), kind=kLoop, calls=%fused_computation.125.clone
%param_0.269 = bf16[16,1024,1,10,256,1]{4,1,3,0,5,2:T(8,128)(2,1)} parameter(0)
%fusion.145 = bf16[16,1024,1,10,256,1,1]{4,1,6,3,2,0,5:T(8,128)(2,1)} fusion(bf16[16,1024,1,10,256,1]{4,1,3,0,5,2:T(8,128)(2,1)} %param_0.269), kind=kLoop, calls=%fused_computation.115
ROOT %convolution.167 = bf16[2,1,576,16,1024,1,1]{4,2,0,6,5,3,1:T(8,128)(2,1)} convolution(bf16[1,10,256,2,1,576,1]{2,5,3,1,0,6,4:T(8,128)(2,1)} %fusion.157, bf16[16,1024,1,10,256,1,1]{4,1,6,3,2,0,5:T(8,128)(2,1)} %fusion.145), window={size=1x16x1x10x1 pad=0_0x15_15x0_0x0_0x0_0 rhs_reversal=0x1x0x0x0}, dim_labels=23f40b1_1o23i04->40b1f23
}
%fused_computation.121 (param_0.254: bf16[1,576,16,1024,1,1], param_1.303: bf16[2,1,576,16,1024,1,1]) -> bf16[1,576,16,1024,1,1] {
%param_0.254 = bf16[1,576,16,1024,1,1]{3,1,2,5,4,0:T(8,128)(2,1)} parameter(0)
%param_1.303 = bf16[2,1,576,16,1024,1,1]{4,2,0,6,5,3,1:T(8,128)(2,1)} parameter(1)
%slice.1129 = bf16[1,1,576,16,1024,1,1]{4,2,0,6,5,3,1:T(8,128)(2,1)} slice(bf16[2,1,576,16,1024,1,1]{4,2,0,6,5,3,1:T(8,128)(2,1)} %param_1.303), slice={[0:1], [0:1], [0:576], [0:16], [0:1024], [0:1], [0:1]}
%bitcast.675 = bf16[1,576,16,1024,1,1]{3,1,2,5,4,0:T(8,128)(2,1)} bitcast(bf16[1,1,576,16,1024,1,1]{4,2,0,6,5,3,1:T(8,128)(2,1)} %slice.1129)
ROOT %add.3124 = bf16[1,576,16,1024,1,1]{3,1,2,5,4,0:T(8,128)(2,1)} add(bf16[1,576,16,1024,1,1]{3,1,2,5,4,0:T(8,128)(2,1)} %param_0.254, bf16[1,576,16,1024,1,1]{3,1,2,5,4,0:T(8,128)(2,1)} %bitcast.675)
}
%fused_computation.119 (param_0.251: bf16[1,576,16,1024,1,1], param_1.300: bf16[2,576,16,1024,1,1]) -> bf16[1,576,16,1024,1,1] {
%param_0.251 = bf16[1,576,16,1024,1,1]{3,1,0,2,5,4:T(8,128)(2,1)} parameter(0)
%param_1.300 = bf16[2,576,16,1024,1,1]{3,1,0,2,5,4:T(8,128)(2,1)} parameter(1)
%slice.1126 = bf16[1,576,16,1024,1,1]{3,1,0,2,5,4:T(8,128)(2,1)} slice(bf16[2,576,16,1024,1,1]{3,1,0,2,5,4:T(8,128)(2,1)} %param_1.300), slice={[1:2], [0:576], [0:16], [0:1024], [0:1], [0:1]}
ROOT %add.3123 = bf16[1,576,16,1024,1,1]{3,1,0,2,5,4:T(8,128)(2,1)} add(bf16[1,576,16,1024,1,1]{3,1,0,2,5,4:T(8,128)(2,1)} %param_0.251, bf16[1,576,16,1024,1,1]{3,1,0,2,5,4:T(8,128)(2,1)} %slice.1126)
}
%fused_computation.118 (param_0.250: bf16[1,576,16,1024,1,1], param_1.298: bf16[2,576,16,1024,1,1]) -> bf16[1,576,16,1024,1,1] {
%param_0.250 = bf16[1,576,16,1024,1,1]{3,1,0,2,5,4:T(8,128)(2,1)} parameter(0)
%param_1.298 = bf16[2,576,16,1024,1,1]{3,1,0,2,5,4:T(8,128)(2,1)} parameter(1)
%slice.1125 = bf16[1,576,16,1024,1,1]{3,1,0,2,5,4:T(8,128)(2,1)} slice(bf16[2,576,16,1024,1,1]{3,1,0,2,5,4:T(8,128)(2,1)} %param_1.298), slice={[1:2], [0:576], [0:16], [0:1024], [0:1], [0:1]}
ROOT %add.3122 = bf16[1,576,16,1024,1,1]{3,1,0,2,5,4:T(8,128)(2,1)} add(bf16[1,576,16,1024,1,1]{3,1,0,2,5,4:T(8,128)(2,1)} %param_0.250, bf16[1,576,16,1024,1,1]{3,1,0,2,5,4:T(8,128)(2,1)} %slice.1125)
}
)";
hlo_string += R"(
ENTRY entry {
%constant.4782 = u32[]{:T(128)} constant(16)
%constant.4661 = u32[]{:T(128)} constant(2)
%constant.4517 = u32[]{:T(128)} constant(31)
%constant.3078 = u32[]{:T(128)} constant(1)
%constant.3060 = u32[]{:T(128)} constant(17)
%partition-id.6 = u32[]{:T(128)} partition-id()
%param.139 = (bf16[1,10,256,32,576,1]{2,4,1,3,5,0:T(8,128)(2,1)}, bf16[16,1024,1,10,256,1]{4,1,3,0,5,2:T(8,128)(2,1)}, bf16[1,576,16,1024,1,1]{3,1,2,5,4,0:T(8,128)(2,1)}, bf16[1,576,16,1024,1,1]{3,1,0,2,5,4:T(8,128)(2,1)}, u32[]{:T(128)}) parameter(0)
%get-tuple-element.16007 = u32[]{:T(128)} get-tuple-element((bf16[1,10,256,32,576,1]{2,4,1,3,5,0:T(8,128)(2,1)}, bf16[16,1024,1,10,256,1]{4,1,3,0,5,2:T(8,128)(2,1)}, bf16[1,576,16,1024,1,1]{3,1,2,5,4,0:T(8,128)(2,1)}, bf16[1,576,16,1024,1,1]{3,1,0,2,5,4:T(8,128)(2,1)}, u32[]{:T(128)}) %param.139), index=4
%copy.1385 = u32[]{:T(128)} copy(u32[]{:T(128)} %get-tuple-element.16007)
%add.1492 = u32[]{:T(128)} add(u32[]{:T(128)} %copy.1385, u32[]{:T(128)} %constant.3078)
%add.1938 = u32[]{:T(128)} add(u32[]{:T(128)} %copy.1385, u32[]{:T(128)} %constant.4661)
%get-tuple-element.16004 = bf16[16,1024,1,10,256,1]{4,1,3,0,5,2:T(8,128)(2,1)} get-tuple-element((bf16[1,10,256,32,576,1]{2,4,1,3,5,0:T(8,128)(2,1)}, bf16[16,1024,1,10,256,1]{4,1,3,0,5,2:T(8,128)(2,1)}, bf16[1,576,16,1024,1,1]{3,1,2,5,4,0:T(8,128)(2,1)}, bf16[1,576,16,1024,1,1]{3,1,0,2,5,4:T(8,128)(2,1)}, u32[]{:T(128)}) %param.139), index=1
%get-tuple-element.16003 = bf16[1,10,256,32,576,1]{2,4,1,3,5,0:T(8,128)(2,1)} get-tuple-element((bf16[1,10,256,32,576,1]{2,4,1,3,5,0:T(8,128)(2,1)}, bf16[16,1024,1,10,256,1]{4,1,3,0,5,2:T(8,128)(2,1)}, bf16[1,576,16,1024,1,1]{3,1,2,5,4,0:T(8,128)(2,1)}, bf16[1,576,16,1024,1,1]{3,1,0,2,5,4:T(8,128)(2,1)}, u32[]{:T(128)}) %param.139), index=0
%bitcast.58 = bf16[1,10,256,1,32,576,1]{2,5,3,1,0,6,4:T(8,128)(2,1)} bitcast(bf16[1,10,256,32,576,1]{2,4,1,3,5,0:T(8,128)(2,1)} %get-tuple-element.16003)
%get-tuple-element.16005 = bf16[1,576,16,1024,1,1]{3,1,2,5,4,0:T(8,128)(2,1)} get-tuple-element((bf16[1,10,256,32,576,1]{2,4,1,3,5,0:T(8,128)(2,1)}, bf16[16,1024,1,10,256,1]{4,1,3,0,5,2:T(8,128)(2,1)}, bf16[1,576,16,1024,1,1]{3,1,2,5,4,0:T(8,128)(2,1)}, bf16[1,576,16,1024,1,1]{3,1,0,2,5,4:T(8,128)(2,1)}, u32[]{:T(128)}) %param.139), index=2
%get-tuple-element.16006 = bf16[1,576,16,1024,1,1]{3,1,0,2,5,4:T(8,128)(2,1)} get-tuple-element((bf16[1,10,256,32,576,1]{2,4,1,3,5,0:T(8,128)(2,1)}, bf16[16,1024,1,10,256,1]{4,1,3,0,5,2:T(8,128)(2,1)}, bf16[1,576,16,1024,1,1]{3,1,2,5,4,0:T(8,128)(2,1)}, bf16[1,576,16,1024,1,1]{3,1,0,2,5,4:T(8,128)(2,1)}, u32[]{:T(128)}) %param.139), index=3
%constant.3058 = u32[1024]{0:T(1024)} constant({...})
%dynamic-slice.218 = u32[1]{0:T(128)} dynamic-slice(u32[1024]{0:T(1024)} %constant.3058, u32[]{:T(128)} %partition-id.6), dynamic_slice_sizes={1}
%bitcast.59 = u32[]{:T(128)} bitcast(u32[1]{0:T(128)} %dynamic-slice.218)
%add.1493 = u32[]{:T(128)} add(u32[]{:T(128)} %add.1492, u32[]{:T(128)} %bitcast.59)
%add.1495 = u32[]{:T(128)} add(u32[]{:T(128)} %add.1493, u32[]{:T(128)} %constant.3060)
%and.28 = u32[]{:T(128)} and(u32[]{:T(128)} %add.1495, u32[]{:T(128)} %constant.4517)
%add.2636 = u32[]{:T(128)} add(u32[]{:T(128)} %bitcast.59, u32[]{:T(128)} %constant.4782)
%subtract.200 = u32[]{:T(128)} subtract(u32[]{:T(128)} %add.2636, u32[]{:T(128)} %add.1492)
%and.29 = u32[]{:T(128)} and(u32[]{:T(128)} %subtract.200, u32[]{:T(128)} %constant.4517)
%subtract.198 = u32[]{:T(128)} subtract(u32[]{:T(128)} %add.2636, u32[]{:T(128)} %copy.1385)
%and.27 = u32[]{:T(128)} and(u32[]{:T(128)} %subtract.198, u32[]{:T(128)} %constant.4517)
%add.1487 = u32[]{:T(128)} add(u32[]{:T(128)} %copy.1385, u32[]{:T(128)} %bitcast.59)
%add.1489 = u32[]{:T(128)} add(u32[]{:T(128)} %add.1487, u32[]{:T(128)} %constant.3060)
%and.26 = u32[]{:T(128)} and(u32[]{:T(128)} %add.1489, u32[]{:T(128)} %constant.4517)
%iota.60 = s32[32]{0:T(128)} iota(), iota_dimension=0
%fusion.2068 = (s32[1]{0:T(128)}, s32[1]{0:T(128)}, s32[1]{0:T(128)}, s32[1]{0:T(128)}) fusion(s32[32]{0:T(128)} %iota.60, u32[]{:T(128)} %and.29, u32[]{:T(128)} %and.28, u32[]{:T(128)} %and.27, u32[]{:T(128)} %and.26), kind=kLoop, calls=%fused_computation.1851
%get-tuple-element.15499 = s32[1]{0:T(128)} get-tuple-element((s32[1]{0:T(128)}, s32[1]{0:T(128)}, s32[1]{0:T(128)}, s32[1]{0:T(128)}) %fusion.2068), index=3
%bitcast.60 = s32[]{:T(128)} bitcast(s32[1]{0:T(128)} %get-tuple-element.15499)
%get-tuple-element.15498 = s32[1]{0:T(128)} get-tuple-element((s32[1]{0:T(128)}, s32[1]{0:T(128)}, s32[1]{0:T(128)}, s32[1]{0:T(128)}) %fusion.2068), index=2
%bitcast.61 = s32[]{:T(128)} bitcast(s32[1]{0:T(128)} %get-tuple-element.15498)
%get-tuple-element.15497 = s32[1]{0:T(128)} get-tuple-element((s32[1]{0:T(128)}, s32[1]{0:T(128)}, s32[1]{0:T(128)}, s32[1]{0:T(128)}) %fusion.2068), index=1
%bitcast.64 = s32[]{:T(128)} bitcast(s32[1]{0:T(128)} %get-tuple-element.15497)
%get-tuple-element.15496 = s32[1]{0:T(128)} get-tuple-element((s32[1]{0:T(128)}, s32[1]{0:T(128)}, s32[1]{0:T(128)}, s32[1]{0:T(128)}) %fusion.2068), index=0
%bitcast.65 = s32[]{:T(128)} bitcast(s32[1]{0:T(128)} %get-tuple-element.15496)
%collective-permute.9 = bf16[1,576,16,1024,1,1]{3,1,0,2,5,4:T(8,128)(2,1)} collective-permute(bf16[1,576,16,1024,1,1]{3,1,0,2,5,4:T(8,128)(2,1)} %get-tuple-element.16006), channel_id=23, source_target_pairs={{0,1},{1,2},{2,3},{3,4},{4,5},{5,6},{6,7},{7,8},{8,9},{9,10},{10,11},{11,12},{12,13},{13,14},{14,15},{15,16},{16,17},{17,18},{18,19},{19,20},{20,21},{21,22},{22,23},{23,24},{24,25},{25,26},{26,27},{27,28},{28,29},{29,30},{30,31},{31,0}}, backend_config="{\"flag_configs\":[],\"barrier_config\":{\"barrier_type\":\"CUSTOM\",\"id\":\"1\"}}"
%collective-permute.8 = bf16[1,576,16,1024,1,1]{3,1,2,5,4,0:T(8,128)(2,1)} collective-permute(bf16[1,576,16,1024,1,1]{3,1,2,5,4,0:T(8,128)(2,1)} %get-tuple-element.16005), channel_id=22, source_target_pairs={{0,31},{1,0},{2,1},{3,2},{4,3},{5,4},{6,5},{7,6},{8,7},{9,8},{10,9},{11,10},{12,11},{13,12},{14,13},{15,14},{16,15},{17,16},{18,17},{19,18},{20,19},{21,20},{22,21},{23,22},{24,23},{25,24},{26,25},{27,26},{28,27},{29,28},{30,29},{31,30}}, backend_config="{\"flag_configs\":[],\"barrier_config\":{\"barrier_type\":\"CUSTOM\",\"id\":\"0\"}}"
%fusion.144 = bf16[2,1,576,16,1024,1,1]{4,2,0,6,5,3,1:T(8,128)(2,1)} fusion(bf16[16,1024,1,10,256,1]{4,1,3,0,5,2:T(8,128)(2,1)} %get-tuple-element.16004, s32[]{:T(128)} %bitcast.64, bf16[1,10,256,1,32,576,1]{2,5,3,1,0,6,4:T(8,128)(2,1)} %bitcast.58, s32[]{:T(128)} %bitcast.65), kind=kOutput, calls=%fused_computation.114
%bitcast.68 = bf16[2,576,16,1024,1,1]{3,1,0,2,5,4:T(8,128)(2,1)} bitcast(bf16[2,1,576,16,1024,1,1]{4,2,0,6,5,3,1:T(8,128)(2,1)} %fusion.144)
%fusion.146 = bf16[2,1,576,16,1024,1,1]{4,2,0,6,5,3,1:T(8,128)(2,1)} fusion(bf16[16,1024,1,10,256,1]{4,1,3,0,5,2:T(8,128)(2,1)} %get-tuple-element.16004, s32[]{:T(128)} %bitcast.60, bf16[1,10,256,1,32,576,1]{2,5,3,1,0,6,4:T(8,128)(2,1)} %bitcast.58, s32[]{:T(128)} %bitcast.61), kind=kOutput, calls=%fused_computation.116
%fusion.153 = bf16[1,576,16,1024,1,1]{3,1,2,5,4,0:T(8,128)(2,1)} fusion(bf16[1,576,16,1024,1,1]{3,1,2,5,4,0:T(8,128)(2,1)} %collective-permute.8, bf16[2,1,576,16,1024,1,1]{4,2,0,6,5,3,1:T(8,128)(2,1)} %fusion.146), kind=kLoop, calls=%fused_computation.123
%collective-permute.10 = bf16[1,576,16,1024,1,1]{3,1,2,5,4,0:T(8,128)(2,1)} collective-permute(bf16[1,576,16,1024,1,1]{3,1,2,5,4,0:T(8,128)(2,1)} %fusion.153), channel_id=24, source_target_pairs={{0,31},{1,0},{2,1},{3,2},{4,3},{5,4},{6,5},{7,6},{8,7},{9,8},{10,9},{11,10},{12,11},{13,12},{14,13},{15,14},{16,15},{17,16},{18,17},{19,18},{20,19},{21,20},{22,21},{23,22},{24,23},{25,24},{26,25},{27,26},{28,27},{29,28},{30,29},{31,30}}, backend_config="{\"flag_configs\":[],\"barrier_config\":{\"barrier_type\":\"CUSTOM\",\"id\":\"0\"}}"
%fusion.151 = bf16[1,576,16,1024,1,1]{3,1,2,5,4,0:T(8,128)(2,1)} fusion(bf16[1,576,16,1024,1,1]{3,1,2,5,4,0:T(8,128)(2,1)} %collective-permute.10, bf16[2,1,576,16,1024,1,1]{4,2,0,6,5,3,1:T(8,128)(2,1)} %fusion.144), kind=kLoop, calls=%fused_computation.121
%bitcast.67 = bf16[2,576,16,1024,1,1]{3,1,0,2,5,4:T(8,128)(2,1)} bitcast(bf16[2,1,576,16,1024,1,1]{4,2,0,6,5,3,1:T(8,128)(2,1)} %fusion.146)
%fusion.149 = bf16[1,576,16,1024,1,1]{3,1,0,2,5,4:T(8,128)(2,1)} fusion(bf16[1,576,16,1024,1,1]{3,1,0,2,5,4:T(8,128)(2,1)} %collective-permute.9, bf16[2,576,16,1024,1,1]{3,1,0,2,5,4:T(8,128)(2,1)} %bitcast.67), kind=kLoop, calls=%fused_computation.119
%collective-permute.11 = bf16[1,576,16,1024,1,1]{3,1,0,2,5,4:T(8,128)(2,1)} collective-permute(bf16[1,576,16,1024,1,1]{3,1,0,2,5,4:T(8,128)(2,1)} %fusion.149), channel_id=25, source_target_pairs={{0,1},{1,2},{2,3},{3,4},{4,5},{5,6},{6,7},{7,8},{8,9},{9,10},{10,11},{11,12},{12,13},{13,14},{14,15},{15,16},{16,17},{17,18},{18,19},{19,20},{20,21},{21,22},{22,23},{23,24},{24,25},{25,26},{26,27},{27,28},{28,29},{29,30},{30,31},{31,0}}, backend_config="{\"flag_configs\":[],\"barrier_config\":{\"barrier_type\":\"CUSTOM\",\"id\":\"1\"}}"
%fusion.148 = bf16[1,576,16,1024,1,1]{3,1,0,2,5,4:T(8,128)(2,1)} fusion(bf16[1,576,16,1024,1,1]{3,1,0,2,5,4:T(8,128)(2,1)} %collective-permute.11, bf16[2,576,16,1024,1,1]{3,1,0,2,5,4:T(8,128)(2,1)} %bitcast.68), kind=kLoop, calls=%fused_computation.118
ROOT %tuple.1373 = (bf16[1,10,256,32,576,1]{2,4,1,3,5,0:T(8,128)(2,1)}, bf16[16,1024,1,10,256,1]{4,1,3,0,5,2:T(8,128)(2,1)}, bf16[1,576,16,1024,1,1]{3,1,2,5,4,0:T(8,128)(2,1)}, bf16[1,576,16,1024,1,1]{3,1,0,2,5,4:T(8,128)(2,1)}, u32[]{:T(128)}) tuple(bf16[1,10,256,32,576,1]{2,4,1,3,5,0:T(8,128)(2,1)} %get-tuple-element.16003, bf16[16,1024,1,10,256,1]{4,1,3,0,5,2:T(8,128)(2,1)} %get-tuple-element.16004, bf16[1,576,16,1024,1,1]{3,1,2,5,4,0:T(8,128)(2,1)} %fusion.151, bf16[1,576,16,1024,1,1]{3,1,0,2,5,4:T(8,128)(2,1)} %fusion.148, u32[]{:T(128)} %add.1938)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module, ParseHloText(hlo_string));
HloSchedule& module_schedule = hlo_module->schedule();
EXPECT_TRUE(hlo_module->has_entry_computation());
HloComputation* entry_computation = hlo_module->entry_computation();
EXPECT_TRUE(RunScheduler(hlo_module.get()).ok());
std::vector<HloInstruction*> new_instruction_sequence =
module_schedule.sequence(entry_computation).instructions();
if (VLOG_IS_ON(1)) {
for (auto* new_i : new_instruction_sequence) {
VLOG(1) << new_i->ToString();
}
}
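// The first collective-permute pair should overlap fusion.146 and the second
// pair fusion.144.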
EXPECT_LT(GetIndex(new_instruction_sequence, "collective-permute-start"),
GetIndex(new_instruction_sequence, "fusion.146"));
EXPECT_LT(GetIndex(new_instruction_sequence, "collective-permute-start.1"),
GetIndex(new_instruction_sequence, "fusion.146"));
EXPECT_GT(GetIndex(new_instruction_sequence, "collective-permute-done"),
GetIndex(new_instruction_sequence, "fusion.146"));
EXPECT_GT(GetIndex(new_instruction_sequence, "collective-permute-done.1"),
GetIndex(new_instruction_sequence, "fusion.146"));
EXPECT_LT(GetIndex(new_instruction_sequence, "collective-permute-start.2"),
GetIndex(new_instruction_sequence, "fusion.144"));
EXPECT_LT(GetIndex(new_instruction_sequence, "collective-permute-start.3"),
GetIndex(new_instruction_sequence, "fusion.144"));
EXPECT_GT(GetIndex(new_instruction_sequence, "collective-permute-done.2"),
GetIndex(new_instruction_sequence, "fusion.144"));
EXPECT_GT(GetIndex(new_instruction_sequence, "collective-permute-done.3"),
GetIndex(new_instruction_sequence, "fusion.144"));
}
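// A 16-way looped-einsum variant in which collective-permute.24/25 are
// chained on the results of collective-permute.22/23.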
TEST_F(LatencyHidingSchedulerTest,
BalanceChainedCollectivePermutesLoopedEinsum3) {
std::string hlo_string = R"(
HloModule module, is_scheduled=true
%fused_computation.1799 (param_0.4926: s32[16], param_1.5709: u32[], param_2.3976: u32[], param_3.3386: u32[], param_4.2299: u32[]) -> (s32[1], s32[1], s32[1], s32[1]) {
%param_0.4926 = s32[16]{0:T(128)} parameter(0)
%param_1.5709 = u32[]{:T(128)} parameter(1)
%dynamic-slice.1611 = s32[1]{0:T(128)} dynamic-slice(s32[16]{0:T(128)} %param_0.4926, u32[]{:T(128)} %param_1.5709), dynamic_slice_sizes={1}
%param_2.3976 = u32[]{:T(128)} parameter(2)
%dynamic-slice.1612 = s32[1]{0:T(128)} dynamic-slice(s32[16]{0:T(128)} %param_0.4926, u32[]{:T(128)} %param_2.3976), dynamic_slice_sizes={1}
%param_3.3386 = u32[]{:T(128)} parameter(3)
%dynamic-slice.1613 = s32[1]{0:T(128)} dynamic-slice(s32[16]{0:T(128)} %param_0.4926, u32[]{:T(128)} %param_3.3386), dynamic_slice_sizes={1}
%param_4.2299 = u32[]{:T(128)} parameter(4)
%dynamic-slice.1614 = s32[1]{0:T(128)} dynamic-slice(s32[16]{0:T(128)} %param_0.4926, u32[]{:T(128)} %param_4.2299), dynamic_slice_sizes={1}
ROOT %tuple.1346 = (s32[1]{0:T(128)}, s32[1]{0:T(128)}, s32[1]{0:T(128)}, s32[1]{0:T(128)}) tuple(s32[1]{0:T(128)} %dynamic-slice.1611, s32[1]{0:T(128)} %dynamic-slice.1612, s32[1]{0:T(128)} %dynamic-slice.1613, s32[1]{0:T(128)} %dynamic-slice.1614)
}
%fused_computation.243 (param_0.505: bf16[8,2048,2,576,1,1], param_1.586: bf16[8,2048,2,576,1,1]) -> bf16[8,2048,4,576,1,1] {
%param_1.586 = bf16[8,2048,2,576,1,1]{1,3,2,0,5,4:T(8,128)(2,1)} parameter(1)
%constant.5838 = bf16[]{:T(256)} constant(-inf)
%pad.368 = bf16[8,2048,4,576,1,1]{1,3,2,0,5,4:T(8,128)(2,1)} pad(bf16[8,2048,2,576,1,1]{1,3,2,0,5,4:T(8,128)(2,1)} %param_1.586, bf16[]{:T(256)} %constant.5838), padding=0_0x0_0x0_2x0_0x0_0x0_0
%param_0.505 = bf16[8,2048,2,576,1,1]{1,3,2,0,5,4:T(8,128)(2,1)} parameter(0)
%pad.367 = bf16[8,2048,4,576,1,1]{1,3,2,0,5,4:T(8,128)(2,1)} pad(bf16[8,2048,2,576,1,1]{1,3,2,0,5,4:T(8,128)(2,1)} %param_0.505, bf16[]{:T(256)} %constant.5838), padding=0_0x0_0x2_0x0_0x0_0x0_0
ROOT %maximum.528 = bf16[8,2048,4,576,1,1]{1,3,2,0,5,4:T(8,128)(2,1)} maximum(bf16[8,2048,4,576,1,1]{1,3,2,0,5,4:T(8,128)(2,1)} %pad.368, bf16[8,2048,4,576,1,1]{1,3,2,0,5,4:T(8,128)(2,1)} %pad.367)
}
%fused_computation.244 (param_0.507: bf16[8,2048,2,576,1,1], param_1.585: bf16[8,2048,2,576,1,1]) -> bf16[8,2048,4,576,1,1] {
%param_1.585 = bf16[8,2048,2,576,1,1]{1,3,2,0,5,4:T(8,128)(2,1)} parameter(1)
%constant.5832 = bf16[]{:T(256)} constant(-inf)
%pad.370 = bf16[8,2048,4,576,1,1]{1,3,2,0,5,4:T(8,128)(2,1)} pad(bf16[8,2048,2,576,1,1]{1,3,2,0,5,4:T(8,128)(2,1)} %param_1.585, bf16[]{:T(256)} %constant.5832), padding=0_0x0_0x0_2x0_0x0_0x0_0
%param_0.507 = bf16[8,2048,2,576,1,1]{1,3,2,0,5,4:T(8,128)(2,1)} parameter(0)
%pad.369 = bf16[8,2048,4,576,1,1]{1,3,2,0,5,4:T(8,128)(2,1)} pad(bf16[8,2048,2,576,1,1]{1,3,2,0,5,4:T(8,128)(2,1)} %param_0.507, bf16[]{:T(256)} %constant.5832), padding=0_0x0_0x2_0x0_0x0_0x0_0
ROOT %maximum.529 = bf16[8,2048,4,576,1,1]{1,3,2,0,5,4:T(8,128)(2,1)} maximum(bf16[8,2048,4,576,1,1]{1,3,2,0,5,4:T(8,128)(2,1)} %pad.370, bf16[8,2048,4,576,1,1]{1,3,2,0,5,4:T(8,128)(2,1)} %pad.369)
}
%fused_computation.247 (param_0.511: bf16[8,2048,2,2,576,1,1]) -> bf16[8,2048,2,2,576,1,1] {
%param_0.511 = bf16[8,2048,2,2,576,1,1]{1,4,6,5,3,2,0:T(8,128)(2,1)} parameter(0)
ROOT %copy.2292 = bf16[8,2048,2,2,576,1,1]{1,4,2,3,6,5,0:T(8,128)(2,1)} copy(bf16[8,2048,2,2,576,1,1]{1,4,6,5,3,2,0:T(8,128)(2,1)} %param_0.511)
}
%fused_computation.248.clone (param_0.526: s32[], param_1.589: bf16[1,32,576,1,36,256,1], param_2.400: s32[]) -> bf16[2,2,576,1,36,256,1] {
%param_1.589 = bf16[1,32,576,1,36,256,1]{5,2,0,1,4,3,6:T(8,128)(2,1)} parameter(1)
%constant.5843 = bf16[]{:T(256)} constant(-inf)
%pad.378 = bf16[2,32,576,1,36,256,1]{5,2,0,1,4,3,6:T(8,128)(2,1)} pad(bf16[1,32,576,1,36,256,1]{5,2,0,1,4,3,6:T(8,128)(2,1)} %param_1.589, bf16[]{:T(256)} %constant.5843), padding=0_1x0_0x0_0x0_0x0_0x0_0x0_0
%constant.5853 = s32[]{:T(128)} constant(0)
%param_0.526 = s32[]{:T(128)} parameter(0)
%dynamic-slice.1382 = bf16[2,2,576,1,36,256,1]{5,2,0,1,4,3,6:T(8,128)(2,1)} dynamic-slice(bf16[2,32,576,1,36,256,1]{5,2,0,1,4,3,6:T(8,128)(2,1)} %pad.378, s32[]{:T(128)} %constant.5853, s32[]{:T(128)} %param_0.526, s32[]{:T(128)} %constant.5853, s32[]{:T(128)} %constant.5853, s32[]{:T(128)} %constant.5853, s32[]{:T(128)} %constant.5853, s32[]{:T(128)} %constant.5853), dynamic_slice_sizes={2,2,576,1,36,256,1}
%pad.377 = bf16[2,32,576,1,36,256,1]{5,2,0,1,4,3,6:T(8,128)(2,1)} pad(bf16[1,32,576,1,36,256,1]{5,2,0,1,4,3,6:T(8,128)(2,1)} %param_1.589, bf16[]{:T(256)} %constant.5843), padding=1_0x0_0x0_0x0_0x0_0x0_0x0_0
%param_2.400 = s32[]{:T(128)} parameter(2)
%dynamic-slice.1381 = bf16[2,2,576,1,36,256,1]{5,2,0,1,4,3,6:T(8,128)(2,1)} dynamic-slice(bf16[2,32,576,1,36,256,1]{5,2,0,1,4,3,6:T(8,128)(2,1)} %pad.377, s32[]{:T(128)} %constant.5853, s32[]{:T(128)} %param_2.400, s32[]{:T(128)} %constant.5853, s32[]{:T(128)} %constant.5853, s32[]{:T(128)} %constant.5853, s32[]{:T(128)} %constant.5853, s32[]{:T(128)} %constant.5853), dynamic_slice_sizes={2,2,576,1,36,256,1}
ROOT %maximum.532 = bf16[2,2,576,1,36,256,1]{5,2,0,1,4,3,6:T(8,128)(2,1)} maximum(bf16[2,2,576,1,36,256,1]{5,2,0,1,4,3,6:T(8,128)(2,1)} %dynamic-slice.1382, bf16[2,2,576,1,36,256,1]{5,2,0,1,4,3,6:T(8,128)(2,1)} %dynamic-slice.1381)
}
%fused_computation.246 (param_0.521: bf16[8,2048,2,2,576,1,1], param_1.588: s32[], param_2.399: bf16[1,32,576,1,36,256,1], param_3.247: s32[]) -> bf16[8,2048,1,36,256,1,1] {
%param_0.521 = bf16[8,2048,2,2,576,1,1]{1,4,6,5,3,2,0:T(8,128)(2,1)} parameter(0)
%fusion.268 = bf16[8,2048,2,2,576,1,1]{1,4,2,3,6,5,0:T(8,128)(2,1)} fusion(bf16[8,2048,2,2,576,1,1]{1,4,6,5,3,2,0:T(8,128)(2,1)} %param_0.521), kind=kLoop, calls=%fused_computation.247
%param_1.588 = s32[]{:T(128)} parameter(1)
%param_2.399 = bf16[1,32,576,1,36,256,1]{5,2,0,1,4,3,6:T(8,128)(2,1)} parameter(2)
%param_3.247 = s32[]{:T(128)} parameter(3)
%fusion.271 = bf16[2,2,576,1,36,256,1]{5,2,0,1,4,3,6:T(8,128)(2,1)} fusion(s32[]{:T(128)} %param_1.588, bf16[1,32,576,1,36,256,1]{5,2,0,1,4,3,6:T(8,128)(2,1)} %param_2.399, s32[]{:T(128)} %param_3.247), kind=kLoop, calls=%fused_computation.248.clone
ROOT %convolution.172 = bf16[8,2048,1,36,256,1,1]{4,1,6,5,3,2,0:T(8,128)(2,1)} convolution(bf16[8,2048,2,2,576,1,1]{1,4,2,3,6,5,0:T(8,128)(2,1)} %fusion.268, bf16[2,2,576,1,36,256,1]{5,2,0,1,4,3,6:T(8,128)(2,1)} %fusion.271), window={size=1x1x36x2x2 pad=0_0x0_0x35_35x0_0x0_0 rhs_reversal=0x1x1x0x0}, dim_labels=0b43f12_43i12o0->0b12f34
}
%fused_computation.245 (param_0.508: bf16[8,2048,2,2,576,1,1]) -> bf16[8,2048,2,2,576,1,1] {
%param_0.508 = bf16[8,2048,2,2,576,1,1]{1,4,6,5,3,2,0:T(8,128)(2,1)} parameter(0)
ROOT %copy.2290 = bf16[8,2048,2,2,576,1,1]{1,4,2,3,6,5,0:T(8,128)(2,1)} copy(bf16[8,2048,2,2,576,1,1]{1,4,6,5,3,2,0:T(8,128)(2,1)} %param_0.508)
}
%fused_computation.249.clone (param_0.525: s32[], param_1.587: bf16[1,32,576,1,36,256,1], param_2.398: s32[]) -> bf16[2,2,576,1,36,256,1] {
%param_1.587 = bf16[1,32,576,1,36,256,1]{5,2,0,1,4,3,6:T(8,128)(2,1)} parameter(1)
%constant.5837 = bf16[]{:T(256)} constant(-inf)
%pad.382 = bf16[2,32,576,1,36,256,1]{5,2,0,1,4,3,6:T(8,128)(2,1)} pad(bf16[1,32,576,1,36,256,1]{5,2,0,1,4,3,6:T(8,128)(2,1)} %param_1.587, bf16[]{:T(256)} %constant.5837), padding=0_1x0_0x0_0x0_0x0_0x0_0x0_0
%constant.5848 = s32[]{:T(128)} constant(0)
%param_0.525 = s32[]{:T(128)} parameter(0)
%dynamic-slice.1386 = bf16[2,2,576,1,36,256,1]{5,2,0,1,4,3,6:T(8,128)(2,1)} dynamic-slice(bf16[2,32,576,1,36,256,1]{5,2,0,1,4,3,6:T(8,128)(2,1)} %pad.382, s32[]{:T(128)} %constant.5848, s32[]{:T(128)} %param_0.525, s32[]{:T(128)} %constant.5848, s32[]{:T(128)} %constant.5848, s32[]{:T(128)} %constant.5848, s32[]{:T(128)} %constant.5848, s32[]{:T(128)} %constant.5848), dynamic_slice_sizes={2,2,576,1,36,256,1}
%pad.381 = bf16[2,32,576,1,36,256,1]{5,2,0,1,4,3,6:T(8,128)(2,1)} pad(bf16[1,32,576,1,36,256,1]{5,2,0,1,4,3,6:T(8,128)(2,1)} %param_1.587, bf16[]{:T(256)} %constant.5837), padding=1_0x0_0x0_0x0_0x0_0x0_0x0_0
%param_2.398 = s32[]{:T(128)} parameter(2)
%dynamic-slice.1385 = bf16[2,2,576,1,36,256,1]{5,2,0,1,4,3,6:T(8,128)(2,1)} dynamic-slice(bf16[2,32,576,1,36,256,1]{5,2,0,1,4,3,6:T(8,128)(2,1)} %pad.381, s32[]{:T(128)} %constant.5848, s32[]{:T(128)} %param_2.398, s32[]{:T(128)} %constant.5848, s32[]{:T(128)} %constant.5848, s32[]{:T(128)} %constant.5848, s32[]{:T(128)} %constant.5848, s32[]{:T(128)} %constant.5848), dynamic_slice_sizes={2,2,576,1,36,256,1}
ROOT %maximum.533 = bf16[2,2,576,1,36,256,1]{5,2,0,1,4,3,6:T(8,128)(2,1)} maximum(bf16[2,2,576,1,36,256,1]{5,2,0,1,4,3,6:T(8,128)(2,1)} %dynamic-slice.1386, bf16[2,2,576,1,36,256,1]{5,2,0,1,4,3,6:T(8,128)(2,1)} %dynamic-slice.1385)
}
%fused_computation.241 (param_0.503: bf16[8,2048,1,36,256,1], param_1.561: bf16[8,2048,1,36,256,1,1], param_2.397: bf16[8,2048,2,2,576,1,1], param_3.246: s32[], param_4.127: bf16[1,32,576,1,36,256,1], param_5.55: s32[]) -> bf16[8,2048,1,36,256,1] {
%param_0.503 = bf16[8,2048,1,36,256,1]{4,1,3,0,5,2:T(8,128)(2,1)} parameter(0)
%param_1.561 = bf16[8,2048,1,36,256,1,1]{4,1,6,5,3,2,0:T(8,128)(2,1)} parameter(1)
%bitcast.599 = bf16[8,2048,1,36,256,1]{4,1,3,0,5,2:T(8,128)(2,1)} bitcast(bf16[8,2048,1,36,256,1,1]{4,1,6,5,3,2,0:T(8,128)(2,1)} %param_1.561)
%add.3146 = bf16[8,2048,1,36,256,1]{4,1,3,0,5,2:T(8,128)(2,1)} add(bf16[8,2048,1,36,256,1]{4,1,3,0,5,2:T(8,128)(2,1)} %param_0.503, bf16[8,2048,1,36,256,1]{4,1,3,0,5,2:T(8,128)(2,1)} %bitcast.599)
%param_2.397 = bf16[8,2048,2,2,576,1,1]{1,4,6,5,3,2,0:T(8,128)(2,1)} parameter(2)
%fusion.266 = bf16[8,2048,2,2,576,1,1]{1,4,2,3,6,5,0:T(8,128)(2,1)} fusion(bf16[8,2048,2,2,576,1,1]{1,4,6,5,3,2,0:T(8,128)(2,1)} %param_2.397), kind=kLoop, calls=%fused_computation.245
%param_3.246 = s32[]{:T(128)} parameter(3)
%param_4.127 = bf16[1,32,576,1,36,256,1]{5,2,0,1,4,3,6:T(8,128)(2,1)} parameter(4)
%param_5.55 = s32[]{:T(128)} parameter(5)
%fusion.272 = bf16[2,2,576,1,36,256,1]{5,2,0,1,4,3,6:T(8,128)(2,1)} fusion(s32[]{:T(128)} %param_3.246, bf16[1,32,576,1,36,256,1]{5,2,0,1,4,3,6:T(8,128)(2,1)} %param_4.127, s32[]{:T(128)} %param_5.55), kind=kLoop, calls=%fused_computation.249.clone
%convolution.171 = bf16[8,2048,1,36,256,1,1]{4,1,6,5,3,2,0:T(8,128)(2,1)} convolution(bf16[8,2048,2,2,576,1,1]{1,4,2,3,6,5,0:T(8,128)(2,1)} %fusion.266, bf16[2,2,576,1,36,256,1]{5,2,0,1,4,3,6:T(8,128)(2,1)} %fusion.272), window={size=1x1x36x2x2 pad=0_0x0_0x35_35x0_0x0_0 rhs_reversal=0x1x1x0x0}, dim_labels=0b43f12_43i12o0->0b12f34
%bitcast.596 = bf16[8,2048,1,36,256,1]{4,1,3,0,5,2:T(8,128)(2,1)} bitcast(bf16[8,2048,1,36,256,1,1]{4,1,6,5,3,2,0:T(8,128)(2,1)} %convolution.171)
ROOT %add.3143 = bf16[8,2048,1,36,256,1]{4,1,3,0,5,2:T(8,128)(2,1)} add(bf16[8,2048,1,36,256,1]{4,1,3,0,5,2:T(8,128)(2,1)} %add.3146, bf16[8,2048,1,36,256,1]{4,1,3,0,5,2:T(8,128)(2,1)} %bitcast.596)
}
)";
hlo_string += R"(
ENTRY entry {
%constant.4735 = u32[]{:T(128)} constant(2)
%constant.4598 = u32[]{:T(128)} constant(15)
%constant.3341 = u32[]{:T(128)} constant(1)
%partition-id.16 = u32[]{:T(128)} partition-id()
%param.149 = (bf16[8,2048,2,576,1,1]{1,3,2,0,5,4:T(8,128)(2,1)}, bf16[32,576,1,36,256,1]{4,1,0,3,5,2:T(8,128)(2,1)}, bf16[8,2048,1,36,256,1]{4,1,3,0,5,2:T(8,128)(2,1)}, bf16[8,2048,2,576,1,1]{1,3,2,0,5,4:T(8,128)(2,1)}, u32[]{:T(128)}) parameter(0)
%get-tuple-element.21127 = u32[]{:T(128)} get-tuple-element((bf16[8,2048,2,576,1,1]{1,3,2,0,5,4:T(8,128)(2,1)}, bf16[32,576,1,36,256,1]{4,1,0,3,5,2:T(8,128)(2,1)}, bf16[8,2048,1,36,256,1]{4,1,3,0,5,2:T(8,128)(2,1)}, bf16[8,2048,2,576,1,1]{1,3,2,0,5,4:T(8,128)(2,1)}, u32[]{:T(128)}) %param.149), index=4
%copy.2357 = u32[]{:T(128)} copy(u32[]{:T(128)} %get-tuple-element.21127)
%add.1530 = u32[]{:T(128)} add(u32[]{:T(128)} %copy.2357, u32[]{:T(128)} %constant.3341)
%add.1943 = u32[]{:T(128)} add(u32[]{:T(128)} %copy.2357, u32[]{:T(128)} %constant.4735)
%get-tuple-element.21124 = bf16[32,576,1,36,256,1]{4,1,0,3,5,2:T(8,128)(2,1)} get-tuple-element((bf16[8,2048,2,576,1,1]{1,3,2,0,5,4:T(8,128)(2,1)}, bf16[32,576,1,36,256,1]{4,1,0,3,5,2:T(8,128)(2,1)}, bf16[8,2048,1,36,256,1]{4,1,3,0,5,2:T(8,128)(2,1)}, bf16[8,2048,2,576,1,1]{1,3,2,0,5,4:T(8,128)(2,1)}, u32[]{:T(128)}) %param.149), index=1
%bitcast.98 = bf16[1,32,576,1,36,256,1]{5,2,0,1,4,3,6:T(8,128)(2,1)} bitcast(bf16[32,576,1,36,256,1]{4,1,0,3,5,2:T(8,128)(2,1)} %get-tuple-element.21124)
%get-tuple-element.21123 = bf16[8,2048,2,576,1,1]{1,3,2,0,5,4:T(8,128)(2,1)} get-tuple-element((bf16[8,2048,2,576,1,1]{1,3,2,0,5,4:T(8,128)(2,1)}, bf16[32,576,1,36,256,1]{4,1,0,3,5,2:T(8,128)(2,1)}, bf16[8,2048,1,36,256,1]{4,1,3,0,5,2:T(8,128)(2,1)}, bf16[8,2048,2,576,1,1]{1,3,2,0,5,4:T(8,128)(2,1)}, u32[]{:T(128)}) %param.149), index=0
%get-tuple-element.21125 = bf16[8,2048,1,36,256,1]{4,1,3,0,5,2:T(8,128)(2,1)} get-tuple-element((bf16[8,2048,2,576,1,1]{1,3,2,0,5,4:T(8,128)(2,1)}, bf16[32,576,1,36,256,1]{4,1,0,3,5,2:T(8,128)(2,1)}, bf16[8,2048,1,36,256,1]{4,1,3,0,5,2:T(8,128)(2,1)}, bf16[8,2048,2,576,1,1]{1,3,2,0,5,4:T(8,128)(2,1)}, u32[]{:T(128)}) %param.149), index=2
%get-tuple-element.21126 = bf16[8,2048,2,576,1,1]{1,3,2,0,5,4:T(8,128)(2,1)} get-tuple-element((bf16[8,2048,2,576,1,1]{1,3,2,0,5,4:T(8,128)(2,1)}, bf16[32,576,1,36,256,1]{4,1,0,3,5,2:T(8,128)(2,1)}, bf16[8,2048,1,36,256,1]{4,1,3,0,5,2:T(8,128)(2,1)}, bf16[8,2048,2,576,1,1]{1,3,2,0,5,4:T(8,128)(2,1)}, u32[]{:T(128)}) %param.149), index=3
%constant.3344 = s32[16]{0:T(128)} constant({...})
%constant.3339 = u32[256]{0:T(256)} constant({...})
%dynamic-slice.312 = u32[1]{0:T(128)} dynamic-slice(u32[256]{0:T(256)} %constant.3339, u32[]{:T(128)} %partition-id.16), dynamic_slice_sizes={1}
%bitcast.99 = u32[]{:T(128)} bitcast(u32[1]{0:T(128)} %dynamic-slice.312)
%add.1531 = u32[]{:T(128)} add(u32[]{:T(128)} %add.1530, u32[]{:T(128)} %bitcast.99)
%and.40 = u32[]{:T(128)} and(u32[]{:T(128)} %add.1531, u32[]{:T(128)} %constant.4598)
%add.2637 = u32[]{:T(128)} add(u32[]{:T(128)} %bitcast.99, u32[]{:T(128)} %constant.4598)
%subtract.216 = u32[]{:T(128)} subtract(u32[]{:T(128)} %add.2637, u32[]{:T(128)} %add.1530)
%and.41 = u32[]{:T(128)} and(u32[]{:T(128)} %subtract.216, u32[]{:T(128)} %constant.4598)
%subtract.214 = u32[]{:T(128)} subtract(u32[]{:T(128)} %add.2637, u32[]{:T(128)} %copy.2357)
%and.39 = u32[]{:T(128)} and(u32[]{:T(128)} %subtract.214, u32[]{:T(128)} %constant.4598)
%add.1527 = u32[]{:T(128)} add(u32[]{:T(128)} %copy.2357, u32[]{:T(128)} %bitcast.99)
%and.38 = u32[]{:T(128)} and(u32[]{:T(128)} %add.1527, u32[]{:T(128)} %constant.4598)
%fusion.1974 = (s32[1]{0:T(128)}, s32[1]{0:T(128)}, s32[1]{0:T(128)}, s32[1]{0:T(128)}) fusion(s32[16]{0:T(128)} %constant.3344, u32[]{:T(128)} %and.41, u32[]{:T(128)} %and.40, u32[]{:T(128)} %and.39, u32[]{:T(128)} %and.38), kind=kLoop, calls=%fused_computation.1799
%get-tuple-element.20616 = s32[1]{0:T(128)} get-tuple-element((s32[1]{0:T(128)}, s32[1]{0:T(128)}, s32[1]{0:T(128)}, s32[1]{0:T(128)}) %fusion.1974), index=3
%bitcast.100 = s32[]{:T(128)} bitcast(s32[1]{0:T(128)} %get-tuple-element.20616)
%get-tuple-element.20615 = s32[1]{0:T(128)} get-tuple-element((s32[1]{0:T(128)}, s32[1]{0:T(128)}, s32[1]{0:T(128)}, s32[1]{0:T(128)}) %fusion.1974), index=2
%bitcast.101 = s32[]{:T(128)} bitcast(s32[1]{0:T(128)} %get-tuple-element.20615)
%get-tuple-element.20614 = s32[1]{0:T(128)} get-tuple-element((s32[1]{0:T(128)}, s32[1]{0:T(128)}, s32[1]{0:T(128)}, s32[1]{0:T(128)}) %fusion.1974), index=1
%bitcast.104 = s32[]{:T(128)} bitcast(s32[1]{0:T(128)} %get-tuple-element.20614)
%get-tuple-element.20613 = s32[1]{0:T(128)} get-tuple-element((s32[1]{0:T(128)}, s32[1]{0:T(128)}, s32[1]{0:T(128)}, s32[1]{0:T(128)}) %fusion.1974), index=0
%bitcast.105 = s32[]{:T(128)} bitcast(s32[1]{0:T(128)} %get-tuple-element.20613)
%copy.2356 = bf16[8,2048,2,576,1,1]{1,3,2,0,5,4:T(8,128)(2,1)} copy(bf16[8,2048,2,576,1,1]{1,3,2,0,5,4:T(8,128)(2,1)} %get-tuple-element.21126)
%collective-permute.23 = bf16[8,2048,2,576,1,1]{1,3,2,0,5,4:T(8,128)(2,1)} collective-permute(bf16[8,2048,2,576,1,1]{1,3,2,0,5,4:T(8,128)(2,1)} %copy.2356), channel_id=51, source_target_pairs={{0,1},{1,2},{2,3},{3,4},{4,5},{5,6},{6,7},{7,8},{8,9},{9,10},{10,11},{11,12},{12,13},{13,14},{14,15},{15,0}}, backend_config="{\"flag_configs\":[],\"barrier_config\":{\"barrier_type\":\"CUSTOM\",\"id\":\"1\"},\"scoped_memory_configs\":[]}"
%copy.2354 = bf16[8,2048,2,576,1,1]{1,3,2,0,5,4:T(8,128)(2,1)} copy(bf16[8,2048,2,576,1,1]{1,3,2,0,5,4:T(8,128)(2,1)} %get-tuple-element.21123)
%collective-permute.22 = bf16[8,2048,2,576,1,1]{1,3,2,0,5,4:T(8,128)(2,1)} collective-permute(bf16[8,2048,2,576,1,1]{1,3,2,0,5,4:T(8,128)(2,1)} %copy.2354), channel_id=50, source_target_pairs={{0,15},{1,0},{2,1},{3,2},{4,3},{5,4},{6,5},{7,6},{8,7},{9,8},{10,9},{11,10},{12,11},{13,12},{14,13},{15,14}}, backend_config="{\"flag_configs\":[],\"barrier_config\":{\"barrier_type\":\"CUSTOM\",\"id\":\"0\"},\"scoped_memory_configs\":[]}"
%fusion.264 = bf16[8,2048,4,576,1,1]{1,3,2,0,5,4:T(8,128)(2,1)} fusion(bf16[8,2048,2,576,1,1]{1,3,2,0,5,4:T(8,128)(2,1)} %copy.2356, bf16[8,2048,2,576,1,1]{1,3,2,0,5,4:T(8,128)(2,1)} %copy.2354), kind=kLoop, calls=%fused_computation.243
%bitcast.97 = bf16[8,2048,2,2,576,1,1]{1,4,6,5,3,2,0:T(8,128)(2,1)} bitcast(bf16[8,2048,4,576,1,1]{1,3,2,0,5,4:T(8,128)(2,1)} %fusion.264)
%collective-permute.24 = bf16[8,2048,2,576,1,1]{1,3,2,0,5,4:T(8,128)(2,1)} collective-permute(bf16[8,2048,2,576,1,1]{1,3,2,0,5,4:T(8,128)(2,1)} %collective-permute.22), channel_id=52, source_target_pairs={{0,15},{1,0},{2,1},{3,2},{4,3},{5,4},{6,5},{7,6},{8,7},{9,8},{10,9},{11,10},{12,11},{13,12},{14,13},{15,14}}, backend_config="{\"flag_configs\":[],\"barrier_config\":{\"barrier_type\":\"CUSTOM\",\"id\":\"0\"},\"scoped_memory_configs\":[]}"
%fusion.265 = bf16[8,2048,4,576,1,1]{1,3,2,0,5,4:T(8,128)(2,1)} fusion(bf16[8,2048,2,576,1,1]{1,3,2,0,5,4:T(8,128)(2,1)} %collective-permute.23, bf16[8,2048,2,576,1,1]{1,3,2,0,5,4:T(8,128)(2,1)} %collective-permute.22), kind=kLoop, calls=%fused_computation.244
%collective-permute.25 = bf16[8,2048,2,576,1,1]{1,3,2,0,5,4:T(8,128)(2,1)} collective-permute(bf16[8,2048,2,576,1,1]{1,3,2,0,5,4:T(8,128)(2,1)} %collective-permute.23), channel_id=53, source_target_pairs={{0,1},{1,2},{2,3},{3,4},{4,5},{5,6},{6,7},{7,8},{8,9},{9,10},{10,11},{11,12},{12,13},{13,14},{14,15},{15,0}}, backend_config="{\"flag_configs\":[],\"barrier_config\":{\"barrier_type\":\"CUSTOM\",\"id\":\"1\"},\"scoped_memory_configs\":[]}"
%bitcast.103 = bf16[8,2048,2,2,576,1,1]{1,4,6,5,3,2,0:T(8,128)(2,1)} bitcast(bf16[8,2048,4,576,1,1]{1,3,2,0,5,4:T(8,128)(2,1)} %fusion.265)
%fusion.267 = bf16[8,2048,1,36,256,1,1]{4,1,6,5,3,2,0:T(8,128)(2,1)} fusion(bf16[8,2048,2,2,576,1,1]{1,4,6,5,3,2,0:T(8,128)(2,1)} %bitcast.97, s32[]{:T(128)} %bitcast.100, bf16[1,32,576,1,36,256,1]{5,2,0,1,4,3,6:T(8,128)(2,1)} %bitcast.98, s32[]{:T(128)} %bitcast.101), kind=kOutput, calls=%fused_computation.246
%fusion.262 = bf16[8,2048,1,36,256,1]{4,1,3,0,5,2:T(8,128)(2,1)} fusion(bf16[8,2048,1,36,256,1]{4,1,3,0,5,2:T(8,128)(2,1)} %get-tuple-element.21125, bf16[8,2048,1,36,256,1,1]{4,1,6,5,3,2,0:T(8,128)(2,1)} %fusion.267, bf16[8,2048,2,2,576,1,1]{1,4,6,5,3,2,0:T(8,128)(2,1)} %bitcast.103, s32[]{:T(128)} %bitcast.104, bf16[1,32,576,1,36,256,1]{5,2,0,1,4,3,6:T(8,128)(2,1)} %bitcast.98, s32[]{:T(128)} %bitcast.105), kind=kOutput, calls=%fused_computation.241
ROOT %tuple.1419 = (bf16[8,2048,2,576,1,1]{1,3,2,0,5,4:T(8,128)(2,1)}, bf16[32,576,1,36,256,1]{4,1,0,3,5,2:T(8,128)(2,1)}, bf16[8,2048,1,36,256,1]{4,1,3,0,5,2:T(8,128)(2,1)}, bf16[8,2048,2,576,1,1]{1,3,2,0,5,4:T(8,128)(2,1)}, u32[]{:T(128)}) tuple(bf16[8,2048,2,576,1,1]{1,3,2,0,5,4:T(8,128)(2,1)} %collective-permute.24, bf16[32,576,1,36,256,1]{4,1,0,3,5,2:T(8,128)(2,1)} %get-tuple-element.21124, bf16[8,2048,1,36,256,1]{4,1,3,0,5,2:T(8,128)(2,1)} %fusion.262, bf16[8,2048,2,576,1,1]{1,3,2,0,5,4:T(8,128)(2,1)} %collective-permute.25, u32[]{:T(128)} %add.1943)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module, ParseHloText(hlo_string));
HloSchedule& module_schedule = hlo_module->schedule();
EXPECT_TRUE(hlo_module->has_entry_computation());
HloComputation* entry_computation = hlo_module->entry_computation();
EXPECT_TRUE(RunScheduler(hlo_module.get()).ok());
std::vector<HloInstruction*> new_instruction_sequence =
module_schedule.sequence(entry_computation).instructions();
if (VLOG_IS_ON(1)) {
for (auto* new_i : new_instruction_sequence) {
VLOG(1) << new_i->ToString();
}
}
EXPECT_LT(GetIndex(new_instruction_sequence, "collective-permute-start"),
GetIndex(new_instruction_sequence, "fusion.267"));
EXPECT_LT(GetIndex(new_instruction_sequence, "collective-permute-start.1"),
GetIndex(new_instruction_sequence, "fusion.267"));
EXPECT_GT(GetIndex(new_instruction_sequence, "collective-permute-done"),
GetIndex(new_instruction_sequence, "fusion.267"));
EXPECT_GT(GetIndex(new_instruction_sequence, "collective-permute-done.1"),
GetIndex(new_instruction_sequence, "fusion.267"));
EXPECT_LT(GetIndex(new_instruction_sequence, "collective-permute-start.2"),
GetIndex(new_instruction_sequence, "fusion.262"));
EXPECT_LT(GetIndex(new_instruction_sequence, "collective-permute-start.3"),
GetIndex(new_instruction_sequence, "fusion.262"));
EXPECT_GT(GetIndex(new_instruction_sequence, "collective-permute-done.2"),
GetIndex(new_instruction_sequence, "fusion.262"));
EXPECT_GT(GetIndex(new_instruction_sequence, "collective-permute-done.3"),
GetIndex(new_instruction_sequence, "fusion.262"));
}
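// Expects each collective-permute pair to be split around a convolution:
// collective-permute-start/.1 before c0 and their dones after it, and
// collective-permute-start.2/.3 likewise overlapping c1.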
TEST_F(LatencyHidingSchedulerTest, MoveCertainConv2) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
ENTRY entry {
p0 = f32[16,64,256]{2,1,0} parameter(0)
p1 = f32[16,64,256]{2,1,0} parameter(1)
p2 = f32[16,256,256]{2,1,0} parameter(2)
p3 = f32[16,256,64]{2,1,0} parameter(3)
cp0 = f32[16,64,256]{2,1,0} collective-permute(p0),
source_target_pairs={{0,1},{1,0}}
cp1 = f32[16,64,256]{2,1,0} collective-permute(p1),
source_target_pairs={{0,1},{1,0}}
cp2 = f32[16,64,256]{2,1,0} collective-permute(cp0),
source_target_pairs={{0,1},{1,0}}
cp3 = f32[16,64,256]{2,1,0} collective-permute(cp1),
source_target_pairs={{0,1},{1,0}}
a0 = f32[16,64,256]{2,1,0} add(cp0, cp1)
c0 = f32[16,64,256]{2,1,0} convolution(p2, p3),
window={size=16 stride=15 lhs_dilate=16}, dim_labels=0fb_0io->0fb
c1 = f32[16,256,256]{2,1,0} convolution(a0, c0),
window={size=16 stride=15 lhs_dilate=16}, dim_labels=0fb_0io->0fb
ROOT tuple = (f32[16,64,256]{2,1,0}, f32[16,64,256]{2,1,0}, f32[16,256,256]{2,1,0}) tuple(cp2, cp3, c1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module, ParseHloText(hlo_string));
HloSchedule& module_schedule = hlo_module->schedule();
EXPECT_TRUE(hlo_module->has_entry_computation());
HloComputation* entry_computation = hlo_module->entry_computation();
std::vector<HloInstruction*> original_instruction_sequence =
module_schedule.sequence(entry_computation).instructions();
EXPECT_TRUE(RunScheduler(hlo_module.get()).ok());
std::vector<HloInstruction*> new_instruction_sequence =
module_schedule.sequence(entry_computation).instructions();
if (VLOG_IS_ON(1)) {
for (auto* new_i : new_instruction_sequence) {
VLOG(1) << new_i->ToString();
}
}
EXPECT_LT(GetIndex(new_instruction_sequence, "collective-permute-start"),
GetIndex(new_instruction_sequence, "c0"));
EXPECT_LT(GetIndex(new_instruction_sequence, "collective-permute-start.1"),
GetIndex(new_instruction_sequence, "c0"));
EXPECT_GT(GetIndex(new_instruction_sequence, "collective-permute-done"),
GetIndex(new_instruction_sequence, "c0"));
EXPECT_GT(GetIndex(new_instruction_sequence, "collective-permute-done.1"),
GetIndex(new_instruction_sequence, "c0"));
EXPECT_LT(GetIndex(new_instruction_sequence, "collective-permute-start.2"),
GetIndex(new_instruction_sequence, "c1"));
EXPECT_LT(GetIndex(new_instruction_sequence, "collective-permute-start.3"),
GetIndex(new_instruction_sequence, "c1"));
EXPECT_GT(GetIndex(new_instruction_sequence, "collective-permute-done.2"),
GetIndex(new_instruction_sequence, "c1"));
EXPECT_GT(GetIndex(new_instruction_sequence, "collective-permute-done.3"),
GetIndex(new_instruction_sequence, "c1"));
}
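// With collective_permute_overlap_limit = 2 and two collective-permutes
// already live inside the while body, the entry computation's
// collective-permute.3 is expected to start only after the while loop.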
TEST_F(LatencyHidingSchedulerTest, WhileOverlapLimit) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
while_cond {
param = (bf16[8]{0}, bf16[8]{0}, pred[]) parameter(0)
ROOT gte = pred[] get-tuple-element(param), index=2
}
while_body {
param = (bf16[8]{0}, bf16[8]{0}, pred[]) parameter(0)
gte0 = bf16[8]{0} get-tuple-element(param), index=0
gte1 = pred[] get-tuple-element(param), index=2
bitcast = bf16[8]{0} bitcast(gte0)
collective-permute.1 = bf16[8]{0} collective-permute(gte0), source_target_pairs={{0,1},{1,2},{2,3}}
add0 = bf16[8]{0} add(collective-permute.1, bitcast)
negate = bf16[8]{0} negate(add0)
collective-permute.2 = bf16[8]{0} collective-permute(collective-permute.1), source_target_pairs={{1,0},{0,3},{3,2}}
ROOT tuple = (bf16[8]{0}, bf16[8]{0}, pred[]) tuple(collective-permute.2, negate, gte1)
}
ENTRY entry {
p0 = bf16[8]{0} parameter(0)
p1 = bf16[8]{0} parameter(1)
p2 = pred[] parameter(2)
tuple = (bf16[8]{0}, bf16[8]{0}, pred[]) tuple(p0, p1, p2)
while = (bf16[8]{0}, bf16[8]{0}, pred[]) while(tuple), condition=while_cond, body=while_body
collective-permute.3 = bf16[8]{0} collective-permute(p1), source_target_pairs={{0,1},{1,2},{2,3}}
gte0 = bf16[8]{0} get-tuple-element(while), index=0
gte1 = bf16[8]{0} get-tuple-element(while), index=1
add = bf16[8]{0} add(gte0, gte1)
ROOT add2 = bf16[8]{0} add(add, collective-permute.3)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module, ParseHloText(hlo_string));
HloSchedule& module_schedule = hlo_module->schedule();
EXPECT_TRUE(hlo_module->has_entry_computation());
auto sched_config = GetDefaultSchedConfig();
sched_config.collective_permute_overlap_limit = 2;
EXPECT_TRUE(RunScheduler(hlo_module.get(), sched_config).ok());
EXPECT_TRUE(hlo_module->has_entry_computation());
std::vector<HloInstruction*> new_instruction_sequence =
module_schedule.sequence(hlo_module->entry_computation()).instructions();
if (VLOG_IS_ON(1)) {
for (auto* new_i : new_instruction_sequence) {
VLOG(1) << new_i->ToString();
}
}
EXPECT_GT(GetIndex(new_instruction_sequence, "collective-permute-start.2"),
GetIndex(new_instruction_sequence, "while"));
}
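// Same overlap limit as above, but the collective-permutes sit two while
// loops deep; the outer collective-permute.3 should still be pushed past
// the while.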
TEST_F(LatencyHidingSchedulerTest, WhileNestedOverlapLimit) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
while_cond {
param = (bf16[8]{0}, bf16[8]{0}, pred[]) parameter(0)
ROOT gte = pred[] get-tuple-element(param), index=2
}
while_body {
param = (bf16[8]{0}, bf16[8]{0}, pred[]) parameter(0)
gte0 = bf16[8]{0} get-tuple-element(param), index=0
gte1 = pred[] get-tuple-element(param), index=2
bitcast = bf16[8]{0} bitcast(gte0)
collective-permute.1 = bf16[8]{0} collective-permute(gte0), source_target_pairs={{0,1},{1,2},{2,3}}
add0 = bf16[8]{0} add(collective-permute.1, bitcast)
negate = bf16[8]{0} negate(add0)
ROOT tuple = (bf16[8]{0}, bf16[8]{0}, pred[]) tuple(collective-permute.1, negate, gte1)
}
while_cond2 {
param = (bf16[8]{0}, bf16[8]{0}, pred[]) parameter(0)
ROOT gte = pred[] get-tuple-element(param), index=2
}
while_body2 {
param = (bf16[8]{0}, bf16[8]{0}, pred[]) parameter(0)
while.1 = (bf16[8]{0}, bf16[8]{0}, pred[]) while(param), condition=while_cond, body=while_body
gte0 = bf16[8]{0} get-tuple-element(while.1), index=0
gte1 = pred[] get-tuple-element(while.1), index=2
bitcast = bf16[8]{0} bitcast(gte0)
negate = bf16[8]{0} negate(bitcast)
collective-permute.2 = bf16[8]{0} collective-permute(negate), source_target_pairs={{1,0},{0,3},{3,2}}
ROOT tuple = (bf16[8]{0}, bf16[8]{0}, pred[]) tuple(collective-permute.2, negate, gte1)
}
ENTRY entry {
p0 = bf16[8]{0} parameter(0)
p1 = bf16[8]{0} parameter(1)
p2 = pred[] parameter(2)
tuple = (bf16[8]{0}, bf16[8]{0}, pred[]) tuple(p0, p1, p2)
while = (bf16[8]{0}, bf16[8]{0}, pred[]) while(tuple), condition=while_cond2, body=while_body2
collective-permute.3 = bf16[8]{0} collective-permute(p1), source_target_pairs={{0,1},{1,2},{2,3}}
gte0 = bf16[8]{0} get-tuple-element(while), index=0
gte1 = bf16[8]{0} get-tuple-element(while), index=1
add = bf16[8]{0} add(gte0, gte1)
ROOT add2 = bf16[8]{0} add(add, collective-permute.3)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module, ParseHloText(hlo_string));
HloSchedule& module_schedule = hlo_module->schedule();
EXPECT_TRUE(hlo_module->has_entry_computation());
auto sched_config = GetDefaultSchedConfig();
sched_config.collective_permute_overlap_limit = 2;
EXPECT_TRUE(RunScheduler(hlo_module.get(), sched_config).ok());
EXPECT_TRUE(hlo_module->has_entry_computation());
std::vector<HloInstruction*> new_instruction_sequence =
module_schedule.sequence(hlo_module->entry_computation()).instructions();
if (VLOG_IS_ON(1)) {
for (auto* new_i : new_instruction_sequence) {
VLOG(1) << new_i->ToString();
}
}
EXPECT_GT(GetIndex(new_instruction_sequence, "collective-permute-start.2"),
GetIndex(new_instruction_sequence, "while"));
}
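// With the limit raised to 3, collective-permute.3 fits under the overlap
// limit and is expected to start before the while loop.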
TEST_F(LatencyHidingSchedulerTest, WhileOverlapUnderLimit) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
while_cond {
param = (bf16[8]{0}, bf16[8]{0}, pred[]) parameter(0)
ROOT gte = pred[] get-tuple-element(param), index=2
}
while_body {
param = (bf16[8]{0}, bf16[8]{0}, pred[]) parameter(0)
gte0 = bf16[8]{0} get-tuple-element(param), index=0
gte1 = pred[] get-tuple-element(param), index=2
bitcast = bf16[8]{0} bitcast(gte0)
collective-permute.1 = bf16[8]{0} collective-permute(gte0), source_target_pairs={{0,1},{1,2},{2,3}}
add0 = bf16[8]{0} add(collective-permute.1, bitcast)
negate = bf16[8]{0} negate(add0)
collective-permute.2 = bf16[8]{0} collective-permute(collective-permute.1), source_target_pairs={{1,0},{0,3},{3,2}}
ROOT tuple = (bf16[8]{0}, bf16[8]{0}, pred[]) tuple(collective-permute.2, negate, gte1)
}
ENTRY entry {
p0 = bf16[8]{0} parameter(0)
p1 = bf16[8]{0} parameter(1)
p2 = pred[] parameter(2)
tuple = (bf16[8]{0}, bf16[8]{0}, pred[]) tuple(p0, p1, p2)
while = (bf16[8]{0}, bf16[8]{0}, pred[]) while(tuple), condition=while_cond, body=while_body
collective-permute.3 = bf16[8]{0} collective-permute(p1), source_target_pairs={{0,1},{1,2},{2,3}}
gte0 = bf16[8]{0} get-tuple-element(while), index=0
gte1 = bf16[8]{0} get-tuple-element(while), index=1
add = bf16[8]{0} add(gte0, gte1)
ROOT add2 = bf16[8]{0} add(add, collective-permute.3)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module, ParseHloText(hlo_string));
HloSchedule& module_schedule = hlo_module->schedule();
EXPECT_TRUE(hlo_module->has_entry_computation());
auto sched_config = GetDefaultSchedConfig();
sched_config.collective_permute_overlap_limit = 3;
EXPECT_TRUE(RunScheduler(hlo_module.get(), sched_config).ok());
EXPECT_TRUE(hlo_module->has_entry_computation());
std::vector<HloInstruction*> new_instruction_sequence =
module_schedule.sequence(hlo_module->entry_computation()).instructions();
if (VLOG_IS_ON(1)) {
for (auto* new_i : new_instruction_sequence) {
VLOG(1) << new_i->ToString();
}
}
EXPECT_LT(GetIndex(new_instruction_sequence, "collective-permute-start.2"),
GetIndex(new_instruction_sequence, "while"));
}
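// Only the collective-permute limit is raised here; with the default
// all-gather overlap limit, the entry all-gather.2 is expected to start
// after the while loop, whose body already contains an all-gather.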
TEST_F(LatencyHidingSchedulerTest, WhileOverlapLimitAllGather) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
while_cond {
param = (bf16[4]{0}, bf16[8]{0}, pred[]) parameter(0)
ROOT gte = pred[] get-tuple-element(param), index=2
}
while_body {
param = (bf16[4]{0}, bf16[8]{0}, pred[]) parameter(0)
gte0 = bf16[4]{0} get-tuple-element(param), index=0
gte1 = bf16[8]{0} get-tuple-element(param), index=1
gte2 = pred[] get-tuple-element(param), index=2
bitcast = bf16[8]{0} bitcast(gte0)
all-gather.1 = bf16[8]{0} all-gather(gte0), replica_groups={{0,1},{2,3}}, dimensions={0}, channel_id=1
add0 = bf16[8]{0} add(all-gather.1, bitcast)
negate = bf16[8]{0} negate(add0)
collective-permute.2 = bf16[4]{0} collective-permute(gte0), source_target_pairs={{1,0},{0,3},{3,2}}
ROOT tuple = (bf16[4]{0}, bf16[8]{0}, pred[]) tuple(collective-permute.2, negate, gte2)
}
ENTRY entry {
p0 = bf16[4]{0} parameter(0)
p1 = bf16[8]{0} parameter(1)
p2 = pred[] parameter(2)
tuple = (bf16[4]{0}, bf16[8]{0}, pred[]) tuple(p0, p1, p2)
while = (bf16[4]{0}, bf16[8]{0}, pred[]) while(tuple), condition=while_cond, body=while_body
all-gather.2 = bf16[8]{0} all-gather(p0), replica_groups={{0,1},{2,3}}, dimensions={0}, channel_id=2
gte0 = bf16[4]{0} get-tuple-element(while), index=0
gte1 = bf16[8]{0} get-tuple-element(while), index=1
ROOT tuple.2 = (bf16[4]{0}, bf16[8]{0}, bf16[8]{0}) tuple(gte0, gte1, all-gather.2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module, ParseHloText(hlo_string));
HloSchedule& module_schedule = hlo_module->schedule();
EXPECT_TRUE(hlo_module->has_entry_computation());
auto sched_config = GetDefaultSchedConfig();
sched_config.collective_permute_overlap_limit = 2;
EXPECT_TRUE(RunScheduler(hlo_module.get(), sched_config).ok());
EXPECT_TRUE(hlo_module->has_entry_computation());
std::vector<HloInstruction*> new_instruction_sequence =
module_schedule.sequence(hlo_module->entry_computation()).instructions();
if (VLOG_IS_ON(1)) {
for (auto* new_i : new_instruction_sequence) {
VLOG(1) << new_i->ToString();
}
}
EXPECT_GT(GetIndex(new_instruction_sequence, "all-gather-start.1"),
GetIndex(new_instruction_sequence, "while"));
}
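// Identical module to the previous test, but with all_gather_overlap_limit
// raised to 2 the entry all-gather can now start before the while loop.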
TEST_F(LatencyHidingSchedulerTest, WhileOverlapUnderLimitAllGather) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
while_cond {
param = (bf16[4]{0}, bf16[8]{0}, pred[]) parameter(0)
ROOT gte = pred[] get-tuple-element(param), index=2
}
while_body {
param = (bf16[4]{0}, bf16[8]{0}, pred[]) parameter(0)
gte0 = bf16[4]{0} get-tuple-element(param), index=0
gte1 = bf16[8]{0} get-tuple-element(param), index=1
gte2 = pred[] get-tuple-element(param), index=2
bitcast = bf16[8]{0} bitcast(gte0)
all-gather.1 = bf16[8]{0} all-gather(gte0), replica_groups={{0,1},{2,3}}, dimensions={0}, channel_id=1
add0 = bf16[8]{0} add(all-gather.1, bitcast)
negate = bf16[8]{0} negate(add0)
collective-permute.2 = bf16[4]{0} collective-permute(gte0), source_target_pairs={{1,0},{0,3},{3,2}}
ROOT tuple = (bf16[4]{0}, bf16[8]{0}, pred[]) tuple(collective-permute.2, negate, gte2)
}
ENTRY entry {
p0 = bf16[4]{0} parameter(0)
p1 = bf16[8]{0} parameter(1)
p2 = pred[] parameter(2)
tuple = (bf16[4]{0}, bf16[8]{0}, pred[]) tuple(p0, p1, p2)
while = (bf16[4]{0}, bf16[8]{0}, pred[]) while(tuple), condition=while_cond, body=while_body
all-gather.2 = bf16[8]{0} all-gather(p0), replica_groups={{0,1},{2,3}}, dimensions={0}, channel_id=2
gte0 = bf16[4]{0} get-tuple-element(while), index=0
gte1 = bf16[8]{0} get-tuple-element(while), index=1
ROOT tuple.2 = (bf16[4]{0}, bf16[8]{0}, bf16[8]{0}) tuple(gte0, gte1, all-gather.2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module, ParseHloText(hlo_string));
HloSchedule& module_schedule = hlo_module->schedule();
EXPECT_TRUE(hlo_module->has_entry_computation());
auto sched_config = GetDefaultSchedConfig();
sched_config.collective_permute_overlap_limit = 2;
sched_config.all_gather_overlap_limit = 2;
EXPECT_TRUE(RunScheduler(hlo_module.get(), sched_config).ok());
EXPECT_TRUE(hlo_module->has_entry_computation());
std::vector<HloInstruction*> new_instruction_sequence =
module_schedule.sequence(hlo_module->entry_computation()).instructions();
if (VLOG_IS_ON(1)) {
for (auto* new_i : new_instruction_sequence) {
VLOG(1) << new_i->ToString();
}
}
EXPECT_LT(GetIndex(new_instruction_sequence, "all-gather-start.1"),
GetIndex(new_instruction_sequence, "while"));
}
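// Two async all-to-alls and two convolutions: each convolution is expected
// to land between one async-start/async-done pair, balancing the compute
// across both collectives.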
TEST_F(LatencyHidingSchedulerTest, AllToAllAsyncBalance) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
async_computation {
p = f32[2,8,256,256] parameter(0)
ROOT ata = f32[2,8,256,256] all-to-all(p), dimensions={0}, replica_groups={{0,1}}
}
async_computation.2 {
p.2 = f32[2,8,256,256] parameter(0)
ROOT ata.1 = f32[2,8,256,256] all-to-all(p.2), dimensions={0}, replica_groups={{0,1}}
}
ENTRY %module {
%constant.19 = u32[] constant(0)
%replica_id = u32[]{:T(128)} replica-id()
%convert = f32[]{:T(128)} convert(u32[]{:T(128)} %replica_id)
%color_operand.1 = f32[2,8,256,256]{3,2,1,0} broadcast(
f32[]{:T(128)} %convert), dimensions={}
%color_operand.2 = f32[2,8,256,256]{3,2,1,0} broadcast(
f32[]{:T(128)} %convert), dimensions={}
%ata-start = ((f32[2,8,256,256]), f32[2,8,256,256], u32[], u32[]) async-start(
f32[2,8,256,256] %color_operand.1), calls=async_computation,
metadata={op_type="AllToAll" op_name="ata0"}
%ata-start.2 = ((f32[2,8,256,256]), f32[2,8,256,256], u32[], u32[]) async-start(
f32[2,8,256,256] %color_operand.2), calls=async_computation.2,
metadata={op_type="AllToAll" op_name="ata1"}
%ata-done = f32[2,8,256,256] async-done(%ata-start), calls=async_computation,
metadata={op_type="AllToAll" op_name="ata0"}
%ata-done-bc = f32[16,256,256] bitcast(f32[2,8,256,256] %ata-done),
metadata={op_type="Bitcast" op_name="ata0"}
%ata-done.2 = f32[2,8,256,256] async-done(%ata-start.2), calls=async_computation.2,
metadata={op_type="AllToAll" op_name="ata1"}
%ata-done-bc.2 = f32[16,256,256] bitcast(f32[2,8,256,256] %ata-done.2),
metadata={op_type="Bitcast" op_name="ata1"}
p0 = f32[16,64,256]{2,1,0} parameter(0)
p1 = f32[16,64,256]{2,1,0} parameter(1)
p2 = f32[16,256,256]{2,1,0} parameter(2)
p3 = f32[16,256,256]{2,1,0} parameter(3)
c0 = f32[16,256,256]{2,1,0} convolution(p0, p1),
window={size=16 stride=15 lhs_dilate=16}, dim_labels=0fb_0io->0fb,
metadata={op_type="AllToAll" op_name="c0"}
c1 = f32[16,256,256]{2,1,0} convolution(p0, p1),
window={size=16 stride=15 lhs_dilate=16}, dim_labels=0fb_0io->0fb,
metadata={op_type="AllToAll" op_name="c1"}
a2 = f32[16,256,256]{2,1,0} add(c1, c0)
ROOT t = (f32[16,256,256], f32[16,256,256], f32[16,256,256]) tuple(a2, %ata-done-bc.2, %ata-done-bc)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module, ParseHloText(hlo_string));
HloSchedule& module_schedule = hlo_module->schedule();
EXPECT_TRUE(hlo_module->has_entry_computation());
HloComputation* entry_computation = hlo_module->entry_computation();
std::vector<HloInstruction*> original_instruction_sequence =
module_schedule.sequence(entry_computation).instructions();
EXPECT_TRUE(RunScheduler(hlo_module.get()).ok());
std::vector<HloInstruction*> new_instruction_sequence =
module_schedule.sequence(entry_computation).instructions();
if (VLOG_IS_ON(1)) {
for (auto* new_i : new_instruction_sequence) {
VLOG(1) << new_i->ToString();
}
}
EXPECT_LT(GetOpcodeIndexUsingMetaData(HloOpcode::kConvolution,
new_instruction_sequence, "c0"),
GetOpcodeIndexUsingMetaData(HloOpcode::kAsyncDone,
new_instruction_sequence, "ata0"));
EXPECT_GT(GetOpcodeIndexUsingMetaData(HloOpcode::kConvolution,
new_instruction_sequence, "c0"),
GetOpcodeIndexUsingMetaData(HloOpcode::kAsyncStart,
new_instruction_sequence, "ata0"));
EXPECT_LT(GetOpcodeIndexUsingMetaData(HloOpcode::kConvolution,
new_instruction_sequence, "c1"),
GetOpcodeIndexUsingMetaData(HloOpcode::kAsyncDone,
new_instruction_sequence, "ata1"));
EXPECT_GT(GetOpcodeIndexUsingMetaData(HloOpcode::kConvolution,
new_instruction_sequence, "c1"),
GetOpcodeIndexUsingMetaData(HloOpcode::kAsyncStart,
new_instruction_sequence, "ata1"));
}
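// cp2s transfers more data than cp1s, so releasing it first stalls less at
// its done; expects cp2s to be scheduled before cp1s.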
TEST_F(LatencyHidingSchedulerTest, ReleaseOneThatStallsLessFirst) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
ENTRY entry {
p0 = f32[16,64,256]{2,1,0} parameter(0)
p1 = f32[16,64,256]{2,1,0} parameter(1)
p2 = f32[1024,2048,2048]{2,1,0} parameter(2)
p3 = f32[2048,2048,2048]{2,1,0} parameter(3)
cp1s = (f32[1024,2048,2048]{2,1,0}, f32[1024,2048,2048]{2,1,0}, u32[], u32[]) collective-permute-start(p2), source_target_pairs={{1,0},{0,3},{3,2}}
cp2s = (f32[2048,2048,2048]{2,1,0}, f32[2048,2048,2048]{2,1,0}, u32[], u32[]) collective-permute-start(p3), source_target_pairs={{1,0},{0,3},{3,2}}
c0 = f32[16,256,256]{2,1,0} convolution(p0, p1),
window={size=16 stride=15 lhs_dilate=16}, dim_labels=0fb_0io->0fb,
metadata={op_type="AllToAll" op_name="c0"}
cp1d = f32[1024,2048,2048]{2,1,0} collective-permute-done(cp1s)
cp2d = f32[2048,2048,2048]{2,1,0} collective-permute-done(cp2s)
ROOT tuple.2 = (f32[16,256,256]{2,1,0}, f32[1024,2048,2048]{2,1,0}, f32[2048,2048,2048]{2,1,0}) tuple(c0, cp1d, cp2d)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module, ParseHloText(hlo_string));
HloSchedule& module_schedule = hlo_module->schedule();
EXPECT_TRUE(hlo_module->has_entry_computation());
auto sched_config = GetDefaultSchedConfig();
sched_config.collective_permute_overlap_limit = 2;
sched_config.all_gather_overlap_limit = 2;
EXPECT_TRUE(RunScheduler(hlo_module.get(), sched_config,
std::make_unique<TestLatencyEstimator>())
.ok());
EXPECT_TRUE(hlo_module->has_entry_computation());
std::vector<HloInstruction*> new_instruction_sequence =
module_schedule.sequence(hlo_module->entry_computation()).instructions();
if (VLOG_IS_ON(1)) {
for (auto* new_i : new_instruction_sequence) {
VLOG(1) << new_i->ToString();
}
}
EXPECT_LT(GetIndex(new_instruction_sequence, "cp2s"),
GetIndex(new_instruction_sequence, "cp1s"));
}
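// With the release-start policy enabled, the dependent collective-permute
// chain is expected to interleave with the convolutions:
// cp2s -> c0 -> cp2d -> cp3s -> c1 -> cp3d.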
TEST_F(LatencyHidingSchedulerTest, ReleaseStartWhenLatencyDue) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
ENTRY entry {
p0 = f32[16,64,256]{2,1,0} parameter(0)
p1 = f32[128,2048,2048]{2,1,0} parameter(1)
p2 = f32[512,2048,2048]{2,1,0} parameter(2)
cp1s = (f32[512,2048,2048]{2,1,0}, f32[512,2048,2048]{2,1,0}, u32[], u32[]) collective-permute-start(p2), source_target_pairs={{1,0},{0,3},{3,2}}
cp1d = f32[512,2048,2048]{2,1,0} collective-permute-done(cp1s)
cp2s = (f32[128,2048,2048]{2,1,0}, f32[128,2048,2048]{2,1,0}, u32[], u32[]) collective-permute-start(p1), source_target_pairs={{1,0},{0,3},{3,2}}
cp2d = f32[128,2048,2048]{2,1,0} collective-permute-done(cp2s)
cp3s = (f32[128,2048,2048]{2,1,0}, f32[128,2048,2048]{2,1,0}, u32[], u32[]) collective-permute-start(cp2d), source_target_pairs={{1,0},{0,3},{3,2}}
cp3d = f32[128,2048,2048]{2,1,0} collective-permute-done(cp3s)
slice = f32[16,64,256]{2,1,0} slice(f32[512,2048,2048]{2,1,0} cp1d), slice={[0:16], [0:64], [0:256]}
c0 = f32[16,256,256]{2,1,0} convolution(p0, slice),
window={size=16 stride=15 lhs_dilate=16}, dim_labels=0fb_0io->0fb
c1 = f32[16,256,256]{2,1,0} convolution(p0, slice),
window={size=16 stride=15 lhs_dilate=16}, dim_labels=0fb_0io->0fb
ROOT tuple.2 = (f32[16,256,256]{2,1,0}, f32[16,256,256]{2,1,0}, f32[128,2048,2048]{2,1,0}, f32[128,2048,2048]{2,1,0}) tuple(c0, c1, cp2d, cp3d)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module, ParseHloText(hlo_string));
HloSchedule& module_schedule = hlo_module->schedule();
EXPECT_TRUE(hlo_module->has_entry_computation());
auto sched_config = GetDefaultSchedConfig();
sched_config.aggressive_scheduling_policies = true;
sched_config.enable_release_start_policy = true;
EXPECT_TRUE(RunScheduler(hlo_module.get(), sched_config,
std::make_unique<TestLatencyEstimator>())
.ok());
EXPECT_TRUE(hlo_module->has_entry_computation());
std::vector<HloInstruction*> new_instruction_sequence =
module_schedule.sequence(hlo_module->entry_computation()).instructions();
if (VLOG_IS_ON(1)) {
for (auto* new_i : new_instruction_sequence) {
VLOG(1) << new_i->ToString();
}
}
EXPECT_LT(GetIndex(new_instruction_sequence, "cp2s"),
GetIndex(new_instruction_sequence, "c0"));
EXPECT_LT(GetIndex(new_instruction_sequence, "c0"),
GetIndex(new_instruction_sequence, "cp2d"));
EXPECT_LT(GetIndex(new_instruction_sequence, "cp2d"),
GetIndex(new_instruction_sequence, "cp3s"));
EXPECT_LT(GetIndex(new_instruction_sequence, "cp3s"),
GetIndex(new_instruction_sequence, "c1"));
EXPECT_LT(GetIndex(new_instruction_sequence, "c1"),
GetIndex(new_instruction_sequence, "cp3d"));
}
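// Defines a target-specific AsyncTracker with one extra shareable resource
// and checks that its name, hazard type, and available-resource count are
// reported correctly through the AsyncTracker interface.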
TEST_F(LatencyHidingSchedulerTest, AsyncTrackerTestForTargetDefinedResources) {
class AsyncTrackerForMyTarget : public AsyncTracker {
enum class MyTargetResourceType {
kTargetResource0 = 0,
kNumTargetResources = 1,
};
public:
explicit AsyncTrackerForMyTarget(const SchedulerConfig& config,
int64_t target_resource0_limit = 3)
: AsyncTracker(config),
target_resource0_limit_(target_resource0_limit) {}
absl::string_view GetResourceName(int64_t resource_type) const override {
const int64_t first_target_resource = GetFirstTargetDefinedResource();
if (resource_type < first_target_resource) {
return AsyncTracker::GetResourceName(resource_type);
}
CHECK_LE(resource_type,
first_target_resource + GetNumTargetDefinedResources());
switch (resource_type - first_target_resource) {
case static_cast<int64_t>(MyTargetResourceType::kTargetResource0):
return "kTargetResource0";
default:
return "";
}
}
ResourceHazardType GetResourceHazardType(
int64_t resource_type) const override {
const int64_t first_target_resource = GetFirstTargetDefinedResource();
if (resource_type < first_target_resource) {
return AsyncTracker::GetResourceHazardType(resource_type);
}
CHECK_LE(resource_type,
first_target_resource + GetNumTargetDefinedResources());
switch (resource_type - first_target_resource) {
case static_cast<int64_t>(MyTargetResourceType::kTargetResource0):
return ResourceHazardType::kShareable;
default:
return ResourceHazardType::kUnshareable;
}
}
int64_t GetNumTargetDefinedResources() const override {
return static_cast<int64_t>(MyTargetResourceType::kNumTargetResources);
}
int64_t GetNumAvailableResources(int64_t resource_type) const override {
const int64_t first_target_resource =
AsyncTracker::GetFirstTargetDefinedResource();
CHECK_GE(resource_type, first_target_resource);
CHECK_LT(resource_type,
first_target_resource + GetNumTargetDefinedResources());
switch (resource_type - first_target_resource) {
case (static_cast<int64_t>(MyTargetResourceType::kTargetResource0)):
return static_cast<int64_t>(target_resource0_limit_);
default:
return 1;
}
}
private:
const int64_t target_resource0_limit_;
};
const int64_t target_resource0_overlap_limit = 5;
AsyncTrackerForMyTarget async_tracker_for_my_target(
SchedulerConfig(), target_resource0_overlap_limit);
CHECK_EQ(async_tracker_for_my_target.GetNumTargetDefinedResources(), 1);
const int64_t target_resource0_index =
static_cast<int64_t>(ResourceType::kTargetDefinedResourcesBound) + 1;
CHECK_EQ(async_tracker_for_my_target.GetResourceName(target_resource0_index),
"kTargetResource0");
CHECK_EQ(
static_cast<int64_t>(async_tracker_for_my_target.GetResourceHazardType(
target_resource0_index)),
static_cast<int64_t>(ResourceHazardType::kShareable));
CHECK_EQ(async_tracker_for_my_target.GetNumAvailableResources(
target_resource0_index),
target_resource0_overlap_limit);
}
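// Exercises AddOccupierToResource/DeleteOccupierFromResource for a shared
// resource; check_eq compares the projected finish times (PFTs) of the
// current occupiers against the expected values.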
TEST_F(LatencyHidingSchedulerTest, AddDeleteOccupierForSharedResource) {
std::vector<std::pair<HloEdge*, HloGraphNode::TimeCost>> occupiers;
  std::function<bool(std::vector<double>)> check_eq =
      [&occupiers](std::vector<double> times) {
if (times.size() != occupiers.size()) {
return false;
}
int64_t i = 0;
for (auto it = occupiers.begin(); it != occupiers.end(); ++it) {
if (std::abs(times[i] - it->second) > 0.0001) {
VLOG(1)
<< "PFT in occupier list does not match the given value (at index "
<< i << "): " << it->second << " vs " << times[i];
return false;
}
i++;
}
return true;
};
HloEdge edge1(3, nullptr);
HloEdge edge2(3, nullptr);
HloEdge edge3(1, nullptr);
DefaultSchedulerCore::AddOccupierToResource(0, edge1, occupiers);
CHECK(check_eq({3}));
DefaultSchedulerCore::AddOccupierToResource(1, edge2, occupiers);
CHECK(check_eq({5, 6}));
DefaultSchedulerCore::AddOccupierToResource(1, edge3, occupiers);
CHECK(check_eq({4, 6, 7}));
occupiers.clear();
edge1.SetOriginalLatency(1);
edge2.SetOriginalLatency(2);
edge3.SetOriginalLatency(3);
DefaultSchedulerCore::AddOccupierToResource(0, edge1, occupiers);
CHECK(check_eq({1}));
DefaultSchedulerCore::AddOccupierToResource(0, edge2, occupiers);
CHECK(check_eq({2, 3}));
DefaultSchedulerCore::AddOccupierToResource(0, edge3, occupiers);
CHECK(check_eq({3, 5, 6}));
occupiers.clear();
DefaultSchedulerCore::AddOccupierToResource(0, edge1, occupiers);
CHECK(check_eq({1}));
DefaultSchedulerCore::AddOccupierToResource(0, edge3, occupiers);
CHECK(check_eq({2, 4}));
DefaultSchedulerCore::AddOccupierToResource(0, edge2, occupiers);
CHECK(check_eq({3, 5, 6}));
occupiers.clear();
DefaultSchedulerCore::AddOccupierToResource(0, edge2, occupiers);
CHECK(check_eq({2}));
DefaultSchedulerCore::AddOccupierToResource(0, edge1, occupiers);
CHECK(check_eq({2, 3}));
DefaultSchedulerCore::AddOccupierToResource(0, edge3, occupiers);
CHECK(check_eq({3, 5, 6}));
occupiers.clear();
DefaultSchedulerCore::AddOccupierToResource(0, edge2, occupiers);
CHECK(check_eq({2}));
DefaultSchedulerCore::AddOccupierToResource(0, edge3, occupiers);
CHECK(check_eq({4, 5}));
DefaultSchedulerCore::AddOccupierToResource(0, edge1, occupiers);
CHECK(check_eq({3, 5, 6}));
occupiers.clear();
DefaultSchedulerCore::AddOccupierToResource(0, edge3, occupiers);
CHECK(check_eq({3}));
DefaultSchedulerCore::AddOccupierToResource(0, edge1, occupiers);
CHECK(check_eq({2, 4}));
DefaultSchedulerCore::AddOccupierToResource(0, edge2, occupiers);
CHECK(check_eq({3, 5, 6}));
occupiers.clear();
DefaultSchedulerCore::AddOccupierToResource(0, edge3, occupiers);
CHECK(check_eq({3}));
DefaultSchedulerCore::AddOccupierToResource(0, edge2, occupiers);
CHECK(check_eq({4, 5}));
DefaultSchedulerCore::AddOccupierToResource(0, edge1, occupiers);
CHECK(check_eq({3, 5, 6}));
occupiers.clear();
DefaultSchedulerCore::AddOccupierToResource(0, edge1, occupiers);
CHECK(check_eq({1}));
DefaultSchedulerCore::AddOccupierToResource(1, edge2, occupiers);
CHECK(check_eq({1, 3}));
DefaultSchedulerCore::AddOccupierToResource(2, edge3, occupiers);
CHECK(check_eq({1, 4, 6}));
HloEdge edge0(0.5, nullptr);
DefaultSchedulerCore::AddOccupierToResource(2, edge0, occupiers);
CHECK(check_eq({1, 3.5, 4.5, 6.5}));
occupiers.clear();
edge1.SetOriginalLatency(1);
edge2.SetOriginalLatency(2);
edge3.SetOriginalLatency(3);
DefaultSchedulerCore::AddOccupierToResource(0, edge1, occupiers);
DefaultSchedulerCore::AddOccupierToResource(0, edge2, occupiers);
DefaultSchedulerCore::AddOccupierToResource(0, edge3, occupiers);
CHECK(check_eq({3, 5, 6}));
auto res =
DefaultSchedulerCore::DeleteOccupierFromResource(0, edge0, occupiers);
CHECK(!res);
DefaultSchedulerCore::DeleteOccupierFromResource(0, edge1, occupiers);
CHECK(check_eq({4, 5}));
DefaultSchedulerCore::AddOccupierToResource(0, edge1, occupiers);
CHECK(check_eq({3, 5, 6}));
DefaultSchedulerCore::DeleteOccupierFromResource(0, edge2, occupiers);
CHECK(check_eq({2, 4}));
DefaultSchedulerCore::AddOccupierToResource(0, edge2, occupiers);
CHECK(check_eq({3, 5, 6}));
DefaultSchedulerCore::DeleteOccupierFromResource(0, edge3, occupiers);
CHECK(check_eq({2, 3}));
DefaultSchedulerCore::AddOccupierToResource(0, edge3, occupiers);
CHECK(check_eq({3, 5, 6}));
DefaultSchedulerCore::DeleteOccupierFromResource(1, edge1, occupiers);
CHECK(check_eq({4.3333333, 5.3333333}));
occupiers.clear();
DefaultSchedulerCore::AddOccupierToResource(0, edge1, occupiers);
DefaultSchedulerCore::AddOccupierToResource(0, edge2, occupiers);
DefaultSchedulerCore::AddOccupierToResource(0, edge3, occupiers);
DefaultSchedulerCore::DeleteOccupierFromResource(4, edge1, occupiers);
CHECK(check_eq({5, 6}));
occupiers.clear();
DefaultSchedulerCore::AddOccupierToResource(0, edge1, occupiers);
DefaultSchedulerCore::AddOccupierToResource(0, edge2, occupiers);
DefaultSchedulerCore::AddOccupierToResource(0, edge3, occupiers);
DefaultSchedulerCore::DeleteOccupierFromResource(4, edge2, occupiers);
CHECK(check_eq({3, 5.5}));
occupiers.clear();
DefaultSchedulerCore::AddOccupierToResource(0, edge1, occupiers);
DefaultSchedulerCore::AddOccupierToResource(0, edge2, occupiers);
DefaultSchedulerCore::AddOccupierToResource(0, edge3, occupiers);
DefaultSchedulerCore::DeleteOccupierFromResource(4, edge3, occupiers);
CHECK(check_eq({3, 4.5}));
}
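// With memory_limit = 0 and depth-based memory pressure reduction enabled,
// the copy consumer g is expected to be scheduled before f.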
TEST_F(LatencyHidingSchedulerTest, DepthPressureReduction) {
absl::string_view hlo_string = R"(
HloModule serial_collective_permute_test, is_scheduled=true
ENTRY after_optimizations_test {
%parameter.1 = bf16[8]{0} parameter(0)
%parameter.2 = bf16[8]{0} parameter(1)
%parameter.3 = bf16[8]{0} parameter(2)
%parameter.4 = bf16[8]{0} parameter(3)
%collective-permute.2 = bf16[8]{0} collective-permute(parameter.1), source_target_pairs={{0,1},{1,2},{2,3}}
%a = bf16[8]{0} add(collective-permute.2, parameter.2)
%b = bf16[8]{0} add(a, parameter.3)
%c = bf16[8]{0} add(b, parameter.4)
%d = bf16[8]{0} add(c, parameter.4)
%c1 = bf16[8]{0} copy(d)
%e = bf16[8]{0} add(d, parameter.3)
%c0 = bf16[8]{0} copy(e)
%f = bf16[8]{0} add(e, parameter.2)
%h = bf16[8]{0} add(c0, b)
%g = bf16[8]{0} add(c1, c)
%i = bf16[8]{0} add(f, a)
ROOT %t = (bf16[8]{0}, bf16[8]{0}, bf16[8]{0}, bf16[8]{0}) tuple(f, g, h, i)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module, ParseHloText(hlo_string));
HloSchedule& module_schedule = hlo_module->schedule();
EXPECT_TRUE(hlo_module->has_entry_computation());
HloComputation* entry_computation = hlo_module->entry_computation();
std::vector<HloInstruction*> original_instruction_sequence =
module_schedule.sequence(entry_computation).instructions();
auto sched_config = GetDefaultSchedConfig();
sched_config.memory_limit = 0;
sched_config.depth_based_memory_pressure_reduction = true;
EXPECT_TRUE(RunScheduler(hlo_module.get(), sched_config).ok());
std::vector<HloInstruction*> new_instruction_sequence =
module_schedule.sequence(entry_computation).instructions();
if (VLOG_IS_ON(1)) {
for (auto* new_i : new_instruction_sequence) {
VLOG(1) << new_i->ToString();
}
}
const HloInstruction* f = FindInstruction(hlo_module.get(), "f");
const HloInstruction* g = FindInstruction(hlo_module.get(), "g");
EXPECT_LT(PositionInVector(new_instruction_sequence, g),
PositionInVector(new_instruction_sequence, f));
}
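// With rerun = 1 the scheduler retries under a smaller memory limit, which
// is expected to place the slice s before the collective-permute-start.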
TEST_F(LatencyHidingSchedulerTest, RerunWithSmallerMemoryLimit) {
absl::string_view hlo_string = R"(
HloModule rerun_scheduler_test, is_scheduled=true
ENTRY main {
p0 = bf16[8]{0} parameter(0)
c = bf16[] constant(0)
b = bf16[43]{0} broadcast(c), dimensions={}
s = bf16[1]{0} slice(b), slice={[0:1]}
cp = bf16[8]{0} collective-permute(p0), source_target_pairs={{0,1},{1,2},{2,3}}
ROOT tuple = (bf16[8]{0}, bf16[1]{0}) tuple(cp, s)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module, ParseHloText(hlo_string));
HloSchedule& module_schedule = hlo_module->schedule();
EXPECT_TRUE(hlo_module->has_entry_computation());
HloComputation* entry_computation = hlo_module->entry_computation();
std::vector<HloInstruction*> original_instruction_sequence =
module_schedule.sequence(entry_computation).instructions();
auto sched_config = GetDefaultSchedConfig();
sched_config.memory_limit = 110;
sched_config.rerun = 1;
EXPECT_TRUE(RunScheduler(hlo_module.get(), sched_config).ok());
std::vector<HloInstruction*> new_instruction_sequence =
module_schedule.sequence(entry_computation).instructions();
if (VLOG_IS_ON(1)) {
for (auto* new_i : new_instruction_sequence) {
VLOG(1) << new_i->ToString();
}
}
const HloInstruction* s = FindInstruction(hlo_module.get(), "s");
const HloInstruction* cps =
FindInstruction(hlo_module.get(), "collective-permute-start");
EXPECT_LT(PositionInVector(new_instruction_sequence, s),
PositionInVector(new_instruction_sequence, cps));
}
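// Only checks that scheduling succeeds: multiple sparsecore
// call-start/call-done chains must not introduce a dependency loop.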
TEST_F(LatencyHidingSchedulerTest, MultipleAsyncDoneOperationsDoNotCreateLoop) {
absl::string_view hlo_string = R"(
HloModule multiple_async_done_scheduler_test, is_scheduled=true
called_computation {
ROOT %param = s32[<=4096]{0:T(8)M(1024)} parameter(0)
}
ENTRY main {
%while_body_forward_pass_input_tuple = (s32[<=4096]{0:T(8)M(1024)}, s32[<=4096]{0:T(8)M(1024)}, s32[<=4096]{0:T(8)M(1024)}) parameter(0), backend_config={"flag_configs":[],"scoped_memory_configs":[],"compute_type":"COMPUTE_TYPE_SCALAR"}
%get-tuple-element.0 = s32[<=4096]{0:T(8)M(1024)} get-tuple-element(
(s32[<=4096]{0:T(8)M(1024)}, s32[<=4096]{0:T(8)M(1024)}, s32[<=4096]{0:T(8)M(1024)}) %while_body_forward_pass_input_tuple),
index=0, backend_config={"flag_configs":[],"scoped_memory_configs":[],"compute_type":"COMPUTE_TYPE_SCALAR"}
%get-tuple-element.1 = s32[<=4096]{0:T(8)M(1024)} get-tuple-element(
(s32[<=4096]{0:T(8)M(1024)}, s32[<=4096]{0:T(8)M(1024)}, s32[<=4096]{0:T(8)M(1024)}) %while_body_forward_pass_input_tuple),
index=1, backend_config={"flag_configs":[],"scoped_memory_configs":[],"compute_type":"COMPUTE_TYPE_SCALAR"}
%call-start.1 = ((s32[<=4096]{0:T(8)M(1024)}), s32[<=4096]{0:T(8)M(1024)}, u32[]{:T(8)S(8)})
call-start(s32[<=4096]{0:T(8)M(1024)} %get-tuple-element.1),
async_execution_thread="sparsecore", to_apply=%called_computation
%call-done.1 = s32[<=4096]{0:T(8)M(1024)}
call-done(((s32[<=4096]{0:T(8)M(1024)}), s32[<=4096]{0:T(8)M(1024)}, u32[]{:T(8)S(8)}) %call-start.1)
%call-start.2 = ((s32[<=4096]{0:T(8)M(1024)}), s32[<=4096]{0:T(8)M(1024)}, u32[]{:T(8)S(8)})
call-start(s32[<=4096]{0:T(8)M(1024)} %call-done.1),
async_execution_thread="sparsecore", to_apply=%called_computation
%call-done.2 = s32[<=4096]{0:T(8)M(1024)}
call-done(((s32[<=4096]{0:T(8)M(1024)}), s32[<=4096]{0:T(8)M(1024)}, u32[]{:T(8)S(8)}) %call-start.2)
%call-start.3 = ((s32[<=4096]{0:T(8)M(1024)}), s32[<=4096]{0:T(8)M(1024)}, u32[]{:T(8)S(8)})
call-start(s32[<=4096]{0:T(8)M(1024)} %get-tuple-element.0),
async_execution_thread="sparsecore", to_apply=%called_computation
%call-done.3 = s32[<=4096]{0:T(8)M(1024)}
call-done(((s32[<=4096]{0:T(8)M(1024)}), s32[<=4096]{0:T(8)M(1024)}, u32[]{:T(8)S(8)}) %call-start.3)
ROOT %tuple.6 = (s32[<=4096]{0:T(8)M(1024)}, s32[<=4096]{0:T(8)M(1024)})
tuple(s32[<=4096]{0:T(8)M(1024)} %call-done.2, s32[<=4096]{0:T(8)M(1024)} %call-done.3),
backend_config={"flag_configs":[],"scoped_memory_configs":[],"compute_type":"COMPUTE_TYPE_SCALAR"}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module, ParseHloText(hlo_string));
HloSchedule& module_schedule = hlo_module->schedule();
EXPECT_TRUE(hlo_module->has_entry_computation());
HloComputation* entry_computation = hlo_module->entry_computation();
std::vector<HloInstruction*> original_instruction_sequence =
module_schedule.sequence(entry_computation).instructions();
auto sched_config = GetDefaultSchedConfig();
EXPECT_TRUE(RunScheduler(hlo_module.get(), sched_config).ok());
}
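// The copy-start/copy-done pair is expected to be split around the first
// convolution so the host copy overlaps compute.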
TEST_F(LatencyHidingSchedulerTest, CopyScheduling) {
absl::string_view hlo_string = R"(
HloModule EinsumTest, is_scheduled=true
ENTRY AddR2 {
y_host = bf16[12800,12800]{1,0:T(8,128)(2,1)} parameter(1)
z = bf16[12800,12800]{1,0:T(8,128)(2,1)} parameter(2)
x = bf16[12800,12800]{1,0:T(8,128)(2,1)} parameter(0)
convolution = bf16[12800,12800]{1,0:T(8,128)(2,1)} convolution(x, z), dim_labels=bf_io->bf
copy-start = (bf16[12800,12800]{1,0:T(8,128)(2,1)}, bf16[12800,12800]{1,0:T(8,128)(2,1)}, u32[]{:S(2)}) copy-start(y_host)
copy-done = bf16[12800,12800]{1,0:T(8,128)(2,1)} copy-done(copy-start)
ROOT convolution.1 = bf16[12800,12800]{1,0:T(8,128)(2,1)} convolution(convolution, copy-done), dim_labels=bf_io->bf
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module, ParseHloText(hlo_string));
HloSchedule& module_schedule = hlo_module->schedule();
EXPECT_TRUE(hlo_module->has_entry_computation());
HloComputation* entry_computation = hlo_module->entry_computation();
std::vector<HloInstruction*> original_instruction_sequence =
module_schedule.sequence(entry_computation).instructions();
auto sched_config = GetDefaultSchedConfig();
EXPECT_TRUE(RunScheduler(hlo_module.get(), sched_config).ok());
const HloInstruction* conv = FindInstruction(hlo_module.get(), "convolution");
const HloInstruction* cps = FindInstruction(hlo_module.get(), "copy-start");
const HloInstruction* cpd = FindInstruction(hlo_module.get(), "copy-done");
std::vector<HloInstruction*> new_instruction_sequence =
module_schedule.sequence(entry_computation).instructions();
EXPECT_LT(PositionInVector(new_instruction_sequence, cps),
PositionInVector(new_instruction_sequence, conv));
EXPECT_LT(PositionInVector(new_instruction_sequence, conv),
PositionInVector(new_instruction_sequence, cpd));
XLA_VLOG_LINES(1, hlo_module->ToString());
}
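// As above with two host copies: both copy-starts should be issued before
// the convolution, and the convolution before copy-done2.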
TEST_F(LatencyHidingSchedulerTest, MaxCopyScheduling) {
absl::string_view hlo_string = R"(
HloModule EinsumTest, is_scheduled=true
ENTRY AddR2 {
y_host = bf16[12800,12800]{1,0:T(8,128)(2,1)} parameter(1)
q_host = bf16[12800,12800]{1,0:T(8,128)(2,1)} parameter(3)
z = bf16[12800,12800]{1,0:T(8,128)(2,1)} parameter(2)
x = bf16[12800,12800]{1,0:T(8,128)(2,1)} parameter(0)
convolution = bf16[12800,12800]{1,0:T(8,128)(2,1)} convolution(x, z), dim_labels=bf_io->bf
copy-start = (bf16[12800,12800]{1,0:T(8,128)(2,1)}, bf16[12800,12800]{1,0:T(8,128)(2,1)}, u32[]{:S(2)}) copy-start(y_host)
copy-done = bf16[12800,12800]{1,0:T(8,128)(2,1)} copy-done(copy-start)
copy-start2 = (bf16[12800,12800]{1,0:T(8,128)(2,1)}, bf16[12800,12800]{1,0:T(8,128)(2,1)}, u32[]{:S(2)}) copy-start(q_host)
copy-done2 = bf16[12800,12800]{1,0:T(8,128)(2,1)} copy-done(copy-start2)
ROOT t = (bf16[12800,12800]{1,0:T(8,128)(2,1)}, bf16[12800,12800]{1,0:T(8,128)(2,1)}) tuple(copy-done2, copy-done)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module, ParseHloText(hlo_string));
HloSchedule& module_schedule = hlo_module->schedule();
EXPECT_TRUE(hlo_module->has_entry_computation());
HloComputation* entry_computation = hlo_module->entry_computation();
std::vector<HloInstruction*> original_instruction_sequence =
module_schedule.sequence(entry_computation).instructions();
auto sched_config = GetDefaultSchedConfig();
EXPECT_TRUE(RunScheduler(hlo_module.get(), sched_config).ok());
const HloInstruction* conv = FindInstruction(hlo_module.get(), "convolution");
const HloInstruction* cps = FindInstruction(hlo_module.get(), "copy-start");
const HloInstruction* cps2 = FindInstruction(hlo_module.get(), "copy-start2");
const HloInstruction* cpd2 = FindInstruction(hlo_module.get(), "copy-done2");
std::vector<HloInstruction*> new_instruction_sequence =
module_schedule.sequence(entry_computation).instructions();
EXPECT_LT(PositionInVector(new_instruction_sequence, cps2),
PositionInVector(new_instruction_sequence, conv));
EXPECT_LT(PositionInVector(new_instruction_sequence, conv),
PositionInVector(new_instruction_sequence, cpd2));
EXPECT_LT(PositionInVector(new_instruction_sequence, cps),
PositionInVector(new_instruction_sequence, cpd2));
XLA_VLOG_LINES(1, hlo_module->ToString());
}
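// The loop-peeled host-transfer send-done must be scheduled before the
// while loop that consumes the received data.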
TEST_F(LatencyHidingSchedulerTest, ScheduleLoopPeeledSendDoneBeforeWhile) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
while_cond {
param = (bf16[1,1,4096,1344]{2,3,1,0:T(8,128)(2,1)}, bf16[1,1,4096,1344]{2,3,1,0:T(8,128)(2,1)}, pred[]) parameter(0)
ROOT gte = pred[] get-tuple-element(param), index=2
}
while_body {
param = (bf16[1,1,4096,1344]{2,3,1,0:T(8,128)(2,1)}, bf16[1,1,4096,1344]{2,3,1,0:T(8,128)(2,1)}, pred[]) parameter(0)
gte0 = bf16[1,1,4096,1344]{2,3,1,0:T(8,128)(2,1)} get-tuple-element(param), index=0
gte1 = bf16[1,1,4096,1344]{2,3,1,0:T(8,128)(2,1)} get-tuple-element(param), index=1
%add.0 = bf16[1,1,4096,1344]{2,3,1,0:T(8,128)(2,1)} add(gte0, gte1)
gte2 = pred[] get-tuple-element(param), index=2
ROOT tuple = (bf16[1,1,4096,1344]{2,3,1,0:T(8,128)(2,1)}, bf16[1,1,4096,1344]{2,3,1,0:T(8,128)(2,1)}, pred[]) tuple(%add.0, gte1, gte2)
}
ENTRY %entry {
%p0 = bf16[1,1,4096,1344]{2,3,1,0:T(8,128)(2,1)} parameter(0)
%p1 = bf16[1,1,4096,1344]{2,3,1,0:T(8,128)(2,1)} parameter(1)
%after-all = token[] after-all()
%send = (bf16[1,1,4096,1344]{2,3,1,0:T(8,128)(2,1)}, u32[], token[]) send(bf16[1,1,4096,1344]{2,3,1,0:T(8,128)(2,1)} %p0, token[] %after-all), channel_id=1246, is_host_transfer=true, frontend_attributes={_xla_host_transfer_handler_name="xla_megascale_runtime",_xla_host_transfer_rendezvous="collective-permute.145_0",_xla_megascale_target="{{200000->100000},{200001->100001},{200002->100002},{200003->100003},{200004->100004},{200005->100005},{200006->100006},{200007->100007},{200008->100008},{200009->100009},{200010->100010},{200011->100011},{200012->100012},{200013->100013},{200014->100014},{200015->100015},{200016->100016},{200017->100017},{200018->100018},{200019->100019},{200020->100020},{200021->100021},{200022->100022},{200023->100023},{200024->100024},{200025->100025},{200026->100026},{200027->100027},{200028->100028},{200029->100029},{200030->100030},{200031->100031},{200032->100032},{200033->100033},{200034->100034},{200035->100035},{200036->100036},{200037->100037},{200038->100038},{200039->100039},{200040->100040},{200041->100041},{200042->100042},{200043->100043},{200044->100044},{200045->100045},{200046->100046},{200047->100047},{200048->100048},{200049->100049},{200050->100050},{200051->100051},{200052->100052},{200053->100053},{200054->100054},{200055->100055},{200056->100056},{200057->100057},{200058->100058},{200059->100059},{200060->100060},{200061->100061},{200062->100062},{200063->100063},{200064->100064},{200065->100065},{200066->100066},{200067->100067},{200068->100068},{200069->100069},{200070->100070},{200071->100071},{200072->100072},{200073->100073},{200074->100074},{200075->100075},{200076->100076},{200077->100077},{200078->100078},{200079->100079},{200080->100080},{200081->100081},{200082->100082},{200083->100083},{200084->100084},{200085->100085},{200086->100086},{200087->100087},{200088->100088},{200089->100089},{200090->100090},{200091->100091},{200092->100092},{200093->100093},{200094->100094},{200095->100095},{200096->100096},{200097->100097},{200098->100098},{200099->100099},{200100->100100},{200101->100101},{200102->100102},{200103->100103},{200104->100104},{200105->100105},{200106->100106},{200107->100107},{200108->100108},{200109->100109},{200110->100110},{200111->100111},{200112->100112},{200113->100113},{200114->100114},{200115->100115},{200116->100116},{200117->100117},{200118->100118},{200119->100119},{200120->100120},{200121->100121},{200122->100122},{200123->100123},{200124->100124},{200125->100125},{200126->100126},{200127->100127}}",_xla_megascale_transfer_type="ONE_TO_ONE"}, backend_config={"flag_configs":[],"scoped_memory_configs":[],"compute_type":"COMPUTE_TYPE_DEFAULT","device_type":"DEVICE_TYPE_INVALID","used_scoped_memory_configs":[],"customized_send_recv_config":{"dcn_collective_permute_send":{"non_source_slice_ids":[0]}}}
%recv = (bf16[1,1,4096,1344]{2,3,1,0:T(8,128)(2,1)}, u32[], token[]) recv(token[] %after-all), channel_id=1247, is_host_transfer=true, frontend_attributes={_xla_host_transfer_handler_name="xla_megascale_runtime",_xla_host_transfer_rendezvous="collective-permute.145_0",_xla_megascale_target="{{200000->100000},{200001->100001},{200002->100002},{200003->100003},{200004->100004},{200005->100005},{200006->100006},{200007->100007},{200008->100008},{200009->100009},{200010->100010},{200011->100011},{200012->100012},{200013->100013},{200014->100014},{200015->100015},{200016->100016},{200017->100017},{200018->100018},{200019->100019},{200020->100020},{200021->100021},{200022->100022},{200023->100023},{200024->100024},{200025->100025},{200026->100026},{200027->100027},{200028->100028},{200029->100029},{200030->100030},{200031->100031},{200032->100032},{200033->100033},{200034->100034},{200035->100035},{200036->100036},{200037->100037},{200038->100038},{200039->100039},{200040->100040},{200041->100041},{200042->100042},{200043->100043},{200044->100044},{200045->100045},{200046->100046},{200047->100047},{200048->100048},{200049->100049},{200050->100050},{200051->100051},{200052->100052},{200053->100053},{200054->100054},{200055->100055},{200056->100056},{200057->100057},{200058->100058},{200059->100059},{200060->100060},{200061->100061},{200062->100062},{200063->100063},{200064->100064},{200065->100065},{200066->100066},{200067->100067},{200068->100068},{200069->100069},{200070->100070},{200071->100071},{200072->100072},{200073->100073},{200074->100074},{200075->100075},{200076->100076},{200077->100077},{200078->100078},{200079->100079},{200080->100080},{200081->100081},{200082->100082},{200083->100083},{200084->100084},{200085->100085},{200086->100086},{200087->100087},{200088->100088},{200089->100089},{200090->100090},{200091->100091},{200092->100092},{200093->100093},{200094->100094},{200095->100095},{200096->100096},{200097->100097},{200098->100098},{200099->100099},{200100->100100},{200101->100101},{200102->100102},{200103->100103},{200104->100104},{200105->100105},{200106->100106},{200107->100107},{200108->100108},{200109->100109},{200110->100110},{200111->100111},{200112->100112},{200113->100113},{200114->100114},{200115->100115},{200116->100116},{200117->100117},{200118->100118},{200119->100119},{200120->100120},{200121->100121},{200122->100122},{200123->100123},{200124->100124},{200125->100125},{200126->100126},{200127->100127}}",_xla_megascale_transfer_type="ONE_TO_ONE"}, control-predecessors={%send}, backend_config={"flag_configs":[],"scoped_memory_configs":[],"compute_type":"COMPUTE_TYPE_DEFAULT","device_type":"DEVICE_TYPE_INVALID","used_scoped_memory_configs":[],"customized_send_recv_config":{"dcn_collective_permute_recv":{"non_target_slice_ids":[1]}}}
%recv-done = (bf16[1,1,4096,1344]{2,3,1,0:T(8,128)(2,1)}, token[]) recv-done((bf16[1,1,4096,1344]{2,3,1,0:T(8,128)(2,1)}, u32[], token[]) %recv), channel_id=1247, is_host_transfer=true, backend_config={"flag_configs":[],"scoped_memory_configs":[],"compute_type":"COMPUTE_TYPE_DEFAULT","device_type":"DEVICE_TYPE_INVALID","used_scoped_memory_configs":[],"customized_send_recv_config":{"dcn_collective_permute_recv":{"non_target_slice_ids":[1]}}}
%get-tuple-element = bf16[1,1,4096,1344]{2,3,1,0:T(8,128)(2,1)} get-tuple-element((bf16[1,1,4096,1344]{2,3,1,0:T(8,128)(2,1)}, token[]) %recv-done), index=0
%send-done = token[] send-done((bf16[1,1,4096,1344]{2,3,1,0:T(8,128)(2,1)}, u32[], token[]) %send), channel_id=1246, is_host_transfer=true, control-predecessors={%recv-done}, backend_config={"flag_configs":[],"scoped_memory_configs":[],"compute_type":"COMPUTE_TYPE_DEFAULT","device_type":"DEVICE_TYPE_INVALID","used_scoped_memory_configs":[],"customized_send_recv_config":{"dcn_collective_permute_send":{"non_source_slice_ids":[0]}}}
%p2 = pred[] parameter(2)
tuple = (bf16[1,1,4096,1344]{2,3,1,0:T(8,128)(2,1)}, bf16[1,1,4096,1344]{2,3,1,0:T(8,128)(2,1)}, pred[]) tuple(%get-tuple-element, %p1, %p2)
while = (bf16[1,1,4096,1344]{2,3,1,0:T(8,128)(2,1)}, bf16[1,1,4096,1344]{2,3,1,0:T(8,128)(2,1)}, pred[]) while(tuple), condition=while_cond, body=while_body
ROOT gte0 = bf16[1,1,4096,1344]{2,3,1,0:T(8,128)(2,1)} get-tuple-element(while), index=0
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module, ParseHloText(hlo_string));
HloSchedule& module_schedule = hlo_module->schedule();
EXPECT_TRUE(hlo_module->has_entry_computation());
auto sched_config = GetDefaultSchedConfig();
sched_config.collective_permute_overlap_limit = 2;
sched_config.all_gather_overlap_limit = 2;
EXPECT_TRUE(RunScheduler(hlo_module.get(), sched_config).ok());
EXPECT_TRUE(hlo_module->has_entry_computation());
std::vector<HloInstruction*> new_instruction_sequence =
module_schedule.sequence(hlo_module->entry_computation()).instructions();
EXPECT_LT(GetIndex(new_instruction_sequence, "send-done"),
GetIndex(new_instruction_sequence, "while"));
}
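// All-gather is marked as a selective resource and c2 as not valuable for
// selective overlap; expects c0 before ag-start, with c1 and c2 inside the
// ag-start/ag-done window.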
TEST_F(LatencyHidingSchedulerTest, AllGatherWithSelectiveOverlap) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
ENTRY %module {
%constant.19 = u32[] constant(0)
%replica_id = u32[]{:T(128)} replica-id()
%convert = f32[]{:T(128)} convert(u32[]{:T(128)} %replica_id)
%color_operand.1 = f32[8,256,256]{2,1,0:T(8,128)} broadcast(
f32[]{:T(128)} %convert), dimensions={}
%ag-start = (f32[8,256,256], f32[16,256,256]) all-gather-start(
f32[8,256,256] %color_operand.1), replica_groups={{0,1}}, dimensions={0},
metadata={op_type="AllGather" op_name="ag0"}
%ag-done = f32[16,256,256] all-gather-done(
(f32[8,256,256], f32[16,256,256]) %ag-start),
metadata={op_type="AllGather" op_name="ag0"}
p0 = f32[16,64,256]{2,1,0} parameter(0)
p1 = f32[16,64,256]{2,1,0} parameter(1)
p2 = f32[16,256,256]{2,1,0} parameter(2)
p3 = f32[16,256,256]{2,1,0} parameter(3)
c0 = f32[16,256,256]{2,1,0} convolution(p0, p1),
window={size=16 stride=15 lhs_dilate=16}, dim_labels=0fb_0io->0fb
c1 = f32[16,256,256]{2,1,0} convolution(p0, p1),
window={size=16 stride=15 lhs_dilate=16}, dim_labels=0fb_0io->0fb
c2 = f32[16,256,256]{2,1,0} convolution(p0, p1),
window={size=16 stride=15 lhs_dilate=16}, dim_labels=0fb_0io->0fb
ROOT a2 = f32[16,256,256]{2,1,0} add(%ag-done, c0)
}
)";
class SelectiveOverlapAsyncTracker : public AsyncTracker {
public:
explicit SelectiveOverlapAsyncTracker(const SchedulerConfig& sched_config)
: AsyncTracker(sched_config) {}
ResourceHazardType GetResourceHazardType(
int64_t resource_type) const override {
if (resource_type == ResourceTypeToIndex(ResourceType::kAllGather)) {
return ResourceHazardType::kSelective;
}
if (resource_type == AsyncTracker::GetFirstTargetDefinedResource()) {
return ResourceHazardType::kNonextendable;
}
return AsyncTracker::GetResourceHazardType(resource_type);
}
ResourcesVector GetResourcesFromInstruction(
const HloInstruction& hlo) const override {
ResourcesVector result = AsyncTracker::GetResourcesFromInstruction(hlo);
if (hlo.opcode() == HloOpcode::kAllGatherStart) {
result.push_back({AsyncTracker::GetFirstTargetDefinedResource(),
ResourceUsageType::kResourceRelease});
}
return result;
}
absl::InlinedVector<int64_t, 1> GetReleasedNonextendableResourcesFromVector(
const ResourcesVector& resources) const override {
absl::InlinedVector<int64_t, 1> non_extendable_resources;
for (const ResourcePair& resource : resources) {
if (GetResourceHazardType(resource.first) ==
ResourceHazardType::kNonextendable) {
non_extendable_resources.push_back({resource.first});
}
}
return non_extendable_resources;
}
void PostProcessScheduleGraph(
HloScheduleGraph* schedule_graph,
const LatencyEstimator* latency_estimator) const override {
for (const HloInstruction* instr :
schedule_graph->GetOriginalInstrList()) {
if (instr->name() == "c2") {
schedule_graph->GetNode(instr).SetValuableForSelectiveOverlap(false);
}
}
}
};
SchedulerConfig sched_config = GetDefaultSchedConfig();
sched_config.enable_selective_resources = true;
std::unique_ptr<AsyncTracker> async_tracker =
std::make_unique<SelectiveOverlapAsyncTracker>(sched_config);
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module, ParseHloText(hlo_string));
HloSchedule& module_schedule = hlo_module->schedule();
EXPECT_TRUE(hlo_module->has_entry_computation());
HloComputation* entry_computation = hlo_module->entry_computation();
std::vector<HloInstruction*> original_instruction_sequence =
module_schedule.sequence(entry_computation).instructions();
EXPECT_TRUE(RunScheduler(hlo_module.get(), sched_config,
std::make_unique<ApproximateLatencyEstimator>(),
std::move(async_tracker))
.ok());
std::vector<HloInstruction*> new_instruction_sequence =
module_schedule.sequence(entry_computation).instructions();
if (VLOG_IS_ON(1)) {
for (auto* new_i : new_instruction_sequence) {
VLOG(1) << new_i->ToString();
}
}
int c0_index = GetIndex(new_instruction_sequence, "c0");
int c1_index = GetIndex(new_instruction_sequence, "c1");
int c2_index = GetIndex(new_instruction_sequence, "c2");
int ag_start_index = GetIndex(new_instruction_sequence, "ag-start");
int ag_done_index = GetIndex(new_instruction_sequence, "ag-done");
EXPECT_LT(c0_index, ag_start_index);
EXPECT_LT(ag_start_index, c1_index);
EXPECT_LT(c1_index, c2_index);
EXPECT_LT(c2_index, ag_done_index);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/latency_hiding_scheduler.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/latency_hiding_scheduler_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
44da8ef5-e24f-4923-a163-f00321724747 | cpp | google/arolla | reduce | arolla/expr/optimization/peephole_optimizations/reduce.cc | arolla/expr/optimization/peephole_optimizations/reduce_test.cc | #include "arolla/expr/optimization/peephole_optimizations/reduce.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "arolla/expr/expr.h"
#include "arolla/expr/expr_node.h"
#include "arolla/expr/optimization/peephole_optimizer.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla::expr {
namespace {
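// Registers rewrites of three associations of chained math.add calls --
// (a+b)+(c+d), ((a+b)+c)+d, and a+(b+(c+d)) -- into a single math._add4.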
absl::Status AppendAdd4Optimizations(PeepholeOptimizationPack& optimizations) {
ExprNodePtr a = Placeholder("a");
ExprNodePtr b = Placeholder("b");
ExprNodePtr c = Placeholder("c");
ExprNodePtr d = Placeholder("d");
auto Add = [](auto a, auto b) { return CallOpReference("math.add", {a, b}); };
ASSIGN_OR_RETURN(ExprNodePtr pattern1, Add(Add(a, b), Add(c, d)));
ASSIGN_OR_RETURN(ExprNodePtr pattern2, Add(Add(Add(a, b), c), d));
ASSIGN_OR_RETURN(ExprNodePtr pattern3, Add(a, Add(b, Add(c, d))));
ASSIGN_OR_RETURN(ExprNodePtr replacement,
CallOpReference("math._add4", {a, b, c, d}));
ASSIGN_OR_RETURN(
optimizations.emplace_back(),
PeepholeOptimization::CreatePatternOptimization(pattern1, replacement));
ASSIGN_OR_RETURN(
optimizations.emplace_back(),
PeepholeOptimization::CreatePatternOptimization(pattern2, replacement));
ASSIGN_OR_RETURN(
optimizations.emplace_back(),
PeepholeOptimization::CreatePatternOptimization(pattern3, replacement));
return absl::OkStatus();
}
}  // namespace
absl::StatusOr<PeepholeOptimizationPack> ReduceOptimizations() {
PeepholeOptimizationPack optimizations;
RETURN_IF_ERROR(AppendAdd4Optimizations(optimizations));
return optimizations;
}
} | #include "arolla/expr/optimization/peephole_optimizations/reduce.h"
#include <cstddef>
#include <cstdint>
#include <memory>
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/status/status_matchers.h"
#include "absl/strings/str_format.h"
#include "absl/types/span.h"
#include "arolla/dense_array/qtype/types.h"
#include "arolla/expr/expr.h"
#include "arolla/expr/expr_node.h"
#include "arolla/expr/expr_visitor.h"
#include "arolla/expr/optimization/peephole_optimizer.h"
#include "arolla/expr/testing/testing.h"
#include "arolla/qtype/base_types.h"
namespace arolla::expr {
namespace {
using ::absl_testing::IsOkAndHolds;
using ::arolla::testing::EqualsExpr;
class ReduceOptimizationsTest : public ::testing::Test {
protected:
void SetUp() override {
ASSERT_OK_AND_ASSIGN(optimizer_,
CreatePeepholeOptimizer({ReduceOptimizations}));
}
std::unique_ptr<PeepholeOptimizer> optimizer_;
};
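// Counts every node in `expr` via post-order traversal; used to measure how
// much the add4 fusion shrinks an expression.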
size_t CountNodes(const ExprNodePtr& expr) {
size_t result = 0;
return PostOrderTraverse(
expr,
      [&](const ExprNodePtr& /*node*/,
          absl::Span<const size_t* const> /*visits*/) { return ++result; });
}
TEST_F(ReduceOptimizationsTest, SingleSubstitution) {
auto a = Leaf("l1");
auto b = Leaf("l2");
auto c = Leaf("l3");
auto d = Leaf("l4");
ASSERT_OK_AND_ASSIGN(auto ab, CallOp("math.add", {a, b}));
ASSERT_OK_AND_ASSIGN(auto cd, CallOp("math.add", {c, d}));
ASSERT_OK_AND_ASSIGN(auto abcd_balanced, CallOp("math.add", {ab, cd}));
ASSERT_OK_AND_ASSIGN(auto abcd_linear,
CallOp("math.add", {CallOp("math.add", {ab, c}), d}));
ASSERT_OK_AND_ASSIGN(auto abcd_reversed,
CallOp("math.add", {a, CallOp("math.add", {b, cd})}));
ASSERT_OK_AND_ASSIGN(auto abcd_optimized, CallOp("math._add4", {a, b, c, d}));
EXPECT_THAT(optimizer_->Apply(abcd_balanced),
IsOkAndHolds(EqualsExpr(abcd_optimized)));
EXPECT_THAT(optimizer_->Apply(abcd_linear),
IsOkAndHolds(EqualsExpr(abcd_optimized)));
EXPECT_THAT(optimizer_->Apply(abcd_reversed),
IsOkAndHolds(EqualsExpr(abcd_optimized)));
}
TEST_F(ReduceOptimizationsTest, BalancedTree) {
const int leaf_count = 128;
std::vector<ExprNodePtr> nodes;
nodes.reserve(leaf_count);
for (int i = 0; i < leaf_count; ++i) {
nodes.push_back(Leaf(absl::StrFormat("l%d", i)));
}
while (nodes.size() > 1) {
for (int64_t i = 0; i < nodes.size() / 2; ++i) {
nodes[i] = *CallOp("math.add", {nodes[i * 2], nodes[i * 2 + 1]});
}
if (nodes.size() % 2 == 1) {
nodes[nodes.size() / 2] = nodes.back();
}
nodes.resize((nodes.size() + 1) / 2);
}
ExprNodePtr expr = nodes[0];
EXPECT_EQ(CountNodes(expr), leaf_count + 127);
ASSERT_OK_AND_ASSIGN(auto res, optimizer_->Apply(expr));
EXPECT_EQ(CountNodes(res), leaf_count + 43);
}
TEST_F(ReduceOptimizationsTest, LinearTree) {
const int leaf_count = 128;
ExprNodePtr expr = Leaf("l0");
for (int i = 1; i < leaf_count; ++i) {
expr = *CallOp("math.add", {expr, Leaf(absl::StrFormat("l%d", i))});
}
EXPECT_EQ(CountNodes(expr), leaf_count + 127);
ASSERT_OK_AND_ASSIGN(auto res, optimizer_->Apply(expr));
EXPECT_EQ(CountNodes(res), leaf_count + 43);
}
TEST_F(ReduceOptimizationsTest, ReversedLinearTree) {
const int leaf_count = 128;
ExprNodePtr expr = Leaf("l0");
for (int i = 1; i < leaf_count; ++i) {
expr = *CallOp("math.add", {Leaf(absl::StrFormat("l%d", i)), expr});
}
EXPECT_EQ(CountNodes(expr), leaf_count + 127);
ASSERT_OK_AND_ASSIGN(auto res, optimizer_->Apply(expr));
EXPECT_EQ(CountNodes(res), leaf_count + 43);
}
}  // namespace
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/expr/optimization/peephole_optimizations/reduce.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/expr/optimization/peephole_optimizations/reduce_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
c00b722e-3972-4f34-b0cf-754b2294e5cd | cpp | tensorflow/tensorflow | shuffle_dataset_op | tensorflow/core/kernels/data/shuffle_dataset_op.cc | tensorflow/core/kernels/data/shuffle_dataset_op_test.cc | #include "tensorflow/core/kernels/data/shuffle_dataset_op.h"
#include <cstdint>
#include <deque>
#include <memory>
#include <numeric>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/data/name_utils.h"
#include "tensorflow/core/data/serialization_utils.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/partial_tensor_shape.h"
#include "tensorflow/core/framework/resource_mgr.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/kernels/data/random_seed_ops.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/random/philox_random.h"
#include "tensorflow/core/lib/random/random.h"
#include "tensorflow/core/lib/random/random_distributions.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/stringprintf.h"
namespace tensorflow {
namespace data {
constexpr const char* const ShuffleDatasetOpBase::kInputDataset;
constexpr const char* const ShuffleDatasetOpBase::kBufferSize;
constexpr const char* const ShuffleDatasetOpBase::kSeed;
constexpr const char* const ShuffleDatasetOpBase::kSeed2;
constexpr const char* const ShuffleDatasetOpBase::kOutputTypes;
constexpr const char* const ShuffleDatasetOpBase::kOutputShapes;
constexpr const char* const
ShuffleDatasetOpBase::kReshuffleEachIteration;
constexpr const char* const ShuffleDatasetOp::kDatasetType;
constexpr const char* const
ShuffleAndRepeatDatasetOp::kDatasetType;
constexpr const char* const ShuffleAndRepeatDatasetOp::kCount;
// Interval between "filling up shuffle buffer" log messages (10 seconds).
const int64_t kLogIntervalMicros = 10 * 1000000;
// At most this many epochs' worth of elements may be queued in the buffer.
const int64_t kMaxEpochsInBuffer = 3;
constexpr char kNumRandomSamples[] = "num_random_samples";
constexpr char kDataProduced[] = "data_produced";
constexpr char kEndOfInputSequence[] = "end_of_input_sequence";
constexpr char kEpoch[] = "epoch";
constexpr char kNumElements[] = "num_elements";
constexpr char kSlicesSize[] = "slices_size";
constexpr char kSlicesStart[] = "slices_start";
constexpr char kSlicesEnd[] = "slices_end";
constexpr char kSlicesReachedEndOfSequence[] = "slices_reached_end_of_sequence";
constexpr char kSeedGenerator[] = "SeedGenerator";
constexpr char kEpochNumRandomSamples[] = "epoch_num_random_samples";
constexpr char kShuffleDatasetV1[] = "ShuffleDataset";
constexpr char kShuffleDatasetV2[] = "ShuffleDatasetV2";
constexpr char kShuffleDatasetV3[] = "ShuffleDatasetV3";
constexpr char kShuffleAndRepeatDatasetV1[] = "ShuffleAndRepeatDataset";
constexpr char kShuffleAndRepeatDatasetV2[] = "ShuffleAndRepeatDatasetV2";
ShuffleDatasetOpBase::ShuffleDatasetOpBase(OpKernelConstruction* ctx)
: UnaryDatasetOpKernel(ctx) {}
class ShuffleDatasetOpBase::ShuffleDatasetBase : public DatasetBase {
public:
ShuffleDatasetBase(OpKernelContext* ctx, const DatasetBase* input,
int64_t buffer_size,
std::shared_ptr<SeedGenerator> seed_generator,
int64_t count)
: DatasetBase(DatasetContext(ctx)),
input_(input),
buffer_size_(buffer_size),
seed_generator_(std::move(seed_generator)),
count_(count),
traceme_metadata_(
{{"buffer_size",
strings::Printf("%lld", static_cast<long long>(buffer_size))}}) {
input_->Ref();
}
~ShuffleDatasetBase() override { input_->Unref(); }
virtual string op_type() const = 0;
const DataTypeVector& output_dtypes() const override {
return input_->output_dtypes();
}
const std::vector<PartialTensorShape>& output_shapes() const override {
return input_->output_shapes();
}
int64_t CardinalityInternal(CardinalityOptions options) const override {
if (count_ == -1 || input_->Cardinality(options) == kInfiniteCardinality) {
return kInfiniteCardinality;
} else if (input_->Cardinality(options) == kUnknownCardinality) {
return kUnknownCardinality;
} else {
return input_->Cardinality(options) * count_;
}
}
Status InputDatasets(std::vector<const DatasetBase*>* inputs) const override {
inputs->push_back(input_);
return absl::OkStatus();
}
Status CheckExternalState() const override {
return input_->CheckExternalState();
}
Status Get(OpKernelContext* ctx, int64 index,
std::vector<Tensor>* out_tensors) const override {
TF_RETURN_IF_ERROR(CheckRandomAccessCompatible(index));
{
mutex_lock l(mu_);
if (shuffled_indices_.empty()) {
InitializeRandomAccessIndices();
}
}
int64 shuffled_index;
{
tf_shared_lock l(mu_);
shuffled_index = shuffled_indices_[index];
}
TF_RETURN_IF_ERROR(input_->Get(ctx, shuffled_index, out_tensors));
return absl::OkStatus();
}
string DebugString() const override {
name_utils::DatasetDebugStringParams params;
params.set_args(buffer_size_, seed_generator_->seed(),
seed_generator_->seed2(), count_);
return name_utils::DatasetDebugString(op_type(), params);
}
std::unique_ptr<IteratorBase> MakeIteratorInternal(
const string& prefix) const override {
return std::make_unique<Iterator>(
Iterator::Params{this, name_utils::IteratorPrefix(op_type(), prefix)},
seed_generator_.get());
}
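  // Lazily builds a random permutation of [0, cardinality) with a
  // Fisher-Yates shuffle seeded from the dataset's seed generator; Get()
  // maps requested indices through this permutation.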
void InitializeRandomAccessIndices() const TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
const int64 cardinality = Cardinality();
shuffled_indices_ = std::vector<std::int64_t>(cardinality);
std::iota(shuffled_indices_.begin(), shuffled_indices_.end(), 0);
int64_t shuffled_index = 0;
random::PhiloxRandom parent_generator =
random::PhiloxRandom(seed_generator_->seed(), seed_generator_->seed2());
random::SingleSampleAdapter<random::PhiloxRandom> generator =
random::SingleSampleAdapter<random::PhiloxRandom>(&parent_generator);
while (shuffled_index < cardinality) {
int64_t offset = generator() % (cardinality - shuffled_index);
std::swap(shuffled_indices_[shuffled_index + offset],
shuffled_indices_[shuffled_index]);
shuffled_index += 1;
}
}
protected:
class Iterator : public DatasetIterator<ShuffleDatasetBase> {
public:
explicit Iterator(const Params& params, SeedGenerator* seed_generator)
: DatasetIterator<ShuffleDatasetBase>(params),
seed_generator_(seed_generator),
parent_generator_(seed_generator->seed(), seed_generator->seed2()),
generator_(&parent_generator_) {
if (params.dataset->buffer_size_ == kUnknownCardinality) {
buffer_ = std::make_unique<std::vector<std::vector<Tensor>>>();
} else {
buffer_ = std::make_unique<std::vector<std::vector<Tensor>>>(
params.dataset->buffer_size_);
}
}
bool SymbolicCheckpointCompatible() const override { return true; }
Status Initialize(IteratorContext* ctx) override {
mutex_lock l(mu_);
seed_generator_->GenerateSeeds(&seed_, &seed2_);
ResetRngs();
if (ctx->symbolic_checkpoint()) {
for (int64_t i = 0; i < buffer_->size(); ++i) {
checkpoint_indices_.insert(i);
}
}
return absl::OkStatus();
}
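    // Draws a uniformly random element from the oldest (front) epoch slice,
    // swaps the vacated buffer position to the slice boundary, and advances
    // the slice start.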
Status GetNextInternal(IteratorContext* ctx,
std::vector<Tensor>* out_tensors,
bool* end_of_sequence) override {
mutex_lock l(mu_);
TF_RETURN_IF_ERROR(FillBuffer(ctx));
if (num_elements_ == 0) {
DCHECK(input_impl_ == nullptr);
*end_of_sequence = true;
return absl::OkStatus();
}
*end_of_sequence = false;
ClearEmptySlices();
DCHECK(!slices_.empty());
int64_t offset =
Random() % (slices_.front()->end - slices_.front()->start);
int64_t index = (slices_.front()->start + offset) % buffer_->size();
*out_tensors = std::move(buffer_->at(index));
this->RecordBufferDequeue(ctx, *out_tensors);
std::swap(buffer_->at(index),
buffer_->at(slices_.front()->start % buffer_->size()));
checkpoint_indices_.insert(index);
checkpoint_indices_.insert(slices_.front()->start % buffer_->size());
slices_.front()->start++;
num_elements_--;
return absl::OkStatus();
}
protected:
std::shared_ptr<model::Node> CreateNode(
IteratorContext* ctx, model::Node::Args args) const override {
      return model::MakeKnownRatioNode(std::move(args),
                                       /*ratio=*/1);
}
void ResetRngs() TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
parent_generator_ = random::PhiloxRandom(seed_, seed2_);
generator_ =
random::SingleSampleAdapter<random::PhiloxRandom>(&parent_generator_);
generator_.Skip(num_random_samples_);
}
Status SaveInternal(SerializationContext* ctx,
IteratorStateWriter* writer) override {
mutex_lock l(mu_);
TF_RETURN_IF_ERROR(
writer->WriteScalar(prefix(), kEpochNumRandomSamples,
seed_generator_->num_random_samples()));
TF_RETURN_IF_ERROR(writer->WriteScalar(prefix(), kNumRandomSamples,
num_random_samples_));
TF_RETURN_IF_ERROR(writer->WriteScalar(prefix(), kSeed, seed_));
TF_RETURN_IF_ERROR(writer->WriteScalar(prefix(), kSeed2, seed2_));
TF_RETURN_IF_ERROR(writer->WriteScalar(
prefix(), kEndOfInputSequence, static_cast<int64_t>(!input_impl_)));
if (input_impl_) {
TF_RETURN_IF_ERROR(this->SaveInput(ctx, writer, input_impl_));
}
TF_RETURN_IF_ERROR(writer->WriteScalar(prefix(), kEpoch, epoch_));
TF_RETURN_IF_ERROR(
writer->WriteScalar(prefix(), kNumElements, num_elements_));
const std::string key_prefix = absl::StrCat(prefix(), kColon, "buffer");
if (ctx->symbolic_checkpoint()) {
TF_RETURN_IF_ERROR(UpdateCheckpointElements(
writer, key_prefix, *buffer_, checkpoint_indices_));
checkpoint_indices_.clear();
} else {
TF_RETURN_IF_ERROR(
WriteElementsToCheckpoint(writer, key_prefix, *buffer_));
}
TF_RETURN_IF_ERROR(
writer->WriteScalar(prefix(), kSlicesSize, slices_.size()));
for (size_t i = 0; i < slices_.size(); ++i) {
TF_RETURN_IF_ERROR(writer->WriteScalar(
prefix(), absl::StrJoin(std::make_tuple(kSlicesStart, i), "_"),
slices_[i]->start));
TF_RETURN_IF_ERROR(writer->WriteScalar(
prefix(), absl::StrJoin(std::make_tuple(kSlicesEnd, i), "_"),
slices_[i]->end));
TF_RETURN_IF_ERROR(writer->WriteScalar(
prefix(),
absl::StrJoin(std::make_tuple(kSlicesReachedEndOfSequence, i), "_"),
static_cast<int64_t>(slices_[i]->reached_end_of_sequence)));
}
if (data_produced_) {
TF_RETURN_IF_ERROR(
writer->WriteScalar(this->prefix(), kDataProduced, ""));
}
return absl::OkStatus();
}
Status RestoreInternal(IteratorContext* ctx,
IteratorStateReader* reader) override {
mutex_lock l(mu_);
int64_t num_random_samples;
TF_RETURN_IF_ERROR(reader->ReadScalar(prefix(), kEpochNumRandomSamples,
&num_random_samples));
seed_generator_->set_num_random_samples(num_random_samples);
seed_generator_->Reset();
TF_RETURN_IF_ERROR(reader->ReadScalar(prefix(), kNumRandomSamples,
&num_random_samples_));
TF_RETURN_IF_ERROR(reader->ReadScalar(prefix(), kSeed, &seed_));
TF_RETURN_IF_ERROR(reader->ReadScalar(prefix(), kSeed2, &seed2_));
ResetRngs();
int64_t input_empty;
TF_RETURN_IF_ERROR(
reader->ReadScalar(prefix(), kEndOfInputSequence, &input_empty));
if (static_cast<bool>(!input_empty)) {
TF_RETURN_IF_ERROR(this->dataset()->input_->MakeIterator(
ctx, this, this->prefix(), &input_impl_));
TF_RETURN_IF_ERROR(this->RestoreInput(ctx, reader, input_impl_));
} else {
input_impl_.reset();
}
TF_RETURN_IF_ERROR(reader->ReadScalar(this->prefix(), kEpoch, &epoch_));
TF_RETURN_IF_ERROR(
reader->ReadScalar(this->prefix(), kNumElements, &num_elements_));
size_t slices_size;
{
int64_t temp;
TF_RETURN_IF_ERROR(
reader->ReadScalar(this->prefix(), kSlicesSize, &temp));
slices_size = static_cast<size_t>(temp);
}
buffer_ = std::make_unique<std::vector<std::vector<Tensor>>>();
TF_RETURN_IF_ERROR(ReadElementsFromCheckpoint(
ctx, reader, absl::StrCat(prefix(), kColon, "buffer"),
buffer_.get()));
if (ctx->symbolic_checkpoint()) {
DCHECK(checkpoint_indices_.empty());
for (size_t i = 0; i < buffer_->size(); ++i) {
checkpoint_indices_.insert(i);
}
}
for (const auto& element : *buffer_) {
RecordBufferEnqueue(ctx, element);
}
if (!IsShuffleAll()) {
buffer_->resize(dataset()->buffer_size_);
}
slices_.clear();
for (size_t i = 0; i < slices_size; ++i) {
int64_t start;
TF_RETURN_IF_ERROR(reader->ReadScalar(
this->prefix(),
absl::StrJoin(std::make_tuple(kSlicesStart, i), "_"), &start));
int64_t end;
TF_RETURN_IF_ERROR(reader->ReadScalar(
this->prefix(), absl::StrJoin(std::make_tuple(kSlicesEnd, i), "_"),
&end));
int64_t reached_end_of_sequence;
TF_RETURN_IF_ERROR(reader->ReadScalar(
prefix(),
absl::StrJoin(std::make_tuple(kSlicesReachedEndOfSequence, i), "_"),
&reached_end_of_sequence));
slices_.push_back(std::make_unique<Slice>(
start, end, static_cast<bool>(reached_end_of_sequence)));
}
data_produced_ = reader->Contains(this->prefix(), kDataProduced);
return absl::OkStatus();
}
TraceMeMetadata GetTraceMeMetadata() const override {
return this->dataset()->traceme_metadata_;
}
private:
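    // Half-open range [start, end) of logical buffer positions that hold the
    // not-yet-emitted elements of one epoch.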
struct Slice {
Slice(int64_t start, int64_t end, bool reached_end_of_sequence)
: start(start),
end(end),
reached_end_of_sequence(reached_end_of_sequence) {}
int64_t start;
int64_t end;
bool reached_end_of_sequence = false;
};
random::SingleSampleAdapter<random::PhiloxRandom>::ResultType Random()
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
num_random_samples_++;
auto out = generator_();
return out;
}
bool IsServingSliceComplete() TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
for (auto& slice : slices_) {
if (slice->start != slice->end) {
return slice->reached_end_of_sequence;
}
}
return false;
}
bool IsShuffleAll() TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
return dataset()->buffer_size_ == kUnknownCardinality;
}
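    // Pulls elements from the input iterator into the shuffle buffer,
    // starting a new epoch whenever the input is exhausted and more
    // repetitions are requested.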
Status FillBuffer(IteratorContext* ctx) TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
int64_t start_micros = EnvTime::NowMicros();
int64_t num_log_entries = 0;
while (ShouldFillBuffer()) {
if (EnvTime::NowMicros() >
((num_log_entries + 1) * kLogIntervalMicros) + start_micros) {
num_log_entries++;
LOG_EVERY_N_SEC(INFO, 10)
<< dataset()->metadata().name() << ": "
<< "Filling up shuffle buffer (this may take a while): "
<< num_elements_ << " of " << BufferSizeString();
}
if (!input_impl_) {
TF_RETURN_IF_ERROR(PrepareNextEpoch(ctx));
}
std::vector<Tensor> input_element;
bool end_of_input_sequence = false;
TF_RETURN_IF_ERROR(
input_impl_->GetNext(ctx, &input_element, &end_of_input_sequence));
if (end_of_input_sequence) {
slices_.back()->reached_end_of_sequence = true;
}
if (!end_of_input_sequence) {
AddToShuffleBuffer(ctx, std::move(input_element));
continue;
}
input_impl_.reset();
if (ctx->split_providers().empty() && !data_produced_ &&
this->dataset()->count_ == -1) {
return absl::OkStatus();
}
}
if (num_log_entries > 0) {
LOG(INFO) << "Shuffle buffer filled.";
}
return absl::OkStatus();
}
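    // Stops filling once the requested number of epochs is done, once more
    // than kMaxEpochsInBuffer epochs are queued, or once a bounded buffer is
    // full.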
bool ShouldFillBuffer() TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
if (!input_impl_ && dataset()->count_ != -1 &&
epoch_ >= dataset()->count_) {
return false;
}
if (slices_.size() > kMaxEpochsInBuffer && num_elements_ > 0) {
return false;
}
if (IsShuffleAll() && (slices_.empty() || !IsServingSliceComplete())) {
return true;
}
return num_elements_ < buffer_->size();
}
Status PrepareNextEpoch(IteratorContext* ctx)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
if (epoch_ == 0) {
slices_.push_back(std::make_unique<Slice>(0, 0, false));
} else {
int64_t n = slices_.back()->end;
slices_.push_back(std::make_unique<Slice>(n, n, false));
for (const auto& provider : ctx->split_providers()) {
TF_RETURN_IF_ERROR(provider->Reset());
}
}
TF_RETURN_IF_ERROR(this->dataset()->input_->MakeIterator(
ctx, this, this->prefix(), &input_impl_));
epoch_++;
return absl::OkStatus();
}
void AddToShuffleBuffer(IteratorContext* ctx, std::vector<Tensor>&& element)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
data_produced_ = true;
if (num_elements_ == 0) {
VLOG(1) << "Starting to fill up shuffle buffer of size: "
<< BufferSizeString();
}
this->RecordBufferEnqueue(ctx, element);
if (num_elements_ == buffer_->size()) {
DCHECK(IsShuffleAll());
checkpoint_indices_.insert(buffer_->size());
buffer_->push_back(element);
} else {
size_t index = slices_.back()->end % buffer_->size();
checkpoint_indices_.insert(index);
buffer_->at(index) = std::move(element);
}
num_elements_++;
slices_.back()->end++;
}
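    // Drops fully consumed epoch slices and reseeds the RNG so that the next
    // epoch is shuffled with fresh seeds (when the seed generator allows
    // reshuffling).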
void ClearEmptySlices() TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
while (slices_.front()->start == slices_.front()->end) {
slices_.pop_front();
num_random_samples_ = 0;
seed_generator_->GenerateSeeds(&seed_, &seed2_);
ResetRngs();
}
}
std::string BufferSizeString() {
return absl::StrCat(dataset()->buffer_size_);
}
mutex mu_;
SeedGenerator* const seed_generator_ TF_GUARDED_BY(mu_);
std::unique_ptr<std::vector<std::vector<Tensor>>> buffer_
TF_GUARDED_BY(mu_);
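    // Buffer positions modified since the last save; symbolic checkpoints
    // persist only these elements instead of the whole buffer.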
absl::flat_hash_set<int64_t> checkpoint_indices_ TF_GUARDED_BY(mu_);
std::unique_ptr<IteratorBase> input_impl_ TF_GUARDED_BY(mu_) = nullptr;
int64_t epoch_ TF_GUARDED_BY(mu_) = 0;
int64_t num_elements_ TF_GUARDED_BY(mu_) = 0;
int64_t seed_ TF_GUARDED_BY(mu_) = 0;
int64_t seed2_ TF_GUARDED_BY(mu_) = 0;
std::deque<std::unique_ptr<Slice>> slices_ TF_GUARDED_BY(mu_);
random::PhiloxRandom parent_generator_ TF_GUARDED_BY(mu_);
random::SingleSampleAdapter<random::PhiloxRandom> generator_
TF_GUARDED_BY(mu_);
int64_t num_random_samples_ TF_GUARDED_BY(mu_) = 0;
bool data_produced_ TF_GUARDED_BY(mu_) = false;
};
const DatasetBase* const input_;
const int64_t buffer_size_;
const std::shared_ptr<SeedGenerator> seed_generator_;
const int64_t count_;
const TraceMeMetadata traceme_metadata_;
mutable mutex mu_;
mutable std::vector<std::int64_t> shuffled_indices_ TF_GUARDED_BY(mu_);
};
class ShuffleDatasetOp::Dataset : public ShuffleDatasetBase {
public:
Dataset(OpKernelContext* ctx, const DatasetBase* input, int64_t buffer_size,
int64_t count, RandomSeeds&& seeds, SeedGeneratorManager* manager,
ResourceHandle&& resource_handle)
: ShuffleDatasetBase(ctx, input, buffer_size, manager->get(), count),
manager_(manager),
resource_handle_(std::move(resource_handle)),
resource_mgr_(ctx->resource_manager()),
seeds_(std::move(seeds)) {}
~Dataset() override {
manager_->Unref();
Status s = resource_mgr_->Delete<SeedGeneratorManager>(
resource_handle_.container(), resource_handle_.name());
if (!s.ok()) {
LOG(WARNING) << "Failed to delete RNG resource: " << s.ToString();
}
}
string op_type() const override { return kDatasetType; }
protected:
Status AsGraphDefInternal(SerializationContext* ctx,
DatasetGraphDefBuilder* b,
Node** output) const override {
Node* input_graph_node = nullptr;
TF_RETURN_IF_ERROR(b->AddInputDataset(ctx, input_, &input_graph_node));
Node* buffer_size_node = nullptr;
Node* seed_node = nullptr;
Node* seed2_node = nullptr;
AttrValue reshuffle_each_iteration;
TF_RETURN_IF_ERROR(b->AddScalar(buffer_size_, &buffer_size_node));
TF_RETURN_IF_ERROR(b->AddScalar(seeds_.input_seed(), &seed_node));
TF_RETURN_IF_ERROR(b->AddScalar(seeds_.input_seed2(), &seed2_node));
b->BuildAttrValue(seed_generator_->reshuffle_each_iteration(),
&reshuffle_each_iteration);
    TF_RETURN_IF_ERROR(b->AddDataset(
        this,
        /*inputs=*/{input_graph_node, buffer_size_node, seed_node, seed2_node},
        /*attrs=*/
        {std::make_pair(kReshuffleEachIteration, reshuffle_each_iteration)},
        output));
return absl::OkStatus();
}
private:
SeedGeneratorManager* const manager_;
const ResourceHandle resource_handle_;
ResourceMgr* const resource_mgr_;
const RandomSeeds seeds_;
};
class ShuffleDatasetOp::DatasetV2 : public ShuffleDatasetBase {
public:
DatasetV2(OpKernelContext* ctx, const DatasetBase* input, int64_t buffer_size,
int64_t count, SeedGeneratorManager* manager,
ResourceHandle&& resource_handle, bool owns_resource)
: ShuffleDatasetBase(ctx, input, buffer_size, manager->get(), count),
manager_(manager),
owns_resource_(owns_resource),
resource_handle_(std::move(resource_handle)),
resource_mgr_(ctx->resource_manager()) {}
~DatasetV2() override {
manager_->Unref();
if (owns_resource_) {
Status s = resource_mgr_->Delete<SeedGeneratorManager>(
resource_handle_.container(), resource_handle_.name());
if (!s.ok()) {
LOG(WARNING) << "Failed to delete RNG resource: " << s.ToString();
}
}
}
string op_type() const override { return kDatasetType; }
protected:
Status AsGraphDefInternal(SerializationContext* ctx,
DatasetGraphDefBuilder* b,
Node** output) const override {
Node* input_graph_node = nullptr;
TF_RETURN_IF_ERROR(b->AddInputDataset(ctx, input_, &input_graph_node));
Node* buffer_size_node = nullptr;
TF_RETURN_IF_ERROR(b->AddScalar(buffer_size_, &buffer_size_node));
Node* resource_handle_node = nullptr;
Tensor handle(DT_RESOURCE, TensorShape({}));
handle.scalar<ResourceHandle>()() = resource_handle_;
TF_RETURN_IF_ERROR(b->AddTensor(handle, &resource_handle_node));
    TF_RETURN_IF_ERROR(b->AddDataset(
        this,
        /*inputs=*/{input_graph_node, buffer_size_node, resource_handle_node},
        /*attrs=*/{},
        output));
return absl::OkStatus();
}
private:
SeedGeneratorManager* const manager_;
const bool owns_resource_;
const ResourceHandle resource_handle_;
ResourceMgr* const resource_mgr_;
};
class ShuffleDatasetOp::DatasetV3 : public ShuffleDatasetBase {
public:
DatasetV3(OpKernelContext* ctx, const DatasetBase* input, int64_t buffer_size,
int64_t count, RandomSeeds&& seeds, SeedGeneratorManager* manager,
ResourceHandle&& resource_handle, bool owns_resource)
: ShuffleDatasetBase(ctx, input, buffer_size, manager->get(), count),
manager_(manager),
owns_resource_(owns_resource),
resource_handle_(std::move(resource_handle)),
resource_mgr_(ctx->resource_manager()),
seeds_(std::move(seeds)) {}
~DatasetV3() override {
manager_->Unref();
if (owns_resource_) {
Status s = resource_mgr_->Delete<SeedGeneratorManager>(
resource_handle_.container(), resource_handle_.name());
if (!s.ok()) {
LOG(WARNING) << "Failed to delete RNG resource: " << s.ToString();
}
}
}
string op_type() const override { return kDatasetType; }
protected:
Status AsGraphDefInternal(SerializationContext* ctx,
DatasetGraphDefBuilder* b,
Node** output) const override {
Node* input_graph_node = nullptr;
TF_RETURN_IF_ERROR(b->AddInputDataset(ctx, input_, &input_graph_node));
Node* buffer_size_node = nullptr;
Node* seed_node = nullptr;
Node* seed2_node = nullptr;
TF_RETURN_IF_ERROR(b->AddScalar(buffer_size_, &buffer_size_node));
TF_RETURN_IF_ERROR(b->AddScalar(seeds_.input_seed(), &seed_node));
TF_RETURN_IF_ERROR(b->AddScalar(seeds_.input_seed2(), &seed2_node));
Node* resource_handle_node = nullptr;
Tensor handle(DT_RESOURCE, TensorShape({}));
handle.scalar<ResourceHandle>()() = resource_handle_;
TF_RETURN_IF_ERROR(b->AddTensor(handle, &resource_handle_node));
AttrValue reshuffle_each_iteration;
b->BuildAttrValue(seed_generator_->reshuffle_each_iteration(),
&reshuffle_each_iteration);
TF_RETURN_IF_ERROR(
b->AddDataset(this,
{input_graph_node, buffer_size_node, seed_node,
seed2_node, resource_handle_node},
{std::make_pair(kReshuffleEachIteration,
reshuffle_each_iteration)},
output));
return absl::OkStatus();
}
private:
SeedGeneratorManager* const manager_;
const bool owns_resource_;
const ResourceHandle resource_handle_;
ResourceMgr* const resource_mgr_;
const RandomSeeds seeds_;
};
ShuffleDatasetOp::ShuffleDatasetOp(OpKernelConstruction* ctx)
: ShuffleDatasetOpBase(ctx) {
auto& op_name = ctx->def().op();
if (op_name == kShuffleDatasetV3) {
op_version_ = 3;
} else if (op_name == kShuffleDatasetV2) {
op_version_ = 2;
} else if (op_name == kShuffleDatasetV1) {
op_version_ = 1;
}
if (ctx->HasAttr(kReshuffleEachIteration)) {
OP_REQUIRES_OK(
ctx, ctx->GetAttr(kReshuffleEachIteration, &reshuffle_each_iteration_));
}
}
void ShuffleDatasetOp::MakeDataset(OpKernelContext* ctx, DatasetBase* input,
DatasetBase** output) {
int64_t buffer_size = 0;
OP_REQUIRES_OK(ctx,
ParseScalarArgument<int64_t>(ctx, kBufferSize, &buffer_size));
OP_REQUIRES(
ctx, buffer_size > 0 || buffer_size == kUnknownCardinality,
errors::InvalidArgument(
"buffer_size must be greater than zero or UNKNOWN_CARDINALITY"));
int64_t count = 1;
static std::atomic<int64_t> resource_id_counter(0);
const string& container = ctx->resource_manager()->default_container();
auto name = strings::StrCat(ctx->op_kernel().name(), "/", kSeedGenerator, "_",
resource_id_counter.fetch_add(1));
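  // V3 consumes explicit seeds plus a seed generator resource handle, V2
  // consumes only the handle (seeds live in the resource), and V1 consumes
  // only the seeds.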
if (op_version_ == 3) {
auto handle = HandleFromInput(ctx, 4);
SeedGeneratorManager* manager = nullptr;
Status s = ctx->resource_manager()->Lookup<SeedGeneratorManager>(
handle.container(), handle.name(), &manager);
int64_t seed;
OP_REQUIRES_OK(ctx, ParseScalarArgument<int64_t>(ctx, kSeed, &seed));
int64_t seed2;
OP_REQUIRES_OK(ctx, ParseScalarArgument<int64_t>(ctx, kSeed2, &seed2));
RandomSeeds seeds(seed, seed2);
bool owns_resource = false;
if (errors::IsNotFound(s)) {
owns_resource = true;
OP_REQUIRES_OK(
ctx,
ctx->resource_manager()->LookupOrCreate<SeedGeneratorManager>(
container, name, &manager,
[reshuffle = reshuffle_each_iteration_,
&seeds](SeedGeneratorManager** manager) {
if (reshuffle) {
*manager =
new SeedGeneratorManager(new RandomSeedGenerator(seeds));
} else {
*manager =
new SeedGeneratorManager(new FixedSeedGenerator(seeds));
}
return absl::OkStatus();
}));
handle = MakeResourceHandle<SeedGenerator>(ctx, container, name);
} else {
OP_REQUIRES_OK(ctx, s);
}
*output = new ShuffleDatasetOp::DatasetV3(ctx, input, buffer_size, count,
std::move(seeds), manager,
std::move(handle), owns_resource);
} else if (op_version_ == 2) {
auto handle = HandleFromInput(ctx, 2);
SeedGeneratorManager* manager = nullptr;
Status s = ctx->resource_manager()->Lookup<SeedGeneratorManager>(
handle.container(), handle.name(), &manager);
bool owns_resource = false;
if (errors::IsNotFound(s)) {
owns_resource = true;
LOG(WARNING) << "Failed to find seed generator resource. Falling back to "
"using a non-deterministically seeded generator and "
"reshuffling each iteration.";
RandomSeeds seeds(0, 0);
OP_REQUIRES_OK(
ctx, ctx->resource_manager()->LookupOrCreate<SeedGeneratorManager>(
container, name, &manager,
[&seeds](SeedGeneratorManager** manager) {
*manager = new SeedGeneratorManager(
new RandomSeedGenerator(seeds));
return absl::OkStatus();
}));
handle = MakeResourceHandle<SeedGeneratorManager>(ctx, container, name);
} else {
OP_REQUIRES_OK(ctx, s);
}
*output =
new ShuffleDatasetOp::DatasetV2(ctx, input, buffer_size, count, manager,
std::move(handle), owns_resource);
} else {
if (op_version_ != 1) {
LOG(WARNING) << "Unsupported version of shuffle dataset op: "
<< op_version_ << ". Defaulting to version 1.";
}
int64_t seed;
OP_REQUIRES_OK(ctx, ParseScalarArgument<int64_t>(ctx, kSeed, &seed));
int64_t seed2;
OP_REQUIRES_OK(ctx, ParseScalarArgument<int64_t>(ctx, kSeed2, &seed2));
RandomSeeds seeds(seed, seed2);
SeedGeneratorManager* manager;
OP_REQUIRES_OK(
ctx,
ctx->resource_manager()->LookupOrCreate<SeedGeneratorManager>(
container, name, &manager,
[reshuffle = reshuffle_each_iteration_,
&seeds](SeedGeneratorManager** manager) {
if (reshuffle) {
*manager =
new SeedGeneratorManager(new RandomSeedGenerator(seeds));
} else {
*manager =
new SeedGeneratorManager(new FixedSeedGenerator(seeds));
}
return absl::OkStatus();
}));
auto handle =
MakeResourceHandle<SeedGeneratorManager>(ctx, container, name);
*output = new ShuffleDatasetOp::Dataset(ctx, input, buffer_size, count,
std::move(seeds), manager,
std::move(handle));
}
}
class ShuffleAndRepeatDatasetOp::Dataset : public ShuffleDatasetBase {
public:
Dataset(OpKernelContext* ctx, const DatasetBase* input, int64_t buffer_size,
RandomSeeds&& seeds, SeedGeneratorManager* manager, int64_t count,
ResourceHandle&& resource_handle)
: ShuffleDatasetBase(ctx, input, buffer_size, manager->get(), count),
manager_(manager),
resource_handle_(std::move(resource_handle)),
resource_mgr_(ctx->resource_manager()),
seeds_(std::move(seeds)) {}
~Dataset() override {
manager_->Unref();
Status s = resource_mgr_->Delete<SeedGeneratorManager>(
resource_handle_.container(), resource_handle_.name());
if (!s.ok()) {
LOG(WARNING) << "Failed to delete RNG resource: " << s.ToString();
}
}
string op_type() const override { return kDatasetType; }
protected:
Status AsGraphDefInternal(SerializationContext* ctx,
DatasetGraphDefBuilder* b,
Node** output) const override {
Node* input_graph_node = nullptr;
TF_RETURN_IF_ERROR(b->AddInputDataset(ctx, input_, &input_graph_node));
Node* buffer_size = nullptr;
Node* seed = nullptr;
Node* seed2 = nullptr;
Node* count = nullptr;
TF_RETURN_IF_ERROR(b->AddScalar(buffer_size_, &buffer_size));
TF_RETURN_IF_ERROR(b->AddScalar(seeds_.input_seed(), &seed));
TF_RETURN_IF_ERROR(b->AddScalar(seeds_.input_seed2(), &seed2));
TF_RETURN_IF_ERROR(b->AddScalar(count_, &count));
AttrValue reshuffle_each_iteration;
b->BuildAttrValue(seed_generator_->reshuffle_each_iteration(),
&reshuffle_each_iteration);
    TF_RETURN_IF_ERROR(b->AddDataset(
        this, /*inputs=*/{input_graph_node, buffer_size, seed, seed2, count},
        /*attrs=*/
        {std::make_pair(kReshuffleEachIteration, reshuffle_each_iteration)},
        output));
return absl::OkStatus();
}
private:
SeedGeneratorManager* const manager_;
const ResourceHandle resource_handle_;
ResourceMgr* const resource_mgr_;
const RandomSeeds seeds_;
};
class ShuffleAndRepeatDatasetOp::DatasetV2 : public ShuffleDatasetBase {
public:
DatasetV2(OpKernelContext* ctx, const DatasetBase* input, int64_t buffer_size,
int64_t count, RandomSeeds&& seeds, SeedGeneratorManager* manager,
ResourceHandle&& resource_handle, bool owns_resource)
: ShuffleDatasetBase(ctx, input, buffer_size, manager->get(), count),
manager_(manager),
owns_resource_(owns_resource),
resource_handle_(std::move(resource_handle)),
resource_mgr_(ctx->resource_manager()),
seeds_(std::move(seeds)) {}
~DatasetV2() override {
manager_->Unref();
if (owns_resource_) {
Status s = resource_mgr_->Delete<SeedGeneratorManager>(
resource_handle_.container(), resource_handle_.name());
if (!s.ok()) {
LOG(WARNING) << "Failed to delete RNG resource: " << s.ToString();
}
}
}
string op_type() const override { return kDatasetType; }
protected:
Status AsGraphDefInternal(SerializationContext* ctx,
DatasetGraphDefBuilder* b,
Node** output) const override {
Node* input_graph_node = nullptr;
TF_RETURN_IF_ERROR(b->AddInputDataset(ctx, input_, &input_graph_node));
Node* buffer_size_node = nullptr;
Node* seed_node = nullptr;
Node* seed2_node = nullptr;
Node* count_node = nullptr;
TF_RETURN_IF_ERROR(b->AddScalar(buffer_size_, &buffer_size_node));
TF_RETURN_IF_ERROR(b->AddScalar(seeds_.input_seed(), &seed_node));
TF_RETURN_IF_ERROR(b->AddScalar(seeds_.input_seed2(), &seed2_node));
TF_RETURN_IF_ERROR(b->AddScalar(count_, &count_node));
Node* resource_handle_node = nullptr;
Tensor handle(DT_RESOURCE, TensorShape({}));
handle.scalar<ResourceHandle>()() = resource_handle_;
TF_RETURN_IF_ERROR(b->AddTensor(handle, &resource_handle_node));
AttrValue reshuffle_each_iteration;
b->BuildAttrValue(seed_generator_->reshuffle_each_iteration(),
&reshuffle_each_iteration);
TF_RETURN_IF_ERROR(
b->AddDataset(this,
{input_graph_node, buffer_size_node, seed_node,
seed2_node, count_node, resource_handle_node},
{std::make_pair(kReshuffleEachIteration,
reshuffle_each_iteration)},
output));
return absl::OkStatus();
}
private:
SeedGeneratorManager* const manager_;
const bool owns_resource_;
const ResourceHandle resource_handle_;
ResourceMgr* const resource_mgr_;
const RandomSeeds seeds_;
};
ShuffleAndRepeatDatasetOp::ShuffleAndRepeatDatasetOp(OpKernelConstruction* ctx)
: ShuffleDatasetOpBase(ctx) {
auto& op_name = ctx->def().op();
if (op_name == kShuffleAndRepeatDatasetV2) {
op_version_ = 2;
} else if (op_name == kShuffleAndRepeatDatasetV1) {
op_version_ = 1;
}
if (ctx->HasAttr(kReshuffleEachIteration)) {
OP_REQUIRES_OK(
ctx, ctx->GetAttr(kReshuffleEachIteration, &reshuffle_each_iteration_));
}
}
void ShuffleAndRepeatDatasetOp::MakeDataset(OpKernelContext* ctx,
DatasetBase* input,
DatasetBase** output) {
int64_t buffer_size = 0;
OP_REQUIRES_OK(ctx,
ParseScalarArgument<int64_t>(ctx, kBufferSize, &buffer_size));
OP_REQUIRES(
ctx, buffer_size > 0 || buffer_size == kUnknownCardinality,
errors::InvalidArgument(
"buffer_size must be greater than zero or UNKNOWN_CARDINALITY"));
int64_t seed;
OP_REQUIRES_OK(ctx, ParseScalarArgument<int64_t>(ctx, kSeed, &seed));
int64_t seed2;
OP_REQUIRES_OK(ctx, ParseScalarArgument<int64_t>(ctx, kSeed2, &seed2));
int64_t count;
OP_REQUIRES_OK(ctx, ParseScalarArgument<int64_t>(ctx, kCount, &count));
OP_REQUIRES(ctx, count > 0 || count == -1,
errors::InvalidArgument(
"count must be greater than zero or equal to -1."));
RandomSeeds seeds(seed, seed2);
static std::atomic<int64_t> resource_id_counter(0);
const string& container = ctx->resource_manager()->default_container();
auto name = strings::StrCat(ctx->op_kernel().name(), "/", kSeedGenerator, "_",
resource_id_counter.fetch_add(1));
if (op_version_ == 2) {
auto handle = HandleFromInput(ctx, 5);
SeedGeneratorManager* manager = nullptr;
Status s = ctx->resource_manager()->Lookup<SeedGeneratorManager>(
handle.container(), handle.name(), &manager);
bool owns_resource = false;
if (errors::IsNotFound(s)) {
owns_resource = true;
OP_REQUIRES_OK(
ctx,
ctx->resource_manager()->LookupOrCreate<SeedGeneratorManager>(
container, name, &manager,
[reshuffle = reshuffle_each_iteration_,
&seeds](SeedGeneratorManager** manager) {
if (reshuffle) {
*manager =
new SeedGeneratorManager(new RandomSeedGenerator(seeds));
} else {
*manager =
new SeedGeneratorManager(new FixedSeedGenerator(seeds));
}
return absl::OkStatus();
}));
handle = MakeResourceHandle<SeedGenerator>(ctx, container, name);
} else {
OP_REQUIRES_OK(ctx, s);
}
*output = new ShuffleAndRepeatDatasetOp::DatasetV2(
ctx, input, buffer_size, count, std::move(seeds), manager,
std::move(handle), owns_resource);
} else {
if (op_version_ != 1) {
LOG(WARNING) << "Unsupported version of shuffle dataset op: "
<< op_version_ << ". Defaulting to version 1.";
}
SeedGeneratorManager* manager;
OP_REQUIRES_OK(
ctx,
ctx->resource_manager()->LookupOrCreate<SeedGeneratorManager>(
container, name, &manager,
[reshuffle = reshuffle_each_iteration_,
&seeds](SeedGeneratorManager** manager) {
if (reshuffle) {
*manager =
new SeedGeneratorManager(new RandomSeedGenerator(seeds));
} else {
*manager =
new SeedGeneratorManager(new FixedSeedGenerator(seeds));
}
return absl::OkStatus();
}));
auto handle =
MakeResourceHandle<SeedGeneratorManager>(ctx, container, name);
*output = new Dataset(ctx, input, buffer_size, std::move(seeds), manager,
count, std::move(handle));
}
}
namespace {
REGISTER_KERNEL_BUILDER(Name("ShuffleDataset").Device(DEVICE_CPU),
ShuffleDatasetOp);
REGISTER_KERNEL_BUILDER(Name("ShuffleDatasetV2").Device(DEVICE_CPU),
ShuffleDatasetOp);
REGISTER_KERNEL_BUILDER(Name("ShuffleDatasetV3").Device(DEVICE_CPU),
ShuffleDatasetOp);
REGISTER_KERNEL_BUILDER(Name("ShuffleAndRepeatDataset").Device(DEVICE_CPU),
ShuffleAndRepeatDatasetOp);
REGISTER_KERNEL_BUILDER(Name("ShuffleAndRepeatDatasetV2").Device(DEVICE_CPU),
ShuffleAndRepeatDatasetOp);
}  // namespace
}  // namespace data
} | #include "tensorflow/core/kernels/data/shuffle_dataset_op.h"
#include <string>
#include <utility>
#include "tensorflow/core/data/dataset_test_base.h"
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/data/serialization_utils.h"
namespace tensorflow {
namespace data {
namespace {
constexpr char kShuffleNodeName[] = "shuffle_dataset";
constexpr char kShuffleAndRepeatNodeName[] = "shuffle_and_repeat_dataset";
class ShuffleDatasetParams : public DatasetParams {
public:
template <typename T>
ShuffleDatasetParams(T input_dataset_params, int64_t buffer_size,
int64_t seed, int64_t seed2, int64_t count,
bool reshuffle_each_iteration,
DataTypeVector output_dtypes,
std::vector<PartialTensorShape> output_shapes,
string node_name)
: DatasetParams(std::move(output_dtypes), std::move(output_shapes),
std::move(node_name)),
buffer_size_(buffer_size),
seed_(seed),
seed2_(seed2),
count_(count),
reshuffle_each_iteration_(reshuffle_each_iteration) {
input_dataset_params_.push_back(std::make_unique<T>(input_dataset_params));
iterator_prefix_ =
name_utils::IteratorPrefix(input_dataset_params.dataset_type(),
input_dataset_params.iterator_prefix());
}
std::vector<Tensor> GetInputTensors() const override {
std::vector<Tensor> input_tensors = {
CreateTensor<int64_t>(TensorShape({}), {buffer_size_}),
CreateTensor<int64_t>(TensorShape({}), {seed_}),
CreateTensor<int64_t>(TensorShape({}), {seed2_})};
if (count_ != 1) {
input_tensors.emplace_back(
CreateTensor<int64_t>(TensorShape({}), {count_}));
}
return input_tensors;
}
Status GetInputNames(std::vector<string>* input_names) const override {
input_names->clear();
input_names->emplace_back(ShuffleDatasetOpBase::kInputDataset);
input_names->emplace_back(ShuffleDatasetOpBase::kBufferSize);
input_names->emplace_back(ShuffleDatasetOpBase::kSeed);
input_names->emplace_back(ShuffleDatasetOpBase::kSeed2);
if (count_ != 1) {
input_names->emplace_back(ShuffleAndRepeatDatasetOp::kCount);
}
return absl::OkStatus();
}
Status GetAttributes(AttributeVector* attr_vector) const override {
attr_vector->clear();
attr_vector->emplace_back("output_types", output_dtypes_);
attr_vector->emplace_back("output_shapes", output_shapes_);
attr_vector->emplace_back("reshuffle_each_iteration",
reshuffle_each_iteration_);
attr_vector->emplace_back("metadata", "");
return absl::OkStatus();
}
string dataset_type() const override {
if (count_ != 1) {
return ShuffleAndRepeatDatasetOp::kDatasetType;
}
return ShuffleDatasetOp::kDatasetType;
}
int64_t count() const { return count_; }
private:
int64_t buffer_size_;
int64_t seed_;
int64_t seed2_;
int64_t count_;
bool reshuffle_each_iteration_;
};
class ShuffleDatasetOpTest : public DatasetOpsTestBase {};
ShuffleDatasetParams ShuffleDatasetParams1() {
  return ShuffleDatasetParams(RangeDatasetParams(0, 10, 1),
                              /*buffer_size=*/3,
                              /*seed=*/1,
                              /*seed2=*/2,
                              /*count=*/1,
                              /*reshuffle_each_iteration=*/false,
                              /*output_dtypes=*/{DT_INT64},
                              /*output_shapes=*/{PartialTensorShape({})},
                              /*node_name=*/kShuffleNodeName);
}
ShuffleDatasetParams ShuffleDatasetParams2() {
  return ShuffleDatasetParams(RangeDatasetParams(0, 10, 1),
                              /*buffer_size=*/10,
                              /*seed=*/1,
                              /*seed2=*/2,
                              /*count=*/1,
                              /*reshuffle_each_iteration=*/true,
                              /*output_dtypes=*/{DT_INT64},
                              /*output_shapes=*/{PartialTensorShape({})},
                              /*node_name=*/kShuffleNodeName);
}
ShuffleDatasetParams ShuffleDatasetParams3() {
  return ShuffleDatasetParams(RangeDatasetParams(0, 10, 1),
                              /*buffer_size=*/2,
                              /*seed=*/1,
                              /*seed2=*/2,
                              /*count=*/1,
                              /*reshuffle_each_iteration=*/true,
                              /*output_dtypes=*/{DT_INT64},
                              /*output_shapes=*/{PartialTensorShape({})},
                              /*node_name=*/kShuffleNodeName);
}
ShuffleDatasetParams ShuffleDatasetParams4() {
  return ShuffleDatasetParams(RangeDatasetParams(0, 10, 1),
                              /*buffer_size=*/10,
                              /*seed=*/2,
                              /*seed2=*/2,
                              /*count=*/1,
                              /*reshuffle_each_iteration=*/true,
                              /*output_dtypes=*/{DT_INT64},
                              /*output_shapes=*/{PartialTensorShape({})},
                              /*node_name=*/kShuffleNodeName);
}
ShuffleDatasetParams ShuffleDatasetParams5() {
  return ShuffleDatasetParams(RangeDatasetParams(0, 10, 1),
                              /*buffer_size=*/1,
                              /*seed=*/1,
                              /*seed2=*/2,
                              /*count=*/1,
                              /*reshuffle_each_iteration=*/true,
                              /*output_dtypes=*/{DT_INT64},
                              /*output_shapes=*/{PartialTensorShape({})},
                              /*node_name=*/kShuffleNodeName);
}
ShuffleDatasetParams ShuffleDatasetParams6() {
  return ShuffleDatasetParams(RangeDatasetParams(0, 0, 1),
                              /*buffer_size=*/10,
                              /*seed=*/1,
                              /*seed2=*/2,
                              /*count=*/1,
                              /*reshuffle_each_iteration=*/true,
                              /*output_dtypes=*/{DT_INT64},
                              /*output_shapes=*/{PartialTensorShape({})},
                              /*node_name=*/kShuffleNodeName);
}
ShuffleDatasetParams ShuffleDatasetParams7() {
  return ShuffleDatasetParams(RangeDatasetParams(0, 10, 1),
                              /*buffer_size=*/10,
                              /*seed=*/1,
                              /*seed2=*/2,
                              /*count=*/2,
                              /*reshuffle_each_iteration=*/false,
                              /*output_dtypes=*/{DT_INT64},
                              /*output_shapes=*/{PartialTensorShape({})},
                              /*node_name=*/kShuffleAndRepeatNodeName);
}
ShuffleDatasetParams ShuffleDatasetParams8() {
  return ShuffleDatasetParams(RangeDatasetParams(0, 3, 1),
                              /*buffer_size=*/10,
                              /*seed=*/1,
                              /*seed2=*/2,
                              /*count=*/-1,
                              /*reshuffle_each_iteration=*/false,
                              /*output_dtypes=*/{DT_INT64},
                              /*output_shapes=*/{PartialTensorShape({})},
                              /*node_name=*/kShuffleAndRepeatNodeName);
}
ShuffleDatasetParams ShuffleDatasetParamsWithUnknownCardinality() {
  return ShuffleDatasetParams(RangeDatasetParams(0, 10, 1),
                              /*buffer_size=*/-2,
                              /*seed=*/1,
                              /*seed2=*/2,
                              /*count=*/1,
                              /*reshuffle_each_iteration=*/true,
                              /*output_dtypes=*/{DT_INT64},
                              /*output_shapes=*/{PartialTensorShape({})},
                              /*node_name=*/kShuffleNodeName);
}
ShuffleDatasetParams ShuffleDatasetParamsWithInvalidBufferSize() {
  return ShuffleDatasetParams(RangeDatasetParams(0, 0, 1),
                              /*buffer_size=*/-1,
                              /*seed=*/1,
                              /*seed2=*/2,
                              /*count=*/1,
                              /*reshuffle_each_iteration=*/false,
                              /*output_dtypes=*/{DT_INT64},
                              /*output_shapes=*/{PartialTensorShape({})},
                              /*node_name=*/kShuffleNodeName);
}
ShuffleDatasetParams ShuffleAndRepeatDatasetParamsWithInvalidBufferSize() {
  return ShuffleDatasetParams(RangeDatasetParams(0, 0, 1),
                              /*buffer_size=*/-1,
                              /*seed=*/1,
                              /*seed2=*/2,
                              /*count=*/2,
                              /*reshuffle_each_iteration=*/false,
                              /*output_dtypes=*/{DT_INT64},
                              /*output_shapes=*/{PartialTensorShape({})},
                              /*node_name=*/kShuffleAndRepeatNodeName);
}
ShuffleDatasetParams ShuffleAndRepeatDatasetParamsWithInvalidCount() {
  return ShuffleDatasetParams(RangeDatasetParams(0, 0, 1),
                              /*buffer_size=*/10,
                              /*seed=*/1,
                              /*seed2=*/2,
                              /*count=*/0,
                              /*reshuffle_each_iteration=*/false,
                              /*output_dtypes=*/{DT_INT64},
                              /*output_shapes=*/{PartialTensorShape({})},
                              /*node_name=*/kShuffleAndRepeatNodeName);
}
template <typename T>
struct GetNextTestCase {
T dataset_params;
std::vector<Tensor> expected_shuffle_outputs;
std::vector<Tensor> expected_reshuffle_outputs;
};
std::vector<GetNextTestCase<ShuffleDatasetParams>> GetNextTestCases() {
return {
{ShuffleDatasetParams1(),
CreateTensors<int64_t>(
TensorShape({}), {{2}, {3}, {0}, {5}, {6}, {4}, {7}, {8}, {9}, {1}}),
CreateTensors<int64_t>(
TensorShape({}),
{{2}, {3}, {0}, {5}, {6}, {4}, {7}, {8}, {9}, {1}})},
{ShuffleDatasetParams2(),
CreateTensors<int64_t>(
TensorShape({}), {{2}, {6}, {1}, {3}, {9}, {5}, {0}, {8}, {7}, {4}}),
CreateTensors<int64_t>(
TensorShape({}),
{{1}, {6}, {0}, {5}, {2}, {7}, {4}, {3}, {9}, {8}})},
{ShuffleDatasetParams3(),
CreateTensors<int64_t>(
TensorShape({}), {{0}, {2}, {1}, {3}, {5}, {6}, {4}, {7}, {8}, {9}}),
CreateTensors<int64_t>(
TensorShape({}),
{{1}, {0}, {2}, {3}, {4}, {5}, {6}, {7}, {9}, {8}})},
{ShuffleDatasetParams4(),
CreateTensors<int64_t>(
TensorShape({}), {{3}, {0}, {8}, {1}, {5}, {4}, {7}, {2}, {6}, {9}}),
CreateTensors<int64_t>(
TensorShape({}),
{{4}, {6}, {9}, {0}, {1}, {8}, {2}, {7}, {3}, {5}})},
{ShuffleDatasetParams5(),
CreateTensors<int64_t>(
TensorShape({}), {{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}, {9}}),
CreateTensors<int64_t>(
TensorShape({}),
{{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}, {9}})},
      {ShuffleDatasetParams6(),
       /*expected_shuffle_outputs=*/{},
       /*expected_reshuffle_outputs=*/{}},
{ShuffleDatasetParams7(),
CreateTensors<int64_t>(
TensorShape({}), {{9}, {0}, {8}, {6}, {1}, {3}, {7}, {2}, {4}, {5},
{9}, {0}, {8}, {6}, {1}, {3}, {7}, {2}, {4}, {5}}),
CreateTensors<int64_t>(
TensorShape({}),
{{9}, {0}, {8}, {6}, {1}, {3}, {7}, {2}, {4}, {5},
{9}, {0}, {8}, {6}, {1}, {3}, {7}, {2}, {4}, {5}})},
{ShuffleDatasetParams8(),
CreateTensors<int64_t>(
TensorShape({}),
{{2}, {0}, {1}, {2}, {0}, {1}, {2}, {0}, {1}, {2}, {0},
{1}, {2}, {0}, {1}, {2}, {0}, {1}, {2}, {0}, {1}}),
CreateTensors<int64_t>(
TensorShape({}),
{{2}, {0}, {1}, {2}, {0}, {1}, {2}, {0}, {1}, {2}, {0},
{1}, {2}, {0}, {1}, {2}, {0}, {1}, {2}, {0}, {1}})},
{ShuffleDatasetParamsWithUnknownCardinality(),
CreateTensors<int64_t>(
TensorShape({}), {{2}, {6}, {1}, {3}, {9}, {5}, {0}, {8}, {7}, {4}}),
CreateTensors<int64_t>(
TensorShape({}),
{{1}, {6}, {0}, {5}, {2}, {7}, {4}, {3}, {9}, {8}})}};
}
class ParameterizedGetNextTest : public ShuffleDatasetOpTest,
public ::testing::WithParamInterface<
GetNextTestCase<ShuffleDatasetParams>> {};
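// Drains the iterator once for the initial shuffle order, rebuilds it, and
// drains it again to check the reshuffle expectations, breaking out early
// for infinite (count == -1) datasets.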
TEST_P(ParameterizedGetNextTest, GetNext) {
auto test_case = GetParam();
TF_ASSERT_OK(Initialize(test_case.dataset_params));
bool end_of_sequence = false;
std::vector<Tensor> shuffled_out_tensors;
while (!end_of_sequence) {
std::vector<Tensor> next;
TF_EXPECT_OK(
iterator_->GetNext(iterator_ctx_.get(), &next, &end_of_sequence));
shuffled_out_tensors.insert(shuffled_out_tensors.end(), next.begin(),
next.end());
if (test_case.dataset_params.count() == -1 &&
shuffled_out_tensors.size() ==
test_case.expected_shuffle_outputs.size()) {
break;
}
}
end_of_sequence = false;
TF_ASSERT_OK(dataset_->MakeIterator(
iterator_ctx_.get(), nullptr,
test_case.dataset_params.iterator_prefix(), &iterator_));
std::vector<Tensor> reshuffled_out_tensors;
while (!end_of_sequence) {
std::vector<Tensor> next;
TF_EXPECT_OK(
iterator_->GetNext(iterator_ctx_.get(), &next, &end_of_sequence));
reshuffled_out_tensors.insert(reshuffled_out_tensors.end(), next.begin(),
next.end());
if (test_case.dataset_params.count() == -1 &&
reshuffled_out_tensors.size() ==
test_case.expected_shuffle_outputs.size()) {
break;
}
}
  TF_EXPECT_OK(ExpectEqual(shuffled_out_tensors,
                           test_case.expected_shuffle_outputs,
                           /*compare_order=*/true));
  TF_EXPECT_OK(ExpectEqual(reshuffled_out_tensors,
                           test_case.expected_reshuffle_outputs,
                           /*compare_order=*/true));
}
INSTANTIATE_TEST_CASE_P(ShuffleDatasetOpTest, ParameterizedGetNextTest,
::testing::ValuesIn(GetNextTestCases()));
std::vector<DatasetNodeNameTestCase<ShuffleDatasetParams>>
DatasetNodeNameTestCases() {
  return {{ShuffleDatasetParams1(), /*expected_node_name=*/kShuffleNodeName},
          {ShuffleDatasetParams7(),
           /*expected_node_name=*/kShuffleAndRepeatNodeName}};
}
DATASET_NODE_NAME_TEST_P(ShuffleDatasetOpTest, ShuffleDatasetParams,
DatasetNodeNameTestCases())
std::vector<DatasetTypeStringTestCase<ShuffleDatasetParams>>
DatasetTypeStringTestCases() {
  return {{ShuffleDatasetParams1(),
           /*expected_dataset_type_string=*/
           name_utils::OpName(ShuffleDatasetOp::kDatasetType)},
          {ShuffleDatasetParams7(),
           /*expected_dataset_type_string=*/
           name_utils::OpName(ShuffleAndRepeatDatasetOp::kDatasetType)}};
}
DATASET_TYPE_STRING_TEST_P(ShuffleDatasetOpTest, ShuffleDatasetParams,
DatasetTypeStringTestCases())
TEST_F(ShuffleDatasetOpTest, DatasetOutputDtypes) {
auto dataset_params = ShuffleDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetOutputDtypes({DT_INT64}));
}
TEST_F(ShuffleDatasetOpTest, DatasetOutputShapes) {
auto dataset_params = ShuffleDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetOutputShapes(dataset_params.output_shapes()));
}
std::vector<CardinalityTestCase<ShuffleDatasetParams>> CardinalityTestCases() {
  return {{ShuffleDatasetParams1(), /*expected_cardinality=*/10},
          {ShuffleDatasetParams2(), /*expected_cardinality=*/10},
          {ShuffleDatasetParams3(), /*expected_cardinality=*/10},
          {ShuffleDatasetParams4(), /*expected_cardinality=*/10},
          {ShuffleDatasetParams5(), /*expected_cardinality=*/10},
          {ShuffleDatasetParams6(), /*expected_cardinality=*/0},
          {ShuffleDatasetParams7(), /*expected_cardinality=*/20},
          {ShuffleDatasetParams8(),
           /*expected_cardinality=*/kInfiniteCardinality},
          {ShuffleDatasetParamsWithUnknownCardinality(),
           /*expected_cardinality=*/10}};
}
DATASET_CARDINALITY_TEST_P(ShuffleDatasetOpTest, ShuffleDatasetParams,
CardinalityTestCases())
TEST_F(ShuffleDatasetOpTest, IteratorOutputDtypes) {
auto dataset_params = ShuffleDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorOutputDtypes({DT_INT64}));
}
TEST_F(ShuffleDatasetOpTest, IteratorOutputShapes) {
auto dataset_params = ShuffleDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorOutputShapes(dataset_params.output_shapes()));
}
TEST_F(ShuffleDatasetOpTest, IteratorOutputPrefix) {
auto dataset_params = ShuffleDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorPrefix(name_utils::IteratorPrefix(
ShuffleDatasetOp::kDatasetType, dataset_params.iterator_prefix())));
}
template <typename T>
struct IteratorSaveAndRestoreTestCase {
T dataset_params;
std::vector<int> breakpoints;
std::vector<Tensor> expected_shuffle_outputs;
};
std::vector<IteratorSaveAndRestoreTestCase<ShuffleDatasetParams>>
IteratorSaveAndRestoreTestCases() {
return {{ShuffleDatasetParams1(),
{0, 4, 11},
CreateTensors<int64_t>(
TensorShape({}),
{{2}, {3}, {0}, {5}, {6}, {4}, {7}, {8}, {9}, {1}})},
{ShuffleDatasetParams2(),
{0, 4, 11},
CreateTensors<int64_t>(
TensorShape({}),
{{2}, {6}, {1}, {3}, {9}, {5}, {0}, {8}, {7}, {4}})},
{ShuffleDatasetParams3(),
{0, 4, 11},
CreateTensors<int64_t>(
TensorShape({}),
{{0}, {2}, {1}, {3}, {5}, {6}, {4}, {7}, {8}, {9}})},
{ShuffleDatasetParams4(),
{0, 4, 11},
CreateTensors<int64_t>(
TensorShape({}),
{{3}, {0}, {8}, {1}, {5}, {4}, {7}, {2}, {6}, {9}})},
{ShuffleDatasetParams5(),
{0, 4, 11},
CreateTensors<int64_t>(
TensorShape({}),
{{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}, {9}})},
          {ShuffleDatasetParams6(),
           {0, 4, 11},
           /*expected_shuffle_outputs=*/{}},
{ShuffleDatasetParams7(),
{0, 5, 22},
CreateTensors<int64_t>(
TensorShape({}),
{{9}, {0}, {8}, {6}, {1}, {3}, {7}, {2}, {4}, {5},
{9}, {0}, {8}, {6}, {1}, {3}, {7}, {2}, {4}, {5}})},
{ShuffleDatasetParams8(),
{0, 5, 20},
CreateTensors<int64_t>(
TensorShape({}),
{{2}, {0}, {1}, {2}, {0}, {1}, {2}, {0}, {1}, {2}, {0},
{1}, {2}, {0}, {1}, {2}, {0}, {1}, {2}, {0}, {1}})},
{ShuffleDatasetParamsWithUnknownCardinality(),
{0, 4, 11},
CreateTensors<int64_t>(
TensorShape({}),
{{2}, {6}, {1}, {3}, {9}, {5}, {0}, {8}, {7}, {4}})}};
}
class ParameterizedIteratorSaveAndRestoreTest
: public ShuffleDatasetOpTest,
public ::testing::WithParamInterface<
IteratorSaveAndRestoreTestCase<ShuffleDatasetParams>> {};
TEST_P(ParameterizedIteratorSaveAndRestoreTest, IteratorSaveAndRestore) {
auto test_case = GetParam();
TF_ASSERT_OK(Initialize(test_case.dataset_params));
std::unique_ptr<SerializationContext> serialization_ctx;
TF_ASSERT_OK(CreateSerializationContext(&serialization_ctx));
bool end_of_sequence = false;
std::vector<Tensor> out_tensors;
int cur_iteration = 0;
const std::vector<int>& breakpoints = test_case.breakpoints;
for (int breakpoint : breakpoints) {
VariantTensorDataWriter writer;
TF_EXPECT_OK(iterator_->Save(serialization_ctx.get(), &writer));
std::vector<const VariantTensorData*> data;
writer.GetData(&data);
VariantTensorDataReader reader(data);
TF_EXPECT_OK(RestoreIterator(iterator_ctx_.get(), &reader,
test_case.dataset_params.iterator_prefix(),
*dataset_, &iterator_));
while (cur_iteration <= breakpoint) {
std::vector<Tensor> next;
TF_EXPECT_OK(
iterator_->GetNext(iterator_ctx_.get(), &next, &end_of_sequence));
out_tensors.insert(out_tensors.end(), next.begin(), next.end());
cur_iteration++;
}
}
  TF_EXPECT_OK(ExpectEqual(out_tensors, test_case.expected_shuffle_outputs,
                           /*compare_order=*/true));
}
INSTANTIATE_TEST_CASE_P(ShuffleDatasetOpTest,
ParameterizedIteratorSaveAndRestoreTest,
::testing::ValuesIn(IteratorSaveAndRestoreTestCases()));
TEST_F(ShuffleDatasetOpTest, InvalidArguments) {
std::vector<ShuffleDatasetParams> dataset_params_vec(
{ShuffleDatasetParamsWithInvalidBufferSize(),
ShuffleAndRepeatDatasetParamsWithInvalidBufferSize(),
ShuffleAndRepeatDatasetParamsWithInvalidCount()});
for (const auto& dataset_params : dataset_params_vec) {
EXPECT_EQ(Initialize(dataset_params).code(),
absl::StatusCode::kInvalidArgument);
}
}
}  // namespace
}  // namespace data
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/shuffle_dataset_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/shuffle_dataset_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
7bc6ad5c-bb5f-48df-87f8-8becd1d89c17 | cpp | tensorflow/tensorflow | per_image_standardization | tensorflow/lite/experimental/ml_adjacent/algo/per_image_standardization.cc | tensorflow/lite/experimental/ml_adjacent/algo/per_image_standardization_test.cc | #include "tensorflow/lite/experimental/ml_adjacent/algo/per_image_standardization.h"
#include <cmath>
#include "tensorflow/lite/experimental/ml_adjacent/lib.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
namespace ml_adj {
namespace per_image_standardization {
namespace {
using ::ml_adj::algo::Algo;
using ::ml_adj::algo::InputPack;
using ::ml_adj::algo::OutputPack;
using ::ml_adj::data::DataRef;
using ::ml_adj::data::MutableDataRef;
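// Scales each image of the NHWC batch to zero mean and unit variance:
// out = (x - mean) * inv_adjusted_stddev, computed independently per image.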
inline void PerImageStandardization(dim_t batches, dim_t height, dim_t width,
dim_t num_channels, const float* input_data,
float* output_data) {
const dim_t num_pixels_per_image = height * width * num_channels;
const float inv_num_pixels_per_image = 1.0f / num_pixels_per_image;
for (ind_t b = 0; b < batches; ++b) {
const dim_t offset = b * num_pixels_per_image;
const float* input_ptr = input_data + offset;
float* output_ptr = output_data + offset;
float mean = 0.0f;
for (ind_t i = 0; i < num_pixels_per_image; ++i) {
mean += input_ptr[i];
}
mean *= inv_num_pixels_per_image;
float variance = 0.0f;
for (ind_t i = 0; i < num_pixels_per_image; ++i) {
const float diff = input_ptr[i] - mean;
variance += diff * diff * inv_num_pixels_per_image;
output_ptr[i] = diff;
}
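    // Cap the reciprocal stddev so that zero-variance (constant) images
    // produce zeros instead of NaNs.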
const float inv_adjusted_stddev =
fmin(num_pixels_per_image, 1.0f / sqrt(variance));
for (ind_t i = 0; i < num_pixels_per_image; ++i) {
output_ptr[i] *= inv_adjusted_stddev;
}
}
}
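// Algo entry point: validates the input/output packs, reads the NHWC
// dimensions from the input image, resizes the output to match, and runs
// the standardization kernel.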
void ComputePerImageStandardization(const InputPack& inputs,
const OutputPack& outputs) {
TFLITE_DCHECK(inputs.size() == 1);
TFLITE_DCHECK(outputs.size() == 1);
const DataRef* img = inputs[0];
const float* img_data = reinterpret_cast<const float*>(img->Data());
const dim_t img_num_batches = img->Dims()[0];
const dim_t img_height = img->Dims()[1];
const dim_t img_width = img->Dims()[2];
const dim_t img_num_channels = img->Dims()[3];
MutableDataRef* output = outputs[0];
output->Resize({img_num_batches, img_height, img_width, img_num_channels});
float* output_data = reinterpret_cast<float*>(output->Data());
PerImageStandardization(img_num_batches, img_height, img_width,
img_num_channels, img_data, output_data);
}
}  // namespace
const Algo* Impl_PerImageStandardization() {
static const Algo per_image_standardization = {
&ComputePerImageStandardization, nullptr};
return &per_image_standardization;
}
}  // namespace per_image_standardization
} | #include "tensorflow/lite/experimental/ml_adjacent/algo/per_image_standardization.h"
#include <cstring>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/lite/experimental/ml_adjacent/data/owning_vector_ref.h"
#include "tensorflow/lite/experimental/ml_adjacent/lib.h"
using ::ml_adj::algo::Algo;
using ::ml_adj::data::OwningVectorRef;
namespace ml_adj {
namespace per_image_standardization {
namespace {
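// Bundles an NHWC input image (dims plus flattened data) with the expected
// standardized output for one parameterized test case.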
struct PerImageStandardizationTestParams {
const std::vector<dim_t> img_dims;
const std::vector<float> img_data;
const std::vector<float> expected_data;
};
class PerImageStandardizationTest
: public testing::TestWithParam<PerImageStandardizationTestParams> {};
TEST_P(PerImageStandardizationTest, FloatPixelType) {
const PerImageStandardizationTestParams& params = GetParam();
OwningVectorRef img(etype_t::f32);
img.Resize(dims_t(params.img_dims));
ASSERT_EQ(img.Bytes(), params.img_data.size() * sizeof(float));
std::memcpy(img.Data(), params.img_data.data(), img.Bytes());
OwningVectorRef output(etype_t::f32);
const Algo* per_image_standardization = Impl_PerImageStandardization();
per_image_standardization->process({&img}, {&output});
ASSERT_EQ(output.Bytes(), params.expected_data.size() * sizeof(float));
ASSERT_EQ(output.Dims(), img.Dims());
constexpr float kAbsError = 0.01f;
float* out_data = reinterpret_cast<float*>(output.Data());
for (int i = 0; i < output.NumElements(); ++i) {
EXPECT_NEAR(out_data[i], params.expected_data[i], kAbsError)
<< "out_data[" << i << "] = " << out_data[i] << ", expected_data[" << i
<< "] = " << params.expected_data[i];
}
}
INSTANTIATE_TEST_SUITE_P(
PerImageStandardizationTests, PerImageStandardizationTest,
testing::ValuesIn({
        PerImageStandardizationTestParams{
            /*img_dims=*/{1, 2, 2, 1},
            /*img_data=*/{1, 2,
                          3, 4},
            /*expected_data=*/{-1.3416407, -0.4472136,
                               0.4472136, 1.3416407}},
        PerImageStandardizationTestParams{
            /*img_dims=*/{2, 2, 2, 1},
            /*img_data=*/{1, 2,
                          3, 4,

                          1, 2,
                          4, 8},
            /*expected_data=*/{-1.3416407, -0.4472136,
                               0.4472136, 1.3416407,

                               -1.0257553, -0.65275335,
                               0.09325048, 1.5852581}},
        PerImageStandardizationTestParams{
            /*img_dims=*/{2, 2, 2, 2},
            /*img_data=*/{1, 2,
                          1, 3,
                          1, 4,
                          1, 5,

                          1, 2,
                          2, 2,
                          3, 2,
                          4, 2},
            /*expected_data=*/{-0.8451542, -0.16903085,
                               -0.8451542, 0.50709254,
                               -0.8451542, 1.1832159,
                               -0.8451542, 1.8593392,

                               -1.5075567, -0.30151135,
                               -0.30151135, -0.30151135,
                               0.904534, -0.30151135,
                               2.1105793, -0.30151135}},
}));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/ml_adjacent/algo/per_image_standardization.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/ml_adjacent/algo/per_image_standardization_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
8a753d44-009b-4e57-92f1-ff59a6d25495 | cpp | tensorflow/tensorflow | skip_dataset_op | tensorflow/core/kernels/data/skip_dataset_op.cc | tensorflow/core/kernels/data/skip_dataset_op_test.cc | #include "tensorflow/core/kernels/data/skip_dataset_op.h"
#include <cstddef>
#include <cstdint>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/core/data/global_shuffle_utils.h"
#include "tensorflow/core/data/name_utils.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/partial_tensor_shape.h"
#include "tensorflow/core/framework/tensor.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace data {
constexpr const char* const SkipDatasetOp::kDatasetType;
constexpr const char* const SkipDatasetOp::kInputDataset;
constexpr const char* const SkipDatasetOp::kCount;
constexpr const char* const SkipDatasetOp::kOutputTypes;
constexpr const char* const SkipDatasetOp::kOutputShapes;
constexpr char kEmptySkip[] = "EmptySkip";
constexpr char kFiniteSkip[] = "FiniteSkip";
constexpr char kCurIndex[] = "i";
constexpr char kInputImplEmpty[] = "input_impl_empty";
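// Dataset that skips the first `count` elements of its input. A negative
// `count` drops the entire input: range(10).skip(4) yields 4..9, while
// range(10).skip(-1) yields nothing (see the unit tests).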
class SkipDatasetOp::Dataset : public DatasetBase {
public:
Dataset(OpKernelContext* ctx, int64_t count, const DatasetBase* input)
: DatasetBase(DatasetContext(ctx)), count_(count), input_(input) {
input_->Ref();
if (input_ != nullptr && count >= 0) {
random_indexing_compatible_ = input_->RandomIndexingCompatible();
} else {
random_indexing_compatible_ = absl::FailedPreconditionError(
absl::StrCat("Global shuffling does not support empty dataset or "
"skipping the entire dataset. Got skip(",
count, ")."));
}
}
~Dataset() override { input_->Unref(); }
std::unique_ptr<IteratorBase> MakeIteratorInternal(
const string& prefix) const override {
if (count_ < 0) {
return std::make_unique<EmptyIterator>(EmptyIterator::Params{
this, name_utils::IteratorPrefix(kEmptySkip, prefix)});
} else {
return std::make_unique<FiniteIterator>(FiniteIterator::Params{
this, name_utils::IteratorPrefix(kFiniteSkip, prefix)});
}
}
const DataTypeVector& output_dtypes() const override {
return input_->output_dtypes();
}
const std::vector<PartialTensorShape>& output_shapes() const override {
return input_->output_shapes();
}
string DebugString() const override {
return name_utils::DatasetDebugString(kDatasetType);
}
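  // Cardinality is the input cardinality minus `count`, floored at zero;
  // infinite and unknown cardinalities pass through unchanged.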
int64_t CardinalityInternal(CardinalityOptions options) const override {
int64_t n = input_->Cardinality(options);
if (n == kInfiniteCardinality || n == kUnknownCardinality) {
return n;
}
return count_ < 0 ? 0 : std::max(int64_t{0}, n - count_);
}
Status InputDatasets(std::vector<const DatasetBase*>* inputs) const override {
inputs->push_back(input_);
return absl::OkStatus();
}
Status CheckExternalState() const override {
return input_->CheckExternalState();
}
Status Get(OpKernelContext* ctx, int64 index,
std::vector<Tensor>* out_tensors) const override {
TF_RETURN_IF_ERROR(CheckRandomAccessCompatible(index));
return input_->Get(ctx, index + count_, out_tensors);
}
absl::Status RandomIndexingCompatible() const override {
return random_indexing_compatible_;
}
protected:
Status AsGraphDefInternal(SerializationContext* ctx,
DatasetGraphDefBuilder* b,
Node** output) const override {
Node* input_graph_node = nullptr;
TF_RETURN_IF_ERROR(b->AddInputDataset(ctx, input_, &input_graph_node));
Node* count = nullptr;
TF_RETURN_IF_ERROR(b->AddScalar(count_, &count));
TF_RETURN_IF_ERROR(b->AddDataset(this, {input_graph_node, count}, output));
return absl::OkStatus();
}
private:
class EmptyIterator : public DatasetIterator<Dataset> {
public:
explicit EmptyIterator(const Params& params)
: DatasetIterator<Dataset>(params) {}
bool SymbolicCheckpointCompatible() const override { return true; }
Status GetNextInternal(IteratorContext* ctx,
std::vector<Tensor>* out_tensors,
bool* end_of_sequence) override {
*end_of_sequence = true;
return absl::OkStatus();
}
protected:
std::shared_ptr<model::Node> CreateNode(
IteratorContext* ctx, model::Node::Args args) const override {
      return model::MakeKnownRatioNode(std::move(args),
                                       /*ratio=*/1);
}
Status SaveInternal(SerializationContext* ctx,
IteratorStateWriter* writer) override {
return absl::OkStatus();
}
Status RestoreInternal(IteratorContext* ctx,
IteratorStateReader* reader) override {
return absl::OkStatus();
}
};
class FiniteIterator : public DatasetIterator<Dataset> {
public:
explicit FiniteIterator(const Params& params)
: DatasetIterator<Dataset>(params), i_(0) {}
bool SymbolicCheckpointCompatible() const override { return true; }
Status Initialize(IteratorContext* ctx) override {
return dataset()->input_->MakeIterator(ctx, this, prefix(), &input_impl_);
}
Status GetNextInternal(IteratorContext* ctx,
std::vector<Tensor>* out_tensors,
bool* end_of_sequence) override {
if (ctx->index_mapper() != nullptr) {
return Get(ctx, out_tensors, end_of_sequence);
}
mutex_lock l(mu_);
if (!input_impl_) {
*end_of_sequence = true;
return absl::OkStatus();
}
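      // Consume whatever remains of the skip before delegating to the input
      // iterator; a partial skip may span several GetNext calls.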
if (i_ < dataset()->count_) {
int num_skipped;
TF_RETURN_IF_ERROR(input_impl_->Skip(ctx, dataset()->count_ - i_,
end_of_sequence, &num_skipped));
i_ += num_skipped;
if (*end_of_sequence) {
input_impl_.reset();
return absl::OkStatus();
}
}
TF_RETURN_IF_ERROR(
input_impl_->GetNext(ctx, out_tensors, end_of_sequence));
if (*end_of_sequence) {
input_impl_.reset();
}
return absl::OkStatus();
}
absl::Status Get(IteratorContext* ctx, std::vector<Tensor>* out_tensors,
bool* end_of_sequence) {
mutex_lock l(mu_);
if (!input_impl_) {
*end_of_sequence = true;
return absl::OkStatus();
}
IteratorContextWithIndexMapper ctx_with_index_mapper(ctx, this);
TF_RETURN_IF_ERROR(input_impl_->GetNext(ctx_with_index_mapper.Get(),
out_tensors, end_of_sequence));
ctx_with_index_mapper.MergeCheckpoint();
return absl::OkStatus();
}
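    // Under global shuffling, shift each shuffled position past the skipped
    // prefix so that random access lands on the correct input element.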
IndexMapperFn GetIndexMapper(
IndexMapperFn parent_index_mapper) const override {
int64_t skip_count = dataset()->count_;
return [parent_index_mapper,
skip_count](size_t element_position) -> absl::StatusOr<size_t> {
TF_ASSIGN_OR_RETURN(size_t shuffled_element_position,
parent_index_mapper(element_position));
return shuffled_element_position + skip_count;
};
}
protected:
std::shared_ptr<model::Node> CreateNode(
IteratorContext* ctx, model::Node::Args args) const override {
      return model::MakeKnownRatioNode(std::move(args),
                                       /*ratio=*/1);
}
Status SaveInternal(SerializationContext* ctx,
IteratorStateWriter* writer) override {
mutex_lock l(mu_);
TF_RETURN_IF_ERROR(writer->WriteScalar(prefix(), kCurIndex, i_));
TF_RETURN_IF_ERROR(writer->WriteScalar(
prefix(), kInputImplEmpty, static_cast<int64_t>(!input_impl_)));
if (input_impl_) {
TF_RETURN_IF_ERROR(SaveInput(ctx, writer, input_impl_));
}
return absl::OkStatus();
}
Status RestoreInternal(IteratorContext* ctx,
IteratorStateReader* reader) override {
if (ctx->restored_element_count().has_value()) {
mutex_lock l(mu_);
return RestoreInput(ctx, reader, input_impl_);
}
mutex_lock l(mu_);
TF_RETURN_IF_ERROR(reader->ReadScalar(prefix(), kCurIndex, &i_));
int64_t input_empty;
TF_RETURN_IF_ERROR(
reader->ReadScalar(prefix(), kInputImplEmpty, &input_empty));
if (!static_cast<bool>(input_empty)) {
TF_RETURN_IF_ERROR(RestoreInput(ctx, reader, input_impl_));
} else {
input_impl_.reset();
}
return absl::OkStatus();
}
private:
mutex mu_;
int64_t i_ TF_GUARDED_BY(mu_);
std::unique_ptr<IteratorBase> input_impl_ TF_GUARDED_BY(mu_);
};
const int64_t count_;
const DatasetBase* const input_;
absl::Status random_indexing_compatible_;
};
SkipDatasetOp::SkipDatasetOp(OpKernelConstruction* ctx)
: UnaryDatasetOpKernel(ctx) {}
void SkipDatasetOp::MakeDataset(OpKernelContext* ctx, DatasetBase* input,
DatasetBase** output) {
int64_t count;
OP_REQUIRES_OK(ctx, ParseScalarArgument<int64_t>(ctx, kCount, &count));
*output = new Dataset(ctx, count, input);
}
namespace {
REGISTER_KERNEL_BUILDER(Name("SkipDataset").Device(DEVICE_CPU), SkipDatasetOp);
}
}
} | #include "tensorflow/core/kernels/data/skip_dataset_op.h"
#include "tensorflow/core/data/dataset_test_base.h"
namespace tensorflow {
namespace data {
namespace {
constexpr char kNodeName[] = "skip_dataset";
class SkipDatasetParams : public DatasetParams {
public:
template <typename T>
SkipDatasetParams(T input_dataset_params, int64_t count,
DataTypeVector output_dtypes,
std::vector<PartialTensorShape> output_shapes,
string node_name)
: DatasetParams(std::move(output_dtypes), std::move(output_shapes),
std::move(node_name)),
count_(count) {
input_dataset_params_.push_back(std::make_unique<T>(input_dataset_params));
iterator_prefix_ =
name_utils::IteratorPrefix(input_dataset_params.dataset_type(),
input_dataset_params.iterator_prefix());
}
std::vector<Tensor> GetInputTensors() const override {
return {CreateTensor<int64_t>(TensorShape({}), {count_})};
}
Status GetInputNames(std::vector<string>* input_names) const override {
input_names->clear();
input_names->emplace_back(SkipDatasetOp::kInputDataset);
input_names->emplace_back(SkipDatasetOp::kCount);
return absl::OkStatus();
}
Status GetAttributes(AttributeVector* attr_vector) const override {
attr_vector->clear();
attr_vector->emplace_back("output_types", output_dtypes_);
attr_vector->emplace_back("output_shapes", output_shapes_);
attr_vector->emplace_back("metadata", "");
return absl::OkStatus();
}
string dataset_type() const override { return SkipDatasetOp::kDatasetType; }
private:
int64_t count_;
};
class SkipDatasetOpTest : public DatasetOpsTestBase {};
SkipDatasetParams SkipDatasetParams1() {
  return SkipDatasetParams(
      /*input_dataset_params=*/RangeDatasetParams(0, 10, 1),
      /*count=*/4,
      /*output_dtypes=*/{DT_INT64},
      /*output_shapes=*/{PartialTensorShape({1})},
      /*node_name=*/kNodeName);
}
SkipDatasetParams SkipDatasetParams2() {
  return SkipDatasetParams(
      /*input_dataset_params=*/RangeDatasetParams(0, 10, 1),
      /*count=*/25,
      /*output_dtypes=*/{DT_INT64},
      /*output_shapes=*/{PartialTensorShape({1})},
      /*node_name=*/kNodeName);
}
SkipDatasetParams SkipDatasetParams3() {
  return SkipDatasetParams(
      /*input_dataset_params=*/RangeDatasetParams(0, 10, 1),
      /*count=*/10,
      /*output_dtypes=*/{DT_INT64},
      /*output_shapes=*/{PartialTensorShape({1})},
      /*node_name=*/kNodeName);
}
SkipDatasetParams SkipDatasetParams4() {
  return SkipDatasetParams(
      /*input_dataset_params=*/RangeDatasetParams(0, 10, 1),
      /*count=*/0,
      /*output_dtypes=*/{DT_INT64},
      /*output_shapes=*/{PartialTensorShape({1})},
      /*node_name=*/kNodeName);
}
SkipDatasetParams SkipDatasetParams5() {
  return SkipDatasetParams(
      /*input_dataset_params=*/RangeDatasetParams(0, 10, 1),
      /*count=*/-1,
      /*output_dtypes=*/{DT_INT64},
      /*output_shapes=*/{PartialTensorShape({1})},
      /*node_name=*/kNodeName);
}
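// The five parameter sets above cover a partial skip, skipping more than is
// available, skipping exactly everything, skipping nothing, and a negative
// count that skips the whole input.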
std::vector<GetNextTestCase<SkipDatasetParams>> GetNextTestCases() {
  return {
      {SkipDatasetParams1(),
       /*expected_outputs=*/
       CreateTensors<int64_t>(TensorShape{}, {{4}, {5}, {6}, {7}, {8}, {9}})},
      {SkipDatasetParams2(),
       /*expected_outputs=*/{}},
      {SkipDatasetParams3(),
       /*expected_outputs=*/{}},
      {SkipDatasetParams4(),
       /*expected_outputs=*/
       CreateTensors<int64_t>(
           TensorShape{}, {{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}, {9}})},
      {SkipDatasetParams5(),
       /*expected_outputs=*/{}}};
}
ITERATOR_GET_NEXT_TEST_P(SkipDatasetOpTest, SkipDatasetParams,
GetNextTestCases())
TEST_F(SkipDatasetOpTest, DatasetNodeName) {
auto dataset_params = SkipDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetNodeName(dataset_params.node_name()));
}
TEST_F(SkipDatasetOpTest, DatasetTypeString) {
auto dataset_params = SkipDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(
CheckDatasetTypeString(name_utils::OpName(SkipDatasetOp::kDatasetType)));
}
TEST_F(SkipDatasetOpTest, DatasetOutputDtypes) {
auto dataset_params = SkipDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetOutputDtypes({DT_INT64}));
}
TEST_F(SkipDatasetOpTest, DatasetOutputShapes) {
auto dataset_params = SkipDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetOutputShapes({PartialTensorShape({})}));
}
std::vector<CardinalityTestCase<SkipDatasetParams>> CardinalityTestCases() {
  return {{SkipDatasetParams1(),
           /*expected_cardinality=*/6},
          {SkipDatasetParams2(),
           /*expected_cardinality=*/0},
          {SkipDatasetParams3(),
           /*expected_cardinality=*/0},
          {SkipDatasetParams4(),
           /*expected_cardinality=*/10},
          {SkipDatasetParams5(),
           /*expected_cardinality=*/0}};
}
DATASET_CARDINALITY_TEST_P(SkipDatasetOpTest, SkipDatasetParams,
CardinalityTestCases())
TEST_F(SkipDatasetOpTest, IteratorOutputDtypes) {
auto dataset_params = SkipDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorOutputDtypes({DT_INT64}));
}
TEST_F(SkipDatasetOpTest, IteratorOutputShapes) {
auto dataset_params = SkipDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorOutputShapes({PartialTensorShape({})}));
}
std::vector<IteratorPrefixTestCase<SkipDatasetParams>>
IteratorPrefixTestCases() {
return {{SkipDatasetParams1(),
name_utils::IteratorPrefix("FiniteSkip",
SkipDatasetParams1().iterator_prefix())},
{SkipDatasetParams2(),
name_utils::IteratorPrefix(
"FiniteSkip", SkipDatasetParams2().iterator_prefix())},
{SkipDatasetParams3(),
name_utils::IteratorPrefix(
"FiniteSkip", SkipDatasetParams3().iterator_prefix())},
{SkipDatasetParams4(),
name_utils::IteratorPrefix(
"FiniteSkip", SkipDatasetParams4().iterator_prefix())},
{SkipDatasetParams5(),
name_utils::IteratorPrefix(
"EmptySkip", SkipDatasetParams5().iterator_prefix())}};
}
ITERATOR_PREFIX_TEST_P(SkipDatasetOpTest, SkipDatasetParams,
IteratorPrefixTestCases())
std::vector<IteratorSaveAndRestoreTestCase<SkipDatasetParams>>
IteratorSaveAndRestoreTestCases() {
return {
      {SkipDatasetParams1(),
       /*breakpoints=*/{0, 2, 7},
       /*expected_outputs=*/
       CreateTensors<int64_t>(TensorShape{}, {{4}, {5}, {6}, {7}, {8}, {9}})},
      {SkipDatasetParams2(),
       /*breakpoints=*/{0, 2, 5},
       /*expected_outputs=*/{}},
      {SkipDatasetParams3(),
       /*breakpoints=*/{0, 2, 5},
       /*expected_outputs=*/{}},
      {SkipDatasetParams4(),
       /*breakpoints=*/{0, 2, 5, 11},
       /*expected_outputs=*/
       CreateTensors<int64_t>(
           TensorShape{}, {{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}, {9}})},
      {SkipDatasetParams5(),
       /*breakpoints=*/{0, 2, 5},
       /*expected_outputs=*/{}}};
}
ITERATOR_SAVE_AND_RESTORE_TEST_P(SkipDatasetOpTest, SkipDatasetParams,
IteratorSaveAndRestoreTestCases())
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/skip_dataset_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/skip_dataset_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
2856eed2-70da-4d70-ac4a-3065123d029d | cpp | tensorflow/tensorflow | permutation | tensorflow/compiler/mlir/quantization/stablehlo/cc/permutation.h | tensorflow/compiler/mlir/quantization/stablehlo/cc/permutation_test.cc | #ifndef TENSORFLOW_COMPILER_MLIR_QUANTIZATION_STABLEHLO_CC_PERMUTATION_H_
#define TENSORFLOW_COMPILER_MLIR_QUANTIZATION_STABLEHLO_CC_PERMUTATION_H_
#include <cstdint>
#include <type_traits>
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "mlir/Support/LLVM.h"
namespace mlir::quant {
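// Permutes `values` with `permutation`: the i-th element of the result is
// values[permutation[i]], so both spans are expected to have the same size.
// Note that the std::move below operates on a const view, so elements are in
// fact copied. Illustrative example:
//   Permute<int>({10, 20, 30}, {2, 0, 1})  ==>  {30, 10, 20}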
template <typename T,
typename = std::enable_if_t<std::is_default_constructible_v<T>, void>>
SmallVector<T> Permute(const ArrayRef<T> values,
const ArrayRef<int64_t> permutation) {
SmallVector<T> permuted_values(values.size(), T{});
for (auto [i, permutation_idx] : llvm::enumerate(permutation)) {
permuted_values[i] = std::move(values[permutation_idx]);
}
return permuted_values;
}
}
#endif | #include "tensorflow/compiler/mlir/quantization/stablehlo/cc/permutation.h"
#include <cstdint>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "mlir/Support/LLVM.h"
namespace mlir::quant {
namespace {
using testing::ElementsAre;
using testing::IsEmpty;
TEST(PermutationTest, PermuteEmptyArray) {
const SmallVector<int> permutation_result =
Permute<int>(SmallVector<int>{}, SmallVector<int64_t>{});
EXPECT_THAT(permutation_result, IsEmpty());
}
TEST(PermutationTest, PermuteOneElement) {
const SmallVector<int> single_element_array = {8};
const SmallVector<int64_t> permutation = {0};
const SmallVector<int> permutation_result =
Permute<int>(single_element_array, permutation);
EXPECT_THAT(permutation_result, ElementsAre(8));
}
TEST(PermutationTest, PermuteFourElements) {
const SmallVector<int> arr = {0, 3, 1, 2};
const SmallVector<int64_t> permutation = {0, 2, 3, 1};
const SmallVector<int> permutation_result = Permute<int>(arr, permutation);
EXPECT_THAT(permutation_result, ElementsAre(0, 1, 2, 3));
}
TEST(PermutationTest, PermuteFourStringElements) {
const SmallVector<std::string> arr = {"a", "b", "c", "d"};
const SmallVector<int64_t> permutation = {0, 2, 3, 1};
const SmallVector<std::string> permutation_result =
Permute<std::string>(arr, permutation);
EXPECT_THAT(permutation_result, ElementsAre("a", "c", "d", "b"));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/stablehlo/cc/permutation.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/stablehlo/cc/permutation_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
e70903f9-fbb2-42e6-8fd2-9a94528cd959 | cpp | google/cel-cpp | qualified_reference_resolver | eval/compiler/qualified_reference_resolver.cc | eval/compiler/qualified_reference_resolver_test.cc | #include "eval/compiler/qualified_reference_resolver.h"
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/types/optional.h"
#include "base/ast.h"
#include "base/ast_internal/ast_impl.h"
#include "base/ast_internal/expr.h"
#include "base/builtins.h"
#include "base/kind.h"
#include "common/ast_rewrite.h"
#include "eval/compiler/flat_expr_builder_extensions.h"
#include "eval/compiler/resolver.h"
#include "runtime/internal/issue_collector.h"
#include "runtime/runtime_issue.h"
namespace google::api::expr::runtime {
namespace {
using ::cel::RuntimeIssue;
using ::cel::ast_internal::Expr;
using ::cel::ast_internal::Reference;
using ::cel::runtime_internal::IssueCollector;
constexpr absl::string_view kOptionalOr = "or";
constexpr absl::string_view kOptionalOrValue = "orValue";
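// Functions handled specially by the expression planner (short-circuiting
// logic, indexing, the ternary, optional chaining, cel.@block); they are
// exempt from overload resolution below.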
bool IsSpecialFunction(absl::string_view function_name) {
return function_name == cel::builtin::kAnd ||
function_name == cel::builtin::kOr ||
function_name == cel::builtin::kIndex ||
function_name == cel::builtin::kTernary ||
function_name == kOptionalOr || function_name == kOptionalOrValue ||
function_name == "cel.@block";
}
bool OverloadExists(const Resolver& resolver, absl::string_view name,
const std::vector<cel::Kind>& arguments_matcher,
bool receiver_style = false) {
return !resolver.FindOverloads(name, receiver_style, arguments_matcher)
.empty() ||
!resolver.FindLazyOverloads(name, receiver_style, arguments_matcher)
.empty();
}
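// Returns the most qualified name under which an overload with
// `argument_count` arguments exists, or nullopt if none does. A `base_name`
// starting with '.' is already fully qualified and is returned as-is on a
// match.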
absl::optional<std::string> BestOverloadMatch(const Resolver& resolver,
absl::string_view base_name,
int argument_count) {
if (IsSpecialFunction(base_name)) {
return std::string(base_name);
}
auto arguments_matcher = ArgumentsMatcher(argument_count);
auto names = resolver.FullyQualifiedNames(base_name);
for (auto name = names.begin(); name != names.end(); ++name) {
if (OverloadExists(resolver, *name, arguments_matcher)) {
if (base_name[0] == '.') {
return std::string(base_name);
}
return *name;
}
}
return absl::nullopt;
}
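// AST rewriter that applies the reference map produced by the type checker:
// enum references become constants, ident/select references become resolved
// identifiers, and calls are renamed to their best-matching overloads.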
class ReferenceResolver : public cel::AstRewriterBase {
public:
ReferenceResolver(
const absl::flat_hash_map<int64_t, Reference>& reference_map,
const Resolver& resolver, IssueCollector& issue_collector)
: reference_map_(reference_map),
resolver_(resolver),
issues_(issue_collector),
progress_status_(absl::OkStatus()) {}
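  // Pre-order pass: rewrites constant (enum) references and ident/select
  // references in place. Call rewrites are deferred to PostVisitRewrite.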
bool PreVisitRewrite(Expr& expr) override {
const Reference* reference = GetReferenceForId(expr.id());
if (reference != nullptr && reference->has_value()) {
if (reference->value().has_int64_value()) {
expr.mutable_const_expr().set_int64_value(
reference->value().int64_value());
return true;
} else {
return false;
}
}
if (reference != nullptr) {
if (expr.has_ident_expr()) {
return MaybeUpdateIdentNode(&expr, *reference);
} else if (expr.has_select_expr()) {
return MaybeUpdateSelectNode(&expr, *reference);
} else {
return false;
}
}
return false;
}
bool PostVisitRewrite(Expr& expr) override {
const Reference* reference = GetReferenceForId(expr.id());
if (expr.has_call_expr()) {
return MaybeUpdateCallNode(&expr, reference);
}
return false;
}
const absl::Status& GetProgressStatus() const { return progress_status_; }
private:
bool MaybeUpdateCallNode(Expr* out, const Reference* reference) {
auto& call_expr = out->mutable_call_expr();
const std::string& function = call_expr.function();
if (reference != nullptr && reference->overload_id().empty()) {
UpdateStatus(issues_.AddIssue(
RuntimeIssue::CreateWarning(absl::InvalidArgumentError(
absl::StrCat("Reference map doesn't provide overloads for ",
out->call_expr().function())))));
}
bool receiver_style = call_expr.has_target();
int arg_num = call_expr.args().size();
if (receiver_style) {
auto maybe_namespace = ToNamespace(call_expr.target());
if (maybe_namespace.has_value()) {
std::string resolved_name =
absl::StrCat(*maybe_namespace, ".", function);
auto resolved_function =
BestOverloadMatch(resolver_, resolved_name, arg_num);
if (resolved_function.has_value()) {
call_expr.set_function(*resolved_function);
call_expr.set_target(nullptr);
return true;
}
}
} else {
auto maybe_resolved_function =
BestOverloadMatch(resolver_, function, arg_num);
if (!maybe_resolved_function.has_value()) {
UpdateStatus(issues_.AddIssue(RuntimeIssue::CreateWarning(
absl::InvalidArgumentError(absl::StrCat(
"No overload found in reference resolve step for ", function)),
RuntimeIssue::ErrorCode::kNoMatchingOverload)));
} else if (maybe_resolved_function.value() != function) {
call_expr.set_function(maybe_resolved_function.value());
return true;
}
}
if (call_expr.has_target() && !IsSpecialFunction(function) &&
        !OverloadExists(resolver_, function, ArgumentsMatcher(arg_num + 1),
                        /*receiver_style=*/true)) {
UpdateStatus(issues_.AddIssue(RuntimeIssue::CreateWarning(
absl::InvalidArgumentError(absl::StrCat(
"No overload found in reference resolve step for ", function)),
RuntimeIssue::ErrorCode::kNoMatchingOverload)));
}
return false;
}
bool MaybeUpdateSelectNode(Expr* out, const Reference& reference) {
if (out->select_expr().test_only()) {
UpdateStatus(issues_.AddIssue(RuntimeIssue::CreateWarning(
absl::InvalidArgumentError("Reference map points to a presence "
"test -- has(container.attr)"))));
} else if (!reference.name().empty()) {
out->mutable_ident_expr().set_name(reference.name());
rewritten_reference_.insert(out->id());
return true;
}
return false;
}
bool MaybeUpdateIdentNode(Expr* out, const Reference& reference) {
if (!reference.name().empty() &&
reference.name() != out->ident_expr().name()) {
out->mutable_ident_expr().set_name(reference.name());
rewritten_reference_.insert(out->id());
return true;
}
return false;
}
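  // Attempts to interpret `expr` (an identifier or a chain of selects) as a
  // namespace-qualified prefix. Returns nullopt for presence tests and for
  // nodes that were already rewritten to references.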
absl::optional<std::string> ToNamespace(const Expr& expr) {
absl::optional<std::string> maybe_parent_namespace;
if (rewritten_reference_.find(expr.id()) != rewritten_reference_.end()) {
return absl::nullopt;
}
if (expr.has_ident_expr()) {
return expr.ident_expr().name();
} else if (expr.has_select_expr()) {
if (expr.select_expr().test_only()) {
return absl::nullopt;
}
maybe_parent_namespace = ToNamespace(expr.select_expr().operand());
if (!maybe_parent_namespace.has_value()) {
return absl::nullopt;
}
return absl::StrCat(*maybe_parent_namespace, ".",
expr.select_expr().field());
} else {
return absl::nullopt;
}
}
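  // Looks up the reference attached to `expr_id`. Entries keyed by id 0 are
  // rejected, presumably because 0 denotes an unset expression id.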
const Reference* GetReferenceForId(int64_t expr_id) {
auto iter = reference_map_.find(expr_id);
if (iter == reference_map_.end()) {
return nullptr;
}
if (expr_id == 0) {
UpdateStatus(issues_.AddIssue(
RuntimeIssue::CreateWarning(absl::InvalidArgumentError(
"reference map entries for expression id 0 are not supported"))));
return nullptr;
}
return &iter->second;
}
void UpdateStatus(absl::Status status) {
if (progress_status_.ok() && !status.ok()) {
progress_status_ = std::move(status);
return;
}
status.IgnoreError();
}
const absl::flat_hash_map<int64_t, Reference>& reference_map_;
const Resolver& resolver_;
IssueCollector& issues_;
absl::Status progress_status_;
absl::flat_hash_set<int64_t> rewritten_reference_;
};
class ReferenceResolverExtension : public AstTransform {
public:
explicit ReferenceResolverExtension(ReferenceResolverOption opt)
: opt_(opt) {}
absl::Status UpdateAst(PlannerContext& context,
cel::ast_internal::AstImpl& ast) const override {
if (opt_ == ReferenceResolverOption::kCheckedOnly &&
ast.reference_map().empty()) {
return absl::OkStatus();
}
return ResolveReferences(context.resolver(), context.issue_collector(), ast)
.status();
}
private:
ReferenceResolverOption opt_;
};
}
absl::StatusOr<bool> ResolveReferences(const Resolver& resolver,
IssueCollector& issues,
cel::ast_internal::AstImpl& ast) {
ReferenceResolver ref_resolver(ast.reference_map(), resolver, issues);
bool was_rewritten = cel::AstRewrite(ast.root_expr(), ref_resolver);
if (!ref_resolver.GetProgressStatus().ok()) {
return ref_resolver.GetProgressStatus();
}
return was_rewritten;
}
std::unique_ptr<AstTransform> NewReferenceResolverExtension(
ReferenceResolverOption option) {
return std::make_unique<ReferenceResolverExtension>(option);
}
} | #include "eval/compiler/qualified_reference_resolver.h"
#include <memory>
#include <string>
#include <vector>
#include "google/api/expr/v1alpha1/syntax.pb.h"
#include "absl/container/flat_hash_map.h"
#include "absl/memory/memory.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "base/ast.h"
#include "base/ast_internal/ast_impl.h"
#include "base/ast_internal/expr.h"
#include "base/builtins.h"
#include "common/memory.h"
#include "common/type_factory.h"
#include "common/type_manager.h"
#include "common/values/legacy_value_manager.h"
#include "eval/compiler/resolver.h"
#include "eval/public/builtin_func_registrar.h"
#include "eval/public/cel_function.h"
#include "eval/public/cel_function_registry.h"
#include "extensions/protobuf/ast_converters.h"
#include "internal/casts.h"
#include "internal/testing.h"
#include "runtime/internal/issue_collector.h"
#include "runtime/runtime_issue.h"
#include "runtime/type_registry.h"
#include "google/protobuf/text_format.h"
namespace google::api::expr::runtime {
namespace {
using ::absl_testing::IsOkAndHolds;
using ::absl_testing::StatusIs;
using ::cel::Ast;
using ::cel::RuntimeIssue;
using ::cel::ast_internal::AstImpl;
using ::cel::ast_internal::Expr;
using ::cel::ast_internal::SourceInfo;
using ::cel::extensions::internal::ConvertProtoExprToNative;
using ::cel::runtime_internal::IssueCollector;
using ::testing::Contains;
using ::testing::ElementsAre;
using ::testing::Eq;
using ::testing::IsEmpty;
using ::testing::UnorderedElementsAre;
constexpr char kExpr[] = R"(
id: 1
call_expr {
function: "_&&_"
args {
id: 2
select_expr {
field: "var1"
operand {
id: 3
select_expr {
field: "bar"
operand {
id: 4
ident_expr { name: "foo" }
}
}
}
}
}
args {
id: 5
select_expr {
field: "var2"
operand {
id: 6
select_expr {
field: "foo"
operand {
id: 7
ident_expr { name: "bar" }
}
}
}
}
}
}
)";
MATCHER_P(StatusCodeIs, x, "") {
const absl::Status& status = arg;
return status.code() == x;
}
std::unique_ptr<AstImpl> ParseTestProto(const std::string& pb) {
google::api::expr::v1alpha1::Expr expr;
EXPECT_TRUE(google::protobuf::TextFormat::ParseFromString(pb, &expr));
return absl::WrapUnique(cel::internal::down_cast<AstImpl*>(
cel::extensions::CreateAstFromParsedExpr(expr).value().release()));
}
std::vector<absl::Status> ExtractIssuesStatus(const IssueCollector& issues) {
std::vector<absl::Status> issues_status;
for (const auto& issue : issues.issues()) {
issues_status.push_back(issue.ToStatus());
}
return issues_status;
}
TEST(ResolveReferences, Basic) {
std::unique_ptr<AstImpl> expr_ast = ParseTestProto(kExpr);
expr_ast->reference_map()[2].set_name("foo.bar.var1");
expr_ast->reference_map()[5].set_name("bar.foo.var2");
IssueCollector issues(RuntimeIssue::Severity::kError);
CelFunctionRegistry func_registry;
cel::TypeRegistry type_registry;
cel::common_internal::LegacyValueManager value_factory(
cel::MemoryManagerRef::ReferenceCounting(),
type_registry.GetComposedTypeProvider());
Resolver registry("", func_registry.InternalGetRegistry(), type_registry,
value_factory, type_registry.resolveable_enums());
auto result = ResolveReferences(registry, issues, *expr_ast);
ASSERT_THAT(result, IsOkAndHolds(true));
google::api::expr::v1alpha1::Expr expected_expr;
google::protobuf::TextFormat::ParseFromString(R"pb(
id: 1
call_expr {
function: "_&&_"
args {
id: 2
ident_expr { name: "foo.bar.var1" }
}
args {
id: 5
ident_expr { name: "bar.foo.var2" }
}
})pb",
&expected_expr);
EXPECT_EQ(expr_ast->root_expr(),
ConvertProtoExprToNative(expected_expr).value());
}
TEST(ResolveReferences, ReturnsFalseIfNoChanges) {
std::unique_ptr<AstImpl> expr_ast = ParseTestProto(kExpr);
IssueCollector issues(RuntimeIssue::Severity::kError);
CelFunctionRegistry func_registry;
cel::TypeRegistry type_registry;
cel::common_internal::LegacyValueManager value_factory(
cel::MemoryManagerRef::ReferenceCounting(),
type_registry.GetComposedTypeProvider());
Resolver registry("", func_registry.InternalGetRegistry(), type_registry,
value_factory, type_registry.resolveable_enums());
auto result = ResolveReferences(registry, issues, *expr_ast);
ASSERT_THAT(result, IsOkAndHolds(false));
expr_ast->reference_map()[4].set_name("foo");
expr_ast->reference_map()[7].set_name("bar");
result = ResolveReferences(registry, issues, *expr_ast);
ASSERT_THAT(result, IsOkAndHolds(false));
}
TEST(ResolveReferences, NamespacedIdent) {
std::unique_ptr<AstImpl> expr_ast = ParseTestProto(kExpr);
SourceInfo source_info;
IssueCollector issues(RuntimeIssue::Severity::kError);
CelFunctionRegistry func_registry;
cel::TypeRegistry type_registry;
cel::common_internal::LegacyValueManager value_factory(
cel::MemoryManagerRef::ReferenceCounting(),
type_registry.GetComposedTypeProvider());
Resolver registry("", func_registry.InternalGetRegistry(), type_registry,
value_factory, type_registry.resolveable_enums());
expr_ast->reference_map()[2].set_name("foo.bar.var1");
expr_ast->reference_map()[7].set_name("namespace_x.bar");
auto result = ResolveReferences(registry, issues, *expr_ast);
ASSERT_THAT(result, IsOkAndHolds(true));
google::api::expr::v1alpha1::Expr expected_expr;
google::protobuf::TextFormat::ParseFromString(
R"pb(
id: 1
call_expr {
function: "_&&_"
args {
id: 2
ident_expr { name: "foo.bar.var1" }
}
args {
id: 5
select_expr {
field: "var2"
operand {
id: 6
select_expr {
field: "foo"
operand {
id: 7
ident_expr { name: "namespace_x.bar" }
}
}
}
}
}
})pb",
&expected_expr);
EXPECT_EQ(expr_ast->root_expr(),
ConvertProtoExprToNative(expected_expr).value());
}
TEST(ResolveReferences, WarningOnPresenceTest) {
std::unique_ptr<AstImpl> expr_ast = ParseTestProto(R"pb(
id: 1
select_expr {
field: "var1"
test_only: true
operand {
id: 2
select_expr {
field: "bar"
operand {
id: 3
ident_expr { name: "foo" }
}
}
}
})pb");
SourceInfo source_info;
IssueCollector issues(RuntimeIssue::Severity::kError);
CelFunctionRegistry func_registry;
cel::TypeRegistry type_registry;
cel::common_internal::LegacyValueManager value_factory(
cel::MemoryManagerRef::ReferenceCounting(),
type_registry.GetComposedTypeProvider());
Resolver registry("", func_registry.InternalGetRegistry(), type_registry,
value_factory, type_registry.resolveable_enums());
expr_ast->reference_map()[1].set_name("foo.bar.var1");
auto result = ResolveReferences(registry, issues, *expr_ast);
ASSERT_THAT(result, IsOkAndHolds(false));
EXPECT_THAT(
ExtractIssuesStatus(issues),
testing::ElementsAre(Eq(absl::Status(
absl::StatusCode::kInvalidArgument,
"Reference map points to a presence test -- has(container.attr)"))));
}
constexpr char kEnumExpr[] = R"(
id: 1
call_expr {
function: "_==_"
args {
id: 2
select_expr {
field: "var1"
operand {
id: 3
select_expr {
field: "bar"
operand {
id: 4
ident_expr { name: "foo" }
}
}
}
}
}
args {
id: 5
ident_expr { name: "bar.foo.Enum.ENUM_VAL1" }
}
}
)";
TEST(ResolveReferences, EnumConstReferenceUsed) {
std::unique_ptr<AstImpl> expr_ast = ParseTestProto(kEnumExpr);
SourceInfo source_info;
CelFunctionRegistry func_registry;
ASSERT_OK(RegisterBuiltinFunctions(&func_registry));
cel::TypeRegistry type_registry;
cel::common_internal::LegacyValueManager value_factory(
cel::MemoryManagerRef::ReferenceCounting(),
type_registry.GetComposedTypeProvider());
Resolver registry("", func_registry.InternalGetRegistry(), type_registry,
value_factory, type_registry.resolveable_enums());
expr_ast->reference_map()[2].set_name("foo.bar.var1");
expr_ast->reference_map()[5].set_name("bar.foo.Enum.ENUM_VAL1");
expr_ast->reference_map()[5].mutable_value().set_int64_value(9);
IssueCollector issues(RuntimeIssue::Severity::kError);
auto result = ResolveReferences(registry, issues, *expr_ast);
ASSERT_THAT(result, IsOkAndHolds(true));
google::api::expr::v1alpha1::Expr expected_expr;
google::protobuf::TextFormat::ParseFromString(R"pb(
id: 1
call_expr {
function: "_==_"
args {
id: 2
ident_expr { name: "foo.bar.var1" }
}
args {
id: 5
const_expr { int64_value: 9 }
}
})pb",
&expected_expr);
EXPECT_EQ(expr_ast->root_expr(),
ConvertProtoExprToNative(expected_expr).value());
}
TEST(ResolveReferences, EnumConstReferenceUsedSelect) {
std::unique_ptr<AstImpl> expr_ast = ParseTestProto(kEnumExpr);
SourceInfo source_info;
CelFunctionRegistry func_registry;
ASSERT_OK(RegisterBuiltinFunctions(&func_registry));
cel::TypeRegistry type_registry;
cel::common_internal::LegacyValueManager value_factory(
cel::MemoryManagerRef::ReferenceCounting(),
type_registry.GetComposedTypeProvider());
Resolver registry("", func_registry.InternalGetRegistry(), type_registry,
value_factory, type_registry.resolveable_enums());
expr_ast->reference_map()[2].set_name("foo.bar.var1");
expr_ast->reference_map()[2].mutable_value().set_int64_value(2);
expr_ast->reference_map()[5].set_name("bar.foo.Enum.ENUM_VAL1");
expr_ast->reference_map()[5].mutable_value().set_int64_value(9);
IssueCollector issues(RuntimeIssue::Severity::kError);
auto result = ResolveReferences(registry, issues, *expr_ast);
ASSERT_THAT(result, IsOkAndHolds(true));
google::api::expr::v1alpha1::Expr expected_expr;
google::protobuf::TextFormat::ParseFromString(R"pb(
id: 1
call_expr {
function: "_==_"
args {
id: 2
const_expr { int64_value: 2 }
}
args {
id: 5
const_expr { int64_value: 9 }
}
})pb",
&expected_expr);
EXPECT_EQ(expr_ast->root_expr(),
ConvertProtoExprToNative(expected_expr).value());
}
TEST(ResolveReferences, ConstReferenceSkipped) {
std::unique_ptr<AstImpl> expr_ast = ParseTestProto(kExpr);
SourceInfo source_info;
CelFunctionRegistry func_registry;
ASSERT_OK(RegisterBuiltinFunctions(&func_registry));
cel::TypeRegistry type_registry;
cel::common_internal::LegacyValueManager value_factory(
cel::MemoryManagerRef::ReferenceCounting(),
type_registry.GetComposedTypeProvider());
Resolver registry("", func_registry.InternalGetRegistry(), type_registry,
value_factory, type_registry.resolveable_enums());
expr_ast->reference_map()[2].set_name("foo.bar.var1");
expr_ast->reference_map()[2].mutable_value().set_bool_value(true);
expr_ast->reference_map()[5].set_name("bar.foo.var2");
IssueCollector issues(RuntimeIssue::Severity::kError);
auto result = ResolveReferences(registry, issues, *expr_ast);
ASSERT_THAT(result, IsOkAndHolds(true));
google::api::expr::v1alpha1::Expr expected_expr;
google::protobuf::TextFormat::ParseFromString(R"pb(
id: 1
call_expr {
function: "_&&_"
args {
id: 2
select_expr {
field: "var1"
operand {
id: 3
select_expr {
field: "bar"
operand {
id: 4
ident_expr { name: "foo" }
}
}
}
}
}
args {
id: 5
ident_expr { name: "bar.foo.var2" }
}
})pb",
&expected_expr);
EXPECT_EQ(expr_ast->root_expr(),
ConvertProtoExprToNative(expected_expr).value());
}
constexpr char kExtensionAndExpr[] = R"(
id: 1
call_expr {
function: "boolean_and"
args {
id: 2
const_expr {
bool_value: true
}
}
args {
id: 3
const_expr {
bool_value: false
}
}
})";
TEST(ResolveReferences, FunctionReferenceBasic) {
std::unique_ptr<AstImpl> expr_ast = ParseTestProto(kExtensionAndExpr);
SourceInfo source_info;
CelFunctionRegistry func_registry;
ASSERT_OK(func_registry.RegisterLazyFunction(
CelFunctionDescriptor("boolean_and", false,
{
CelValue::Type::kBool,
CelValue::Type::kBool,
})));
cel::TypeRegistry type_registry;
cel::common_internal::LegacyValueManager value_factory(
cel::MemoryManagerRef::ReferenceCounting(),
type_registry.GetComposedTypeProvider());
Resolver registry("", func_registry.InternalGetRegistry(), type_registry,
value_factory, type_registry.resolveable_enums());
IssueCollector issues(RuntimeIssue::Severity::kError);
expr_ast->reference_map()[1].mutable_overload_id().push_back(
"udf_boolean_and");
auto result = ResolveReferences(registry, issues, *expr_ast);
ASSERT_THAT(result, IsOkAndHolds(false));
}
TEST(ResolveReferences, FunctionReferenceMissingOverloadDetected) {
std::unique_ptr<AstImpl> expr_ast = ParseTestProto(kExtensionAndExpr);
SourceInfo source_info;
CelFunctionRegistry func_registry;
cel::TypeRegistry type_registry;
cel::common_internal::LegacyValueManager value_factory(
cel::MemoryManagerRef::ReferenceCounting(),
type_registry.GetComposedTypeProvider());
Resolver registry("", func_registry.InternalGetRegistry(), type_registry,
value_factory, type_registry.resolveable_enums());
IssueCollector issues(RuntimeIssue::Severity::kError);
expr_ast->reference_map()[1].mutable_overload_id().push_back(
"udf_boolean_and");
auto result = ResolveReferences(registry, issues, *expr_ast);
ASSERT_THAT(result, IsOkAndHolds(false));
EXPECT_THAT(ExtractIssuesStatus(issues),
ElementsAre(StatusCodeIs(absl::StatusCode::kInvalidArgument)));
}
TEST(ResolveReferences, SpecialBuiltinsNotWarned) {
std::unique_ptr<AstImpl> expr_ast = ParseTestProto(R"pb(
id: 1
call_expr {
function: "*"
args {
id: 2
const_expr { bool_value: true }
}
args {
id: 3
const_expr { bool_value: false }
}
})pb");
SourceInfo source_info;
std::vector<const char*> special_builtins{
cel::builtin::kAnd, cel::builtin::kOr, cel::builtin::kTernary,
cel::builtin::kIndex};
for (const char* builtin_fn : special_builtins) {
CelFunctionRegistry func_registry;
cel::TypeRegistry type_registry;
cel::common_internal::LegacyValueManager value_factory(
cel::MemoryManagerRef::ReferenceCounting(),
type_registry.GetComposedTypeProvider());
Resolver registry("", func_registry.InternalGetRegistry(), type_registry,
value_factory, type_registry.resolveable_enums());
IssueCollector issues(RuntimeIssue::Severity::kError);
expr_ast->reference_map()[1].mutable_overload_id().push_back(
absl::StrCat("builtin.", builtin_fn));
expr_ast->root_expr().mutable_call_expr().set_function(builtin_fn);
auto result = ResolveReferences(registry, issues, *expr_ast);
ASSERT_THAT(result, IsOkAndHolds(false));
EXPECT_THAT(ExtractIssuesStatus(issues), IsEmpty());
}
}
TEST(ResolveReferences,
FunctionReferenceMissingOverloadDetectedAndMissingReference) {
std::unique_ptr<AstImpl> expr_ast = ParseTestProto(kExtensionAndExpr);
SourceInfo source_info;
CelFunctionRegistry func_registry;
cel::TypeRegistry type_registry;
cel::common_internal::LegacyValueManager value_factory(
cel::MemoryManagerRef::ReferenceCounting(),
type_registry.GetComposedTypeProvider());
Resolver registry("", func_registry.InternalGetRegistry(), type_registry,
value_factory, type_registry.resolveable_enums());
IssueCollector issues(RuntimeIssue::Severity::kError);
expr_ast->reference_map()[1].set_name("udf_boolean_and");
auto result = ResolveReferences(registry, issues, *expr_ast);
ASSERT_THAT(result, IsOkAndHolds(false));
EXPECT_THAT(
ExtractIssuesStatus(issues),
UnorderedElementsAre(
Eq(absl::InvalidArgumentError(
"No overload found in reference resolve step for boolean_and")),
Eq(absl::InvalidArgumentError(
"Reference map doesn't provide overloads for boolean_and"))));
}
TEST(ResolveReferences, EmulatesEagerFailing) {
std::unique_ptr<AstImpl> expr_ast = ParseTestProto(kExtensionAndExpr);
SourceInfo source_info;
CelFunctionRegistry func_registry;
cel::TypeRegistry type_registry;
cel::common_internal::LegacyValueManager value_factory(
cel::MemoryManagerRef::ReferenceCounting(),
type_registry.GetComposedTypeProvider());
Resolver registry("", func_registry.InternalGetRegistry(), type_registry,
value_factory, type_registry.resolveable_enums());
IssueCollector issues(RuntimeIssue::Severity::kWarning);
expr_ast->reference_map()[1].set_name("udf_boolean_and");
EXPECT_THAT(
ResolveReferences(registry, issues, *expr_ast),
StatusIs(absl::StatusCode::kInvalidArgument,
"Reference map doesn't provide overloads for boolean_and"));
}
TEST(ResolveReferences, FunctionReferenceToWrongExprKind) {
std::unique_ptr<AstImpl> expr_ast = ParseTestProto(kExtensionAndExpr);
SourceInfo source_info;
IssueCollector issues(RuntimeIssue::Severity::kError);
CelFunctionRegistry func_registry;
cel::TypeRegistry type_registry;
cel::common_internal::LegacyValueManager value_factory(
cel::MemoryManagerRef::ReferenceCounting(),
type_registry.GetComposedTypeProvider());
Resolver registry("", func_registry.InternalGetRegistry(), type_registry,
value_factory, type_registry.resolveable_enums());
expr_ast->reference_map()[2].mutable_overload_id().push_back(
"udf_boolean_and");
auto result = ResolveReferences(registry, issues, *expr_ast);
ASSERT_THAT(result, IsOkAndHolds(false));
EXPECT_THAT(ExtractIssuesStatus(issues),
ElementsAre(StatusCodeIs(absl::StatusCode::kInvalidArgument)));
}
constexpr char kReceiverCallExtensionAndExpr[] = R"(
id: 1
call_expr {
function: "boolean_and"
target {
id: 2
ident_expr {
name: "ext"
}
}
args {
id: 3
const_expr {
bool_value: false
}
}
})";
TEST(ResolveReferences, FunctionReferenceWithTargetNoChange) {
std::unique_ptr<AstImpl> expr_ast =
ParseTestProto(kReceiverCallExtensionAndExpr);
SourceInfo source_info;
IssueCollector issues(RuntimeIssue::Severity::kError);
CelFunctionRegistry func_registry;
ASSERT_OK(func_registry.RegisterLazyFunction(CelFunctionDescriptor(
"boolean_and", true, {CelValue::Type::kBool, CelValue::Type::kBool})));
cel::TypeRegistry type_registry;
cel::common_internal::LegacyValueManager value_factory(
cel::MemoryManagerRef::ReferenceCounting(),
type_registry.GetComposedTypeProvider());
Resolver registry("", func_registry.InternalGetRegistry(), type_registry,
value_factory, type_registry.resolveable_enums());
expr_ast->reference_map()[1].mutable_overload_id().push_back(
"udf_boolean_and");
auto result = ResolveReferences(registry, issues, *expr_ast);
ASSERT_THAT(result, IsOkAndHolds(false));
EXPECT_THAT(ExtractIssuesStatus(issues), IsEmpty());
}
TEST(ResolveReferences,
FunctionReferenceWithTargetNoChangeMissingOverloadDetected) {
std::unique_ptr<AstImpl> expr_ast =
ParseTestProto(kReceiverCallExtensionAndExpr);
SourceInfo source_info;
IssueCollector issues(RuntimeIssue::Severity::kError);
CelFunctionRegistry func_registry;
cel::TypeRegistry type_registry;
cel::common_internal::LegacyValueManager value_factory(
cel::MemoryManagerRef::ReferenceCounting(),
type_registry.GetComposedTypeProvider());
Resolver registry("", func_registry.InternalGetRegistry(), type_registry,
value_factory, type_registry.resolveable_enums());
expr_ast->reference_map()[1].mutable_overload_id().push_back(
"udf_boolean_and");
auto result = ResolveReferences(registry, issues, *expr_ast);
ASSERT_THAT(result, IsOkAndHolds(false));
EXPECT_THAT(ExtractIssuesStatus(issues),
ElementsAre(StatusCodeIs(absl::StatusCode::kInvalidArgument)));
}
TEST(ResolveReferences, FunctionReferenceWithTargetToNamespacedFunction) {
std::unique_ptr<AstImpl> expr_ast =
ParseTestProto(kReceiverCallExtensionAndExpr);
SourceInfo source_info;
IssueCollector issues(RuntimeIssue::Severity::kError);
CelFunctionRegistry func_registry;
ASSERT_OK(func_registry.RegisterLazyFunction(CelFunctionDescriptor(
"ext.boolean_and", false, {CelValue::Type::kBool})));
cel::TypeRegistry type_registry;
cel::common_internal::LegacyValueManager value_factory(
cel::MemoryManagerRef::ReferenceCounting(),
type_registry.GetComposedTypeProvider());
Resolver registry("", func_registry.InternalGetRegistry(), type_registry,
value_factory, type_registry.resolveable_enums());
expr_ast->reference_map()[1].mutable_overload_id().push_back(
"udf_boolean_and");
auto result = ResolveReferences(registry, issues, *expr_ast);
ASSERT_THAT(result, IsOkAndHolds(true));
google::api::expr::v1alpha1::Expr expected_expr;
google::protobuf::TextFormat::ParseFromString(R"pb(
id: 1
call_expr {
function: "ext.boolean_and"
args {
id: 3
const_expr { bool_value: false }
}
}
)pb",
&expected_expr);
EXPECT_EQ(expr_ast->root_expr(),
ConvertProtoExprToNative(expected_expr).value());
EXPECT_THAT(ExtractIssuesStatus(issues), IsEmpty());
}
TEST(ResolveReferences,
FunctionReferenceWithTargetToNamespacedFunctionInContainer) {
std::unique_ptr<AstImpl> expr_ast =
ParseTestProto(kReceiverCallExtensionAndExpr);
SourceInfo source_info;
expr_ast->reference_map()[1].mutable_overload_id().push_back(
"udf_boolean_and");
IssueCollector issues(RuntimeIssue::Severity::kError);
CelFunctionRegistry func_registry;
ASSERT_OK(func_registry.RegisterLazyFunction(CelFunctionDescriptor(
"com.google.ext.boolean_and", false, {CelValue::Type::kBool})));
cel::TypeRegistry type_registry;
cel::common_internal::LegacyValueManager value_factory(
cel::MemoryManagerRef::ReferenceCounting(),
type_registry.GetComposedTypeProvider());
Resolver registry("com.google", func_registry.InternalGetRegistry(),
type_registry, value_factory,
type_registry.resolveable_enums());
auto result = ResolveReferences(registry, issues, *expr_ast);
ASSERT_THAT(result, IsOkAndHolds(true));
google::api::expr::v1alpha1::Expr expected_expr;
google::protobuf::TextFormat::ParseFromString(R"pb(
id: 1
call_expr {
function: "com.google.ext.boolean_and"
args {
id: 3
const_expr { bool_value: false }
}
}
)pb",
&expected_expr);
EXPECT_EQ(expr_ast->root_expr(),
ConvertProtoExprToNative(expected_expr).value());
EXPECT_THAT(ExtractIssuesStatus(issues), IsEmpty());
}
constexpr char kReceiverCallHasExtensionAndExpr[] = R"(
id: 1
call_expr {
function: "boolean_and"
target {
id: 2
select_expr {
test_only: true
field: "option"
operand {
id: 3
ident_expr {
name: "ext"
}
}
}
}
args {
id: 4
const_expr {
bool_value: false
}
}
})";
TEST(ResolveReferences, FunctionReferenceWithHasTargetNoChange) {
std::unique_ptr<AstImpl> expr_ast =
ParseTestProto(kReceiverCallHasExtensionAndExpr);
SourceInfo source_info;
IssueCollector issues(RuntimeIssue::Severity::kError);
CelFunctionRegistry func_registry;
ASSERT_OK(func_registry.RegisterLazyFunction(CelFunctionDescriptor(
"boolean_and", true, {CelValue::Type::kBool, CelValue::Type::kBool})));
ASSERT_OK(func_registry.RegisterLazyFunction(CelFunctionDescriptor(
"ext.option.boolean_and", true, {CelValue::Type::kBool})));
cel::TypeRegistry type_registry;
cel::common_internal::LegacyValueManager value_factory(
cel::MemoryManagerRef::ReferenceCounting(),
type_registry.GetComposedTypeProvider());
Resolver registry("", func_registry.InternalGetRegistry(), type_registry,
value_factory, type_registry.resolveable_enums());
expr_ast->reference_map()[1].mutable_overload_id().push_back(
"udf_boolean_and");
auto result = ResolveReferences(registry, issues, *expr_ast);
ASSERT_THAT(result, IsOkAndHolds(false));
google::api::expr::v1alpha1::Expr expected_expr;
google::protobuf::TextFormat::ParseFromString(kReceiverCallHasExtensionAndExpr,
&expected_expr);
EXPECT_EQ(expr_ast->root_expr(),
ConvertProtoExprToNative(expected_expr).value());
EXPECT_THAT(ExtractIssuesStatus(issues), IsEmpty());
}
constexpr char kComprehensionExpr[] = R"(
id:17
comprehension_expr: {
iter_var:"i"
iter_range:{
id:1
list_expr:{
elements:{
id:2
const_expr:{int64_value:1}
}
elements:{
id:3
ident_expr:{name:"ENUM"}
}
elements:{
id:4
const_expr:{int64_value:3}
}
}
}
accu_var:"__result__"
accu_init: {
id:10
const_expr:{bool_value:false}
}
loop_condition:{
id:13
call_expr:{
function:"@not_strictly_false"
args:{
id:12
call_expr:{
function:"!_"
args:{
id:11
ident_expr:{name:"__result__"}
}
}
}
}
}
loop_step:{
id:15
call_expr: {
function:"_||_"
args:{
id:14
ident_expr: {name:"__result__"}
}
args:{
id:8
call_expr:{
function:"_==_"
args:{
id:7 ident_expr:{name:"ENUM"}
}
args:{
id:9 ident_expr:{name:"i"}
}
}
}
}
}
result:{id:16 ident_expr:{name:"__result__"}}
}
)";
TEST(ResolveReferences, EnumConstReferenceUsedInComprehension) {
std::unique_ptr<AstImpl> expr_ast = ParseTestProto(kComprehensionExpr);
SourceInfo source_info;
CelFunctionRegistry func_registry;
ASSERT_OK(RegisterBuiltinFunctions(&func_registry));
cel::TypeRegistry type_registry;
cel::common_internal::LegacyValueManager value_factory(
cel::MemoryManagerRef::ReferenceCounting(),
type_registry.GetComposedTypeProvider());
Resolver registry("", func_registry.InternalGetRegistry(), type_registry,
value_factory, type_registry.resolveable_enums());
expr_ast->reference_map()[3].set_name("ENUM");
expr_ast->reference_map()[3].mutable_value().set_int64_value(2);
expr_ast->reference_map()[7].set_name("ENUM");
expr_ast->reference_map()[7].mutable_value().set_int64_value(2);
IssueCollector issues(RuntimeIssue::Severity::kError);
auto result = ResolveReferences(registry, issues, *expr_ast);
ASSERT_THAT(result, IsOkAndHolds(true));
google::api::expr::v1alpha1::Expr expected_expr;
google::protobuf::TextFormat::ParseFromString(
R"pb(
id: 17
comprehension_expr {
iter_var: "i"
iter_range {
id: 1
list_expr {
elements {
id: 2
const_expr { int64_value: 1 }
}
elements {
id: 3
const_expr { int64_value: 2 }
}
elements {
id: 4
const_expr { int64_value: 3 }
}
}
}
accu_var: "__result__"
accu_init {
id: 10
const_expr { bool_value: false }
}
loop_condition {
id: 13
call_expr {
function: "@not_strictly_false"
args {
id: 12
call_expr {
function: "!_"
args {
id: 11
ident_expr { name: "__result__" }
}
}
}
}
}
loop_step {
id: 15
call_expr {
function: "_||_"
args {
id: 14
ident_expr { name: "__result__" }
}
args {
id: 8
call_expr {
function: "_==_"
args {
id: 7
const_expr { int64_value: 2 }
}
args {
id: 9
ident_expr { name: "i" }
}
}
}
}
}
result {
id: 16
ident_expr { name: "__result__" }
}
})pb",
&expected_expr);
EXPECT_EQ(expr_ast->root_expr(),
ConvertProtoExprToNative(expected_expr).value());
}
TEST(ResolveReferences, ReferenceToId0Warns) {
std::unique_ptr<AstImpl> expr_ast = ParseTestProto(R"pb(
id: 0
select_expr {
operand {
id: 1
ident_expr { name: "pkg" }
}
field: "var"
})pb");
SourceInfo source_info;
CelFunctionRegistry func_registry;
ASSERT_OK(RegisterBuiltinFunctions(&func_registry));
cel::TypeRegistry type_registry;
cel::common_internal::LegacyValueManager value_factory(
cel::MemoryManagerRef::ReferenceCounting(),
type_registry.GetComposedTypeProvider());
Resolver registry("", func_registry.InternalGetRegistry(), type_registry,
value_factory, type_registry.resolveable_enums());
expr_ast->reference_map()[0].set_name("pkg.var");
IssueCollector issues(RuntimeIssue::Severity::kError);
auto result = ResolveReferences(registry, issues, *expr_ast);
ASSERT_THAT(result, IsOkAndHolds(false));
google::api::expr::v1alpha1::Expr expected_expr;
google::protobuf::TextFormat::ParseFromString(R"pb(
id: 0
select_expr {
operand {
id: 1
ident_expr { name: "pkg" }
}
field: "var"
})pb",
&expected_expr);
EXPECT_EQ(expr_ast->root_expr(),
ConvertProtoExprToNative(expected_expr).value());
EXPECT_THAT(
ExtractIssuesStatus(issues),
Contains(StatusIs(
absl::StatusCode::kInvalidArgument,
"reference map entries for expression id 0 are not supported")));
}
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/eval/compiler/qualified_reference_resolver.cc | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/eval/compiler/qualified_reference_resolver_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
bb168610-5f08-458f-9cab-96ab93874d84 | cpp | tensorflow/tensorflow | call_options | third_party/xla/xla/tsl/distributed_runtime/call_options.cc | tensorflow/core/distributed_runtime/call_options_test.cc | #include "xla/tsl/distributed_runtime/call_options.h"
#include <utility>
#include "tsl/platform/mutex.h"
namespace tsl {
CallOptions::CallOptions() = default;
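// Runs the currently registered cancellation callback, if any; calling
// StartCancel before a callback has been registered is a no-op.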
void CallOptions::StartCancel() {
mutex_lock l(mu_);
if (cancel_func_ != nullptr) {
cancel_func_();
}
}
void CallOptions::SetCancelCallback(CancelFunction cancel_func) {
mutex_lock l(mu_);
cancel_func_ = std::move(cancel_func);
}
void CallOptions::ClearCancelCallback() {
mutex_lock l(mu_);
cancel_func_ = nullptr;
}
int64_t CallOptions::GetTimeout() {
mutex_lock l(mu_);
return timeout_in_ms_;
}
void CallOptions::SetTimeout(int64_t ms) {
mutex_lock l(mu_);
timeout_in_ms_ = ms;
}
} | #include "tensorflow/core/distributed_runtime/call_options.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
TEST(CallOptions, Cancel) {
int num_calls = 0;
CallOptions opts;
opts.StartCancel();
EXPECT_EQ(num_calls, 0);
opts.SetCancelCallback([&num_calls]() { num_calls++; });
EXPECT_EQ(num_calls, 0);
opts.StartCancel();
EXPECT_EQ(num_calls, 1);
opts.StartCancel();
EXPECT_EQ(num_calls, 2);
opts.ClearCancelCallback();
EXPECT_EQ(num_calls, 2);
opts.StartCancel();
EXPECT_EQ(num_calls, 2);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/distributed_runtime/call_options.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/distributed_runtime/call_options_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
7a098cd4-54c3-4324-ad2c-669555b1b427 | cpp | tensorflow/tensorflow | int32_fulltype | tensorflow/core/common_runtime/int32_fulltype.cc | tensorflow/core/common_runtime/int32_fulltype_test.cc | #include "tensorflow/core/common_runtime/int32_fulltype.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/core/framework/device_attributes.pb.h"
#include "tensorflow/core/framework/full_type.pb.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tsl/protobuf/error_codes.pb.h"
namespace tensorflow {
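// Rewrites the full type of an int32 tensor from TFT_TENSOR (or TFT_UNSET)
// to TFT_SHAPE_TENSOR so that placement can treat it as a host-resident
// shape tensor.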
Status Int32FulltypePass::Int32FullTypeForTensor(DataType dtype,
FullTypeDef* tensor_t,
bool set_only_int32,
Node* node, int output_idx) {
if (tensor_t->type_id() == TFT_TENSOR) {
if (tensor_t->args_size() != 1) {
if (node != nullptr) {
return Status(
absl::StatusCode::kInvalidArgument,
absl::StrCat("Full type for node='", node->name(), "' (op='",
node->op_def().name(), "') in '", debug_location_,
"' has TFT_TENSOR output ", output_idx, " which has ",
tensor_t->args_size(), " args instead of 1.\n got:\n",
tensor_t->DebugString()));
} else {
return Status(absl::StatusCode::kInvalidArgument,
absl::StrCat("TFT_TENSOR has ", tensor_t->args_size(),
" args instead of 1.\n got:\n",
tensor_t->DebugString()));
}
}
if (tensor_t->args(0).type_id() == TFT_INT32) {
tensor_t->set_type_id(TFT_SHAPE_TENSOR);
}
} else if ((tensor_t->type_id() == TFT_UNSET) &&
((dtype == DT_INT32) || !set_only_int32)) {
FullTypeDef data_t;
map_dtype_to_tensor(dtype, data_t);
tensor_t->set_type_id(TFT_SHAPE_TENSOR);
(*tensor_t->add_args()) = data_t;
}
return absl::OkStatus();
}
static bool is_host_memory_int32(MemoryType mtype, DataType dtype) {
return (mtype == HOST_MEMORY) && (dtype == DT_INT32);
}
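// Visits every op node whose outputs include a host-memory int32 tensor and
// either patches its existing full type annotation or synthesizes a fresh
// TFT_PRODUCT annotation with TFT_SHAPE_TENSOR for those outputs.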
Status Int32FulltypePass::ProcessGraph(Graph* graph, bool ints_on_device) {
for (Node* n : graph->op_nodes()) {
auto output_types = n->output_types();
bool needs_annotation = false;
for (const auto& output_type : output_types) {
MemoryType mtype = ints_on_device
? MTypeFromDTypeIntsOnDevice(output_type)
: MTypeFromDType(output_type);
if (is_host_memory_int32(mtype, output_type)) {
needs_annotation = true;
}
}
if (!needs_annotation) {
continue;
}
if (n->def().has_experimental_type()) {
FullTypeDef* node_t = n->mutable_def()->mutable_experimental_type();
if (node_t->type_id() != TFT_PRODUCT) {
return Status(
absl::StatusCode::kInvalidArgument,
absl::StrCat("Full type for node='", n->name(), "' (op='",
n->op_def().name(),
"') does not start with TFT_PRODUCT.\n got:\n",
node_t->DebugString()));
}
if (node_t->args_size() != output_types.size()) {
return Status(
absl::StatusCode::kInvalidArgument,
absl::StrCat("Full type for node='", n->name(), "' (op='",
n->op_def().name(), "') has ", node_t->args_size(),
" outputs but output_types has ", output_types.size(),
" outputs.\n got:\n", node_t->DebugString()));
}
for (int i = 0; i < node_t->args_size(); ++i) {
if (MTypeFromDType(output_types[i]) == HOST_MEMORY) {
TF_RETURN_IF_ERROR(
Int32FullTypeForTensor(output_types[i], node_t->mutable_args(i),
                                   /*set_only_int32=*/true, n, i));
}
}
VLOG(2) << "Full type information in node '" << n->name() << "' (op='"
<< n->op_def().name()
<< "') modified to use TFT_SHAPE_TENSOR for int32.\n"
<< node_t->DebugString();
} else {
FullTypeDef t;
t.set_type_id(TFT_PRODUCT);
for (const auto& output_type : output_types) {
MemoryType mtype = ints_on_device
? MTypeFromDTypeIntsOnDevice(output_type)
: MTypeFromDType(output_type);
if (is_host_memory_int32(mtype, output_type)) {
FullTypeDef data_t;
map_dtype_to_tensor(output_type, data_t);
FullTypeDef out_t;
out_t.set_type_id(TFT_SHAPE_TENSOR);
(*out_t.add_args()) = data_t;
(*t.add_args()) = out_t;
} else {
t.add_args();
}
}
(*n->mutable_def()->mutable_experimental_type()) = t;
VLOG(2) << "Full type information with TFT_SHAPE_TENSOR for int32 added "
"to node '"
<< n->name() << "' (op='" << n->op_def().name() << "').\n"
<< t.DebugString();
}
}
return absl::OkStatus();
}
} | #include "tensorflow/core/common_runtime/int32_fulltype.h"
#include <string>
#include <unordered_map>
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/common_runtime/graph_def_builder_util.h"
#include "tensorflow/core/framework/device_attributes.pb.h"
#include "tensorflow/core/framework/full_type.pb.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/graph_def_builder.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/config.pb.h"
#include "tensorflow/core/protobuf/rewriter_config.pb.h"
namespace tensorflow {
namespace {
REGISTER_OP("FloatInt32").Output("a: float").Output("b: int32");
REGISTER_OP("FloatInt32Int32FT")
.Output("a: float")
.Output("b: int32")
.Output("c: int32");
REGISTER_OP("FloatWithoutInt32").Output("a: float");
REGISTER_OP("StringWithoutInt32").Output("a: string");
class Int32FulltypeTest : public ::testing::Test {
protected:
Int32FulltypeTest() {}
Status BuildGraph(const GraphDefBuilder& builder, Graph* out_graph) {
TF_RETURN_IF_ERROR(GraphDefBuilderToGraph(builder, out_graph));
RebuildNodeNameMap(*out_graph);
return absl::OkStatus();
}
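  // Appends to `t` an argument of the form out_t_id[data_t_id], or an empty
  // (unset) argument when out_t_id is TFT_UNSET.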
void AddTensorFT(FullTypeDef& t, tensorflow::FullTypeId out_t_id,
tensorflow::FullTypeId data_t_id) {
FullTypeDef out_t;
FullTypeDef data_t;
if (out_t_id != TFT_UNSET) {
data_t.set_type_id(data_t_id);
out_t.set_type_id(out_t_id);
(*out_t.add_args()) = data_t;
}
(*t.add_args()) = out_t;
}
Status Int32FulltypeAnnotate(Graph* graph, bool ints_on_device = false) {
Int32FulltypePass int32_fulltype;
return int32_fulltype.ProcessGraph(graph, ints_on_device);
}
Node* GetNodeByName(const Graph& graph, const string& name) {
const auto search = nodes_by_name_.find(name);
CHECK(search != nodes_by_name_.end()) << "Unknown node name: " << name;
return graph.FindNodeId(search->second);
}
protected:
std::unordered_map<string, int> nodes_by_name_;
private:
void RebuildNodeNameMap(const Graph& graph) {
nodes_by_name_.clear();
for (Node* node : graph.nodes()) {
nodes_by_name_[node->name()] = node->id();
}
}
};
TEST_F(Int32FulltypeTest, CreateFT) {
Graph g(OpRegistry::Global());
{
GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
ops::SourceOp("FloatInt32", b.opts().WithName("float_int32"));
TF_EXPECT_OK(BuildGraph(b, &g));
}
TF_EXPECT_OK(Int32FulltypeAnnotate(&g));
Node* node = GetNodeByName(g, "float_int32");
ASSERT_TRUE(node->def().has_experimental_type());
const FullTypeDef& ft = node->def().experimental_type();
ASSERT_EQ(ft.type_id(), TFT_PRODUCT);
ASSERT_EQ(ft.args_size(), 2);
ASSERT_EQ(ft.args(0).type_id(), TFT_UNSET);
ASSERT_EQ(ft.args(1).type_id(), TFT_SHAPE_TENSOR);
ASSERT_EQ(ft.args(1).args_size(), 1);
ASSERT_EQ(ft.args(1).args(0).type_id(), TFT_INT32);
}
TEST_F(Int32FulltypeTest, ModifyFT) {
Graph g(OpRegistry::Global());
{
GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
Node* node = ops::SourceOp("FloatInt32Int32FT",
b.opts().WithName("float_int32_int32"));
node->mutable_def()->mutable_experimental_type()->set_type_id(TFT_PRODUCT);
FullTypeDef& t = *node->mutable_def()->mutable_experimental_type();
AddTensorFT(t, TFT_TENSOR, TFT_FLOAT);
AddTensorFT(t, TFT_TENSOR, TFT_INT32);
AddTensorFT(t, TFT_SHAPE_TENSOR, TFT_INT32);
TF_EXPECT_OK(BuildGraph(b, &g));
}
TF_EXPECT_OK(Int32FulltypeAnnotate(&g));
Node* node = GetNodeByName(g, "float_int32_int32");
ASSERT_TRUE(node->def().has_experimental_type());
const FullTypeDef& ft = node->def().experimental_type();
ASSERT_EQ(ft.type_id(), TFT_PRODUCT);
ASSERT_EQ(ft.args_size(), 3);
ASSERT_EQ(ft.args(0).type_id(), TFT_TENSOR);
ASSERT_EQ(ft.args(0).args_size(), 1);
ASSERT_EQ(ft.args(0).args(0).type_id(), TFT_FLOAT);
ASSERT_EQ(ft.args(1).type_id(), TFT_SHAPE_TENSOR);
ASSERT_EQ(ft.args(1).args_size(), 1);
ASSERT_EQ(ft.args(1).args(0).type_id(), TFT_INT32);
ASSERT_EQ(ft.args(2).type_id(), TFT_SHAPE_TENSOR);
ASSERT_EQ(ft.args(2).args_size(), 1);
ASSERT_EQ(ft.args(2).args(0).type_id(), TFT_INT32);
}
TEST_F(Int32FulltypeTest, ModifyUnsetFT) {
Graph g(OpRegistry::Global());
{
GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
Node* node = ops::SourceOp("FloatInt32Int32FT",
b.opts().WithName("float_int32_int32"));
node->mutable_def()->mutable_experimental_type()->set_type_id(TFT_PRODUCT);
FullTypeDef& t = *node->mutable_def()->mutable_experimental_type();
AddTensorFT(t, TFT_UNSET, TFT_FLOAT);
AddTensorFT(t, TFT_UNSET, TFT_INT32);
AddTensorFT(t, TFT_UNSET, TFT_INT32);
TF_EXPECT_OK(BuildGraph(b, &g));
}
TF_EXPECT_OK(Int32FulltypeAnnotate(&g));
Node* node = GetNodeByName(g, "float_int32_int32");
ASSERT_TRUE(node->def().has_experimental_type());
const FullTypeDef& ft = node->def().experimental_type();
ASSERT_EQ(ft.type_id(), TFT_PRODUCT);
ASSERT_EQ(ft.args_size(), 3);
ASSERT_EQ(ft.args(0).type_id(), TFT_UNSET);
ASSERT_EQ(ft.args(0).args_size(), 0);
ASSERT_EQ(ft.args(1).type_id(), TFT_SHAPE_TENSOR);
ASSERT_EQ(ft.args(1).args_size(), 1);
ASSERT_EQ(ft.args(1).args(0).type_id(), TFT_INT32);
ASSERT_EQ(ft.args(2).type_id(), TFT_SHAPE_TENSOR);
ASSERT_EQ(ft.args(2).args_size(), 1);
ASSERT_EQ(ft.args(2).args(0).type_id(), TFT_INT32);
}
TEST_F(Int32FulltypeTest, NotCreateFTFloat) {
Graph g(OpRegistry::Global());
{
GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
ops::SourceOp("FloatWithoutInt32",
b.opts().WithName("float_without_int32"));
TF_EXPECT_OK(BuildGraph(b, &g));
}
TF_EXPECT_OK(Int32FulltypeAnnotate(&g));
Node* node = GetNodeByName(g, "float_without_int32");
ASSERT_FALSE(node->def().has_experimental_type());
}
TEST_F(Int32FulltypeTest, NotCreateFTString) {
Graph g(OpRegistry::Global());
{
GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
ops::SourceOp("StringWithoutInt32",
b.opts().WithName("string_without_int32"));
TF_EXPECT_OK(BuildGraph(b, &g));
}
TF_EXPECT_OK(Int32FulltypeAnnotate(&g));
Node* node = GetNodeByName(g, "string_without_int32");
ASSERT_FALSE(node->def().has_experimental_type());
}
TEST_F(Int32FulltypeTest, NotCreateFTIntsOnDevice) {
Graph g(OpRegistry::Global());
{
GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
ops::SourceOp("FloatInt32", b.opts().WithName("float_int32"));
TF_EXPECT_OK(BuildGraph(b, &g));
}
TF_EXPECT_OK(Int32FulltypeAnnotate(&g, true));
Node* node = GetNodeByName(g, "float_int32");
ASSERT_FALSE(node->def().has_experimental_type());
}
TEST_F(Int32FulltypeTest, BadTensorFT) {
Graph g(OpRegistry::Global());
{
GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
Node* node =
ops::SourceOp("FloatInt32", b.opts().WithName("float_without_int32"));
node->mutable_def()->mutable_experimental_type()->set_type_id(TFT_PRODUCT);
FullTypeDef& t = *node->mutable_def()->mutable_experimental_type();
t.add_args()->set_type_id(TFT_UNSET);
t.add_args()->set_type_id(TFT_TENSOR);
TF_EXPECT_OK(BuildGraph(b, &g));
}
const auto& status = Int32FulltypeAnnotate(&g);
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(),
::testing::HasSubstr("which has 0 args instead of 1."));
}
TEST_F(Int32FulltypeTest, BadFTWithoutProduct) {
Graph g(OpRegistry::Global());
{
GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
Node* node =
ops::SourceOp("FloatInt32", b.opts().WithName("float_without_int32"));
node->mutable_def()->mutable_experimental_type()->set_type_id(TFT_FLOAT);
TF_EXPECT_OK(BuildGraph(b, &g));
}
const auto& status = Int32FulltypeAnnotate(&g);
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(),
::testing::HasSubstr("does not start with TFT_PRODUCT."));
}
TEST_F(Int32FulltypeTest, BadProductFT) {
Graph g(OpRegistry::Global());
{
GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
Node* node =
ops::SourceOp("FloatInt32", b.opts().WithName("float_without_int32"));
node->mutable_def()->mutable_experimental_type()->set_type_id(TFT_PRODUCT);
TF_EXPECT_OK(BuildGraph(b, &g));
}
const auto& status = Int32FulltypeAnnotate(&g);
ASSERT_FALSE(status.ok());
EXPECT_THAT(
status.message(),
::testing::HasSubstr("has 0 outputs but output_types has 2 outputs."));
}
}  // namespace
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/int32_fulltype.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/int32_fulltype_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
f3700489-9418-431d-a2bb-ea1b501e9fba | cpp | google/quiche | moqt_bitrate_adjuster | quiche/quic/moqt/moqt_bitrate_adjuster.cc | quiche/quic/moqt/moqt_bitrate_adjuster_test.cc | #include "quiche/quic/moqt/moqt_bitrate_adjuster.h"
#include <algorithm>
#include <cstdint>
#include "quiche/quic/core/quic_bandwidth.h"
#include "quiche/quic/core/quic_time.h"
#include "quiche/common/platform/api/quiche_logging.h"
#include "quiche/web_transport/web_transport.h"
namespace moqt {
namespace {
using ::quic::QuicBandwidth;
using ::quic::QuicTime;
using ::quic::QuicTimeDelta;
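// When adjusting down, the new bitrate targets this fraction of the estimated
// send rate. Adjustments happen at most once per
// min(kMinTimeBetweenAdjustmentsInRtts * smoothed_rtt,
//     kMaxTimeBetweenAdjustments).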
constexpr float kTargetBitrateMultiplier = 0.9f;
constexpr float kMinTimeBetweenAdjustmentsInRtts = 40;
constexpr QuicTimeDelta kMaxTimeBetweenAdjustments =
QuicTimeDelta::FromSeconds(3);
}  // namespace
void MoqtBitrateAdjuster::OnObjectAckReceived(
    uint64_t /*group_id*/, uint64_t /*object_id*/,
QuicTimeDelta delta_from_deadline) {
if (delta_from_deadline < QuicTimeDelta::Zero()) {
AttemptAdjustingDown();
}
}
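// Lowers the bitrate towards kTargetBitrateMultiplier times the current send
// rate estimate, unless an adjustment happened too recently or the current
// bitrate is already at or below that target.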
void MoqtBitrateAdjuster::AttemptAdjustingDown() {
webtransport::SessionStats stats = session_->GetSessionStats();
QuicTimeDelta adjustment_delay =
QuicTimeDelta(stats.smoothed_rtt * kMinTimeBetweenAdjustmentsInRtts);
adjustment_delay = std::min(adjustment_delay, kMaxTimeBetweenAdjustments);
QuicTime now = clock_->ApproximateNow();
if (now - last_adjustment_time_ < adjustment_delay) {
return;
}
QuicBandwidth target_bandwidth =
kTargetBitrateMultiplier *
QuicBandwidth::FromBitsPerSecond(stats.estimated_send_rate_bps);
QuicBandwidth current_bandwidth = adjustable_->GetCurrentBitrate();
if (current_bandwidth <= target_bandwidth) {
return;
}
QUICHE_DLOG(INFO) << "Adjusting the bitrate from " << current_bandwidth
<< " to " << target_bandwidth;
bool success = adjustable_->AdjustBitrate(target_bandwidth);
if (success) {
last_adjustment_time_ = now;
}
}
void MoqtBitrateAdjuster::OnObjectAckSupportKnown(bool supported) {
QUICHE_DLOG_IF(WARNING, !supported)
<< "OBJECT_ACK not supported; bitrate adjustments will not work.";
}
} | #include "quiche/quic/moqt/moqt_bitrate_adjuster.h"
#include "quiche/quic/core/quic_bandwidth.h"
#include "quiche/quic/core/quic_time.h"
#include "quiche/quic/test_tools/mock_clock.h"
#include "quiche/common/platform/api/quiche_test.h"
#include "quiche/web_transport/test_tools/mock_web_transport.h"
#include "quiche/web_transport/web_transport.h"
namespace moqt::test {
namespace {
using ::quic::QuicBandwidth;
using ::quic::QuicTimeDelta;
using ::testing::_;
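// A BitrateAdjustable that stores the requested bitrate and exposes a mock
// hook, OnBitrateAdjusted(), through which tests observe adjustments.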
class MockBitrateAdjustable : public BitrateAdjustable {
public:
explicit MockBitrateAdjustable(QuicBandwidth initial_bitrate)
: bitrate_(initial_bitrate) {}
QuicBandwidth GetCurrentBitrate() const override { return bitrate_; }
bool AdjustBitrate(QuicBandwidth bandwidth) override {
bitrate_ = bandwidth;
OnBitrateAdjusted(bandwidth);
return true;
}
MOCK_METHOD(void, OnBitrateAdjusted, (QuicBandwidth new_bitrate), ());
private:
QuicBandwidth bitrate_;
};
constexpr QuicBandwidth kDefaultBitrate =
QuicBandwidth::FromBitsPerSecond(2000);
constexpr QuicTimeDelta kDefaultRtt = QuicTimeDelta::FromMilliseconds(20);
class MoqtBitrateAdjusterTest : public quiche::test::QuicheTest {
protected:
MoqtBitrateAdjusterTest()
: adjustable_(kDefaultBitrate),
adjuster_(&clock_, &session_, &adjustable_) {
stats_.min_rtt = stats_.smoothed_rtt = kDefaultRtt.ToAbsl();
stats_.estimated_send_rate_bps = (1.2 * kDefaultBitrate).ToBitsPerSecond();
ON_CALL(session_, GetSessionStats()).WillByDefault([this] {
return stats_;
});
}
MockBitrateAdjustable adjustable_;
webtransport::SessionStats stats_;
quic::MockClock clock_;
webtransport::test::MockSession session_;
MoqtBitrateAdjuster adjuster_;
};
TEST_F(MoqtBitrateAdjusterTest, SteadyState) {
stats_.estimated_send_rate_bps = 1;
EXPECT_CALL(adjustable_, OnBitrateAdjusted(_)).Times(0);
for (int i = 0; i < 250; ++i) {
clock_.AdvanceTime(kDefaultRtt);
for (int j = 0; j < 10; ++j) {
adjuster_.OnObjectAckReceived(i, j, kDefaultRtt * 2);
}
}
}
TEST_F(MoqtBitrateAdjusterTest, AdjustDownOnce) {
stats_.estimated_send_rate_bps = (0.5 * kDefaultBitrate).ToBitsPerSecond();
EXPECT_CALL(adjustable_, OnBitrateAdjusted(_)).Times(0);
adjuster_.OnObjectAckReceived(0, 0, QuicTimeDelta::FromMilliseconds(-1));
clock_.AdvanceTime(100 * kDefaultRtt);
EXPECT_CALL(adjustable_, OnBitrateAdjusted(_))
.WillOnce([](QuicBandwidth new_bitrate) {
EXPECT_LT(new_bitrate, kDefaultBitrate);
});
adjuster_.OnObjectAckReceived(0, 1, QuicTimeDelta::FromMilliseconds(-1));
}
TEST_F(MoqtBitrateAdjusterTest, AdjustDownTwice) {
int adjusted_times = 0;
EXPECT_CALL(adjustable_, OnBitrateAdjusted(_)).WillRepeatedly([&] {
++adjusted_times;
});
clock_.AdvanceTime(100 * kDefaultRtt);
stats_.estimated_send_rate_bps = (0.5 * kDefaultBitrate).ToBitsPerSecond();
adjuster_.OnObjectAckReceived(0, 0, QuicTimeDelta::FromMilliseconds(-1));
EXPECT_EQ(adjusted_times, 1);
clock_.AdvanceTime(100 * kDefaultRtt);
stats_.estimated_send_rate_bps = (0.25 * kDefaultBitrate).ToBitsPerSecond();
adjuster_.OnObjectAckReceived(0, 1, QuicTimeDelta::FromMilliseconds(-1));
EXPECT_EQ(adjusted_times, 2);
}
TEST_F(MoqtBitrateAdjusterTest, AdjustDownSecondTimeIgnoredDueToTimeLimit) {
int adjusted_times = 0;
EXPECT_CALL(adjustable_, OnBitrateAdjusted(_)).WillRepeatedly([&] {
++adjusted_times;
});
clock_.AdvanceTime(100 * kDefaultRtt);
stats_.estimated_send_rate_bps = (0.5 * kDefaultBitrate).ToBitsPerSecond();
adjuster_.OnObjectAckReceived(0, 0, QuicTimeDelta::FromMilliseconds(-1));
EXPECT_EQ(adjusted_times, 1);
clock_.AdvanceTime(2 * kDefaultRtt);
stats_.estimated_send_rate_bps = (0.25 * kDefaultBitrate).ToBitsPerSecond();
adjuster_.OnObjectAckReceived(0, 1, QuicTimeDelta::FromMilliseconds(-1));
EXPECT_EQ(adjusted_times, 1);
}
TEST_F(MoqtBitrateAdjusterTest, AdjustDownIgnoredDueToHighBandwidthMeasured) {
EXPECT_CALL(adjustable_, OnBitrateAdjusted(_)).Times(0);
clock_.AdvanceTime(100 * kDefaultRtt);
stats_.estimated_send_rate_bps = (2.0 * kDefaultBitrate).ToBitsPerSecond();
adjuster_.OnObjectAckReceived(0, 0, QuicTimeDelta::FromMilliseconds(-1));
}
}  // namespace
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/moqt/moqt_bitrate_adjuster.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/moqt/moqt_bitrate_adjuster_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
573359dd-8239-415a-9913-bdd322cdacc7 | cpp | tensorflow/tensorflow | telemetry | tensorflow/lite/delegates/telemetry.cc | tensorflow/lite/delegates/telemetry_test.cc | #include "tensorflow/lite/delegates/telemetry.h"
#include "tensorflow/lite/acceleration/configuration/configuration_generated.h"
#include "tensorflow/lite/core/api/profiler.h"
#include "tensorflow/lite/core/c/common.h"
namespace tflite {
namespace delegates {
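// Records the delegate pointer and the associated TFLiteSettings as a general
// runtime instrumentation event on the interpreter's profiler.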
TfLiteStatus ReportDelegateSettings(TfLiteContext* context,
TfLiteDelegate* delegate,
const TFLiteSettings& settings) {
auto* profiler = reinterpret_cast<Profiler*>(context->profiler);
const int64_t event_metadata1 = reinterpret_cast<int64_t>(delegate);
const int64_t event_metadata2 = reinterpret_cast<int64_t>(&settings);
TFLITE_ADD_RUNTIME_INSTRUMENTATION_EVENT(profiler, kDelegateSettingsTag,
event_metadata1, event_metadata2);
return kTfLiteOk;
}
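// Records the 64-bit serialized DelegateStatus as a general runtime
// instrumentation event on the interpreter's profiler.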
TfLiteStatus ReportDelegateStatus(TfLiteContext* context,
TfLiteDelegate* delegate,
const DelegateStatus& status) {
auto* profiler = reinterpret_cast<Profiler*>(context->profiler);
TFLITE_ADD_RUNTIME_INSTRUMENTATION_EVENT(profiler, kDelegateStatusTag,
status.full_status(),
static_cast<int64_t>(kTfLiteOk));
return kTfLiteOk;
}
}  // namespace delegates
} | #include "tensorflow/lite/delegates/telemetry.h"
#include <cstdint>
#include <string>
#include <gtest/gtest.h>
#include "flatbuffers/buffer.h"
#include "flatbuffers/flatbuffer_builder.h"
#include "tensorflow/lite/acceleration/configuration/configuration_generated.h"
#include "tensorflow/lite/core/api/profiler.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/profiling/profile_buffer.h"
namespace tflite {
namespace delegates {
namespace {
constexpr int32_t kDummyCode = 2;
constexpr bool kDummyGpuPrecisionLossAllowed = true;
constexpr tflite::Delegate kDummyDelegate = tflite::Delegate_GPU;
constexpr DelegateStatusSource kDummySource =
DelegateStatusSource::TFLITE_NNAPI;
TEST(TelemetryTest, StatusConversion) {
DelegateStatus status(kDummySource, kDummyCode);
int64_t serialized_int = status.full_status();
DelegateStatus deserialized_status(serialized_int);
EXPECT_EQ(kDummyCode, deserialized_status.code());
EXPECT_EQ(kDummySource, deserialized_status.source());
EXPECT_EQ(serialized_int, deserialized_status.full_status());
}
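// A Profiler that validates the metadata of the delegate instrumentation
// events it receives and counts how many events were recorded.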
class DelegateProfiler : public Profiler {
public:
DelegateProfiler() {}
~DelegateProfiler() override = default;
uint32_t BeginEvent(const char* tag, EventType event_type,
int64_t event_metadata1,
int64_t event_metadata2) override {
int event_handle = -1;
if (event_type ==
Profiler::EventType::GENERAL_RUNTIME_INSTRUMENTATION_EVENT &&
std::string(tag) == kDelegateSettingsTag) {
event_buffer_.emplace_back();
event_handle = event_buffer_.size();
EXPECT_NE(event_metadata1, 0);
auto* delegate = reinterpret_cast<TfLiteDelegate*>(event_metadata1);
EXPECT_EQ(delegate->flags, kTfLiteDelegateFlagsNone);
EXPECT_NE(event_metadata2, 0);
auto* settings = reinterpret_cast<TFLiteSettings*>(event_metadata2);
EXPECT_EQ(settings->delegate(), kDummyDelegate);
EXPECT_EQ(settings->gpu_settings()->is_precision_loss_allowed(),
kDummyGpuPrecisionLossAllowed);
} else if (event_type ==
Profiler::EventType::GENERAL_RUNTIME_INSTRUMENTATION_EVENT &&
std::string(tag) == kDelegateStatusTag) {
event_buffer_.emplace_back();
event_handle = event_buffer_.size();
EXPECT_EQ(event_metadata2, static_cast<int64_t>(kTfLiteOk));
DelegateStatus reported_status(event_metadata1);
EXPECT_EQ(reported_status.source(), kDummySource);
EXPECT_EQ(reported_status.code(), kDummyCode);
}
EXPECT_NE(-1, event_handle);
return event_handle;
}
void EndEvent(uint32_t event_handle) override {
EXPECT_EQ(event_handle, event_buffer_.size());
}
int NumRecordedEvents() { return event_buffer_.size(); }
private:
std::vector<profiling::ProfileEvent> event_buffer_;
};
TEST(TelemetryTest, DelegateStatusReport) {
DelegateProfiler profiler;
TfLiteDelegate delegate = TfLiteDelegateCreate();
TfLiteContext context;
context.profiler = &profiler;
DelegateStatus status(kDummySource, kDummyCode);
EXPECT_EQ(ReportDelegateStatus(&context, &delegate, status), kTfLiteOk);
EXPECT_EQ(ReportDelegateStatus(&context, &delegate, status), kTfLiteOk);
EXPECT_EQ(profiler.NumRecordedEvents(), 2);
}
TEST(TelemetryTest, DelegateSettingsReport) {
DelegateProfiler profiler;
TfLiteDelegate delegate = TfLiteDelegateCreate();
TfLiteContext context;
context.profiler = &profiler;
flatbuffers::FlatBufferBuilder flatbuffer_builder;
flatbuffers::Offset<tflite::GPUSettings> gpu_settings =
tflite::CreateGPUSettings(
flatbuffer_builder,
          /*is_precision_loss_allowed=*/kDummyGpuPrecisionLossAllowed);
auto* tflite_settings_ptr = flatbuffers::GetTemporaryPointer(
flatbuffer_builder,
CreateTFLiteSettings(flatbuffer_builder, kDummyDelegate,
                           /*nnapi_settings=*/0,
gpu_settings));
EXPECT_EQ(ReportDelegateSettings(&context, &delegate, *tflite_settings_ptr),
kTfLiteOk);
EXPECT_EQ(profiler.NumRecordedEvents(), 1);
DelegateStatus status(kDummySource, kDummyCode);
EXPECT_EQ(ReportDelegateStatus(&context, &delegate, status), kTfLiteOk);
EXPECT_EQ(ReportDelegateStatus(&context, &delegate, status), kTfLiteOk);
EXPECT_EQ(profiler.NumRecordedEvents(), 3);
}
}  // namespace
}  // namespace delegates
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/telemetry.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/telemetry_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
1e2fd98c-981e-43d2-8f71-65c64b1a1825 | cpp | google/quiche | mock_streams | quiche/common/test_tools/mock_streams.h | quiche/common/test_tools/mock_streams_test.cc | #ifndef QUICHE_COMMON_TEST_TOOLS_MOCK_STREAMS_H_
#define QUICHE_COMMON_TEST_TOOLS_MOCK_STREAMS_H_
#include <algorithm>
#include <cstddef>
#include <string>
#include <utility>
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "quiche/common/platform/api/quiche_test.h"
#include "quiche/common/quiche_stream.h"
namespace quiche::test {
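// A mock WriteStream that, by default, reports itself as writable and appends
// everything written to an internal buffer that tests can inspect via data()
// and fin_written().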
class MockWriteStream : public quiche::WriteStream {
public:
MockWriteStream() {
ON_CALL(*this, CanWrite()).WillByDefault(testing::Return(true));
ON_CALL(*this, Writev(testing::_, testing::_))
.WillByDefault([&](absl::Span<const absl::string_view> data,
const StreamWriteOptions& options) {
return AppendToData(data, options);
});
}
MOCK_METHOD(absl::Status, Writev,
(absl::Span<const absl::string_view> data,
const StreamWriteOptions& options),
(override));
MOCK_METHOD(bool, CanWrite, (), (const, override));
absl::Status AppendToData(absl::Span<const absl::string_view> data,
const StreamWriteOptions& options) {
for (absl::string_view fragment : data) {
data_.append(fragment.data(), fragment.size());
}
ProcessOptions(options);
return absl::OkStatus();
}
void ProcessOptions(const StreamWriteOptions& options) {
fin_written_ |= options.send_fin();
}
std::string& data() { return data_; }
bool fin_written() { return fin_written_; }
private:
std::string data_;
bool fin_written_ = false;
};
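// A ReadStream backed by an external std::string: reads consume bytes from
// the front of the string, and FIN is reported once the string is exhausted
// after set_fin() has been called.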
class ReadStreamFromString : public ReadStream {
public:
explicit ReadStreamFromString(std::string* data) : data_(data) {}
ReadResult Read(absl::Span<char> buffer) override {
size_t data_to_copy = std::min(buffer.size(), data_->size());
std::copy(data_->begin(), data_->begin() + data_to_copy, buffer.begin());
*data_ = data_->substr(data_to_copy);
return ReadResult{data_to_copy, data_->empty() && fin_};
}
ReadResult Read(std::string* output) override {
size_t bytes = data_->size();
output->append(std::move(*data_));
data_->clear();
return ReadResult{bytes, fin_};
}
size_t ReadableBytes() const override { return data_->size(); }
  PeekResult PeekNextReadableRegion() const override {
PeekResult result;
result.peeked_data = *data_;
result.fin_next = data_->empty() && fin_;
result.all_data_received = fin_;
return result;
}
bool SkipBytes(size_t bytes) override {
*data_ = data_->substr(bytes);
return data_->empty() && fin_;
}
void set_fin() { fin_ = true; }
private:
std::string* data_;
bool fin_ = false;
};
}  // namespace quiche::test
#endif | #include "quiche/common/test_tools/mock_streams.h"
#include <array>
#include <string>
#include "absl/types/span.h"
#include "quiche/common/platform/api/quiche_test.h"
#include "quiche/common/quiche_stream.h"
#include "quiche/common/test_tools/quiche_test_utils.h"
namespace quiche::test {
namespace {
using ::testing::ElementsAre;
using ::testing::IsEmpty;
TEST(MockWriteStreamTest, DefaultWrite) {
MockWriteStream stream;
QUICHE_EXPECT_OK(quiche::WriteIntoStream(stream, "test"));
EXPECT_EQ(stream.data(), "test");
EXPECT_FALSE(stream.fin_written());
}
TEST(ReadStreamFromStringTest, ReadIntoSpan) {
std::string source = "abcdef";
std::array<char, 3> buffer;
ReadStreamFromString stream(&source);
EXPECT_EQ(stream.ReadableBytes(), 6);
stream.Read(absl::MakeSpan(buffer));
EXPECT_THAT(buffer, ElementsAre('a', 'b', 'c'));
EXPECT_EQ(stream.ReadableBytes(), 3);
stream.Read(absl::MakeSpan(buffer));
EXPECT_THAT(buffer, ElementsAre('d', 'e', 'f'));
EXPECT_EQ(stream.ReadableBytes(), 0);
EXPECT_THAT(source, IsEmpty());
}
TEST(ReadStreamFromStringTest, ReadIntoString) {
std::string source = "abcdef";
std::string destination;
ReadStreamFromString stream(&source);
stream.Read(&destination);
EXPECT_EQ(destination, "abcdef");
EXPECT_THAT(source, IsEmpty());
}
TEST(ReadStreamFromStringTest, PeekAndSkip) {
std::string source = "abcdef";
ReadStreamFromString stream(&source);
EXPECT_EQ(stream.PeekNextReadableRegion().peeked_data, "abcdef");
stream.SkipBytes(2);
EXPECT_EQ(stream.PeekNextReadableRegion().peeked_data, "cdef");
EXPECT_EQ(source, "cdef");
}
}  // namespace
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/common/test_tools/mock_streams.h | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/common/test_tools/mock_streams_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
afc1e4f6-41e2-420b-8809-b2b778f8d91b | cpp | tensorflow/tensorflow | linalg_ops | tensorflow/core/ops/linalg_ops.cc | tensorflow/core/ops/linalg_ops_test.cc | #include "tensorflow/core/framework/common_shape_fns.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/shape_inference.h"
namespace tensorflow {
using shape_inference::DimensionHandle;
using shape_inference::InferenceContext;
using shape_inference::ShapeHandle;
namespace {
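// Returns in <out> the shape of <input> with its two innermost dimensions
// merged into a single square dimension, i.e. batch_shape + [d, d]; fails if
// <input> has rank < 2 or the innermost dimensions cannot be merged.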
Status MakeBatchSquareMatrix(InferenceContext* c, ShapeHandle input,
ShapeHandle* out) {
ShapeHandle s;
TF_RETURN_IF_ERROR(c->WithRankAtLeast(input, 2, &s));
DimensionHandle d;
TF_RETURN_IF_ERROR(c->Merge(c->Dim(s, -2), c->Dim(s, -1), &d));
ShapeHandle batch_shape;
TF_RETURN_IF_ERROR(c->Subshape(s, 0, -2, &batch_shape));
TF_RETURN_IF_ERROR(c->Concatenate(batch_shape, c->Matrix(d, d), out));
return absl::OkStatus();
}
Status BatchUnchangedSquareShapeFn(InferenceContext* c) {
ShapeHandle out;
TF_RETURN_IF_ERROR(MakeBatchSquareMatrix(c, c->input(0), &out));
c->set_output(0, out);
return absl::OkStatus();
}
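// Shape inference for BandedTriangularSolve: <matrix> holds the bands as
// batch_shape + [num_bands, M] and <rhs> is batch_shape + [M, K]; the batch
// shapes are broadcast together and the output is output_batch_shape + [M, K].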
Status BandedTriangularSolveShapeFn(InferenceContext* c) {
ShapeHandle lhs;
ShapeHandle rhs;
TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(0), 2, &lhs));
TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(1), 2, &rhs));
DimensionHandle num_bands = c->Dim(lhs, -2);
DimensionHandle m = c->Dim(lhs, -1);
if (c->ValueKnown(num_bands) && c->Value(num_bands) <= 0) {
return errors::InvalidArgument("Number of bands must be positive, but is ",
c->Value(num_bands));
}
if (c->ValueKnown(num_bands) && c->ValueKnown(m) &&
c->Value(num_bands) > c->Value(m)) {
return errors::InvalidArgument("Number of bands ", c->Value(num_bands),
" cannot exceed the size of the matrix ",
c->Value(m));
}
ShapeHandle lhs_batch_shape;
ShapeHandle rhs_batch_shape;
ShapeHandle output_batch_shape;
TF_RETURN_IF_ERROR(c->Subshape(lhs, 0, -2, &lhs_batch_shape));
TF_RETURN_IF_ERROR(c->Subshape(rhs, 0, -2, &rhs_batch_shape));
TF_RETURN_IF_ERROR(BroadcastBinaryOpOutputShapeFnHelper(
c, lhs_batch_shape, rhs_batch_shape, true, &output_batch_shape));
TF_RETURN_IF_ERROR(c->Merge(m, c->Dim(rhs, -2), &m));
ShapeHandle out;
TF_RETURN_IF_ERROR(
c->Concatenate(output_batch_shape, c->Matrix(m, c->Dim(rhs, -1)), &out));
c->set_output(0, out);
return absl::OkStatus();
}
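// Shared shape inference for MatrixSolve-style ops: merges the batch shapes
// of <matrix> and <rhs> and produces batch_shape + [N, K], where N is the
// number of matrix columns and K the number of right-hand sides. When
// <square> is true, the matrix must be square.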
Status MatrixSolveShapeFn(InferenceContext* c, bool square) {
ShapeHandle lhs;
ShapeHandle rhs;
if (square) {
TF_RETURN_IF_ERROR(MakeBatchSquareMatrix(c, c->input(0), &lhs));
} else {
TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(0), 2, &lhs));
}
TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(1), 2, &rhs));
ShapeHandle lhs_batch_shape;
ShapeHandle rhs_batch_shape;
TF_RETURN_IF_ERROR(c->Subshape(lhs, 0, -2, &lhs_batch_shape));
TF_RETURN_IF_ERROR(c->Subshape(rhs, 0, -2, &rhs_batch_shape));
TF_RETURN_IF_ERROR(
c->Merge(lhs_batch_shape, rhs_batch_shape, &lhs_batch_shape));
DimensionHandle m;
TF_RETURN_IF_ERROR(c->Merge(c->Dim(lhs, -2), c->Dim(rhs, -2), &m));
DimensionHandle n = c->Dim(lhs, -1);
if (square) {
TF_RETURN_IF_ERROR(c->Merge(m, n, &n));
}
ShapeHandle out;
TF_RETURN_IF_ERROR(c->Concatenate(lhs_batch_shape, c->Vector(n), &out));
TF_RETURN_IF_ERROR(c->Concatenate(out, c->Vector(c->Dim(rhs, -1)), &out));
c->set_output(0, out);
return absl::OkStatus();
}
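// Shape inference for MatrixTriangularSolve: like MatrixSolve, but the batch
// shapes of <matrix> and <rhs> are broadcast together rather than merged.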
Status MatrixTriangularSolveShapeFn(InferenceContext* c) {
ShapeHandle lhs;
ShapeHandle rhs;
TF_RETURN_IF_ERROR(MakeBatchSquareMatrix(c, c->input(0), &lhs));
TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(1), 2, &rhs));
ShapeHandle lhs_batch_shape;
ShapeHandle rhs_batch_shape;
ShapeHandle output_batch_shape;
TF_RETURN_IF_ERROR(c->Subshape(lhs, 0, -2, &lhs_batch_shape));
TF_RETURN_IF_ERROR(c->Subshape(rhs, 0, -2, &rhs_batch_shape));
TF_RETURN_IF_ERROR(BroadcastBinaryOpOutputShapeFnHelper(
c, lhs_batch_shape, rhs_batch_shape, true, &output_batch_shape));
DimensionHandle m;
TF_RETURN_IF_ERROR(c->Merge(c->Dim(lhs, -1), c->Dim(rhs, -2), &m));
ShapeHandle out;
TF_RETURN_IF_ERROR(
c->Concatenate(output_batch_shape, c->Matrix(m, c->Dim(rhs, -1)), &out));
c->set_output(0, out);
return absl::OkStatus();
}
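// Shape inference for eigendecomposition ops: eigenvalues have shape
// batch_shape + [N]; eigenvectors have shape batch_shape + [N, N] when
// compute_v is true, and an empty vector is returned otherwise.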
Status SelfAdjointEigV2ShapeFn(InferenceContext* c) {
ShapeHandle input;
TF_RETURN_IF_ERROR(MakeBatchSquareMatrix(c, c->input(0), &input));
DimensionHandle n;
TF_RETURN_IF_ERROR(c->Merge(c->Dim(input, -2), c->Dim(input, -1), &n));
ShapeHandle batch_shape;
TF_RETURN_IF_ERROR(c->Subshape(input, 0, -2, &batch_shape));
ShapeHandle e_shape;
TF_RETURN_IF_ERROR(c->Concatenate(batch_shape, c->Vector(n), &e_shape));
c->set_output(0, e_shape);
bool compute_v;
TF_RETURN_IF_ERROR(c->GetAttr("compute_v", &compute_v));
if (compute_v) {
ShapeHandle v_shape;
TF_RETURN_IF_ERROR(c->Concatenate(batch_shape, c->Matrix(n, n), &v_shape));
c->set_output(1, v_shape);
} else {
c->set_output(1, c->Vector(0ll));
}
return absl::OkStatus();
}
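// Shape inference for Lu: for a square input batch_shape + [N, N], the packed
// LU factors have shape batch_shape + [N, N] and the permutation has shape
// batch_shape + [N].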
Status LuShapeFn(InferenceContext* c) {
ShapeHandle input;
TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(0), 2, &input));
DimensionHandle n;
TF_RETURN_IF_ERROR(c->Merge(c->Dim(input, -2), c->Dim(input, -1), &n));
ShapeHandle batch_shape;
TF_RETURN_IF_ERROR(c->Subshape(input, 0, -2, &batch_shape));
ShapeHandle lu_shape;
ShapeHandle p_shape;
TF_RETURN_IF_ERROR(c->Concatenate(batch_shape, c->Matrix(n, n), &lu_shape));
TF_RETURN_IF_ERROR(c->Concatenate(batch_shape, c->Vector(n), &p_shape));
c->set_output(0, lu_shape);
c->set_output(1, p_shape);
return absl::OkStatus();
}
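// Shape inference for Qr on batch_shape + [M, N] inputs, with P = min(M, N):
// full_matrices produces Q: batch_shape + [M, M] and R: batch_shape + [M, N];
// otherwise Q: batch_shape + [M, P] and R: batch_shape + [P, N].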
Status QrShapeFn(InferenceContext* c) {
ShapeHandle input;
TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(0), 2, &input));
DimensionHandle m = c->Dim(input, -2);
DimensionHandle n = c->Dim(input, -1);
DimensionHandle p;
TF_RETURN_IF_ERROR(c->Min(m, n, &p));
ShapeHandle batch_shape;
TF_RETURN_IF_ERROR(c->Subshape(input, 0, -2, &batch_shape));
ShapeHandle q_shape;
ShapeHandle r_shape;
bool full_matrices;
TF_RETURN_IF_ERROR(c->GetAttr("full_matrices", &full_matrices));
if (full_matrices) {
TF_RETURN_IF_ERROR(c->Concatenate(batch_shape, c->Matrix(m, m), &q_shape));
TF_RETURN_IF_ERROR(c->Concatenate(batch_shape, c->Matrix(m, n), &r_shape));
} else {
TF_RETURN_IF_ERROR(c->Concatenate(batch_shape, c->Matrix(m, p), &q_shape));
TF_RETURN_IF_ERROR(c->Concatenate(batch_shape, c->Matrix(p, n), &r_shape));
}
c->set_output(0, q_shape);
c->set_output(1, r_shape);
return absl::OkStatus();
}
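// Shape inference for Svd on batch_shape + [M, N] inputs, with P = min(M, N):
// singular values always have shape batch_shape + [P]; U and V are empty
// vectors unless compute_uv is true, in which case full_matrices selects
// between [M, M]/[N, N] and [M, P]/[N, P] trailing dimensions.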
Status SvdShapeFn(InferenceContext* c) {
ShapeHandle input;
TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(0), 2, &input));
DimensionHandle m = c->Dim(input, -2);
DimensionHandle n = c->Dim(input, -1);
DimensionHandle p;
TF_RETURN_IF_ERROR(c->Min(m, n, &p));
ShapeHandle batch_shape;
TF_RETURN_IF_ERROR(c->Subshape(input, 0, -2, &batch_shape));
ShapeHandle e_shape;
TF_RETURN_IF_ERROR(c->Concatenate(batch_shape, c->Vector(p), &e_shape));
c->set_output(0, e_shape);
bool compute_uv;
TF_RETURN_IF_ERROR(c->GetAttr("compute_uv", &compute_uv));
if (compute_uv) {
ShapeHandle u_shape;
ShapeHandle v_shape;
bool full_matrices;
TF_RETURN_IF_ERROR(c->GetAttr("full_matrices", &full_matrices));
if (full_matrices) {
TF_RETURN_IF_ERROR(
c->Concatenate(batch_shape, c->Matrix(m, m), &u_shape));
TF_RETURN_IF_ERROR(
c->Concatenate(batch_shape, c->Matrix(n, n), &v_shape));
} else {
TF_RETURN_IF_ERROR(
c->Concatenate(batch_shape, c->Matrix(m, p), &u_shape));
TF_RETURN_IF_ERROR(
c->Concatenate(batch_shape, c->Matrix(n, p), &v_shape));
}
c->set_output(1, u_shape);
c->set_output(2, v_shape);
} else {
c->set_output(1, c->Vector(0ll));
c->set_output(2, c->Vector(0ll));
}
return absl::OkStatus();
}
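// Shape inference for TridiagonalMatMul: the three diagonals must share the
// shape batch_shape + [1, M], <rhs> is batch_shape + [M, K], and the output
// has the same shape as <rhs>.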
Status TridiagonalMatMulShapeFn(InferenceContext* c) {
ShapeHandle superdiag;
ShapeHandle maindiag;
ShapeHandle subdiag;
ShapeHandle rhs;
TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(0), 2, &superdiag));
TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(1), 2, &maindiag));
TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(2), 2, &subdiag));
TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(3), 2, &rhs));
ShapeHandle superdiag_batch_shape;
ShapeHandle maindiag_batch_shape;
ShapeHandle subdiag_batch_shape;
ShapeHandle rhs_batch_shape;
TF_RETURN_IF_ERROR(c->Subshape(superdiag, 0, -2, &superdiag_batch_shape));
TF_RETURN_IF_ERROR(c->Subshape(maindiag, 0, -2, &maindiag_batch_shape));
TF_RETURN_IF_ERROR(c->Subshape(subdiag, 0, -2, &subdiag_batch_shape));
TF_RETURN_IF_ERROR(c->Subshape(rhs, 0, -2, &rhs_batch_shape));
TF_RETURN_IF_ERROR(c->Merge(superdiag, maindiag, &superdiag));
TF_RETURN_IF_ERROR(
c->Merge(maindiag_batch_shape, rhs_batch_shape, &rhs_batch_shape));
TF_RETURN_IF_ERROR(
c->Merge(subdiag_batch_shape, rhs_batch_shape, &rhs_batch_shape));
TF_RETURN_IF_ERROR(c->Merge(superdiag, maindiag, &maindiag));
TF_RETURN_IF_ERROR(c->Merge(subdiag, maindiag, &maindiag));
DimensionHandle m_lhs = c->Dim(maindiag, -1);
DimensionHandle m_rhs = c->Dim(rhs, -2);
TF_RETURN_IF_ERROR(c->Merge(m_lhs, m_rhs, &m_lhs));
DimensionHandle unused;
TF_RETURN_IF_ERROR(c->WithValue(c->Dim(maindiag, -2), 1, &unused));
c->set_output(0, rhs);
return absl::OkStatus();
}
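// Shape inference for TridiagonalSolve: <diagonals> is batch_shape + [3, M]
// and <rhs> is batch_shape + [M, K]; the batch shapes must match and the
// output has the same shape as <rhs>.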
Status TridiagonalSolveShapeFn(InferenceContext* c) {
ShapeHandle lhs;
ShapeHandle rhs;
TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(0), 2, &lhs));
TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(1), 2, &rhs));
ShapeHandle lhs_batch_shape;
ShapeHandle rhs_batch_shape;
TF_RETURN_IF_ERROR(c->Subshape(lhs, 0, -2, &lhs_batch_shape));
TF_RETURN_IF_ERROR(c->Subshape(rhs, 0, -2, &rhs_batch_shape));
TF_RETURN_IF_ERROR(
c->Merge(lhs_batch_shape, rhs_batch_shape, &lhs_batch_shape));
DimensionHandle m_lhs = c->Dim(lhs, -1);
DimensionHandle m_rhs = c->Dim(rhs, -2);
TF_RETURN_IF_ERROR(c->Merge(m_lhs, m_rhs, &m_lhs));
TF_RETURN_IF_ERROR(c->WithValue(c->Dim(lhs, -2), 3, &m_lhs));
c->set_output(0, rhs);
return absl::OkStatus();
}
}  // namespace
REGISTER_OP("MatrixDeterminant")
.Input("input: T")
.Output("output: T")
.Attr("T: {half, float, double, complex64, complex128}")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle input;
TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(0), 2, &input));
DimensionHandle unused;
TF_RETURN_IF_ERROR(
c->Merge(c->Dim(input, -1), c->Dim(input, -2), &unused));
ShapeHandle out;
TF_RETURN_IF_ERROR(c->Subshape(input, 0, -2, &out));
c->set_output(0, out);
return absl::OkStatus();
});
REGISTER_OP("LogMatrixDeterminant")
.Input("input: T")
.Output("sign: T")
.Output("log_abs_determinant: T")
.Attr("T: {half, float, double, complex64, complex128}")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle input;
TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(0), 2, &input));
DimensionHandle unused;
TF_RETURN_IF_ERROR(
c->Merge(c->Dim(input, -1), c->Dim(input, -2), &unused));
ShapeHandle s;
TF_RETURN_IF_ERROR(c->Subshape(input, 0, -2, &s));
c->set_output(0, s);
ShapeHandle out;
TF_RETURN_IF_ERROR(c->Subshape(input, 0, -2, &out));
c->set_output(1, out);
return absl::OkStatus();
});
REGISTER_OP("MatrixInverse")
.Input("input: T")
.Output("output: T")
.Attr("adjoint: bool = False")
.Attr("T: {double, float, half, complex64, complex128}")
.SetShapeFn(BatchUnchangedSquareShapeFn);
REGISTER_OP("MatrixExponential")
.Deprecated(
27, "Use Python implementation tf.linalg.matrix_exponential instead.")
.Input("input: T")
.Output("output: T")
.Attr("T: {double, float, half, complex64, complex128}")
.SetShapeFn(BatchUnchangedSquareShapeFn);
REGISTER_OP("MatrixLogarithm")
.Input("input: T")
.Output("output: T")
.Attr("T: {complex64, complex128}")
.SetShapeFn(BatchUnchangedSquareShapeFn);
REGISTER_OP("Cholesky")
.Input("input: T")
.Output("output: T")
.Attr("T: {double, float, half, complex64, complex128}")
.SetShapeFn(BatchUnchangedSquareShapeFn);
REGISTER_OP("CholeskyGrad")
.Input("l: T")
.Input("grad: T")
.Output("output: T")
.Attr("T: {half, float, double}")
.SetShapeFn(BatchUnchangedSquareShapeFn);
REGISTER_OP("SelfAdjointEig")
.Input("input: T")
.Output("output: T")
.Attr("T: {double, float, half}")
.Deprecated(11, "Use SelfAdjointEigV2 instead.")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle input;
TF_RETURN_IF_ERROR(MakeBatchSquareMatrix(c, c->input(0), &input));
DimensionHandle d = c->Dim(input, -1);
DimensionHandle d_plus_1;
TF_RETURN_IF_ERROR(c->Add(d, 1, &d_plus_1));
ShapeHandle s;
TF_RETURN_IF_ERROR(c->Subshape(input, 0, -2, &s));
TF_RETURN_IF_ERROR(c->Concatenate(s, c->Matrix(d_plus_1, d), &s));
c->set_output(0, s);
return absl::OkStatus();
});
REGISTER_OP("Eig")
.Input("input: T")
.Output("e: Tout")
.Output("v: Tout")
.Attr("compute_v: bool = True")
.Attr("T: {float, double, complex64, complex128}")
.Attr("Tout: {complex64, complex128}")
.SetShapeFn(SelfAdjointEigV2ShapeFn);
REGISTER_OP("SelfAdjointEigV2")
.Input("input: T")
.Output("e: T")
.Output("v: T")
.Attr("compute_v: bool = True")
.Attr("T: {double, float, half, complex64, complex128}")
.SetShapeFn(SelfAdjointEigV2ShapeFn);
REGISTER_OP("Lu")
.Input("input: T")
.Output("lu: T")
.Output("p: output_idx_type")
.Attr("T: {double, float, half, complex64, complex128}")
.Attr("output_idx_type: {int32, int64} = DT_INT32")
.SetShapeFn(LuShapeFn);
REGISTER_OP("MatrixSolve")
.Input("matrix: T")
.Input("rhs: T")
.Output("output: T")
.Attr("adjoint: bool = False")
.Attr("T: {double, float, half, complex64, complex128}")
.SetShapeFn([](InferenceContext* c) {
      return MatrixSolveShapeFn(c, /*square=*/true);
});
REGISTER_OP("BandedTriangularSolve")
.Input("matrix: T")
.Input("rhs: T")
.Output("output: T")
.Attr("lower: bool = True")
.Attr("adjoint: bool = False")
.Attr("T: {double, float, half, complex64, complex128}")
.SetShapeFn([](InferenceContext* c) {
return BandedTriangularSolveShapeFn(c);
});
REGISTER_OP("MatrixTriangularSolve")
.Input("matrix: T")
.Input("rhs: T")
.Output("output: T")
.Attr("lower: bool = True")
.Attr("adjoint: bool = False")
.Attr("T: {bfloat16, double, float, half, complex64, complex128}")
.SetShapeFn([](InferenceContext* c) {
return MatrixTriangularSolveShapeFn(c);
});
REGISTER_OP("MatrixSolveLs")
.Input("matrix: T")
.Input("rhs: T")
.Input("l2_regularizer: double")
.Output("output: T")
.Attr("T: {double, float, half, complex64, complex128}")
.Attr("fast: bool = True")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle l2_regularizer;
TF_RETURN_IF_ERROR(c->WithRank(c->input(2), 0, &l2_regularizer));
      return MatrixSolveShapeFn(c, /*square=*/false);
});
REGISTER_OP("MatrixSquareRoot")
.Input("input: T")
.Output("output: T")
.Attr("T: {double, float, half, complex64, complex128}")
.SetShapeFn(BatchUnchangedSquareShapeFn);
REGISTER_OP("Qr")
.Input("input: T")
.Output("q: T")
.Output("r: T")
.Attr("full_matrices: bool = False")
.Attr("T: {double, float, half, complex64, complex128}")
.SetShapeFn(QrShapeFn);
REGISTER_OP("Svd")
.Input("input: T")
.Output("s: T")
.Output("u: T")
.Output("v: T")
.Attr("compute_uv: bool = True")
.Attr("full_matrices: bool = False")
.Attr("T: {double, float, half, complex64, complex128}")
.SetShapeFn(SvdShapeFn);
REGISTER_OP("TridiagonalMatMul")
.Input("superdiag: T")
.Input("maindiag: T")
.Input("subdiag: T")
.Input("rhs: T")
.Output("output: T")
.Attr("T: {double, float, complex64, complex128}")
.SetShapeFn(TridiagonalMatMulShapeFn);
REGISTER_OP("TridiagonalSolve")
.Input("diagonals: T")
.Input("rhs: T")
.Output("output: T")
.Attr("partial_pivoting: bool = True")
.Attr("perturb_singular: bool = False")
.Attr("T: {double, float, complex64, complex128}")
.SetShapeFn(TridiagonalSolveShapeFn);
REGISTER_OP("Einsum")
.Input("inputs: N * T")
.Output("output: T")
.Attr("equation: string")
.Attr("N: int >= 1")
.Attr("T: type")
.SetShapeFn(shape_inference::EinsumShape);
REGISTER_OP("BatchSelfAdjointEig")
.Input("input: T")
.Output("output: T")
.Attr("T: {double, float}")
.Deprecated(11, "Use SelfAdjointEigV2 instead.")
.SetShapeFn(shape_inference::UnknownShape);
REGISTER_OP("BatchMatrixDeterminant")
.Input("input: T")
.Output("output: T")
.Attr("T: {float, double, complex64, complex128}")
.Deprecated(13, "Use MatrixDeterminant instead.")
.SetShapeFn(shape_inference::UnknownShape);
REGISTER_OP("BatchMatrixInverse")
.Input("input: T")
.Output("output: T")
.Attr("adjoint: bool = False")
.Attr("T: {double, float}")
.Deprecated(13, "Use MatrixInverse instead.")
.SetShapeFn(shape_inference::UnknownShape);
REGISTER_OP("BatchCholesky")
.Input("input: T")
.Output("output: T")
.Attr("T: {double, float}")
.Deprecated(13, "Use Cholesky instead.")
.SetShapeFn(shape_inference::UnknownShape);
REGISTER_OP("BatchCholeskyGrad")
.Input("l: T")
.Input("grad: T")
.Output("output: T")
.Attr("T: {float, double}")
.Deprecated(13, "Use CholeskyGrad instead.")
.SetShapeFn(shape_inference::UnknownShape);
REGISTER_OP("BatchSelfAdjointEigV2")
.Input("input: T")
.Output("e: T")
.Output("v: T")
.Attr("compute_v: bool = True")
.Attr("T: {double, float}")
.Deprecated(13, "Use SelfAdjointEigV2 instead.")
.SetShapeFn(shape_inference::UnknownShape);
REGISTER_OP("BatchMatrixSolve")
.Input("matrix: T")
.Input("rhs: T")
.Output("output: T")
.Attr("adjoint: bool = False")
.Attr("T: {double, float}")
.Deprecated(13, "Use MatrixSolve instead.")
.SetShapeFn(shape_inference::UnknownShape);
REGISTER_OP("BatchMatrixTriangularSolve")
.Input("matrix: T")
.Input("rhs: T")
.Output("output: T")
.Attr("lower: bool = True")
.Attr("adjoint: bool = False")
.Attr("T: {double, float}")
.Deprecated(13, "Use MatrixTriangularSolve instead.")
.SetShapeFn(shape_inference::UnknownShape);
REGISTER_OP("BatchMatrixSolveLs")
.Input("matrix: T")
.Input("rhs: T")
.Input("l2_regularizer: double")
.Output("output: T")
.Attr("T: {double, float}")
.Attr("fast: bool = True")
.Deprecated(13, "Use MatrixSolveLs instead.")
.SetShapeFn(shape_inference::UnknownShape);
REGISTER_OP("BatchSvd")
.Input("input: T")
.Output("s: T")
.Output("u: T")
.Output("v: T")
.Attr("compute_uv: bool = True")
.Attr("full_matrices: bool = False")
.Attr("T: {double, float, complex64, complex128}")
.Deprecated(13, "Use Svd instead.")
.SetShapeFn(shape_inference::UnknownShape);
} | #include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/shape_inference_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
TEST(LinalgOpsTest, MatrixDeterminant_ShapeFn) {
ShapeInferenceTestOp op("MatrixDeterminant");
INFER_OK(op, "?", "?");
INFER_ERROR("Shape must be at least rank 2 but is rank 1", op, "[1]");
INFER_ERROR("Dimensions must be equal, but are 2 and 1", op, "[1,?,3,4,1,2]");
INFER_OK(op, "[?,?]", "[]");
INFER_OK(op, "[1,?]", "[]");
INFER_OK(op, "[?,1]", "[]");
INFER_OK(op, "[1,?,3,4,?,?]", "[d0_0,d0_1,d0_2,d0_3]");
INFER_OK(op, "[1,?,3,4,1,?]", "[d0_0,d0_1,d0_2,d0_3]");
INFER_OK(op, "[1,?,3,4,?,1]", "[d0_0,d0_1,d0_2,d0_3]");
}
TEST(LinalgOpsTest, UnchangedSquare_ShapeFn) {
for (const char* op_name : {"Cholesky", "CholeskyGrad", "MatrixInverse"}) {
ShapeInferenceTestOp op(op_name);
const string extra_shape = (op.name == "CholeskyGrad" ? ";?" : "");
INFER_OK(op, "?" + extra_shape, "?");
INFER_ERROR("Shape must be at least rank 2 but is rank 1", op,
"[1]" + extra_shape);
INFER_ERROR("Dimensions must be equal, but are 1 and 2", op,
"[1,2]" + extra_shape);
INFER_OK(op, "[?,?]" + extra_shape, "[d0_0|d0_1,d0_0|d0_1]");
INFER_OK(op, "[1,?]" + extra_shape, "[d0_0,d0_0]");
INFER_OK(op, "[?,1]" + extra_shape, "[d0_1,d0_1]");
INFER_OK(op, "[5,?,7,?,?]" + extra_shape,
"[d0_0,d0_1,d0_2,d0_3|d0_4,d0_3|d0_4]");
INFER_OK(op, "[5,?,7,1,?]" + extra_shape, "[d0_0,d0_1,d0_2,d0_3,d0_3]");
INFER_OK(op, "[5,?,7,?,1]" + extra_shape, "[d0_0,d0_1,d0_2,d0_4,d0_4]");
}
}
TEST(LinalgOpsTest, SelfAdjointEig_ShapeFn) {
ShapeInferenceTestOp op("SelfAdjointEig");
INFER_OK(op, "?", "?");
INFER_ERROR("Shape must be at least rank 2 but is rank 1", op, "[1]");
INFER_ERROR("Dimensions must be equal, but are 1 and 2", op, "[1,2]");
INFER_OK(op, "[?,?]", "[?,d0_0|d0_1]");
INFER_OK(op, "[1,?]", "[2,d0_0]");
INFER_OK(op, "[?,1]", "[2,d0_1]");
INFER_OK(op, "[5,?,7,?,?]", "[d0_0,d0_1,d0_2,?,d0_3|d0_4]");
INFER_OK(op, "[5,?,7,1,?]", "[d0_0,d0_1,d0_2,2,d0_3]");
INFER_OK(op, "[5,?,7,?,1]", "[d0_0,d0_1,d0_2,2,d0_4]");
}
TEST(LinalgOpsTest, SelfAdjointEigV2_ShapeFn) {
ShapeInferenceTestOp op("SelfAdjointEigV2");
auto set_compute_v = [&op](bool compute_v) {
TF_ASSERT_OK(NodeDefBuilder("test", "Pack")
.Input({{"input", 0, DT_FLOAT}})
.Attr("compute_v", compute_v)
.Finalize(&op.node_def));
TF_ASSERT_OK(NodeDefBuilder("test", "Pack")
.Input({{"input", 0, DT_HALF}})
.Attr("compute_v", compute_v)
.Finalize(&op.node_def));
};
set_compute_v(false);
INFER_ERROR("Shape must be at least rank 2 but is rank 1", op, "[1]");
INFER_ERROR("Dimensions must be equal, but are 1 and 2", op, "[1,2]");
INFER_ERROR("Dimensions must be equal, but are 1 and 2", op, "[3,1,2]");
INFER_OK(op, "?", "?;[0]");
INFER_OK(op, "[?,?]", "[d0_0|d0_1];[0]");
INFER_OK(op, "[1,?]", "[d0_0|d0_1];[0]");
INFER_OK(op, "[?,1]", "[d0_0|d0_1];[0]");
INFER_OK(op, "[5,?,7,?,?]", "[d0_0,d0_1,d0_2,d0_3|d0_4];[0]");
INFER_OK(op, "[5,?,7,1,?]", "[d0_0,d0_1,d0_2,d0_3|d0_4];[0]");
INFER_OK(op, "[5,?,7,?,1]", "[d0_0,d0_1,d0_2,d0_3|d0_4];[0]");
set_compute_v(true);
INFER_OK(op, "?", "?;?");
INFER_OK(op, "[?,?]", "[d0_0|d0_1];[d0_0|d0_1,d0_0|d0_1]");
INFER_OK(op, "[1,?]", "[d0_0|d0_1];[d0_0|d0_1,d0_0|d0_1]");
INFER_OK(op, "[?,1]", "[d0_0|d0_1];[d0_0|d0_1,d0_0|d0_1]");
INFER_OK(op, "[5,?,7,?,?]",
"[d0_0,d0_1,d0_2,d0_3|d0_4];[d0_0,d0_1,d0_2,d0_3|d0_4,d0_3|d0_4]");
INFER_OK(op, "[5,?,7,1,?]",
"[d0_0,d0_1,d0_2,d0_3|d0_4];[d0_0,d0_1,d0_2,d0_3|d0_4,d0_3|d0_4]");
INFER_OK(op, "[5,?,7,?,1]",
"[d0_0,d0_1,d0_2,d0_3|d0_4];[d0_0,d0_1,d0_2,d0_3|d0_4,d0_3|d0_4]");
}
TEST(LinalgOpsTest, MatrixSolve_ShapeFn) {
ShapeInferenceTestOp op("MatrixSolve");
INFER_OK(op, "?;?", "?");
INFER_ERROR("Shape must be at least rank 2 but is rank 1", op, "[1];?");
INFER_ERROR("Dimensions must be equal, but are 1 and 2", op, "[1,2];?");
INFER_ERROR("Shape must be at least rank 2 but is rank 1", op, "[5,?,?];[6]");
INFER_ERROR("Shapes must be equal rank, but are 0 and 1", op,
"[5,?];[6,?,?]");
INFER_OK(op, "[?,?];?", "[d0_0|d0_1,?]");
INFER_OK(op, "[?,?];[?,?]", "[d0_0,d1_1]");
INFER_OK(op, "[?,?];[1,?]", "[d1_0,d1_1]");
INFER_OK(op, "[1,?];[1,?]", "[d0_0|d1_0,d1_1]");
INFER_OK(op, "[?,1];[1,?]", "[d0_1|d1_0,d1_1]");
INFER_OK(op, "[1,1];[?,?]", "[d0_0,d1_1]");
INFER_OK(op, "[1,1];[1,?]", "[d0_0|d0_1|d1_0,d1_1]");
INFER_OK(op, "[10,?,?,?];[?,20,1,?]", "[d0_0,d1_1,d1_2,d1_3]");
INFER_OK(op, "[10,?,1,?];[?,20,1,?]", "[d0_0,d1_1,d0_2|d1_2,d1_3]");
INFER_OK(op, "[10,?,?,1];[?,20,1,?]", "[d0_0,d1_1,d0_3|d1_2,d1_3]");
INFER_OK(op, "[10,?,1,1];[?,20,?,?]", "[d0_0,d1_1,d0_2,d1_3]");
INFER_OK(op, "[10,?,1,1];[?,20,1,?]", "[d0_0,d1_1,d0_2|d0_3|d1_2,d1_3]");
}
TEST(LinalgOpsTest, MatrixTriangularSolve_ShapeFn) {
ShapeInferenceTestOp op("MatrixTriangularSolve");
INFER_OK(op, "?;?", "?");
INFER_ERROR("Shape must be at least rank 2 but is rank 1", op, "[1];?");
INFER_ERROR("Dimensions must be equal, but are 1 and 2", op, "[1,2];?");
INFER_ERROR("Shape must be at least rank 2 but is rank 1", op, "[5,?,?];[6]");
INFER_OK(op, "[?,?];[?,?]", "[d0_0,d1_1]");
INFER_OK(op, "[?,?];[1,?]", "[d1_0,d1_1]");
INFER_OK(op, "[1,?];[1,?]", "[d0_0|d1_0,d1_1]");
INFER_OK(op, "[?,1];[1,?]", "[d0_1|d1_0,d1_1]");
INFER_OK(op, "[1,1];[?,?]", "[d0_0,d1_1]");
INFER_OK(op, "[1,1];[1,?]", "[d0_0|d0_1|d1_0,d1_1]");
INFER_OK(op, "[10,?,?,?];[?,20,1,?]", "[d0_0,d1_1,d1_2,d1_3]");
INFER_OK(op, "[10,?,1,?];[?,20,1,?]", "[d0_0,d1_1,d0_2|d1_2,d1_3]");
INFER_OK(op, "[10,?,?,1];[?,20,1,?]", "[d0_0,d1_1,d0_3|d1_2,d1_3]");
INFER_OK(op, "[10,?,1,1];[?,20,?,?]", "[d0_0,d1_1,d0_2,d1_3]");
INFER_OK(op, "[10,?,1,1];[?,20,1,?]", "[d0_0,d1_1,d0_2|d0_3|d1_2,d1_3]");
}
TEST(LinalgOpsTest, MatrixSolveLs_ShapeFn) {
ShapeInferenceTestOp op("MatrixSolveLs");
INFER_OK(op, "?;?;?", "?");
INFER_OK(op, "?;?;[]", "?");
INFER_OK(op, "[1,?];[1,?];?", "[d0_1,d1_1]");
INFER_OK(op, "[1,2];[1,3];?", "[d0_1,d1_1]");
INFER_ERROR("Dimensions must be equal, but are 5 and 6", op, "[5,?];[6,?];?");
INFER_OK(op, "[10,?,1,?];[?,20,1,?];?", "[d0_0,d1_1,d0_3,d1_3]");
INFER_OK(op, "[10,20,1,2];[10,20,1,3];?", "[d0_0|d1_0,d0_1|d1_1,d0_3,d1_3]");
INFER_ERROR("Dimensions must be equal, but are 5 and 6", op,
"[10,?,5,?];[?,20,6,?];?");
INFER_ERROR("Dimension 0 in both shapes must be equal, but are 10 and 11", op,
"[10,?,5,?];[11,?,5,?];?");
INFER_ERROR("Shape must be at least rank 2 but is rank 1", op, "[?];?;?");
INFER_ERROR("Shape must be at least rank 2 but is rank 1", op, "?;[?];?");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "?;?;[1]");
}
TEST(LinalgOpsTest, Qr_ShapeFn) {
ShapeInferenceTestOp op("Qr");
auto set_attrs = [&op](bool full_matrices) {
TF_ASSERT_OK(NodeDefBuilder("test", "Qr")
.Input({"input", 0, DT_FLOAT})
.Attr("full_matrices", full_matrices)
.Finalize(&op.node_def));
TF_ASSERT_OK(NodeDefBuilder("test", "Qr")
.Input({"input", 0, DT_HALF})
.Attr("full_matrices", full_matrices)
.Finalize(&op.node_def));
};
set_attrs(false);
INFER_OK(op, "?", "?;?");
INFER_OK(op, "[?,?,?]", "[d0_0,d0_1,?];[d0_0,?,d0_2]");
INFER_OK(op, "[4,?,?]", "[d0_0,d0_1,?];[d0_0,?,d0_2]");
INFER_OK(op, "[4,2,?]", "[d0_0,d0_1,?];[d0_0,?,d0_2]");
INFER_OK(op, "[4,?,2]", "[d0_0,d0_1,?];[d0_0,?,d0_2]");
INFER_OK(op, "[?,2,2]", "[d0_0,d0_1,d0_1];[d0_0,d0_1,d0_2]");
INFER_OK(op, "[4,2,2]", "[d0_0,d0_1,d0_1];[d0_0,d0_1,d0_2]");
INFER_OK(op, "[?,3,2]", "[d0_0,d0_1,d0_2];[d0_0,d0_2,d0_2]");
INFER_OK(op, "[4,3,2]", "[d0_0,d0_1,d0_2];[d0_0,d0_2,d0_2]");
INFER_OK(op, "[?,2,3]", "[d0_0,d0_1,d0_1];[d0_0,d0_1,d0_2]");
INFER_OK(op, "[4,2,3]", "[d0_0,d0_1,d0_1];[d0_0,d0_1,d0_2]");
INFER_ERROR("Shape must be at least rank 2 but is rank 1", op, "[1]");
set_attrs(true);
INFER_OK(op, "?", "?;?");
INFER_OK(op, "[?,?,?]", "[d0_0,d0_1,d0_1];[d0_0,d0_1,d0_2]");
INFER_OK(op, "[4,?,?]", "[d0_0,d0_1,d0_1];[d0_0,d0_1,d0_2]");
INFER_OK(op, "[4,2,?]", "[d0_0,d0_1,d0_1];[d0_0,d0_1,d0_2]");
INFER_OK(op, "[4,?,2]", "[d0_0,d0_1,d0_1];[d0_0,d0_1,d0_2]");
INFER_OK(op, "[?,2,2]", "[d0_0,d0_1,d0_1];[d0_0,d0_1,d0_2]");
INFER_OK(op, "[4,2,2]", "[d0_0,d0_1,d0_1];[d0_0,d0_1,d0_2]");
INFER_OK(op, "[?,3,2]", "[d0_0,d0_1,d0_1];[d0_0,d0_1,d0_2]");
INFER_OK(op, "[4,3,2]", "[d0_0,d0_1,d0_1];[d0_0,d0_1,d0_2]");
INFER_OK(op, "[?,2,3]", "[d0_0,d0_1,d0_1];[d0_0,d0_1,d0_2]");
INFER_OK(op, "[4,2,3]", "[d0_0,d0_1,d0_1];[d0_0,d0_1,d0_2]");
INFER_ERROR("Shape must be at least rank 2 but is rank 1", op, "[1]");
}
TEST(LinalgOpsTest, Svd_ShapeFn) {
ShapeInferenceTestOp op("Svd");
auto set_attrs = [&op](bool compute_uv, bool full_matrices) {
TF_ASSERT_OK(NodeDefBuilder("test", "Svd")
.Input({"input", 0, DT_FLOAT})
.Attr("compute_uv", compute_uv)
.Attr("full_matrices", full_matrices)
.Finalize(&op.node_def));
TF_ASSERT_OK(NodeDefBuilder("test", "Svd")
.Input({"input", 0, DT_HALF})
.Attr("compute_uv", compute_uv)
.Attr("full_matrices", full_matrices)
.Finalize(&op.node_def));
};
set_attrs(false, false);
INFER_OK(op, "?", "?;[0];[0]");
INFER_OK(op, "[?,?,?]", "[d0_0,?];[0];[0]");
INFER_OK(op, "[4,?,?]", "[d0_0,?];[0];[0]");
INFER_OK(op, "[4,2,?]", "[d0_0,?];[0];[0]");
INFER_OK(op, "[4,?,2]", "[d0_0,?];[0];[0]");
INFER_OK(op, "[?,2,2]", "[d0_0,d0_1];[0];[0]");
INFER_OK(op, "[4,2,2]", "[d0_0,d0_1];[0];[0]");
INFER_OK(op, "[?,3,2]", "[d0_0,d0_2];[0];[0]");
INFER_OK(op, "[4,3,2]", "[d0_0,d0_2];[0];[0]");
INFER_OK(op, "[?,2,3]", "[d0_0,d0_1];[0];[0]");
INFER_OK(op, "[4,2,3]", "[d0_0,d0_1];[0];[0]");
INFER_ERROR("Shape must be at least rank 2 but is rank 1", op, "[1]");
set_attrs(true, false);
INFER_OK(op, "?", "?;?;?");
INFER_OK(op, "[?,?,?]", "[d0_0,?];[d0_0,d0_1,?];[d0_0,d0_2,?]");
INFER_OK(op, "[4,?,?]", "[d0_0,?];[d0_0,d0_1,?];[d0_0,d0_2,?]");
INFER_OK(op, "[4,2,?]", "[d0_0,?];[d0_0,d0_1,?];[d0_0,d0_2,?]");
INFER_OK(op, "[4,?,2]", "[d0_0,?];[d0_0,d0_1,?];[d0_0,d0_2,?]");
INFER_OK(op, "[?,2,2]", "[d0_0,d0_1];[d0_0,d0_1,d0_1];[d0_0,d0_2,d0_1]");
INFER_OK(op, "[4,2,2]", "[d0_0,d0_1];[d0_0,d0_1,d0_1];[d0_0,d0_2,d0_1]");
INFER_OK(op, "[?,3,2]", "[d0_0,d0_2];[d0_0,d0_1,d0_2];[d0_0,d0_2,d0_2]");
INFER_OK(op, "[4,3,2]", "[d0_0,d0_2];[d0_0,d0_1,d0_2];[d0_0,d0_2,d0_2]");
INFER_OK(op, "[?,2,3]", "[d0_0,d0_1];[d0_0,d0_1,d0_1];[d0_0,d0_2,d0_1]");
INFER_OK(op, "[4,2,3]", "[d0_0,d0_1];[d0_0,d0_1,d0_1];[d0_0,d0_2,d0_1]");
INFER_ERROR("Shape must be at least rank 2 but is rank 1", op, "[1]");
set_attrs(true, true);
INFER_OK(op, "?", "?;?;?");
INFER_OK(op, "[?,?,?]", "[d0_0,?];[d0_0,d0_1,d0_1];[d0_0,d0_2,d0_2]");
INFER_OK(op, "[4,?,?]", "[d0_0,?];[d0_0,d0_1,d0_1];[d0_0,d0_2,d0_2]");
INFER_OK(op, "[4,2,?]", "[d0_0,?];[d0_0,d0_1,d0_1];[d0_0,d0_2,d0_2]");
INFER_OK(op, "[4,?,2]", "[d0_0,?];[d0_0,d0_1,d0_1];[d0_0,d0_2,d0_2]");
INFER_OK(op, "[?,2,2]", "[d0_0,d0_1];[d0_0,d0_1,d0_1];[d0_0,d0_2,d0_2]");
INFER_OK(op, "[4,2,2]", "[d0_0,d0_1];[d0_0,d0_1,d0_1];[d0_0,d0_2,d0_2]");
INFER_OK(op, "[?,3,2]", "[d0_0,d0_2];[d0_0,d0_1,d0_1];[d0_0,d0_2,d0_2]");
INFER_OK(op, "[4,3,2]", "[d0_0,d0_2];[d0_0,d0_1,d0_1];[d0_0,d0_2,d0_2]");
INFER_OK(op, "[?,2,3]", "[d0_0,d0_1];[d0_0,d0_1,d0_1];[d0_0,d0_2,d0_2]");
INFER_OK(op, "[4,2,3]", "[d0_0,d0_1];[d0_0,d0_1,d0_1];[d0_0,d0_2,d0_2]");
INFER_ERROR("Shape must be at least rank 2 but is rank 1", op, "[1]");
}
TEST(LinalgOpsTest, Lu_ShapeFn) {
ShapeInferenceTestOp op("Lu");
INFER_OK(op, "?", "?;?");
INFER_ERROR("Shape must be at least rank 2 but is rank 1", op, "[1]");
INFER_ERROR("Dimensions must be equal, but are 1 and 2", op, "[1,?,3,4,1,2]");
INFER_OK(op, "[?,?]", "[d0_0,d0_0];[d0_0]");
INFER_OK(op, "[1,?]", "[d0_0,d0_0];[d0_0]");
INFER_OK(op, "[?,1]", "[d0_1,d0_1];[d0_1]");
INFER_OK(op, "[1,?,3,4,?,?]",
"[d0_0,d0_1,d0_2,d0_3,d0_4,d0_4];[d0_0,d0_1,d0_2,d0_3,d0_4]");
INFER_OK(op, "[1,?,3,4,1,?]",
"[d0_0,d0_1,d0_2,d0_3,d0_4,d0_4];[d0_0,d0_1,d0_2,d0_3,d0_4]");
INFER_OK(op, "[1,?,3,4,?,1]",
"[d0_0,d0_1,d0_2,d0_3,d0_5,d0_5];[d0_0,d0_1,d0_2,d0_3,d0_5]");
}
TEST(LinalgOpsTest, TridiagonalMatMul_ShapeFn) {
ShapeInferenceTestOp op("TridiagonalMatMul");
INFER_OK(op, "?;?;?;?", "in3");
INFER_OK(op, "[1,5];[1,5];[1,5];[?,1]", "in3");
INFER_OK(op, "[1,5];[1,5];[1,5];[5,1]", "in3");
INFER_OK(op, "[?,1,?];[?,1,?];[?,1,?];[?,?,?]", "in3");
INFER_OK(op, "[?,1,5];[?,1,5];[?,1,5];[7,5,2]", "in3");
INFER_OK(op, "[7,1,5];[7,1,5];[7,1,5];[?,5,2]", "in3");
INFER_OK(op, "[7,1,5];[7,1,5];[7,1,5];[7,5,2]", "in3");
INFER_OK(op, "[7,?,1,5];[7,?,1,5];[7,?,1,5];[7,8,5,2]", "in3");
INFER_OK(op, "[7,8,1,5];[7,8,1,5];[7,8,1,5];[7,8,5,2]", "in3");
INFER_ERROR("Shape must be at least rank 2 but is rank 1", op,
"[3];[3];[3];[5,1]");
INFER_ERROR("Shape must be at least rank 2 but is rank 1", op,
"[3,5];[3,5];[3,5];[5]");
INFER_ERROR(
"Dimension 1 in both shapes must be equal, but are 4 and 8. "
"Shapes are [6,4] and [6,8].",
op, "[6,4,3,5];[6,4,3,5];[6,4,3,5];[6,8,5,2]");
INFER_ERROR(
"Dimension 1 in both shapes must be equal, but are 4 and 8. "
"Shapes are [?,4] and [6,8].",
op, "[?,4,3,5];[?,4,3,5];[?,4,3,5];[6,8,5,2]");
INFER_ERROR(
"Dimension 1 in both shapes must be equal, but are 5 and 6. "
"Shapes are [1,5] and [1,6]",
op, "[1,5];[1,6];[1,5];[6,2]");
INFER_ERROR("Dimension must be 1 but is 3", op, "[3,5];[3,5];[3,5];[5,2]");
}
TEST(LinalgOpsTest, TridiagonalSolve_ShapeFn) {
ShapeInferenceTestOp op("TridiagonalSolve");
INFER_OK(op, "?;?", "in1");
INFER_OK(op, "[3,5];[?,1]", "in1");
INFER_OK(op, "[?,5];[5,1]", "in1");
INFER_OK(op, "[?,5];[?,?]", "in1");
INFER_OK(op, "[?,?];[?,?]", "in1");
INFER_OK(op, "[3,5];[5,1]", "in1");
INFER_OK(op, "[3,5];[5,2]", "in1");
INFER_OK(op, "[?,?,?];[?,?,?]", "in1");
INFER_OK(op, "[?,3,5];[7,5,2]", "in1");
INFER_OK(op, "[7,3,5];[?,5,2]", "in1");
INFER_OK(op, "[7,?,5];[?,5,?]", "in1");
INFER_OK(op, "[7,3,5];[7,5,2]", "in1");
INFER_OK(op, "[7,?,3,5];[7,8,5,2]", "in1");
INFER_OK(op, "[7,8,3,5];[7,8,5,2]", "in1");
INFER_ERROR("Shape must be at least rank 2 but is rank 1", op, "[3];[5,1]");
INFER_ERROR("Shape must be at least rank 2 but is rank 1", op, "[3,5];[5]");
INFER_ERROR(
"Dimension 1 in both shapes must be equal, but are 4 and 8. "
"Shapes are [6,4] and [6,8].",
op, "[6,4,3,5];[6,8,5,2]");
INFER_ERROR(
"Dimension 1 in both shapes must be equal, but are 4 and 8. "
"Shapes are [?,4] and [6,8].",
op, "[?,4,3,5];[6,8,5,2]");
INFER_ERROR("Dimension must be 3 but is 4", op, "[4,5];[5,2]");
INFER_ERROR("Dimension must be 3 but is 4", op, "[6,4,5];[6,5,2]");
INFER_ERROR("Dimensions must be equal, but are 9 and 5", op, "[3,9];[5,2]");
INFER_ERROR("Dimensions must be equal, but are 9 and 5", op,
"[6,3,9];[6,5,2]");
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/ops/linalg_ops.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/ops/linalg_ops_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
adeb99e1-0e4c-4bf7-a55d-129ccd6739eb | cpp | google/quiche | qpack_encoder_stream_sender | quiche/quic/core/qpack/qpack_encoder_stream_sender.cc | quiche/quic/core/qpack/qpack_encoder_stream_sender_test.cc | #include "quiche/quic/core/qpack/qpack_encoder_stream_sender.h"
#include <cstddef>
#include <limits>
#include <string>
#include "absl/strings/string_view.h"
#include "quiche/quic/core/qpack/qpack_instructions.h"
#include "quiche/quic/platform/api/quic_logging.h"
namespace quic {
namespace {
constexpr uint64_t kMaxBytesBufferedByStream = 64 * 1024;
}
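// Each Send* method below serializes one encoder stream instruction into
// |buffer_|; nothing reaches the delegate until Flush(), so consecutive
// instructions are coalesced into a single write.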
QpackEncoderStreamSender::QpackEncoderStreamSender(
HuffmanEncoding huffman_encoding)
: delegate_(nullptr), instruction_encoder_(huffman_encoding) {}
void QpackEncoderStreamSender::SendInsertWithNameReference(
bool is_static, uint64_t name_index, absl::string_view value) {
instruction_encoder_.Encode(
QpackInstructionWithValues::InsertWithNameReference(is_static, name_index,
value),
&buffer_);
}
void QpackEncoderStreamSender::SendInsertWithoutNameReference(
absl::string_view name, absl::string_view value) {
instruction_encoder_.Encode(
QpackInstructionWithValues::InsertWithoutNameReference(name, value),
&buffer_);
}
void QpackEncoderStreamSender::SendDuplicate(uint64_t index) {
instruction_encoder_.Encode(QpackInstructionWithValues::Duplicate(index),
&buffer_);
}
void QpackEncoderStreamSender::SendSetDynamicTableCapacity(uint64_t capacity) {
instruction_encoder_.Encode(
QpackInstructionWithValues::SetDynamicTableCapacity(capacity), &buffer_);
}
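// Writing is allowed only when a delegate is attached and the bytes it has
// already buffered plus the locally buffered instructions fit under
// kMaxBytesBufferedByStream.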
bool QpackEncoderStreamSender::CanWrite() const {
return delegate_ && delegate_->NumBytesBuffered() + buffer_.size() <=
kMaxBytesBufferedByStream;
}
void QpackEncoderStreamSender::Flush() {
if (buffer_.empty()) {
return;
}
delegate_->WriteStreamData(buffer_);
buffer_.clear();
}
} | #include "quiche/quic/core/qpack/qpack_encoder_stream_sender.h"
#include <string>
#include "absl/strings/escaping.h"
#include "quiche/quic/platform/api/quic_test.h"
#include "quiche/quic/test_tools/qpack/qpack_test_utils.h"
using ::testing::Eq;
using ::testing::StrictMock;
namespace quic {
namespace test {
namespace {
class QpackEncoderStreamSenderTest : public QuicTestWithParam<bool> {
protected:
QpackEncoderStreamSenderTest() : stream_(HuffmanEncoding()) {
stream_.set_qpack_stream_sender_delegate(&delegate_);
}
~QpackEncoderStreamSenderTest() override = default;
bool DisableHuffmanEncoding() { return GetParam(); }
HuffmanEncoding HuffmanEncoding() {
return DisableHuffmanEncoding() ? HuffmanEncoding::kDisabled
: HuffmanEncoding::kEnabled;
}
StrictMock<MockQpackStreamSenderDelegate> delegate_;
QpackEncoderStreamSender stream_;
};
INSTANTIATE_TEST_SUITE_P(DisableHuffmanEncoding, QpackEncoderStreamSenderTest,
testing::Values(false, true));
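// The hex literals below are the expected wire encodings; e.g. "c500" is
// Insert With Name Reference: 0xc5 = 0b11000101 (leading '1' pattern, S=1 for
// the static table, 6-bit index 5) followed by 0x00, an empty value.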
TEST_P(QpackEncoderStreamSenderTest, InsertWithNameReference) {
EXPECT_EQ(0u, stream_.BufferedByteCount());
std::string expected_encoded_data;
ASSERT_TRUE(absl::HexStringToBytes("c500", &expected_encoded_data));
EXPECT_CALL(delegate_, WriteStreamData(Eq(expected_encoded_data)));
stream_.SendInsertWithNameReference(true, 5, "");
EXPECT_EQ(expected_encoded_data.size(), stream_.BufferedByteCount());
stream_.Flush();
if (DisableHuffmanEncoding()) {
ASSERT_TRUE(absl::HexStringToBytes("c203666f6f", &expected_encoded_data));
} else {
ASSERT_TRUE(absl::HexStringToBytes("c28294e7", &expected_encoded_data));
}
EXPECT_CALL(delegate_, WriteStreamData(Eq(expected_encoded_data)));
stream_.SendInsertWithNameReference(true, 2, "foo");
EXPECT_EQ(expected_encoded_data.size(), stream_.BufferedByteCount());
stream_.Flush();
ASSERT_TRUE(absl::HexStringToBytes("bf4a03626172", &expected_encoded_data));
EXPECT_CALL(delegate_, WriteStreamData(Eq(expected_encoded_data)));
stream_.SendInsertWithNameReference(false, 137, "bar");
EXPECT_EQ(expected_encoded_data.size(), stream_.BufferedByteCount());
stream_.Flush();
ASSERT_TRUE(absl::HexStringToBytes(
"aa7f005a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a"
"5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a"
"5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a"
"5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a",
&expected_encoded_data));
EXPECT_CALL(delegate_, WriteStreamData(Eq(expected_encoded_data)));
stream_.SendInsertWithNameReference(false, 42, std::string(127, 'Z'));
EXPECT_EQ(expected_encoded_data.size(), stream_.BufferedByteCount());
stream_.Flush();
}
TEST_P(QpackEncoderStreamSenderTest, InsertWithoutNameReference) {
EXPECT_EQ(0u, stream_.BufferedByteCount());
std::string expected_encoded_data;
ASSERT_TRUE(absl::HexStringToBytes("4000", &expected_encoded_data));
EXPECT_CALL(delegate_, WriteStreamData(Eq(expected_encoded_data)));
stream_.SendInsertWithoutNameReference("", "");
EXPECT_EQ(expected_encoded_data.size(), stream_.BufferedByteCount());
stream_.Flush();
if (DisableHuffmanEncoding()) {
ASSERT_TRUE(
absl::HexStringToBytes("43666f6f03666f6f", &expected_encoded_data));
} else {
ASSERT_TRUE(absl::HexStringToBytes("6294e78294e7", &expected_encoded_data));
}
EXPECT_CALL(delegate_, WriteStreamData(Eq(expected_encoded_data)));
stream_.SendInsertWithoutNameReference("foo", "foo");
EXPECT_EQ(expected_encoded_data.size(), stream_.BufferedByteCount());
stream_.Flush();
ASSERT_TRUE(
absl::HexStringToBytes("4362617203626172", &expected_encoded_data));
EXPECT_CALL(delegate_, WriteStreamData(Eq(expected_encoded_data)));
stream_.SendInsertWithoutNameReference("bar", "bar");
EXPECT_EQ(expected_encoded_data.size(), stream_.BufferedByteCount());
stream_.Flush();
ASSERT_TRUE(absl::HexStringToBytes(
"5f005a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a7f"
"005a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a"
"5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a"
"5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a"
"5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a",
&expected_encoded_data));
EXPECT_CALL(delegate_, WriteStreamData(Eq(expected_encoded_data)));
stream_.SendInsertWithoutNameReference(std::string(31, 'Z'),
std::string(127, 'Z'));
EXPECT_EQ(expected_encoded_data.size(), stream_.BufferedByteCount());
stream_.Flush();
}
TEST_P(QpackEncoderStreamSenderTest, Duplicate) {
EXPECT_EQ(0u, stream_.BufferedByteCount());
std::string expected_encoded_data;
ASSERT_TRUE(absl::HexStringToBytes("11", &expected_encoded_data));
EXPECT_CALL(delegate_, WriteStreamData(Eq(expected_encoded_data)));
stream_.SendDuplicate(17);
EXPECT_EQ(expected_encoded_data.size(), stream_.BufferedByteCount());
stream_.Flush();
ASSERT_TRUE(absl::HexStringToBytes("1fd503", &expected_encoded_data));
EXPECT_CALL(delegate_, WriteStreamData(Eq(expected_encoded_data)));
stream_.SendDuplicate(500);
EXPECT_EQ(expected_encoded_data.size(), stream_.BufferedByteCount());
stream_.Flush();
}
TEST_P(QpackEncoderStreamSenderTest, SetDynamicTableCapacity) {
EXPECT_EQ(0u, stream_.BufferedByteCount());
std::string expected_encoded_data;
ASSERT_TRUE(absl::HexStringToBytes("31", &expected_encoded_data));
EXPECT_CALL(delegate_, WriteStreamData(Eq(expected_encoded_data)));
stream_.SendSetDynamicTableCapacity(17);
EXPECT_EQ(expected_encoded_data.size(), stream_.BufferedByteCount());
stream_.Flush();
EXPECT_EQ(0u, stream_.BufferedByteCount());
ASSERT_TRUE(absl::HexStringToBytes("3fd503", &expected_encoded_data));
EXPECT_CALL(delegate_, WriteStreamData(Eq(expected_encoded_data)));
stream_.SendSetDynamicTableCapacity(500);
EXPECT_EQ(expected_encoded_data.size(), stream_.BufferedByteCount());
stream_.Flush();
EXPECT_EQ(0u, stream_.BufferedByteCount());
}
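// Several instructions sent before a single Flush() must be written to the
// delegate as one coalesced block.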
TEST_P(QpackEncoderStreamSenderTest, Coalesce) {
stream_.SendInsertWithNameReference(true, 5, "");
stream_.SendInsertWithNameReference(true, 2, "foo");
stream_.SendInsertWithoutNameReference("foo", "foo");
stream_.SendDuplicate(17);
std::string expected_encoded_data;
if (DisableHuffmanEncoding()) {
ASSERT_TRUE(absl::HexStringToBytes(
"c500"
"c203666f6f"
"43666f6f03666f6f"
"11",
&expected_encoded_data));
} else {
ASSERT_TRUE(absl::HexStringToBytes(
"c500"
"c28294e7"
"6294e78294e7"
"11",
&expected_encoded_data));
}
EXPECT_CALL(delegate_, WriteStreamData(Eq(expected_encoded_data)));
EXPECT_EQ(expected_encoded_data.size(), stream_.BufferedByteCount());
stream_.Flush();
EXPECT_EQ(0u, stream_.BufferedByteCount());
}
TEST_P(QpackEncoderStreamSenderTest, FlushEmpty) {
EXPECT_EQ(0u, stream_.BufferedByteCount());
stream_.Flush();
EXPECT_EQ(0u, stream_.BufferedByteCount());
}
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/qpack/qpack_encoder_stream_sender.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/qpack/qpack_encoder_stream_sender_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
84ece1c4-f4db-4de8-b09b-7928502e72a1 | cpp | google/arolla | string_slot_listener | arolla/io/string_slot_listener.cc | arolla/io/string_slot_listener_test.cc | #include "arolla/io/string_slot_listener.h"
#include <memory>
#include <string>
#include <string_view>
#include <vector>
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "arolla/dense_array/dense_array.h"
#include "arolla/dense_array/qtype/types.h"
#include "arolla/io/accessors_slot_listener.h"
#include "arolla/io/slot_listener.h"
#include "arolla/memory/optional_value.h"
#include "arolla/util/bytes.h"
namespace arolla {
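// Both listeners copy a side output into caller-owned storage, mapping a
// missing value to the empty string: a scalar OptionalValue<Bytes> into a
// std::string, and a DenseArray<Bytes> into a std::vector<std::string>.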
absl::StatusOr<std::unique_ptr<SlotListener<std::string>>> BytesSlotListener(
absl::string_view side_output_name) {
return CreateAccessorsSlotListener<std::string>(
side_output_name, [](const OptionalValue<Bytes>& b, std::string* out) {
*out = b.present ? b.value : "";
});
}
absl::StatusOr<std::unique_ptr<SlotListener<std::vector<std::string>>>>
BytesArraySlotListener(absl::string_view side_output_name) {
return CreateAccessorsSlotListener<std::vector<std::string>>(
side_output_name,
[](const DenseArray<Bytes>& arr, std::vector<std::string>* out) {
out->clear();
out->reserve(arr.size());
arr.ForEach([&](auto _, bool is_present, absl::string_view value) {
out->push_back(is_present ? std::string(value) : "");
});
});
}
} | #include "arolla/io/string_slot_listener.h"
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "arolla/dense_array/dense_array.h"
#include "arolla/dense_array/qtype/types.h"
#include "arolla/io/slot_listener.h"
#include "arolla/memory/frame.h"
#include "arolla/memory/memory_allocation.h"
#include "arolla/memory/optional_value.h"
#include "arolla/qtype/base_types.h"
#include "arolla/qtype/optional_qtype.h"
#include "arolla/qtype/typed_slot.h"
#include "arolla/util/bytes.h"
namespace arolla {
namespace {
using ::testing::ElementsAre;
using ::testing::Eq;
using ::testing::IsEmpty;
TEST(StringSlotListenerTest, BytesSlotListener) {
ASSERT_OK_AND_ASSIGN(auto slot_listener, BytesSlotListener("debug_html"));
EXPECT_THAT(slot_listener->GetQTypeOf("debug_html"),
Eq(GetOptionalQType<Bytes>()));
FrameLayout::Builder layout_builder;
auto bytes_slot = layout_builder.AddSlot<OptionalValue<Bytes>>();
ASSERT_OK_AND_ASSIGN(BoundSlotListener<std::string> bound_slot_listener,
slot_listener->Bind({
{"debug_html", TypedSlot::FromSlot(bytes_slot)},
}));
FrameLayout memory_layout = std::move(layout_builder).Build();
MemoryAllocation alloc(&memory_layout);
std::string side_output;
ASSERT_OK(bound_slot_listener(alloc.frame(), &side_output));
EXPECT_THAT(side_output, Eq(""));
alloc.frame().Set(bytes_slot, Bytes{"fifty seven"});
ASSERT_OK(bound_slot_listener(alloc.frame(), &side_output));
EXPECT_THAT(side_output, Eq("fifty seven"));
}
TEST(StringSlotListenerTest, BytesArraySlotListener) {
ASSERT_OK_AND_ASSIGN(auto slot_listener,
BytesArraySlotListener("debug_htmls"));
EXPECT_THAT(slot_listener->GetQTypeOf("debug_htmls"),
Eq(GetDenseArrayQType<Bytes>()));
FrameLayout::Builder layout_builder;
auto bytes_array_slot = layout_builder.AddSlot<DenseArray<Bytes>>();
ASSERT_OK_AND_ASSIGN(
BoundSlotListener<std::vector<std::string>> bound_slot_listener,
slot_listener->Bind({
{"debug_htmls", TypedSlot::FromSlot(bytes_array_slot)},
}));
FrameLayout memory_layout = std::move(layout_builder).Build();
MemoryAllocation alloc(&memory_layout);
std::vector<std::string> side_output;
ASSERT_OK(bound_slot_listener(alloc.frame(), &side_output));
EXPECT_THAT(side_output, IsEmpty());
alloc.frame().Set(bytes_array_slot,
CreateDenseArray<Bytes>({Bytes("fifty"), Bytes(""),
Bytes("seven"), std::nullopt}));
ASSERT_OK(bound_slot_listener(alloc.frame(), &side_output));
EXPECT_THAT(side_output, ElementsAre("fifty", "", "seven", ""));
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/io/string_slot_listener.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/io/string_slot_listener_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
ac970f59-4c27-4d55-8616-1699c02adfcc | cpp | google/tensorstore | transform_array_constraints | tensorstore/index_space/transform_array_constraints.h | tensorstore/index_space/transform_array_constraints_test.cc | #ifndef TENSORSTORE_INDEX_SPACE_TRANSFORM_ARRAY_CONSTRAINTS_H_
#define TENSORSTORE_INDEX_SPACE_TRANSFORM_ARRAY_CONSTRAINTS_H_
#include "tensorstore/util/iterate.h"
namespace tensorstore {
enum class MustAllocateConstraint {
may_allocate = 0,
must_allocate = 1
};
constexpr MustAllocateConstraint may_allocate =
MustAllocateConstraint::may_allocate;
constexpr MustAllocateConstraint must_allocate =
MustAllocateConstraint::must_allocate;
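// Packs an IterationConstraints value together with a MustAllocateConstraint
// in one integer; the allocate flag occupies the bit just above the
// IterationConstraints bits. A minimal usage sketch:
//   TransformArrayConstraints c(ContiguousLayoutOrder::c, must_allocate);
//   // c.order_constraint() and c.allocate_constraint() recover both parts.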
class TransformArrayConstraints {
public:
constexpr TransformArrayConstraints(
IterationConstraints iteration_constraint = {},
MustAllocateConstraint allocate_constraint = may_allocate)
: value_(iteration_constraint.value() |
(static_cast<int>(allocate_constraint)
<< IterationConstraints::kNumBits)) {}
constexpr TransformArrayConstraints(
LayoutOrderConstraint order_constraint,
RepeatedElementsConstraint repeat_constraint = include_repeated_elements,
MustAllocateConstraint allocate_constraint = may_allocate)
: TransformArrayConstraints(
IterationConstraints(order_constraint, repeat_constraint),
allocate_constraint) {}
constexpr TransformArrayConstraints(
UnspecifiedLayoutOrder order_constraint,
RepeatedElementsConstraint repeat_constraint = include_repeated_elements,
MustAllocateConstraint allocate_constraint = may_allocate)
: TransformArrayConstraints(
IterationConstraints(order_constraint, repeat_constraint),
allocate_constraint) {}
constexpr TransformArrayConstraints(
ContiguousLayoutOrder order_constraint,
RepeatedElementsConstraint repeat_constraint = include_repeated_elements,
MustAllocateConstraint allocate_constraint = may_allocate)
: TransformArrayConstraints(
IterationConstraints(order_constraint, repeat_constraint),
allocate_constraint) {}
constexpr TransformArrayConstraints(
LayoutOrderConstraint order_constraint,
MustAllocateConstraint allocate_constraint)
: TransformArrayConstraints(IterationConstraints(order_constraint),
allocate_constraint) {}
constexpr TransformArrayConstraints(
UnspecifiedLayoutOrder order_constraint,
MustAllocateConstraint allocate_constraint)
: TransformArrayConstraints(IterationConstraints(order_constraint),
allocate_constraint) {}
constexpr TransformArrayConstraints(
ContiguousLayoutOrder order_constraint,
MustAllocateConstraint allocate_constraint)
: TransformArrayConstraints(IterationConstraints(order_constraint),
allocate_constraint) {}
constexpr TransformArrayConstraints(
RepeatedElementsConstraint repeat_constraint,
MustAllocateConstraint allocate_constraint = may_allocate)
: TransformArrayConstraints(IterationConstraints(repeat_constraint),
allocate_constraint) {}
constexpr TransformArrayConstraints(
MustAllocateConstraint allocate_constraint)
: TransformArrayConstraints(IterationConstraints{}, allocate_constraint) {
}
explicit constexpr TransformArrayConstraints(int value) : value_(value) {}
constexpr IterationConstraints iteration_constraints() const {
return IterationConstraints(value() &
((1 << IterationConstraints::kNumBits) - 1));
}
constexpr LayoutOrderConstraint order_constraint() const {
return iteration_constraints().order_constraint();
}
constexpr RepeatedElementsConstraint repeated_elements_constraint() const {
return iteration_constraints().repeated_elements_constraint();
}
constexpr MustAllocateConstraint allocate_constraint() const {
return static_cast<MustAllocateConstraint>(value_ >>
IterationConstraints::kNumBits);
}
constexpr int value() const { return value_; }
constexpr static int kNumBits = IterationConstraints::kNumBits + 1;
friend constexpr bool operator==(TransformArrayConstraints a,
TransformArrayConstraints b) {
return a.value() == b.value();
}
friend constexpr bool operator!=(TransformArrayConstraints a,
TransformArrayConstraints b) {
return a.value() != b.value();
}
private:
int value_;
};
}
#endif | #include "tensorstore/index_space/transform_array_constraints.h"
#include <gtest/gtest.h>
namespace {
using ::tensorstore::ContiguousLayoutOrder;
using ::tensorstore::IterationConstraints;
using ::tensorstore::TransformArrayConstraints;
TEST(TransformArrayConstraintsTest, Basic) {
EXPECT_TRUE(
TransformArrayConstraints(ContiguousLayoutOrder::c).order_constraint());
EXPECT_EQ(IterationConstraints(ContiguousLayoutOrder::c,
tensorstore::skip_repeated_elements),
TransformArrayConstraints(
IterationConstraints(ContiguousLayoutOrder::c,
tensorstore::skip_repeated_elements))
.iteration_constraints());
EXPECT_FALSE(TransformArrayConstraints(tensorstore::unspecified_order)
.order_constraint());
EXPECT_EQ(tensorstore::skip_repeated_elements,
TransformArrayConstraints(tensorstore::skip_repeated_elements,
tensorstore::may_allocate)
.repeated_elements_constraint());
EXPECT_EQ(tensorstore::may_allocate,
TransformArrayConstraints(tensorstore::skip_repeated_elements,
tensorstore::may_allocate)
.allocate_constraint());
EXPECT_EQ(tensorstore::must_allocate,
TransformArrayConstraints(tensorstore::skip_repeated_elements,
tensorstore::must_allocate)
.allocate_constraint());
EXPECT_EQ(
tensorstore::c_order,
TransformArrayConstraints(tensorstore::c_order, tensorstore::may_allocate)
.order_constraint()
.order());
EXPECT_EQ(tensorstore::fortran_order,
TransformArrayConstraints(tensorstore::fortran_order,
tensorstore::may_allocate)
.order_constraint()
.order());
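  // The packed values follow from the bit layout: two layout-order bits, then
  // one repeated-elements bit (IterationConstraints::kNumBits == 3), then the
  // allocate bit, e.g. fortran_order (0b11) | must_allocate (1 << 3) == 11.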
static_assert(
11 == TransformArrayConstraints(tensorstore::fortran_order,
tensorstore::include_repeated_elements,
tensorstore::must_allocate)
.value(),
"");
static_assert(
3 == TransformArrayConstraints(tensorstore::fortran_order,
tensorstore::include_repeated_elements)
.value(),
"");
static_assert(
10 == TransformArrayConstraints(tensorstore::c_order,
tensorstore::include_repeated_elements,
tensorstore::must_allocate)
.value(),
"");
static_assert(
8 == TransformArrayConstraints(tensorstore::include_repeated_elements,
tensorstore::must_allocate)
.value(),
"");
EXPECT_EQ(tensorstore::fortran_order,
TransformArrayConstraints(tensorstore::fortran_order,
tensorstore::include_repeated_elements,
tensorstore::must_allocate)
.order_constraint()
.order());
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/index_space/transform_array_constraints.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/index_space/transform_array_constraints_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
5680cdf5-915e-4052-877f-c155631c5ca6 | cpp | tensorflow/tensorflow | device_description | third_party/xla/xla/stream_executor/device_description.cc | third_party/xla/xla/stream_executor/device_description_test.cc | #include "xla/stream_executor/device_description.h"
#include <cstdint>
#include <string>
#include <variant>
#include "xla/stream_executor/device_description.pb.h"
#include "xla/stream_executor/launch_dim.h"
#include "xla/tsl/lib/math/math_util.h"
#include "tsl/platform/logging.h"
namespace stream_executor {
DeviceDescription::DeviceDescription(const GpuDeviceInfoProto &proto)
: block_dim_limit_(BlockDim(proto.block_dim_limit_x(),
proto.block_dim_limit_y(),
proto.block_dim_limit_z())),
threads_per_core_limit_(proto.threads_per_core_limit()),
threads_per_block_limit_(proto.threads_per_block_limit()),
threads_per_warp_(proto.threads_per_warp()),
registers_per_core_limit_(proto.registers_per_core_limit()),
registers_per_block_limit_(proto.registers_per_block_limit()),
device_memory_size_(proto.device_memory_size()),
l2_cache_size_(proto.l2_cache_size()),
memory_bandwidth_(proto.memory_bandwidth()),
shared_memory_per_core_(proto.shared_memory_per_core()),
shared_memory_per_block_(proto.shared_memory_per_block()),
shared_memory_per_block_optin_(proto.shared_memory_per_block_optin()),
clock_rate_ghz_(proto.clock_rate_ghz()),
gpu_compute_capability_(
proto.has_cuda_compute_capability()
? GpuComputeCapability(stream_executor::CudaComputeCapability(
proto.cuda_compute_capability()))
: GpuComputeCapability(stream_executor::RocmComputeCapability(
proto.rocm_compute_capability()))),
core_count_(proto.core_count()),
fpus_per_core_(proto.fpus_per_core()) {}
GpuDeviceInfoProto DeviceDescription::ToGpuProto() const {
stream_executor::GpuDeviceInfoProto proto;
if (auto *ptr = std::get_if<stream_executor::CudaComputeCapability>(
&gpu_compute_capability_))
*proto.mutable_cuda_compute_capability() = ptr->ToProto();
if (auto *ptr = std::get_if<stream_executor::RocmComputeCapability>(
&gpu_compute_capability_))
*proto.mutable_rocm_compute_capability() = ptr->ToProto();
proto.set_threads_per_block_limit(threads_per_block_limit_);
proto.set_threads_per_warp(threads_per_warp_);
proto.set_shared_memory_per_block(shared_memory_per_block_);
proto.set_shared_memory_per_block_optin(shared_memory_per_block_optin_);
proto.set_shared_memory_per_core(shared_memory_per_core_);
proto.set_threads_per_core_limit(threads_per_core_limit_);
proto.set_core_count(core_count_);
proto.set_fpus_per_core(fpus_per_core_);
proto.set_block_dim_limit_x(block_dim_limit().x);
proto.set_block_dim_limit_y(block_dim_limit().y);
proto.set_block_dim_limit_z(block_dim_limit().z);
proto.set_memory_bandwidth(memory_bandwidth_);
proto.set_l2_cache_size(l2_cache_size_);
proto.set_clock_rate_ghz(clock_rate_ghz_);
proto.set_device_memory_size(device_memory_size_);
proto.set_registers_per_core_limit(registers_per_core_limit_);
proto.set_registers_per_block_limit(registers_per_block_limit_);
return proto;
}
std::string DeviceDescription::ToString() const {
return ToGpuProto().DebugString();
}
const GpuComputeCapability &DeviceDescription::gpu_compute_capability() const {
return gpu_compute_capability_;
}
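// The accessors below return sentinels when the stored capability belongs to
// the other vendor: CudaComputeCapability{-1, -1} or a default-constructed
// RocmComputeCapability.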
CudaComputeCapability DeviceDescription::cuda_compute_capability() const {
if (auto *ptr =
std::get_if<CudaComputeCapability>(&gpu_compute_capability_)) {
return *ptr;
}
return CudaComputeCapability{-1, -1};
}
RocmComputeCapability DeviceDescription::rocm_compute_capability() const {
if (auto *ptr =
std::get_if<RocmComputeCapability>(&gpu_compute_capability_)) {
return *ptr;
}
return RocmComputeCapability{};
}
bool ThreadDimOk(const DeviceDescription &device_description,
const ThreadDim &thread_dim) {
const int64_t total_threads = thread_dim.x * thread_dim.y * thread_dim.z;
const int64_t threads_per_block_limit =
device_description.threads_per_block_limit();
if (total_threads > threads_per_block_limit) {
VLOG(2) << "exceeded total-thread-per-block limit: " << total_threads
<< " vs limit " << threads_per_block_limit;
return false;
}
const auto &limit = device_description.thread_dim_limit();
bool ok = thread_dim.x <= limit.x && thread_dim.y <= limit.y &&
thread_dim.z <= limit.z;
if (!ok) {
VLOG(2) << "thread dim " << thread_dim.ToString()
<< " exceeds limit constraints of " << limit.ToString();
}
return ok;
}
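// Splits element_count across blocks of the maximum supported size; a single
// under-full block is shrunk so threads_per_block equals element_count.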
void CalculateDimensionality(const DeviceDescription &device_description,
int64_t element_count, int64_t *threads_per_block,
int64_t *block_count) {
*threads_per_block = device_description.threads_per_block_limit();
*block_count = tsl::MathUtil::CeilOfRatio(element_count, *threads_per_block);
if (*block_count == 1) {
CHECK_LE(element_count, *threads_per_block);
*threads_per_block = element_count;
}
}
} | #include "xla/stream_executor/device_description.h"
#include "xla/stream_executor/semantic_version.h"
#include "tsl/platform/test.h"
namespace stream_executor {
namespace {
TEST(DeviceDescription, DefaultConstruction) {
DeviceDescription desc;
EXPECT_EQ(desc.device_address_bits(), -1);
EXPECT_EQ(desc.device_memory_size(), -1);
EXPECT_EQ(desc.clock_rate_ghz(), -1);
EXPECT_EQ(desc.name(), "<undefined>");
EXPECT_EQ(desc.platform_version(), "<undefined>");
constexpr SemanticVersion kZeroVersion = {0, 0, 0};
EXPECT_EQ(desc.driver_version(), kZeroVersion);
EXPECT_EQ(desc.runtime_version(), kZeroVersion);
EXPECT_EQ(desc.pci_bus_id(), "<undefined>");
}
TEST(CudaComputeCapability, GenerationNumericTest) {
EXPECT_TRUE(CudaComputeCapability(7, 5).IsAtLeastVolta());
EXPECT_TRUE(CudaComputeCapability(8, 0).IsAtLeastAmpere());
EXPECT_TRUE(CudaComputeCapability(9, 0).IsAtLeastHopper());
EXPECT_TRUE(CudaComputeCapability(10, 0).IsAtLeastBlackwell());
}
TEST(CudaComputeCapability, GenerationLiteralTest) {
EXPECT_TRUE(CudaComputeCapability::Volta().IsAtLeast(7));
EXPECT_TRUE(CudaComputeCapability::Ampere().IsAtLeast(8));
EXPECT_TRUE(CudaComputeCapability::Hopper().IsAtLeast(9));
EXPECT_TRUE(CudaComputeCapability::Blackwell().IsAtLeast(10));
}
TEST(CudaComputeCapability, ComparisonTest) {
CudaComputeCapability lower{1, 0};
CudaComputeCapability slightly_higher{1, 1};
CudaComputeCapability higher{2, 0};
EXPECT_TRUE(lower == lower);
EXPECT_FALSE(lower == slightly_higher);
EXPECT_FALSE(lower == higher);
EXPECT_TRUE(lower <= lower);
EXPECT_TRUE(lower < slightly_higher);
EXPECT_TRUE(lower <= slightly_higher);
EXPECT_FALSE(lower < lower);
EXPECT_FALSE(slightly_higher <= lower);
EXPECT_FALSE(slightly_higher < lower);
EXPECT_TRUE(slightly_higher >= slightly_higher);
EXPECT_TRUE(slightly_higher > lower);
EXPECT_TRUE(slightly_higher >= lower);
EXPECT_FALSE(slightly_higher > slightly_higher);
EXPECT_FALSE(lower > slightly_higher);
EXPECT_FALSE(lower >= slightly_higher);
EXPECT_TRUE(higher > slightly_higher);
EXPECT_TRUE(higher >= slightly_higher);
EXPECT_TRUE(higher >= higher);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/stream_executor/device_description.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/stream_executor/device_description_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
3829685b-1880-4132-b56c-60ef31326b82 | cpp | tensorflow/tensorflow | xla_gpu_ops | third_party/xla/xla/service/gpu/fusions/ir/xla_gpu_ops.cc | third_party/xla/xla/service/gpu/fusions/ir/xla_gpu_ops_test.cc | #include "xla/service/gpu/fusions/ir/xla_gpu_ops.h"
#include <cstdint>
#include <optional>
#include <utility>
#include <vector>
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/STLFunctionalExtras.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/LogicalResult.h"
#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/DialectImplementation.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OpDefinition.h"
#include "mlir/IR/OpImplementation.h"
#include "mlir/IR/OperationSupport.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/IR/SymbolTable.h"
#include "mlir/IR/TypeRange.h"
#include "mlir/IR/TypeUtilities.h"
#include "mlir/IR/Types.h"
#include "mlir/IR/Value.h"
#include "mlir/IR/ValueRange.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#include "xla/service/gpu/fusions/ir/xla_gpu_dialect.cc.inc"
#include "xla/service/gpu/model/indexing_map.h"
#include "xla/service/gpu/model/indexing_map_serialization.h"
namespace xla {
namespace gpu {
namespace {
using llvm::ArrayRef;
using mlir::AffineExpr;
using mlir::AffineMap;
using mlir::Block;
using mlir::DenseI64ArrayAttr;
using mlir::failure;
using mlir::getAffineConstantExpr;
using mlir::getAffineDimExpr;
using mlir::getAffineSymbolExpr;
using mlir::Location;
using mlir::LogicalResult;
using mlir::MLIRContext;
using mlir::OpAsmParser;
using mlir::OpAsmPrinter;
using mlir::OpBuilder;
using mlir::OperationState;
using mlir::ParseResult;
using mlir::PatternRewriter;
using mlir::RankedTensorType;
using mlir::Region;
using mlir::SmallVector;
using mlir::success;
using mlir::Type;
using mlir::TypeRange;
using mlir::Value;
using mlir::ValueRange;
namespace arith = mlir::arith;
}
LogicalResult PureCallOp::verifySymbolUses(
mlir::SymbolTableCollection& symbolTable) {
auto callee = getCalleeAttr();
auto function =
symbolTable.lookupNearestSymbolFrom<mlir::func::FuncOp>(*this, callee);
if (!function) {
return emitError("'f' attribute refers to an undefined function: ")
<< callee;
}
int func_arg_count = function.getFunctionType().getNumInputs();
int arg_count = getOperands().size();
if (arg_count != func_arg_count) {
return emitError() << "argument count mismatch: 'operands' has "
<< arg_count << " arguments, but '" << callee
<< "' expects " << func_arg_count;
}
return success();
}
void AllocateSharedOp::getAsmResultNames(
llvm::function_ref<void(mlir::Value, mlir::StringRef)> setNameFn) {
setNameFn(getResult(), "shmem");
}
void ApplyIndexingOp::build(OpBuilder& builder, OperationState& result,
ValueRange dims, ValueRange symbols,
const IndexingMap& indexing_map) {
SmallVector<Value, 4> operands;
operands.reserve(dims.size() + symbols.size());
operands.append(dims.begin(), dims.end());
operands.append(symbols.begin(), symbols.end());
build(builder, result, operands, indexing_map);
}
void ApplyIndexingOp::build(OpBuilder& builder, OperationState& result,
ValueRange operands,
const IndexingMap& indexing_map) {
SmallVector<Type, 2> result_types(indexing_map.GetAffineMap().getNumResults(),
builder.getIndexType());
IndexingMapAttr indexing_map_attr =
IndexingMapAttr::get(builder.getContext(), indexing_map);
build(builder, result, result_types, operands, indexing_map_attr);
}
void ApplyIndexingOp::build(OpBuilder& builder, OperationState& result,
ValueRange operands, AffineMap affine_map,
ArrayRef<IndexingMap::Variable> dim_vars,
ArrayRef<IndexingMap::Variable> range_vars) {
IndexingMap indexing_map(affine_map, dim_vars, range_vars, {});
build(builder, result, operands, indexing_map);
}
ParseResult parseOperands(
OpAsmParser& parser,
SmallVector<OpAsmParser::UnresolvedOperand, 4>* operands) {
OpAsmParser::UnresolvedOperand operand;
return parser.parseCommaSeparatedList(
[&]() { return parser.parseOperand(operands->emplace_back()); });
}
ParseResult ApplyIndexingOp::parse(OpAsmParser& parser,
OperationState& result) {
mlir::Builder& builder = parser.getBuilder();
auto index_type = builder.getIndexType();
IndexingMapAttr indexing_map_attr;
if (parser.parseAttribute(indexing_map_attr, "indexing_map_attr",
result.attributes)) {
return failure();
}
SmallVector<OpAsmParser::UnresolvedOperand, 4> operands;
SmallVector<int64_t, 4> lower_bounds, upper_bounds;
if (succeeded(parser.parseOptionalLParen())) {
if (parseOperands(parser, &operands) || parser.parseRParen()) {
return failure();
}
}
if (succeeded(parser.parseOptionalLSquare())) {
if (parseOperands(parser, &operands) || parser.parseRSquare()) {
return failure();
}
}
if (parser.resolveOperands(operands, index_type, result.operands) ||
parser.parseOptionalAttrDict(result.attributes)) {
return failure();
}
auto map = indexing_map_attr.getIndexingMap().GetAffineMap();
result.addTypes(SmallVector<Type, 2>(map.getNumResults(), index_type));
return success();
}
void ApplyIndexingOp::print(OpAsmPrinter& p) {
AffineMap affine_map = getIndexingMapAttr().getIndexingMap().GetAffineMap();
p << " " << getIndexingMapAttr();
auto operands = getOperands();
unsigned num_dimensions = affine_map.getNumDims();
if (num_dimensions > 0) {
p << '(';
auto dimension_operands = operands.slice(0, num_dimensions);
llvm::interleaveComma(dimension_operands, p);
p << ')';
}
unsigned num_symbols = affine_map.getNumSymbols();
if (num_symbols > 0) {
p << '[';
auto symbol_operands = operands.slice(num_dimensions, num_symbols);
llvm::interleaveComma(symbol_operands, p);
p << ']';
}
p.printOptionalAttrDict((*this)->getAttrs(),
{"indexing_map_attr"});
}
LogicalResult ApplyIndexingOp::verify() {
auto affine_map = getIndexingMapAttr().getIndexingMap().GetAffineMap();
unsigned num_variables = affine_map.getNumDims() + affine_map.getNumSymbols();
if (getOperands().size() != num_variables) {
return emitOpError(
"operand count must match the number of dimensions and symbols in the "
"affine map");
}
if (!getIndexingMap().GetConstraints().empty()) {
return emitOpError("apply indexing op cannot have any constraints");
}
return success();
}
IndexingMap ApplyIndexingOp::getIndexingMap() {
return getIndexingMapAttr().getIndexingMap();
}
namespace {
struct IndexingMapWithAdditions {
IndexingMap indexing_map;
SmallVector<Value> added_dim_args;
SmallVector<Value> added_sym_args;
};
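// Composes the producer apply_indexing maps into `indexing_map`, rewriting
// each producer result in terms of the consumer's variables and appending any
// operand not seen before as a fresh trailing dimension or symbol.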
IndexingMapWithAdditions GetNewIndexingMapAfterFoldingSequence(
IndexingMap indexing_map,
SmallVector<std::pair<int, ApplyIndexingOp>, 2> apply_indexing_ops,
mlir::DenseMap<Value, AffineExpr> operand_exprs, MLIRContext* ctx) {
int num_dims = indexing_map.GetDimensionCount();
int num_syms = indexing_map.GetSymbolCount();
SmallVector<Value> added_dim_args;
SmallVector<Value> added_sym_args;
auto new_dim_vars = indexing_map.GetDimVars();
auto new_sym_vars = indexing_map.GetRangeVars();
mlir::DenseMap<AffineExpr, AffineExpr> replacements;
for (auto& [operand_id, producer] : apply_indexing_ops) {
auto producer_map = producer.getIndexingMap();
mlir::OpResult producer_result = producer->getOpResult(0);
int producer_result_id = producer_result.getResultNumber();
int num_producer_dims = producer.getAffineMap().getNumDims();
SmallVector<AffineExpr> producer_dim_replacements;
SmallVector<AffineExpr> producer_sym_replacements;
for (auto& producer_operand : producer->getOpOperands()) {
int producer_operand_number = producer_operand.getOperandNumber();
bool is_dim = producer_operand_number < num_producer_dims;
auto& replacement_expr = operand_exprs[producer_operand.get()];
if (!replacement_expr) {
if (is_dim) {
int dim_num = producer_operand_number;
replacement_expr =
getAffineDimExpr(num_dims + added_dim_args.size(), ctx);
added_dim_args.push_back(producer_operand.get());
new_dim_vars.push_back(producer_map.GetDimVars(dim_num));
} else {
int sym_num =
producer_operand_number - producer.getAffineMap().getNumDims();
replacement_expr =
getAffineSymbolExpr(num_syms + added_sym_args.size(), ctx);
added_sym_args.push_back(producer_operand.get());
new_sym_vars.push_back(producer_map.GetRangeVar(sym_num));
}
}
if (is_dim) {
producer_dim_replacements.push_back(replacement_expr);
} else {
producer_sym_replacements.push_back(replacement_expr);
}
}
replacements[operand_exprs[producer_result]] =
producer.getAffineMap()
.getResult(producer_result_id)
.replaceDimsAndSymbols(producer_dim_replacements,
producer_sym_replacements);
}
auto new_affine_map = indexing_map.GetAffineMap().replace(
replacements, num_dims + added_dim_args.size(),
num_syms + added_sym_args.size());
IndexingMap new_indexing_map(new_affine_map, new_dim_vars, new_sym_vars,
{});
return {new_indexing_map, added_dim_args, added_sym_args};
}
}
namespace {
struct SimplifyIndexingMap : public mlir::OpRewritePattern<ApplyIndexingOp> {
using OpRewritePattern<ApplyIndexingOp>::OpRewritePattern;
LogicalResult matchAndRewrite(ApplyIndexingOp indexing_op,
PatternRewriter& rewriter) const override {
IndexingMap indexing_map = indexing_op.getIndexingMap();
if (!indexing_map.Simplify()) {
return rewriter.notifyMatchFailure(indexing_op,
"IndexingMap is already simplified");
}
rewriter.replaceOpWithNewOp<ApplyIndexingOp>(
indexing_op, indexing_op.getOperands(), indexing_map);
return success();
}
};
struct RemoveUnusedVariables : public mlir::OpRewritePattern<ApplyIndexingOp> {
using OpRewritePattern<ApplyIndexingOp>::OpRewritePattern;
LogicalResult matchAndRewrite(ApplyIndexingOp indexing_op,
PatternRewriter& rewriter) const override {
IndexingMap indexing_map = indexing_op.getIndexingMap();
auto unused_symbols_bit_vector = indexing_map.RemoveUnusedVars();
if (unused_symbols_bit_vector.count() == 0) {
return rewriter.notifyMatchFailure(indexing_op,
"IndexingMap stayed unchanged");
}
SmallVector<Value, 4> operands;
operands.reserve(unused_symbols_bit_vector.count());
for (int i = 0; i < unused_symbols_bit_vector.size(); ++i) {
if (!unused_symbols_bit_vector[i]) {
operands.push_back(indexing_op.getOperand(i));
}
}
rewriter.replaceOpWithNewOp<ApplyIndexingOp>(indexing_op, operands,
indexing_map);
return success();
}
};
struct MoveSymbolsToDims : public mlir::OpRewritePattern<ApplyIndexingOp> {
using OpRewritePattern<ApplyIndexingOp>::OpRewritePattern;
LogicalResult matchAndRewrite(ApplyIndexingOp indexing_op,
PatternRewriter& rewriter) const override {
IndexingMap indexing_map = indexing_op.getIndexingMap();
if (indexing_map.GetSymbolCount() == 0) {
return rewriter.notifyMatchFailure(indexing_op, "No symbols found");
}
rewriter.replaceOpWithNewOp<ApplyIndexingOp>(
indexing_op, indexing_op->getOperands(),
indexing_map.ConvertSymbolsToDimensions());
return success();
}
};
struct FoldApplyIndexingSequence
: public mlir::OpRewritePattern<ApplyIndexingOp> {
using OpRewritePattern<ApplyIndexingOp>::OpRewritePattern;
LogicalResult matchAndRewrite(ApplyIndexingOp indexing_op,
PatternRewriter& rewriter) const override {
auto indexing_map = indexing_op.getIndexingMap();
SmallVector<std::pair<int, ApplyIndexingOp>, 2> apply_indexing_ops;
bool all_apply_indexing_operands_have_one_use = true;
for (auto& operand : indexing_op->getOpOperands()) {
if (auto producer = operand.get().getDefiningOp<ApplyIndexingOp>()) {
apply_indexing_ops.push_back({operand.getOperandNumber(), producer});
all_apply_indexing_operands_have_one_use &= producer->hasOneUse();
}
}
if (apply_indexing_ops.empty()) {
return rewriter.notifyMatchFailure(indexing_op,
"No apply_indexing sequences found");
}
    // Bail out if the map has unused variables. The check runs on a throwaway
    // copy so the original indexing map is left untouched.
    auto indexing_map_with_no_unused_vars = indexing_map;
    if (indexing_map_with_no_unused_vars.RemoveUnusedVars().count() > 0) {
      return rewriter.notifyMatchFailure(indexing_op,
                                         "IndexingMap has unused variables");
    }
MLIRContext* ctx = indexing_op.getContext();
int num_dims = indexing_op.getAffineMap().getNumDims();
int num_syms = indexing_op.getAffineMap().getNumSymbols();
mlir::DenseMap<Value, AffineExpr> operand_exprs;
for (auto& operand : indexing_op->getOpOperands()) {
int operand_number = operand.getOperandNumber();
operand_exprs[operand.get()] =
operand_number < num_dims
? getAffineDimExpr(operand_number, ctx)
: getAffineSymbolExpr(operand_number - num_dims, ctx);
}
auto replacement = GetNewIndexingMapAfterFoldingSequence(
indexing_map, apply_indexing_ops, operand_exprs, ctx);
if (!all_apply_indexing_operands_have_one_use &&
!replacement.indexing_map.Simplify()) {
return rewriter.notifyMatchFailure(
indexing_op, "Folded indexing map was not simplified");
}
int new_num_operands = indexing_op->getNumOperands() +
replacement.added_dim_args.size() +
replacement.added_sym_args.size();
SmallVector<Value> new_operands;
new_operands.reserve(new_num_operands);
auto begin = indexing_op.getOperands().begin();
new_operands.append(begin, begin + num_dims);
new_operands.append(replacement.added_dim_args);
new_operands.append(begin + num_dims, begin + num_dims + num_syms);
new_operands.append(replacement.added_sym_args);
rewriter.replaceOpWithNewOp<ApplyIndexingOp>(indexing_op, new_operands,
replacement.indexing_map);
return success();
}
};
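// Folds operands that are defined by arith.constant directly into the affine
// map, dropping them from the operand list and renumbering the remaining dims
// and symbols.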
struct FoldApplyIndexingOperands
: public mlir::OpRewritePattern<ApplyIndexingOp> {
using OpRewritePattern<ApplyIndexingOp>::OpRewritePattern;
LogicalResult matchAndRewrite(ApplyIndexingOp indexing_op,
PatternRewriter& rewriter) const override {
IndexingMap indexing_map = indexing_op.getIndexingMap();
AffineMap affine_map = indexing_map.GetAffineMap();
MLIRContext* ctx = affine_map.getContext();
unsigned num_operands = indexing_op->getNumOperands();
unsigned num_dims = affine_map.getNumDims();
unsigned num_symbols = affine_map.getNumSymbols();
SmallVector<std::optional<int64_t>> constant_values(num_operands,
std::nullopt);
int num_constants = 0;
for (auto& operand : indexing_op->getOpOperands()) {
if (auto constant =
operand.get().getDefiningOp<arith::ConstantIndexOp>()) {
constant_values[operand.getOperandNumber()] = constant.value();
++num_constants;
}
}
if (num_constants == 0) {
return rewriter.notifyMatchFailure(indexing_op,
"No constant operands found");
}
SmallVector<AffineExpr, 2> dim_replacements, symbol_replacements;
dim_replacements.reserve(num_dims);
symbol_replacements.reserve(num_symbols);
unsigned new_num_operands = indexing_op->getNumOperands() - num_constants;
SmallVector<Value, 4> new_operands;
new_operands.reserve(new_num_operands);
SmallVector<IndexingMap::Variable, 2> new_dim_vars;
new_dim_vars.reserve(num_dims);
SmallVector<IndexingMap::Variable, 2> new_range_vars;
new_range_vars.reserve(num_symbols);
unsigned new_num_dims = 0;
unsigned new_num_symbols = 0;
for (auto [operand, constant_value] :
llvm::zip(indexing_op->getOpOperands(), constant_values)) {
unsigned operand_id = operand.getOperandNumber();
if (constant_value.has_value()) {
if (operand_id < num_dims) {
dim_replacements.push_back(
getAffineConstantExpr(*constant_value, ctx));
} else {
symbol_replacements.push_back(
getAffineConstantExpr(*constant_value, ctx));
}
} else {
new_operands.push_back(operand.get());
if (operand_id < num_dims) {
dim_replacements.push_back(getAffineDimExpr(new_num_dims++, ctx));
new_dim_vars.push_back(indexing_map.GetDimVars(operand_id));
} else {
symbol_replacements.push_back(
getAffineSymbolExpr(new_num_symbols++, ctx));
new_range_vars.push_back(
indexing_map.GetRangeVar(operand_id - num_dims));
}
}
}
rewriter.replaceOpWithNewOp<ApplyIndexingOp>(
indexing_op, new_operands,
affine_map.replaceDimsAndSymbols(dim_replacements, symbol_replacements,
new_num_dims, new_num_symbols),
new_dim_vars, new_range_vars);
return success();
}
};
struct FoldApplyIndexingResults
: public mlir::OpRewritePattern<ApplyIndexingOp> {
using OpRewritePattern<ApplyIndexingOp>::OpRewritePattern;
LogicalResult matchAndRewrite(ApplyIndexingOp indexing_op,
PatternRewriter& rewriter) const override {
Location loc = indexing_op.getLoc();
IndexingMap indexing_map = indexing_op.getIndexingMap();
if (indexing_map.IsKnownEmpty()) {
return rewriter.notifyMatchFailure(indexing_op,
"Domain of the indexing map is empty");
}
AffineMap* affine_map = &indexing_map.GetMutableAffineMap();
unsigned num_results = affine_map->getNumResults();
SmallVector<AffineExpr, 4> new_exprs;
new_exprs.reserve(num_results);
SmallVector<Value, 4> new_values;
new_values.reserve(num_results);
for (mlir::OpResult opresult : indexing_op->getOpResults()) {
if (opresult.use_empty()) {
new_values.push_back(rewriter.create<arith::ConstantIndexOp>(loc, 0));
continue;
}
unsigned id = opresult.getResultNumber();
AffineExpr result_expr = affine_map->getResult(id);
if (auto const_expr =
mlir::dyn_cast<mlir::AffineConstantExpr>(result_expr)) {
new_values.push_back(rewriter.create<arith::ConstantIndexOp>(
loc, const_expr.getValue()));
continue;
}
if (auto dim_expr = mlir::dyn_cast<mlir::AffineDimExpr>(result_expr)) {
new_values.push_back(indexing_op.getOperand(dim_expr.getPosition()));
continue;
}
if (auto symbol_expr =
mlir::dyn_cast<mlir::AffineSymbolExpr>(result_expr)) {
new_values.push_back(indexing_op.getOperand(
indexing_map.GetDimVarsCount() + symbol_expr.getPosition()));
continue;
}
new_exprs.push_back(result_expr);
new_values.push_back(Value{});
}
if (new_exprs.size() == num_results) {
return rewriter.notifyMatchFailure(
indexing_op, "No constant or dim/symbol expression found");
}
*affine_map =
AffineMap::get(affine_map->getNumDims(), affine_map->getNumSymbols(),
new_exprs, affine_map->getContext());
auto new_indexing_op = rewriter.create<ApplyIndexingOp>(
loc, indexing_op.getOperands(), indexing_map);
for (int new_result_id = 0, new_indexing_op_result_id = 0;
new_result_id < new_values.size(); ++new_result_id) {
auto& new_value = new_values[new_result_id];
if (new_value) continue;
new_value = new_indexing_op.getResult(new_indexing_op_result_id++);
}
rewriter.replaceOp(indexing_op, new_values);
return success();
}
};
}
void ApplyIndexingOp::getCanonicalizationPatterns(
mlir::RewritePatternSet& results, MLIRContext* context) {
results.add<FoldApplyIndexingOperands, FoldApplyIndexingResults,
FoldApplyIndexingSequence, MoveSymbolsToDims,
RemoveUnusedVariables, SimplifyIndexingMap>(context);
}
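// apply_indexing folds away entirely when every result is a bare dim or
// symbol expression, in which case each result forwards the matching operand.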
mlir::LogicalResult ApplyIndexingOp::fold(
FoldAdaptor adaptor, llvm::SmallVectorImpl<mlir::OpFoldResult>& results) {
auto map = getAffineMap();
for (auto expr : map.getResults()) {
if (auto dim = mlir::dyn_cast<mlir::AffineDimExpr>(expr)) {
results.push_back(getOperand(dim.getPosition()));
} else if (auto sym = mlir::dyn_cast<mlir::AffineSymbolExpr>(expr)) {
results.push_back(getOperand(map.getNumDims() + sym.getPosition()));
} else {
results.clear();
return failure();
}
}
return success();
}
void AtomicRMWOp::getAsmResultNames(
llvm::function_ref<void(mlir::Value, mlir::StringRef)> setNameFn) {
setNameFn(getResult(), "atomic_rmw");
}
void AtomicRMWOp::build(OpBuilder& builder, OperationState& result,
Value tensor, ValueRange ivs) {
OpBuilder::InsertionGuard g(builder);
result.addOperands(tensor);
result.addOperands(ivs);
result.addTypes(tensor.getType());
auto tensor_type = llvm::cast<RankedTensorType>(tensor.getType());
Region* body = result.addRegion();
builder.createBlock(body);
body->addArgument(tensor_type.getElementType(), tensor.getLoc());
}
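// An atomic_rmw whose body immediately yields its block argument performs no
// update, so it folds to the input tensor.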
mlir::OpFoldResult AtomicRMWOp::fold(FoldAdaptor adaptor) {
auto* body = getBody();
if (&body->front() == body->getTerminator() &&
body->front().getOperand(0) == body->getArgument(0)) {
return getOperand(0);
}
return {};
}
void PureCallOp::getAsmResultNames(
llvm::function_ref<void(mlir::Value, mlir::StringRef)> setNameFn) {
for (auto result : getResults()) {
setNameFn(result, "pure_call");
}
}
void SyncThreadsOp::getAsmResultNames(
llvm::function_ref<void(mlir::Value, mlir::StringRef)> setNameFn) {
for (auto result : getResults()) {
setNameFn(result, "synced_tensor");
}
}
void LoopOp::getAsmResultNames(
llvm::function_ref<void(mlir::Value, mlir::StringRef)> setNameFn) {
for (auto result : getResults()) {
setNameFn(result, "xla_loop");
}
}
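// Names the loop region arguments for printing: induction variables starting
// at i; indexing-map results as ra, rb, ...; loop-carried values as "iter".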
void LoopOp::getAsmBlockArgumentNames(mlir::Region& region,
mlir::OpAsmSetValueNameFn setFn) {
char iv_name = 'i';
for (auto iv : getInductionVars()) {
setFn(iv, std::string{iv_name});
if (iv_name <= 'n') {
++iv_name;
}
}
std::string map_result_name = "ra";
char map_result_char = 'a';
for (auto map_result : getIndexingMapResults()) {
setFn(map_result, map_result_name);
if (map_result_char <= 'z') {
++map_result_char;
map_result_name[1] = map_result_char;
}
}
for (auto iv : getRegionIterArgs()) {
setFn(iv, "iter");
}
}
void LoopOp::build(OpBuilder& builder, OperationState& result,
IndexingMapAttr indexing_map_attr, ValueRange dims,
ValueRange inits, BodyBuilderFn bodyBuilder) {
OpBuilder::InsertionGuard guard(builder);
int64_t num_ivs = indexing_map_attr.getRangeVars().size();
int64_t num_indexing_map_results =
indexing_map_attr.getIndexingMap().GetNumResults();
int64_t num_inits = inits.size();
result.addOperands(dims);
result.addOperands(inits);
result.addTypes(TypeRange(inits));
Block* body_block = builder.createBlock(result.addRegion());
for (int i = 0, e = num_ivs + num_indexing_map_results; i < e; ++i) {
body_block->addArgument(builder.getIndexType(), result.location);
}
for (auto init_type : TypeRange(inits)) {
body_block->addArguments(init_type, result.location);
}
mlir::OperationName opname(LoopOp::getOperationName(), builder.getContext());
result.addAttribute(LoopOp::getIndexingMapAttrAttrName(opname),
indexing_map_attr);
result.addAttribute(
LoopOp::getOperandSegmentSizesAttrName(opname),
builder.getDenseI32ArrayAttr({static_cast<int32_t>(dims.size()),
static_cast<int32_t>(inits.size())}));
if (bodyBuilder) {
OpBuilder::InsertionGuard guard(builder);
builder.setInsertionPointToStart(body_block);
bodyBuilder(
builder, result.location,
body_block->getArguments().take_front(num_ivs),
body_block->getArguments().drop_front(num_ivs).drop_back(num_inits),
body_block->getArguments().take_back(num_inits));
}
}
void LoopOp::build(OpBuilder& builder, OperationState& result,
const IndexingMap& indexing_map, ValueRange dims,
ValueRange inits, BodyBuilderFn bodyBuilder) {
build(builder, result,
IndexingMapAttr::get(builder.getContext(), indexing_map), dims, inits,
bodyBuilder);
}
ParseResult LoopOp::parse(OpAsmParser& parser, OperationState& result) {
SmallVector<OpAsmParser::Argument, 4> region_args, ivs, map_results,
iter_args;
SmallVector<OpAsmParser::UnresolvedOperand, 4> dim_operands;
auto* ctx = parser.getContext();
OpBuilder b(ctx);
Type index_type = b.getIndexType();
if (parser.parseOperandList(dim_operands, OpAsmParser::Delimiter::Paren) ||
parser.resolveOperands(dim_operands, index_type, result.operands))
return failure();
if (parser.parseArgumentList(ivs, OpAsmParser::Delimiter::Square))
return failure();
for (auto iv : ivs) {
region_args.push_back(iv);
region_args.back().type = index_type;
}
if (parser.parseArrow() ||
parser.parseArgumentList(map_results, OpAsmParser::Delimiter::Paren))
return failure();
for (auto map_result : map_results) {
region_args.push_back(map_result);
region_args.back().type = index_type;
}
IndexingMapAttr indexing_map_attr;
if (parser.parseKeyword("in") ||
parser.parseAttribute(indexing_map_attr, "indexing_map_attr",
result.attributes)) {
return failure();
}
SmallVector<OpAsmParser::UnresolvedOperand, 4> init_operands;
if (parser.parseKeyword("iter_args") ||
parser.parseAssignmentList(iter_args, init_operands) ||
parser.parseArrowTypeList(result.types) ||
parser.resolveOperands(init_operands, result.types, parser.getNameLoc(),
result.operands))
return failure();
for (auto [index, iter_arg] : llvm::enumerate(iter_args)) {
region_args.push_back(iter_arg);
region_args.back().type = result.types[index];
}
if (region_args.size() !=
result.types.size() + ivs.size() + map_results.size()) {
return parser.emitError(
parser.getNameLoc(),
"mismatch in number of induction variables + loop-carried values + "
"number of indexing map results variables and the number of results");
}
Region* body = result.addRegion();
if (parser.parseRegion(*body, region_args)) return failure();
LoopOp::ensureTerminator(*body, b, result.location);
result.addAttribute(
LoopOp::getOperandSegmentSizeAttr(),
b.getDenseI32ArrayAttr({static_cast<int32_t>(dim_operands.size()),
static_cast<int32_t>(iter_args.size())}));
if (parser.parseOptionalAttrDict(result.attributes)) return failure();
return success();
}
void LoopOp::print(OpAsmPrinter& p) {
p << " (" << getDims() << ")[" << getInductionVars() << "] -> ("
<< getIndexingMapResults() << ") in " << getIndexingMapAttr()
<< " iter_args(";
llvm::interleaveComma(
llvm::zip(getRegionIterArgs(), getInits()), p,
[&](auto it) { p << std::get<0>(it) << " = " << std::get<1>(it); });
p << ") -> (" << getInits().getTypes() << ") ";
p.printRegion(getRegion(), false,
true);
p.printOptionalAttrDict((*this)->getAttrs(),
{
getIndexingMapAttrAttrName(),
getOperandSegmentSizesAttrName(),
});
}
LogicalResult LoopOp::verify() {
if (getInits().size() != getNumResults()) {
return emitOpError("mismatch in number of loop-carried values and results");
}
IndexingMap indexing_map = getIndexingMap();
if (indexing_map.GetRangeVarsCount() != getNumInductionVars()) {
return emitOpError() << "mismatch in number of induction variables "
<< getNumInductionVars()
<< " and RangeVars in the indexing map "
<< ToString(indexing_map);
}
if (indexing_map.GetDimVarsCount() != getDims().size()) {
return emitOpError() << "mismatch in number of dims operands "
<< getDims().size()
<< " and DimVars in the indexing map "
<< ToString(indexing_map);
}
for (auto [bb_arg, result_type, init] :
llvm::zip(getRegionIterArgs(), getResultTypes(), getInits())) {
if (bb_arg.getType() != result_type || init.getType() != result_type) {
return emitOpError() << "block iter arg type = " << bb_arg.getType()
<< ", result type = " << result_type
<< " and init operand type = " << init.getType()
<< " should match";
}
}
return success();
}
IndexingMap LoopOp::getIndexingMap() {
return getIndexingMapAttr().getIndexingMap();
}
namespace {
struct SimplifyLoopOfApplyIndexing : public mlir::OpRewritePattern<LoopOp> {
using OpRewritePattern<LoopOp>::OpRewritePattern;
LogicalResult matchAndRewrite(LoopOp loop_op,
PatternRewriter& rewriter) const override {
auto loop_indexing_map = loop_op.getIndexingMap();
MLIRContext* ctx = loop_op.getContext();
int num_dims = loop_indexing_map.GetDimVarsCount();
SmallVector<std::pair<int, ApplyIndexingOp>, 2> apply_indexing_ops;
bool all_apply_indexing_operands_have_one_use = true;
for (auto& operand : loop_op->getOpOperands().take_front(num_dims)) {
if (auto producer = operand.get().getDefiningOp<ApplyIndexingOp>()) {
if (producer.getIndexingMap().GetSymbolCount() > 0) {
continue;
}
apply_indexing_ops.push_back({operand.getOperandNumber(), producer});
all_apply_indexing_operands_have_one_use &= producer->hasOneUse();
}
}
if (apply_indexing_ops.empty()) {
return rewriter.notifyMatchFailure(
loop_op,
"No loop(apply_indexing) patterns found. Note that producer "
"apply_indexing should have already been simplified via "
"MoveSymbolsToDims pattern.");
}
mlir::DenseMap<Value, AffineExpr> operand_exprs;
for (auto& operand : loop_op->getOpOperands().take_front(num_dims)) {
int operand_number = operand.getOperandNumber();
operand_exprs[operand.get()] = getAffineDimExpr(operand_number, ctx);
}
auto replacement = GetNewIndexingMapAfterFoldingSequence(
loop_indexing_map, apply_indexing_ops, operand_exprs, ctx);
if (!all_apply_indexing_operands_have_one_use &&
!replacement.indexing_map.Simplify()) {
return rewriter.notifyMatchFailure(
loop_op, "Folded indexing map of the loop op was not simplified");
}
int new_num_dims = num_dims + replacement.added_dim_args.size();
SmallVector<Value> aggregate_dims;
aggregate_dims.reserve(new_num_dims);
auto begin = loop_op.getOperands().begin();
aggregate_dims.append(begin, begin + num_dims);
aggregate_dims.append(replacement.added_dim_args);
SmallVector<Value, 4> used_dims;
used_dims.reserve(aggregate_dims.size());
auto used_dim_bit_vector = ~replacement.indexing_map.RemoveUnusedVars();
for (auto used_dim_idx : used_dim_bit_vector.set_bits()) {
if (used_dim_idx < new_num_dims) {
used_dims.push_back(aggregate_dims[used_dim_idx]);
}
}
auto new_loop_op =
rewriter.create<LoopOp>(loop_op.getLoc(), replacement.indexing_map,
used_dims, loop_op.getInits());
Block* original_block = &loop_op.getRegion().front();
Block* new_block = &new_loop_op.getRegion().front();
rewriter.mergeBlocks(original_block, new_block, new_block->getArguments());
rewriter.replaceOp(loop_op, new_loop_op.getResults());
return success();
}
};
}
void LoopOp::getCanonicalizationPatterns(mlir::RewritePatternSet& results,
MLIRContext* context) {
results.add<SimplifyLoopOfApplyIndexing>(context);
}
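// Buckets every constraint of `map` by each dimension and symbol appearing in
// its left-hand side expression, so a constraint mentioning several variables
// is recorded for each of them.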
VariableConstraints GetConstraintsForVariables(const IndexingMap& map) {
VariableConstraints result;
result.constraints_for_dims.resize(map.GetDimensionCount());
result.constraints_for_symbols.resize(map.GetSymbolCount());
for (const auto& constraint : map.GetConstraints()) {
constraint.first.walk([&](mlir::AffineExpr leaf) {
if (auto dim = mlir::dyn_cast<mlir::AffineDimExpr>(leaf)) {
result.constraints_for_dims[dim.getPosition()].insert(constraint);
} else if (auto sym = mlir::dyn_cast<mlir::AffineSymbolExpr>(leaf)) {
result.constraints_for_symbols[sym.getPosition()].insert(constraint);
}
});
}
return result;
}
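// Checks that the operand-side and result-side indexing maps are compatible:
// both must have a thread_id dimension (dim 0) with identical bounds and
// constraints; their range variables must agree in count, bounds, and
// constraints; and when a block_id dimension (dim 1) is present, the result
// map may not depend on it and its constraints must match on both sides.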
LogicalResult MaterializeOp::verify() {
IndexingMap map_in = getMap().getIndexingMap();
IndexingMap map_out =
getResult().getType().getIndexingMapAttr().getIndexingMap();
if (getIndices().size() != map_in.GetDimVarsCount()) {
return emitOpError() << "number of indices must match number of dimensions "
"of indexing map";
}
if (map_in.GetDimVarsCount() == 0 || map_out.GetDimVarsCount() == 0) {
return emitOpError()
<< "must have thread_id dimension in both indexing maps";
}
if (map_in.GetDimVars(0).bounds != map_out.GetDimVars(0).bounds) {
return emitOpError() << "thread_id dimension must have the same bounds in "
"both indexing maps";
}
auto variable_constraints_in = GetConstraintsForVariables(map_in);
auto variable_constraints_out = GetConstraintsForVariables(map_out);
if (variable_constraints_in.constraints_for_dims[0] !=
variable_constraints_out.constraints_for_dims[0]) {
return emitOpError() << "constraints of indexing maps must be equal for "
<< "the thread_id dimension";
}
if (map_in.GetRangeVarsCount() != map_out.GetRangeVarsCount()) {
return emitOpError()
<< "number of symbols in both indexing_maps must match";
}
for (auto const& [range_in, range_out] :
llvm::zip(map_in.GetRangeVars(), map_out.GetRangeVars())) {
if (range_in.bounds != range_out.bounds) {
return emitOpError() << "domain of symbols of indexing_maps must match";
}
}
if (variable_constraints_in.constraints_for_symbols !=
variable_constraints_out.constraints_for_symbols) {
return emitOpError()
<< "constraints of indexing maps must be equal for all symbols";
}
if (map_out.GetDimVarsCount() > 1) {
for (auto expr : map_out.GetAffineMap().getResults()) {
if (expr.isFunctionOfDim(1)) {
return emitOpError() << "vector mapping indices must not depend on the "
<< "block ID";
}
}
}
if (map_in.GetDimVarsCount() > 1 && map_out.GetDimVarsCount() > 1) {
if (variable_constraints_in.constraints_for_dims[1] !=
variable_constraints_out.constraints_for_dims[1]) {
return emitOpError() << "constraints of indexing maps must be equal for "
<< "the block_id dimension";
}
} else if (map_in.GetDimVarsCount() > 1 &&
!variable_constraints_in.constraints_for_dims[1].empty()) {
return emitOpError() << "constraints of indexing maps must be equal for "
<< "the block_id dimension";
} else if (map_out.GetDimVarsCount() > 1 &&
!variable_constraints_out.constraints_for_dims[1].empty()) {
return emitOpError() << "constraints of indexing maps must be equal for "
<< "the block_id dimension";
}
return success();
}
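// The insert map must be symbol-free, and the source's indexing map must
// yield exactly one result per dimension of the insert map.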
LogicalResult InsertOp::verify() {
if (!getMap().getIndexingMap().GetRangeVars().empty()) {
return emitOpError() << "insert_op map must not have any symbols";
}
int64_t vector_map_num_results =
getSource().getType().getIndexingMapAttr().getNumResults();
if (vector_map_num_results != getMap().getIndexingMap().GetDimVars().size()) {
return emitOpError() << "source map result count must equal insert_op's "
"map's dimension count";
}
return success();
}
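// Convenience builder that wraps the given IndexingMap into an
// IndexingMapAttr before delegating to the generated builder.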
void ReindexOp::build(mlir::OpBuilder& builder, mlir::OperationState& result,
mlir::Type type, mlir::Value operand, mlir::Value padding,
const xla::gpu::IndexingMap& indexing_map) {
IndexingMapAttr indexing_map_attr =
IndexingMapAttr::get(builder.getContext(), indexing_map);
build(builder, result, type, operand, padding, indexing_map_attr);
}
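// Derives the result tensor types by deleting the reduced dimensions from
// the (shared) input shape. The skip logic below assumes `reduced_dims` is
// sorted in ascending order.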
SmallVector<Type, 2> inferReductionResultTypes(TypeRange input_types,
ArrayRef<int64_t> reduced_dims) {
auto input_shape =
mlir::cast<RankedTensorType>(input_types.front()).getShape();
auto num_reduced_dims = reduced_dims.size();
SmallVector<int64_t, 4> output_shape;
output_shape.reserve(input_shape.size() - num_reduced_dims);
int reduce_dim = 0;
for (int64_t i = 0; i < input_shape.size(); ++i) {
if (reduce_dim < num_reduced_dims && i == reduced_dims[reduce_dim]) {
++reduce_dim;
continue;
}
output_shape.push_back(input_shape[i]);
}
SmallVector<Type, 2> result_types;
result_types.reserve(input_types.size());
for (auto input_type : input_types) {
result_types.push_back(RankedTensorType::get(
output_shape,
mlir::cast<RankedTensorType>(input_type).getElementType()));
}
return result_types;
}
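// Each init operand carries the scalar element type of the matching input.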
SmallVector<Type, 2> inferReductionInitTypes(TypeRange input_types) {
SmallVector<Type, 2> init_types;
init_types.reserve(input_types.size());
for (auto input_type : input_types) {
init_types.push_back(
mlir::cast<RankedTensorType>(input_type).getElementType());
}
return init_types;
}
LogicalResult ReduceOp::inferReturnTypes(
MLIRContext* context, std::optional<Location> location, ValueRange operands,
mlir::DictionaryAttr attributes, mlir::OpaqueProperties properties,
mlir::RegionRange regions,
mlir::SmallVectorImpl<Type>& inferredReturnTypes) {
ReduceOp::Adaptor adaptor(operands, attributes, properties, regions);
inferredReturnTypes.append(inferReductionResultTypes(
TypeRange{adaptor.getInputs()}, adaptor.getDimensions()));
return success();
}
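// Custom assembly: "(inputs) inits(inits) dimensions=[...] combiner=@sym
// {attrs} : input-types to result-types". An illustrative instance (the
// `xla_gpu.reduce` mnemonic is an assumption here, not taken from this file):
//   %sum = xla_gpu.reduce(%in) inits(%c0) dimensions=[0, 2] combiner=@add
//          : tensor<8x4x16xf32> to tensor<4xf32>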
ParseResult ReduceOp::parse(OpAsmParser& parser, OperationState& result) {
SmallVector<OpAsmParser::UnresolvedOperand, 4> inputs;
SmallVector<OpAsmParser::UnresolvedOperand, 4> inits;
SmallVector<int64_t, 2> dimensions;
mlir::StringAttr combiner;
SmallVector<Type, 2> input_types;
SmallVector<Type, 2> result_types;
if (parser.parseLParen() || parseOperands(parser, &inputs) ||
parser.parseRParen() || parser.parseKeyword("inits") ||
parser.parseLParen() || parseOperands(parser, &inits) ||
parser.parseRParen() || parser.parseKeyword("dimensions") ||
parser.parseEqual() ||
parser.parseCommaSeparatedList(OpAsmParser::Delimiter::Square,
[&]() -> ParseResult {
return parser.parseInteger(
dimensions.emplace_back());
}) ||
parser.parseKeyword("combiner") || parser.parseEqual() ||
parser.parseSymbolName(combiner) ||
parser.parseOptionalAttrDict(result.attributes) ||
parser.parseColonTypeList(input_types) || parser.parseKeyword("to") ||
parser.parseTypeList(result_types)) {
return failure();
}
auto ctx = result.getContext();
mlir::OperationName opname(ReduceOp::getOperationName(), ctx);
result.addAttribute(ReduceOp::getDimensionsAttrName(opname),
DenseI64ArrayAttr::get(ctx, dimensions));
result.addAttribute(ReduceOp::getCombinerAttrName(opname),
mlir::FlatSymbolRefAttr::get(ctx, combiner));
result.addTypes(result_types);
auto init_types = inferReductionInitTypes(input_types);
mlir::SMLoc loc = parser.getCurrentLocation();
if (parser.resolveOperands(inputs, input_types, loc, result.operands) ||
parser.resolveOperands(inits, init_types, loc, result.operands)) {
return failure();
}
return success();
}
void ReduceOp::print(OpAsmPrinter& p) {
p << '(' << getInputs() << ") inits(" << getInits() << ") dimensions=["
<< getDimensions() << "] combiner=@" << getCombiner();
p.printOptionalAttrDict((*this)->getAttrs(),
{getCombinerAttrName(), getDimensionsAttrName()});
p << " : " << TypeRange(getInputs()) << " to " << TypeRange(getResults());
}
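// Verifies that every init operand has the element type inferred from its
// input and that the referenced combiner resolves to a func.func of type
// (T..., T...) -> (T...) over those element types.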
LogicalResult ReduceOp::verify() {
auto inferred_init_types = inferReductionInitTypes(TypeRange(getInputs()));
for (auto [inferred_init_type, init_type] :
llvm::zip(inferred_init_types, TypeRange(getInits()))) {
if (inferred_init_type != init_type) {
return emitOpError() << "init type " << init_type
<< " does not match inferred type "
<< inferred_init_type;
}
}
auto module = this->getOperation()->getParentOfType<mlir::ModuleOp>();
auto combiner = module.lookupSymbol<mlir::func::FuncOp>(getCombinerAttr());
if (!combiner) {
return emitOpError() << "combiner `@" << getCombiner() << "` not found";
}
SmallVector<Type, 2> combiner_operand_types;
combiner_operand_types.reserve(getNumOperands());
combiner_operand_types.append(inferred_init_types);
combiner_operand_types.append(inferred_init_types);
auto expected_combiner_type = mlir::FunctionType::get(
getContext(), combiner_operand_types, inferred_init_types);
if (expected_combiner_type != combiner.getFunctionType()) {
return emitOpError() << "provided combiner `@" << getCombiner()
<< " expected to have type " << expected_combiner_type
<< " but got " << combiner.getFunctionType();
}
return success();
}
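// Custom assembly: "(operands) to max_distance combiner=@sym {attrs} :
// operand-types"; the result types equal the operand types. Illustrative
// instance (mnemonic assumed):
//   %r = xla_gpu.shuffle_reduce(%v) to 16 combiner=@max : f32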
ParseResult ShuffleReduceOp::parse(OpAsmParser& parser,
OperationState& result) {
SmallVector<OpAsmParser::UnresolvedOperand, 4> inputs;
mlir::StringAttr combiner;
int64_t max_distance;
SmallVector<Type, 2> operand_types;
mlir::SMLoc loc = parser.getCurrentLocation();
if (parser.parseLParen() || parseOperands(parser, &inputs) ||
parser.parseRParen() || parser.parseKeyword("to") ||
parser.parseInteger(max_distance) || parser.parseKeyword("combiner") ||
parser.parseEqual() || parser.parseSymbolName(combiner) ||
parser.parseOptionalAttrDict(result.attributes) ||
parser.parseColonTypeList(operand_types) ||
parser.resolveOperands(inputs, operand_types, loc, result.operands)) {
return failure();
}
auto ctx = result.getContext();
mlir::OperationName opname(ShuffleReduceOp::getOperationName(), ctx);
result.addAttribute(ShuffleReduceOp::getCombinerAttrName(opname),
mlir::FlatSymbolRefAttr::get(ctx, combiner));
result.addAttribute(
ShuffleReduceOp::getMaxDistanceAttrName(opname),
mlir::IntegerAttr::get(mlir::IntegerType::get(ctx, 64), max_distance));
result.addTypes(operand_types);
return success();
}
void ShuffleReduceOp::print(OpAsmPrinter& p) {
p << '(' << getOperands() << ") to " << getMaxDistance() << " combiner=@"
<< getCombiner();
p.printOptionalAttrDict((*this)->getAttrs(),
{getCombinerAttrName(), getMaxDistanceAttrName()});
p << " : " << TypeRange(getResultTypes());
}
}  // namespace gpu
}  // namespace xla
#define GET_OP_CLASSES
#include "xla/service/gpu/fusions/ir/xla_gpu_ops.cc.inc" | #include "xla/service/gpu/fusions/ir/xla_gpu_ops.h"
#include <gtest/gtest.h>
#include "xla/service/gpu/model/indexing_map.h"
#include "xla/service/gpu/model/indexing_map_serialization.h"
#include "xla/service/gpu/model/indexing_test_utils.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/test.h"
namespace xla::gpu {
namespace {
using ::testing::ElementsAre;
using ::testing::IsEmpty;
using ::testing::Pair;
using ::testing::UnorderedElementsAre;
class XLAGPUOpsTest : public HloTestBase {
public:
mlir::MLIRContext mlir_context_;
};
TEST_F(XLAGPUOpsTest, GetConstraintsForVariables) {
auto map = *ParseIndexingMap(R"(
(d0, d1)[s0, s1] -> (d0 + s0, d1 + s1),
domain: d0 in [0, 5],
d1 in [0, 2],
s0 in [0, 32],
s1 in [0, 1024],
d1 + s1 in [0, 4],
d1 mod 32 in [0, 6],
s0 + s1 in [0, 3],
s0 mod 4 in [0, 1],
s1 mod 4 in [0, 2]
)",
&mlir_context_);
auto constraints_for_variables = GetConstraintsForVariables(map);
EXPECT_THAT(constraints_for_variables.constraints_for_dims[0],
UnorderedElementsAre());
EXPECT_THAT(
constraints_for_variables.constraints_for_dims[1],
UnorderedElementsAre(
Pair(ParseAffineExpr("s1 + d1", &mlir_context_), Interval{0, 4}),
Pair(ParseAffineExpr("d1 mod 32", &mlir_context_), Interval{0, 6})));
EXPECT_THAT(
constraints_for_variables.constraints_for_symbols[0],
UnorderedElementsAre(
Pair(ParseAffineExpr("s0 mod 4", &mlir_context_), Interval{0, 1}),
Pair(ParseAffineExpr("s0 + s1", &mlir_context_), Interval{0, 3})));
EXPECT_THAT(
constraints_for_variables.constraints_for_symbols[1],
UnorderedElementsAre(
Pair(ParseAffineExpr("s1 mod 4", &mlir_context_), Interval{0, 2}),
Pair(ParseAffineExpr("s0 + s1", &mlir_context_), Interval{0, 3}),
Pair(ParseAffineExpr("s1 + d1", &mlir_context_), Interval{0, 4})));
}
TEST_F(XLAGPUOpsTest, GetConstraintsForVariablesEmpty) {
auto map = *ParseIndexingMap(R"(
(d0, d1)[s0, s1] -> (d0 + s0, d1 + s1),
domain: d0 in [0, 5],
d1 in [0, 2],
s0 in [0, 32],
s1 in [0, 1024],
)",
&mlir_context_);
auto constraints_for_variables = GetConstraintsForVariables(map);
EXPECT_THAT(constraints_for_variables.constraints_for_dims,
ElementsAre(IsEmpty(), IsEmpty()));
EXPECT_THAT(constraints_for_variables.constraints_for_symbols,
ElementsAre(IsEmpty(), IsEmpty()));
}
}  // namespace
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/fusions/ir/xla_gpu_ops.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/fusions/ir/xla_gpu_ops_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
4ea07cfb-2e0c-4af9-bde7-dfeec7e2fe36 | cpp | tensorflow/tensorflow | convert_type | tensorflow/compiler/mlir/tensorflow/utils/convert_type.cc | tensorflow/compiler/mlir/tensorflow/utils/convert_type_test.cc | #include "tensorflow/compiler/mlir/tensorflow/utils/convert_type.h"
#include <limits>
#include "absl/strings/str_cat.h"
#include "llvm/Support/Casting.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/Types.h"
#include "mlir/Support/DebugStringHelper.h"
#include "mlir/Support/LLVM.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_types.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/dynamic_shape_utils.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/lib/core/errors.h"
namespace tensorflow {
using mlir::Builder;
using mlir::ShapedType;
using mlir::Type;
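// Maps a TensorFlow DataType onto the equivalent MLIR builtin or tf_type
// type, e.g. DT_FLOAT -> f32, DT_UINT16 -> ui16, DT_COMPLEX64 ->
// complex<f32>. Unsupported dtypes yield an Unimplemented error.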
Status ConvertDataType(DataType dtype, Builder builder, Type* type) {
switch (dtype) {
case DT_HALF:
*type = builder.getF16Type();
return absl::OkStatus();
case DT_FLOAT:
*type = builder.getF32Type();
return absl::OkStatus();
case DT_DOUBLE:
*type = builder.getF64Type();
return absl::OkStatus();
case DT_BOOL:
*type = builder.getIntegerType(1);
return absl::OkStatus();
case DT_INT8:
*type = builder.getIntegerType(8);
return absl::OkStatus();
case DT_INT16:
*type = builder.getIntegerType(16);
return absl::OkStatus();
case DT_INT32:
*type = builder.getIntegerType(32);
return absl::OkStatus();
case DT_INT64:
*type = builder.getIntegerType(64);
return absl::OkStatus();
case DT_UINT8:
*type = builder.getIntegerType(8, false);
return absl::OkStatus();
case DT_UINT16:
*type = builder.getIntegerType(16, false);
return absl::OkStatus();
case DT_UINT32:
*type = builder.getIntegerType(32, false);
return absl::OkStatus();
case DT_UINT64:
*type = builder.getIntegerType(64, false);
return absl::OkStatus();
case DT_BFLOAT16:
*type = builder.getBF16Type();
return absl::OkStatus();
case DT_COMPLEX64:
*type = mlir::ComplexType::get(builder.getF32Type());
return absl::OkStatus();
case DT_COMPLEX128:
*type = mlir::ComplexType::get(builder.getF64Type());
return absl::OkStatus();
case tensorflow::DT_FLOAT8_E4M3FN:
*type = builder.getFloat8E4M3FNType();
return absl::OkStatus();
case tensorflow::DT_FLOAT8_E5M2:
*type = builder.getFloat8E5M2Type();
return absl::OkStatus();
case DT_INT4:
*type = builder.getIntegerType(4, true);
return absl::OkStatus();
case DT_UINT4:
*type = builder.getIntegerType(4, false);
return absl::OkStatus();
#define HANDLE_TF_TYPE(tftype, enumerant, name) \
case DT_##enumerant: \
*type = builder.getType<mlir::tf_type::tftype##Type>(); \
return OkStatus();
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_types.def"
default:
return errors::Unimplemented(absl::StrCat(
"Converting DataType '", DataTypeString(dtype), "' to MLIR Type"));
}
}
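// Inverse of ConvertDataType for scalar (non-shaped) types; for integers the
// signedness of the MLIR type selects between the DT_INT* and DT_UINT*
// variants.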
Status ConvertScalarTypeToDataType(Type type, DataType* dtype) {
if (type.isF16()) {
*dtype = DT_HALF;
return absl::OkStatus();
} else if (type.isF32()) {
*dtype = DT_FLOAT;
return absl::OkStatus();
} else if (type.isF64()) {
*dtype = DT_DOUBLE;
return absl::OkStatus();
} else if (type.isBF16()) {
*dtype = DT_BFLOAT16;
return absl::OkStatus();
} else if (type.isFloat8E4M3FN()) {
*dtype = DT_FLOAT8_E4M3FN;
return absl::OkStatus();
} else if (type.isFloat8E5M2()) {
*dtype = DT_FLOAT8_E5M2;
return absl::OkStatus();
} else if (auto itype = mlir::dyn_cast<mlir::IntegerType>(type)) {
switch (itype.getWidth()) {
case 1:
*dtype = DT_BOOL;
return absl::OkStatus();
case 4:
*dtype = itype.isUnsigned() ? DT_UINT4 : DT_INT4;
return absl::OkStatus();
case 8:
*dtype = itype.isUnsigned() ? DT_UINT8 : DT_INT8;
return absl::OkStatus();
case 16:
*dtype = itype.isUnsigned() ? DT_UINT16 : DT_INT16;
return absl::OkStatus();
case 32:
*dtype = itype.isUnsigned() ? DT_UINT32 : DT_INT32;
return absl::OkStatus();
case 64:
*dtype = itype.isUnsigned() ? DT_UINT64 : DT_INT64;
return absl::OkStatus();
default:
return errors::Unimplemented(
absl::StrCat("Converting ", debugString(type), " to DataType"));
}
} else if (auto complex_type = mlir::dyn_cast<mlir::ComplexType>(type)) {
auto etype = complex_type.getElementType();
if (etype.isF32()) {
*dtype = DT_COMPLEX64;
return absl::OkStatus();
} else if (etype.isF64()) {
*dtype = DT_COMPLEX128;
return absl::OkStatus();
}
return errors::Unimplemented(
absl::StrCat("Converting ", debugString(type), " to DataType"));
}
#define HANDLE_TF_TYPE(tftype, enumerant, name) \
if (type.isa<mlir::tf_type::tftype##Type>()) { \
*dtype = DT_##enumerant; \
return OkStatus(); \
}
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_types.def"
return errors::Unimplemented(
absl::StrCat("Converting ", debugString(type), " to DataType"));
}
Status ConvertToDataType(Type type, DataType* dtype) {
if (auto stype = mlir::dyn_cast<ShapedType>(type)) {
TF_RETURN_IF_ERROR(
ConvertScalarTypeToDataType(stype.getElementType(), dtype));
} else {
TF_RETURN_IF_ERROR(ConvertScalarTypeToDataType(type, dtype));
}
return absl::OkStatus();
}
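// Copies a TensorShape into an MLIR shape vector, translating
// kTFDynamicSize into ShapedType::kDynamic.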
void ConvertToMlirShape(const TensorShape& input_shape,
llvm::SmallVectorImpl<int64_t>* shape) {
shape->reserve(input_shape.dims());
for (const auto& d : input_shape) {
shape->push_back(d.size == kTFDynamicSize ? ShapedType::kDynamic : d.size);
}
}
Status ConvertToMlirShape(const TensorShapeProto& input_shape,
llvm::SmallVectorImpl<int64_t>* shape) {
shape->reserve(input_shape.dim_size());
auto& dims = input_shape.dim();
for (auto& d : dims) {
if (d.size() > std::numeric_limits<int64_t>::max()) {
return errors::InvalidArgument("Shape element overflows");
}
shape->push_back(d.size() == kTFDynamicSize ? ShapedType::kDynamic
: d.size());
}
return absl::OkStatus();
}
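// Builds a tensor type from a shape proto plus dtype: unranked when the
// proto has unknown rank, ranked otherwise, e.g. ({-1, 27}, DT_BFLOAT16)
// becomes tensor<?x27xbf16>.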
absl::StatusOr<mlir::Type> ConvertToMlirTensorType(
const TensorShapeProto& shape, DataType dtype, mlir::Builder* builder) {
mlir::Type element_type;
TF_RETURN_IF_ERROR(ConvertDataType(dtype, *builder, &element_type));
if (shape.unknown_rank()) {
return mlir::UnrankedTensorType::get(element_type);
}
llvm::SmallVector<int64_t, 4> shape_dims;
TF_RETURN_IF_ERROR(ConvertToMlirShape(shape, &shape_dims));
return GetTypeFromTFTensorShape(shape_dims, element_type);
}
} | #include "tensorflow/compiler/mlir/tensorflow/utils/convert_type.h"
#include <string>
#include <vector>
#include "llvm/Support/raw_ostream.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/MLIRContext.h"
#include "xla/test.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status_test_util.h"
namespace tensorflow {
namespace {
std::string ConvertToMlirString(const std::vector<int64_t>& dims,
bool unknown_rank, DataType dtype) {
TensorShapeProto shape;
shape.set_unknown_rank(unknown_rank);
for (int64_t dim : dims) {
shape.add_dim()->set_size(dim);
}
mlir::MLIRContext context;
mlir::Builder b(&context);
auto status_or = ConvertToMlirTensorType(shape, dtype, &b);
std::string buf;
llvm::raw_string_ostream os(buf);
status_or.value().print(os);
return os.str();
}
TEST(MlirConvertType, ConvertToMlirTensorType) {
EXPECT_EQ("tensor<4x8x16xi32>",
ConvertToMlirString({4, 8, 16}, false,
DataType::DT_INT32));
EXPECT_EQ("tensor<?x27x?xbf16>",
ConvertToMlirString({-1, 27, -1}, false,
DataType::DT_BFLOAT16));
EXPECT_EQ("tensor<*xf32>",
ConvertToMlirString({}, true, DataType::DT_FLOAT));
}
}  // namespace
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tensorflow/utils/convert_type.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tensorflow/utils/convert_type_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
56ea50e1-e04c-4cea-bb42-857fa1568bc0 | cpp | tensorflow/tensorflow | index | third_party/xla/xla/python/ifrt/index.cc | third_party/xla/xla/python/ifrt/index_test.cc | #include "xla/python/ifrt/index.h"
#include <ostream>
#include <string>
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
namespace xla {
namespace ifrt {
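// Renders the index elements as e.g. "[1,2,3]".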
std::string Index::DebugString() const {
return absl::StrCat("[", absl::StrJoin(elements_, ","), "]");
}
std::ostream& operator<<(std::ostream& os, const Index& index) {
return os << index.DebugString();
}
}  // namespace ifrt
} | #include "xla/python/ifrt/index.h"
#include <memory>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/hash/hash_testing.h"
namespace xla {
namespace ifrt {
namespace {
using ::testing::ElementsAre;
TEST(IndexTest, Construction) {
EXPECT_THAT(Index({1, 2}).elements(), ElementsAre(1, 2));
EXPECT_THAT(Index::Zeros(2).elements(), ElementsAre(0, 0));
}
TEST(IndexTest, Operations) {
EXPECT_EQ(Index({1, 2}), Index({1, 2}));
EXPECT_NE(Index({1, 2}), Index({1, 3}));
Index a({11, 22});
Index b({2, 3});
EXPECT_EQ(a + b, Index({13, 25}));
{
Index c = a;
EXPECT_EQ(c += b, Index({13, 25}));
}
EXPECT_EQ(a - b, Index({9, 19}));
{
Index c = a;
EXPECT_EQ(c -= b, Index({9, 19}));
}
EXPECT_EQ(a * std::vector<int64_t>({1, 2}), Index({11, 44}));
{
Index c = a;
EXPECT_EQ(c *= std::vector<int64_t>({1, 2}), Index({11, 44}));
}
}
TEST(IndexTest, Hash) {
EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly({
Index({}),
Index({1}),
Index({2}),
Index({1, 2}),
Index({1, 3}),
Index({2, 1}),
Index({1, 2, 3}),
Index({1, 2, 4}),
}));
}
}  // namespace
}  // namespace ifrt
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/python/ifrt/index.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/python/ifrt/index_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
85256940-c06d-4a66-ad6c-9c9d871a9f1c | cpp | tensorflow/tensorflow | bidirectional_sequence_rnn | tensorflow/lite/kernels/bidirectional_sequence_rnn.cc | tensorflow/lite/kernels/bidirectional_sequence_rnn_test.cc | #include <algorithm>
#include <cstddef>
#include <cstdint>
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/kernel_utils.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/kernels/op_macros.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace bidirectional_sequence_rnn {
namespace {
struct OpData {
int scratch_tensor_index;
bool fw_compute_row_sums = false;
bool bw_compute_row_sums = false;
};
}  // namespace
constexpr int kInputTensor = 0;
constexpr int kFwWeightsTensor = 1;
constexpr int kFwRecurrentWeightsTensor = 2;
constexpr int kFwBiasTensor = 3;
constexpr int kFwHiddenStateTensor = 4;
constexpr int kBwWeightsTensor = 5;
constexpr int kBwRecurrentWeightsTensor = 6;
constexpr int kBwBiasTensor = 7;
constexpr int kBwHiddenStateTensor = 8;
constexpr int kAuxInputTensor = 9;
constexpr int kFwAuxWeightsTensor = 10;
constexpr int kBwAuxWeightsTensor = 11;
constexpr int kFwOutputTensor = 0;
constexpr int kBwOutputTensor = 1;
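// Indices of the scratch tensors that Prepare() allocates for the hybrid
// (quantized-weights) path; kAuxInputQuantized is only allocated when an
// auxiliary input is present.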
enum TemporaryTensor {
kInputQuantized = 0,
kFwHiddenStateQuantized = 1,
kBwHiddenStateQuantized = 2,
kScalingFactors = 3,
kAccumScratch = 4,
kZeroPoints = 5,
kFwRowSums = 6,
kBwRowSums = 7,
kAuxInputQuantized = 8,
kNumTemporaryTensors = 9
};
void* Init(TfLiteContext* context, const char* buffer, size_t length) {
auto* op_data = new OpData();
context->AddTensors(context, kNumTemporaryTensors,
&op_data->scratch_tensor_index);
return op_data;
}
void Free(TfLiteContext* context, void* buffer) {
delete reinterpret_cast<OpData*>(buffer);
}
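// Validates operand counts, shapes, and types, sets up the hybrid-path
// temporaries when the weights are quantized, and resizes the forward (and,
// unless merge_outputs is set, backward) output tensors.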
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
const auto* params = reinterpret_cast<TfLiteBidirectionalSequenceRNNParams*>(
node->builtin_data);
TF_LITE_ENSURE_EQ(context, node->inputs->size, 12);
TF_LITE_ENSURE_EQ(context, node->outputs->size,
params->merge_outputs ? 1 : 2);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
const TfLiteTensor* fw_input_weights;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kFwWeightsTensor,
&fw_input_weights));
const TfLiteTensor* fw_recurrent_weights;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kFwRecurrentWeightsTensor,
&fw_recurrent_weights));
const TfLiteTensor* fw_bias;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kFwBiasTensor, &fw_bias));
const TfLiteTensor* fw_hidden_state;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kFwHiddenStateTensor,
&fw_hidden_state));
const TfLiteTensor* bw_input_weights;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kBwWeightsTensor,
&bw_input_weights));
const TfLiteTensor* bw_recurrent_weights;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kBwRecurrentWeightsTensor,
&bw_recurrent_weights));
const TfLiteTensor* bw_bias;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kBwBiasTensor, &bw_bias));
const TfLiteTensor* bw_hidden_state;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kBwHiddenStateTensor,
&bw_hidden_state));
const TfLiteTensor* aux_input =
GetOptionalInputTensor(context, node, kAuxInputTensor);
const TfLiteTensor* fw_aux_input_weights =
GetOptionalInputTensor(context, node, kFwAuxWeightsTensor);
const TfLiteTensor* bw_aux_input_weights =
GetOptionalInputTensor(context, node, kBwAuxWeightsTensor);
const bool aux_inputs_weights_or_none =
((fw_aux_input_weights != nullptr) &&
(bw_aux_input_weights != nullptr)) ||
((fw_aux_input_weights == nullptr) && (bw_aux_input_weights == nullptr));
TF_LITE_ENSURE(context, aux_inputs_weights_or_none);
const bool has_aux_input = (fw_aux_input_weights != nullptr);
TF_LITE_ENSURE_TYPES_EQ(context, input->type, kTfLiteFloat32);
TF_LITE_ENSURE_EQ(context, input->dims->size, 3);
const bool time_major = params->time_major;
const int batch_size =
(time_major) ? input->dims->data[1] : input->dims->data[0];
const int max_time =
(time_major) ? input->dims->data[0] : input->dims->data[1];
const int fw_num_units = fw_input_weights->dims->data[0];
const int bw_num_units = bw_input_weights->dims->data[0];
TF_LITE_ENSURE_EQ(context, input->dims->data[2],
fw_input_weights->dims->data[1]);
TF_LITE_ENSURE_EQ(context, input->dims->data[2],
bw_input_weights->dims->data[1]);
TF_LITE_ENSURE_EQ(context, fw_input_weights->dims->data[0],
fw_bias->dims->data[0]);
TF_LITE_ENSURE_EQ(context, bw_input_weights->dims->data[0],
bw_bias->dims->data[0]);
TF_LITE_ENSURE_EQ(context, fw_recurrent_weights->dims->data[0],
fw_bias->dims->data[0]);
TF_LITE_ENSURE_EQ(context, bw_recurrent_weights->dims->data[1],
bw_bias->dims->data[0]);
TF_LITE_ENSURE_EQ(context, NumDimensions(fw_hidden_state), 2);
TF_LITE_ENSURE_EQ(context, fw_hidden_state->dims->data[0], batch_size);
TF_LITE_ENSURE_EQ(context, fw_hidden_state->dims->data[1], fw_num_units);
TF_LITE_ENSURE_EQ(context, NumDimensions(bw_hidden_state), 2);
TF_LITE_ENSURE_EQ(context, bw_hidden_state->dims->data[0], batch_size);
TF_LITE_ENSURE_EQ(context, bw_hidden_state->dims->data[1], bw_num_units);
if (has_aux_input) {
TF_LITE_ASSERT_EQ(aux_input->dims->data[0], input->dims->data[0]);
TF_LITE_ASSERT_EQ(aux_input->dims->data[1], input->dims->data[1]);
TF_LITE_ASSERT_EQ(fw_aux_input_weights->dims->data[0], fw_num_units);
TF_LITE_ASSERT_EQ(bw_aux_input_weights->dims->data[0], bw_num_units);
TF_LITE_ASSERT_EQ(aux_input->dims->data[2],
fw_aux_input_weights->dims->data[1]);
TF_LITE_ASSERT_EQ(aux_input->dims->data[2],
bw_aux_input_weights->dims->data[1]);
}
if (IsHybridOp(input, fw_input_weights)) {
OpData* op_data = reinterpret_cast<OpData*>(node->user_data);
op_data->fw_compute_row_sums = true;
op_data->bw_compute_row_sums = true;
TfLiteIntArrayFree(node->temporaries);
if (has_aux_input) {
node->temporaries = TfLiteIntArrayCreate(kNumTemporaryTensors);
} else {
node->temporaries = TfLiteIntArrayCreate(kNumTemporaryTensors - 1);
}
node->temporaries->data[kInputQuantized] =
op_data->scratch_tensor_index + kInputQuantized;
TfLiteTensor* input_quantized;
TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node, kInputQuantized,
&input_quantized));
input_quantized->type = fw_input_weights->type;
input_quantized->allocation_type = kTfLiteArenaRw;
if (!TfLiteIntArrayEqual(input_quantized->dims, input->dims)) {
TfLiteIntArray* input_quantized_size = TfLiteIntArrayCopy(input->dims);
TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, input_quantized,
input_quantized_size));
}
node->temporaries->data[kFwHiddenStateQuantized] =
op_data->scratch_tensor_index + kFwHiddenStateQuantized;
TfLiteTensor* fw_hidden_state_quantized;
TF_LITE_ENSURE_OK(context,
GetTemporarySafe(context, node, kFwHiddenStateQuantized,
&fw_hidden_state_quantized));
fw_hidden_state_quantized->type = fw_input_weights->type;
fw_hidden_state_quantized->allocation_type = kTfLiteArenaRw;
if (!TfLiteIntArrayEqual(fw_hidden_state_quantized->dims,
fw_hidden_state->dims)) {
TfLiteIntArray* fw_hidden_state_quantized_size =
TfLiteIntArrayCopy(fw_hidden_state->dims);
TF_LITE_ENSURE_OK(
context, context->ResizeTensor(context, fw_hidden_state_quantized,
fw_hidden_state_quantized_size));
}
node->temporaries->data[kBwHiddenStateQuantized] =
op_data->scratch_tensor_index + kBwHiddenStateQuantized;
TfLiteTensor* bw_hidden_state_quantized;
TF_LITE_ENSURE_OK(context,
GetTemporarySafe(context, node, kBwHiddenStateQuantized,
&bw_hidden_state_quantized));
bw_hidden_state_quantized->type = fw_input_weights->type;
bw_hidden_state_quantized->allocation_type = kTfLiteArenaRw;
if (!TfLiteIntArrayEqual(bw_hidden_state_quantized->dims,
bw_hidden_state->dims)) {
TfLiteIntArray* bw_hidden_state_quantized_size =
TfLiteIntArrayCopy(bw_hidden_state->dims);
TF_LITE_ENSURE_OK(
context, context->ResizeTensor(context, bw_hidden_state_quantized,
bw_hidden_state_quantized_size));
}
node->temporaries->data[kScalingFactors] =
op_data->scratch_tensor_index + kScalingFactors;
TfLiteTensor* scaling_factors;
TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node, kScalingFactors,
&scaling_factors));
scaling_factors->type = kTfLiteFloat32;
scaling_factors->allocation_type = kTfLiteArenaRw;
int scaling_dims[1] = {batch_size};
if (!TfLiteIntArrayEqualsArray(scaling_factors->dims, 1, scaling_dims)) {
TfLiteIntArray* scaling_factors_size = TfLiteIntArrayCreate(1);
scaling_factors_size->data[0] = batch_size;
TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, scaling_factors,
scaling_factors_size));
}
node->temporaries->data[kAccumScratch] =
op_data->scratch_tensor_index + kAccumScratch;
TfLiteTensor* accum_scratch;
TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node, kAccumScratch,
&accum_scratch));
accum_scratch->type = kTfLiteInt32;
accum_scratch->allocation_type = kTfLiteArenaRw;
int accum_scratch_dims[2] = {std::max(fw_num_units, bw_num_units),
batch_size};
if (!TfLiteIntArrayEqualsArray(accum_scratch->dims, 2,
accum_scratch_dims)) {
TfLiteIntArray* accum_scratch_size = TfLiteIntArrayCreate(2);
accum_scratch_size->data[0] = accum_scratch_dims[0];
accum_scratch_size->data[1] = accum_scratch_dims[1];
TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, accum_scratch,
accum_scratch_size));
}
node->temporaries->data[kZeroPoints] =
op_data->scratch_tensor_index + kZeroPoints;
TfLiteTensor* zero_points;
TF_LITE_ENSURE_OK(
context,
GetTemporarySafe(context, node, kZeroPoints, &zero_points));
zero_points->type = kTfLiteInt32;
zero_points->allocation_type = kTfLiteArenaRw;
int zero_points_dims[1] = {batch_size};
if (!TfLiteIntArrayEqualsArray(zero_points->dims, 1, zero_points_dims)) {
TfLiteIntArray* zero_points_size = TfLiteIntArrayCreate(1);
zero_points_size->data[0] = batch_size;
TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, zero_points,
zero_points_size));
}
const int num_row_sums = has_aux_input ? 3 : 2;
node->temporaries->data[kFwRowSums] =
op_data->scratch_tensor_index + kFwRowSums;
TfLiteTensor* fw_row_sums;
TF_LITE_ENSURE_OK(
context,
GetTemporarySafe(context, node, kFwRowSums, &fw_row_sums));
fw_row_sums->type = kTfLiteInt32;
fw_row_sums->name = "Lstm_fw_row_sums";
fw_row_sums->allocation_type = kTfLiteArenaRwPersistent;
int fw_row_sums_dims[2] = {num_row_sums, fw_num_units};
if (!TfLiteIntArrayEqualsArray(fw_row_sums->dims, 2, fw_row_sums_dims)) {
TfLiteIntArray* fw_row_sums_size = TfLiteIntArrayCreate(2);
fw_row_sums_size->data[0] = fw_row_sums_dims[0];
fw_row_sums_size->data[1] = fw_row_sums_dims[1];
TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, fw_row_sums,
fw_row_sums_size));
}
node->temporaries->data[kBwRowSums] =
op_data->scratch_tensor_index + kBwRowSums;
TfLiteTensor* bw_row_sums;
TF_LITE_ENSURE_OK(
context,
GetTemporarySafe(context, node, kBwRowSums, &bw_row_sums));
bw_row_sums->type = kTfLiteInt32;
bw_row_sums->name = "Lstm_bw_row_sums";
bw_row_sums->allocation_type = kTfLiteArenaRwPersistent;
int bw_row_sums_dims[2] = {num_row_sums, bw_num_units};
if (!TfLiteIntArrayEqualsArray(bw_row_sums->dims, 2, bw_row_sums_dims)) {
TfLiteIntArray* bw_row_sums_size = TfLiteIntArrayCreate(2);
bw_row_sums_size->data[0] = bw_row_sums_dims[0];
bw_row_sums_size->data[1] = bw_row_sums_dims[1];
TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, bw_row_sums,
bw_row_sums_size));
}
if (has_aux_input) {
node->temporaries->data[kAuxInputQuantized] =
op_data->scratch_tensor_index + kAuxInputQuantized;
TfLiteTensor* aux_input_quantized;
TF_LITE_ENSURE_OK(context,
GetTemporarySafe(context, node, kAuxInputQuantized,
&aux_input_quantized));
aux_input_quantized->type = fw_input_weights->type;
aux_input_quantized->allocation_type = kTfLiteArenaRw;
if (!TfLiteIntArrayEqual(aux_input_quantized->dims, aux_input->dims)) {
TfLiteIntArray* aux_input_quantized_size =
TfLiteIntArrayCopy(aux_input->dims);
TF_LITE_ENSURE_OK(context,
context->ResizeTensor(context, aux_input_quantized,
aux_input_quantized_size));
}
}
}
TfLiteTensor* fw_output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kFwOutputTensor, &fw_output));
TfLiteIntArray* fw_output_size_array = TfLiteIntArrayCreate(3);
fw_output_size_array->data[0] = (time_major) ? max_time : batch_size;
fw_output_size_array->data[1] = (time_major) ? batch_size : max_time;
fw_output_size_array->data[2] =
params->merge_outputs ? fw_num_units + bw_num_units : fw_num_units;
TF_LITE_ENSURE_OK(
context, context->ResizeTensor(context, fw_output, fw_output_size_array));
if (!params->merge_outputs) {
TfLiteTensor* bw_output;
TF_LITE_ENSURE_OK(
context, GetOutputSafe(context, node, kBwOutputTensor, &bw_output));
TfLiteIntArray* bw_output_size_array = TfLiteIntArrayCreate(3);
bw_output_size_array->data[0] = (time_major) ? max_time : batch_size;
bw_output_size_array->data[1] = (time_major) ? batch_size : max_time;
bw_output_size_array->data[2] = bw_num_units;
TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, bw_output,
bw_output_size_array));
}
return kTfLiteOk;
}
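// Float path: steps the forward cell over t = 0..max_time-1 and the backward
// cell over reversed time. Time-major input is stepped a whole batch at a
// time; batch-major input is stepped per batch entry with batch size 1. With
// merge_outputs, backward results land in the second half of each fw_output
// row.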
TfLiteStatus EvalFloat(const TfLiteTensor* input, const TfLiteTensor* bw_input,
const TfLiteTensor* fw_input_weights,
const TfLiteTensor* fw_recurrent_weights,
const TfLiteTensor* fw_bias,
const TfLiteTensor* bw_input_weights,
const TfLiteTensor* bw_recurrent_weights,
const TfLiteTensor* bw_bias,
const TfLiteTensor* aux_input,
const TfLiteTensor* fw_aux_input_weights,
const TfLiteTensor* bw_aux_input_weights,
const TfLiteBidirectionalSequenceRNNParams* params,
TfLiteTensor* fw_hidden_state, TfLiteTensor* fw_output,
TfLiteTensor* bw_hidden_state, TfLiteTensor* bw_output) {
const bool time_major = params->time_major;
const int batch_size =
(time_major) ? input->dims->data[1] : input->dims->data[0];
const int max_time =
(time_major) ? input->dims->data[0] : input->dims->data[1];
const int input_size = input->dims->data[2];
const int aux_input_size = (aux_input) ? aux_input->dims->data[2] : 0;
const int fw_num_units = fw_input_weights->dims->data[0];
const float* fw_bias_ptr = GetTensorData<float>(fw_bias);
const float* fw_input_weights_ptr = GetTensorData<float>(fw_input_weights);
const float* fw_recurrent_weights_ptr =
GetTensorData<float>(fw_recurrent_weights);
const int bw_num_units = bw_input_weights->dims->data[0];
const float* bw_bias_ptr = GetTensorData<float>(bw_bias);
const float* bw_input_weights_ptr = GetTensorData<float>(bw_input_weights);
const float* bw_recurrent_weights_ptr =
GetTensorData<float>(bw_recurrent_weights);
const float* fw_aux_input_weights_ptr =
(fw_aux_input_weights != nullptr)
? GetTensorData<float>(fw_aux_input_weights)
: nullptr;
const float* bw_aux_input_weights_ptr =
(bw_aux_input_weights != nullptr)
? GetTensorData<float>(bw_aux_input_weights)
: nullptr;
const int fw_output_step =
params->merge_outputs ? fw_num_units + bw_num_units : fw_num_units;
const int bw_output_step =
params->merge_outputs ? fw_num_units + bw_num_units : bw_num_units;
if (time_major) {
float* fw_hidden_state_ptr_batch = GetTensorData<float>(fw_hidden_state);
for (int s = 0; s < max_time; s++) {
const float* input_ptr_batch =
GetTensorData<float>(input) + s * input_size * batch_size;
const float* aux_input_ptr_batch =
(aux_input != nullptr)
? GetTensorData<float>(aux_input) + s * input_size * batch_size
: nullptr;
float* output_ptr_batch =
GetTensorData<float>(fw_output) + s * fw_output_step * batch_size;
kernel_utils::RnnBatchStep(
input_ptr_batch, fw_input_weights_ptr, aux_input_ptr_batch,
fw_aux_input_weights_ptr, fw_recurrent_weights_ptr, fw_bias_ptr,
input_size, aux_input_size, fw_num_units, batch_size, fw_output_step,
params->activation, fw_hidden_state_ptr_batch, output_ptr_batch);
}
float* bw_hidden_state_ptr_batch = GetTensorData<float>(bw_hidden_state);
for (int s = max_time - 1; s >= 0; s--) {
const float* input_ptr_batch =
GetTensorData<float>(bw_input) + s * input_size * batch_size;
const float* aux_input_ptr_batch =
(aux_input != nullptr)
? GetTensorData<float>(aux_input) + s * input_size * batch_size
: nullptr;
float* output_ptr_batch =
(params->merge_outputs
? GetTensorData<float>(fw_output) + fw_num_units
: GetTensorData<float>(bw_output)) +
s * bw_output_step * batch_size;
kernel_utils::RnnBatchStep(
input_ptr_batch, bw_input_weights_ptr, aux_input_ptr_batch,
bw_aux_input_weights_ptr, bw_recurrent_weights_ptr, bw_bias_ptr,
input_size, aux_input_size, bw_num_units, batch_size, bw_output_step,
params->activation, bw_hidden_state_ptr_batch, output_ptr_batch);
}
} else {
for (int b = 0; b < batch_size; b++) {
float* fw_hidden_state_ptr_batch =
GetTensorData<float>(fw_hidden_state) + b * fw_num_units;
float* fw_output_offset =
GetTensorData<float>(fw_output) + b * fw_output_step * max_time;
for (int s = 0; s < max_time; s++) {
const float* input_ptr_batch = GetTensorData<float>(input) +
b * input_size * max_time +
s * input_size;
const float* aux_input_ptr_batch =
(aux_input != nullptr)
? GetTensorData<float>(aux_input) +
b * aux_input_size * max_time + s * aux_input_size
: nullptr;
float* output_ptr_batch = fw_output_offset + s * fw_output_step;
kernel_utils::RnnBatchStep(
input_ptr_batch, fw_input_weights_ptr, aux_input_ptr_batch,
fw_aux_input_weights_ptr, fw_recurrent_weights_ptr, fw_bias_ptr,
input_size, aux_input_size, fw_num_units, 1,
fw_output_step, params->activation, fw_hidden_state_ptr_batch,
output_ptr_batch);
}
float* bw_hidden_state_ptr_batch =
GetTensorData<float>(bw_hidden_state) + b * bw_num_units;
float* bw_output_offset =
params->merge_outputs
? GetTensorData<float>(fw_output) +
b * bw_output_step * max_time + fw_num_units
: GetTensorData<float>(bw_output) + b * bw_output_step * max_time;
for (int s = max_time - 1; s >= 0; s--) {
const float* input_ptr_batch = GetTensorData<float>(input) +
b * input_size * max_time +
s * input_size;
const float* aux_input_ptr_batch =
(aux_input != nullptr)
? GetTensorData<float>(aux_input) +
b * aux_input_size * max_time + s * aux_input_size
: nullptr;
float* output_ptr_batch = bw_output_offset + s * bw_output_step;
kernel_utils::RnnBatchStep(
input_ptr_batch, bw_input_weights_ptr, aux_input_ptr_batch,
bw_aux_input_weights_ptr, bw_recurrent_weights_ptr, bw_bias_ptr,
input_size, aux_input_size, bw_num_units, 1,
bw_output_step, params->activation, bw_hidden_state_ptr_batch,
output_ptr_batch);
}
}
}
return kTfLiteOk;
}
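// Hybrid path: same traversal as EvalFloat, but with int8 weights and
// per-batch input quantization via scaling_factors (plus zero_points and
// cached weight row sums when asymmetric_quantize_inputs is set).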
TfLiteStatus EvalHybrid(
const TfLiteTensor* input, const TfLiteTensor* bw_input,
const TfLiteTensor* fw_input_weights,
const TfLiteTensor* fw_recurrent_weights, const TfLiteTensor* fw_bias,
const TfLiteTensor* bw_input_weights,
const TfLiteTensor* bw_recurrent_weights, const TfLiteTensor* bw_bias,
const TfLiteTensor* aux_input, const TfLiteTensor* aux_fw_input_weights,
const TfLiteTensor* aux_bw_input_weights,
const TfLiteBidirectionalSequenceRNNParams* params,
TfLiteTensor* scaling_factors, TfLiteTensor* input_quantized,
TfLiteTensor* aux_input_quantized, TfLiteTensor* fw_hidden_state_quantized,
TfLiteTensor* fw_hidden_state, TfLiteTensor* fw_output,
TfLiteTensor* bw_hidden_state_quantized, TfLiteTensor* bw_hidden_state,
TfLiteTensor* bw_output, TfLiteTensor* zero_points,
TfLiteTensor* accum_scratch, TfLiteTensor* fw_row_sums,
TfLiteTensor* bw_row_sums, bool* fw_compute_row_sums,
bool* bw_compute_row_sums) {
const bool time_major = params->time_major;
const int batch_size =
(time_major) ? input->dims->data[1] : input->dims->data[0];
const int max_time =
(time_major) ? input->dims->data[0] : input->dims->data[1];
const int input_size = input->dims->data[2];
const int aux_input_size = (aux_input) ? aux_input->dims->data[2] : 0;
const int fw_num_units = fw_input_weights->dims->data[0];
const float* fw_bias_ptr = GetTensorData<float>(fw_bias);
const int8_t* fw_input_weights_ptr = GetTensorData<int8_t>(fw_input_weights);
float fw_input_weights_scale = fw_input_weights->params.scale;
const int8_t* fw_recurrent_weights_ptr =
GetTensorData<int8_t>(fw_recurrent_weights);
float fw_recurrent_weights_scale = fw_recurrent_weights->params.scale;
const int bw_num_units = bw_input_weights->dims->data[0];
const float* bw_bias_ptr = GetTensorData<float>(bw_bias);
const int8_t* bw_input_weights_ptr = GetTensorData<int8_t>(bw_input_weights);
float bw_input_weights_scale = bw_input_weights->params.scale;
const int8_t* bw_recurrent_weights_ptr =
GetTensorData<int8_t>(bw_recurrent_weights);
float bw_recurrent_weights_scale = bw_recurrent_weights->params.scale;
const int8_t* aux_fw_input_weights_ptr = nullptr;
float aux_fw_input_weights_scale = 0.0f;
const int8_t* aux_bw_input_weights_ptr = nullptr;
float aux_bw_input_weights_scale = 0.0f;
int8_t* aux_quantized_input_ptr = nullptr;
if (aux_input_size > 0) {
aux_fw_input_weights_ptr = GetTensorData<int8_t>(aux_fw_input_weights);
aux_fw_input_weights_scale = aux_fw_input_weights->params.scale;
aux_bw_input_weights_ptr = GetTensorData<int8_t>(aux_bw_input_weights);
aux_bw_input_weights_scale = aux_bw_input_weights->params.scale;
aux_quantized_input_ptr = GetTensorData<int8_t>(aux_input_quantized);
}
int8_t* quantized_input_ptr = GetTensorData<int8_t>(input_quantized);
int8_t* fw_quantized_hidden_state_ptr =
GetTensorData<int8_t>(fw_hidden_state_quantized);
int8_t* bw_quantized_hidden_state_ptr =
GetTensorData<int8_t>(bw_hidden_state_quantized);
float* scaling_factors_ptr = GetTensorData<float>(scaling_factors);
int32_t* accum_scratch_ptr = GetTensorData<int32_t>(accum_scratch);
int32_t* zero_points_ptr = nullptr;
int32_t* fw_row_sums_ptr = nullptr;
int32_t* bw_row_sums_ptr = nullptr;
if (params->asymmetric_quantize_inputs) {
zero_points_ptr = GetTensorData<int32_t>(zero_points);
fw_row_sums_ptr = GetTensorData<int32_t>(fw_row_sums);
bw_row_sums_ptr = GetTensorData<int32_t>(bw_row_sums);
}
const int fw_output_step =
params->merge_outputs ? fw_num_units + bw_num_units : fw_num_units;
const int bw_output_step =
params->merge_outputs ? fw_num_units + bw_num_units : bw_num_units;
if (time_major) {
for (int t = 0; t < max_time; t++) {
float* fw_hidden_state_ptr_batch = GetTensorData<float>(fw_hidden_state);
for (int s = 0; s < max_time; s++) {
const float* input_ptr_batch =
GetTensorData<float>(input) + s * input_size * batch_size;
const float* aux_input_ptr_batch =
(aux_input != nullptr)
? GetTensorData<float>(aux_input) + s * input_size * batch_size
: nullptr;
float* output_ptr_batch =
GetTensorData<float>(fw_output) + s * fw_output_step * batch_size;
kernel_utils::RnnBatchStep(
input_ptr_batch, fw_input_weights_ptr, fw_input_weights_scale,
aux_input_ptr_batch, aux_fw_input_weights_ptr,
aux_fw_input_weights_scale, fw_recurrent_weights_ptr,
fw_recurrent_weights_scale, fw_bias_ptr, input_size, aux_input_size,
fw_num_units, batch_size, fw_output_step, params->activation,
quantized_input_ptr, aux_quantized_input_ptr,
fw_quantized_hidden_state_ptr, scaling_factors_ptr,
fw_hidden_state_ptr_batch, output_ptr_batch,
params->asymmetric_quantize_inputs, zero_points_ptr,
accum_scratch_ptr, fw_row_sums_ptr, fw_compute_row_sums);
}
float* bw_hidden_state_ptr_batch = GetTensorData<float>(bw_hidden_state);
for (int s = max_time - 1; s >= 0; s--) {
const float* input_ptr_batch =
GetTensorData<float>(bw_input) + s * input_size * batch_size;
const float* aux_input_ptr_batch =
(aux_input != nullptr)
? GetTensorData<float>(aux_input) + s * input_size * batch_size
: nullptr;
float* output_ptr_batch =
(params->merge_outputs
? GetTensorData<float>(fw_output) + fw_num_units
: GetTensorData<float>(bw_output)) +
s * bw_output_step * batch_size;
kernel_utils::RnnBatchStep(
input_ptr_batch, bw_input_weights_ptr, bw_input_weights_scale,
aux_input_ptr_batch, aux_bw_input_weights_ptr,
aux_bw_input_weights_scale, bw_recurrent_weights_ptr,
bw_recurrent_weights_scale, bw_bias_ptr, input_size, aux_input_size,
bw_num_units, batch_size, bw_output_step, params->activation,
quantized_input_ptr, aux_quantized_input_ptr,
bw_quantized_hidden_state_ptr, scaling_factors_ptr,
bw_hidden_state_ptr_batch, output_ptr_batch,
params->asymmetric_quantize_inputs, zero_points_ptr,
accum_scratch_ptr, bw_row_sums_ptr, bw_compute_row_sums);
}
}
} else {
for (int b = 0; b < batch_size; b++) {
float* fw_hidden_state_ptr_batch =
GetTensorData<float>(fw_hidden_state) + b * fw_num_units;
float* fw_output_offset =
GetTensorData<float>(fw_output) + b * fw_output_step * max_time;
for (int s = 0; s < max_time; s++) {
const float* input_ptr_batch = GetTensorData<float>(input) +
b * input_size * max_time +
s * input_size;
const float* aux_input_ptr_batch =
(aux_input != nullptr)
? GetTensorData<float>(aux_input) + b * input_size * max_time +
s * input_size
: nullptr;
float* output_ptr_batch = fw_output_offset + s * fw_output_step;
kernel_utils::RnnBatchStep(
input_ptr_batch, fw_input_weights_ptr, fw_input_weights_scale,
aux_input_ptr_batch, aux_fw_input_weights_ptr,
aux_fw_input_weights_scale, fw_recurrent_weights_ptr,
fw_recurrent_weights_scale, fw_bias_ptr, input_size, aux_input_size,
fw_num_units, 1, fw_output_step, params->activation,
quantized_input_ptr, aux_quantized_input_ptr,
fw_quantized_hidden_state_ptr, scaling_factors_ptr,
fw_hidden_state_ptr_batch, output_ptr_batch,
params->asymmetric_quantize_inputs, zero_points_ptr,
accum_scratch_ptr, fw_row_sums_ptr, fw_compute_row_sums);
}
float* bw_hidden_state_ptr_batch =
GetTensorData<float>(bw_hidden_state) + b * bw_num_units;
float* bw_output_offset =
params->merge_outputs
? GetTensorData<float>(fw_output) +
b * bw_output_step * max_time + fw_num_units
: GetTensorData<float>(bw_output) + b * bw_output_step * max_time;
for (int s = max_time - 1; s >= 0; s--) {
const float* input_ptr_batch = GetTensorData<float>(input) +
b * input_size * max_time +
s * input_size;
const float* aux_input_ptr_batch =
(aux_input != nullptr)
? GetTensorData<float>(aux_input) + b * input_size * max_time +
s * input_size
: nullptr;
float* output_ptr_batch = bw_output_offset + s * bw_output_step;
kernel_utils::RnnBatchStep(
input_ptr_batch, bw_input_weights_ptr, bw_input_weights_scale,
aux_input_ptr_batch, aux_bw_input_weights_ptr,
aux_bw_input_weights_scale, bw_recurrent_weights_ptr,
bw_recurrent_weights_scale, bw_bias_ptr, input_size, aux_input_size,
bw_num_units, 1, bw_output_step, params->activation,
quantized_input_ptr, aux_quantized_input_ptr,
bw_quantized_hidden_state_ptr, scaling_factors_ptr,
bw_hidden_state_ptr_batch, output_ptr_batch,
params->asymmetric_quantize_inputs, zero_points_ptr,
accum_scratch_ptr, bw_row_sums_ptr, bw_compute_row_sums);
}
}
}
return kTfLiteOk;
}
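// Dispatches on the weight type. In non-stacking mode (an aux input is given
// without aux weights, i.e. it is the previous layer's backward output), the
// aux input becomes the backward cell's primary input and no auxiliary input
// is forwarded to the cells.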
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const auto* params = reinterpret_cast<TfLiteBidirectionalSequenceRNNParams*>(
node->builtin_data);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
const TfLiteTensor* fw_input_weights;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kFwWeightsTensor,
&fw_input_weights));
const TfLiteTensor* fw_recurrent_weights;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kFwRecurrentWeightsTensor,
&fw_recurrent_weights));
const TfLiteTensor* fw_bias;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kFwBiasTensor, &fw_bias));
const TfLiteTensor* bw_input_weights;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kBwWeightsTensor,
&bw_input_weights));
const TfLiteTensor* bw_recurrent_weights;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kBwRecurrentWeightsTensor,
&bw_recurrent_weights));
const TfLiteTensor* bw_bias;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kBwBiasTensor, &bw_bias));
const TfLiteTensor* aux_input =
GetOptionalInputTensor(context, node, kAuxInputTensor);
const TfLiteTensor* fw_aux_input_weights =
GetOptionalInputTensor(context, node, kFwAuxWeightsTensor);
const TfLiteTensor* bw_aux_input_weights =
GetOptionalInputTensor(context, node, kBwAuxWeightsTensor);
TfLiteTensor* fw_hidden_state =
GetVariableInput(context, node, kFwHiddenStateTensor);
TFLITE_DCHECK(fw_hidden_state != nullptr);
TfLiteTensor* bw_hidden_state =
GetVariableInput(context, node, kBwHiddenStateTensor);
TFLITE_DCHECK(bw_hidden_state != nullptr);
TfLiteTensor* fw_output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kFwOutputTensor, &fw_output));
TfLiteTensor* bw_output = params->merge_outputs
? nullptr
: GetOutput(context, node, kBwOutputTensor);
const bool has_previous_bw_output = (aux_input != nullptr);
const bool use_aux_input = (fw_aux_input_weights != nullptr);
const bool non_stacking_mode = !use_aux_input && has_previous_bw_output;
const TfLiteTensor* bw_input = non_stacking_mode ? aux_input : input;
const TfLiteTensor* real_aux_input = non_stacking_mode ? nullptr : aux_input;
switch (fw_input_weights->type) {
case kTfLiteFloat32:
return EvalFloat(input, bw_input, fw_input_weights, fw_recurrent_weights,
fw_bias, bw_input_weights, bw_recurrent_weights, bw_bias,
real_aux_input, fw_aux_input_weights,
bw_aux_input_weights, params, fw_hidden_state, fw_output,
bw_hidden_state, bw_output);
case kTfLiteUInt8:
case kTfLiteInt8: {
TfLiteTensor* input_quantized;
TF_LITE_ENSURE_OK(
context,
GetTemporarySafe(context, node, kInputQuantized, &input_quantized));
TfLiteTensor* fw_hidden_state_quantized;
TF_LITE_ENSURE_OK(context,
GetTemporarySafe(context, node, kFwHiddenStateQuantized,
&fw_hidden_state_quantized));
TfLiteTensor* bw_hidden_state_quantized;
TF_LITE_ENSURE_OK(context,
GetTemporarySafe(context, node, kBwHiddenStateQuantized,
&bw_hidden_state_quantized));
TfLiteTensor* scaling_factors;
TF_LITE_ENSURE_OK(
context,
GetTemporarySafe(context, node, kScalingFactors, &scaling_factors));
TfLiteTensor* zero_points;
TF_LITE_ENSURE_OK(
context, GetTemporarySafe(context, node, kZeroPoints, &zero_points));
TfLiteTensor* accum_scratch;
TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node, kAccumScratch,
&accum_scratch));
TfLiteTensor* fw_row_sums;
TF_LITE_ENSURE_OK(
context, GetTemporarySafe(context, node, kFwRowSums, &fw_row_sums));
TfLiteTensor* bw_row_sums;
TF_LITE_ENSURE_OK(
context, GetTemporarySafe(context, node, kBwRowSums, &bw_row_sums));
TfLiteTensor* aux_input_quantized =
use_aux_input ? GetTemporary(context, node, kAuxInputQuantized)
: nullptr;
auto* op_data = reinterpret_cast<OpData*>(node->user_data);
return EvalHybrid(
input, bw_input, fw_input_weights, fw_recurrent_weights, fw_bias,
bw_input_weights, bw_recurrent_weights, bw_bias, real_aux_input,
fw_aux_input_weights, bw_aux_input_weights, params, scaling_factors,
input_quantized, aux_input_quantized, fw_hidden_state_quantized,
fw_hidden_state, fw_output, bw_hidden_state_quantized,
bw_hidden_state, bw_output, zero_points, accum_scratch, fw_row_sums,
bw_row_sums, &op_data->fw_compute_row_sums,
&op_data->bw_compute_row_sums);
}
default:
TF_LITE_KERNEL_LOG(context, "Type not currently supported.");
return kTfLiteError;
}
}
}  // namespace bidirectional_sequence_rnn
TfLiteRegistration* Register_BIDIRECTIONAL_SEQUENCE_RNN() {
static TfLiteRegistration r = {
bidirectional_sequence_rnn::Init, bidirectional_sequence_rnn::Free,
bidirectional_sequence_rnn::Prepare, bidirectional_sequence_rnn::Eval};
return &r;
}
}  // namespace builtin
}  // namespace ops
} | #include <algorithm>
#include <functional>
#include <initializer_list>
#include <iterator>
#include <tuple>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "flatbuffers/flatbuffers.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace {
enum class AuxInputMode {
kNoAuxInput,
kCrossLinking,
kNoCrossLinking,
};
using ::testing::ElementsAreArray;
static float rnn_input[] = {
0.23689353, 0.285385, 0.037029743, -0.19858193, -0.27569133,
0.43773448, 0.60379338, 0.35562468, -0.69424844, -0.93421471,
-0.87287879, 0.37144363, -0.62476718, 0.23791671, 0.40060222,
0.1356622, -0.99774903, -0.98858172, -0.38952237, -0.47685933,
0.31073618, 0.71511042, -0.63767755, -0.31729108, 0.33468103,
0.75801885, 0.30660987, -0.37354088, 0.77002847, -0.62747043,
-0.68572164, 0.0069220066, 0.65791464, 0.35130811, 0.80834007,
-0.61777675, -0.21095741, 0.41213346, 0.73784804, 0.094794154,
0.47791874, 0.86496925, -0.53376222, 0.85315156, 0.10288584,
0.86684, -0.011186242, 0.10513687, 0.87825835, 0.59929144,
0.62827742, 0.18899453, 0.31440187, 0.99059987, 0.87170351,
-0.35091716, 0.74861872, 0.17831337, 0.2755419, 0.51864719,
0.55084288, 0.58982027, -0.47443086, 0.20875752, -0.058871567,
-0.66609079, 0.59098077, 0.73017097, 0.74604273, 0.32882881,
-0.17503482, 0.22396147, 0.19379807, 0.29120302, 0.077113032,
-0.70331609, 0.15804303, -0.93407321, 0.40182066, 0.036301374,
0.66521823, 0.0300982, -0.7747041, -0.02038002, 0.020698071,
-0.90300065, 0.62870288, -0.23068321, 0.27531278, -0.095755219,
-0.712036, -0.17384434, -0.50593495, -0.18646687, -0.96508682,
0.43519354, 0.14744234, 0.62589407, 0.1653645, -0.10651493,
-0.045277178, 0.99032974, -0.88255352, -0.85147917, 0.28153265,
0.19455957, -0.55479527, -0.56042433, 0.26048636, 0.84702539,
0.47587705, -0.074295521, -0.12287641, 0.70117295, 0.90532446,
0.89782166, 0.79817224, 0.53402734, -0.33286154, 0.073485017,
-0.56172788, -0.044897556, 0.89964068, -0.067662835, 0.76863563,
0.93455386, -0.6324693, -0.083922029};
static float rnn_golden_fw_output[] = {
0.496726, 0, 0.965996, 0, 0.0584254, 0,
0, 0.12315, 0, 0, 0.612266, 0.456601,
0, 0.52286, 1.16099, 0.0291232,
0, 0, 0.524901, 0, 0, 0,
0, 1.02116, 0, 1.35762, 0, 0.356909,
0.436415, 0.0355727, 0, 0,
0, 0, 0, 0.262335, 0, 0,
0, 1.33992, 0, 2.9739, 0, 0,
1.31914, 2.66147, 0, 0,
0.942568, 0, 0, 0, 0.025507, 0,
0, 0, 0.321429, 0.569141, 1.25274, 1.57719,
0.8158, 1.21805, 0.586239, 0.25427,
1.04436, 0, 0.630725, 0, 0.133801, 0.210693,
0.363026, 0, 0.533426, 0, 1.25926, 0.722707,
0, 1.22031, 1.30117, 0.495867,
0.222187, 0, 0.72725, 0, 0.767003, 0,
0, 0.147835, 0, 0, 0, 0.608758,
0.469394, 0.00720298, 0.927537, 0,
0.856974, 0.424257, 0, 0, 0.937329, 0,
0, 0, 0.476425, 0, 0.566017, 0.418462,
0.141911, 0.996214, 1.13063, 0,
0.967899, 0, 0, 0, 0.0831304, 0,
0, 1.00378, 0, 0, 0, 1.44818,
1.01768, 0.943891, 0.502745, 0,
0.940135, 0, 0, 0, 0, 0,
0, 2.13243, 0, 0.71208, 0.123918, 1.53907,
1.30225, 1.59644, 0.70222, 0,
0.804329, 0, 0.430576, 0, 0.505872, 0.509603,
0.343448, 0, 0.107756, 0.614544, 1.44549, 1.52311,
0.0454298, 0.300267, 0.562784, 0.395095,
0.228154, 0, 0.675323, 0, 1.70536, 0.766217,
0, 0, 0, 0.735363, 0.0759267, 1.91017,
0.941888, 0, 0, 0,
0, 0, 1.5909, 0, 0, 0,
0, 0.5755, 0, 0.184687, 0, 1.56296,
0.625285, 0, 0, 0,
0, 0, 0.0857888, 0, 0, 0,
0, 0.488383, 0.252786, 0, 0, 0,
1.02817, 1.85665, 0, 0,
0.00981836, 0, 1.06371, 0, 0, 0,
0, 0, 0, 0.290445, 0.316406, 0,
0.304161, 1.25079, 0.0707152, 0,
0.986264, 0.309201, 0, 0, 0, 0,
0, 1.64896, 0.346248, 0, 0.918175, 0.78884,
0.524981, 1.92076, 2.07013, 0.333244,
0.415153, 0.210318, 0, 0, 0, 0,
0, 2.02616, 0, 0.728256, 0.84183, 0.0907453,
0.628881, 3.58099, 1.49974, 0};
static float rnn_golden_bw_output[] = {
0.496726, 0, 1.00883, 0, 0.0584256, 0, 0,
0.236412, 0, 0, 0.612267, 0.487726, 0, 0.54883,
1.16099, 0.0291233, 0, 0, 0.428302, 0, 0,
0, 0, 1.13262, 0, 1.64415, 0, 0.311249,
0.570804, 0.259696, 0, 0, 0, 0, 0,
0.262334, 0, 0, 0, 1.23781, 0, 2.86532,
0, 0, 1.34389, 2.76409, 0, 0, 1.03969,
0, 0.00410865, 0, 0.0470295, 0, 0, 0,
0.371556, 0.27175, 1.36614, 1.63956, 0.683887, 1.06176, 0.719552,
0.301314, 0.971195, 0, 0.697143, 0, 0.215219, 0.210693,
0.363027, 0, 0.501283, 0, 1.13399, 0.623774, 0,
1.09851, 1.33313, 0.470441, 0.210965, 0, 0.664178, 0,
0.839686, 0, 0, 0.147834, 0, 0, 0,
0.58786, 0.490128, 0, 0.905806, 0, 0.932134, 0.424257,
0, 0, 0.860629, 0, 0, 0, 0.476425,
0, 0.566017, 0.513721, 0.207341, 1.09508, 1.08385, 0,
0.973787, 0, 0, 0, 0, 0, 0,
1.20698, 0, 0, 0, 1.56135, 1.12369, 0.99588,
0.459803, 0, 0.915854, 0, 0, 0, 0,
0, 0, 2.03206, 0, 0.773264, 0.267228, 1.55012,
1.202, 1.51611, 0.701202, 0, 0.725088, 0, 0.509069,
0, 0.671349, 0.581129, 0.343447, 0, 0.107755, 0.611838,
1.4331, 1.55871, 0.015242, 0.140624, 0.492562, 0.395095, 0.147722,
0, 0.784925, 0, 1.65477, 0.715257, 0, 0,
0, 0.685024, 0, 1.89505, 1.00037, 0, 0,
0, 0, 0, 1.52659, 0, 0, 0,
0, 0.618583, 0, 0.11115, 0, 1.37194, 0.630225,
0, 0, 0, 0, 0, 0.0322124, 0,
0, 0, 0, 0.430834, 0.252786, 0, 0,
0, 0.991297, 1.98451, 0, 0, 0.111511, 0,
1.05513, 0, 0, 0, 0, 0, 0,
0.290445, 0.412559, 0.0429958, 0.256564, 1.27858, 0.289948, 0,
1.01693, 0.327141, 0, 0, 0, 0, 0,
1.83508, 0.346248, 0, 0.961535, 0.790026, 0.552203, 2.13457,
2.19233, 0.333244, 0.316526, 0.179398, 0, 0, 0,
0, 0, 1.86126, 0, 0.728256, 0.750013, 0.011861,
0.576383, 3.38891, 1.29273, 0};
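// Input-to-hidden weights shared by the forward and backward cells
// (16 units x 8 inputs).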
const std::initializer_list<float> weights = {
0.461459, 0.153381, 0.529743, -0.00371218, 0.676267, -0.211346,
0.317493, 0.969689, -0.343251, 0.186423, 0.398151, 0.152399,
0.448504, 0.317662, 0.523556, -0.323514, 0.480877, 0.333113,
-0.757714, -0.674487, -0.643585, 0.217766, -0.0251462, 0.79512,
-0.595574, -0.422444, 0.371572, -0.452178, -0.556069, -0.482188,
-0.685456, -0.727851, 0.841829, 0.551535, -0.232336, 0.729158,
-0.00294906, -0.69754, 0.766073, -0.178424, 0.369513, -0.423241,
0.548547, -0.0152023, -0.757482, -0.85491, 0.251331, -0.989183,
0.306261, -0.340716, 0.886103, -0.0726757, -0.723523, -0.784303,
0.0354295, 0.566564, -0.485469, -0.620498, 0.832546, 0.697884,
-0.279115, 0.294415, -0.584313, 0.548772, 0.0648819, 0.968726,
0.723834, -0.0080452, -0.350386, -0.272803, 0.115121, -0.412644,
-0.824713, -0.992843, -0.592904, -0.417893, 0.863791, -0.423461,
-0.147601, -0.770664, -0.479006, 0.654782, 0.587314, -0.639158,
0.816969, -0.337228, 0.659878, 0.73107, 0.754768, -0.337042,
0.0960841, 0.368357, 0.244191, -0.817703, -0.211223, 0.442012,
0.37225, -0.623598, -0.405423, 0.455101, 0.673656, -0.145345,
-0.511346, -0.901675, -0.81252, -0.127006, 0.809865, -0.721884,
0.636255, 0.868989, -0.347973, -0.10179, -0.777449, 0.917274,
0.819286, 0.206218, -0.00785118, 0.167141, 0.45872, 0.972934,
-0.276798, 0.837861, 0.747958, -0.0151566, -0.330057, -0.469077,
0.277308, 0.415818};
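// Input for the end-to-end test: 64 examples, each 4 time steps x 8 features.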
static float endtoend_input[] = {
0.996808, 0.060710, 0.981855, 0.570017, 0.525164, 0.796859, 0.696547,
0.505925, 0.991844, 0.461208, 0.949371, 0.027624, 0.539236, 0.841854,
0.915222, 0.538569, 0.069375, 0.237905, 0.903700, 0.441703, 0.536196,
0.402724, 0.761635, 0.025063, 0.082592, 0.688245, 0.239310, 0.256931,
0.658900, 0.105695, 0.301983, 0.655708, 0.166405, 0.283837, 0.225725,
0.691569, 0.080696, 0.922272, 0.197494, 0.072540, 0.383481, 0.146865,
0.100163, 0.922717, 0.988720, 0.015386, 0.461286, 0.058095, 0.253290,
0.364986, 0.499797, 0.789487, 0.767709, 0.261433, 0.814549, 0.850302,
0.949678, 0.053859, 0.107233, 0.608577, 0.159554, 0.409215, 0.264285,
0.325960, 0.693053, 0.490011, 0.017529, 0.773749, 0.412283, 0.215023,
0.846288, 0.795764, 0.361889, 0.946452, 0.718481, 0.350608, 0.961837,
0.179767, 0.408703, 0.215128, 0.544753, 0.908500, 0.004614, 0.312462,
0.169933, 0.819163, 0.162764, 0.119611, 0.873022, 0.269997, 0.728188,
0.032576, 0.679212, 0.992474, 0.358536, 0.372265, 0.482484, 0.376065,
0.146014, 0.894767, 0.591088, 0.992302, 0.690531, 0.952977, 0.938754,
0.409012, 0.303585, 0.900591, 0.588780, 0.712287, 0.115719, 0.133533,
0.620788, 0.120334, 0.445995, 0.790720, 0.939497, 0.608759, 0.910331,
0.812519, 0.878756, 0.638519, 0.845096, 0.557968, 0.630993, 0.203632,
0.930233, 0.113477, 0.579697, 0.076247, 0.008244, 0.170785, 0.068549,
0.698776, 0.123761, 0.007303, 0.107788, 0.427346, 0.907894, 0.696568,
0.139633, 0.023613, 0.830100, 0.760421, 0.143947, 0.276096, 0.551141,
0.083444, 0.884855, 0.461472, 0.895963, 0.763611, 0.099992, 0.741059,
0.321579, 0.730984, 0.944691, 0.251812, 0.844461, 0.524388, 0.328059,
0.852706, 0.695172, 0.396607, 0.551482, 0.818934, 0.403910, 0.659270,
0.246280, 0.311804, 0.355838, 0.385913, 0.335418, 0.185938, 0.146334,
0.479364, 0.462034, 0.697475, 0.562808, 0.346888, 0.158948, 0.458771,
0.110499, 0.258939, 0.199830, 0.432078, 0.989924, 0.144521, 0.683890,
0.834385, 0.668908, 0.011949, 0.687091, 0.364081, 0.408556, 0.238572,
0.183015, 0.812466, 0.897842, 0.429294, 0.124271, 0.253680, 0.815207,
0.459688, 0.439618, 0.961541, 0.939053, 0.901651, 0.659016, 0.501861,
0.248539, 0.817964, 0.960632, 0.359038, 0.076903, 0.160462, 0.791117,
0.066826, 0.304983, 0.475007, 0.901211, 0.973891, 0.486955, 0.588302,
0.337972, 0.895512, 0.826874, 0.520987, 0.707978, 0.724716, 0.950281,
0.832249, 0.978396, 0.765488, 0.291937, 0.418014, 0.727029, 0.230990,
0.319665, 0.386045, 0.732850, 0.568204, 0.204009, 0.693482, 0.927242,
0.280912, 0.853944, 0.718359, 0.347738, 0.158927, 0.193366, 0.248950,
0.132818, 0.680321, 0.837252, 0.470790, 0.575833, 0.664126, 0.991777,
0.283811, 0.388843, 0.942058, 0.116060, 0.367239, 0.707546, 0.407997,
0.785253, 0.434575, 0.638986, 0.104917, 0.820620, 0.371837, 0.673121,
0.024629, 0.065319, 0.600363, 0.305541, 0.919263, 0.318722, 0.653279,
0.078190, 0.512088, 0.902229, 0.211009, 0.192409, 0.739480, 0.681799,
0.768242, 0.403607, 0.673576, 0.052052, 0.792450, 0.615634, 0.168112,
0.159689, 0.323180, 0.576109, 0.944941, 0.757755, 0.215095, 0.049858,
0.578375, 0.586932, 0.722979, 0.603003, 0.652251, 0.323343, 0.908544,
0.571514, 0.642065, 0.561823, 0.649704, 0.154153, 0.464051, 0.860713,
0.346562, 0.203532, 0.542512, 0.114804, 0.607139, 0.216088, 0.166856,
0.399588, 0.831722, 0.334968, 0.559277, 0.154902, 0.911077, 0.504218,
0.912656, 0.126172, 0.554076, 0.491031, 0.713104, 0.277055, 0.094034,
0.365355, 0.600398, 0.002578, 0.936869, 0.242463, 0.564401, 0.586574,
0.396616, 0.028452, 0.447287, 0.743178, 0.231984, 0.989799, 0.857982,
0.839122, 0.205887, 0.024838, 0.238711, 0.037608, 0.359806, 0.797987,
0.192510, 0.270883, 0.302205, 0.105166, 0.397055, 0.856281, 0.596197,
0.110160, 0.133336, 0.690231, 0.475515, 0.733734, 0.692809, 0.412384,
0.976196, 0.257209, 0.998958, 0.372812, 0.285661, 0.446245, 0.115990,
0.517645, 0.436044, 0.973972, 0.356767, 0.641930, 0.998810, 0.595478,
0.679539, 0.358617, 0.393465, 0.872049, 0.629500, 0.695670, 0.977215,
0.026555, 0.551951, 0.573412, 0.136715, 0.685287, 0.263643, 0.612229,
0.419020, 0.956451, 0.024613, 0.395216, 0.213661, 0.023572, 0.768029,
0.499322, 0.469816, 0.884019, 0.016967, 0.905860, 0.857991, 0.373734,
0.547791, 0.856802, 0.969211, 0.227330, 0.215418, 0.362676, 0.099378,
0.844918, 0.058346, 0.076594, 0.871473, 0.610297, 0.650006, 0.008188,
0.295583, 0.913648, 0.620417, 0.714603, 0.870100, 0.645031, 0.109820,
0.083760, 0.668602, 0.877849, 0.583082, 0.138419, 0.761868, 0.600049,
0.044279, 0.619859, 0.973783, 0.592069, 0.476661, 0.942994, 0.819399,
0.692079, 0.305670, 0.918778, 0.536997, 0.364016, 0.995371, 0.408470,
0.974313, 0.645377, 0.416658, 0.269896, 0.559025, 0.037075, 0.984499,
0.429125, 0.682105, 0.094319, 0.512885, 0.350707, 0.972168, 0.095967,
0.489126, 0.734035, 0.696016, 0.533405, 0.353894, 0.669799, 0.125474,
0.830555, 0.612793, 0.944873, 0.522634, 0.918463, 0.863651, 0.059631,
0.282479, 0.859022, 0.468101, 0.256791, 0.504398, 0.884758, 0.526687,
0.063423, 0.921833, 0.511186, 0.492548, 0.603939, 0.605505, 0.005433,
0.954646, 0.577673, 0.101400, 0.443772, 0.311708, 0.797417, 0.977176,
0.665602, 0.467216, 0.102650, 0.496157, 0.080009, 0.047524, 0.018791,
0.998471, 0.911174, 0.078422, 0.280950, 0.770196, 0.546523, 0.537741,
0.274594, 0.431281, 0.064428, 0.338017, 0.353115, 0.575615, 0.830565,
0.957053, 0.181120, 0.835998, 0.911699, 0.758793, 0.937398, 0.355471,
0.070501, 0.734815, 0.332647, 0.736103, 0.202031, 0.435297, 0.232261,
0.282039, 0.482821, 0.251052, 0.280511, 0.393995, 0.329474, 0.561460,
0.164191, 0.875997, 0.099202, 0.438785, 0.307278, 0.163630, 0.776802,
0.660393, 0.739244, 0.607367, 0.617446, 0.920364, 0.443365, 0.529145,
0.679157, 0.380763, 0.884616, 0.749658, 0.115578, 0.217263, 0.485761,
0.317609, 0.652560, 0.718021, 0.599648, 0.135381, 0.969073, 0.880159,
0.529376, 0.298547, 0.441619, 0.693567, 0.174544, 0.540821, 0.132351,
0.481822, 0.704450, 0.909153, 0.142215, 0.443695, 0.516520, 0.759661,
0.364059, 0.959885, 0.288806, 0.043216, 0.340648, 0.173422, 0.792874,
0.456226, 0.390685, 0.278634, 0.773834, 0.043245, 0.996656, 0.373483,
0.178625, 0.965729, 0.253641, 0.708001, 0.264276, 0.695260, 0.401568,
0.438820, 0.236081, 0.533919, 0.920642, 0.940531, 0.443072, 0.062857,
0.384226, 0.959592, 0.822518, 0.748285, 0.919477, 0.111325, 0.791501,
0.260124, 0.284747, 0.584375, 0.716350, 0.675431, 0.863009, 0.490184,
0.718676, 0.859665, 0.863666, 0.897301, 0.825393, 0.117308, 0.605302,
0.089669, 0.812568, 0.006870, 0.528489, 0.048649, 0.540788, 0.449131,
0.989180, 0.983860, 0.511988, 0.373407, 0.943452, 0.334506, 0.121692,
0.862929, 0.445831, 0.913193, 0.123053, 0.730578, 0.497568, 0.839402,
0.406009, 0.360577, 0.329586, 0.124685, 0.220241, 0.193253, 0.021986,
0.045634, 0.310560, 0.627288, 0.135303, 0.123128, 0.634158, 0.663792,
0.171777, 0.174946, 0.112923, 0.160958, 0.158806, 0.624911, 0.534364,
0.102259, 0.959418, 0.656056, 0.965187, 0.405249, 0.569249, 0.088240,
0.135827, 0.066817, 0.927642, 0.541836, 0.427393, 0.257229, 0.666520,
0.647634, 0.450481, 0.688506, 0.693269, 0.761042, 0.315794, 0.828572,
0.884170, 0.949952, 0.492364, 0.055947, 0.124898, 0.605288, 0.216905,
0.283705, 0.230199, 0.751269, 0.385963, 0.189616, 0.407326, 0.351151,
0.594865, 0.976575, 0.439391, 0.730692, 0.043392, 0.367033, 0.272527,
0.470785, 0.624261, 0.939048, 0.118419, 0.074743, 0.627554, 0.811688,
0.835784, 0.943348, 0.640260, 0.719954, 0.893300, 0.132625, 0.775901,
0.018199, 0.737913, 0.992806, 0.301903, 0.968111, 0.744076, 0.687867,
0.157728, 0.151401, 0.039017, 0.752593, 0.127976, 0.478408, 0.483284,
0.171368, 0.845441, 0.755811, 0.642153, 0.469702, 0.694859, 0.760572,
0.544445, 0.322413, 0.572260, 0.380229, 0.265761, 0.212521, 0.100183,
0.159062, 0.345146, 0.876084, 0.177261, 0.083058, 0.868891, 0.479164,
0.051169, 0.612966, 0.167030, 0.208897, 0.764367, 0.206048, 0.961490,
0.892343, 0.684456, 0.444774, 0.063711, 0.529896, 0.200585, 0.705863,
0.999598, 0.895444, 0.466435, 0.544043, 0.217857, 0.038696, 0.924272,
0.483618, 0.251217, 0.024455, 0.642680, 0.596362, 0.900539, 0.819941,
0.679420, 0.769430, 0.299105, 0.730590, 0.382396, 0.466135, 0.939487,
0.146763, 0.672183, 0.900977, 0.039106, 0.356638, 0.345750, 0.102817,
0.886535, 0.546336, 0.808681, 0.886133, 0.441780, 0.275116, 0.430176,
0.659637, 0.313812, 0.354448, 0.143255, 0.565028, 0.378903, 0.785935,
0.161391, 0.279443, 0.605876, 0.840811, 0.048873, 0.904980, 0.571401,
0.431269, 0.371115, 0.510887, 0.578032, 0.043298, 0.411864, 0.617138,
0.399936, 0.757614, 0.719955, 0.286471, 0.303950, 0.528636, 0.172604,
0.745730, 0.803752, 0.602780, 0.405367, 0.117564, 0.957228, 0.548622,
0.682592, 0.336131, 0.334557, 0.843983, 0.615574, 0.940433, 0.684794,
0.664447, 0.845413, 0.256194, 0.095715, 0.216529, 0.767082, 0.673747,
0.259827, 0.178946, 0.290885, 0.659763, 0.936560, 0.010840, 0.946234,
0.240510, 0.539476, 0.118838, 0.986240, 0.343228, 0.721618, 0.391606,
0.460792, 0.678846, 0.940228, 0.143384, 0.014977, 0.274785, 0.987367,
0.630551, 0.215218, 0.672161, 0.294998, 0.060631, 0.928355, 0.390713,
0.277160, 0.695436, 0.064460, 0.536987, 0.874382, 0.355345, 0.196751,
0.810942, 0.366185, 0.142985, 0.051452, 0.905661, 0.261823, 0.037691,
0.248889, 0.983441, 0.429297, 0.709681, 0.662286, 0.369525, 0.853066,
0.677263, 0.644310, 0.840433, 0.307814, 0.859528, 0.512593, 0.602812,
0.920160, 0.440948, 0.993525, 0.197320, 0.136384, 0.057984, 0.734307,
0.010766, 0.413329, 0.931058, 0.821707, 0.779514, 0.074043, 0.873159,
0.685175, 0.335865, 0.910850, 0.934065, 0.319306, 0.340147, 0.643746,
0.981592, 0.709673, 0.496812, 0.658856, 0.353983, 0.337245, 0.966670,
0.213511, 0.849838, 0.569482, 0.133671, 0.290786, 0.563007, 0.330991,
0.427170, 0.620991, 0.065299, 0.437936, 0.034320, 0.996356, 0.259643,
0.813834, 0.070399, 0.132802, 0.499009, 0.406265, 0.043652, 0.433074,
0.725570, 0.383800, 0.076820, 0.707163, 0.093473, 0.573632, 0.366018,
0.447456, 0.910877, 0.332688, 0.660967, 0.760714, 0.902170, 0.794638,
0.051500, 0.465177, 0.125630, 0.478670, 0.086168, 0.190928, 0.916605,
0.120488, 0.187285, 0.176248, 0.934322, 0.257684, 0.309050, 0.433331,
0.663949, 0.352703, 0.866405, 0.389519, 0.736502, 0.943226, 0.096682,
0.829975, 0.516858, 0.462700, 0.277430, 0.427734, 0.795388, 0.938398,
0.188449, 0.697558, 0.733036, 0.239948, 0.162735, 0.858666, 0.718618,
0.248903, 0.049594, 0.635223, 0.369391, 0.236879, 0.811472, 0.303713,
0.494563, 0.120522, 0.737044, 0.158511, 0.473225, 0.603450, 0.548030,
0.209727, 0.546675, 0.644712, 0.039702, 0.063533, 0.107412, 0.317132,
0.491267, 0.902800, 0.255530, 0.679716, 0.600359, 0.988566, 0.919664,
0.763094, 0.847232, 0.638283, 0.011997, 0.896825, 0.273506, 0.381388,
0.133704, 0.084978, 0.685101, 0.628267, 0.205500, 0.422145, 0.786778,
0.678725, 0.025595, 0.334808, 0.888452, 0.572271, 0.979520, 0.928154,
0.635804, 0.086932, 0.245286, 0.127071, 0.989732, 0.500816, 0.806787,
0.590091, 0.489382, 0.726451, 0.353185, 0.336614, 0.364734, 0.365182,
0.233439, 0.638240, 0.746570, 0.367143, 0.723218, 0.431671, 0.995410,
0.928718, 0.853816, 0.782188, 0.607442, 0.879411, 0.116995, 0.495894,
0.451682, 0.096515, 0.424048, 0.087485, 0.183447, 0.669334, 0.214556,
0.173179, 0.170151, 0.021343, 0.763269, 0.659533, 0.747794, 0.116454,
0.996147, 0.112528, 0.481635, 0.229586, 0.750768, 0.228205, 0.596730,
0.473985, 0.659876, 0.592139, 0.402703, 0.513692, 0.374327, 0.010145,
0.393103, 0.491322, 0.506039, 0.844785, 0.587837, 0.930088, 0.932270,
0.771284, 0.599422, 0.146826, 0.944463, 0.769573, 0.168169, 0.707732,
0.429106, 0.915964, 0.824186, 0.425253, 0.028492, 0.305821, 0.654839,
0.779259, 0.534026, 0.251569, 0.253245, 0.193901, 0.843708, 0.655947,
0.707593, 0.218035, 0.666093, 0.100696, 0.709357, 0.172132, 0.945481,
0.297195, 0.102220, 0.877751, 0.068479, 0.701642, 0.024577, 0.012941,
0.471215, 0.192747, 0.720673, 0.900321, 0.108710, 0.544859, 0.325574,
0.137202, 0.850679, 0.980413, 0.916462, 0.384705, 0.231982, 0.169706,
0.578607, 0.075690, 0.825654, 0.286200, 0.293725, 0.491746, 0.386896,
0.003083, 0.663878, 0.332377, 0.300278, 0.766098, 0.210128, 0.368756,
0.467740, 0.234705, 0.381697, 0.938955, 0.427451, 0.102370, 0.839275,
0.536162, 0.647229, 0.164849, 0.673364, 0.497908, 0.145262, 0.589825,
0.882613, 0.377244, 0.759532, 0.461220, 0.452934, 0.585185, 0.747420,
0.746660, 0.076932, 0.134316, 0.749743, 0.740810, 0.466692, 0.050020,
0.506908, 0.676820, 0.418776, 0.974648, 0.911525, 0.800474, 0.913602,
0.338976, 0.902844, 0.752878, 0.875138, 0.550072, 0.917727, 0.548502,
0.047981, 0.062989, 0.138327, 0.930594, 0.440233, 0.897859, 0.391814,
0.893168, 0.483044, 0.139234, 0.639828, 0.559975, 0.273549, 0.389570,
0.300785, 0.740242, 0.439590, 0.807693, 0.417062, 0.858367, 0.782341,
0.328586, 0.658840, 0.695943, 0.667562, 0.561684, 0.448821, 0.542700,
0.111756, 0.366548, 0.091202, 0.159737, 0.429537, 0.229529, 0.090331,
0.869770, 0.127388, 0.482145, 0.762938, 0.610432, 0.621379, 0.402765,
0.170407, 0.894928, 0.792336, 0.471192, 0.635170, 0.231926, 0.278886,
0.052232, 0.090293, 0.061226, 0.380818, 0.749133, 0.757170, 0.048380,
0.310817, 0.205990, 0.591080, 0.422573, 0.572538, 0.682282, 0.582310,
0.002075, 0.911812, 0.672641, 0.871845, 0.039199, 0.154786, 0.634783,
0.649631, 0.776165, 0.037548, 0.820038, 0.671093, 0.829884, 0.291231,
0.306263, 0.061810, 0.570116, 0.358495, 0.152103, 0.631343, 0.739313,
0.901236, 0.388512, 0.787693, 0.212053, 0.594503, 0.378773, 0.634626,
0.167040, 0.061056, 0.216937, 0.169115, 0.972867, 0.889578, 0.040960,
0.012067, 0.044364, 0.675743, 0.661698, 0.820529, 0.713291, 0.481736,
0.491623, 0.543175, 0.772966, 0.797886, 0.604985, 0.343083, 0.156380,
0.757088, 0.974425, 0.895693, 0.658324, 0.362938, 0.683386, 0.870376,
0.957440, 0.062159, 0.505002, 0.124481, 0.123215, 0.721939, 0.293596,
0.096082, 0.611517, 0.334556, 0.108149, 0.655881, 0.010299, 0.769846,
0.476411, 0.723590, 0.251582, 0.968033, 0.266765, 0.024548, 0.765919,
0.871750, 0.367631, 0.922299, 0.628838, 0.342056, 0.817992, 0.287162,
0.704994, 0.501378, 0.157538, 0.662434, 0.563537, 0.662541, 0.786915,
0.686752, 0.384480, 0.080511, 0.782834, 0.995997, 0.415067, 0.890983,
0.651878, 0.425365, 0.660829, 0.128289, 0.148956, 0.912411, 0.096322,
0.415721, 0.936959, 0.862241, 0.287471, 0.304590, 0.784540, 0.916309,
0.646646, 0.602533, 0.203471, 0.351640, 0.103911, 0.361009, 0.014074,
0.667448, 0.023550, 0.800989, 0.354200, 0.408030, 0.881500, 0.137034,
0.404026, 0.296566, 0.028017, 0.055904, 0.721932, 0.688846, 0.184193,
0.870887, 0.601257, 0.280515, 0.286608, 0.538216, 0.142755, 0.574079,
0.842806, 0.927296, 0.490388, 0.489452, 0.529828, 0.693859, 0.841092,
0.633739, 0.054869, 0.855167, 0.301187, 0.078419, 0.656156, 0.655388,
0.486448, 0.537656, 0.792422, 0.890475, 0.834222, 0.820439, 0.946379,
0.556153, 0.509285, 0.130571, 0.427041, 0.110542, 0.411086, 0.713648,
0.648758, 0.553842, 0.287727, 0.491563, 0.481137, 0.778116, 0.981015,
0.010966, 0.471975, 0.822107, 0.644705, 0.526844, 0.677274, 0.945892,
0.605263, 0.333430, 0.601280, 0.091711, 0.871086, 0.393702, 0.982186,
0.705307, 0.214141, 0.928564, 0.261461, 0.723426, 0.059136, 0.688501,
0.833968, 0.470222, 0.402150, 0.482725, 0.024063, 0.689877, 0.974289,
0.505201, 0.467993, 0.955304, 0.516166, 0.939968, 0.777411, 0.160871,
0.466812, 0.454685, 0.106763, 0.072075, 0.788115, 0.708043, 0.163786,
0.659201, 0.101744, 0.145971, 0.364508, 0.315885, 0.074536, 0.625969,
0.039311, 0.133672, 0.314471, 0.873279, 0.603893, 0.716620, 0.356004,
0.627957, 0.406498, 0.330292, 0.133157, 0.874490, 0.285596, 0.649324,
0.814458, 0.063007, 0.810195, 0.281270, 0.517693, 0.916958, 0.353345,
0.305808, 0.625000, 0.517131, 0.965009, 0.726745, 0.663102, 0.329518,
0.042630, 0.737638, 0.955487, 0.081940, 0.871310, 0.269957, 0.955219,
0.475203, 0.986578, 0.311223, 0.103160, 0.393075, 0.641515, 0.236317,
0.267566, 0.927112, 0.885641, 0.082024, 0.990119, 0.695835, 0.363295,
0.507812, 0.612793, 0.716640, 0.813620, 0.237793, 0.233770, 0.778629,
0.964538, 0.896872, 0.108147, 0.007167, 0.634510, 0.063633, 0.089108,
0.505820, 0.333591, 0.044327, 0.981023, 0.320168, 0.355550, 0.084182,
0.713244, 0.997065, 0.320499, 0.980810, 0.924177, 0.206140, 0.062834,
0.914296, 0.901975, 0.426129, 0.422107, 0.514768, 0.142768, 0.235727,
0.752561, 0.376539, 0.014356, 0.717099, 0.273411, 0.122502, 0.724266,
0.907921, 0.186136, 0.813374, 0.413741, 0.519726, 0.857701, 0.394764,
0.839895, 0.213251, 0.478946, 0.553139, 0.210317, 0.799446, 0.533948,
0.134493, 0.005586, 0.596782, 0.048789, 0.907561, 0.022911, 0.470896,
0.422329, 0.165679, 0.706623, 0.174890, 0.542218, 0.720979, 0.891989,
0.815629, 0.843481, 0.616255, 0.723551, 0.029617, 0.429630, 0.137292,
0.549343, 0.287331, 0.532056, 0.389238, 0.500583, 0.011002, 0.942377,
0.710899, 0.810448, 0.476326, 0.845392, 0.816033, 0.073108, 0.894181,
0.723594, 0.096019, 0.365077, 0.145923, 0.261699, 0.071700, 0.320813,
0.803917, 0.792679, 0.212802, 0.619546, 0.636160, 0.829057, 0.343096,
0.665777, 0.258687, 0.480388, 0.215121, 0.546018, 0.012444, 0.604359,
0.046601, 0.023446, 0.546736, 0.757500, 0.833893, 0.023062, 0.602892,
0.649927, 0.096170, 0.497074, 0.373521, 0.192189, 0.862151, 0.519444,
0.453887, 0.933851, 0.840257, 0.257804, 0.726531, 0.053058, 0.877350,
0.362691, 0.882115, 0.220446, 0.028468, 0.140802, 0.700834, 0.243589,
0.686821, 0.713278, 0.847948, 0.733421, 0.736723, 0.394684, 0.490921,
0.570617, 0.417746, 0.093813, 0.220543, 0.513916, 0.590887, 0.594064,
0.706105, 0.453038, 0.113508, 0.159992, 0.386889, 0.953765, 0.417796,
0.113420, 0.006823, 0.295146, 0.476111, 0.888938, 0.515592, 0.504579,
0.029741, 0.216426, 0.748168, 0.716561, 0.929703, 0.596117, 0.449982,
0.666427, 0.990801, 0.940903, 0.237043, 0.408547, 0.034717, 0.457587,
0.922463, 0.625603, 0.051651, 0.628568, 0.078641, 0.165159, 0.788560,
0.465530, 0.118923, 0.206356, 0.578950, 0.125746, 0.501502, 0.055060,
0.014685, 0.017094, 0.559640, 0.044425, 0.233519, 0.307808, 0.760986,
0.163223, 0.903925, 0.210969, 0.829650, 0.894726, 0.151872, 0.066693,
0.303273, 0.186589, 0.524279, 0.225736, 0.812192, 0.575930, 0.854304,
0.890833, 0.741089, 0.642864, 0.356363, 0.860012, 0.849220, 0.935313,
0.985758, 0.350722, 0.990373, 0.000443, 0.367815, 0.550013, 0.044868,
0.601335, 0.857820, 0.805855, 0.764557, 0.761745, 0.016823, 0.594207,
0.656471, 0.168696, 0.660900, 0.959744, 0.355284, 0.185179, 0.185480,
0.167477, 0.761110, 0.039784, 0.058310, 0.502199, 0.682648, 0.414673,
0.362211, 0.531868, 0.349985, 0.347969, 0.882589, 0.340358, 0.348412,
0.250404, 0.890371, 0.393280, 0.851739, 0.748191, 0.199135, 0.616297,
0.509936, 0.215958, 0.210504, 0.166407, 0.384654, 0.871404, 0.126151,
0.739938, 0.056583, 0.311631, 0.907415, 0.817693, 0.351415, 0.965724,
0.319891, 0.034062, 0.380397, 0.682102, 0.565930, 0.730382, 0.030072,
0.448519, 0.070741, 0.378484, 0.698924, 0.961112, 0.771764, 0.550663,
0.709303, 0.970899, 0.166959, 0.219239, 0.186857, 0.377463, 0.385647,
0.571511, 0.248867, 0.511798, 0.311449, 0.305450, 0.823429, 0.218864,
0.123142, 0.174844, 0.184588, 0.443034, 0.208906, 0.564986, 0.125136,
0.774836, 0.295368, 0.155207, 0.223355, 0.366109, 0.533691, 0.922279,
0.327221, 0.305455, 0.472942, 0.036524, 0.276354, 0.639901, 0.255763,
0.463211, 0.017364, 0.641410, 0.034722, 0.266231, 0.153207, 0.346171,
0.571680, 0.976636, 0.565036, 0.694822, 0.151480, 0.749624, 0.137856,
0.360386, 0.314610, 0.262992, 0.135222, 0.609978, 0.418200, 0.358578,
0.976087, 0.951891, 0.280856, 0.303307, 0.257346, 0.753798, 0.339831,
0.533700, 0.393699, 0.595594, 0.996911, 0.411063, 0.237003, 0.031634,
0.677294, 0.390211, 0.377805, 0.248974, 0.366847, 0.942841, 0.943796,
0.518327, 0.692465, 0.081653, 0.878713, 0.007074, 0.344645, 0.013936,
0.617052, 0.762845, 0.372513, 0.593138, 0.714736, 0.653370, 0.896446,
0.972082, 0.407168, 0.236276, 0.505782, 0.800867, 0.831870, 0.502693,
0.211930, 0.068873, 0.534327, 0.889224, 0.459084, 0.912132, 0.138197,
0.825931, 0.854972, 0.081994, 0.344259, 0.547437, 0.163646, 0.222972,
0.554511, 0.508291, 0.236908, 0.171563, 0.271135, 0.609421, 0.764701,
0.985871, 0.262790, 0.661147, 0.957953, 0.669958, 0.897423, 0.463734,
0.470825, 0.729293, 0.966427, 0.682755, 0.798166, 0.500754, 0.571978,
0.257251, 0.412886, 0.710176, 0.083182, 0.267858, 0.792169, 0.427441,
0.815295, 0.955815, 0.650413, 0.369805, 0.464106, 0.887320, 0.541368,
0.735242, 0.496741, 0.306069, 0.721113, 0.759531, 0.967216, 0.679065,
0.429489, 0.864639, 0.142799, 0.900314, 0.593932, 0.109227, 0.583069,
0.392098, 0.609981, 0.155047, 0.649349, 0.022867, 0.865222, 0.732531,
0.290725, 0.657392, 0.159972, 0.106019, 0.613207, 0.810384, 0.475824,
0.077313, 0.697704, 0.017192, 0.812555};
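// Golden output of the fully connected layer in the end-to-end test:
// 64 examples, each 4 time steps x 4 output units.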
static float golden_endtoend_output[] = {
-1.881211, -0.028385, -3.585066, 1.939770, -3.461155, 1.280415, -4.408978,
0.608663, -2.704937, 1.859742, -5.777429, 2.691839, -1.049012, 1.640870,
-4.856245, 1.604236, 0.992707, 0.422858, -4.307465, 1.887332, -0.884831,
-0.154277, -2.634801, 0.586827, -1.849960, 1.399608, -4.531559, 1.943591,
0.271676, -2.893054, -2.066826, 0.235467, -1.248263, -1.164534, -2.640174,
-0.112878, -4.386484, 1.253024, -4.135623, 1.068984, -0.043579, -0.832957,
-3.257258, -0.514396, -1.651174, 0.638630, -4.364372, 1.548441, -0.289455,
0.539845, -4.097627, 0.635001, -0.465071, -0.927701, -2.481498, 0.356616,
-2.355012, 0.728806, -3.340283, 1.609038, -4.786268, -0.532272, -1.886150,
0.254797, 0.746620, -1.657134, -3.264265, 0.525551, -1.756837, 0.845446,
-5.572190, 1.715797, -2.856942, 3.394245, -5.803662, 2.281806, -3.014739,
2.616136, -4.728482, 1.659984, -2.106307, 2.711709, -6.173832, 1.352869,
-0.038035, 0.107619, -4.279774, 2.341930, -0.980413, -0.119538, -4.049717,
1.172128, -3.477744, 2.602274, -6.231380, 2.537300, -0.862214, 0.568722,
-3.858362, 0.197867, -1.725885, 3.687312, -7.067363, 2.403544, -0.944963,
0.235639, -3.250094, 0.659117, -1.459576, 0.426128, -3.637207, 1.030386,
-4.224351, 3.516220, -6.053367, 0.993473, -2.182416, -0.762625, -1.884405,
-0.113736, -2.572602, 0.329290, -1.913233, 0.517418, -0.019757, 0.203176,
-3.715881, 0.482136, -1.912823, 1.357907, -5.473043, 1.714658, -3.177160,
0.089285, -3.127669, 1.268076, 0.772498, -1.622712, -3.850314, 0.436124,
-1.495983, 3.439982, -7.623405, 1.726721, -0.423979, 0.180201, -2.902406,
0.986457, -1.845638, 0.460903, -5.359343, -1.133931, -1.074456, 0.717304,
-3.519856, 1.012126, -0.562301, 1.881967, -6.716627, 2.525036, 0.945480,
0.337081, -5.210562, 2.572035, -0.943370, 0.442026, -2.666313, 0.411296,
0.002787, -0.000735, -2.498933, 0.771719, -3.568153, 3.833721, -6.617026,
2.813922, -0.573970, 1.025208, -3.909923, 1.722648, -1.406849, 0.719783,
-5.207438, 1.819442, -0.530895, -0.010887, -2.939614, 0.971225, -1.660297,
1.345243, -4.454571, 2.244876, -2.021213, 1.756090, -4.880947, 0.364597,
-2.380270, 2.763117, -5.613013, 2.137534, 0.289101, -2.279400, -3.365582,
0.170028, -1.142254, -0.709604, -3.656223, 1.804870, -0.854690, 0.592102,
-5.010415, 2.462687, -1.474710, 0.566002, -3.621819, -0.391946, -0.423524,
-0.631428, -3.513310, 0.962825, -1.480262, 0.319791, -3.610137, 1.842339,
-0.250073, 1.182022, -6.249267, 1.604172, 1.153759, -0.734054, -4.620415,
-0.030858, 0.050911, 1.524406, -4.724010, 1.451846, -3.277104, 2.414182,
-4.605285, 1.846092, -1.503047, -0.618200, -2.746546, -0.459332, -0.980326,
-1.199977, -2.043865, -0.165793, -2.214698, 3.108281, -7.127830, -0.123065,
1.244948, -3.039923, -4.660061, -0.225957, -0.307210, -1.513205, -2.456005,
0.840048, -0.741445, 2.328635, -6.015267, 2.723240, -1.381171, -0.728878,
-5.114925, -0.362034, -0.574923, 0.518080, -3.892457, 1.798948, 0.435119,
-0.371696, -2.807571, 1.302864, -2.063052, 1.036388, -4.232038, 1.397059,
-1.615668, -1.511019, -3.095508, 1.290955, -3.428723, 2.000287, -4.196487,
1.566983, 0.196957, 0.224343, -4.926359, -0.691975, -0.214941, 1.546821,
-5.384868, 2.290820, -1.878865, 0.493692, -4.129823, 2.112036, 0.516558,
-2.553077, -2.717338, 0.017146, -2.016057, 1.628995, -4.240602, 1.189533,
-5.460220, 1.254738, -4.214903, 0.755659, -2.893235, 2.937762, -6.169453,
2.035456, -5.613212, -0.122254, -1.973646, -0.060619, -2.119598, 1.413512,
-4.938738, 1.890244, 0.544169, -2.062413, -3.329637, -0.062515, -1.855805,
-0.791297, -2.570353, 0.607615, 0.305812, 0.338930, -4.150270, 2.274937,
0.042653, 0.133825, -3.538155, 1.523639, -3.173690, -1.496599, -2.414655,
0.464687, -1.448998, -0.368907, -3.520129, 0.203382, -2.443626, 1.266233,
-3.393848, 0.605911, -0.015353, 1.402006, -4.441003, 1.419281, 0.603587,
0.434146, -4.966566, 2.171872, -0.688264, -0.009981, -4.461103, 1.538354,
-5.029816, -0.264424, -1.713510, -0.315258, -1.891606, 0.252074, -2.419428,
0.043970, -1.291143, 2.048704, -4.590105, 0.524734, -1.889576, 0.134836,
-3.462745, 1.390663, -0.112773, 0.402735, -4.203784, 1.381043, -1.201634,
-1.968277, -1.425637, -0.181725, -1.250742, -2.102041, -3.925464, -1.256797,
-3.701354, -1.754610, -1.917231, -1.455910, -1.838006, 2.041781, -5.666212,
2.752957, -2.659553, 2.553637, -4.872212, 1.443437, -2.081846, 3.311263,
-5.912457, 1.871049, 0.196148, -0.307044, -4.024967, 2.149149, 0.361809,
0.620415, -5.939984, 0.180672, -1.209180, -0.269122, -3.240285, 1.460315,
-1.040803, 1.125700, -6.060366, 0.887767, -3.214111, 1.314368, -3.026808,
1.023640, -3.815175, 1.795642, -4.355603, 1.064454, -0.046472, 0.618463,
-5.941646, 2.861891, -2.852155, -0.990457, -2.624445, 1.794494, -1.176747,
-0.358159, -3.206776, 1.138721, -2.819523, -1.825522, -1.450902, -0.187312,
-0.808727, 0.636872, -4.120567, 1.192623, 0.810731, -1.768519, -3.699450,
1.527116, -2.772720, 3.012835, -5.912736, 1.599365, -4.696381, 2.234591,
-4.139552, 1.061768, -1.880089, 3.596274, -7.006379, 2.382152, -3.158115,
3.844430, -7.044156, 2.307596, -2.473970, 1.312644, -5.467269, 0.197154,
-1.530040, 1.762275, -5.550757, 0.630276, -3.048947, 1.043777, -3.096658,
1.345893, -1.329494, 2.065748, -4.711032, 2.227600, -0.413321, -0.032428,
-4.599650, 1.668734, -4.351490, -0.200022, -2.359903, 0.021997, 0.116028,
1.159718, -5.093972, -0.142951, -2.409895, 0.906133, -2.728812, 0.809932,
-2.597363, 0.494130, -2.357861, 0.369825, -2.165235, 1.148522, -3.130562,
0.759034, 0.646335, -1.463660, -3.508299, 1.059679, -1.485465, 1.007319,
-4.340716, 1.789864, -1.590654, 1.612324, -4.452007, 2.389805, -5.200148,
-1.068398, -1.306923, -0.472408, -0.392165, -0.524996, -2.933478, 1.518430,
-1.287781, 0.113422, -3.020525, 1.338359, -0.105982, 0.936014, -4.132197,
1.836807, -0.616589, -1.029716, -3.271347, 0.284889, -2.653359, 2.135829,
-4.643613, 1.627981, 0.287733, -2.017263, -2.776574, 1.184792, 1.004161,
-1.483019, -4.339290, -0.787322, 0.582420, 1.137839, -5.673941, -0.001862,
-1.219142, 0.532561, -4.457245, 1.826807, -3.343291, 3.034610, -6.179855,
2.235917, -4.369989, 4.018128, -6.632714, 0.926585, -0.485469, 0.536073,
-4.179557, 1.489637, -0.521762, 1.636089, -6.137912, 1.500867, -4.086009,
1.961372, -3.688977, 1.358220, -1.544034, 1.763837, -4.357567, 1.852201,
-2.018725, 1.046264, -6.211127, 1.609419, -0.118441, 1.602284, -6.242423,
1.518578, -0.604078, 1.106613, -5.393445, 2.595629, 0.142712, -1.903953,
-2.821177, 0.032758, -0.009152, 0.184628, -4.227636, 2.046843, -2.240138,
1.256176, -5.108516, -0.308447, -2.998571, 4.657396, -7.582112, 2.510951,
-3.535784, 1.704560, -5.068484, 1.318466, -3.058265, 3.073172, -6.998089,
3.178849, -2.420286, 2.277806, -4.999528, 1.423890, -1.672914, 0.447460,
-4.088940, 1.351087, -1.051546, -0.417955, -4.042147, 1.604102, -1.700931,
2.796663, -6.497579, 2.857974, -0.240828, 0.858001, -5.778933, 2.778508,
-0.406211, 1.300766, -5.073671, 2.089362, -0.201673, 1.588396, -6.000150,
2.185055, -2.332125, 0.768216, -2.609184, 0.327277, -3.358943, -1.020736,
-2.389984, 0.315512, -0.561905, 1.948740, -6.408485, 2.231985, -0.603652,
0.661829, -5.070386, -1.063058, -0.624796, 1.375772, -4.379606, 1.929358,
-1.047263, 0.739100, -5.217857, 2.127625, -5.025338, 0.650344, -2.068460,
0.076936, -0.457505, -1.050984, -1.917765, 1.150908, 0.782625, 0.855595,
-5.321719, 0.787209, -0.460232, 1.106736, -5.552326, 2.801043, -0.360217,
-0.434432, -4.273378, 0.967556, -0.972652, 0.874811, -5.429918, -0.331039,
0.115477, 0.111883, -5.418786, 1.240546, -1.842794, 0.505880, -3.676064,
-0.682369, 1.858984, -0.742566, -5.784060, 0.673239, -1.280398, 0.280842,
-4.848077, 2.214860, -0.785100, -0.588488, -2.438206, 0.786651, -1.568752,
1.935400, -6.320256, 2.125338, -1.476457, -1.651941, -2.695734, 0.007338,
-3.280860, 2.310385, -5.319578, 1.890123, -0.775723, 0.630606, -4.321582,
1.085521, -1.847371, 1.188521, -4.596577, 2.056443, -2.340172, -0.108501,
-3.156392, 0.933279, -0.495331, 0.122405, -5.171133, 1.763245, -0.796913,
2.310487, -7.247197, 2.401678, -1.908860, 0.043798, -2.393796, 0.573806,
-0.608531, 0.154710, -4.669001, 0.750680, 0.468380, 0.392591, -4.755001,
2.615217, -1.957774, 1.153513, -4.530099, 1.124362, -3.569415, 1.697154,
-3.536335, 0.910758, -2.976264, 1.833129, -4.287203, -0.547050, -2.409768,
0.061585, -1.324116, 0.268497, -2.962222, -1.524245, -2.063413, 0.442058,
-4.292337, 3.538863, -6.699603, 1.718664, -2.290363, 1.994596, -6.245037,
-0.433084, -0.367059, 1.020297, -4.940721, 2.902264, -0.577056, -0.709887,
-5.001413, -0.268316, -1.112048, -1.083307, -1.753492, 0.209973, 0.139540,
0.917602, -5.232745, 2.538467, -2.139234, -0.187388, -1.837249, -0.478582,
-0.731653, -0.481550, -2.531261, 1.044770, 0.707750, 0.279971, -3.221119,
1.552074, -2.373144, 0.859518, -3.665156, 1.620278, -1.440871, -0.525581,
-2.758271, 1.491873, -2.302013, 1.119935, -5.257080, 2.627170, -3.174739,
1.363282, -4.831639, 1.101076, -4.337008, 2.689639, -5.165915, 1.069201,
-1.882078, -0.120370, -2.287967, 1.147619, -1.403616, 1.077150, -5.084296,
1.658236, -0.919642, 0.487423, -3.001075, 0.741268, 0.107300, 0.943556,
-3.544311, 1.000239, -1.627171, 2.871253, -5.179172, 1.429893, -0.826040,
0.188670, -4.499894, 1.013447, -2.101299, 0.317516, -3.452141, -0.833776,
-1.362144, 1.272437, -4.449355, 1.613591, -2.039873, 2.613175, -6.229640,
1.659790, -1.595520, -0.237462, -2.744997, 0.337841, 0.148981, -1.703771,
-2.388023, 1.276469, 1.058508, -0.401642, -4.680769, 0.861881, -1.336381,
1.153080, -2.834378, 0.721075, 0.900115, 1.360511, -5.573611, 0.949182,
-2.970844, 2.017563, -5.186108, -0.201038, -1.192824, 0.610142, -4.450919,
-0.897114, -1.812093, 0.422310, -5.245487, 0.256549, 0.320275, -2.324150,
-2.967040, -0.260536, -0.721467, 0.454148, -5.058031, 0.526370, -0.895656,
0.732240, -3.327363, 1.353953, -1.277912, -0.483171, -1.926713, 0.065044,
-2.167506, -0.196606, -1.923437, 0.604962, -2.088319, 1.406834, -5.227296,
2.247351, -4.421744, 1.729791, -5.007922, 1.264769, -0.897019, 0.922902,
-3.887108, 2.087432, -1.310226, -0.101938, -3.359082, -0.079662, -0.514988,
-0.963179, -4.038209, 2.223278, -0.590083, -2.310458, -1.748338, 0.363406,
-0.540731, -0.885913, -4.179595, 2.216781, -3.044339, -0.447100, -2.446098,
0.931101, -1.676190, 2.096175, -4.980755, 2.262151, -1.095047, 1.897516,
-5.996138, 2.191038, 0.297128, -0.780974, -2.884299, 1.195408, -0.521065,
-1.955837, -3.091064, -0.404183, -1.961519, 4.076096, -7.521851, 2.242064,
-1.988043, 0.303300, -2.422585, 0.322230, -3.377634, 3.499955, -7.084434,
2.375587, -0.718851, 2.150076, -5.412241, 2.374280, -2.006088, 2.229828,
-5.848188, 2.543077, -2.171042, 2.096026, -5.300007, 0.141405, -1.187745,
0.105340, -4.003816, 1.034281, -3.980804, 1.856709, -5.103042, 0.623737,
-2.080307, 0.896140, -3.104050, 0.983158, -0.424898, -1.154270, -3.805728,
1.978917, -1.314387, 1.235096, -3.148906, 1.113173, 0.111713, 2.055213,
-7.565283, 2.100342};
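// Per-unit biases shared by the forward and backward cells (16 values).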
const std::initializer_list<float> biases = {
0.065691948, -0.69055247, 0.1107955, -0.97084129, -0.23957068, -0.23566568,
-0.389184, 0.47481549, -0.4791103, 0.29931796, 0.10463274, 0.83918178,
0.37197268, 0.61957061, 0.3956964, -0.37609905};
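// Recurrent (hidden-to-hidden) weights: 0.1 times the 16x16 identity, so each
// unit feeds back weakly onto itself only.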
const std::initializer_list<float> recurrent_weights = {
0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0.1};
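// Builds a single BIDIRECTIONAL_SEQUENCE_RNN op with optionally quantized
// weights and exposes setters/getters for its tensors.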
class BidirectionalRNNOpModel : public SingleOpModel {
public:
BidirectionalRNNOpModel(int batches, int sequence_len, int fw_units,
int bw_units, int input_size, int aux_input_size,
AuxInputMode aux_input_mode, bool time_major,
bool merge_outputs, bool quantize_weights = false,
bool asymmetric_quantize_weights = false)
: batches_(batches),
sequence_len_(sequence_len),
fw_units_(fw_units),
bw_units_(bw_units),
input_size_(input_size),
aux_input_size_(aux_input_size),
quantize_weights_(quantize_weights) {
const TensorType tensor_type =
quantize_weights ? TensorType_UINT8 : TensorType_FLOAT32;
input_ = AddInput(TensorType_FLOAT32);
fw_weights_ = AddInput(tensor_type);
fw_recurrent_weights_ = AddInput(tensor_type);
fw_bias_ = AddInput(TensorType_FLOAT32);
fw_hidden_state_ = AddVariableInput(TensorType_FLOAT32);
bw_weights_ = AddInput(tensor_type);
bw_recurrent_weights_ = AddInput(tensor_type);
bw_bias_ = AddInput(TensorType_FLOAT32);
bw_hidden_state_ = AddVariableInput(TensorType_FLOAT32);
const auto input_shape =
(time_major) ? std::vector<int>({sequence_len_, batches_, input_size_})
: std::vector<int>({batches_, sequence_len_, input_size_});
std::vector<int> aux_input_shape = {0};
std::vector<int> aux_fw_weights_shape = {0};
std::vector<int> aux_bw_weights_shape = {0};
if (aux_input_mode != AuxInputMode::kNoAuxInput) {
aux_input_ = AddInput(TensorType_FLOAT32);
aux_input_shape =
(time_major)
? std::vector<int>({sequence_len_, batches_, aux_input_size_})
: std::vector<int>({batches_, sequence_len_, aux_input_size_});
} else {
aux_input_ = AddNullInput();
}
if (aux_input_mode == AuxInputMode::kCrossLinking) {
aux_fw_weights_ = AddInput(tensor_type);
aux_bw_weights_ = AddInput(tensor_type);
aux_fw_weights_shape = {fw_units, aux_input_size_};
aux_bw_weights_shape = {bw_units, aux_input_size_};
} else {
aux_fw_weights_ = AddNullInput();
aux_bw_weights_ = AddNullInput();
}
fw_output_ = AddOutput(TensorType_FLOAT32);
if (!merge_outputs) {
bw_output_ = AddOutput(TensorType_FLOAT32);
}
SetBuiltinOp(BuiltinOperator_BIDIRECTIONAL_SEQUENCE_RNN,
BuiltinOptions_BidirectionalSequenceRNNOptions,
CreateBidirectionalSequenceRNNOptions(
builder_, time_major, ActivationFunctionType_RELU,
merge_outputs, asymmetric_quantize_weights)
.Union());
BuildInterpreter({
input_shape,
{fw_units_, input_size_},
{fw_units_, fw_units_},
{fw_units_},
{batches_, fw_units_},
{bw_units_, input_size_},
{bw_units_, bw_units_},
{bw_units_},
{batches_, bw_units_},
aux_input_shape,
aux_fw_weights_shape,
aux_bw_weights_shape,
});
}
void SetFwBias(std::initializer_list<float> f) {
PopulateTensor(fw_bias_, f);
}
void SetBwBias(std::initializer_list<float> f) {
PopulateTensor(bw_bias_, f);
}
void SetFwWeights(const std::vector<float>& f) {
if (quantize_weights_) {
SymmetricQuantizeAndPopulate(fw_weights_, f);
} else {
PopulateTensor(fw_weights_, f);
}
}
void SetBwWeights(const std::vector<float>& f) {
if (quantize_weights_) {
SymmetricQuantizeAndPopulate(bw_weights_, f);
} else {
PopulateTensor(bw_weights_, f);
}
}
void SetFwRecurrentWeights(const std::vector<float>& f) {
if (quantize_weights_) {
SymmetricQuantizeAndPopulate(fw_recurrent_weights_, f);
} else {
PopulateTensor(fw_recurrent_weights_, f);
}
}
void SetBwRecurrentWeights(const std::vector<float>& f) {
if (quantize_weights_) {
SymmetricQuantizeAndPopulate(bw_recurrent_weights_, f);
} else {
PopulateTensor(bw_recurrent_weights_, f);
}
}
void SetInput(std::initializer_list<float> data) {
PopulateTensor(input_, data);
}
void SetInput(int offset, float* begin, float* end) {
PopulateTensor(input_, offset, begin, end);
}
void SetAuxInput(int offset, float* begin, float* end) {
PopulateTensor(aux_input_, offset, begin, end);
}
void SetAuxFwWeights(const std::vector<float>& f) {
if (quantize_weights_) {
SymmetricQuantizeAndPopulate(aux_fw_weights_, f);
} else {
PopulateTensor(aux_fw_weights_, f);
}
}
void SetAuxBwWeights(const std::vector<float>& f) {
if (quantize_weights_) {
SymmetricQuantizeAndPopulate(aux_bw_weights_, f);
} else {
PopulateTensor(aux_bw_weights_, f);
}
}
std::vector<float> GetFwOutput() { return ExtractVector<float>(fw_output_); }
std::vector<float> GetBwOutput() { return ExtractVector<float>(bw_output_); }
int input_size() { return input_size_; }
int aux_input_size() { return aux_input_size_; }
int num_fw_units() { return fw_units_; }
int num_bw_units() { return bw_units_; }
int num_batches() { return batches_; }
int sequence_len() { return sequence_len_; }
private:
int input_;
int fw_weights_;
int fw_recurrent_weights_;
int fw_bias_;
int fw_hidden_state_;
int fw_output_;
int bw_weights_;
int bw_recurrent_weights_;
int bw_bias_;
int bw_hidden_state_;
int bw_output_;
int aux_input_;
int aux_fw_weights_;
int aux_bw_weights_;
int batches_;
int sequence_len_;
int fw_units_;
int bw_units_;
int input_size_;
int aux_input_size_;
bool quantize_weights_;
};
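// The closed-box tests below are parameterized over
// (quantize_weights, asymmetric_quantize_inputs) and compare the op's outputs
// against the golden arrays above.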
class BidirectionalRNNOpTest
: public ::testing::TestWithParam<::testing::tuple<bool, bool>> {};
INSTANTIATE_TEST_SUITE_P(QuantizationOrNot, BidirectionalRNNOpTest,
::testing::Combine(
::testing::Bool(),
::testing::Bool()));
TEST_P(BidirectionalRNNOpTest, ClosedBoxTest) {
auto params = GetParam();
const bool quantize_weights = std::get<0>(params);
const bool asymmetric_quantize_inputs = std::get<1>(params);
  BidirectionalRNNOpModel rnn(/*batches=*/2, /*sequence_len=*/16,
                              /*fw_units=*/16, /*bw_units=*/16,
                              /*input_size=*/8, /*aux_input_size=*/0,
                              AuxInputMode::kNoAuxInput,
                              /*time_major=*/false,
                              /*merge_outputs=*/false, quantize_weights,
                              asymmetric_quantize_inputs);
rnn.SetFwWeights(weights);
rnn.SetBwWeights(weights);
rnn.SetFwBias(biases);
rnn.SetBwBias(biases);
rnn.SetFwRecurrentWeights(recurrent_weights);
rnn.SetBwRecurrentWeights(recurrent_weights);
const int input_sequence_size = rnn.input_size() * rnn.sequence_len();
float* batch_start = rnn_input;
float* batch_end = batch_start + input_sequence_size;
rnn.SetInput(0, batch_start, batch_end);
rnn.SetInput(input_sequence_size, batch_start, batch_end);
ASSERT_EQ(rnn.Invoke(), kTfLiteOk);
float* golden_fw_start = rnn_golden_fw_output;
float* golden_fw_end =
golden_fw_start + rnn.num_fw_units() * rnn.sequence_len();
std::vector<float> fw_expected;
fw_expected.insert(fw_expected.end(), golden_fw_start, golden_fw_end);
fw_expected.insert(fw_expected.end(), golden_fw_start, golden_fw_end);
EXPECT_THAT(rnn.GetFwOutput(),
ElementsAreArray(ArrayFloatNear(
fw_expected, quantize_weights ? 1.42e-2 : 1e-5)));
float* golden_bw_start = rnn_golden_bw_output;
float* golden_bw_end =
golden_bw_start + rnn.num_bw_units() * rnn.sequence_len();
std::vector<float> bw_expected;
bw_expected.insert(bw_expected.end(), golden_bw_start, golden_bw_end);
bw_expected.insert(bw_expected.end(), golden_bw_start, golden_bw_end);
EXPECT_THAT(rnn.GetBwOutput(),
ElementsAreArray(ArrayFloatNear(
bw_expected, quantize_weights ? 1.42e-2 : 1e-5)));
}
TEST_P(BidirectionalRNNOpTest, ClosedBoxTestTimeMajor) {
auto params = GetParam();
const bool quantize_weights = std::get<0>(params);
const bool asymmetric_quantize_inputs = std::get<1>(params);
  BidirectionalRNNOpModel rnn(/*batches=*/2, /*sequence_len=*/16,
                              /*fw_units=*/16, /*bw_units=*/16,
                              /*input_size=*/8, /*aux_input_size=*/0,
                              AuxInputMode::kNoAuxInput,
                              /*time_major=*/true,
                              /*merge_outputs=*/false, quantize_weights,
                              asymmetric_quantize_inputs);
rnn.SetFwWeights(weights);
rnn.SetBwWeights(weights);
rnn.SetFwBias(biases);
rnn.SetBwBias(biases);
rnn.SetFwRecurrentWeights(recurrent_weights);
rnn.SetBwRecurrentWeights(recurrent_weights);
for (int i = 0; i < rnn.sequence_len(); i++) {
float* batch_start = rnn_input + i * rnn.input_size();
float* batch_end = batch_start + rnn.input_size();
rnn.SetInput(2 * i * rnn.input_size(), batch_start, batch_end);
rnn.SetInput((2 * i + 1) * rnn.input_size(), batch_start, batch_end);
}
ASSERT_EQ(rnn.Invoke(), kTfLiteOk);
std::vector<float> fw_expected;
for (int i = 0; i < rnn.sequence_len(); i++) {
float* golden_fw_start = rnn_golden_fw_output + i * rnn.num_fw_units();
float* golden_fw_end = golden_fw_start + rnn.num_fw_units();
fw_expected.insert(fw_expected.end(), golden_fw_start, golden_fw_end);
fw_expected.insert(fw_expected.end(), golden_fw_start, golden_fw_end);
}
constexpr float kHybridTolerance = 3.57e-1;
constexpr float kFloatTolerance = 1e-5;
EXPECT_THAT(
rnn.GetFwOutput(),
ElementsAreArray(ArrayFloatNear(
fw_expected, quantize_weights ? kHybridTolerance : kFloatTolerance)));
}
TEST_P(BidirectionalRNNOpTest, ClosedBoxTestMergeOutputs) {
auto params = GetParam();
const bool quantize_weights = std::get<0>(params);
const bool asymmetric_quantize_inputs = std::get<1>(params);
  BidirectionalRNNOpModel rnn(/*batches=*/2, /*sequence_len=*/16,
                              /*fw_units=*/16, /*bw_units=*/16,
                              /*input_size=*/8, /*aux_input_size=*/0,
                              AuxInputMode::kNoAuxInput,
                              /*time_major=*/false,
                              /*merge_outputs=*/true, quantize_weights,
                              asymmetric_quantize_inputs);
rnn.SetFwWeights(weights);
rnn.SetBwWeights(weights);
rnn.SetFwBias(biases);
rnn.SetBwBias(biases);
rnn.SetFwRecurrentWeights(recurrent_weights);
rnn.SetBwRecurrentWeights(recurrent_weights);
const int input_sequence_size = rnn.input_size() * rnn.sequence_len();
float* batch_start = rnn_input;
float* batch_end = batch_start + input_sequence_size;
rnn.SetInput(0, batch_start, batch_end);
rnn.SetInput(input_sequence_size, batch_start, batch_end);
ASSERT_EQ(rnn.Invoke(), kTfLiteOk);
std::vector<float> merged_expected;
for (int bid = 0; bid < rnn.num_batches(); bid++) {
for (int step = 0; step < rnn.sequence_len(); step++) {
merged_expected.insert(
merged_expected.end(),
rnn_golden_fw_output + rnn.num_fw_units() * step,
rnn_golden_fw_output + rnn.num_fw_units() * (step + 1));
merged_expected.insert(
merged_expected.end(),
rnn_golden_bw_output + rnn.num_bw_units() * step,
rnn_golden_bw_output + rnn.num_bw_units() * (step + 1));
}
}
EXPECT_THAT(rnn.GetFwOutput(),
ElementsAreArray(ArrayFloatNear(
merged_expected, quantize_weights ? 1.42e-2 : 1e-5)));
}
TEST(BidirectionalRNNOpTest, ClosedBoxTestTimeMajorMergeOutputs) {
  BidirectionalRNNOpModel rnn(/*batches=*/2, /*sequence_len=*/16,
                              /*fw_units=*/16, /*bw_units=*/16,
                              /*input_size=*/8, /*aux_input_size=*/0,
                              AuxInputMode::kNoAuxInput,
                              /*time_major=*/true,
                              /*merge_outputs=*/true);
rnn.SetFwWeights(weights);
rnn.SetBwWeights(weights);
rnn.SetFwBias(biases);
rnn.SetBwBias(biases);
rnn.SetFwRecurrentWeights(recurrent_weights);
rnn.SetBwRecurrentWeights(recurrent_weights);
for (int i = 0; i < rnn.sequence_len(); i++) {
float* batch_start = rnn_input + i * rnn.input_size();
float* batch_end = batch_start + rnn.input_size();
rnn.SetInput(2 * i * rnn.input_size(), batch_start, batch_end);
rnn.SetInput((2 * i + 1) * rnn.input_size(), batch_start, batch_end);
}
ASSERT_EQ(rnn.Invoke(), kTfLiteOk);
std::vector<float> merged_expected;
for (int step = 0; step < rnn.sequence_len(); step++) {
for (int bid = 0; bid < rnn.num_batches(); bid++) {
merged_expected.insert(
merged_expected.end(),
rnn_golden_fw_output + rnn.num_fw_units() * step,
rnn_golden_fw_output + rnn.num_fw_units() * (step + 1));
merged_expected.insert(
merged_expected.end(),
rnn_golden_bw_output + rnn.num_bw_units() * step,
rnn_golden_bw_output + rnn.num_bw_units() * (step + 1));
}
}
EXPECT_THAT(rnn.GetFwOutput(),
ElementsAreArray(ArrayFloatNear(merged_expected)));
}
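// Feeding the sequence reversed in time swaps the roles of the two
// directions: the forward output must match the (reversed) backward golden
// data, and vice versa.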
TEST(BidirectionalRNNOpTest, ClosedBoxTestReverseInputs) {
  BidirectionalRNNOpModel rnn(/*batches=*/2, /*sequence_len=*/16,
                              /*fw_units=*/16, /*bw_units=*/16,
                              /*input_size=*/8, /*aux_input_size=*/0,
                              AuxInputMode::kNoAuxInput,
                              /*time_major=*/false,
                              /*merge_outputs=*/false);
rnn.SetFwWeights(weights);
rnn.SetBwWeights(weights);
rnn.SetFwBias(biases);
rnn.SetBwBias(biases);
rnn.SetFwRecurrentWeights(recurrent_weights);
rnn.SetBwRecurrentWeights(recurrent_weights);
for (int i = 0; i < rnn.sequence_len(); i++) {
float* batch_start = rnn_input + i * rnn.input_size();
float* batch_end = batch_start + rnn.input_size();
const int reverse_idx = rnn.sequence_len() - i - 1;
rnn.SetInput(reverse_idx * rnn.input_size(), batch_start, batch_end);
rnn.SetInput((rnn.sequence_len() + reverse_idx) * rnn.input_size(),
batch_start, batch_end);
}
ASSERT_EQ(rnn.Invoke(), kTfLiteOk);
std::vector<float> fw_expected;
for (int i = 0; i < rnn.sequence_len(); i++) {
float* golden_fw_start = rnn_golden_bw_output + i * rnn.num_fw_units();
float* golden_fw_end = golden_fw_start + rnn.num_fw_units();
fw_expected.insert(fw_expected.begin(), golden_fw_start, golden_fw_end);
}
fw_expected.insert(fw_expected.end(), fw_expected.begin(), fw_expected.end());
EXPECT_THAT(rnn.GetFwOutput(), ElementsAreArray(ArrayFloatNear(fw_expected)));
std::vector<float> bw_expected;
for (int i = 0; i < rnn.sequence_len(); i++) {
float* golden_bw_start = rnn_golden_fw_output + i * rnn.num_bw_units();
float* golden_bw_end = golden_bw_start + rnn.num_bw_units();
bw_expected.insert(bw_expected.begin(), golden_bw_start, golden_bw_end);
}
bw_expected.insert(bw_expected.end(), bw_expected.begin(), bw_expected.end());
EXPECT_THAT(rnn.GetBwOutput(), ElementsAreArray(ArrayFloatNear(bw_expected)));
}
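// Runs the RNN over 64 examples, sums the forward and backward activations,
// applies a small fully connected layer on top, and compares against
// precomputed end-to-end golden values.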
TEST(BidirectionalRNNOpTest, EndToEndTest) {
  BidirectionalRNNOpModel rnn(/*batches=*/1, /*sequence_len=*/4,
                              /*fw_units=*/16, /*bw_units=*/16,
                              /*input_size=*/8, /*aux_input_size=*/0,
                              AuxInputMode::kNoAuxInput,
                              /*time_major=*/false,
                              /*merge_outputs=*/false);
const int output_size = 4;
float dnn_weights[] = {
-0.5782342, -0.052212059, 0.73036242, -0.81216097, -0.80088139,
-0.23420811, -0.39647382, 0.31423986, 0.61819065, -0.73659575,
-0.89698344, -0.8931554, -0.0845688, 0.5617367, 0.38415289,
-0.11487955, -0.7617774, 0.17927337, 0.15726972, 0.059798479,
0.19009054, -0.27616632, -0.39142907, 0.77744663, -0.046830714,
-0.6603595, 0.21945822, 0.051494241, 0.23785079, 0.19239247,
-0.53268754, 0.65961659, -0.85981959, -0.80232513, 0.84745562,
-0.66070104, -0.036533296, -0.54901814, 0.65353882, -0.41834265,
-0.28561389, 0.75655544, -0.31149811, 0.62981737, 0.31829214,
-0.92734522, -0.48506218, 0.55651462, 0.25192821, 0.67220747,
-0.3836869, -0.55798125, -0.60395885, 0.22488403, -0.78053463,
0.3492105, 0.56452453, 0.4389236, -0.59929526, -0.19762468,
-0.36868393, -0.13198286, -0.53800809, -0.22850353};
std::initializer_list<float> dnn_biases = {0.29177809, -0.98799044,
0.065919638, 0.68781924};
rnn.SetFwWeights(weights);
rnn.SetBwWeights(weights);
rnn.SetFwBias(biases);
rnn.SetBwBias(biases);
rnn.SetFwRecurrentWeights(recurrent_weights);
rnn.SetBwRecurrentWeights(recurrent_weights);
const int input_sequence_size = rnn.input_size() * rnn.sequence_len();
const int output_sequence_size = output_size * rnn.sequence_len();
const int num_examples = 64;
for (int k = 0; k < num_examples; k++) {
float* batch_start = endtoend_input + k * input_sequence_size;
float* batch_end = batch_start + input_sequence_size;
rnn.SetInput(0, batch_start, batch_end);
ASSERT_EQ(rnn.Invoke(), kTfLiteOk);
std::vector<float> fw_output = rnn.GetFwOutput();
std::vector<float> bw_output = rnn.GetBwOutput();
EXPECT_EQ(fw_output.size(), bw_output.size());
std::transform(fw_output.begin(), fw_output.end(), bw_output.begin(),
fw_output.begin(), std::plus<float>());
std::vector<float> sequence_result;
for (int s = 0; s < rnn.sequence_len(); s++) {
const float* rnn_output = fw_output.data() + s * rnn.num_fw_units();
std::vector<float> results(dnn_biases);
for (int i = 0; i < output_size; i++) {
for (int j = 0; j < rnn.num_fw_units(); j++) {
results[i] += *(rnn_output + j) * dnn_weights[output_size * j + i];
}
}
sequence_result.insert(sequence_result.end(), results.begin(),
results.end());
}
float* golden_start = golden_endtoend_output + k * output_sequence_size;
float* golden_end = golden_start + output_sequence_size;
std::vector<float> expected;
expected.insert(expected.end(), golden_start, golden_end);
EXPECT_THAT(sequence_result, ElementsAreArray(ArrayFloatNear(expected)));
}
}
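// Without cross-linking, the forward layer consumes the regular input and the
// backward layer consumes the auxiliary input, so feeding the same data to
// both must reproduce both golden outputs.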
TEST(BidirectionalRNNOpTest, ClosedBoxTestNoCrossLinkingRegularAndAuxInput) {
  BidirectionalRNNOpModel rnn(/*batches=*/2, /*sequence_len=*/16,
                              /*fw_units=*/16, /*bw_units=*/16,
                              /*input_size=*/8, /*aux_input_size=*/8,
                              AuxInputMode::kNoCrossLinking,
                              /*time_major=*/true,
                              /*merge_outputs=*/false);
rnn.SetFwWeights(weights);
rnn.SetBwWeights(weights);
rnn.SetFwBias(biases);
rnn.SetBwBias(biases);
rnn.SetFwRecurrentWeights(recurrent_weights);
rnn.SetBwRecurrentWeights(recurrent_weights);
for (int i = 0; i < rnn.sequence_len(); i++) {
float* batch_start = rnn_input + i * rnn.input_size();
float* batch_end = batch_start + rnn.input_size();
rnn.SetInput(2 * i * rnn.input_size(), batch_start, batch_end);
rnn.SetAuxInput(2 * i * rnn.input_size(), batch_start, batch_end);
rnn.SetInput((2 * i + 1) * rnn.input_size(), batch_start, batch_end);
rnn.SetAuxInput((2 * i + 1) * rnn.input_size(), batch_start, batch_end);
}
ASSERT_EQ(rnn.Invoke(), kTfLiteOk);
std::vector<float> fw_expected;
std::vector<float> bw_expected;
for (int i = 0; i < rnn.sequence_len(); i++) {
float* golden_fw_start = rnn_golden_fw_output + i * rnn.num_fw_units();
float* golden_fw_end = golden_fw_start + rnn.num_fw_units();
fw_expected.insert(fw_expected.end(), golden_fw_start, golden_fw_end);
fw_expected.insert(fw_expected.end(), golden_fw_start, golden_fw_end);
float* golden_bw_start = rnn_golden_bw_output + i * rnn.num_fw_units();
float* golden_bw_end = golden_bw_start + rnn.num_fw_units();
bw_expected.insert(bw_expected.end(), golden_bw_start, golden_bw_end);
bw_expected.insert(bw_expected.end(), golden_bw_start, golden_bw_end);
}
EXPECT_THAT(rnn.GetFwOutput(), ElementsAreArray(ArrayFloatNear(fw_expected)));
EXPECT_THAT(rnn.GetBwOutput(), ElementsAreArray(ArrayFloatNear(bw_expected)));
}
TEST(BidirectionalRNNOpTest, ClosedBoxTestNoCrossLinkingRegularInputOnly) {
  BidirectionalRNNOpModel rnn(/*batches=*/2, /*sequence_len=*/16,
                              /*fw_units=*/16, /*bw_units=*/16,
                              /*input_size=*/8, /*aux_input_size=*/8,
                              AuxInputMode::kNoCrossLinking,
                              /*time_major=*/true,
                              /*merge_outputs=*/false);
rnn.SetFwWeights(weights);
rnn.SetBwWeights(weights);
rnn.SetFwBias(biases);
rnn.SetBwBias(biases);
rnn.SetFwRecurrentWeights(recurrent_weights);
rnn.SetBwRecurrentWeights(recurrent_weights);
std::vector<float> bw_inputs(rnn.input_size(), 0);
for (int i = 0; i < rnn.sequence_len(); i++) {
float* batch_start = rnn_input + i * rnn.input_size();
float* batch_end = batch_start + rnn.input_size();
rnn.SetInput(2 * i * rnn.input_size(), batch_start, batch_end);
rnn.SetAuxInput(2 * i * rnn.input_size(), &bw_inputs[0],
&bw_inputs[bw_inputs.size() - 1]);
rnn.SetInput((2 * i + 1) * rnn.input_size(), batch_start, batch_end);
rnn.SetAuxInput((2 * i + 1) * rnn.input_size(), &bw_inputs[0],
&bw_inputs[bw_inputs.size() - 1]);
}
ASSERT_EQ(rnn.Invoke(), kTfLiteOk);
std::vector<float> fw_expected;
for (int i = 0; i < rnn.sequence_len(); i++) {
float* golden_fw_start = rnn_golden_fw_output + i * rnn.num_fw_units();
float* golden_fw_end = golden_fw_start + rnn.num_fw_units();
fw_expected.insert(fw_expected.end(), golden_fw_start, golden_fw_end);
fw_expected.insert(fw_expected.end(), golden_fw_start, golden_fw_end);
}
EXPECT_THAT(rnn.GetFwOutput(), ElementsAreArray(ArrayFloatNear(fw_expected)));
}
TEST(BidirectionalRNNOpTest, ClosedBoxTestNoCrossLinkingAuxInputOnly) {
  BidirectionalRNNOpModel rnn(/*batches=*/2, /*sequence_len=*/16,
                              /*fw_units=*/16, /*bw_units=*/16,
                              /*input_size=*/8, /*aux_input_size=*/8,
                              AuxInputMode::kNoCrossLinking,
                              /*time_major=*/true,
                              /*merge_outputs=*/false);
rnn.SetFwWeights(weights);
rnn.SetBwWeights(weights);
rnn.SetFwBias(biases);
rnn.SetBwBias(biases);
rnn.SetFwRecurrentWeights(recurrent_weights);
rnn.SetBwRecurrentWeights(recurrent_weights);
std::vector<float> fw_inputs(rnn.input_size(), 0);
for (int i = 0; i < rnn.sequence_len(); i++) {
float* batch_start = rnn_input + i * rnn.input_size();
float* batch_end = batch_start + rnn.input_size();
rnn.SetAuxInput(2 * i * rnn.input_size(), batch_start, batch_end);
rnn.SetInput(2 * i * rnn.input_size(), &fw_inputs[0],
&fw_inputs[fw_inputs.size() - 1]);
rnn.SetAuxInput((2 * i + 1) * rnn.input_size(), batch_start, batch_end);
rnn.SetInput((2 * i + 1) * rnn.input_size(), &fw_inputs[0],
&fw_inputs[fw_inputs.size() - 1]);
}
ASSERT_EQ(rnn.Invoke(), kTfLiteOk);
std::vector<float> bw_expected;
for (int i = 0; i < rnn.sequence_len(); i++) {
float* golden_bw_start = rnn_golden_bw_output + i * rnn.num_fw_units();
float* golden_bw_end = golden_bw_start + rnn.num_fw_units();
bw_expected.insert(bw_expected.end(), golden_bw_start, golden_bw_end);
bw_expected.insert(bw_expected.end(), golden_bw_start, golden_bw_end);
}
EXPECT_THAT(rnn.GetBwOutput(), ElementsAreArray(ArrayFloatNear(bw_expected)));
}
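// With cross-linking, the auxiliary input reaches both layers through the
// auxiliary weights; zeroing the regular weights isolates that path, so the
// golden outputs must still be reproduced.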
TEST(BidirectionalRNNOpTest, ClosedBoxTestCrossLinkingAuxInputOnly) {
  BidirectionalRNNOpModel rnn(/*batches=*/2, /*sequence_len=*/16,
                              /*fw_units=*/16, /*bw_units=*/16,
                              /*input_size=*/8, /*aux_input_size=*/8,
                              AuxInputMode::kCrossLinking,
                              /*time_major=*/false,
                              /*merge_outputs=*/false);
rnn.SetFwWeights(std::vector<float>(weights.size(), 0.0));
rnn.SetBwWeights(std::vector<float>(weights.size(), 0.0));
rnn.SetFwBias(biases);
rnn.SetBwBias(biases);
rnn.SetFwRecurrentWeights(recurrent_weights);
rnn.SetBwRecurrentWeights(recurrent_weights);
rnn.SetAuxFwWeights(weights);
rnn.SetAuxBwWeights(weights);
const int input_sequence_size = rnn.input_size() * rnn.sequence_len();
std::vector<float> zero_input(input_sequence_size, 0.f);
float* batch_start = rnn_input;
float* batch_end = batch_start + input_sequence_size;
rnn.SetInput(0, zero_input.data(), zero_input.data() + zero_input.size());
rnn.SetAuxInput(0, batch_start, batch_end);
rnn.SetInput(input_sequence_size, zero_input.data(),
zero_input.data() + zero_input.size());
rnn.SetAuxInput(input_sequence_size, batch_start, batch_end);
ASSERT_EQ(rnn.Invoke(), kTfLiteOk);
float* golden_fw_start = rnn_golden_fw_output;
float* golden_fw_end =
golden_fw_start + rnn.num_fw_units() * rnn.sequence_len();
std::vector<float> fw_expected;
fw_expected.insert(fw_expected.end(), golden_fw_start, golden_fw_end);
fw_expected.insert(fw_expected.end(), golden_fw_start, golden_fw_end);
EXPECT_THAT(rnn.GetFwOutput(), ElementsAreArray(ArrayFloatNear(fw_expected)));
float* golden_bw_start = rnn_golden_bw_output;
float* golden_bw_end =
golden_bw_start + rnn.num_bw_units() * rnn.sequence_len();
std::vector<float> bw_expected;
bw_expected.insert(bw_expected.end(), golden_bw_start, golden_bw_end);
bw_expected.insert(bw_expected.end(), golden_bw_start, golden_bw_end);
EXPECT_THAT(rnn.GetBwOutput(), ElementsAreArray(ArrayFloatNear(bw_expected)));
}
TEST(BidirectionalRNNOpTest, ClosedBoxTestCrossLinkingAuxInputOnlyTimeMajor) {
  BidirectionalRNNOpModel rnn(/*batches=*/2, /*sequence_len=*/16,
                              /*fw_units=*/16, /*bw_units=*/16,
                              /*input_size=*/8, /*aux_input_size=*/8,
                              AuxInputMode::kCrossLinking,
                              /*time_major=*/true,
                              /*merge_outputs=*/false);
rnn.SetFwWeights(std::vector<float>(weights.size(), 0.0));
rnn.SetBwWeights(std::vector<float>(weights.size(), 0.0));
rnn.SetFwBias(biases);
rnn.SetBwBias(biases);
rnn.SetFwRecurrentWeights(recurrent_weights);
rnn.SetBwRecurrentWeights(recurrent_weights);
rnn.SetAuxFwWeights(weights);
rnn.SetAuxBwWeights(weights);
std::vector<float> zero_input(rnn.input_size(), 0.f);
for (int i = 0; i < rnn.sequence_len(); i++) {
float* batch_start = rnn_input + i * rnn.input_size();
float* batch_end = batch_start + rnn.input_size();
rnn.SetInput(2 * i * rnn.input_size(), &zero_input.front(),
&zero_input.back() + 1);
rnn.SetAuxInput(2 * i * rnn.input_size(), batch_start, batch_end);
rnn.SetInput((2 * i + 1) * rnn.input_size(), &zero_input.front(),
&zero_input.back() + 1);
rnn.SetAuxInput((2 * i + 1) * rnn.input_size(), batch_start, batch_end);
}
ASSERT_EQ(rnn.Invoke(), kTfLiteOk);
std::vector<float> fw_expected;
for (int i = 0; i < rnn.sequence_len(); i++) {
float* golden_fw_start = rnn_golden_fw_output + i * rnn.num_fw_units();
float* golden_fw_end = golden_fw_start + rnn.num_fw_units();
fw_expected.insert(fw_expected.end(), golden_fw_start, golden_fw_end);
fw_expected.insert(fw_expected.end(), golden_fw_start, golden_fw_end);
}
EXPECT_THAT(rnn.GetFwOutput(), ElementsAreArray(ArrayFloatNear(fw_expected)));
}
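// The offset arithmetic used by the time-major tests, factored out for
// clarity: the flat input layout is [sequence_len][batch][features], so the
// slice for (timestep t, batch b) starts at (t * num_batches + b) * size.
// Hypothetical helper (a sketch; the tests inline this arithmetic):
inline int TimeMajorOffset(int t, int b, int num_batches, int size) {
  return (t * num_batches + b) * size;
}
// e.g. TimeMajorOffset(i, 1, 2, rnn.input_size()) == (2 * i + 1) * rnn.input_size().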
void run_closedbox_test_with_input_split(int input_size, int aux_input_size) {
const int num_units = 16;
  BidirectionalRNNOpModel rnn(/*batches=*/2, /*sequence_len=*/16,
                              /*fw_units=*/num_units, /*bw_units=*/num_units,
                              input_size, aux_input_size,
                              AuxInputMode::kCrossLinking,
                              /*time_major=*/false,
                              /*merge_outputs=*/false);
std::vector<float> reg_weights(num_units * rnn.input_size());
std::vector<float> aux_weights(num_units * rnn.aux_input_size());
int full_weights_size = weights.size();
int reg_weights_offset = 0;
int aux_weights_offset = 0;
int weights_offset = 0;
while (weights_offset < full_weights_size) {
std::copy(weights.begin() + weights_offset,
weights.begin() + weights_offset + rnn.input_size(),
reg_weights.begin() + reg_weights_offset);
weights_offset += rnn.input_size();
reg_weights_offset += rnn.input_size();
std::copy(weights.begin() + weights_offset,
weights.begin() + weights_offset + rnn.aux_input_size(),
aux_weights.begin() + aux_weights_offset);
weights_offset += rnn.aux_input_size();
aux_weights_offset += rnn.aux_input_size();
}
rnn.SetFwWeights(reg_weights);
rnn.SetBwWeights(reg_weights);
rnn.SetFwBias(biases);
rnn.SetBwBias(biases);
rnn.SetFwRecurrentWeights(recurrent_weights);
rnn.SetBwRecurrentWeights(recurrent_weights);
rnn.SetAuxFwWeights(aux_weights);
rnn.SetAuxBwWeights(aux_weights);
int full_input_size =
(rnn.input_size() + rnn.aux_input_size()) * rnn.sequence_len();
int reg_input_offset = 0;
int aux_input_offset = 0;
for (int batch = 0; batch < 2; ++batch) {
int input_offset = 0;
while (input_offset < full_input_size) {
rnn.SetInput(reg_input_offset, rnn_input + input_offset,
rnn_input + input_offset + rnn.input_size());
input_offset += rnn.input_size();
reg_input_offset += rnn.input_size();
rnn.SetAuxInput(aux_input_offset, rnn_input + input_offset,
rnn_input + input_offset + rnn.aux_input_size());
input_offset += rnn.aux_input_size();
aux_input_offset += rnn.aux_input_size();
}
}
ASSERT_EQ(rnn.Invoke(), kTfLiteOk);
float* golden_fw_start = rnn_golden_fw_output;
float* golden_fw_end =
golden_fw_start + rnn.num_fw_units() * rnn.sequence_len();
std::vector<float> fw_expected;
fw_expected.insert(fw_expected.end(), golden_fw_start, golden_fw_end);
fw_expected.insert(fw_expected.end(), golden_fw_start, golden_fw_end);
EXPECT_THAT(rnn.GetFwOutput(), ElementsAreArray(ArrayFloatNear(fw_expected)));
float* golden_bw_start = rnn_golden_bw_output;
float* golden_bw_end =
golden_bw_start + rnn.num_bw_units() * rnn.sequence_len();
std::vector<float> bw_expected;
bw_expected.insert(bw_expected.end(), golden_bw_start, golden_bw_end);
bw_expected.insert(bw_expected.end(), golden_bw_start, golden_bw_end);
EXPECT_THAT(rnn.GetBwOutput(), ElementsAreArray(ArrayFloatNear(bw_expected)));
}
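// The splitting loop above carves each row of the reference `weights` into a
// regular part followed by an aux part, so it only makes sense when
// input_size + aux_input_size equals the reference row width (8 floats per
// unit in this suite). Hypothetical precondition check (a sketch):
inline bool IsValidInputSplit(int input_size, int aux_input_size) {
  constexpr int kReferenceRowWidth = 8;  // input width of the golden weights
  return input_size + aux_input_size == kReferenceRowWidth;
}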
TEST(BidirectionalRNNOpTest,
ClosedBoxTestCrossLinkingRegularAndAuxInputEvenSplit) {
run_closedbox_test_with_input_split(4, 4);
}
TEST(BidirectionalRNNOpTest,
ClosedBoxTestCrossLinkingRegularAndAuxInputUnevenSplit) {
run_closedbox_test_with_input_split(2, 6);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/bidirectional_sequence_rnn.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/bidirectional_sequence_rnn_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
9d49cf0e-1088-4c54-b0ca-15e68adff234 | cpp | google/arolla | raw_buffer_factory | arolla/memory/raw_buffer_factory.cc | arolla/memory/raw_buffer_factory_test.cc | #include "arolla/memory/raw_buffer_factory.h"
#include <algorithm>
#include <cstddef>
#include <cstdlib>
#include <cstring>
#include <memory>
#include <tuple>
#include <utility>
#include "absl/base/attributes.h"
#include "absl/base/dynamic_annotations.h"
#include "absl/base/optimization.h"
#include "absl/log/check.h"
namespace arolla {
namespace {
void noop_free(void*) noexcept {}
void AnnotateMemoryIsInitialized(void* data, size_t size) {
ABSL_ANNOTATE_MEMORY_IS_INITIALIZED(data, size);
}
}
std::tuple<RawBufferPtr, void*> HeapBufferFactory::CreateRawBuffer(
size_t nbytes) {
if (ABSL_PREDICT_FALSE(nbytes == 0)) return {nullptr, nullptr};
void* data = malloc(nbytes);
AnnotateMemoryIsInitialized(data, nbytes);
return {std::shared_ptr<void>(data, free), data};
}
std::tuple<RawBufferPtr, void*> HeapBufferFactory::ReallocRawBuffer(
RawBufferPtr&& old_buffer, void* old_data, size_t old_size,
size_t new_size) {
if (new_size == 0) return {nullptr, nullptr};
if (old_size == 0) return CreateRawBuffer(new_size);
DCHECK_EQ(old_buffer.use_count(), 1);
void* new_data = realloc(old_data, new_size);
if (new_size > old_size) {
AnnotateMemoryIsInitialized(static_cast<char*>(new_data) + old_size,
new_size - old_size);
}
*std::get_deleter<decltype(&free)>(old_buffer) = &noop_free;
old_buffer.reset(new_data, free);
return {std::move(old_buffer), new_data};
}
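// The deleter swap above is the subtle step: after realloc() the old pointer
// may be freed or relocated, so the original shared_ptr must be disarmed
// before it is rebound. A minimal standalone illustration of the same trick
// (a sketch; error handling for a failed realloc is elided):
inline void DeleterSwapSketch() {
  void* p = malloc(16);
  std::shared_ptr<void> owner(p, free);
  void* q = realloc(p, 32);  // `p` may be dangling from here on.
  *std::get_deleter<decltype(&free)>(owner) = &noop_free;  // Disarm old free.
  owner.reset(q, free);  // Re-arm ownership on the (possibly moved) block.
}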
std::tuple<RawBufferPtr, void*> ProtobufArenaBufferFactory::CreateRawBuffer(
size_t nbytes) {
char* data = arena_.CreateArray<char>(&arena_, nbytes);
AnnotateMemoryIsInitialized(data, nbytes);
return {nullptr, data};
}
std::tuple<RawBufferPtr, void*> ProtobufArenaBufferFactory::ReallocRawBuffer(
RawBufferPtr&& old_buffer, void* data, size_t old_size, size_t new_size) {
if (old_size >= new_size) return {nullptr, data};
char* new_data = arena_.CreateArray<char>(&arena_, new_size);
memcpy(new_data, data, std::min(old_size, new_size));
AnnotateMemoryIsInitialized(new_data + old_size, new_size - old_size);
return {nullptr, new_data};
}
std::tuple<RawBufferPtr, void*> UnsafeArenaBufferFactory::CreateRawBuffer(
size_t nbytes) {
auto last_alloc =
reinterpret_cast<char*>(reinterpret_cast<size_t>(current_ + 7) & ~7ull);
if (ABSL_PREDICT_FALSE(last_alloc + nbytes > end_)) {
return {nullptr, SlowAlloc(nbytes)};
}
current_ = last_alloc + nbytes;
return {nullptr, last_alloc};
}
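// The bump-pointer rounding above aligns every arena allocation to 8 bytes:
// (addr + 7) & ~7 rounds an address up to the next multiple of 8 and leaves
// exact multiples unchanged. Standalone sketch of the same arithmetic:
inline size_t AlignUp8(size_t addr) {
  return (addr + 7) & ~static_cast<size_t>(7);
}
// AlignUp8(0) == 0, AlignUp8(1) == 8, AlignUp8(8) == 8, AlignUp8(9) == 16.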
std::tuple<RawBufferPtr, void*> UnsafeArenaBufferFactory::ReallocRawBuffer(
    RawBufferPtr&& old_buffer, void* data, size_t old_size, size_t new_size) {
  char* last_alloc = current_ - old_size;
  if ((data != last_alloc) || last_alloc + new_size > end_) {
    // No in-place resize: `data` is not the most recent allocation, or the
    // grown buffer would overrun the current page.
    if (old_size >= new_size) return {nullptr, data};
    if (data == last_alloc) current_ = last_alloc;  // Reclaim before moving.
    void* new_data = SlowAlloc(new_size);
    memcpy(new_data, data, std::min(old_size, new_size));
    AnnotateMemoryIsInitialized(data, old_size);
    return {nullptr, new_data};
  }
  // The most recent allocation grows or shrinks in place: just move the bump
  // pointer.
  current_ = last_alloc + new_size;
  if (new_size < old_size) {
    AnnotateMemoryIsInitialized(current_, old_size - new_size);
  }
  return {nullptr, last_alloc};
}
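// Consequence of the branches above: only the most recent allocation can be
// resized in place; anything else falls back to allocate-and-copy. Usage
// sketch from a caller's perspective (hypothetical, not part of this file):
inline void GrowLastAllocationSketch(UnsafeArenaBufferFactory& arena) {
  auto [buf, data] = arena.CreateRawBuffer(16);
  auto [buf2, data2] = arena.ReallocRawBuffer(std::move(buf), data, 16, 32);
  // As long as 32 bytes still fit in the current page, data2 == data and the
  // grow was a pure bump of `current_`.
  (void)buf2;
  (void)data2;
}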
void UnsafeArenaBufferFactory::Reset() {
if (page_id_ >= 0) {
page_id_ = 0;
current_ = reinterpret_cast<char*>(std::get<1>(pages_[0]));
AnnotateMemoryIsInitialized(current_, page_size_);
end_ = current_ + page_size_;
}
big_allocs_.clear();
}
ABSL_ATTRIBUTE_NOINLINE void* UnsafeArenaBufferFactory::SlowAlloc(
size_t nbytes) {
if (ABSL_PREDICT_FALSE(nbytes > page_size_ ||
end_ - current_ >= page_size_ / 2)) {
auto [holder, memory] = base_factory_.CreateRawBuffer(nbytes);
AnnotateMemoryIsInitialized(memory, nbytes);
big_allocs_.emplace_back(std::move(holder), memory);
return memory;
}
NextPage();
auto last_alloc = current_;
current_ += nbytes;
return last_alloc;
}
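// The condition above, restated: take a standalone "big alloc" from the base
// factory either when the request can never fit in a page, or when the
// current page is still at least half empty (abandoning it would waste too
// much); otherwise open the next page. Hypothetical predicate mirroring it:
inline bool TakesBigAllocPath(size_t nbytes, size_t page_size,
                              size_t bytes_left_in_page) {
  return nbytes > page_size || bytes_left_in_page >= page_size / 2;
}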
void UnsafeArenaBufferFactory::NextPage() {
++page_id_;
if (ABSL_PREDICT_FALSE(page_id_ == pages_.size())) {
auto [holder, page] = base_factory_.CreateRawBuffer(page_size_);
current_ = reinterpret_cast<char*>(page);
pages_.emplace_back(std::move(holder), page);
} else {
current_ = reinterpret_cast<char*>(std::get<1>(pages_[page_id_]));
}
AnnotateMemoryIsInitialized(current_, page_size_);
end_ = current_ + page_size_;
}
} | #include "arolla/memory/raw_buffer_factory.h"
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <utility>
#include <vector>
#include "benchmark/benchmark.h"
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "google/protobuf/arena.h"
namespace arolla {
namespace {
using ::testing::AnyOf;
using ::testing::Eq;
using ::testing::Ge;
using ::testing::Le;
// Touches every byte of `ptr`; under MSAN this read only passes because the
// factories annotate freshly allocated memory as initialized.
void VerifyCanReadUninitialized(const void* ptr, size_t size) {
  const char* char_ptr = static_cast<const char*>(ptr);
  for (size_t i = 0; i != size; ++i) {
    char c = *(char_ptr + i);
    benchmark::DoNotOptimize(c);  // Keeps the read from being optimized away.
  }
}
TEST(HeapBufferFactory, CreateEmptyBuffer) {
auto [buf, data] = GetHeapBufferFactory()->CreateRawBuffer(0);
EXPECT_EQ(buf, nullptr);
EXPECT_EQ(data, nullptr);
}
TEST(HeapBufferFactory, CreateRawBuffer) {
const size_t size = 13;
auto [buf, data] = GetHeapBufferFactory()->CreateRawBuffer(size);
EXPECT_NE(buf, nullptr);
VerifyCanReadUninitialized(data, size);
EXPECT_EQ(reinterpret_cast<size_t>(data) & 7, 0);
memset(data, 0, size);
}
TEST(HeapBufferFactory, ReallocRawBuffer) {
size_t size = 13;
RawBufferPtr buf;
char* data;
{
auto res = GetHeapBufferFactory()->CreateRawBuffer(size);
buf = std::get<0>(res);
data = reinterpret_cast<char*>(std::get<1>(res));
VerifyCanReadUninitialized(data, size);
}
auto resize_fn = [&](size_t new_size) {
auto res = GetHeapBufferFactory()->ReallocRawBuffer(std::move(buf), data,
size, new_size);
buf = std::get<0>(res);
data = reinterpret_cast<char*>(std::get<1>(res));
size = new_size;
};
data[0] = 5;
resize_fn(4);
EXPECT_EQ(data[0], 5);
VerifyCanReadUninitialized(data + 1, size - 1);
resize_fn(145);
EXPECT_EQ(data[0], 5);
VerifyCanReadUninitialized(data + 1, 144);
}
TEST(ProtobufArenaBufferFactory, CreateAndResize) {
google::protobuf::Arena arena;
ProtobufArenaBufferFactory buf_factory(arena);
auto [buf1, data1] = buf_factory.CreateRawBuffer(2);
VerifyCanReadUninitialized(data1, 2);
char* d = reinterpret_cast<char*>(data1);
d[0] = 'A';
d[1] = 'B';
auto [buf2, data2] =
buf_factory.ReallocRawBuffer(std::move(buf1), data1, 2, 1);
EXPECT_EQ(data1, data2);
auto [buf3, data3] =
buf_factory.ReallocRawBuffer(std::move(buf2), data2, 1, 3);
EXPECT_NE(data2, data3);
d = reinterpret_cast<char*>(data3);
EXPECT_EQ(d[0], 'A');
VerifyCanReadUninitialized(d + 1, 2);
}
TEST(UnsafeArenaBufferFactory, CreateEmptyBuffer) {
UnsafeArenaBufferFactory arena(25);
auto [buf1, data1] = arena.CreateRawBuffer(0);
auto [buf2, data2] = arena.CreateRawBuffer(0);
auto [buf3, data3] = arena.CreateRawBuffer(1);
VerifyCanReadUninitialized(data3, 1);
auto [buf4, data4] = arena.CreateRawBuffer(0);
auto [buf5, data5] = arena.CreateRawBuffer(0);
EXPECT_EQ(data1, data2);
EXPECT_NE(data3, nullptr);
EXPECT_NE(data2, data4);
EXPECT_NE(data3, data4);
EXPECT_EQ(data4, data5);
}
TEST(UnsafeArenaBufferFactory, CreateRawBuffer) {
std::vector<int64_t> sizes = {17, 1, 15, 1, 10};
std::vector<RawBufferPtr> bufs;
std::vector<char*> ptrs;
bufs.reserve(sizes.size());
ptrs.reserve(sizes.size());
UnsafeArenaBufferFactory arena1(25);
google::protobuf::Arena proto_arena;
ProtobufArenaBufferFactory proto_buf_factory(proto_arena);
UnsafeArenaBufferFactory arena2(25, proto_buf_factory);
for (UnsafeArenaBufferFactory* arena_ptr : {&arena1, &arena2}) {
UnsafeArenaBufferFactory& arena = *arena_ptr;
for (size_t i = 0; i < sizes.size(); ++i) {
auto [buf, data] = arena.CreateRawBuffer(sizes[i]);
VerifyCanReadUninitialized(data, sizes[i]);
EXPECT_EQ(reinterpret_cast<size_t>(data) & 7, 0);
memset(data, i, sizes[i]);
bufs.push_back(buf);
ptrs.push_back(reinterpret_cast<char*>(data));
}
EXPECT_EQ(ptrs[0] + 24, ptrs[1]);
EXPECT_EQ(ptrs[2] + 16, ptrs[3]);
for (size_t i = 0; i < sizes.size(); ++i) {
for (int64_t j = 0; j < sizes[i]; ++j) {
EXPECT_EQ(ptrs[i][j], i);
}
}
}
}
TEST(UnsafeArenaBufferFactory, ReallocRawBuffer) {
UnsafeArenaBufferFactory arena1(25);
google::protobuf::Arena proto_arena;
ProtobufArenaBufferFactory proto_buf_factory(proto_arena);
UnsafeArenaBufferFactory arena2(25, proto_buf_factory);
for (UnsafeArenaBufferFactory* arena_ptr : {&arena1, &arena2}) {
UnsafeArenaBufferFactory& arena = *arena_ptr;
auto [buf1, data1] = arena.CreateRawBuffer(10);
VerifyCanReadUninitialized(data1, 10);
EXPECT_EQ(buf1, nullptr);
reinterpret_cast<char*>(data1)[0] = 7;
auto [buf2, data2] = arena.ReallocRawBuffer(std::move(buf1), data1, 10, 25);
reinterpret_cast<char*>(data1)[24] = -1;
EXPECT_EQ(reinterpret_cast<char*>(data2)[0], 7);
EXPECT_EQ(data1, data2);
auto [buf3, data3] = arena.ReallocRawBuffer(std::move(buf2), data2, 25, 26);
VerifyCanReadUninitialized(data2, 25);
EXPECT_NE(data1, data3);
EXPECT_EQ(reinterpret_cast<char*>(data3)[0], 7);
auto [buf4, data4] = arena.ReallocRawBuffer(std::move(buf3), data3, 26, 10);
EXPECT_NE(data1, data4);
EXPECT_EQ(reinterpret_cast<char*>(data4)[0], 7);
auto [buf5, data5] = arena.CreateRawBuffer(20);
VerifyCanReadUninitialized(data5, 20);
auto [buf6, data6] = arena.ReallocRawBuffer(std::move(buf5), data5, 20, 15);
VerifyCanReadUninitialized(static_cast<const char*>(data6) + 15, 5);
EXPECT_EQ(data1, data5);
EXPECT_EQ(data1, data6);
auto [buf7, data7] = arena.CreateRawBuffer(8);
VerifyCanReadUninitialized(data7, 8);
EXPECT_EQ(reinterpret_cast<char*>(data1) + 16,
reinterpret_cast<char*>(data7));
reinterpret_cast<char*>(data7)[0] = 3;
auto [buf8, data8] = arena.ReallocRawBuffer(std::move(buf7), data7, 8, 20);
EXPECT_EQ(reinterpret_cast<char*>(data8)[0], 3);
auto [buf9, data9] = arena.CreateRawBuffer(1);
VerifyCanReadUninitialized(data9, 1);
EXPECT_EQ(reinterpret_cast<char*>(data8) + 24,
reinterpret_cast<char*>(data9));
}
}
TEST(UnsafeArenaBufferFactory, BigAlloc) {
UnsafeArenaBufferFactory arena1(32);
google::protobuf::Arena proto_arena;
ProtobufArenaBufferFactory proto_buf_factory(proto_arena);
UnsafeArenaBufferFactory arena2(32, proto_buf_factory);
for (UnsafeArenaBufferFactory* arena_ptr : {&arena1, &arena2}) {
UnsafeArenaBufferFactory& arena = *arena_ptr;
auto [buf1, data1] = arena.CreateRawBuffer(16);
VerifyCanReadUninitialized(data1, 16);
auto [buf2, data2] = arena.CreateRawBuffer(64);
VerifyCanReadUninitialized(data2, 64);
auto [buf3, data3] = arena.CreateRawBuffer(16);
VerifyCanReadUninitialized(data3, 16);
EXPECT_THAT(reinterpret_cast<char*>(data3),
Eq(reinterpret_cast<char*>(data1) + 16));
EXPECT_THAT(reinterpret_cast<char*>(data2) - reinterpret_cast<char*>(data1),
AnyOf(Le(-64), Ge(32)));
memset(data2, 0, 64);
EXPECT_THAT(reinterpret_cast<int64_t*>(data2)[0], Eq(0));
}
}
TEST(UnsafeArenaBufferFactory, Reset) {
UnsafeArenaBufferFactory arena1(32);
google::protobuf::Arena proto_arena;
ProtobufArenaBufferFactory proto_buf_factory(proto_arena);
UnsafeArenaBufferFactory arena2(32, proto_buf_factory);
for (UnsafeArenaBufferFactory* arena_ptr : {&arena1, &arena2}) {
UnsafeArenaBufferFactory& arena = *arena_ptr;
arena.Reset();
auto [buf1, data1] = arena.CreateRawBuffer(16);
VerifyCanReadUninitialized(data1, 16);
auto [buf2, data2] = arena.CreateRawBuffer(16);
VerifyCanReadUninitialized(data2, 16);
auto [buf3, data3] = arena.CreateRawBuffer(16);
VerifyCanReadUninitialized(data3, 16);
std::memset(data1, 255, 16);
std::memset(data2, 255, 16);
std::memset(data3, 255, 16);
arena.Reset();
auto [buf4, data4] = arena.CreateRawBuffer(8);
VerifyCanReadUninitialized(data4, 16);
auto [buf5, data5] = arena.CreateRawBuffer(16);
VerifyCanReadUninitialized(data5, 16);
auto [buf6, data6] = arena.CreateRawBuffer(24);
VerifyCanReadUninitialized(data6, 16);
EXPECT_EQ(data1, data4);
EXPECT_EQ(reinterpret_cast<char*>(data2),
reinterpret_cast<char*>(data5) + 8);
EXPECT_EQ(data3, data6);
}
}
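// Reset() rewinds the arena to its first page and drops big allocations, so
// pointers handed out before the call are reused afterwards -- exactly the
// reuse the equality checks above rely on. Typical per-iteration usage
// (a hypothetical sketch):
inline void PerRequestArenaSketch(UnsafeArenaBufferFactory& arena) {
  for (int i = 0; i < 3; ++i) {
    auto [buf, data] = arena.CreateRawBuffer(64);
    // ... fill and consume `data` strictly within this iteration ...
    (void)buf;
    (void)data;
    arena.Reset();  // Invalidates `data`; the next iteration reuses the page.
  }
}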
TEST(UnsafeArenaBufferFactory, BaseFactory) {
UnsafeArenaBufferFactory arena1(1024);
auto [buf_before, ptr_before] = arena1.CreateRawBuffer(1);
UnsafeArenaBufferFactory arena2(32, arena1);
auto [buf_small, ptr_small] = arena2.CreateRawBuffer(8);
auto [buf_big, ptr_big] = arena2.CreateRawBuffer(128);
auto [buf_after, ptr_after] = arena1.CreateRawBuffer(1);
EXPECT_LT(ptr_before, ptr_small);
EXPECT_LT(ptr_before, ptr_big);
EXPECT_GT(ptr_after, ptr_small);
EXPECT_GT(ptr_after, ptr_big);
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/memory/raw_buffer_factory.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/memory/raw_buffer_factory_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
7aae3b80-e7af-4d6f-83db-331cfbf07047 | cpp | abseil/abseil-cpp | log_format | absl/log/internal/log_format.cc | absl/log/log_format_test.cc | #include "absl/log/internal/log_format.h"
#include <string.h>
#ifdef _MSC_VER
#include <winsock2.h>
#else
#include <sys/time.h>
#endif
#include <cstddef>
#include <cstdint>
#include <limits>
#include <string>
#include <type_traits>
#include "absl/base/config.h"
#include "absl/base/log_severity.h"
#include "absl/base/optimization.h"
#include "absl/log/internal/append_truncated.h"
#include "absl/log/internal/config.h"
#include "absl/log/internal/globals.h"
#include "absl/strings/numbers.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "absl/time/civil_time.h"
#include "absl/time/time.h"
#include "absl/types/span.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace log_internal {
namespace {
// Left-pads the thread ID with spaces so the digits written next by
// FastIntToBuffer land right-aligned in a seven-character column, mirroring
// the "%7d" used on the fallback path below.
template <typename T>
inline std::enable_if_t<!std::is_signed<T>::value>
PutLeadingWhitespace(T tid, char*& p) {
  if (tid < 10) *p++ = ' ';
  if (tid < 100) *p++ = ' ';
  if (tid < 1000) *p++ = ' ';
  if (tid < 10000) *p++ = ' ';
  if (tid < 100000) *p++ = ' ';
  if (tid < 1000000) *p++ = ' ';
}
template <typename T>
inline std::enable_if_t<std::is_signed<T>::value>
PutLeadingWhitespace(T tid, char*& p) {
if (tid >= 0 && tid < 10) *p++ = ' ';
if (tid > -10 && tid < 100) *p++ = ' ';
if (tid > -100 && tid < 1000) *p++ = ' ';
if (tid > -1000 && tid < 10000) *p++ = ' ';
if (tid > -10000 && tid < 100000) *p++ = ' ';
if (tid > -100000 && tid < 1000000) *p++ = ' ';
}
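// Tiny demonstration of the padding contract (a sketch, not compiled into
// the library): for tid == 42 the cascade emits five spaces, after which
// FastIntToBuffer appends "42", yielding the 7-column field "     42".
inline void PutLeadingWhitespaceSketch() {
  char buf[8];
  char* p = buf;
  PutLeadingWhitespace(42, p);  // five spaces for a two-digit tid
  p = absl::numbers_internal::FastIntToBuffer(42, p);  // "42" plus NUL
}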
size_t FormatBoundedFields(absl::LogSeverity severity, absl::Time timestamp,
log_internal::Tid tid, absl::Span<char>& buf) {
  // Upper bound on this fast path's output; `- sizeof("")` subtracts the NUL
  // terminator counted inside the template string literal.
  constexpr size_t kBoundedFieldsMaxLen =
      sizeof("SMMDD HH:MM:SS.NNNNNN ") +
      (1 + std::numeric_limits<log_internal::Tid>::digits10 + 1) - sizeof("");
if (ABSL_PREDICT_FALSE(buf.size() < kBoundedFieldsMaxLen)) {
buf.remove_suffix(buf.size());
return 0;
}
const absl::TimeZone* tz = absl::log_internal::TimeZone();
if (ABSL_PREDICT_FALSE(tz == nullptr)) {
auto tv = absl::ToTimeval(timestamp);
int snprintf_result = absl::SNPrintF(
buf.data(), buf.size(), "%c0000 00:00:%02d.%06d %7d ",
absl::LogSeverityName(severity)[0], static_cast<int>(tv.tv_sec),
static_cast<int>(tv.tv_usec), static_cast<int>(tid));
if (snprintf_result >= 0) {
buf.remove_prefix(static_cast<size_t>(snprintf_result));
return static_cast<size_t>(snprintf_result);
}
return 0;
}
char* p = buf.data();
*p++ = absl::LogSeverityName(severity)[0];
const absl::TimeZone::CivilInfo ci = tz->At(timestamp);
absl::numbers_internal::PutTwoDigits(static_cast<uint32_t>(ci.cs.month()), p);
p += 2;
absl::numbers_internal::PutTwoDigits(static_cast<uint32_t>(ci.cs.day()), p);
p += 2;
*p++ = ' ';
absl::numbers_internal::PutTwoDigits(static_cast<uint32_t>(ci.cs.hour()), p);
p += 2;
*p++ = ':';
absl::numbers_internal::PutTwoDigits(static_cast<uint32_t>(ci.cs.minute()),
p);
p += 2;
*p++ = ':';
absl::numbers_internal::PutTwoDigits(static_cast<uint32_t>(ci.cs.second()),
p);
p += 2;
*p++ = '.';
const int64_t usecs = absl::ToInt64Microseconds(ci.subsecond);
absl::numbers_internal::PutTwoDigits(static_cast<uint32_t>(usecs / 10000), p);
p += 2;
absl::numbers_internal::PutTwoDigits(static_cast<uint32_t>(usecs / 100 % 100),
p);
p += 2;
absl::numbers_internal::PutTwoDigits(static_cast<uint32_t>(usecs % 100), p);
p += 2;
*p++ = ' ';
PutLeadingWhitespace(tid, p);
p = absl::numbers_internal::FastIntToBuffer(tid, p);
*p++ = ' ';
const size_t bytes_formatted = static_cast<size_t>(p - buf.data());
buf.remove_prefix(bytes_formatted);
return bytes_formatted;
}
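// For orientation, the fast path above produces prefixes shaped like
// "I0206 14:58:12.654321    1234 " (severity letter, MMDD, wall-clock time
// with microseconds, tid right-aligned to seven columns). Caller sketch with
// placeholder metadata (hypothetical, not part of this file):
inline size_t FormatBoundedFieldsSketch(char (&storage)[64]) {
  absl::Span<char> buf(storage);
  return FormatBoundedFields(absl::LogSeverity::kInfo, absl::UnixEpoch(),
                             /*tid=*/1234, buf);
}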
size_t FormatLineNumber(int line, absl::Span<char>& buf) {
constexpr size_t kLineFieldMaxLen =
sizeof(":] ") + (1 + std::numeric_limits<int>::digits10 + 1) - sizeof("");
if (ABSL_PREDICT_FALSE(buf.size() < kLineFieldMaxLen)) {
buf.remove_suffix(buf.size());
return 0;
}
char* p = buf.data();
*p++ = ':';
p = absl::numbers_internal::FastIntToBuffer(line, p);
*p++ = ']';
*p++ = ' ';
const size_t bytes_formatted = static_cast<size_t>(p - buf.data());
buf.remove_prefix(bytes_formatted);
return bytes_formatted;
}
}
std::string FormatLogMessage(absl::LogSeverity severity,
absl::CivilSecond civil_second,
absl::Duration subsecond, log_internal::Tid tid,
absl::string_view basename, int line,
PrefixFormat format, absl::string_view message) {
return absl::StrFormat(
"%c%02d%02d %02d:%02d:%02d.%06d %7d %s:%d] %s%s",
absl::LogSeverityName(severity)[0], civil_second.month(),
civil_second.day(), civil_second.hour(), civil_second.minute(),
civil_second.second(), absl::ToInt64Microseconds(subsecond), tid,
basename, line, format == PrefixFormat::kRaw ? "RAW: " : "", message);
}
size_t FormatLogPrefix(absl::LogSeverity severity, absl::Time timestamp,
log_internal::Tid tid, absl::string_view basename,
int line, PrefixFormat format, absl::Span<char>& buf) {
auto prefix_size = FormatBoundedFields(severity, timestamp, tid, buf);
prefix_size += log_internal::AppendTruncated(basename, buf);
prefix_size += FormatLineNumber(line, buf);
if (format == PrefixFormat::kRaw)
prefix_size += log_internal::AppendTruncated("RAW: ", buf);
return prefix_size;
}
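// Caller sketch for the full prefix formatter above (hypothetical values;
// PrefixFormat::kRaw makes the prefix end in "RAW: ", as handled above). The
// function consumes the front of `buf`, so the message body can be appended
// directly after the prefix:
inline size_t FormatLogPrefixSketch(char (&storage)[512]) {
  absl::Span<char> buf(storage);
  return FormatLogPrefix(absl::LogSeverity::kInfo, absl::UnixEpoch(),
                         /*tid=*/1234, "example.cc", 42, PrefixFormat::kRaw,
                         buf);
}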
}
ABSL_NAMESPACE_END
} | #include <math.h>
#include <iomanip>
#include <ios>
#include <limits>
#include <ostream>
#include <sstream>
#include <string>
#include <type_traits>
#ifdef __ANDROID__
#include <android/api-level.h>
#endif
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/log/check.h"
#include "absl/log/internal/test_matchers.h"
#include "absl/log/log.h"
#include "absl/log/scoped_mock_log.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "absl/types/optional.h"
namespace {
using ::absl::log_internal::AsString;
using ::absl::log_internal::MatchesOstream;
using ::absl::log_internal::RawEncodedMessage;
using ::absl::log_internal::TextMessage;
using ::absl::log_internal::TextPrefix;
using ::testing::AllOf;
using ::testing::AnyOf;
using ::testing::Each;
using ::testing::ElementsAre;
using ::testing::EndsWith;
using ::testing::Eq;
using ::testing::Ge;
using ::testing::IsEmpty;
using ::testing::Le;
using ::testing::SizeIs;
using ::testing::Types;
std::ostringstream ComparisonStream() {
std::ostringstream str;
str.setf(std::ios_base::showbase | std::ios_base::boolalpha |
std::ios_base::internal);
return str;
}
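// Every TEST below follows one recipe: stream the value into a plain
// ostringstream configured like the logger (above), then require that
// LOG(INFO) << value matches that text -- usually alongside checks on the
// encoded proto form. Hypothetical helper condensing the text half of the
// recipe (a sketch; the real tests are written out in full):
template <typename T>
void ExpectLogsLikeOstream(const T& value) {
  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
  auto comparison_stream = ComparisonStream();
  comparison_stream << value;
  EXPECT_CALL(test_sink, Send(TextMessage(MatchesOstream(comparison_stream))));
  test_sink.StartCapturingLogs();
  LOG(INFO) << value;
}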
TEST(LogFormatTest, NoMessage) {
absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
const int log_line = __LINE__ + 1;
auto do_log = [] { LOG(INFO); };
EXPECT_CALL(test_sink,
Send(AllOf(TextMessage(MatchesOstream(ComparisonStream())),
TextPrefix(AsString(EndsWith(absl::StrCat(
" log_format_test.cc:", log_line, "] ")))),
TextMessage(IsEmpty()),
ENCODED_MESSAGE(HasValues(IsEmpty())))));
test_sink.StartCapturingLogs();
do_log();
}
template <typename T>
class CharLogFormatTest : public testing::Test {};
using CharTypes = Types<char, signed char, unsigned char>;
TYPED_TEST_SUITE(CharLogFormatTest, CharTypes);
TYPED_TEST(CharLogFormatTest, Printable) {
absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
const TypeParam value = 'x';
auto comparison_stream = ComparisonStream();
comparison_stream << value;
EXPECT_CALL(test_sink,
Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
TextMessage(Eq("x")),
ENCODED_MESSAGE(HasValues(
ElementsAre(EqualsProto(R"pb(str: "x")pb")))))));
test_sink.StartCapturingLogs();
LOG(INFO) << value;
}
TYPED_TEST(CharLogFormatTest, Unprintable) {
absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
constexpr auto value = static_cast<TypeParam>(0xeeu);
auto comparison_stream = ComparisonStream();
comparison_stream << value;
EXPECT_CALL(test_sink,
Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
TextMessage(Eq("\xee")),
ENCODED_MESSAGE(HasValues(ElementsAre(
EqualsProto(R"pb(str: "\xee")pb")))))));
test_sink.StartCapturingLogs();
LOG(INFO) << value;
}
template <typename T>
class UnsignedIntLogFormatTest : public testing::Test {};
using UnsignedIntTypes = Types<unsigned short, unsigned int,
unsigned long, unsigned long long>;
TYPED_TEST_SUITE(UnsignedIntLogFormatTest, UnsignedIntTypes);
TYPED_TEST(UnsignedIntLogFormatTest, Positive) {
absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
const TypeParam value = 224;
auto comparison_stream = ComparisonStream();
comparison_stream << value;
EXPECT_CALL(test_sink,
Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
TextMessage(Eq("224")),
ENCODED_MESSAGE(HasValues(
ElementsAre(EqualsProto(R"pb(str: "224")pb")))))));
test_sink.StartCapturingLogs();
LOG(INFO) << value;
}
TYPED_TEST(UnsignedIntLogFormatTest, BitfieldPositive) {
absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
const struct {
TypeParam bits : 6;
} value{42};
auto comparison_stream = ComparisonStream();
comparison_stream << value.bits;
EXPECT_CALL(test_sink,
Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
TextMessage(Eq("42")),
ENCODED_MESSAGE(HasValues(
ElementsAre(EqualsProto(R"pb(str: "42")pb")))))));
test_sink.StartCapturingLogs();
LOG(INFO) << value.bits;
}
template <typename T>
class SignedIntLogFormatTest : public testing::Test {};
using SignedIntTypes =
Types<signed short, signed int, signed long, signed long long>;
TYPED_TEST_SUITE(SignedIntLogFormatTest, SignedIntTypes);
TYPED_TEST(SignedIntLogFormatTest, Positive) {
absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
const TypeParam value = 224;
auto comparison_stream = ComparisonStream();
comparison_stream << value;
EXPECT_CALL(test_sink,
Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
TextMessage(Eq("224")),
ENCODED_MESSAGE(HasValues(
ElementsAre(EqualsProto(R"pb(str: "224")pb")))))));
test_sink.StartCapturingLogs();
LOG(INFO) << value;
}
TYPED_TEST(SignedIntLogFormatTest, Negative) {
absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
const TypeParam value = -112;
auto comparison_stream = ComparisonStream();
comparison_stream << value;
EXPECT_CALL(test_sink,
Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
TextMessage(Eq("-112")),
ENCODED_MESSAGE(HasValues(ElementsAre(
EqualsProto(R"pb(str: "-112")pb")))))));
test_sink.StartCapturingLogs();
LOG(INFO) << value;
}
TYPED_TEST(SignedIntLogFormatTest, BitfieldPositive) {
absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
const struct {
TypeParam bits : 6;
} value{21};
auto comparison_stream = ComparisonStream();
comparison_stream << value.bits;
EXPECT_CALL(test_sink,
Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
TextMessage(Eq("21")),
ENCODED_MESSAGE(HasValues(
ElementsAre(EqualsProto(R"pb(str: "21")pb")))))));
test_sink.StartCapturingLogs();
LOG(INFO) << value.bits;
}
TYPED_TEST(SignedIntLogFormatTest, BitfieldNegative) {
absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
const struct {
TypeParam bits : 6;
} value{-21};
auto comparison_stream = ComparisonStream();
comparison_stream << value.bits;
EXPECT_CALL(test_sink,
Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
TextMessage(Eq("-21")),
ENCODED_MESSAGE(HasValues(
ElementsAre(EqualsProto(R"pb(str: "-21")pb")))))));
test_sink.StartCapturingLogs();
LOG(INFO) << value.bits;
}
#if !defined(__GNUC__) || defined(__clang__)
enum MyUnsignedEnum {
MyUnsignedEnum_ZERO = 0,
MyUnsignedEnum_FORTY_TWO = 42,
MyUnsignedEnum_TWO_HUNDRED_TWENTY_FOUR = 224,
};
enum MyUnsignedIntEnum : unsigned int {
MyUnsignedIntEnum_ZERO = 0,
MyUnsignedIntEnum_FORTY_TWO = 42,
MyUnsignedIntEnum_TWO_HUNDRED_TWENTY_FOUR = 224,
};
template <typename T>
class UnsignedEnumLogFormatTest : public testing::Test {};
using UnsignedEnumTypes = std::conditional<
std::is_signed<std::underlying_type<MyUnsignedEnum>::type>::value,
Types<MyUnsignedIntEnum>, Types<MyUnsignedEnum, MyUnsignedIntEnum>>::type;
TYPED_TEST_SUITE(UnsignedEnumLogFormatTest, UnsignedEnumTypes);
TYPED_TEST(UnsignedEnumLogFormatTest, Positive) {
absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
const TypeParam value = static_cast<TypeParam>(224);
auto comparison_stream = ComparisonStream();
comparison_stream << value;
EXPECT_CALL(test_sink,
Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
TextMessage(Eq("224")),
ENCODED_MESSAGE(HasValues(
ElementsAre(EqualsProto(R"pb(str: "224")pb")))))));
test_sink.StartCapturingLogs();
LOG(INFO) << value;
}
TYPED_TEST(UnsignedEnumLogFormatTest, BitfieldPositive) {
absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
const struct {
TypeParam bits : 6;
} value{static_cast<TypeParam>(42)};
auto comparison_stream = ComparisonStream();
comparison_stream << value.bits;
EXPECT_CALL(test_sink,
Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
TextMessage(Eq("42")),
ENCODED_MESSAGE(HasValues(
ElementsAre(EqualsProto(R"pb(str: "42")pb")))))));
test_sink.StartCapturingLogs();
LOG(INFO) << value.bits;
}
enum MySignedEnum {
MySignedEnum_NEGATIVE_ONE_HUNDRED_TWELVE = -112,
MySignedEnum_NEGATIVE_TWENTY_ONE = -21,
MySignedEnum_ZERO = 0,
MySignedEnum_TWENTY_ONE = 21,
MySignedEnum_TWO_HUNDRED_TWENTY_FOUR = 224,
};
enum MySignedIntEnum : signed int {
MySignedIntEnum_NEGATIVE_ONE_HUNDRED_TWELVE = -112,
MySignedIntEnum_NEGATIVE_TWENTY_ONE = -21,
MySignedIntEnum_ZERO = 0,
MySignedIntEnum_TWENTY_ONE = 21,
MySignedIntEnum_TWO_HUNDRED_TWENTY_FOUR = 224,
};
template <typename T>
class SignedEnumLogFormatTest : public testing::Test {};
using SignedEnumTypes = std::conditional<
std::is_signed<std::underlying_type<MyUnsignedEnum>::type>::value,
Types<MyUnsignedEnum, MySignedEnum, MySignedIntEnum>,
Types<MySignedEnum, MySignedIntEnum>>::type;
TYPED_TEST_SUITE(SignedEnumLogFormatTest, SignedEnumTypes);
TYPED_TEST(SignedEnumLogFormatTest, Positive) {
absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
const TypeParam value = static_cast<TypeParam>(224);
auto comparison_stream = ComparisonStream();
comparison_stream << value;
EXPECT_CALL(test_sink,
Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
TextMessage(Eq("224")),
ENCODED_MESSAGE(HasValues(
ElementsAre(EqualsProto(R"pb(str: "224")pb")))))));
test_sink.StartCapturingLogs();
LOG(INFO) << value;
}
TYPED_TEST(SignedEnumLogFormatTest, Negative) {
absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
const TypeParam value = static_cast<TypeParam>(-112);
auto comparison_stream = ComparisonStream();
comparison_stream << value;
EXPECT_CALL(test_sink,
Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
TextMessage(Eq("-112")),
ENCODED_MESSAGE(HasValues(ElementsAre(
EqualsProto(R"pb(str: "-112")pb")))))));
test_sink.StartCapturingLogs();
LOG(INFO) << value;
}
TYPED_TEST(SignedEnumLogFormatTest, BitfieldPositive) {
absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
const struct {
TypeParam bits : 6;
} value{static_cast<TypeParam>(21)};
auto comparison_stream = ComparisonStream();
comparison_stream << value.bits;
EXPECT_CALL(test_sink,
Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
TextMessage(Eq("21")),
ENCODED_MESSAGE(HasValues(
ElementsAre(EqualsProto(R"pb(str: "21")pb")))))));
test_sink.StartCapturingLogs();
LOG(INFO) << value.bits;
}
TYPED_TEST(SignedEnumLogFormatTest, BitfieldNegative) {
absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
const struct {
TypeParam bits : 6;
} value{static_cast<TypeParam>(-21)};
auto comparison_stream = ComparisonStream();
comparison_stream << value.bits;
EXPECT_CALL(test_sink,
Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
TextMessage(Eq("-21")),
ENCODED_MESSAGE(HasValues(
ElementsAre(EqualsProto(R"pb(str: "-21")pb")))))));
test_sink.StartCapturingLogs();
LOG(INFO) << value.bits;
}
#endif
TEST(FloatLogFormatTest, Positive) {
absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
const float value = 6.02e23f;
auto comparison_stream = ComparisonStream();
comparison_stream << value;
EXPECT_CALL(test_sink,
Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
TextMessage(Eq("6.02e+23")),
ENCODED_MESSAGE(HasValues(ElementsAre(
EqualsProto(R"pb(str: "6.02e+23")pb")))))));
test_sink.StartCapturingLogs();
LOG(INFO) << value;
}
TEST(FloatLogFormatTest, Negative) {
absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
const float value = -6.02e23f;
auto comparison_stream = ComparisonStream();
comparison_stream << value;
EXPECT_CALL(test_sink,
Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
TextMessage(Eq("-6.02e+23")),
ENCODED_MESSAGE(HasValues(ElementsAre(
EqualsProto(R"pb(str: "-6.02e+23")pb")))))));
test_sink.StartCapturingLogs();
LOG(INFO) << value;
}
TEST(FloatLogFormatTest, NegativeExponent) {
absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
const float value = 6.02e-23f;
auto comparison_stream = ComparisonStream();
comparison_stream << value;
EXPECT_CALL(test_sink,
Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
TextMessage(Eq("6.02e-23")),
ENCODED_MESSAGE(HasValues(ElementsAre(
EqualsProto(R"pb(str: "6.02e-23")pb")))))));
test_sink.StartCapturingLogs();
LOG(INFO) << value;
}
TEST(DoubleLogFormatTest, Positive) {
absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
const double value = 6.02e23;
auto comparison_stream = ComparisonStream();
comparison_stream << value;
EXPECT_CALL(test_sink,
Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
TextMessage(Eq("6.02e+23")),
ENCODED_MESSAGE(HasValues(ElementsAre(
EqualsProto(R"pb(str: "6.02e+23")pb")))))));
test_sink.StartCapturingLogs();
LOG(INFO) << value;
}
TEST(DoubleLogFormatTest, Negative) {
absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
const double value = -6.02e23;
auto comparison_stream = ComparisonStream();
comparison_stream << value;
EXPECT_CALL(test_sink,
Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
TextMessage(Eq("-6.02e+23")),
ENCODED_MESSAGE(HasValues(ElementsAre(
EqualsProto(R"pb(str: "-6.02e+23")pb")))))));
test_sink.StartCapturingLogs();
LOG(INFO) << value;
}
TEST(DoubleLogFormatTest, NegativeExponent) {
absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
const double value = 6.02e-23;
auto comparison_stream = ComparisonStream();
comparison_stream << value;
EXPECT_CALL(test_sink,
Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
TextMessage(Eq("6.02e-23")),
ENCODED_MESSAGE(HasValues(ElementsAre(
EqualsProto(R"pb(str: "6.02e-23")pb")))))));
test_sink.StartCapturingLogs();
LOG(INFO) << value;
}
template <typename T>
class FloatingPointLogFormatTest : public testing::Test {};
using FloatingPointTypes = Types<float, double>;
TYPED_TEST_SUITE(FloatingPointLogFormatTest, FloatingPointTypes);
TYPED_TEST(FloatingPointLogFormatTest, Zero) {
absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
const TypeParam value = 0.0;
auto comparison_stream = ComparisonStream();
comparison_stream << value;
EXPECT_CALL(test_sink,
Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
TextMessage(Eq("0")),
ENCODED_MESSAGE(HasValues(
ElementsAre(EqualsProto(R"pb(str: "0")pb")))))));
test_sink.StartCapturingLogs();
LOG(INFO) << value;
}
TYPED_TEST(FloatingPointLogFormatTest, Integer) {
absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
const TypeParam value = 1.0;
auto comparison_stream = ComparisonStream();
comparison_stream << value;
EXPECT_CALL(test_sink,
Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
TextMessage(Eq("1")),
ENCODED_MESSAGE(HasValues(
ElementsAre(EqualsProto(R"pb(str: "1")pb")))))));
test_sink.StartCapturingLogs();
LOG(INFO) << value;
}
TYPED_TEST(FloatingPointLogFormatTest, Infinity) {
absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
const TypeParam value = std::numeric_limits<TypeParam>::infinity();
auto comparison_stream = ComparisonStream();
comparison_stream << value;
EXPECT_CALL(test_sink,
Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
TextMessage(AnyOf(Eq("inf"), Eq("Inf"))),
ENCODED_MESSAGE(HasValues(
ElementsAre(EqualsProto(R"pb(str: "inf")pb")))))));
test_sink.StartCapturingLogs();
LOG(INFO) << value;
}
TYPED_TEST(FloatingPointLogFormatTest, NegativeInfinity) {
absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
const TypeParam value = -std::numeric_limits<TypeParam>::infinity();
auto comparison_stream = ComparisonStream();
comparison_stream << value;
EXPECT_CALL(test_sink,
Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
TextMessage(AnyOf(Eq("-inf"), Eq("-Inf"))),
ENCODED_MESSAGE(HasValues(ElementsAre(
EqualsProto(R"pb(str: "-inf")pb")))))));
test_sink.StartCapturingLogs();
LOG(INFO) << value;
}
TYPED_TEST(FloatingPointLogFormatTest, NaN) {
absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
const TypeParam value = std::numeric_limits<TypeParam>::quiet_NaN();
auto comparison_stream = ComparisonStream();
comparison_stream << value;
EXPECT_CALL(test_sink,
Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
TextMessage(AnyOf(Eq("nan"), Eq("NaN"))),
ENCODED_MESSAGE(HasValues(
ElementsAre(EqualsProto(R"pb(str: "nan")pb")))))));
test_sink.StartCapturingLogs();
LOG(INFO) << value;
}
TYPED_TEST(FloatingPointLogFormatTest, NegativeNaN) {
absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
const TypeParam value =
std::copysign(std::numeric_limits<TypeParam>::quiet_NaN(), -1.0);
auto comparison_stream = ComparisonStream();
comparison_stream << value;
#ifdef __riscv
EXPECT_CALL(
test_sink,
Send(AllOf(
TextMessage(AnyOf(Eq("-nan"), Eq("nan"), Eq("NaN"), Eq("-nan(ind)"))),
ENCODED_MESSAGE(HasValues(
ElementsAre(AnyOf(EqualsProto(R"pb(str: "-nan")pb"),
EqualsProto(R"pb(str: "nan")pb"),
EqualsProto(R"pb(str: "-nan(ind)")pb"))))))));
#else
EXPECT_CALL(
test_sink,
Send(AllOf(
TextMessage(MatchesOstream(comparison_stream)),
TextMessage(AnyOf(Eq("-nan"), Eq("nan"), Eq("NaN"), Eq("-nan(ind)"))),
ENCODED_MESSAGE(HasValues(
ElementsAre(AnyOf(EqualsProto(R"pb(str: "-nan")pb"),
EqualsProto(R"pb(str: "nan")pb"),
EqualsProto(R"pb(str: "-nan(ind)")pb"))))))));
#endif
test_sink.StartCapturingLogs();
LOG(INFO) << value;
}
template <typename T>
class VoidPtrLogFormatTest : public testing::Test {};
using VoidPtrTypes = Types<void *, const void *>;
TYPED_TEST_SUITE(VoidPtrLogFormatTest, VoidPtrTypes);
TYPED_TEST(VoidPtrLogFormatTest, Null) {
absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
const TypeParam value = nullptr;
auto comparison_stream = ComparisonStream();
comparison_stream << value;
EXPECT_CALL(
test_sink,
Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
TextMessage(AnyOf(Eq("(nil)"), Eq("0"), Eq("0x0"),
Eq("00000000"), Eq("0000000000000000"))))));
test_sink.StartCapturingLogs();
LOG(INFO) << value;
}
TYPED_TEST(VoidPtrLogFormatTest, NonNull) {
absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
const TypeParam value = reinterpret_cast<TypeParam>(0xdeadbeefULL);
auto comparison_stream = ComparisonStream();
comparison_stream << value;
EXPECT_CALL(
test_sink,
Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
TextMessage(AnyOf(Eq("0xdeadbeef"), Eq("DEADBEEF"),
Eq("00000000DEADBEEF"))),
ENCODED_MESSAGE(HasValues(ElementsAre(
AnyOf(EqualsProto(R"pb(str: "0xdeadbeef")pb"),
EqualsProto(R"pb(str: "00000000DEADBEEF")pb"))))))));
test_sink.StartCapturingLogs();
LOG(INFO) << value;
}
template <typename T>
class VolatilePtrLogFormatTest : public testing::Test {};
using VolatilePtrTypes =
Types<volatile void*, const volatile void*, volatile char*,
const volatile char*, volatile signed char*,
const volatile signed char*, volatile unsigned char*,
const volatile unsigned char*>;
TYPED_TEST_SUITE(VolatilePtrLogFormatTest, VolatilePtrTypes);
TYPED_TEST(VolatilePtrLogFormatTest, Null) {
absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
const TypeParam value = nullptr;
auto comparison_stream = ComparisonStream();
comparison_stream << value;
EXPECT_CALL(test_sink,
Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
TextMessage(Eq("false")),
ENCODED_MESSAGE(HasValues(ElementsAre(
EqualsProto(R"pb(str: "false")pb")))))));
test_sink.StartCapturingLogs();
LOG(INFO) << value;
}
TYPED_TEST(VolatilePtrLogFormatTest, NonNull) {
absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
const TypeParam value = reinterpret_cast<TypeParam>(0xdeadbeefLL);
auto comparison_stream = ComparisonStream();
comparison_stream << value;
EXPECT_CALL(test_sink,
Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
TextMessage(Eq("true")),
ENCODED_MESSAGE(HasValues(ElementsAre(
EqualsProto(R"pb(str: "true")pb")))))));
test_sink.StartCapturingLogs();
LOG(INFO) << value;
}
template <typename T>
class CharPtrLogFormatTest : public testing::Test {};
using CharPtrTypes = Types<char, const char, signed char, const signed char,
unsigned char, const unsigned char>;
TYPED_TEST_SUITE(CharPtrLogFormatTest, CharPtrTypes);
TYPED_TEST(CharPtrLogFormatTest, Null) {
absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
TypeParam* const value = nullptr;
EXPECT_CALL(
test_sink,
Send(AllOf(
TextMessage(Eq("(null)")),
ENCODED_MESSAGE(
HasValues(ElementsAre(EqualsProto(R"pb(str: "(null)")pb")))))));
test_sink.StartCapturingLogs();
LOG(INFO) << value;
}
TYPED_TEST(CharPtrLogFormatTest, NonNull) {
absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
TypeParam data[] = {'v', 'a', 'l', 'u', 'e', '\0'};
TypeParam* const value = data;
auto comparison_stream = ComparisonStream();
comparison_stream << value;
EXPECT_CALL(test_sink,
Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
TextMessage(Eq("value")),
ENCODED_MESSAGE(HasValues(ElementsAre(
EqualsProto(R"pb(str: "value")pb")))))));
test_sink.StartCapturingLogs();
LOG(INFO) << value;
}
TEST(BoolLogFormatTest, True) {
absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
const bool value = true;
auto comparison_stream = ComparisonStream();
comparison_stream << value;
EXPECT_CALL(test_sink,
Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
TextMessage(Eq("true")),
ENCODED_MESSAGE(HasValues(ElementsAre(
EqualsProto(R"pb(str: "true")pb")))))));
test_sink.StartCapturingLogs();
LOG(INFO) << value;
}
TEST(BoolLogFormatTest, False) {
absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
const bool value = false;
auto comparison_stream = ComparisonStream();
comparison_stream << value;
EXPECT_CALL(test_sink,
Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
TextMessage(Eq("false")),
ENCODED_MESSAGE(HasValues(ElementsAre(
EqualsProto(R"pb(str: "false")pb")))))));
test_sink.StartCapturingLogs();
LOG(INFO) << value;
}
TEST(LogFormatTest, StringLiteral) {
absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
auto comparison_stream = ComparisonStream();
comparison_stream << "value";
EXPECT_CALL(test_sink,
Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
TextMessage(Eq("value")),
ENCODED_MESSAGE(HasValues(ElementsAre(
EqualsProto(R"pb(literal: "value")pb")))))));
test_sink.StartCapturingLogs();
LOG(INFO) << "value";
}
TEST(LogFormatTest, CharArray) {
absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
char value[] = "value";
auto comparison_stream = ComparisonStream();
comparison_stream << value;
EXPECT_CALL(test_sink,
Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
TextMessage(Eq("value")),
ENCODED_MESSAGE(HasValues(ElementsAre(
EqualsProto(R"pb(str: "value")pb")))))));
test_sink.StartCapturingLogs();
LOG(INFO) << value;
}
class CustomClass {};
std::ostream& operator<<(std::ostream& os, const CustomClass&) {
return os << "CustomClass{}";
}
TEST(LogFormatTest, Custom) {
absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
CustomClass value;
auto comparison_stream = ComparisonStream();
comparison_stream << value;
EXPECT_CALL(test_sink,
Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
TextMessage(Eq("CustomClass{}")),
ENCODED_MESSAGE(HasValues(ElementsAre(
EqualsProto(R"pb(str: "CustomClass{}")pb")))))));
test_sink.StartCapturingLogs();
LOG(INFO) << value;
}
class CustomClassNonCopyable {
public:
CustomClassNonCopyable() = default;
CustomClassNonCopyable(const CustomClassNonCopyable&) = delete;
CustomClassNonCopyable& operator=(const CustomClassNonCopyable&) = delete;
};
std::ostream& operator<<(std::ostream& os, const CustomClassNonCopyable&) {
return os << "CustomClassNonCopyable{}";
}
TEST(LogFormatTest, CustomNonCopyable) {
absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
CustomClassNonCopyable value;
auto comparison_stream = ComparisonStream();
comparison_stream << value;
EXPECT_CALL(test_sink,
Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
TextMessage(Eq("CustomClassNonCopyable{}")),
ENCODED_MESSAGE(HasValues(ElementsAre(EqualsProto(
R"pb(str: "CustomClassNonCopyable{}")pb")))))));
test_sink.StartCapturingLogs();
LOG(INFO) << value;
}
struct Point {
template <typename Sink>
friend void AbslStringify(Sink& sink, const Point& p) {
absl::Format(&sink, "(%d, %d)", p.x, p.y);
}
int x = 10;
int y = 20;
};
TEST(LogFormatTest, AbslStringifyExample) {
absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
Point p;
EXPECT_CALL(
test_sink,
Send(AllOf(TextMessage(Eq("(10, 20)")), TextMessage(Eq(absl::StrCat(p))),
ENCODED_MESSAGE(HasValues(
ElementsAre(EqualsProto(R"pb(str: "(10, 20)")pb")))))));
test_sink.StartCapturingLogs();
LOG(INFO) << p;
}
struct PointWithAbslStringifiyAndOstream {
template <typename Sink>
friend void AbslStringify(Sink& sink,
const PointWithAbslStringifiyAndOstream& p) {
absl::Format(&sink, "(%d, %d)", p.x, p.y);
}
int x = 10;
int y = 20;
};
ABSL_ATTRIBUTE_UNUSED std::ostream& operator<<(
std::ostream& os, const PointWithAbslStringifiyAndOstream&) {
return os << "Default to AbslStringify()";
}
TEST(LogFormatTest, CustomWithAbslStringifyAndOstream) {
absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
PointWithAbslStringifiyAndOstream p;
EXPECT_CALL(
test_sink,
Send(AllOf(TextMessage(Eq("(10, 20)")), TextMessage(Eq(absl::StrCat(p))),
ENCODED_MESSAGE(HasValues(
ElementsAre(EqualsProto(R"pb(str: "(10, 20)")pb")))))));
test_sink.StartCapturingLogs();
LOG(INFO) << p;
}
struct PointStreamsNothing {
template <typename Sink>
friend void AbslStringify(Sink&, const PointStreamsNothing&) {}
int x = 10;
int y = 20;
};
TEST(LogFormatTest, AbslStringifyStreamsNothing) {
absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
PointStreamsNothing p;
EXPECT_CALL(test_sink, Send(AllOf(TextMessage(Eq("77")),
TextMessage(Eq(absl::StrCat(p, 77))),
ENCODED_MESSAGE(HasValues(ElementsAre(
EqualsProto(R"pb(str: "77")pb")))))));
test_sink.StartCapturingLogs();
LOG(INFO) << p << 77;
}
struct PointMultipleAppend {
template <typename Sink>
friend void AbslStringify(Sink& sink, const PointMultipleAppend& p) {
sink.Append("(");
sink.Append(absl::StrCat(p.x, ", ", p.y, ")"));
}
int x = 10;
int y = 20;
};
TEST(LogFormatTest, AbslStringifyMultipleAppend) {
absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
PointMultipleAppend p;
EXPECT_CALL(
test_sink,
Send(AllOf(TextMessage(Eq("(10, 20)")), TextMessage(Eq(absl::StrCat(p))),
ENCODED_MESSAGE(HasValues(
ElementsAre(EqualsProto(R"pb(str: "(")pb"),
EqualsProto(R"pb(str: "10, 20)")pb")))))));
test_sink.StartCapturingLogs();
LOG(INFO) << p;
}
TEST(ManipulatorLogFormatTest, BoolAlphaTrue) {
absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
const bool value = true;
auto comparison_stream = ComparisonStream();
comparison_stream << std::noboolalpha << value << " "
<< std::boolalpha << value << " "
<< std::noboolalpha << value;
EXPECT_CALL(test_sink,
Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
TextMessage(Eq("1 true 1")),
ENCODED_MESSAGE(HasValues(
ElementsAre(EqualsProto(R"pb(str: "1")pb"),
EqualsProto(R"pb(literal: " ")pb"),
EqualsProto(R"pb(str: "true")pb"),
EqualsProto(R"pb(literal: " ")pb"),
EqualsProto(R"pb(str: "1")pb")))))));
test_sink.StartCapturingLogs();
LOG(INFO) << std::noboolalpha << value << " "
<< std::boolalpha << value << " "
<< std::noboolalpha << value;
}
TEST(ManipulatorLogFormatTest, BoolAlphaFalse) {
absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
const bool value = false;
auto comparison_stream = ComparisonStream();
comparison_stream << std::noboolalpha << value << " "
<< std::boolalpha << value << " "
<< std::noboolalpha << value;
EXPECT_CALL(test_sink,
Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
TextMessage(Eq("0 false 0")),
ENCODED_MESSAGE(HasValues(
ElementsAre(EqualsProto(R"pb(str: "0")pb"),
EqualsProto(R"pb(literal: " ")pb"),
EqualsProto(R"pb(str: "false")pb"),
EqualsProto(R"pb(literal: " ")pb"),
EqualsProto(R"pb(str: "0")pb")))))));
test_sink.StartCapturingLogs();
LOG(INFO) << std::noboolalpha << value << " "
<< std::boolalpha << value << " "
<< std::noboolalpha << value;
}
TEST(ManipulatorLogFormatTest, ShowPoint) {
absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
const double value = 77.0;
auto comparison_stream = ComparisonStream();
comparison_stream << std::noshowpoint << value << " "
<< std::showpoint << value << " "
<< std::noshowpoint << value;
EXPECT_CALL(test_sink,
Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
TextMessage(Eq("77 77.0000 77")),
ENCODED_MESSAGE(HasValues(
ElementsAre(EqualsProto(R"pb(str: "77")pb"),
EqualsProto(R"pb(literal: " ")pb"),
EqualsProto(R"pb(str: "77.0000")pb"),
EqualsProto(R"pb(literal: " ")pb"),
EqualsProto(R"pb(str: "77")pb")))))));
test_sink.StartCapturingLogs();
LOG(INFO) << std::noshowpoint << value << " "
<< std::showpoint << value << " "
<< std::noshowpoint << value;
}
TEST(ManipulatorLogFormatTest, ShowPos) {
absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
const int value = 77;
auto comparison_stream = ComparisonStream();
comparison_stream << std::noshowpos << value << " "
<< std::showpos << value << " "
<< std::noshowpos << value;
EXPECT_CALL(test_sink,
Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
TextMessage(Eq("77 +77 77")),
ENCODED_MESSAGE(HasValues(
ElementsAre(EqualsProto(R"pb(str: "77")pb"),
EqualsProto(R"pb(literal: " ")pb"),
EqualsProto(R"pb(str: "+77")pb"),
EqualsProto(R"pb(literal: " ")pb"),
EqualsProto(R"pb(str: "77")pb")))))));
test_sink.StartCapturingLogs();
LOG(INFO) << std::noshowpos << value << " "
<< std::showpos << value << " "
<< std::noshowpos << value;
}
TEST(ManipulatorLogFormatTest, UppercaseFloat) {
absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
const double value = 7.7e7;
auto comparison_stream = ComparisonStream();
comparison_stream << std::nouppercase << value << " "
<< std::uppercase << value << " "
<< std::nouppercase << value;
EXPECT_CALL(test_sink,
Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
TextMessage(Eq("7.7e+07 7.7E+07 7.7e+07")),
ENCODED_MESSAGE(HasValues(ElementsAre(
EqualsProto(R"pb(str: "7.7e+07")pb"),
EqualsProto(R"pb(literal: " ")pb"),
EqualsProto(R"pb(str: "7.7E+07")pb"),
EqualsProto(R"pb(literal: " ")pb"),
EqualsProto(R"pb(str: "7.7e+07")pb")))))));
test_sink.StartCapturingLogs();
LOG(INFO) << std::nouppercase << value << " "
<< std::uppercase << value << " "
<< std::nouppercase << value;
}
TEST(ManipulatorLogFormatTest, Hex) {
absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
const int value = 0x77;
auto comparison_stream = ComparisonStream();
comparison_stream << std::hex << value;
EXPECT_CALL(test_sink,
Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
TextMessage(Eq("0x77")),
ENCODED_MESSAGE(HasValues(ElementsAre(
EqualsProto(R"pb(str: "0x77")pb")))))));
test_sink.StartCapturingLogs();
LOG(INFO) << std::hex << value;
}
TEST(ManipulatorLogFormatTest, Oct) {
absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
const int value = 077;
auto comparison_stream = ComparisonStream();
comparison_stream << std::oct << value;
EXPECT_CALL(test_sink,
Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
TextMessage(Eq("077")),
ENCODED_MESSAGE(HasValues(
ElementsAre(EqualsProto(R"pb(str: "077")pb")))))));
test_sink.StartCapturingLogs();
LOG(INFO) << std::oct << value;
}
TEST(ManipulatorLogFormatTest, Dec) {
absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
const int value = 77;
auto comparison_stream = ComparisonStream();
comparison_stream << std::hex << std::dec << value;
EXPECT_CALL(test_sink,
Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
TextMessage(Eq("77")),
ENCODED_MESSAGE(HasValues(
ElementsAre(EqualsProto(R"pb(str: "77")pb")))))));
test_sink.StartCapturingLogs();
LOG(INFO) << std::hex << std::dec << value;
}
TEST(ManipulatorLogFormatTest, ShowbaseHex) {
absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
const int value = 0x77;
auto comparison_stream = ComparisonStream();
comparison_stream << std::hex
<< std::noshowbase << value << " "
<< std::showbase << value << " "
<< std::noshowbase << value;
EXPECT_CALL(test_sink,
Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
TextMessage(Eq("77 0x77 77")),
ENCODED_MESSAGE(HasValues(
ElementsAre(EqualsProto(R"pb(str: "77")pb"),
EqualsProto(R"pb(literal: " ")pb"),
EqualsProto(R"pb(str: "0x77")pb"),
EqualsProto(R"pb(literal: " ")pb"),
EqualsProto(R"pb(str: "77")pb")))))));
test_sink.StartCapturingLogs();
LOG(INFO) << std::hex
<< std::noshowbase << value << " "
<< std::showbase << value << " "
<< std::noshowbase << value;
}
TEST(ManipulatorLogFormatTest, ShowbaseOct) {
absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
const int value = 077;
auto comparison_stream = ComparisonStream();
comparison_stream << std::oct
<< std::noshowbase << value << " "
<< std::showbase << value << " "
<< std::noshowbase << value;
EXPECT_CALL(test_sink,
Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
TextMessage(Eq("77 077 77")),
ENCODED_MESSAGE(HasValues(
ElementsAre(EqualsProto(R"pb(str: "77")pb"),
EqualsProto(R"pb(literal: " ")pb"),
EqualsProto(R"pb(str: "077")pb"),
EqualsProto(R"pb(literal: " ")pb"),
EqualsProto(R"pb(str: "77")pb")))))));
test_sink.StartCapturingLogs();
LOG(INFO) << std::oct
<< std::noshowbase << value << " "
<< std::showbase << value << " "
<< std::noshowbase << value;
}
TEST(ManipulatorLogFormatTest, UppercaseHex) {
absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
const int value = 0xbeef;
auto comparison_stream = ComparisonStream();
comparison_stream
<< std::hex
<< std::nouppercase << value << " "
<< std::uppercase << value << " "
<< std::nouppercase << value;
EXPECT_CALL(test_sink,
Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
TextMessage(Eq("0xbeef 0XBEEF 0xbeef")),
ENCODED_MESSAGE(HasValues(ElementsAre(
EqualsProto(R"pb(str: "0xbeef")pb"),
EqualsProto(R"pb(literal: " ")pb"),
EqualsProto(R"pb(str: "0XBEEF")pb"),
EqualsProto(R"pb(literal: " ")pb"),
EqualsProto(R"pb(str: "0xbeef")pb")))))));
test_sink.StartCapturingLogs();
LOG(INFO) << std::hex
<< std::nouppercase << value << " "
<< std::uppercase << value << " "
<< std::nouppercase << value;
}
TEST(ManipulatorLogFormatTest, FixedFloat) {
absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
const double value = 7.7e7;
auto comparison_stream = ComparisonStream();
comparison_stream << std::fixed << value;
EXPECT_CALL(test_sink,
Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
TextMessage(Eq("77000000.000000")),
ENCODED_MESSAGE(HasValues(ElementsAre(
EqualsProto(R"pb(str: "77000000.000000")pb")))))));
test_sink.StartCapturingLogs();
LOG(INFO) << std::fixed << value;
}
TEST(ManipulatorLogFormatTest, ScientificFloat) {
absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
const double value = 7.7e7;
auto comparison_stream = ComparisonStream();
comparison_stream << std::scientific << value;
EXPECT_CALL(test_sink,
Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
TextMessage(Eq("7.700000e+07")),
ENCODED_MESSAGE(HasValues(ElementsAre(
EqualsProto(R"pb(str: "7.700000e+07")pb")))))));
test_sink.StartCapturingLogs();
LOG(INFO) << std::scientific << value;
}
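// Setting fixed|scientific together selects hexfloat-style output; the guard
// below compiles the test out on old Bionic and pre-C++14 libstdc++ builds,
// evidently because that formatting is unavailable there.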
#if defined(__BIONIC__) && (!defined(__ANDROID_API__) || __ANDROID_API__ < 22)
#elif defined(__GLIBCXX__) && __cplusplus < 201402L
#else
TEST(ManipulatorLogFormatTest, FixedAndScientificFloat) {
absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
const double value = 7.7e7;
auto comparison_stream = ComparisonStream();
comparison_stream << std::setiosflags(std::ios_base::scientific |
std::ios_base::fixed)
<< value;
EXPECT_CALL(
test_sink,
Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
TextMessage(AnyOf(Eq("0x1.25bb50p+26"), Eq("0x1.25bb5p+26"),
Eq("0x1.25bb500000000p+26"))),
ENCODED_MESSAGE(HasValues(ElementsAre(AnyOf(
EqualsProto(R"pb(str: "0x1.25bb5p+26")pb"),
EqualsProto(R"pb(str: "0x1.25bb500000000p+26")pb"))))))));
test_sink.StartCapturingLogs();
LOG(INFO) << std::setiosflags(std::ios_base::scientific |
std::ios_base::fixed)
<< value;
}
#endif
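// Same platform guard as above, this time for std::hexfloat itself.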
#if defined(__BIONIC__) && (!defined(__ANDROID_API__) || __ANDROID_API__ < 22)
#elif defined(__GLIBCXX__) && __cplusplus < 201402L
#else
TEST(ManipulatorLogFormatTest, HexfloatFloat) {
absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
const double value = 7.7e7;
auto comparison_stream = ComparisonStream();
comparison_stream << std::hexfloat << value;
EXPECT_CALL(
test_sink,
Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
TextMessage(AnyOf(Eq("0x1.25bb50p+26"), Eq("0x1.25bb5p+26"),
Eq("0x1.25bb500000000p+26"))),
ENCODED_MESSAGE(HasValues(ElementsAre(AnyOf(
EqualsProto(R"pb(str: "0x1.25bb5p+26")pb"),
EqualsProto(R"pb(str: "0x1.25bb500000000p+26")pb"))))))));
test_sink.StartCapturingLogs();
LOG(INFO) << std::hexfloat << value;
}
#endif
TEST(ManipulatorLogFormatTest, DefaultFloatFloat) {
absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
const double value = 7.7e7;
auto comparison_stream = ComparisonStream();
comparison_stream << std::hexfloat << std::defaultfloat << value;
EXPECT_CALL(test_sink,
Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
TextMessage(Eq("7.7e+07")),
ENCODED_MESSAGE(HasValues(ElementsAre(
EqualsProto(R"pb(str: "7.7e+07")pb")))))));
test_sink.StartCapturingLogs();
LOG(INFO) << std::hexfloat << std::defaultfloat << value;
}
TEST(ManipulatorLogFormatTest, Ends) {
absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
auto comparison_stream = ComparisonStream();
comparison_stream << std::ends;
EXPECT_CALL(test_sink,
Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
TextMessage(Eq(absl::string_view("\0", 1))),
ENCODED_MESSAGE(HasValues(
ElementsAre(EqualsProto(R"pb(str: "\0")pb")))))));
test_sink.StartCapturingLogs();
LOG(INFO) << std::ends;
}
TEST(ManipulatorLogFormatTest, Endl) {
absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
auto comparison_stream = ComparisonStream();
comparison_stream << std::endl;
  EXPECT_CALL(test_sink,
              Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
                         TextMessage(Eq("\n")),
                         ENCODED_MESSAGE(HasValues(ElementsAre(
                             EqualsProto(R"pb(str: "\n")pb")))))));
test_sink.StartCapturingLogs();
LOG(INFO) << std::endl;
}
TEST(ManipulatorLogFormatTest, SetIosFlags) {
absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
const int value = 0x77;
auto comparison_stream = ComparisonStream();
comparison_stream << std::resetiosflags(std::ios_base::basefield)
<< std::setiosflags(std::ios_base::hex) << value << " "
<< std::resetiosflags(std::ios_base::basefield)
<< std::setiosflags(std::ios_base::dec) << value;
EXPECT_CALL(
test_sink,
Send(AllOf(
TextMessage(MatchesOstream(comparison_stream)),
TextMessage(Eq("0x77 119")),
ENCODED_MESSAGE(
HasValues(ElementsAre(EqualsProto(R"pb(str: "0x77")pb"),
EqualsProto(R"pb(literal: " ")pb"),
EqualsProto(R"pb(str: "119")pb")))))));
test_sink.StartCapturingLogs();
LOG(INFO) << std::resetiosflags(std::ios_base::basefield)
<< std::setiosflags(std::ios_base::hex) << value << " "
<< std::resetiosflags(std::ios_base::basefield)
<< std::setiosflags(std::ios_base::dec) << value;
}
TEST(ManipulatorLogFormatTest, SetBase) {
absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
const int value = 0x77;
auto comparison_stream = ComparisonStream();
comparison_stream << std::setbase(16) << value << " "
<< std::setbase(0) << value;
EXPECT_CALL(
test_sink,
Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
TextMessage(Eq("0x77 119")),
ENCODED_MESSAGE(HasValues(
ElementsAre(EqualsProto(R"pb(str: "0x77")pb"),
EqualsProto(R"pb(literal: " ")pb"),
EqualsProto(R"pb(str: "119")pb")))))));
test_sink.StartCapturingLogs();
LOG(INFO) << std::setbase(16) << value << " "
<< std::setbase(0) << value;
}
TEST(ManipulatorLogFormatTest, SetPrecision) {
absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
const double value = 6.022140857e23;
auto comparison_stream = ComparisonStream();
comparison_stream << std::setprecision(4) << value;
EXPECT_CALL(
test_sink,
Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
TextMessage(Eq("6.022e+23")),
ENCODED_MESSAGE(HasValues(
ElementsAre(EqualsProto(R"pb(str: "6.022e+23")pb")))))));
test_sink.StartCapturingLogs();
LOG(INFO) << std::setprecision(4) << value;
}
TEST(ManipulatorLogFormatTest, SetPrecisionOverflow) {
absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
const double value = 6.022140857e23;
auto comparison_stream = ComparisonStream();
comparison_stream << std::setprecision(200) << value;
EXPECT_CALL(test_sink,
Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
TextMessage(Eq("602214085700000015187968")),
ENCODED_MESSAGE(HasValues(ElementsAre(EqualsProto(
R"pb(str: "602214085700000015187968")pb")))))));
test_sink.StartCapturingLogs();
LOG(INFO) << std::setprecision(200) << value;
}
TEST(ManipulatorLogFormatTest, SetW) {
absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
const int value = 77;
auto comparison_stream = ComparisonStream();
comparison_stream << std::setw(8) << value;
EXPECT_CALL(
test_sink,
Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
TextMessage(Eq(" 77")),
ENCODED_MESSAGE(HasValues(
ElementsAre(EqualsProto(R"pb(str: " 77")pb")))))));
test_sink.StartCapturingLogs();
LOG(INFO) << std::setw(8) << value;
}
TEST(ManipulatorLogFormatTest, Left) {
absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
const int value = -77;
auto comparison_stream = ComparisonStream();
comparison_stream << std::left << std::setw(8) << value;
EXPECT_CALL(test_sink,
Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
TextMessage(Eq("-77 ")),
ENCODED_MESSAGE(HasValues(ElementsAre(
EqualsProto(R"pb(str: "-77 ")pb")))))));
test_sink.StartCapturingLogs();
LOG(INFO) << std::left << std::setw(8) << value;
}
TEST(ManipulatorLogFormatTest, Right) {
absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
const int value = -77;
auto comparison_stream = ComparisonStream();
comparison_stream << std::right << std::setw(8) << value;
EXPECT_CALL(test_sink,
Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
TextMessage(Eq(" -77")),
ENCODED_MESSAGE(HasValues(ElementsAre(
EqualsProto(R"pb(str: " -77")pb")))))));
test_sink.StartCapturingLogs();
LOG(INFO) << std::right << std::setw(8) << value;
}
TEST(ManipulatorLogFormatTest, Internal) {
absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
const int value = -77;
auto comparison_stream = ComparisonStream();
comparison_stream << std::internal << std::setw(8) << value;
EXPECT_CALL(test_sink,
Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
TextMessage(Eq("- 77")),
ENCODED_MESSAGE(HasValues(ElementsAre(
EqualsProto(R"pb(str: "- 77")pb")))))));
test_sink.StartCapturingLogs();
LOG(INFO) << std::internal << std::setw(8) << value;
}
TEST(ManipulatorLogFormatTest, SetFill) {
absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
const int value = 77;
auto comparison_stream = ComparisonStream();
comparison_stream << std::setfill('0') << std::setw(8) << value;
EXPECT_CALL(test_sink,
Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
TextMessage(Eq("00000077")),
ENCODED_MESSAGE(HasValues(ElementsAre(
EqualsProto(R"pb(str: "00000077")pb")))))));
test_sink.StartCapturingLogs();
LOG(INFO) << std::setfill('0') << std::setw(8) << value;
}
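// A user-defined operator<< that also switches the stream to hex; the test
// below verifies that this manipulator state persists into later insertions
// in the same LOG statement.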
class FromCustomClass {};
std::ostream& operator<<(std::ostream& os, const FromCustomClass&) {
return os << "FromCustomClass{}" << std::hex;
}
TEST(ManipulatorLogFormatTest, FromCustom) {
absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
FromCustomClass value;
auto comparison_stream = ComparisonStream();
comparison_stream << value << " " << 0x77;
EXPECT_CALL(test_sink,
Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
TextMessage(Eq("FromCustomClass{} 0x77")),
ENCODED_MESSAGE(HasValues(ElementsAre(
EqualsProto(R"pb(str: "FromCustomClass{}")pb"),
EqualsProto(R"pb(literal: " ")pb"),
EqualsProto(R"pb(str: "0x77")pb")))))));
test_sink.StartCapturingLogs();
LOG(INFO) << value << " " << 0x77;
}
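// An operator<< that inserts nothing should contribute no field to the
// encoded message.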
class StreamsNothing {};
std::ostream& operator<<(std::ostream& os, const StreamsNothing&) { return os; }
TEST(ManipulatorLogFormatTest, CustomClassStreamsNothing) {
absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
StreamsNothing value;
auto comparison_stream = ComparisonStream();
comparison_stream << value << 77;
EXPECT_CALL(test_sink,
Send(AllOf(TextMessage(MatchesOstream(comparison_stream)),
TextMessage(Eq("77")),
ENCODED_MESSAGE(HasValues(
ElementsAre(EqualsProto(R"pb(str: "77")pb")))))));
test_sink.StartCapturingLogs();
LOG(INFO) << value << 77;
}
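// Types formatted via AbslStringify go through absl::Format, so iostream
// manipulators such as std::hex do not affect them.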
struct PointPercentV {
template <typename Sink>
friend void AbslStringify(Sink& sink, const PointPercentV& p) {
absl::Format(&sink, "(%v, %v)", p.x, p.y);
}
int x = 10;
int y = 20;
};
TEST(ManipulatorLogFormatTest, IOManipsDoNotAffectAbslStringify) {
absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
PointPercentV p;
EXPECT_CALL(
test_sink,
Send(AllOf(TextMessage(Eq("(10, 20)")), TextMessage(Eq(absl::StrCat(p))),
ENCODED_MESSAGE(HasValues(
ElementsAre(EqualsProto(R"pb(str: "(10, 20)")pb")))))));
test_sink.StartCapturingLogs();
LOG(INFO) << std::hex << p;
}
TEST(StructuredLoggingOverflowTest, TruncatesStrings) {
absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
EXPECT_CALL(
test_sink,
Send(AllOf(
TextMessage(AllOf(
SizeIs(AllOf(Ge(absl::log_internal::kLogMessageBufferSize - 256),
Le(absl::log_internal::kLogMessageBufferSize))),
Each(Eq('x')))),
ENCODED_MESSAGE(HasOneStrThat(AllOf(
SizeIs(AllOf(Ge(absl::log_internal::kLogMessageBufferSize - 256),
Le(absl::log_internal::kLogMessageBufferSize))),
Each(Eq('x'))))))));
test_sink.StartCapturingLogs();
LOG(INFO) << std::string(2 * absl::log_internal::kLogMessageBufferSize, 'x');
}
struct StringLike {
absl::string_view data;
};
std::ostream& operator<<(std::ostream& os, StringLike str) {
return os << str.data;
}
TEST(StructuredLoggingOverflowTest, TruncatesInsertionOperators) {
absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
EXPECT_CALL(
test_sink,
Send(AllOf(
TextMessage(AllOf(
SizeIs(AllOf(Ge(absl::log_internal::kLogMessageBufferSize - 256),
Le(absl::log_internal::kLogMessageBufferSize))),
Each(Eq('x')))),
ENCODED_MESSAGE(HasOneStrThat(AllOf(
SizeIs(AllOf(Ge(absl::log_internal::kLogMessageBufferSize - 256),
Le(absl::log_internal::kLogMessageBufferSize))),
Each(Eq('x'))))))));
test_sink.StartCapturingLogs();
LOG(INFO) << StringLike{
std::string(2 * absl::log_internal::kLogMessageBufferSize, 'x')};
}
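// Logs an oversized string through a one-shot sink, with no log prefix
// attached, and returns how many characters actually survive truncation.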
size_t MaxLogFieldLengthNoPrefix() {
class StringLengthExtractorSink : public absl::LogSink {
public:
void Send(const absl::LogEntry& entry) override {
CHECK(!size_.has_value());
CHECK_EQ(entry.text_message().find_first_not_of('x'),
absl::string_view::npos);
size_.emplace(entry.text_message().size());
}
size_t size() const {
CHECK(size_.has_value());
return *size_;
}
private:
absl::optional<size_t> size_;
} extractor_sink;
LOG(INFO).NoPrefix().ToSinkOnly(&extractor_sink)
<< std::string(2 * absl::log_internal::kLogMessageBufferSize, 'x');
return extractor_sink.size();
}
TEST(StructuredLoggingOverflowTest, TruncatesStringsCleanly) {
const size_t longest_fit = MaxLogFieldLengthNoPrefix();
{
absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
EXPECT_CALL(test_sink,
Send(AllOf(ENCODED_MESSAGE(HasOneStrThat(
AllOf(SizeIs(longest_fit), Each(Eq('x'))))),
RawEncodedMessage(AsString(EndsWith("x"))))));
test_sink.StartCapturingLogs();
LOG(INFO).NoPrefix() << std::string(longest_fit, 'x') << "y";
}
{
absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
EXPECT_CALL(test_sink,
Send(AllOf(ENCODED_MESSAGE(HasOneStrThat(
AllOf(SizeIs(longest_fit - 1), Each(Eq('x'))))),
RawEncodedMessage(AsString(EndsWith("x"))))));
test_sink.StartCapturingLogs();
LOG(INFO).NoPrefix() << std::string(longest_fit - 1, 'x') << "y";
}
{
absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
EXPECT_CALL(test_sink,
Send(AllOf(ENCODED_MESSAGE(HasOneStrThat(
AllOf(SizeIs(longest_fit - 2), Each(Eq('x'))))),
RawEncodedMessage(AsString(EndsWith("x"))))));
test_sink.StartCapturingLogs();
LOG(INFO).NoPrefix() << std::string(longest_fit - 2, 'x') << "y";
}
{
absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
EXPECT_CALL(test_sink,
Send(AllOf(ENCODED_MESSAGE(HasOneStrThat(
AllOf(SizeIs(longest_fit - 3), Each(Eq('x'))))),
RawEncodedMessage(AsString(EndsWith("x"))))));
test_sink.StartCapturingLogs();
LOG(INFO).NoPrefix() << std::string(longest_fit - 3, 'x') << "y";
}
{
absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
EXPECT_CALL(test_sink,
Send(AllOf(ENCODED_MESSAGE(HasOneStrAndOneLiteralThat(
AllOf(SizeIs(longest_fit - 4), Each(Eq('x'))),
IsEmpty())),
RawEncodedMessage(Not(AsString(EndsWith("x")))))));
test_sink.StartCapturingLogs();
LOG(INFO).NoPrefix() << std::string(longest_fit - 4, 'x') << "y";
}
{
absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
EXPECT_CALL(
test_sink,
Send(AllOf(ENCODED_MESSAGE(HasOneStrAndOneLiteralThat(
AllOf(SizeIs(longest_fit - 5), Each(Eq('x'))), Eq("y"))),
RawEncodedMessage(AsString(EndsWith("y"))))));
test_sink.StartCapturingLogs();
LOG(INFO).NoPrefix() << std::string(longest_fit - 5, 'x') << "y";
}
}
TEST(StructuredLoggingOverflowTest, TruncatesInsertionOperatorsCleanly) {
const size_t longest_fit = MaxLogFieldLengthNoPrefix();
{
absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
EXPECT_CALL(test_sink,
Send(AllOf(ENCODED_MESSAGE(HasOneStrThat(
AllOf(SizeIs(longest_fit), Each(Eq('x'))))),
RawEncodedMessage(AsString(EndsWith("x"))))));
test_sink.StartCapturingLogs();
LOG(INFO).NoPrefix() << std::string(longest_fit, 'x') << StringLike{"y"};
}
{
absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
EXPECT_CALL(test_sink,
Send(AllOf(ENCODED_MESSAGE(HasOneStrThat(
AllOf(SizeIs(longest_fit - 1), Each(Eq('x'))))),
RawEncodedMessage(AsString(EndsWith("x"))))));
test_sink.StartCapturingLogs();
LOG(INFO).NoPrefix() << std::string(longest_fit - 1, 'x')
<< StringLike{"y"};
}
{
absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
EXPECT_CALL(test_sink,
Send(AllOf(ENCODED_MESSAGE(HasOneStrThat(
AllOf(SizeIs(longest_fit - 2), Each(Eq('x'))))),
RawEncodedMessage(AsString(EndsWith("x"))))));
test_sink.StartCapturingLogs();
LOG(INFO).NoPrefix() << std::string(longest_fit - 2, 'x')
<< StringLike{"y"};
}
{
absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
EXPECT_CALL(test_sink,
Send(AllOf(ENCODED_MESSAGE(HasOneStrThat(
AllOf(SizeIs(longest_fit - 3), Each(Eq('x'))))),
RawEncodedMessage(AsString(EndsWith("x"))))));
test_sink.StartCapturingLogs();
LOG(INFO).NoPrefix() << std::string(longest_fit - 3, 'x')
<< StringLike{"y"};
}
{
absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
EXPECT_CALL(test_sink,
Send(AllOf(ENCODED_MESSAGE(HasOneStrThat(
AllOf(SizeIs(longest_fit - 4), Each(Eq('x'))))),
RawEncodedMessage(AsString(EndsWith("x"))))));
test_sink.StartCapturingLogs();
LOG(INFO).NoPrefix() << std::string(longest_fit - 4, 'x')
<< StringLike{"y"};
}
{
absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
EXPECT_CALL(
test_sink,
Send(AllOf(ENCODED_MESSAGE(HasTwoStrsThat(
AllOf(SizeIs(longest_fit - 5), Each(Eq('x'))), Eq("y"))),
RawEncodedMessage(AsString(EndsWith("y"))))));
test_sink.StartCapturingLogs();
LOG(INFO).NoPrefix() << std::string(longest_fit - 5, 'x')
<< StringLike{"y"};
}
}
} | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/log/internal/log_format.cc | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/log/log_format_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4 |
81f8af8a-489b-4b14-9049-bb797e3be1da | cpp | google/tensorstore | aws_credentials_resource | tensorstore/kvstore/s3/aws_credentials_resource.cc | tensorstore/kvstore/s3/aws_credentials_resource_test.cc | #include "tensorstore/kvstore/s3/aws_credentials_resource.h"
#include <stddef.h>
#include <cassert>
#include <memory>
#include <optional>
#include <type_traits>
#include <utility>
#include "absl/status/status.h"
#include <nlohmann/json.hpp>
#include "tensorstore/context.h"
#include "tensorstore/context_resource_provider.h"
#include "tensorstore/internal/http/curl_transport.h"
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/json_serialization_options.h"
#include "tensorstore/kvstore/s3/credentials/aws_credentials.h"
#include "tensorstore/kvstore/s3/credentials/default_credential_provider.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status.h"
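// Context resource supplying AWS credentials to the S3 kvstore driver: either
// an explicitly anonymous spec, or a credential provider resolved from the
// configured profile, credentials file, and/or metadata endpoint.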
namespace jb = tensorstore::internal_json_binding;
namespace tensorstore {
namespace internal_kvstore_s3 {
using Spec = ::tensorstore::internal_kvstore_s3::AwsCredentialsResource::Spec;
using Resource =
::tensorstore::internal_kvstore_s3::AwsCredentialsResource::Resource;
const internal::ContextResourceRegistration<AwsCredentialsResource>
aws_credentials_registration;
Result<Resource> AwsCredentialsResource::Create(
const Spec& spec, internal::ContextResourceCreationContext context) const {
if (spec.anonymous) {
return Resource{spec, nullptr};
}
auto result = GetAwsCredentialProvider(
spec.filename, spec.profile, spec.metadata_endpoint,
internal_http::GetDefaultHttpTransport());
if (!result.ok() && absl::IsNotFound(result.status())) {
return Resource{spec, nullptr};
}
TENSORSTORE_RETURN_IF_ERROR(result);
return Resource{spec, *std::move(result)};
}
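// A null provider means anonymous access, reported as nullopt; a NotFound
// from the provider is likewise treated as "no credentials" rather than an
// error.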
Result<std::optional<AwsCredentials>>
AwsCredentialsResource::Resource::GetCredentials() {
if (!credential_provider_) return std::nullopt;
  auto credential_result = credential_provider_->GetCredentials();
  if (!credential_result.ok() &&
      absl::IsNotFound(credential_result.status())) {
    return std::nullopt;
  }
  return credential_result;
}
namespace {
static constexpr auto kAnonymousBinder = jb::Object(jb::Member(
"anonymous", jb::Projection<&Spec::anonymous>(
jb::Validate([](const auto& options, bool* x) {
if (*x != true) {
return absl::InvalidArgumentError(
"\"anonymous\" must be true or not present in "
"\"aws_credentials\"");
}
return absl::OkStatus();
}))));
static constexpr auto kParameterBinder = jb::Object(
jb::OptionalMember("profile", jb::Projection<&Spec::profile>()),
jb::OptionalMember("filename", jb::Projection<&Spec::filename>()),
jb::OptionalMember("metadata_endpoint",
jb::Projection<&Spec::metadata_endpoint>()));
}
absl::Status AwsCredentialsResource::FromJsonImpl(
const JsonSerializationOptions& options, Spec* spec, ::nlohmann::json* j) {
if (auto* j_obj = j->template get_ptr<::nlohmann::json::object_t*>();
j_obj && j_obj->find("anonymous") != j_obj->end()) {
return kAnonymousBinder(std::true_type{}, options, spec, j);
}
return kParameterBinder(std::true_type{}, options, spec, j);
}
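// Illustrative specs accepted by the binder above (mirroring the unit tests):
//   {"anonymous": true}  or  {"profile": "my_profile"}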
absl::Status AwsCredentialsResource::ToJsonImpl(
const JsonSerializationOptions& options, const Spec* spec,
::nlohmann::json* j) {
if (spec->anonymous) {
return kAnonymousBinder(std::false_type{}, options, spec, j);
}
return kParameterBinder(std::false_type{}, options, spec, j);
}
}
} | #include "tensorstore/kvstore/s3/aws_credentials_resource.h"
#include <optional>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include <nlohmann/json_fwd.hpp>
#include <nlohmann/json.hpp>
#include "tensorstore/context.h"
#include "tensorstore/util/status_testutil.h"
using ::tensorstore::Context;
using ::tensorstore::MatchesStatus;
using ::tensorstore::internal_kvstore_s3::AwsCredentialsResource;
namespace {
TEST(AwsCredentialsResourceTest, InvalidDirectSpec) {
EXPECT_THAT(Context::Resource<AwsCredentialsResource>::FromJson(nullptr),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Expected non-null value, but received: null"));
EXPECT_THAT(Context::Resource<AwsCredentialsResource>::FromJson(3),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Expected object, but received: 3"));
EXPECT_THAT(
Context::Resource<AwsCredentialsResource>::FromJson("anonymous"),
MatchesStatus(
absl::StatusCode::kInvalidArgument,
"Invalid reference to \"aws_credentials\" resource: \"anonymous\""));
}
TEST(AwsCredentialsResourceTest, Default) {
auto context = Context::Default();
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto resource_spec,
Context::Resource<AwsCredentialsResource>::FromJson("aws_credentials"));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto resource,
context.GetResource(resource_spec));
EXPECT_THAT(resource->spec.filename, ::testing::IsEmpty());
EXPECT_THAT(resource->spec.profile, ::testing::IsEmpty());
EXPECT_THAT(resource->spec.metadata_endpoint, ::testing::IsEmpty());
EXPECT_THAT(resource->spec.anonymous, false);
}
TEST(AwsCredentialsResourceTest, ExplicitDefault) {
auto context = Context::Default();
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto resource_spec, Context::Resource<AwsCredentialsResource>::FromJson(
::nlohmann::json::object_t()));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto resource,
context.GetResource(resource_spec));
EXPECT_THAT(resource->spec.filename, ::testing::IsEmpty());
EXPECT_THAT(resource->spec.profile, ::testing::IsEmpty());
EXPECT_THAT(resource->spec.metadata_endpoint, ::testing::IsEmpty());
EXPECT_THAT(resource->spec.anonymous, false);
}
TEST(AwsCredentialsResourceTest, ValidSpec) {
auto context = Context::Default();
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto resource_spec, Context::Resource<AwsCredentialsResource>::FromJson(
{{"profile", "my_profile"}}));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto resource,
context.GetResource(resource_spec));
EXPECT_THAT(resource->spec.filename, ::testing::IsEmpty());
EXPECT_THAT(resource->spec.profile, "my_profile");
EXPECT_THAT(resource->spec.metadata_endpoint, ::testing::IsEmpty());
EXPECT_THAT(resource->spec.anonymous, false);
}
TEST(AwsCredentialsResourceTest, ValidAnonymousSpec) {
auto context = Context::Default();
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto resource_spec, Context::Resource<AwsCredentialsResource>::FromJson(
{{"anonymous", true}}));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto resource,
context.GetResource(resource_spec));
EXPECT_THAT(resource->spec.filename, ::testing::IsEmpty());
EXPECT_THAT(resource->spec.profile, ::testing::IsEmpty());
EXPECT_THAT(resource->spec.metadata_endpoint, ::testing::IsEmpty());
EXPECT_THAT(resource->spec.anonymous, true);
EXPECT_THAT(resource->GetCredentials(),
tensorstore::IsOkAndHolds(::testing::Eq(std::nullopt)));
}
TEST(AwsCredentialsResourceTest, InvalidSpecs) {
EXPECT_THAT(Context::Resource<AwsCredentialsResource>::FromJson({
{"anonymous", true},
{"profile", "xyz"},
}),
MatchesStatus(absl::StatusCode::kInvalidArgument));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/s3/aws_credentials_resource.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/s3/aws_credentials_resource_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
8e76a92d-b3d4-4a7b-a177-4658f864cc89 | cpp | tensorflow/tensorflow | model_builder_helper | tensorflow/lite/delegates/gpu/common/model_builder_helper.cc | tensorflow/lite/delegates/gpu/common/model_builder_helper_test.cc | #include "tensorflow/lite/delegates/gpu/common/model_builder_helper.h"
#include <stddef.h>
#include <cstdint>
#include <cstring>
#include <limits>
#include <string>
#include <vector>
#include "fp16.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "tensorflow/lite/context_util.h"
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/delegates/gpu/common/data_type.h"
#include "tensorflow/lite/delegates/gpu/common/model.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/shape.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/tensor.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace gpu {
namespace {
absl::Status NewPassthroughNode(GraphFloat32* graph, Node* node,
const Value* output, Node** passthru_node) {
*passthru_node = graph->NewNode();
RETURN_IF_ERROR(graph->SetProducer((*passthru_node)->id, output->id));
Value* copy_output = graph->NewValue();
RETURN_IF_ERROR(graph->SetProducer(node->id, copy_output->id));
RETURN_IF_ERROR(graph->AddConsumer((*passthru_node)->id, copy_output->id));
copy_output->tensor = output->tensor;
copy_output->tensor.ref = -1;
return absl::OkStatus();
}
}
absl::Status GetNodeAndRegistration(TfLiteContext* context, int node_id,
TfLiteNode** tflite_node,
TfLiteRegistration** registration) {
if (context->GetNodeAndRegistration(context, node_id, tflite_node,
registration) != kTfLiteOk) {
return absl::InvalidArgumentError(absl::StrCat(
"Couldn't get node and registration info for op: ", node_id));
}
return absl::OkStatus();
}
DataType ToDataType(TfLiteType type) {
switch (type) {
case kTfLiteFloat32:
return DataType::FLOAT32;
case kTfLiteInt32:
return DataType::INT32;
case kTfLiteInt64:
return DataType::INT64;
case kTfLiteInt8:
return DataType::INT8;
case kTfLiteUInt8:
return DataType::UINT8;
case kTfLiteBool:
return DataType::BOOL;
default:
return DataType::UNKNOWN;
}
}
absl::Status ExtractTensorShape(const TfLiteTensor& tflite_tensor, BHWC* bhwc) {
const TfLiteIntArray* dims = tflite_tensor.dims;
switch (dims->size) {
case 1:
*bhwc = BHWC(dims->data[0], 1, 1, 1);
return absl::OkStatus();
case 2:
*bhwc = BHWC(dims->data[0], 1, 1, dims->data[1]);
return absl::OkStatus();
case 3:
*bhwc = BHWC(dims->data[0], 1, dims->data[1], dims->data[2]);
return absl::OkStatus();
case 4:
*bhwc = BHWC(dims->data[0], dims->data[1], dims->data[2], dims->data[3]);
return absl::OkStatus();
default:
return absl::InvalidArgumentError(absl::StrCat(
"Tensor \"", tflite_tensor.name ? tflite_tensor.name : "nullptr",
"\" has bad input dims size: ", dims->size, "."));
}
}
absl::Status ExtractAxisFromIndex(const TfLiteTensor& tflite_tensor, int index,
Axis* axis) {
const TfLiteIntArray* dims = tflite_tensor.dims;
if (index < 0) {
index = dims->size + index;
}
if (index < 0 || index >= dims->size) {
return absl::OutOfRangeError("Index for axis out of range");
}
std::vector<Axis> index_to_axis;
switch (dims->size) {
case 1:
index_to_axis = {Axis::BATCH};
break;
case 2:
index_to_axis = {Axis::BATCH, Axis::CHANNELS};
break;
case 3:
index_to_axis = {Axis::BATCH, Axis::WIDTH, Axis::CHANNELS};
break;
case 4:
index_to_axis = {Axis::BATCH, Axis::HEIGHT, Axis::WIDTH, Axis::CHANNELS};
break;
default:
return absl::UnavailableError("Unknown layout.");
}
*axis = index_to_axis[index];
return absl::OkStatus();
}
absl::Status ConvertTfLiteTensorToTensorRef(const TfLiteTensor& tflite_tensor,
TensorRef<BHWC>* tensor_ref) {
tensor_ref->type = ToDataType(tflite_tensor.type);
return ExtractTensorShape(tflite_tensor, &tensor_ref->shape);
}
absl::Status PopulateQuantParams(const TfLiteTensor& tensor,
QuantizationParams* quant_params) {
const TfLiteQuantization& quant = tensor.quantization;
if (quant.type != TfLiteQuantizationType::kTfLiteAffineQuantization) {
return absl::InvalidArgumentError(
absl::StrCat("Tensor not quantized: ", std::string(tensor.name)));
}
const TfLiteAffineQuantization* params =
static_cast<const TfLiteAffineQuantization*>(quant.params);
if (params->scale->size > 1) {
return absl::InvalidArgumentError(
absl::StrCat("Non-constant per-channel quantized tensor: ",
std::string(tensor.name)));
}
const float scale = params->scale->data[0];
const float zero_point = static_cast<float>(params->zero_point->data[0]);
float qmin_value = 0;
float qmax_value = 0;
if (tensor.type == kTfLiteUInt8) {
qmin_value = static_cast<float>(std::numeric_limits<uint8_t>::min());
qmax_value = static_cast<float>(std::numeric_limits<uint8_t>::max());
} else if (tensor.type == kTfLiteInt8) {
qmin_value = static_cast<float>(std::numeric_limits<int8_t>::min());
qmax_value = static_cast<float>(std::numeric_limits<int8_t>::max());
} else {
return absl::InvalidArgumentError(absl::StrCat(
"Type invalid for quantized tensor: ", std::string(tensor.name)));
}
quant_params->min = scale * (static_cast<float>(qmin_value) - zero_point);
quant_params->max = scale * (static_cast<float>(qmax_value) - zero_point);
quant_params->scale = scale;
return absl::OkStatus();
}
int GetNumberOfRuntimeInputsForNode(const TfLiteContext* context,
const TfLiteNode* tflite_node) {
int number_of_runtime_inputs = 0;
for (int i = 0; i < NumInputs(tflite_node); i++) {
const TfLiteTensor* tensor =
GetOptionalInputTensor(context, tflite_node, i);
if (tensor != nullptr && !IsConstantTensor(tensor)) {
number_of_runtime_inputs++;
}
}
return number_of_runtime_inputs;
}
int GetNumberOfConstInputsForNode(const TfLiteContext* context,
const TfLiteNode* tflite_node) {
return NumInputs(tflite_node) -
GetNumberOfRuntimeInputsForNode(context, tflite_node);
}
absl::Status CheckInputsOutputs(const TfLiteContext* context,
const TfLiteNode* tflite_node,
int runtime_inputs, int outputs) {
const int runtime_inputs_from_model =
GetNumberOfRuntimeInputsForNode(context, tflite_node);
if (runtime_inputs_from_model != runtime_inputs) {
return absl::InternalError(absl::StrCat(
"Expected ", runtime_inputs, " runtime input tensor(s), but node has ",
runtime_inputs_from_model, " runtime input(s)."));
}
const int outputs_from_model = NumOutputs(tflite_node);
if (outputs_from_model != outputs) {
return absl::InternalError(absl::StrCat("Expected ", outputs,
" output tensor(s), but node has ",
outputs_from_model, " output(s)."));
}
return absl::OkStatus();
}
absl::Status CheckInputsConstsOutputs(const TfLiteContext* context,
const TfLiteNode* tflite_node,
int runtime_inputs, int const_inputs,
int outputs) {
const int const_inputs_from_model =
GetNumberOfConstInputsForNode(context, tflite_node);
if (const_inputs_from_model != const_inputs) {
return absl::InternalError(absl::StrCat(
"Expected ", const_inputs, " const input tensor(s), but node has ",
const_inputs_from_model, " const input(s)."));
}
return CheckInputsOutputs(context, tflite_node, runtime_inputs, outputs);
}
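// Widens IEEE half-precision values to float32, one element at a time.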
void ConvertFloat16ToFloat32(size_t num_elements, const uint16_t* src,
float* dst) {
for (size_t i = 0; i < num_elements; i++) {
*dst++ = fp16_ieee_to_fp32_value(*src++);
}
}
template <>
absl::Status CreateVectorCopyData<float>(const TfLiteTensor& src, float* dst) {
switch (src.type) {
case kTfLiteFloat32:
std::memcpy(dst, src.data.f, src.bytes);
return absl::OkStatus();
case kTfLiteFloat16:
ConvertFloat16ToFloat32(NumElements(&src),
reinterpret_cast<uint16_t const*>(src.data.f16),
dst);
return absl::OkStatus();
case kTfLiteInt8:
DequantizeConstantTensor(src, src.data.int8, dst);
return absl::OkStatus();
case kTfLiteUInt8:
DequantizeConstantTensor(src, src.data.uint8, dst);
return absl::OkStatus();
case kTfLiteInt32:
DequantizeConstantTensor(src, src.data.i32, dst);
return absl::OkStatus();
default:
return absl::InvalidArgumentError(
"Unsupported data type for float32 tensor");
}
}
std::string GetDimensionString(const TfLiteIntArray* dimensions) {
return absl::StrJoin(TfLiteIntArrayView(dimensions), "x");
}
absl::Status SetAllDimensions(const TfLiteIntArray* dimensions, Scalar* shape) {
if (dimensions->size < 0) {
return absl::InvalidArgumentError("Invalid Scalar dimensions");
}
for (int i = 0; i < dimensions->size; ++i) {
if (dimensions->data[i] != 1) {
return absl::InvalidArgumentError(absl::StrCat(
GetDimensionString(dimensions), " cannot be reduced to scalar."));
}
}
shape->v = 1;
return absl::OkStatus();
}
absl::Status CheckIfLinearConvertible(const TfLiteIntArray* dimensions) {
if (dimensions->size <= 0) {
return absl::InvalidArgumentError("Dimension is empty.");
}
for (int i = 0; i < dimensions->size - 1; ++i) {
if (dimensions->data[i] != 1) {
return absl::InvalidArgumentError(absl::StrCat(
GetDimensionString(dimensions), " cannot be reduced to linear."));
}
}
return absl::OkStatus();
}
absl::Status SetAllDimensions(const TfLiteIntArray* dimensions, Linear* shape) {
RETURN_IF_ERROR(CheckIfLinearConvertible(dimensions));
shape->v = dimensions->data[dimensions->size - 1];
return absl::OkStatus();
}
absl::Status SetAllDimensions(const TfLiteIntArray* dimensions, HWC* shape) {
if (dimensions->size == 3) {
shape->h = dimensions->data[0];
shape->w = dimensions->data[1];
shape->c = dimensions->data[2];
return absl::OkStatus();
}
if (dimensions->size == 4) {
if (dimensions->data[0] != 1) {
return absl::UnimplementedError("Batch size is not equal to 1.");
}
shape->h = dimensions->data[1];
shape->w = dimensions->data[2];
shape->c = dimensions->data[3];
return absl::OkStatus();
}
return absl::InvalidArgumentError(
absl::StrCat("Expected a 3D tensor of shape HxWxC or a 4D tensor of "
"shape 1xHxWxC but got ",
GetDimensionString(dimensions)));
}
absl::Status SetAllDimensions(const TfLiteIntArray* dimensions, HW* shape) {
if (dimensions->size != 2) {
return absl::InvalidArgumentError(
absl::StrCat("Expected a 2D tensor of shape HxW but got ",
GetDimensionString(dimensions)));
}
shape->h = dimensions->data[0];
shape->w = dimensions->data[1];
return absl::OkStatus();
}
absl::Status SetAllDimensions(const TfLiteIntArray* dimensions, OHWI* shape) {
if (dimensions->size != 4) {
return absl::InvalidArgumentError(
absl::StrCat("Expected a 4D tensor of shape OxHxWxI but got ",
GetDimensionString(dimensions)));
}
shape->o = dimensions->data[0];
shape->h = dimensions->data[1];
shape->w = dimensions->data[2];
shape->i = dimensions->data[3];
return absl::OkStatus();
}
absl::Status SetAllDimensions(const TfLiteIntArray* dimensions, BHWC* shape) {
if (dimensions->size != 4) {
return absl::InvalidArgumentError(
absl::StrCat("Expected a 4D tensor of shape BxHxWxC but got ",
GetDimensionString(dimensions)));
}
shape->b = dimensions->data[0];
shape->h = dimensions->data[1];
shape->w = dimensions->data[2];
shape->c = dimensions->data[3];
return absl::OkStatus();
}
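// If the TFLite op carries a fused activation, appends a passthrough
// RELU/TANH/SIGMOID node after this node's single output.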
absl::Status MaybeFuseActivation(TfLiteFusedActivation fused_activation,
GraphFloat32* graph, Node* node) {
const auto outputs = graph->FindOutputs(node->id);
if (outputs.size() != 1) {
return absl::InternalError("Number of outputs != 1");
}
switch (fused_activation) {
case kTfLiteActNone:
return absl::OkStatus();
case kTfLiteActRelu:
case kTfLiteActReluN1To1:
case kTfLiteActRelu6: {
ReLUAttributes attr;
attr.activation_max =
fused_activation == kTfLiteActRelu
? 0.0f
: (fused_activation == kTfLiteActReluN1To1 ? 1.0f : 6.0f);
attr.activation_min =
fused_activation == kTfLiteActReluN1To1 ? -1.0f : 0.0f;
Node* activation_node;
RETURN_IF_ERROR(
NewPassthroughNode(graph, node, outputs[0], &activation_node));
activation_node->operation.type = ToString(OperationType::RELU);
activation_node->operation.attributes = attr;
return absl::OkStatus();
}
case kTfLiteActTanh: {
Node* activation_node;
RETURN_IF_ERROR(
NewPassthroughNode(graph, node, outputs[0], &activation_node));
activation_node->operation.type = ToString(OperationType::TANH);
return absl::OkStatus();
}
case kTfLiteActSigmoid: {
Node* activation_node;
RETURN_IF_ERROR(
NewPassthroughNode(graph, node, outputs[0], &activation_node));
activation_node->operation.type = ToString(OperationType::SIGMOID);
return absl::OkStatus();
    }
default:
return absl::NotFoundError(
absl::StrCat("Unsupported fused activation: ", fused_activation));
}
}
}
} | #include "tensorflow/lite/delegates/gpu/common/model_builder_helper.h"
#include <cstdint>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/core/c/common.h"
namespace tflite {
namespace gpu {
namespace {
using ::testing::ElementsAre;
TEST(ModelBuilderHelperTest, CreateVectorCopyDataDifferentSize) {
TfLiteTensor tflite_tensor;
tflite_tensor.type = kTfLiteInt32;
int32_t src_data[4] = {1, 2, 3, 4};
tflite_tensor.data.i32 = src_data;
tflite_tensor.dims = TfLiteIntArrayCreate(1);
tflite_tensor.dims->data[0] = sizeof(src_data) / sizeof(src_data[0]);
tflite_tensor.bytes = sizeof(src_data);
int16_t dst[4];
ASSERT_OK(CreateVectorCopyData(tflite_tensor, dst));
EXPECT_THAT(dst, ElementsAre(1, 2, 3, 4));
TfLiteIntArrayFree(tflite_tensor.dims);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/common/model_builder_helper.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/common/model_builder_helper_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
9d6eea50-7e9c-414d-8036-f4cbf5f26b7e | cpp | tensorflow/tensorflow | inline_partitionedcall | tensorflow/tools/graph_transforms/inline_partitionedcall.cc | tensorflow/tools/graph_transforms/inline_partitionedcall_test.cc | #include <string>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/op_def.pb.h"
#include "tensorflow/tools/graph_transforms/transform_utils.h"
namespace tensorflow {
namespace graph_transforms {
constexpr char kPartitionedCallOpName[] = "PartitionedCall";
constexpr char kFunctionAttrName[] = "f";
namespace {
absl::optional<FunctionDef> GetFunctionByNameFromLibrary(
const GraphDef& graph, absl::string_view function_name) {
for (const auto& fct : graph.library().function()) {
if (fct.signature().name() == function_name) {
return fct;
}
}
return {};
}
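// Collapses a function-body input of the form "node:port:index" to the
// "node:index" form used in the outer graph, e.g. "mul:z:0" -> "mul:0".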
std::string NormalizeNodeDefInput(const std::string& input_name) {
std::vector<std::string> name_parts =
absl::StrSplit(input_name, absl::ByChar(':'));
if (name_parts.size() > 2) {
return absl::StrCat(name_parts[0], ":", name_parts.back());
}
return input_name;
}
}
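// Inlines every PartitionedCall node: the callee's body is copied in under
// the call's name as a prefix, the call's inputs are wired to the function's
// arguments, and consumers of the call's outputs are remapped to the inlined
// return nodes.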
Status InlinePartitionedCall(const GraphDef& input_graph_def,
const TransformFuncContext& context,
GraphDef* output_graph_def) {
output_graph_def->Clear();
absl::flat_hash_map<std::string, std::string> remap_input;
for (const NodeDef& node : input_graph_def.node()) {
if (node.op() == kPartitionedCallOpName) {
if (node.attr().count(kFunctionAttrName) == 0) {
return Status(
absl::StatusCode::kNotFound,
"Node " + node.name() + " has no attribute: " + kFunctionAttrName);
}
      if (!node.attr().at(kFunctionAttrName).has_func()) {
        return Status(absl::StatusCode::kNotFound,
                      "Node " + node.name() + " attribute '" +
                          kFunctionAttrName + "' does not name a function");
      }
const std::string function_name =
node.attr().at(kFunctionAttrName).func().name();
absl::optional<FunctionDef> function =
GetFunctionByNameFromLibrary(input_graph_def, function_name);
if (!function.has_value()) {
return Status(absl::StatusCode::kNotFound,
"function " + function_name + " Not found");
}
const std::string prefix = node.name();
const int kOutputArgumentCount =
function->signature().output_arg().size();
for (int k = 0; k < kOutputArgumentCount; ++k) {
const std::string function_arg_output_name =
function->ret().at(function->signature().output_arg()[k].name());
remap_input.insert_or_assign(
CanonicalInputName(absl::StrCat(node.name(), ":", k)),
absl::StrCat(prefix, "/",
NormalizeNodeDefInput(function_arg_output_name)));
}
const int kInputArgumentCount = function->signature().input_arg().size();
if (node.input().size() != kInputArgumentCount) {
return Status(absl::StatusCode::kInvalidArgument,
"Called function " + function_name +
" has invalid input signature.");
}
absl::flat_hash_map<std::string, std::string> input_argument_map;
for (int k = 0; k < kInputArgumentCount; ++k) {
const std::string canonical_name =
CanonicalInputName(function->signature().input_arg()[k].name());
input_argument_map.insert_or_assign(canonical_name, node.input()[k]);
}
for (const NodeDef& function_node : function->node_def()) {
NodeDef* new_node = output_graph_def->mutable_node()->Add();
*new_node = function_node;
new_node->set_name(absl::StrCat(prefix, "/", function_node.name()));
absl::c_transform(
*new_node->mutable_input(), new_node->mutable_input()->begin(),
[prefix, input_argument_map](const std::string& input_name) {
const std::string canonical_input_name =
CanonicalInputName(input_name);
if (input_argument_map.find(canonical_input_name) !=
input_argument_map.end()) {
return input_argument_map.at(canonical_input_name);
}
return absl::StrCat(prefix, "/",
NormalizeNodeDefInput(input_name));
});
}
} else {
NodeDef* new_node = output_graph_def->mutable_node()->Add();
*new_node = node;
}
}
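  // Second pass: point former consumers of each PartitionedCall output at the
  // corresponding inlined return value.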
for (NodeDef& node : *output_graph_def->mutable_node()) {
absl::c_transform(
*node.mutable_input(), node.mutable_input()->begin(),
[remap_input](const std::string& input_name) {
const std::string canonical_input_name =
CanonicalInputName(input_name);
if (remap_input.find(canonical_input_name) != remap_input.end()) {
return remap_input.at(canonical_input_name);
}
return input_name;
});
}
return OkStatus();
}
REGISTER_GRAPH_TRANSFORM("inline_partitionedcall", InlinePartitionedCall);
}
} | #include <algorithm>
#include <string>
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/tools/graph_transforms/transform_utils.h"
namespace tensorflow {
namespace graph_transforms {
constexpr char kGraphDefWithPartitionedCall[] =
"node {\n"
" name: \"y\"\n"
" op: \"Placeholder\"\n"
"}\n"
"node {\n"
" name: \"sub/y\"\n"
" op: \"Const\"\n"
"}\n"
"node {\n"
" name: \"PartitionedCall\"\n"
" op: \"PartitionedCall\"\n"
" input: \"y\"\n"
" input: \"sub/y\"\n"
" attr {\n"
" key: \"f\"\n"
" value {\n"
" func {\n"
" name: \"__inference_simple_add_14\"\n"
" }\n"
" }\n"
" }\n"
"}\n"
"node {\n"
" name: \"add/y\"\n"
" op: \"Const\"\n"
"}\n"
"node {\n"
" name: \"add\"\n"
" op: \"AddV2\"\n"
" input: \"PartitionedCall\"\n"
" input: \"add/y\"\n"
"}\n"
"node {\n"
" name: \"Identity\"\n"
" op: \"Identity\"\n"
" input: \"add\"\n"
"}\n"
"library {\n"
" function {\n"
" signature {\n"
" name: \"__inference_simple_add_14\"\n"
" input_arg {\n"
" name: \"x\"\n"
" type: DT_FLOAT\n"
" }\n"
" input_arg {\n"
" name: \"y\"\n"
" type: DT_FLOAT\n"
" }\n"
" output_arg {\n"
" name: \"identity\"\n"
" type: DT_FLOAT\n"
" }\n"
" }\n"
" node_def {\n"
" name: \"mul\"\n"
" op: \"Mul\"\n"
" input: \"x\"\n"
" input: \"y\"\n"
" }\n"
" node_def {\n"
" name: \"add/y\"\n"
" op: \"Const\"\n"
" }\n"
" node_def {\n"
" name: \"add\"\n"
" op: \"AddV2\"\n"
" input: \"mul:z:0\"\n"
" input: \"add/y:output:0\"\n"
" }\n"
" node_def {\n"
" name: \"Identity\"\n"
" op: \"Identity\"\n"
" input: \"add:z:0\"\n"
" }\n"
" ret {\n"
" key: \"identity\"\n"
" value: \"Identity:output:0\"\n"
" }\n"
" }\n"
"}\n";
Status InlinePartitionedCall(const GraphDef& input_graph_def,
const TransformFuncContext& context,
GraphDef* output_graph_def);
TEST(InlinePartitionedCallTest, Inlining) {
GraphDef in_graph;
EXPECT_TRUE(::tensorflow::protobuf::TextFormat::ParseFromString(
kGraphDefWithPartitionedCall, &in_graph));
GraphDef result;
TransformFuncContext context;
context.input_names = {"y"};
context.output_names = {"Identity"};
TF_ASSERT_OK(InlinePartitionedCall(in_graph, context, &result));
EXPECT_TRUE(std::none_of(
result.node().cbegin(), result.node().cend(),
[](const NodeDef& node) { return node.op() == "PartitionedCall"; }));
EXPECT_EQ(9, result.node().size());
TF_EXPECT_OK(IsGraphValid(result));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/tools/graph_transforms/inline_partitionedcall.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/tools/graph_transforms/inline_partitionedcall_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
5411c9c4-0335-4de2-af86-ce114032e1cb | cpp | google/cel-cpp | value_testing | common/value_testing.cc | common/value_testing_test.cc | #include "common/value_testing.h"
#include <cstdint>
#include <ostream>
#include <string>
#include <utility>
#include "gtest/gtest.h"
#include "absl/status/status.h"
#include "absl/time/time.h"
#include "common/casting.h"
#include "common/value.h"
#include "common/value_kind.h"
#include "internal/testing.h"
namespace cel {
void PrintTo(const Value& value, std::ostream* os) { *os << value << "\n"; }
namespace test {
namespace {
using ::testing::Matcher;
template <typename Type>
constexpr ValueKind ToValueKind() {
if constexpr (std::is_same_v<Type, BoolValue>) {
return ValueKind::kBool;
} else if constexpr (std::is_same_v<Type, IntValue>) {
return ValueKind::kInt;
} else if constexpr (std::is_same_v<Type, UintValue>) {
return ValueKind::kUint;
} else if constexpr (std::is_same_v<Type, DoubleValue>) {
return ValueKind::kDouble;
} else if constexpr (std::is_same_v<Type, StringValue>) {
return ValueKind::kString;
} else if constexpr (std::is_same_v<Type, BytesValue>) {
return ValueKind::kBytes;
} else if constexpr (std::is_same_v<Type, DurationValue>) {
return ValueKind::kDuration;
} else if constexpr (std::is_same_v<Type, TimestampValue>) {
return ValueKind::kTimestamp;
} else if constexpr (std::is_same_v<Type, ErrorValue>) {
return ValueKind::kError;
} else if constexpr (std::is_same_v<Type, MapValue>) {
return ValueKind::kMap;
} else if constexpr (std::is_same_v<Type, ListValue>) {
return ValueKind::kList;
} else if constexpr (std::is_same_v<Type, StructValue>) {
return ValueKind::kStruct;
} else if constexpr (std::is_same_v<Type, OpaqueValue>) {
return ValueKind::kOpaque;
} else {
return ValueKind::kError;
}
}
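// Matches a Value of a simple kind by unwrapping NativeValue() and delegating
// to a matcher over the native representation.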
template <typename Type, typename NativeType>
class SimpleTypeMatcherImpl : public testing::MatcherInterface<const Value&> {
public:
using MatcherType = Matcher<NativeType>;
explicit SimpleTypeMatcherImpl(MatcherType&& matcher)
: matcher_(std::forward<MatcherType>(matcher)) {}
bool MatchAndExplain(const Value& v,
testing::MatchResultListener* listener) const override {
return InstanceOf<Type>(v) &&
matcher_.MatchAndExplain(Cast<Type>(v).NativeValue(), listener);
}
void DescribeTo(std::ostream* os) const override {
*os << absl::StrCat("kind is ", ValueKindToString(ToValueKind<Type>()),
" and ");
matcher_.DescribeTo(os);
}
private:
MatcherType matcher_;
};
template <typename Type>
class StringTypeMatcherImpl : public testing::MatcherInterface<const Value&> {
public:
using MatcherType = Matcher<std::string>;
explicit StringTypeMatcherImpl(MatcherType matcher)
: matcher_((std::move(matcher))) {}
bool MatchAndExplain(const Value& v,
testing::MatchResultListener* listener) const override {
return InstanceOf<Type>(v) && matcher_.Matches(Cast<Type>(v).ToString());
}
void DescribeTo(std::ostream* os) const override {
*os << absl::StrCat("kind is ", ValueKindToString(ToValueKind<Type>()),
" and ");
matcher_.DescribeTo(os);
}
private:
MatcherType matcher_;
};
template <typename Type>
class AbstractTypeMatcherImpl : public testing::MatcherInterface<const Value&> {
public:
using MatcherType = Matcher<Type>;
explicit AbstractTypeMatcherImpl(MatcherType&& matcher)
: matcher_(std::forward<MatcherType>(matcher)) {}
bool MatchAndExplain(const Value& v,
testing::MatchResultListener* listener) const override {
return v.Is<Type>() && matcher_.Matches(v.template Get<Type>());
}
void DescribeTo(std::ostream* os) const override {
*os << absl::StrCat("kind is ", ValueKindToString(ToValueKind<Type>()),
" and ");
matcher_.DescribeTo(os);
}
private:
MatcherType matcher_;
};
class OptionalValueMatcherImpl
: public testing::MatcherInterface<const Value&> {
public:
explicit OptionalValueMatcherImpl(ValueMatcher matcher)
: matcher_(std::move(matcher)) {}
bool MatchAndExplain(const Value& v,
testing::MatchResultListener* listener) const override {
if (!InstanceOf<OptionalValue>(v)) {
*listener << "wanted OptionalValue, got " << ValueKindToString(v.kind());
return false;
}
const auto& optional_value = Cast<OptionalValue>(v);
if (!optional_value->HasValue()) {
*listener << "OptionalValue is not engaged";
return false;
}
return matcher_.MatchAndExplain(optional_value->Value(), listener);
}
void DescribeTo(std::ostream* os) const override {
*os << "is OptionalValue that is engaged with value whose ";
matcher_.DescribeTo(os);
}
private:
ValueMatcher matcher_;
};
MATCHER(OptionalValueIsEmptyImpl, "is empty OptionalValue") {
const Value& v = arg;
if (!InstanceOf<OptionalValue>(v)) {
*result_listener << "wanted OptionalValue, got "
<< ValueKindToString(v.kind());
return false;
}
const auto& optional_value = Cast<OptionalValue>(v);
*result_listener << (optional_value.HasValue() ? "is not empty" : "is empty");
return !optional_value->HasValue();
}
}
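// Factory functions returning gtest matchers over cel::Value, e.g.
//   EXPECT_THAT(value, IntValueIs(42));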
ValueMatcher BoolValueIs(Matcher<bool> m) {
return ValueMatcher(new SimpleTypeMatcherImpl<BoolValue, bool>(std::move(m)));
}
ValueMatcher IntValueIs(Matcher<int64_t> m) {
return ValueMatcher(
new SimpleTypeMatcherImpl<IntValue, int64_t>(std::move(m)));
}
ValueMatcher UintValueIs(Matcher<uint64_t> m) {
return ValueMatcher(
new SimpleTypeMatcherImpl<UintValue, uint64_t>(std::move(m)));
}
ValueMatcher DoubleValueIs(Matcher<double> m) {
return ValueMatcher(
new SimpleTypeMatcherImpl<DoubleValue, double>(std::move(m)));
}
ValueMatcher TimestampValueIs(Matcher<absl::Time> m) {
return ValueMatcher(
new SimpleTypeMatcherImpl<TimestampValue, absl::Time>(std::move(m)));
}
ValueMatcher DurationValueIs(Matcher<absl::Duration> m) {
return ValueMatcher(
new SimpleTypeMatcherImpl<DurationValue, absl::Duration>(std::move(m)));
}
ValueMatcher ErrorValueIs(Matcher<absl::Status> m) {
return ValueMatcher(
new SimpleTypeMatcherImpl<ErrorValue, absl::Status>(std::move(m)));
}
ValueMatcher StringValueIs(Matcher<std::string> m) {
return ValueMatcher(new StringTypeMatcherImpl<StringValue>(std::move(m)));
}
ValueMatcher BytesValueIs(Matcher<std::string> m) {
return ValueMatcher(new StringTypeMatcherImpl<BytesValue>(std::move(m)));
}
ValueMatcher MapValueIs(Matcher<MapValue> m) {
return ValueMatcher(new AbstractTypeMatcherImpl<MapValue>(std::move(m)));
}
ValueMatcher ListValueIs(Matcher<ListValue> m) {
return ValueMatcher(new AbstractTypeMatcherImpl<ListValue>(std::move(m)));
}
ValueMatcher StructValueIs(Matcher<StructValue> m) {
return ValueMatcher(new AbstractTypeMatcherImpl<StructValue>(std::move(m)));
}
ValueMatcher OptionalValueIs(ValueMatcher m) {
return ValueMatcher(new OptionalValueMatcherImpl(std::move(m)));
}
ValueMatcher OptionalValueIsEmpty() { return OptionalValueIsEmptyImpl(); }
}
} | #include "common/value_testing.h"
#include <utility>
#include "gtest/gtest-spi.h"
#include "absl/status/status.h"
#include "absl/time/time.h"
#include "common/memory.h"
#include "common/value.h"
#include "internal/testing.h"
namespace cel::test {
namespace {
using ::absl_testing::StatusIs;
using ::testing::_;
using ::testing::ElementsAre;
using ::testing::Not;
using ::testing::Pair;
using ::testing::Truly;
using ::testing::UnorderedElementsAre;
TEST(BoolValueIs, Match) { EXPECT_THAT(BoolValue(true), BoolValueIs(true)); }
TEST(BoolValueIs, NoMatch) {
EXPECT_THAT(BoolValue(false), Not(BoolValueIs(true)));
EXPECT_THAT(IntValue(2), Not(BoolValueIs(true)));
}
TEST(BoolValueIs, NonMatchMessage) {
EXPECT_NONFATAL_FAILURE(
[]() { EXPECT_THAT(IntValue(42), BoolValueIs(true)); }(),
"kind is bool and is equal to true");
}
TEST(IntValueIs, Match) { EXPECT_THAT(IntValue(42), IntValueIs(42)); }
TEST(IntValueIs, NoMatch) {
EXPECT_THAT(IntValue(-42), Not(IntValueIs(42)));
EXPECT_THAT(UintValue(2), Not(IntValueIs(42)));
}
TEST(IntValueIs, NonMatchMessage) {
EXPECT_NONFATAL_FAILURE(
[]() { EXPECT_THAT(UintValue(42), IntValueIs(42)); }(),
"kind is int and is equal to 42");
}
TEST(UintValueIs, Match) { EXPECT_THAT(UintValue(42), UintValueIs(42)); }
TEST(UintValueIs, NoMatch) {
EXPECT_THAT(UintValue(41), Not(UintValueIs(42)));
EXPECT_THAT(IntValue(2), Not(UintValueIs(42)));
}
TEST(UintValueIs, NonMatchMessage) {
EXPECT_NONFATAL_FAILURE(
[]() { EXPECT_THAT(IntValue(42), UintValueIs(42)); }(),
"kind is uint and is equal to 42");
}
TEST(DoubleValueIs, Match) {
EXPECT_THAT(DoubleValue(1.2), DoubleValueIs(1.2));
}
TEST(DoubleValueIs, NoMatch) {
EXPECT_THAT(DoubleValue(41), Not(DoubleValueIs(1.2)));
EXPECT_THAT(IntValue(2), Not(DoubleValueIs(1.2)));
}
TEST(DoubleValueIs, NonMatchMessage) {
EXPECT_NONFATAL_FAILURE(
[]() { EXPECT_THAT(IntValue(42), DoubleValueIs(1.2)); }(),
"kind is double and is equal to 1.2");
}
TEST(DurationValueIs, Match) {
EXPECT_THAT(DurationValue(absl::Minutes(2)),
DurationValueIs(absl::Minutes(2)));
}
TEST(DurationValueIs, NoMatch) {
EXPECT_THAT(DurationValue(absl::Minutes(5)),
Not(DurationValueIs(absl::Minutes(2))));
EXPECT_THAT(IntValue(2), Not(DurationValueIs(absl::Minutes(2))));
}
TEST(DurationValueIs, NonMatchMessage) {
EXPECT_NONFATAL_FAILURE(
[]() { EXPECT_THAT(IntValue(42), DurationValueIs(absl::Minutes(2))); }(),
"kind is duration and is equal to 2m");
}
TEST(TimestampValueIs, Match) {
EXPECT_THAT(TimestampValue(absl::UnixEpoch() + absl::Minutes(2)),
TimestampValueIs(absl::UnixEpoch() + absl::Minutes(2)));
}
TEST(TimestampValueIs, NoMatch) {
EXPECT_THAT(TimestampValue(absl::UnixEpoch()),
Not(TimestampValueIs(absl::UnixEpoch() + absl::Minutes(2))));
EXPECT_THAT(IntValue(2),
Not(TimestampValueIs(absl::UnixEpoch() + absl::Minutes(2))));
}
TEST(TimestampValueIs, NonMatchMessage) {
EXPECT_NONFATAL_FAILURE(
[]() {
EXPECT_THAT(IntValue(42),
TimestampValueIs(absl::UnixEpoch() + absl::Minutes(2)));
}(),
"kind is timestamp and is equal to 19");
}
TEST(StringValueIs, Match) {
EXPECT_THAT(StringValue("hello!"), StringValueIs("hello!"));
}
TEST(StringValueIs, NoMatch) {
EXPECT_THAT(StringValue("hello!"), Not(StringValueIs("goodbye!")));
EXPECT_THAT(IntValue(2), Not(StringValueIs("goodbye!")));
}
TEST(StringValueIs, NonMatchMessage) {
EXPECT_NONFATAL_FAILURE(
[]() { EXPECT_THAT(IntValue(42), StringValueIs("hello!")); }(),
"kind is string and is equal to \"hello!\"");
}
TEST(BytesValueIs, Match) {
EXPECT_THAT(BytesValue("hello!"), BytesValueIs("hello!"));
}
TEST(BytesValueIs, NoMatch) {
EXPECT_THAT(BytesValue("hello!"), Not(BytesValueIs("goodbye!")));
EXPECT_THAT(IntValue(2), Not(BytesValueIs("goodbye!")));
}
TEST(BytesValueIs, NonMatchMessage) {
EXPECT_NONFATAL_FAILURE(
[]() { EXPECT_THAT(IntValue(42), BytesValueIs("hello!")); }(),
"kind is bytes and is equal to \"hello!\"");
}
TEST(ErrorValueIs, Match) {
EXPECT_THAT(ErrorValue(absl::InternalError("test")),
ErrorValueIs(StatusIs(absl::StatusCode::kInternal, "test")));
}
TEST(ErrorValueIs, NoMatch) {
EXPECT_THAT(ErrorValue(absl::UnknownError("test")),
Not(ErrorValueIs(StatusIs(absl::StatusCode::kInternal, "test"))));
EXPECT_THAT(IntValue(2), Not(ErrorValueIs(_)));
}
TEST(ErrorValueIs, NonMatchMessage) {
EXPECT_NONFATAL_FAILURE(
[]() {
EXPECT_THAT(IntValue(42), ErrorValueIs(StatusIs(
absl::StatusCode::kInternal, "test")));
}(),
"kind is *error* and");
}
using ValueMatcherTest = common_internal::ThreadCompatibleValueTest<>;
TEST_P(ValueMatcherTest, OptionalValueIsMatch) {
EXPECT_THAT(
OptionalValue::Of(value_manager().GetMemoryManager(), IntValue(42)),
OptionalValueIs(IntValueIs(42)));
}
TEST_P(ValueMatcherTest, OptionalValueIsHeldValueDifferent) {
EXPECT_NONFATAL_FAILURE(
[&]() {
EXPECT_THAT(OptionalValue::Of(value_manager().GetMemoryManager(),
IntValue(-42)),
OptionalValueIs(IntValueIs(42)));
}(),
"is OptionalValue that is engaged with value whose kind is int and is "
"equal to 42");
}
TEST_P(ValueMatcherTest, OptionalValueIsNotEngaged) {
EXPECT_NONFATAL_FAILURE(
[&]() {
EXPECT_THAT(OptionalValue::None(), OptionalValueIs(IntValueIs(42)));
}(),
"is not engaged");
}
TEST_P(ValueMatcherTest, OptionalValueIsNotAnOptional) {
EXPECT_NONFATAL_FAILURE(
[&]() { EXPECT_THAT(IntValue(42), OptionalValueIs(IntValueIs(42))); }(),
"wanted OptionalValue, got int");
}
TEST_P(ValueMatcherTest, OptionalValueIsEmptyMatch) {
EXPECT_THAT(OptionalValue::None(), OptionalValueIsEmpty());
}
TEST_P(ValueMatcherTest, OptionalValueIsEmptyNotEmpty) {
EXPECT_NONFATAL_FAILURE(
[&]() {
EXPECT_THAT(
OptionalValue::Of(value_manager().GetMemoryManager(), IntValue(42)),
OptionalValueIsEmpty());
}(),
"is not empty");
}
TEST_P(ValueMatcherTest, OptionalValueIsEmptyNotOptional) {
EXPECT_NONFATAL_FAILURE(
[&]() { EXPECT_THAT(IntValue(42), OptionalValueIsEmpty()); }(),
"wanted OptionalValue, got int");
}
TEST_P(ValueMatcherTest, ListMatcherBasic) {
ASSERT_OK_AND_ASSIGN(auto builder, value_manager().NewListValueBuilder(
value_manager().GetDynListType()));
ASSERT_OK(builder->Add(IntValue(42)));
Value list_value = std::move(*builder).Build();
EXPECT_THAT(list_value, ListValueIs(Truly([](const ListValue& v) {
auto size = v.Size();
return size.ok() && *size == 1;
})));
}
TEST_P(ValueMatcherTest, ListMatcherMatchesElements) {
ASSERT_OK_AND_ASSIGN(auto builder, value_manager().NewListValueBuilder(
value_manager().GetDynListType()));
ASSERT_OK(builder->Add(IntValue(42)));
ASSERT_OK(builder->Add(IntValue(1337)));
ASSERT_OK(builder->Add(IntValue(42)));
ASSERT_OK(builder->Add(IntValue(100)));
EXPECT_THAT(
std::move(*builder).Build(),
ListValueIs(ListValueElements(
&value_manager(), ElementsAre(IntValueIs(42), IntValueIs(1337),
IntValueIs(42), IntValueIs(100)))));
}
TEST_P(ValueMatcherTest, MapMatcherBasic) {
ASSERT_OK_AND_ASSIGN(auto builder, value_manager().NewMapValueBuilder(
value_manager().GetDynDynMapType()));
ASSERT_OK(builder->Put(IntValue(42), IntValue(42)));
Value map_value = std::move(*builder).Build();
EXPECT_THAT(map_value, MapValueIs(Truly([](const MapValue& v) {
auto size = v.Size();
return size.ok() && *size == 1;
})));
}
TEST_P(ValueMatcherTest, MapMatcherMatchesElements) {
ASSERT_OK_AND_ASSIGN(auto builder, value_manager().NewMapValueBuilder(
value_manager().GetDynDynMapType()));
ASSERT_OK(builder->Put(IntValue(42), StringValue("answer")));
ASSERT_OK(builder->Put(IntValue(1337), StringValue("leet")));
EXPECT_THAT(std::move(*builder).Build(),
MapValueIs(MapValueElements(
&value_manager(),
UnorderedElementsAre(
Pair(IntValueIs(42), StringValueIs("answer")),
Pair(IntValueIs(1337), StringValueIs("leet"))))));
}
INSTANTIATE_TEST_SUITE_P(
MemoryManagerStrategy, ValueMatcherTest,
testing::Values(cel::MemoryManagement::kPooling,
cel::MemoryManagement::kReferenceCounting));
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/common/value_testing.cc | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/common/value_testing_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
26f2d42c-ee60-46c1-bd75-a32f45db3a9b | cpp | tensorflow/tensorflow | hlo_proto_to_graph_view | tensorflow/core/profiler/convert/hlo_proto_to_graph_view.cc | tensorflow/core/profiler/convert/hlo_proto_to_graph_view_test.cc | #include "tensorflow/core/profiler/convert/hlo_proto_to_graph_view.h"
#include <functional>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_replace.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_opcode.h"
#ifdef PLATFORM_GOOGLE
#include "third_party/json/src/json.hpp"
#include "tensorflow/compiler/mlir/lite/experimental/google/tooling/google/direct_hlo_to_json_graph_convert.h"
#endif
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo.pb.h"
#include "xla/service/hlo_graph_dumper.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/profiler/convert/tool_options.h"
#include "tensorflow/core/profiler/utils/hlo_module_utils.h"
#include "tensorflow/core/profiler/utils/hlo_proto_to_module.h"
namespace tensorflow {
namespace profiler {
namespace {
using ::tensorflow::StatusOr;
using ::tensorflow::errors::InvalidArgument;
using ::xla::HloComputation;
using ::xla::HloInstruction;
using ::xla::HloModule;
using ::xla::HloPrintOptions;
using ::xla::HloProto;
using ::xla::HloRenderOptions;
using ::xla::RenderedGraphFormat;
constexpr char kCenterNodeKey[] = "centerNode";
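// Strips infeed/outfeed configs before rendering. These configs are opaque
// serialized payloads that are not needed for visualization and, presumably,
// can trip up the graphviz renderer; clearing them keeps the dump small.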
void CleanUpHloModuleForGraphviz(HloModule* hlo_module) {
for (HloComputation* computation : hlo_module->computations()) {
for (HloInstruction* inst : computation->instructions()) {
if (inst->opcode() == xla::HloOpcode::kInfeed) {
inst->set_infeed_config("");
} else if (inst->opcode() == xla::HloOpcode::kOutfeed) {
inst->set_outfeed_config("");
}
}
}
}
std::string GetLayerId(absl::string_view namespace_name) {
return absl::StrCat(namespace_name, "___group___");
}
#ifdef PLATFORM_GOOGLE
void AddCenterNodeMetadata(nlohmann::json& graph_json, std::string id,
absl::string_view name, absl::string_view opcode) {
nlohmann::json centerGroupNodeAttributes;
centerGroupNodeAttributes["name"] = name;
centerGroupNodeAttributes["id"] = id;
if (!opcode.empty()) {
centerGroupNodeAttributes["opcode"] = opcode;
}
graph_json[0]["subgraphs"][0]["groupNodeAttributes"][kCenterNodeKey] =
centerGroupNodeAttributes;
}
#endif
void AddGraphMetadata(std::string& graph_json_str,
const HloInstruction& instr) {
#ifdef PLATFORM_GOOGLE
nlohmann::json graph_json = nlohmann::json::parse(graph_json_str);
auto id =
instr.opcode() == xla::HloOpcode::kFusion
? GetLayerId(absl::StrCat(instr.parent()->name(), "/", instr.name()))
: absl::StrCat(instr.unique_id());
AddCenterNodeMetadata(graph_json, id, instr.name(),
HloOpcodeString(instr.opcode()));
graph_json_str = graph_json.dump();
#endif
}
void AddGraphMetadata(std::string& graph_json_str, const HloComputation& comp) {
#ifdef PLATFORM_GOOGLE
nlohmann::json graph_json = nlohmann::json::parse(graph_json_str);
AddCenterNodeMetadata(graph_json, GetLayerId(comp.name()), comp.name(), "");
graph_json_str = graph_json.dump();
#endif
}
absl::StatusOr<std::string> PlotMe(std::unique_ptr<HloModule> module,
const std::string& node_name,
int graph_width) {
if (node_name.empty()) {
return InvalidArgument("node_name should not be empty");
}
const HloInstruction* instr = FindInstruction(*module, node_name);
const HloComputation* comp = FindComputation(*module, node_name);
if (!instr && !comp) {
return InvalidArgument(
absl::StrCat("Couldn't find HloInstruction or HloComputation named ",
node_name, "."));
}
absl::StatusOr<std::string> graph_handle;
std::string graph_json_str;
#ifdef PLATFORM_GOOGLE
if (comp) {
graph_handle = tooling::visualization_client::HloGraphAdapter(*comp);
} else {
graph_handle =
tooling::visualization_client::HloGraphAdapter(*instr, graph_width);
}
#endif
if (graph_handle.ok()) {
VLOG(1) << graph_handle.value();
graph_json_str = graph_handle.value();
if (comp) {
AddGraphMetadata(graph_json_str, *comp);
} else {
AddGraphMetadata(graph_json_str, *instr);
}
return graph_json_str;
} else {
LOG(ERROR) << "Unable to render graph: " << graph_handle.status();
}
return graph_handle;
}
absl::StatusOr<std::string> Plot(std::unique_ptr<HloModule> module,
const std::string& node_name, int graph_width,
const HloRenderOptions& render_options,
const RenderedGraphFormat& format) {
if (node_name.empty()) {
return InvalidArgument("node_name should not be empty");
}
const HloInstruction* instr = FindInstruction(*module, node_name);
const HloComputation* comp = FindComputation(*module, node_name);
if (!instr && !comp) {
return InvalidArgument(
absl::StrCat("Couldn't find HloInstruction or HloComputation named ",
node_name, "."));
}
absl::StatusOr<std::string> graph_handle;
CleanUpHloModuleForGraphviz(module.get());
if (comp) {
graph_handle =
RenderGraphView(*comp, "", comp->parent()->config().debug_options(),
format, render_options);
} else {
graph_handle = RenderGraphNeighborhoodAround(*instr, graph_width, format,
render_options);
}
if (graph_handle.ok()) {
VLOG(1) << graph_handle.value();
} else {
LOG(ERROR) << "Unable to render graph: " << graph_handle.status();
}
return graph_handle;
}
static constexpr char kGraphTypeName[] = "graph";
static constexpr char kShortTxtTypeName[] = "short_txt";
static constexpr char kLongTxtTypeName[] = "long_txt";
static constexpr char kDefaultFormatString[] = "url";
static constexpr int kDefaultWidth = 3;
static constexpr int kDefaultShowMetadata = 0;
static constexpr int kDefaultMergeFusion = 0;
}
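// Returns a JSON object mapping color bucket names (kBlue, kBrown, ...) to
// comma-separated HLO op names, e.g. {"kBlue": "async-start,async-update,
// async-done", ...}. The buckets below group ops by category: async ops,
// collectives and host transfers, heavy math (dot/convolution/fft), control
// flow, data movement, and so on.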
absl::StatusOr<std::string> GetNodeStyles() {
std::vector<xla::HloOpcode> async_op_codes = {xla::HloOpcode::kAsyncStart,
xla::HloOpcode::kAsyncUpdate,
xla::HloOpcode::kAsyncDone};
std::vector<xla::HloOpcode> brown_op_codes = {
xla::HloOpcode::kAllGather,
xla::HloOpcode::kAllGatherStart,
xla::HloOpcode::kAllGatherDone,
xla::HloOpcode::kAllReduce,
xla::HloOpcode::kReduceScatter,
xla::HloOpcode::kAllReduceStart,
xla::HloOpcode::kAllReduceDone,
xla::HloOpcode::kAllToAll,
xla::HloOpcode::kCollectiveBroadcast,
xla::HloOpcode::kCollectivePermute,
xla::HloOpcode::kCollectivePermuteStart,
xla::HloOpcode::kCollectivePermuteDone,
xla::HloOpcode::kInfeed,
xla::HloOpcode::kOutfeed,
xla::HloOpcode::kPartitionId,
xla::HloOpcode::kRecv,
xla::HloOpcode::kRecvDone,
xla::HloOpcode::kSend,
xla::HloOpcode::kSendDone,
xla::HloOpcode::kReplicaId};
std::vector<xla::HloOpcode> dark_blue_op_codes = {
xla::HloOpcode::kConvolution, xla::HloOpcode::kDot, xla::HloOpcode::kFft,
xla::HloOpcode::kTriangularSolve, xla::HloOpcode::kCholesky};
std::vector<xla::HloOpcode> dark_green_op_codes = {
xla::HloOpcode::kCall, xla::HloOpcode::kConditional,
xla::HloOpcode::kCustomCall, xla::HloOpcode::kWhile};
std::vector<xla::HloOpcode> gray_op_codes = {
xla::HloOpcode::kDomain, xla::HloOpcode::kFusion, xla::HloOpcode::kMap,
xla::HloOpcode::kGetDimensionSize, xla::HloOpcode::kSetDimensionSize};
std::vector<xla::HloOpcode> green_op_codes = {
xla::HloOpcode::kConcatenate, xla::HloOpcode::kDynamicSlice,
xla::HloOpcode::kReshape, xla::HloOpcode::kDynamicReshape,
xla::HloOpcode::kReverse, xla::HloOpcode::kTranspose,
xla::HloOpcode::kCopy, xla::HloOpcode::kCopyStart,
xla::HloOpcode::kCopyDone};
std::vector<xla::HloOpcode> orange_op_codes = {xla::HloOpcode::kParameter};
std::vector<xla::HloOpcode> purple_op_codes = {
xla::HloOpcode::kBatchNormGrad, xla::HloOpcode::kBatchNormInference,
xla::HloOpcode::kBatchNormTraining, xla::HloOpcode::kReduce,
xla::HloOpcode::kReduceWindow, xla::HloOpcode::kScatter,
xla::HloOpcode::kSelectAndScatter, xla::HloOpcode::kGather};
std::vector<xla::HloOpcode> yellow_op_codes = {
xla::HloOpcode::kBroadcast, xla::HloOpcode::kDynamicUpdateSlice};
auto OpCodesToNames =
[&](std::vector<xla::HloOpcode> op_codes) -> std::string {
std::string op_names = "";
for (const auto& op_code : op_codes) {
if (!op_names.empty()) {
op_names += ",";
}
op_names += std::string(xla::HloOpcodeString(op_code));
}
return op_names;
};
return absl::StrReplaceAll(
R"json({
"kBlue": "$asyncOpNames",
"kBrown": "$brownOpNames",
"kDarkBlue": "$darkBlueOpNames",
"kDarkGreen": "$darkGreenOpNames",
"kGray": "$grayOpNames",
"kGreen": "$greenOpNames",
"kOrange": "$orangeOpNames",
"kPurple": "$purpleOpNames",
"kYellow": "$yellowOpNames"
})json",
{
{"$asyncOpNames", OpCodesToNames(async_op_codes)},
{"$brownOpNames", OpCodesToNames(brown_op_codes)},
{"$darkBlueOpNames", OpCodesToNames(dark_blue_op_codes)},
{"$darkGreenOpNames", OpCodesToNames(dark_green_op_codes)},
{"$grayOpNames", OpCodesToNames(gray_op_codes)},
{"$greenOpNames", OpCodesToNames(green_op_codes)},
{"$orangeOpNames", OpCodesToNames(orange_op_codes)},
{"$purpleOpNames", OpCodesToNames(purple_op_codes)},
{"$yellowOpNames", OpCodesToNames(yellow_op_codes)},
});
}
absl::StatusOr<GraphViewerParams> ParseGraphViewerParams(
const ToolOptions& options) {
GraphViewerParams params;
std::optional<std::string> type = GetParam<std::string>(options, "type");
if (!type.has_value()) {
return errors::InvalidArgument("Graph viewer must provide a type option.");
}
if (type == kGraphTypeName) {
params.type = type.value();
if (std::optional<std::string> node_name =
GetParam<std::string>(options, "node_name")) {
params.node_name = node_name.value();
}
params.graph_width =
GetParamWithDefault<int>(options, "graph_width", kDefaultWidth);
params.render_options.show_backend_config = GetParamWithDefault<int>(
options, "show_metadata", kDefaultShowMetadata);
params.render_options.show_fusion_subcomputations =
!GetParamWithDefault<int>(options, "merge_fusion", kDefaultMergeFusion);
params.format = GetRenderFormat(GetParamWithDefault<std::string>(
options, "format", kDefaultFormatString));
return params;
}
if (type == kShortTxtTypeName || type == kLongTxtTypeName) {
params.type = type.value();
params.verbose = (type == kLongTxtTypeName);
params.show_metadata =
GetParamWithDefault(options, "show_metadata", kDefaultShowMetadata);
return params;
}
return errors::InvalidArgument("Unknown graph viewer type option: ",
type.value());
}
xla::RenderedGraphFormat GetRenderFormat(const std::string& format_string) {
if (format_string == "html") {
return xla::RenderedGraphFormat::kHtml;
} else if (format_string == "dot") {
return xla::RenderedGraphFormat::kDot;
} else if (format_string == "url") {
return xla::RenderedGraphFormat::kUrl;
} else {
LOG(ERROR) << "Invalid graph format argument: " << format_string
<< ", fallback to default url";
return xla::RenderedGraphFormat::kUrl;
}
}
absl::StatusOr<std::string> ConvertHloProtoToGraph(
const HloProto& hlo_proto, const std::string& node_name, int graph_width,
const HloRenderOptions& render_options, const RenderedGraphFormat& format) {
TF_ASSIGN_OR_RETURN(std::unique_ptr<HloModule> hlo_module,
ConvertHloProtoToModule(hlo_proto));
return Plot(std::move(hlo_module), node_name, graph_width, render_options,
format);
}
absl::StatusOr<std::string> ConvertHloProtoToMeGraph(
const HloProto& hlo_proto, const std::string& node_name, int graph_width) {
TF_ASSIGN_OR_RETURN(std::unique_ptr<HloModule> hlo_module,
ConvertHloProtoToModule(hlo_proto));
return PlotMe(std::move(hlo_module), node_name, graph_width);
}
absl::StatusOr<std::string> ConvertHloProtoToStringView(
const HloProto& hlo_proto, bool verbose, bool metadata) {
TF_ASSIGN_OR_RETURN(std::unique_ptr<HloModule> hlo_module,
ConvertHloProtoToModule(hlo_proto));
HloPrintOptions options;
if (!verbose) {
options = HloPrintOptions::ShortParsable();
}
options.set_print_large_constants(verbose);
options.set_print_metadata(metadata);
return hlo_module->ToString(options);
}
std::function<absl::StatusOr<std::string>(absl::string_view)>* url_renderer =
nullptr;
absl::Status CheckPrecondition(xla::RenderedGraphFormat format) {
if (format == xla::RenderedGraphFormat::kUrl && url_renderer == nullptr) {
return absl::FailedPreconditionError(
"Can't render as URL; no URL renderer was registered.");
}
return absl::OkStatus();
}
absl::StatusOr<std::string> RenderGraphView(
const xla::HloComputation& computation, absl::string_view label,
const xla::DebugOptions& debug_options, xla::RenderedGraphFormat format,
xla::HloRenderOptions hlo_render_options) {
auto precheck_status = CheckPrecondition(format);
if (!precheck_status.ok()) {
return precheck_status;
}
auto rendered_dot =
xla::RenderGraph(computation, label, debug_options,
RenderedGraphFormat::kDot, hlo_render_options);
if (!rendered_dot.ok()) {
return rendered_dot.status();
}
return WrapDotInFormat(rendered_dot.value(), format);
}
absl::StatusOr<std::string> RenderGraphNeighborhoodAround(
const xla::HloInstruction& node, int radius,
xla::RenderedGraphFormat format, xla::HloRenderOptions hlo_render_options,
const absl::flat_hash_set<const xla::HloInstruction*>& boundary) {
auto precheck_status = CheckPrecondition(format);
if (!precheck_status.ok()) {
return precheck_status;
}
auto rendered_dot = xla::RenderNeighborhoodAround(
node, radius, RenderedGraphFormat::kDot, hlo_render_options, boundary);
if (!rendered_dot.ok()) {
return rendered_dot.status();
}
return WrapDotInFormat(rendered_dot.value(), format);
}
absl::StatusOr<std::string> WrapDotInFormat(std::string dot,
xla::RenderedGraphFormat format) {
switch (format) {
case xla::RenderedGraphFormat::kUrl:
if (url_renderer == nullptr) {
return absl::InternalError("url_renderer is null");
}
return (*url_renderer)(dot);
case xla::RenderedGraphFormat::kHtml:
return WrapDotInHtml(dot);
case xla::RenderedGraphFormat::kDot:
return std::string(dot);
}
}
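// Embeds the DOT source in a self-contained HTML page: the hpcc-js/wasm build
// of graphviz lays out the graph to SVG in the browser, svg-pan-zoom adds
// zoom/pan controls, and any stylesheet embedded in the DOT (extracted by
// cssregex below) is re-attached to the rendered SVG.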
std::string WrapDotInHtml(std::string dot) {
return absl::StrReplaceAll(R"html(
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<style type="text/css">
body {
height: 100vh;
margin: 0;
}
#graph-container {height:95vh;width:100%;padding:10px;display:block;}
#graph-container svg { height: 100% !important; width: 100% !important;}
.node, .cluster {cursor:pointer;}
.cluster:hover, .node:hover {outline: solid 3px black;}
</style>
</head>
<body>
<script src="https:
integrity="sha384-LigJPbR3TOfU/Xbb+PjiN1dGJYPweLk7kiGnaMgmxnUmKWaCFKbb5tH6iLlyVhPZ"
crossorigin="anonymous"></script>
<script src="https:
<div id="graph-container"></div>
<script>
const cssregex = new RegExp('stylesheet=<([^]*)\n>\n', 'gm');
const hpccWasm = window["@hpcc-js/wasm"];
const data = `$DOT`;
const results = cssregex.exec(data);
let dot_data = data;
let css_data = '';
if (results !== null) {
css_data = results[1].replace(/\s*data:.*\s*,/,'');
css_data = unescape(css_data);
dot_data = data.replace(cssregex, '');
}
var render_start = performance.now()
function add_controls(svg) {
var htmlblob = new Blob([document.documentElement.innerHTML],
{type: 'text/html'});
var savehtml = document.createElement('a');
savehtml.setAttribute('href', URL.createObjectURL(htmlblob));
savehtml.setAttribute('download', 'graph.html');
savehtml.innerHTML = " [Save HTML+SVG] ";
document.body.append(savehtml);
var svgblob = new Blob([svg.outerHTML], {type: 'image/svg'});
var savesvg = document.createElement('a');
savesvg.setAttribute('href', URL.createObjectURL(svgblob));
savesvg.setAttribute('download', 'graph.svg');
savesvg.innerHTML = " [Save SVG] ";
document.body.append(savesvg);
var dotblob = new Blob([data], {type: 'text/dot'});
var savedot = document.createElement('a');
savedot.setAttribute('href', URL.createObjectURL(dotblob));
savedot.setAttribute('download', 'graph.dot');
savedot.innerHTML = " [Save DOT] ";
document.body.append(savedot);
var render_end = performance.now();
var render_note = document.createElement('div')
render_note.innerHTML = 'Rendering took '
+ (render_end - render_start).toFixed(2) + "ms."
document.body.append(render_note);
}
const render_callback = svg => {
const container = document.getElementById('graph-container')
container.innerHTML = `${svg}<style>${css_data}</style>`;
const panZoom = svgPanZoom(container.children[0], {
zoomEnabled: true,
controlIconsEnabled: true,
maxZoom: 200,
minZoom: 0,
});
add_controls(svg);
};
hpccWasm.graphviz.layout(dot_data, "svg", "dot").then(render_callback);
</script>
</body>
</html>
)html",
{
{"$DOT", dot},
});
}
void RegisterGraphvizURLRenderer(
std::function<absl::StatusOr<std::string>(absl::string_view)> renderer) {
if (url_renderer != nullptr) {
LOG(WARNING) << "Multiple calls to RegisterGraphToURLRenderer. Last call "
"wins, but because order of initialization in C++ is "
"nondeterministic, this may not be what you want.";
}
delete url_renderer;
url_renderer =
new std::function<absl::StatusOr<std::string>(absl::string_view)>(
std::move(renderer));
}
}
} | #include "tensorflow/core/profiler/convert/hlo_proto_to_graph_view.h"
#include <string>
#include <variant>
#include "xla/service/hlo_graph_dumper.h"
#include "tensorflow/core/platform/status_matchers.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/profiler/convert/tool_options.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
namespace tensorflow {
namespace profiler {
namespace {
using ::tensorflow::testing::StatusIs;
using ::testing::HasSubstr;
TEST(GraphViewerParamsTest, GraphType) {
ToolOptions options1;
options1["type"] = "graph";
TF_ASSERT_OK_AND_ASSIGN(GraphViewerParams params1,
ParseGraphViewerParams(options1));
EXPECT_EQ(params1.type, "graph");
EXPECT_EQ(params1.node_name, "");
EXPECT_EQ(params1.graph_width, 3);
EXPECT_EQ(params1.render_options.show_backend_config, false);
EXPECT_EQ(params1.render_options.show_fusion_subcomputations, true);
EXPECT_EQ(params1.format, xla::RenderedGraphFormat::kUrl);
ToolOptions options2;
options2["type"] = "graph";
options2["node_name"] = "fusion.111";
options2["graph_width"] = 10;
options2["show_metadata"] = 1;
options2["merge_fusion"] = 1;
options2["format"] = "html";
TF_ASSERT_OK_AND_ASSIGN(GraphViewerParams params2,
ParseGraphViewerParams(options2));
EXPECT_EQ(params2.type, "graph");
EXPECT_EQ(params2.node_name, "fusion.111");
EXPECT_EQ(params2.graph_width, 10);
EXPECT_EQ(params2.render_options.show_backend_config, true);
EXPECT_EQ(params2.render_options.show_fusion_subcomputations, false);
EXPECT_EQ(params2.format, xla::RenderedGraphFormat::kHtml);
}
TEST(GraphViewerParamsTest, ShortTxtType) {
ToolOptions options1;
options1["type"] = "short_txt";
TF_ASSERT_OK_AND_ASSIGN(GraphViewerParams params1,
ParseGraphViewerParams(options1));
EXPECT_EQ(params1.type, "short_txt");
EXPECT_EQ(params1.verbose, false);
EXPECT_EQ(params1.show_metadata, false);
ToolOptions options2;
options2["type"] = "short_txt";
options2["show_metadata"] = 1;
TF_ASSERT_OK_AND_ASSIGN(GraphViewerParams params2,
ParseGraphViewerParams(options2));
EXPECT_EQ(params2.type, "short_txt");
EXPECT_EQ(params2.verbose, false);
EXPECT_EQ(params2.show_metadata, true);
}
TEST(GraphViewerParamsTest, LongTxtType) {
ToolOptions options1;
options1["type"] = "long_txt";
TF_ASSERT_OK_AND_ASSIGN(GraphViewerParams params1,
ParseGraphViewerParams(options1));
EXPECT_EQ(params1.type, "long_txt");
EXPECT_EQ(params1.verbose, true);
EXPECT_EQ(params1.show_metadata, false);
ToolOptions options2;
options2["type"] = "long_txt";
options2["show_metadata"] = 1;
TF_ASSERT_OK_AND_ASSIGN(GraphViewerParams params2,
ParseGraphViewerParams(options2));
EXPECT_EQ(params2.type, "long_txt");
EXPECT_EQ(params2.verbose, true);
EXPECT_EQ(params2.show_metadata, true);
}
TEST(GraphViewerParamsTest, OtherTypes) {
ToolOptions options1;
EXPECT_THAT(ParseGraphViewerParams(options1),
StatusIs(error::INVALID_ARGUMENT,
HasSubstr("Graph viewer must provide a type option")));
ToolOptions options2;
options2["type"] = "abcd";
EXPECT_THAT(ParseGraphViewerParams(options2),
StatusIs(error::INVALID_ARGUMENT,
HasSubstr("Unknown graph viewer type option: abcd")));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/profiler/convert/hlo_proto_to_graph_view.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/profiler/convert/hlo_proto_to_graph_view_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ddfcea69-3e8f-447d-969f-24dcf59ba273 | cpp | tensorflow/tensorflow | input_split_metadata | tensorflow/core/kernels/batching_util/input_split_metadata.cc | tensorflow/core/kernels/batching_util/input_split_metadata_test.cc | #include "tensorflow/core/kernels/batching_util/input_split_metadata.h"
#include <algorithm>
#include "absl/container/fixed_array.h"
#include "absl/strings/str_join.h"
namespace tensorflow {
namespace serving {
namespace internal {
namespace {
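// An input task is split so that the head fills the open batch's remaining
// slots, the middle chunks are full batches of batch_size_limit, and the tail
// holds the remainder. Worked example (matches the unit test): input size 10,
// open batch with 3 free slots, batch size limit 4 -> effective size
// 10 + (4 - 3) = 11, ceil(11 / 4) = 3 splits, head = 3, tail = 11 % 4 = 3,
// i.e. task sizes [3, 4, 3].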
int compute_task_size_from_open_batch(int input_task_size,
int open_batch_remaining_slot,
int batch_size_limit) {
return (open_batch_remaining_slot > 0)
? (input_task_size + batch_size_limit - open_batch_remaining_slot)
: input_task_size;
}
int compute_head_task_size(int input_task_size, int open_batch_remaining_slot,
int batch_size_limit) {
if (open_batch_remaining_slot == 0) {
return std::min(input_task_size, batch_size_limit);
}
return std::min(open_batch_remaining_slot, input_task_size);
}
int compute_tail_task_size(int task_size_from_open_batch, int input_task_size,
int open_batch_remaining_slot,
int batch_size_limit) {
int tail_task_size;
if (input_task_size <= open_batch_remaining_slot) {
tail_task_size = input_task_size;
} else {
tail_task_size = task_size_from_open_batch % batch_size_limit;
if (tail_task_size == 0) {
tail_task_size = batch_size_limit;
}
}
return tail_task_size;
}
int compute_num_batches(int task_size_from_open_batch, int batch_size_limit) {
return (task_size_from_open_batch + batch_size_limit - 1) / batch_size_limit;
}
}
InputSplitMetadata::InputSplitMetadata(int input_task_size,
int open_batch_remaining_slot,
int batch_size_limit)
: task_sizes_(generate_task_sizes(
input_task_size, open_batch_remaining_slot, batch_size_limit)) {}
const absl::FixedArray<int>& InputSplitMetadata::task_sizes() const {
return task_sizes_;
}
std::string InputSplitMetadata::DebugString() const {
return absl::StrJoin(task_sizes_, ", ");
}
absl::FixedArray<int> InputSplitMetadata::generate_task_sizes(
int input_task_size, int open_batch_remaining_slot,
int batch_size_limit) const {
const int task_size_from_open_batch = compute_task_size_from_open_batch(
input_task_size, open_batch_remaining_slot, batch_size_limit);
const int num_batches =
compute_num_batches(task_size_from_open_batch, batch_size_limit);
absl::FixedArray<int> task_sizes(num_batches, batch_size_limit);
task_sizes.front() = compute_head_task_size(
input_task_size, open_batch_remaining_slot, batch_size_limit);
task_sizes.back() =
compute_tail_task_size(task_size_from_open_batch, input_task_size,
open_batch_remaining_slot, batch_size_limit);
return task_sizes;
}
}
}
} | #include "tensorflow/core/kernels/batching_util/input_split_metadata.h"
#include "absl/strings/str_join.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace serving {
namespace internal {
namespace {
TEST(InputSplitUtilTest, Basic) {
// Each tuple holds (input_size, open_batch_remaining_slot, batch_size_limit,
// expected_num_batches, <unused by this test>, expected_head_batch_task_size,
// expected_tail_batch_task_size), matching the std::get<> calls below.
for (const auto& batch_task_param :
{std::tuple<int, int, int, int, int, int, int>{5, 1, 1, 5, 4, 1, 1},
{10, 3, 4, 3, 2, 3, 3},
{20, 5, 6, 4, 3, 5, 3},
{30, 0, 11, 3, 3, 11, 8},
{5, 6, 8, 1, 0, 5, 5}}) {
const int input_size = std::get<0>(batch_task_param);
const int open_batch_remaining_slot = std::get<1>(batch_task_param);
const int batch_size_limit = std::get<2>(batch_task_param);
const int expected_num_batches = std::get<3>(batch_task_param);
const int expected_head_batch_task_size = std::get<5>(batch_task_param);
const int expected_tail_batch_task_size = std::get<6>(batch_task_param);
ASSERT_LE(open_batch_remaining_slot, batch_size_limit);
InputSplitMetadata input_split_metadata(
input_size, open_batch_remaining_slot, batch_size_limit);
EXPECT_EQ(input_split_metadata.task_sizes().size(), expected_num_batches);
absl::FixedArray<int> expected_task_sizes(expected_num_batches);
for (int i = 0; i < expected_num_batches; i++) {
if (i == 0) {
expected_task_sizes[i] = expected_head_batch_task_size;
} else if (i == expected_num_batches - 1) {
expected_task_sizes[i] = expected_tail_batch_task_size;
} else {
expected_task_sizes[i] = batch_size_limit;
}
}
EXPECT_THAT(input_split_metadata.task_sizes(),
::testing::ElementsAreArray(expected_task_sizes));
EXPECT_EQ(input_split_metadata.DebugString(),
absl::StrJoin(expected_task_sizes, ", "));
}
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/batching_util/input_split_metadata.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/batching_util/input_split_metadata_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
afae6ffd-8b55-4046-b9af-01091e4ddae9 | cpp | google/tensorstore | compressor | tensorstore/driver/n5/compressor.cc | tensorstore/driver/zarr/compressor_test.cc | #include "tensorstore/driver/n5/compressor.h"
#include <utility>
#include "absl/base/no_destructor.h"
#include "tensorstore/driver/n5/compressor_registry.h"
#include "tensorstore/internal/compression/json_specified_compressor.h"
#include "tensorstore/internal/json_binding/bindable.h"
#include "tensorstore/internal/json_binding/enum.h"
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/internal/json_registry.h"
namespace tensorstore {
namespace internal_n5 {
using CompressorRegistry = internal::JsonSpecifiedCompressor::Registry;
CompressorRegistry& GetCompressorRegistry() {
static absl::NoDestructor<CompressorRegistry> registry;
return *registry;
}
TENSORSTORE_DEFINE_JSON_DEFAULT_BINDER(Compressor, [](auto is_loading,
const auto& options,
auto* obj,
::nlohmann::json* j) {
namespace jb = tensorstore::internal_json_binding;
auto& registry = GetCompressorRegistry();
return jb::Object(
jb::Member("type",
jb::MapValue(registry.KeyBinder(),
std::make_pair(Compressor{}, "raw"))),
registry.RegisteredObjectBinder())(is_loading, options, obj, j);
})
}
} | #include "tensorstore/driver/zarr/compressor.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include <nlohmann/json.hpp>
#include "tensorstore/internal/json_gtest.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::MatchesStatus;
using ::tensorstore::internal_zarr::Compressor;
TEST(ParseCompressorTest, Null) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto compressor,
Compressor::FromJson(nullptr));
EXPECT_EQ(nullptr, ::nlohmann::json(compressor));
}
TEST(ParseCompressorTest, ZlibSuccess) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto compressor, Compressor::FromJson({{"id", "zlib"}, {"level", 5}}));
EXPECT_EQ((::nlohmann::json{{"id", "zlib"}, {"level", 5}}),
::nlohmann::json(compressor));
}
TEST(ParseCompressorTest, ZlibFailure) {
EXPECT_THAT(
Compressor::FromJson(::nlohmann::json{{"id", "zlib"}, {"level", "a"}}),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Error parsing object member \"level\": .*"));
}
TEST(ParseCompressorTest, UnsupportedId) {
EXPECT_THAT(
Compressor::FromJson(::nlohmann::json{{"id", "invalid"}, {"level", "a"}}),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Error parsing object member \"id\": "
"\"invalid\" is not registered"));
}
TEST(ParseCompressorTest, InvalidId) {
EXPECT_THAT(Compressor::FromJson(::nlohmann::json{{"id", 5}, {"level", "a"}}),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Error parsing object member \"id\": "
"Expected string, but received: 5"));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/driver/n5/compressor.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/driver/zarr/compressor_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
93a4e78f-e7b6-439c-a9ab-8ad84529ba55 | cpp | tensorflow/tensorflow | batch_stats | tensorflow/core/kernels/batching_util/batch_stats.h | tensorflow/core/kernels/batching_util/batch_stats_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_BATCHING_UTIL_BATCH_STATS_H_
#define TENSORFLOW_CORE_KERNELS_BATCHING_UTIL_BATCH_STATS_H_
#include <atomic>
#include <cstdint>
#include <optional>
#include <string>
#include <tuple>
#include <vector>
#include "absl/container/node_hash_map.h"
#include "absl/time/time.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/types.h"
#include "tsl/platform/thread_annotations.h"
namespace tensorflow::serving {
constexpr int64_t kNumBatchThreadsUnknown = -1;
constexpr int64_t kBatchTimeoutMicrosUnknown = -1;
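// Tracks the running mean of a cost (e.g. per-batch TPU time). Thread-safe:
// Register() and mean() each take the mutex, and mean() returns nullopt until
// at least one sample has been registered.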
class CostTracker {
public:
void Register(absl::Duration cost) {
DCHECK_GT(cost, absl::ZeroDuration());
mutex_lock l(mu_);
sample_count_++;
sample_sum_ += cost;
}
std::optional<absl::Duration> mean() const {
int64_t count;
absl::Duration sum;
{
mutex_lock l(mu_);
count = sample_count_;
sum = sample_sum_;
}
if (count == 0) return std::nullopt;
return sum / count;
}
private:
mutable mutex mu_;
int64_t sample_count_ TF_GUARDED_BY(mu_) = 0;
absl::Duration sample_sum_ TF_GUARDED_BY(mu_);
};
class BatchSizeStats {
public:
CostTracker& tpu_cost() { return tpu_cost_; }
private:
CostTracker tpu_cost_;
};
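// Per-(model, op) batching statistics. Uses absl::node_hash_map, which
// guarantees pointer stability of its values, so the references handed out by
// batch_size() remain valid across subsequent insertions even though the lock
// is released before they are used.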
class ModelBatchStats {
public:
BatchSizeStats& batch_size(int32 batch_size) {
mutex_lock l(mu_);
return batch_size_stats_by_batch_size_[batch_size];
}
void RegisterProcessedSize(int64_t size) {
cumulative_processed_size_.fetch_add(size, std::memory_order_relaxed);
}
int64_t cumulative_processed_size() const {
return cumulative_processed_size_.load(std::memory_order_relaxed);
}
std::vector<int32> BatchSizes() const {
std::vector<int32> result;
mutex_lock l(mu_);
result.reserve(batch_size_stats_by_batch_size_.size());
for (const auto& [key, value] : batch_size_stats_by_batch_size_) {
result.push_back(key);
}
return result;
}
void SetNumBatchThreads(int64_t num_batch_threads) {
num_batch_threads_.store(num_batch_threads, std::memory_order_relaxed);
}
int64_t num_batch_threads() const {
return num_batch_threads_.load(std::memory_order_relaxed);
}
void SetBatchTimeoutMicros(int64_t batch_timeout_micros) {
batch_timeout_micros_.store(batch_timeout_micros,
std::memory_order_relaxed);
}
int64_t batch_timeout_micros() const {
return batch_timeout_micros_.load(std::memory_order_relaxed);
}
private:
mutable mutex mu_;
absl::node_hash_map<int32, BatchSizeStats> batch_size_stats_by_batch_size_
TF_GUARDED_BY(mu_);
std::atomic<int64_t> cumulative_processed_size_ = 0;
std::atomic<int64_t> num_batch_threads_ = kNumBatchThreadsUnknown;
std::atomic<int64_t> batch_timeout_micros_ = kBatchTimeoutMicrosUnknown;
};
class BatchStatsRegistry {
public:
ModelBatchStats& model(const std::string& model_name,
const std::string& op_name) {
std::tuple key(model_name, op_name);
mutex_lock l(mu_);
return model_batch_stats_by_model_and_op_names_[key];
}
std::vector<std::tuple<std::string, std::string>> ModelAndOpNames() const {
std::vector<std::tuple<std::string, std::string>> result;
mutex_lock l(mu_);
result.reserve(model_batch_stats_by_model_and_op_names_.size());
for (const auto& [key, value] : model_batch_stats_by_model_and_op_names_) {
result.push_back(key);
}
return result;
}
private:
mutable mutex mu_;
absl::node_hash_map<std::tuple<std::string, std::string>, ModelBatchStats>
model_batch_stats_by_model_and_op_names_ TF_GUARDED_BY(mu_);
};
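// Process-wide singleton. Intentionally heap-allocated and never freed,
// presumably to sidestep static destruction order issues at shutdown.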
inline BatchStatsRegistry& GlobalBatchStatsRegistry() {
static BatchStatsRegistry* instance = new BatchStatsRegistry();
return *instance;
}
}
#endif | #include "tensorflow/core/kernels/batching_util/batch_stats.h"
#include <tuple>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/time/time.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow::serving {
namespace {
using ::testing::UnorderedElementsAre;
TEST(BatchStatsTest, GlobalBatchStatsRegistryAlwaysReturnsTheSameInstance) {
ASSERT_EQ(&GlobalBatchStatsRegistry(), &GlobalBatchStatsRegistry());
}
TEST(BatchStatsTest, BasicOperation) {
BatchStatsRegistry stats;
stats.model( "m", "o")
.batch_size(1)
.tpu_cost()
.Register(absl::Hours(5));
ASSERT_EQ(stats.model( "m", "o")
.batch_size(1)
.tpu_cost()
.mean(),
absl::Hours(5));
}
TEST(BatchStatsTest, ModelBatchStatsAreUniqueForEachModel) {
BatchStatsRegistry stats;
ASSERT_NE(&stats.model("m", "o"),
&stats.model("m", "o2"));
}
TEST(BatchStatsTest, BatchSizeStatsAreUniqueForEachBatchSize) {
ModelBatchStats stats;
ASSERT_NE(&stats.batch_size(1), &stats.batch_size(2));
}
TEST(BatchStatsTest, CostTrackerStartsWithNoMean) {
CostTracker tracker;
ASSERT_FALSE(tracker.mean().has_value());
}
TEST(BatchStatsTest, CostTrackerMeanIsCorrect) {
CostTracker tracker;
tracker.Register(absl::Hours(5));
tracker.Register(absl::Hours(7));
ASSERT_EQ(*tracker.mean(), absl::Hours(6));
}
TEST(BatchStatsTest, ProcessedSizeIsCorrect) {
ModelBatchStats stats;
stats.RegisterProcessedSize(5);
stats.RegisterProcessedSize(7);
ASSERT_EQ(stats.cumulative_processed_size(), 12);
}
TEST(BatchStatsTest, ModelOpNamesAreCorrect) {
BatchStatsRegistry stats;
stats.model("m", "o")
.batch_size(1)
.tpu_cost()
.Register(absl::Hours(5));
stats.model("m2", "o")
.batch_size(1)
.tpu_cost()
.Register(absl::Hours(7));
stats.model("m", "o")
.batch_size(2)
.tpu_cost()
.Register(absl::Hours(4));
stats.model("m", "o2")
.batch_size(1)
.tpu_cost()
.Register(absl::Hours(1));
ASSERT_THAT(stats.ModelAndOpNames(),
UnorderedElementsAre(
std::tuple("m", "o"),
std::tuple("m", "o2"),
std::tuple("m2", "o")));
}
TEST(BatchStatsTest, BatchSizesAreCorrect) {
ModelBatchStats stats;
stats.batch_size(1).tpu_cost().Register(absl::Hours(5));
stats.batch_size(4).tpu_cost().Register(absl::Hours(7));
stats.batch_size(1).tpu_cost().Register(absl::Hours(4));
stats.batch_size(2).tpu_cost().Register(absl::Hours(1));
ASSERT_THAT(stats.BatchSizes(), UnorderedElementsAre(1, 2, 4));
}
TEST(BatchStatsTest, BatchTimeoutIsCorrect) {
ModelBatchStats stats;
ASSERT_EQ(stats.batch_timeout_micros(), -1);
stats.SetBatchTimeoutMicros(100);
ASSERT_EQ(stats.batch_timeout_micros(), 100);
}
TEST(BatchStatsTest, NumBatchThreadsIsCorrect) {
ModelBatchStats stats;
ASSERT_EQ(stats.num_batch_threads(), -1);
stats.SetNumBatchThreads(16);
ASSERT_EQ(stats.num_batch_threads(), 16);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/batching_util/batch_stats.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/batching_util/batch_stats_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
52944e6e-66f7-4103-9d2a-a920c7a49650 | cpp | tensorflow/tensorflow | verify_tfxla_legalization | tensorflow/compiler/mlir/tf2xla/transforms/verify_tfxla_legalization.cc | tensorflow/compiler/mlir/tf2xla/transforms/verify_tfxla_legalization_test.cc | #include <map>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "mlir/IR/BuiltinOps.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/FormatVariadic.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/Diagnostics.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/Visitors.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Transforms/DialectConversion.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_dialect.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
#include "tensorflow/compiler/mlir/tf2xla/transforms/passes.h"
#include "tensorflow/compiler/mlir/tf2xla/transforms/xla_legalize_targets.h"
#include "xla/mlir_hlo/mhlo/IR/hlo_ops.h"
#include "tensorflow/core/lib/monitoring/counter.h"
#include "tensorflow/core/platform/errors.h"
namespace mlir {
namespace mhlo {
namespace {
#define GEN_PASS_DEF_VERIFYTFXLALEGALIZATION
#include "tensorflow/compiler/mlir/tf2xla/transforms/xla_legalize_tf_passes.h.inc"
auto* mlir_failed_legalization_op_count =
tensorflow::monitoring::Counter<1>::New(
"/tensorflow/core/tf2xla/"
"mlir_second_phase_failed_legalization_op_count",
"Counts which op fails to legalize", "op_name");
auto* mlir_non_static_op_count = tensorflow::monitoring::Counter<1>::New(
"/tensorflow/core/tf2xla/"
"mlir_second_phase_non_static_op_count",
"Counts which ops do not have static results", "op_name");
auto* mlir_non_static_op_skip_count = tensorflow::monitoring::Counter<1>::New(
"/tensorflow/core/tf2xla/"
"mlir_second_phase_non_static_op_skip_count",
"Counts skipped ops which do not have static results", "op_name");
static const char* kMustBeConstantError =
"must have compile-time constant inputs and outputs.\n\n"
"XLA compilation requires that operator arguments that represent shapes or "
"dimensions be evaluated to concrete values at compile time. This error "
"means that a shape or dimension argument could not be evaluated at "
"compile time, usually because the value of the argument depends on a "
"parameter to the computation, on a variable, or on a stateful operation "
"such as a random number generator.";
static const DenseSet<mlir::TypeID>* operations_to_skip =
new DenseSet<mlir::TypeID>{mlir::TypeID::get<mhlo::EinsumOp>()};
class VerifyTFXLALegalization
: public impl::VerifyTFXLALegalizationBase<VerifyTFXLALegalization> {
public:
explicit VerifyTFXLALegalization(bool legalize_chlo) {
legalize_chlo_ = legalize_chlo;
}
void runOnOperation() override;
};
static void IncrementCounterFor(tensorflow::monitoring::Counter<1>* counter,
Operation* op) {
counter->GetCell(op->getName().getStringRef().str())->IncrementBy(1);
}
bool HasBounds(RankedTensorType type) {
auto encoding = mlir::dyn_cast_or_null<mlir::mhlo::TypeExtensionsAttr>(
type.getEncoding());
return (encoding && !encoding.getBounds().empty());
}
bool HasStaticShapeOrBounded(Value val) {
auto type = val.getType();
if (mlir::isa<UnrankedTensorType>(type)) {
return false;
}
if (mlir::isa<RankedTensorType>(type)) {
auto ranked_tensor = mlir::dyn_cast<RankedTensorType>(type);
if (ranked_tensor.hasStaticShape()) {
return true;
}
return HasBounds(ranked_tensor);
}
return true;
}
bool EmitMustBeConstantError(Operation* op) {
if (operations_to_skip->contains(op->getRegisteredInfo()->getTypeID())) {
IncrementCounterFor(mlir_non_static_op_skip_count, op);
return true;
}
emitError(op->getLoc()) << absl::StrCat(
"Node `", op->getName().getStringRef().str(), "` ", kMustBeConstantError);
return false;
}
bool IsStaticOperation(Operation* op) {
for (auto o : op->getResults()) {
if (!HasStaticShapeOrBounded(o)) {
return EmitMustBeConstantError(op);
}
}
return true;
}
bool IsMhloAndStatic(Operation* op) {
if (!llvm::isa<mlir::mhlo::MhloDialect>(op->getDialect())) {
return true;
}
return IsStaticOperation(op);
}
bool IsDefaultConversionLegal(
Operation* op, const ConversionTarget& default_conversion_target) {
if (!default_conversion_target.isLegal(op)) {
emitError(op->getLoc()) << "Could not legalize op: " << op->getName();
return false;
}
return true;
}
void VerifyTFXLALegalization::runOnOperation() {
Operation* func_op = getOperation();
ConversionTarget default_conversion_target =
GetDefaultLegalConversionTargets(getContext(), legalize_chlo_);
bool has_invalid_ops = false;
func_op->walk([&](Operation* op) {
if (!IsMhloAndStatic(op)) {
has_invalid_ops = true;
IncrementCounterFor(mlir_non_static_op_count, op);
return WalkResult::interrupt();
}
if (!IsDefaultConversionLegal(op, default_conversion_target)) {
has_invalid_ops = true;
IncrementCounterFor(mlir_failed_legalization_op_count, op);
}
return WalkResult::advance();
});
if (has_invalid_ops) signalPassFailure();
}
}
std::unique_ptr<mlir::OperationPass<mlir::func::FuncOp>>
CreateVerifyTFXLALegalizationPass(bool legalize_chlo) {
return std::make_unique<VerifyTFXLALegalization>(legalize_chlo);
}
}
} | #include <memory>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
#include "llvm/ADT/StringRef.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Support/LogicalResult.h"
#include "tensorflow/compiler/mlir/tensorflow/dialect_registration.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/serialize_mlir_module_utils.h"
#include "tensorflow/compiler/mlir/tf2xla/transforms/passes.h"
#include "tensorflow/compiler/mlir/tf2xla/transforms/test_utils.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/monitoring/cell_reader.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace {
using ::mlir::MLIRContext;
using ::mlir::ModuleOp;
using ::mlir::OwningOpRef;
using ::mlir::mhlo::test::GetMlirModuleFromString;
using ::tensorflow::monitoring::testing::CellReader;
static constexpr char kFailedLegalizationStreamz[] =
"/tensorflow/core/tf2xla/mlir_second_phase_failed_legalization_op_count";
static constexpr char kNonStaticOpStreamz[] =
"/tensorflow/core/tf2xla/mlir_second_phase_non_static_op_count";
static constexpr char kNonStaticOpSkipStreamz[] =
"/tensorflow/core/tf2xla/mlir_second_phase_non_static_op_skip_count";
class VerifyTfxlaLegalizationTest : public ::testing::Test {
protected:
void CreateModule(const char* module_string) {
TF_ASSERT_OK_AND_ASSIGN(module_,
GetMlirModuleFromString(module_string, &context_));
pm_ = std::make_unique<mlir::PassManager>(&context_);
pm_->addNestedPass<mlir::func::FuncOp>(
mlir::mhlo::CreateVerifyTFXLALegalizationPass(false));
}
mlir::LogicalResult Run() { return pm_->run(module_.get()); }
private:
MLIRContext context_;
OwningOpRef<ModuleOp> module_;
std::unique_ptr<mlir::PassManager> pm_;
};
TEST_F(VerifyTfxlaLegalizationTest, RecordsStreamzFailedVerification) {
static constexpr char kMlirModuleStr[] = R"(
module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 268 : i32}} {
func.func @main() -> tensor<1xi32> {
%0 = "tf.BadValue"() {value = dense<1000> : tensor<1xi32>} : () -> tensor<1xi32>
func.return %0 : tensor<1xi32>
}
})";
CellReader<int64_t> error(kFailedLegalizationStreamz);
CreateModule(kMlirModuleStr);
auto result = Run();
EXPECT_TRUE(result.failed());
EXPECT_EQ(error.Delta("tf.BadValue"), 1);
}
TEST_F(VerifyTfxlaLegalizationTest, ErrorsNonStaticInputs) {
static constexpr char kNonStaticFailure[] = R"(
module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 1504 : i32}} {
func.func @main() -> tensor<?xi32> attributes {tf.entry_function = {control_outputs = "", inputs = "i,j", outputs = "identity_RetVal"}} {
%0 = mhlo.constant dense<1.000000e+00> : tensor<f64>
%1 = mhlo.convert %0 : (tensor<f64>) -> tensor<i64>
%2 = mhlo.reshape %1 : (tensor<i64>) -> tensor<1xi64>
%3 = "mhlo.dynamic_iota"(%2) {iota_dimension = 0 : i64} : (tensor<1xi64>) -> tensor<?xi32>
%4 = mhlo.multiply %3, %3 : tensor<?xi32>
return %4 : tensor<?xi32>
}
})";
CellReader<int64_t> legal_error(kFailedLegalizationStreamz);
CellReader<int64_t> static_error(kNonStaticOpStreamz);
CreateModule(kNonStaticFailure);
auto result = Run();
EXPECT_TRUE(result.failed());
EXPECT_EQ(legal_error.Delta("mhlo.dynamic_iota"), 0);
EXPECT_EQ(static_error.Delta("mhlo.dynamic_iota"), 1);
}
TEST_F(VerifyTfxlaLegalizationTest, SkipsSpecificNonStaticInputs) {
static constexpr char kNonStaticFailure[] = R"(
module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 1504 : i32}} {
func.func @main(%a : tensor<5x14x1xf32>, %b : tensor<1x14x32xf32>) -> tensor<?x?x?xf32> attributes {tf.entry_function = {control_outputs = "", inputs = "i,j", outputs = "identity_RetVal"}} {
%c = "mhlo.einsum"(%a, %b) {einsum_config = "bji,bjk->bik"} : (tensor<5x14x1xf32>, tensor<1x14x32xf32>) -> tensor<?x?x?xf32>
return %c : tensor<?x?x?xf32>
}
})";
CellReader<int64_t> static_error(kNonStaticOpStreamz);
CellReader<int64_t> skipped(kNonStaticOpSkipStreamz);
CreateModule(kNonStaticFailure);
auto result = Run();
EXPECT_TRUE(result.succeeded());
EXPECT_EQ(static_error.Delta("mhlo.einsum"), 0);
EXPECT_EQ(skipped.Delta("mhlo.einsum"), 1);
}
TEST_F(VerifyTfxlaLegalizationTest, SkipsNonStaticInputsWithBounds) {
static constexpr char kNonStaticWithBoundsSuccess[] = R"(
module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 1504 : i32}} {
func.func @main() -> tensor<?xi32, #mhlo.type_extensions<bounds = [4]>> attributes {tf.entry_function = {control_outputs = "", inputs = "i,j", outputs = "identity_RetVal"}} {
%0 = mhlo.constant dense<1.000000e+00> : tensor<f64>
%1 = mhlo.convert %0 : (tensor<f64>) -> tensor<i64>
%2 = mhlo.reshape %1 : (tensor<i64>) -> tensor<1xi64>
%3 = "mhlo.dynamic_iota"(%2) {iota_dimension = 0 : i64} : (tensor<1xi64>) -> tensor<?xi32, #mhlo.type_extensions<bounds = [4]>>
%4 = mhlo.multiply %3, %3 : tensor<?xi32, #mhlo.type_extensions<bounds = [4]>>
return %4 : tensor<?xi32, #mhlo.type_extensions<bounds = [4]>>
}
})";
CellReader<int64_t> legal_error(kFailedLegalizationStreamz);
CellReader<int64_t> static_error(kNonStaticOpStreamz);
CreateModule(kNonStaticWithBoundsSuccess);
auto result = Run();
EXPECT_TRUE(result.succeeded());
EXPECT_EQ(legal_error.Delta("mhlo.multiply"), 0);
EXPECT_EQ(static_error.Delta("mhlo.multiply"), 0);
}
TEST_F(VerifyTfxlaLegalizationTest, RecordsMultipleFailures) {
static constexpr char kMultipleFailures[] = R"(
module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 268 : i32}} {
func.func @main() -> tensor<1xi32> {
%0 = "tf.BadValue"() {value = dense<1000> : tensor<1xi32>} : () -> tensor<1xi32>
%1 = "tf.AlsoBad"() {value = dense<10> : tensor<1xi32>} : () -> tensor<1xi32>
func.return %0 : tensor<1xi32>
}
})";
CellReader<int64_t> error(kFailedLegalizationStreamz);
CreateModule(kMultipleFailures);
auto result = Run();
EXPECT_TRUE(result.failed());
EXPECT_EQ(error.Delta("tf.BadValue"), 1);
EXPECT_EQ(error.Delta("tf.AlsoBad"), 1);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tf2xla/transforms/verify_tfxla_legalization.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tf2xla/transforms/verify_tfxla_legalization_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ae38afae-cea4-480b-bc81-c353972f7926 | cpp | tensorflow/tensorflow | reduction_utils | third_party/xla/xla/service/gpu/reduction_utils.cc | third_party/xla/xla/service/gpu/reduction_utils_test.cc | #include "xla/service/gpu/reduction_utils.h"
#include <algorithm>
#include <array>
#include <atomic>
#include <cstdint>
#include <ostream>
#include "absl/algorithm/container.h"
#include "absl/base/const_init.h"
#include "absl/strings/str_join.h"
#include "absl/synchronization/mutex.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/layout_util.h"
#include "xla/service/gpu/ir_emission_utils.h"
#include "xla/service/hlo_module_config.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/semantic_version.h"
#include "xla/util.h"
#include "tsl/platform/logging.h"
#ifdef GOOGLE_CUDA
#include "xla/service/gpu/gpu_asm_opts_util.h"
#include "xla/stream_executor/cuda/cuda_asm_compiler.h"
#endif
namespace xla {
namespace gpu {
namespace {
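// Splits the dimensions of `shape` (walked in minor-to-major order) into
// three contiguous groups {major, middle, minor}, where `dims_middle` defines
// the middle group, and returns the product of the dimension sizes in each
// group. E.g. a row-major [8,100,32] shape with dims_middle = {1} partitions
// into {8, 100, 32}.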
Vector3 PartitionShapeByMiddleDimensions(
const Shape& shape, absl::Span<const int64_t> dims_middle) {
CHECK(LayoutUtil::AreDimensionsConsecutive(shape.layout(), dims_middle));
Vector3 values = {1, 1, 1};
enum Segment { kMajor = 0, kMiddle = 1, kMinor = 2 };
Segment cur_segment = kMinor;
for (int64_t cur_dim : LayoutUtil::MinorToMajor(shape)) {
if (cur_segment != kMajor) {
bool cur_dim_in_middle = absl::c_linear_search(dims_middle, cur_dim);
if (cur_segment == kMinor) {
if (cur_dim_in_middle) {
cur_segment = kMiddle;
}
} else if (cur_segment == kMiddle) {
if (!cur_dim_in_middle) {
cur_segment = kMajor;
}
}
}
values[cur_segment] *= shape.dimensions(cur_dim);
}
return values;
}
}
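// Double-checked locking: the ptxas version probe is expensive, so the result
// is computed once and cached in an atomic. Row reductions cap threads-x at
// 512 when ptxas is older than 12.2 (or its version cannot be determined),
// and at 1024 otherwise.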
int64_t MinThreadsXRowReduction(const HloModuleConfig& hlo_module_config) {
#ifdef GOOGLE_CUDA
static absl::Mutex mutex(absl::kConstInit);
static std::atomic<bool*> use_reduced_thread_count_atomic = nullptr;
bool* use_reduced_thread_count =
use_reduced_thread_count_atomic.load(std::memory_order_acquire);
if (use_reduced_thread_count == nullptr) {
absl::MutexLock lock(&mutex);
use_reduced_thread_count =
use_reduced_thread_count_atomic.load(std::memory_order_relaxed);
if (use_reduced_thread_count == nullptr) {
auto ptxas_config =
PtxOptsFromDebugOptions(hlo_module_config.debug_options());
auto ptxas_version_tuple =
se::GetAsmCompilerVersion(ptxas_config.preferred_cuda_dir);
use_reduced_thread_count = new bool(false);
if (!ptxas_version_tuple.ok() ||
ptxas_version_tuple.value() <
stream_executor::SemanticVersion{12, 2, 0}) {
*use_reduced_thread_count = true;
}
use_reduced_thread_count_atomic.store(use_reduced_thread_count,
std::memory_order_release);
}
}
if (*use_reduced_thread_count) {
return 512;
}
#endif
return 1024;
}
Vector3 GetReductionTiling(const ReductionDimensions& reduction_dimensions) {
if (reduction_dimensions.is_row_reduction) {
int64_t tile_z = std::min(reduction_dimensions.dimensions[0],
BatchedReductionRaceFreeBound());
return {tile_z, 1, 16};
}
return {1, 128, 1};
}
int64_t ReductionDimensionRaceFreeBound(
const HloModuleConfig& hlo_module_config,
const ReductionDimensions& reduction_dimensions) {
Vector3 reduction_tiling = GetReductionTiling(reduction_dimensions);
if (reduction_dimensions.is_row_reduction) {
return MinThreadsXRowReduction(hlo_module_config) * reduction_tiling[2];
}
return WarpSize() * reduction_tiling[1];
}
bool IsUnnestedReductionFasterThanElemental(
const ReductionDimensions& reduction_dimensions) {
if (reduction_dimensions.is_row_reduction) {
return (reduction_dimensions.dimensions[2] >= WarpSize()) ||
((WarpSize() % reduction_dimensions.dimensions[2]) == 0);
}
int64_t major_size = reduction_dimensions.dimensions[1];
int64_t minor_size = reduction_dimensions.dimensions[2];
bool prefer_elemental_emitter =
(major_size < WarpSize()) ||
(major_size < 2 * WarpSize() && minor_size < WarpSize()) ||
(major_size < 4 * WarpSize() && minor_size < 8) ||
(major_size < 8 * WarpSize() && minor_size < 3);
return !prefer_elemental_emitter;
}
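// A reduce qualifies for the specialized (unnested) reduction emitter when
// either the kept or the reduced dimensions form a physically contiguous
// block in the operand layout, and the heuristic above says the specialized
// emitter beats the elemental one.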
bool IsReductionFromOrToContiguousDimensions(const HloInstruction& reduce) {
if (reduce.opcode() != HloOpcode::kReduce) {
return false;
}
const Shape& operand_shape = reduce.operand(0)->shape();
absl::Span<const int64_t> dims_to_reduce = reduce.dimensions();
DimensionVector dims_to_keep;
for (int64_t dim = 0; dim < operand_shape.dimensions().size(); ++dim) {
if (!absl::c_linear_search(dims_to_reduce, dim)) {
dims_to_keep.push_back(dim);
}
}
return (LayoutUtil::AreDimensionsConsecutive(operand_shape.layout(),
dims_to_keep) ||
LayoutUtil::AreDimensionsConsecutive(operand_shape.layout(),
dims_to_reduce)) &&
IsUnnestedReductionFasterThanElemental(
GetReductionKindAndContiguousComponents(reduce));
}
bool ReductionIsRaceFree(const HloModuleConfig& hlo_module_config,
const ReductionDimensions& reduction_dimensions) {
if (reduction_dimensions.is_row_reduction) {
return reduction_dimensions.dimensions[2] <=
ReductionDimensionRaceFreeBound(hlo_module_config,
reduction_dimensions) &&
reduction_dimensions.dimensions[0] <=
BatchedReductionRaceFreeBound();
}
return reduction_dimensions.dimensions[1] <=
ReductionDimensionRaceFreeBound(hlo_module_config,
reduction_dimensions);
}
std::ostream& operator<<(std::ostream& os,
const ReductionDimensions& reduction_dimensions) {
bool is_row_reduction = reduction_dimensions.is_row_reduction;
os << (is_row_reduction ? "row " : "column ") << "reduction ["
<< absl::StrJoin(reduction_dimensions.dimensions, ",") << "] -> ["
<< reduction_dimensions.dimensions[0] << ", "
<< reduction_dimensions
.dimensions[is_row_reduction
? ReductionDimensions::kRowKeptDimension
: ReductionDimensions::kColMinorKeptDimension]
<< "]";
return os;
}
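// Collapses the operand shape into three contiguous components. For row
// reductions the result is {major-reduced, kept, minor-reduced}; for column
// reductions it is {major-kept, reduced, minor-kept} (see the k*Dimension
// constants referenced in operator<< above).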
ReductionDimensions GetReductionKindAndContiguousComponents(
const HloInstruction& reduce) {
Shape input_shape = reduce.operand(0)->shape();
absl::Span<const int64_t> dims_to_reduce = reduce.dimensions();
DimensionVector dims_to_keep;
for (int64_t dim = 0; dim < input_shape.rank(); ++dim) {
if (!absl::c_linear_search(dims_to_reduce, dim)) {
dims_to_keep.push_back(dim);
}
}
if (dims_to_keep.empty()) {
return {true,
{1, 1, ShapeUtil::ElementsIn(input_shape)}};
}
if (LayoutUtil::AreDimensionsConsecutive(input_shape.layout(),
dims_to_keep)) {
Vector3 shape_partition =
PartitionShapeByMiddleDimensions(input_shape, dims_to_keep);
if (shape_partition[1] == 1) {
return {true,
{1, 1, shape_partition[0] * shape_partition[2]}};
}
if (shape_partition[2] == 1) {
return {false,
{1, shape_partition[0], shape_partition[1]}};
}
return {true, shape_partition};
}
Vector3 shape_partition =
PartitionShapeByMiddleDimensions(input_shape, dims_to_reduce);
if (shape_partition[2] == 1) {
return {true,
{1, shape_partition[0], shape_partition[1]}};
}
return {false, shape_partition};
}
bool IsRealReductionHero(const HloInstruction& root,
const HloInstruction& hero) {
if (!IsReductionFromOrToContiguousDimensions(hero)) {
return false;
}
return &root == &hero ||
ReductionIsRaceFree(hero.GetModule()->config(),
GetReductionKindAndContiguousComponents(hero));
}
bool AreReductionsMultiOutputFusionCompatible(
const HloInstruction* reduce_hero, const HloInstruction* first_reduce) {
return GetReductionKindAndContiguousComponents(*reduce_hero) ==
GetReductionKindAndContiguousComponents(*first_reduce);
}
}
} | #include "xla/service/gpu/reduction_utils.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/str_cat.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/hlo_parser.h"
#include "xla/tests/hlo_test_base.h"
namespace xla {
namespace gpu {
namespace {
using ::testing::ElementsAre;
using ReductionUtilsTest = HloTestBase;
const char kModulePrefix[] = R"(
HloModule test_module
scalar_add {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
})";
TEST_F(ReductionUtilsTest, ReductionsAreMultioutputFusionCompatible) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_sibling1 {
p_0 = f32[32,64]{1,0} parameter(0)
constant = f32[] constant(0)
ROOT reduce = f32[32]{0} reduce(p_0, constant), dimensions={1}, to_apply=scalar_add
}
fused_sibling2 {
p_0 = f32[32,64]{1,0} parameter(0)
neg = f32[32,64]{1,0} negate(p_0)
constant = f32[] constant(0)
ROOT reduce = f32[32]{0} reduce(neg, constant), dimensions={1}, to_apply=scalar_add
}
ENTRY entry {
p_0 = f32[32,64]{1,0} parameter(0)
fusion1 = f32[32]{0} fusion(p_0), kind=kInput, calls=fused_sibling1
fusion2 = f32[32]{0} fusion(p_0), kind=kInput, calls=fused_sibling2
ROOT root = (f32[32]{0}, f32[32]{0}) tuple(fusion1, fusion2)
})"))
.value();
const HloInstruction* root = module->entry_computation()->root_instruction();
const HloInstruction* fusion1 = root->operand(0);
const HloInstruction* fusion2 = root->operand(1);
EXPECT_TRUE(AreReductionsMultiOutputFusionCompatible(
fusion1->fused_expression_root(), fusion2->fused_expression_root()));
}
TEST_F(ReductionUtilsTest,
ReductionsWithSameCanonicalizedDimsAreMultioutputFusionCompatible) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_sibling1 {
p_0 = f32[32,64]{1,0} parameter(0)
constant = f32[] constant(0)
ROOT reduce = f32[32]{0} reduce(p_0, constant), dimensions={1}, to_apply=scalar_add
}
fused_sibling2 {
p_0 = f32[32,64]{1,0} parameter(0)
bitcast = f32[32,8,8]{2,1,0} bitcast(p_0)
constant = f32[] constant(0)
ROOT reduce = f32[32]{0} reduce(bitcast, constant), dimensions={1,2}, to_apply=scalar_add
}
ENTRY entry {
p_0 = f32[32,64]{1,0} parameter(0)
fusion1 = f32[32]{0} fusion(p_0), kind=kInput, calls=fused_sibling1
fusion2 = f32[32]{0} fusion(p_0), kind=kInput, calls=fused_sibling2
ROOT root = (f32[32]{0}, f32[32]{0}) tuple(fusion1, fusion2)
})"))
.value();
const HloInstruction* root = module->entry_computation()->root_instruction();
const HloInstruction* fusion1 = root->operand(0);
const HloInstruction* fusion2 = root->operand(1);
EXPECT_TRUE(AreReductionsMultiOutputFusionCompatible(
fusion1->fused_expression_root(), fusion2->fused_expression_root()));
}
TEST_F(ReductionUtilsTest,
ReductionsAreNotMultioutputFusionCompatible_DifferentOperandShapes) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_sibling1 {
p_0 = f32[32,64]{1,0} parameter(0)
constant = f32[] constant(0)
ROOT reduce = f32[32]{0} reduce(p_0, constant), dimensions={1}, to_apply=scalar_add
}
fused_sibling2 {
p_0 = f32[64,32]{1,0} parameter(0)
neg = f32[64,32]{1,0} negate(p_0)
constant = f32[] constant(0)
ROOT reduce = f32[32]{0} reduce(neg, constant), dimensions={0}, to_apply=scalar_add
}
ENTRY entry {
p_0 = f32[32,64]{1,0} parameter(0)
p_1 = f32[64,32]{1,0} parameter(1)
fusion1 = f32[32]{0} fusion(p_0), kind=kInput, calls=fused_sibling1
fusion2 = f32[32]{0} fusion(p_1), kind=kInput, calls=fused_sibling2
ROOT root = (f32[32]{0}, f32[32]{0}) tuple(fusion1, fusion2)
})"))
.value();
const HloInstruction* root = module->entry_computation()->root_instruction();
const HloInstruction* fusion1 = root->operand(0);
const HloInstruction* fusion2 = root->operand(1);
EXPECT_FALSE(AreReductionsMultiOutputFusionCompatible(
fusion1->fused_expression_root(), fusion2->fused_expression_root()));
}
TEST_F(ReductionUtilsTest,
ReductionsAreNotMultioutputFusionCompatible_DifferentOutputShapes) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_sibling1 {
p_0 = f32[32,64]{1,0} parameter(0)
constant = f32[] constant(0)
ROOT reduce = f32[32]{0} reduce(p_0, constant), dimensions={1}, to_apply=scalar_add
}
fused_sibling2 {
p_0 = f32[64,32]{1,0} parameter(0)
neg = f32[64,32]{1,0} negate(p_0)
constant = f32[] constant(0)
ROOT reduce = f32[64]{0} reduce(neg, constant), dimensions={1}, to_apply=scalar_add
}
ENTRY entry {
p_0 = f32[32,64]{1,0} parameter(0)
p_1 = f32[64,32]{1,0} parameter(1)
fusion1 = f32[32]{0} fusion(p_0), kind=kInput, calls=fused_sibling1
fusion2 = f32[64]{0} fusion(p_1), kind=kInput, calls=fused_sibling2
ROOT root = (f32[32]{0}, f32[64]{0}) tuple(fusion1, fusion2)
})"))
.value();
const HloInstruction* root = module->entry_computation()->root_instruction();
const HloInstruction* fusion1 = root->operand(0);
const HloInstruction* fusion2 = root->operand(1);
EXPECT_FALSE(AreReductionsMultiOutputFusionCompatible(
fusion1->fused_expression_root(), fusion2->fused_expression_root()));
}
TEST_F(ReductionUtilsTest,
ReductionsAreNotMultioutputFusionCompatible_DifferentReduceDimensions) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_sibling1 {
p_0 = f32[32,32]{1,0} parameter(0)
constant = f32[] constant(0)
ROOT reduce = f32[32]{0} reduce(p_0, constant), dimensions={0}, to_apply=scalar_add
}
fused_sibling2 {
p_0 = f32[32,32]{1,0} parameter(0)
neg = f32[32,32]{1,0} negate(p_0)
constant = f32[] constant(0)
ROOT reduce = f32[32]{0} reduce(neg, constant), dimensions={1}, to_apply=scalar_add
}
ENTRY entry {
p_0 = f32[32,32]{1,0} parameter(0)
fusion1 = f32[32]{0} fusion(p_0), kind=kInput, calls=fused_sibling1
fusion2 = f32[32]{0} fusion(p_0), kind=kInput, calls=fused_sibling2
ROOT root = (f32[32]{0}, f32[32]{0}) tuple(fusion1, fusion2)
})"))
.value();
const HloInstruction* root = module->entry_computation()->root_instruction();
const HloInstruction* fusion1 = root->operand(0);
const HloInstruction* fusion2 = root->operand(1);
EXPECT_FALSE(AreReductionsMultiOutputFusionCompatible(
fusion1->fused_expression_root(), fusion2->fused_expression_root()));
}
TEST(ReductionDimensionsTest, GetOutputShape) {
ReductionDimensions row_reduction{true, {1, 2, 3}};
ReductionDimensions col_reduction{false, {1, 2, 3}};
EXPECT_THAT(row_reduction.GetOutputShape(), ElementsAre(2));
EXPECT_THAT(col_reduction.GetOutputShape(), ElementsAre(1, 3));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/reduction_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/reduction_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
3358f23b-4b78-4872-945b-0d8d3aa38831 | cpp | google/quiche | quiche_data_reader | quiche/common/quiche_data_reader.cc | quiche/common/quiche_data_reader_test.cc | #include "quiche/common/quiche_data_reader.h"
#include <algorithm>
#include <cstring>
#include <string>
#include "absl/strings/numbers.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "quiche/common/platform/api/quiche_bug_tracker.h"
#include "quiche/common/platform/api/quiche_logging.h"
#include "quiche/common/quiche_endian.h"
namespace quiche {
QuicheDataReader::QuicheDataReader(absl::string_view data)
    : QuicheDataReader(data.data(), data.length(),
                       quiche::NETWORK_BYTE_ORDER) {}
QuicheDataReader::QuicheDataReader(const char* data, const size_t len)
: QuicheDataReader(data, len, quiche::NETWORK_BYTE_ORDER) {}
QuicheDataReader::QuicheDataReader(const char* data, const size_t len,
quiche::Endianness endianness)
: data_(data), len_(len), pos_(0), endianness_(endianness) {}
bool QuicheDataReader::ReadUInt8(uint8_t* result) {
return ReadBytes(result, sizeof(*result));
}
bool QuicheDataReader::ReadUInt16(uint16_t* result) {
if (!ReadBytes(result, sizeof(*result))) {
return false;
}
if (endianness_ == quiche::NETWORK_BYTE_ORDER) {
*result = quiche::QuicheEndian::NetToHost16(*result);
}
return true;
}
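// Reads a 24-bit big-endian value into the low three bytes of *result.
// Only implemented for NETWORK_BYTE_ORDER readers.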
bool QuicheDataReader::ReadUInt24(uint32_t* result) {
if (endianness_ != quiche::NETWORK_BYTE_ORDER) {
QUICHE_BUG(QuicheDataReader_ReadUInt24_NotImplemented);
return false;
}
*result = 0;
if (!ReadBytes(reinterpret_cast<char*>(result) + 1, 3u)) {
return false;
}
*result = quiche::QuicheEndian::NetToHost32(*result);
return true;
}
bool QuicheDataReader::ReadUInt32(uint32_t* result) {
if (!ReadBytes(result, sizeof(*result))) {
return false;
}
if (endianness_ == quiche::NETWORK_BYTE_ORDER) {
*result = quiche::QuicheEndian::NetToHost32(*result);
}
return true;
}
bool QuicheDataReader::ReadUInt64(uint64_t* result) {
if (!ReadBytes(result, sizeof(*result))) {
return false;
}
if (endianness_ == quiche::NETWORK_BYTE_ORDER) {
*result = quiche::QuicheEndian::NetToHost64(*result);
}
return true;
}
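// Reads num_bytes (at most sizeof(uint64_t)) into the low-order bytes of
// *result, honoring the reader's endianness.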
bool QuicheDataReader::ReadBytesToUInt64(size_t num_bytes, uint64_t* result) {
*result = 0u;
if (num_bytes > sizeof(*result)) {
return false;
}
if (endianness_ == quiche::HOST_BYTE_ORDER) {
return ReadBytes(result, num_bytes);
}
if (!ReadBytes(reinterpret_cast<char*>(result) + sizeof(*result) - num_bytes,
num_bytes)) {
return false;
}
*result = quiche::QuicheEndian::NetToHost64(*result);
return true;
}
bool QuicheDataReader::ReadStringPiece16(absl::string_view* result) {
uint16_t result_len;
if (!ReadUInt16(&result_len)) {
return false;
}
return ReadStringPiece(result, result_len);
}
bool QuicheDataReader::ReadStringPiece8(absl::string_view* result) {
uint8_t result_len;
if (!ReadUInt8(&result_len)) {
return false;
}
return ReadStringPiece(result, result_len);
}
bool QuicheDataReader::ReadStringPiece(absl::string_view* result, size_t size) {
if (!CanRead(size)) {
OnFailure();
return false;
}
*result = absl::string_view(data_ + pos_, size);
pos_ += size;
return true;
}
absl::string_view QuicheDataReader::ReadAtMost(size_t size) {
size_t actual_size = std::min(size, BytesRemaining());
absl::string_view result = absl::string_view(data_ + pos_, actual_size);
AdvancePos(actual_size);
return result;
}
bool QuicheDataReader::ReadTag(uint32_t* tag) {
return ReadBytes(tag, sizeof(*tag));
}
bool QuicheDataReader::ReadDecimal64(size_t num_digits, uint64_t* result) {
absl::string_view digits;
if (!ReadStringPiece(&digits, num_digits)) {
return false;
}
return absl::SimpleAtoi(digits, result);
}
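// Returns the encoded length (1, 2, 4, or 8 bytes) of the variable-length
// integer at the current position without advancing, or
// VARIABLE_LENGTH_INTEGER_LENGTH_0 if no bytes remain.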
QuicheVariableLengthIntegerLength QuicheDataReader::PeekVarInt62Length() {
QUICHE_DCHECK_EQ(endianness(), NETWORK_BYTE_ORDER);
const unsigned char* next =
reinterpret_cast<const unsigned char*>(data() + pos());
if (BytesRemaining() == 0) {
return VARIABLE_LENGTH_INTEGER_LENGTH_0;
}
return static_cast<QuicheVariableLengthIntegerLength>(
1 << ((*next & 0b11000000) >> 6));
}
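// Reads an IETF QUIC / RFC 9000 variable-length integer: the two most
// significant bits of the first byte select a 1-, 2-, 4-, or 8-byte
// encoding, and the remaining bits carry the value in network byte order.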
bool QuicheDataReader::ReadVarInt62(uint64_t* result) {
QUICHE_DCHECK_EQ(endianness(), quiche::NETWORK_BYTE_ORDER);
size_t remaining = BytesRemaining();
const unsigned char* next =
reinterpret_cast<const unsigned char*>(data() + pos());
if (remaining != 0) {
switch (*next & 0xc0) {
case 0xc0:
if (remaining >= 8) {
*result = (static_cast<uint64_t>((*(next)) & 0x3f) << 56) +
(static_cast<uint64_t>(*(next + 1)) << 48) +
(static_cast<uint64_t>(*(next + 2)) << 40) +
(static_cast<uint64_t>(*(next + 3)) << 32) +
(static_cast<uint64_t>(*(next + 4)) << 24) +
(static_cast<uint64_t>(*(next + 5)) << 16) +
(static_cast<uint64_t>(*(next + 6)) << 8) +
(static_cast<uint64_t>(*(next + 7)) << 0);
AdvancePos(8);
return true;
}
return false;
case 0x80:
if (remaining >= 4) {
*result = (((*(next)) & 0x3f) << 24) + (((*(next + 1)) << 16)) +
(((*(next + 2)) << 8)) + (((*(next + 3)) << 0));
AdvancePos(4);
return true;
}
return false;
case 0x40:
if (remaining >= 2) {
*result = (((*(next)) & 0x3f) << 8) + (*(next + 1));
AdvancePos(2);
return true;
}
return false;
case 0x00:
*result = (*next) & 0x3f;
AdvancePos(1);
return true;
}
}
return false;
}
bool QuicheDataReader::ReadStringPieceVarInt62(absl::string_view* result) {
uint64_t result_length;
if (!ReadVarInt62(&result_length)) {
return false;
}
return ReadStringPiece(result, result_length);
}
bool QuicheDataReader::ReadStringVarInt62(std::string& result) {
absl::string_view result_view;
bool success = ReadStringPieceVarInt62(&result_view);
result = std::string(result_view);
return success;
}
absl::string_view QuicheDataReader::ReadRemainingPayload() {
absl::string_view payload = PeekRemainingPayload();
pos_ = len_;
return payload;
}
absl::string_view QuicheDataReader::PeekRemainingPayload() const {
return absl::string_view(data_ + pos_, len_ - pos_);
}
absl::string_view QuicheDataReader::FullPayload() const {
return absl::string_view(data_, len_);
}
absl::string_view QuicheDataReader::PreviouslyReadPayload() const {
return absl::string_view(data_, pos_);
}
bool QuicheDataReader::ReadBytes(void* result, size_t size) {
if (!CanRead(size)) {
OnFailure();
return false;
}
memcpy(result, data_ + pos_, size);
pos_ += size;
return true;
}
bool QuicheDataReader::Seek(size_t size) {
if (!CanRead(size)) {
OnFailure();
return false;
}
pos_ += size;
return true;
}
bool QuicheDataReader::IsDoneReading() const { return len_ == pos_; }
size_t QuicheDataReader::BytesRemaining() const {
if (pos_ > len_) {
QUICHE_BUG(quiche_reader_pos_out_of_bound)
<< "QUIC reader pos out of bound: " << pos_ << ", len: " << len_;
return 0;
}
return len_ - pos_;
}
bool QuicheDataReader::TruncateRemaining(size_t truncation_length) {
if (truncation_length > BytesRemaining()) {
return false;
}
len_ = pos_ + truncation_length;
return true;
}
bool QuicheDataReader::CanRead(size_t bytes) const {
return bytes <= (len_ - pos_);
}
void QuicheDataReader::OnFailure() {
pos_ = len_;
}
uint8_t QuicheDataReader::PeekByte() const {
if (pos_ >= len_) {
QUICHE_LOG(FATAL)
<< "Reading is done, cannot peek next byte. Tried to read pos = "
<< pos_ << " buffer length = " << len_;
return 0;
}
return data_[pos_];
}
std::string QuicheDataReader::DebugString() const {
return absl::StrCat(" { length: ", len_, ", position: ", pos_, " }");
}
#undef ENDPOINT
} | #include "quiche/common/quiche_data_reader.h"
#include <cstdint>
#include "absl/strings/string_view.h"
#include "quiche/common/platform/api/quiche_test.h"
#include "quiche/common/quiche_endian.h"
namespace quiche {
TEST(QuicheDataReaderTest, ReadUInt16) {
const uint16_t kData[] = {
QuicheEndian::HostToNet16(1),
QuicheEndian::HostToNet16(1 << 15),
};
QuicheDataReader reader(reinterpret_cast<const char*>(kData), sizeof(kData));
EXPECT_FALSE(reader.IsDoneReading());
uint16_t uint16_val;
EXPECT_TRUE(reader.ReadUInt16(&uint16_val));
EXPECT_FALSE(reader.IsDoneReading());
EXPECT_EQ(1, uint16_val);
EXPECT_TRUE(reader.ReadUInt16(&uint16_val));
EXPECT_TRUE(reader.IsDoneReading());
EXPECT_EQ(1 << 15, uint16_val);
}
TEST(QuicheDataReaderTest, ReadUInt32) {
const uint32_t kData[] = {
QuicheEndian::HostToNet32(1),
QuicheEndian::HostToNet32(0x80000000),
};
QuicheDataReader reader(reinterpret_cast<const char*>(kData),
ABSL_ARRAYSIZE(kData) * sizeof(uint32_t));
EXPECT_FALSE(reader.IsDoneReading());
uint32_t uint32_val;
EXPECT_TRUE(reader.ReadUInt32(&uint32_val));
EXPECT_FALSE(reader.IsDoneReading());
EXPECT_EQ(1u, uint32_val);
EXPECT_TRUE(reader.ReadUInt32(&uint32_val));
EXPECT_TRUE(reader.IsDoneReading());
EXPECT_EQ(1u << 31, uint32_val);
}
TEST(QuicheDataReaderTest, ReadStringPiece16) {
const char kData[] = {
0x00, 0x02,
0x48, 0x69,
0x00, 0x10,
0x54, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2c,
0x20, 0x31, 0x2c, 0x20, 0x32, 0x2c, 0x20, 0x33,
};
QuicheDataReader reader(kData, ABSL_ARRAYSIZE(kData));
EXPECT_FALSE(reader.IsDoneReading());
absl::string_view stringpiece_val;
EXPECT_TRUE(reader.ReadStringPiece16(&stringpiece_val));
EXPECT_FALSE(reader.IsDoneReading());
EXPECT_EQ(0, stringpiece_val.compare("Hi"));
EXPECT_TRUE(reader.ReadStringPiece16(&stringpiece_val));
EXPECT_TRUE(reader.IsDoneReading());
EXPECT_EQ(0, stringpiece_val.compare("Testing, 1, 2, 3"));
}
TEST(QuicheDataReaderTest, ReadUInt16WithBufferTooSmall) {
const char kData[] = {
0x00,
};
QuicheDataReader reader(kData, ABSL_ARRAYSIZE(kData));
EXPECT_FALSE(reader.IsDoneReading());
uint16_t uint16_val;
EXPECT_FALSE(reader.ReadUInt16(&uint16_val));
}
TEST(QuicheDataReaderTest, ReadUInt32WithBufferTooSmall) {
const char kData[] = {
0x00, 0x00, 0x00,
};
QuicheDataReader reader(kData, ABSL_ARRAYSIZE(kData));
EXPECT_FALSE(reader.IsDoneReading());
uint32_t uint32_val;
EXPECT_FALSE(reader.ReadUInt32(&uint32_val));
uint16_t uint16_val;
EXPECT_FALSE(reader.ReadUInt16(&uint16_val));
}
TEST(QuicheDataReaderTest, ReadStringPiece16WithBufferTooSmall) {
const char kData[] = {
0x00, 0x03,
0x48, 0x69,
};
QuicheDataReader reader(kData, ABSL_ARRAYSIZE(kData));
EXPECT_FALSE(reader.IsDoneReading());
absl::string_view stringpiece_val;
EXPECT_FALSE(reader.ReadStringPiece16(&stringpiece_val));
uint16_t uint16_val;
EXPECT_FALSE(reader.ReadUInt16(&uint16_val));
}
TEST(QuicheDataReaderTest, ReadStringPiece16WithBufferWayTooSmall) {
const char kData[] = {
0x00,
};
QuicheDataReader reader(kData, ABSL_ARRAYSIZE(kData));
EXPECT_FALSE(reader.IsDoneReading());
absl::string_view stringpiece_val;
EXPECT_FALSE(reader.ReadStringPiece16(&stringpiece_val));
uint16_t uint16_val;
EXPECT_FALSE(reader.ReadUInt16(&uint16_val));
}
TEST(QuicheDataReaderTest, ReadBytes) {
const char kData[] = {
0x66, 0x6f, 0x6f,
0x48, 0x69,
};
QuicheDataReader reader(kData, ABSL_ARRAYSIZE(kData));
EXPECT_FALSE(reader.IsDoneReading());
char dest1[3] = {};
EXPECT_TRUE(reader.ReadBytes(&dest1, ABSL_ARRAYSIZE(dest1)));
EXPECT_FALSE(reader.IsDoneReading());
EXPECT_EQ("foo", absl::string_view(dest1, ABSL_ARRAYSIZE(dest1)));
char dest2[2] = {};
EXPECT_TRUE(reader.ReadBytes(&dest2, ABSL_ARRAYSIZE(dest2)));
EXPECT_TRUE(reader.IsDoneReading());
EXPECT_EQ("Hi", absl::string_view(dest2, ABSL_ARRAYSIZE(dest2)));
}
TEST(QuicheDataReaderTest, ReadBytesWithBufferTooSmall) {
const char kData[] = {
0x01,
};
QuicheDataReader reader(kData, ABSL_ARRAYSIZE(kData));
EXPECT_FALSE(reader.IsDoneReading());
char dest[ABSL_ARRAYSIZE(kData) + 2] = {};
EXPECT_FALSE(reader.ReadBytes(&dest, ABSL_ARRAYSIZE(kData) + 1));
EXPECT_STREQ("", dest);
}
TEST(QuicheDataReaderTest, ReadAtMost) {
constexpr absl::string_view kData = "foobar";
QuicheDataReader reader(kData);
EXPECT_EQ(reader.ReadAtMost(0), "");
EXPECT_EQ(reader.ReadAtMost(3), "foo");
EXPECT_EQ(reader.ReadAtMost(6), "bar");
EXPECT_EQ(reader.ReadAtMost(1000), "");
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/common/quiche_data_reader.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/common/quiche_data_reader_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
47921c4c-501b-4155-a145-ae15dcab82ac | cpp | google/cel-cpp | message_type_name | internal/message_type_name.h | internal/message_type_name_test.cc | #ifndef THIRD_PARTY_CEL_CPP_INTERNAL_MESSAGE_TYPE_NAME_H_
#define THIRD_PARTY_CEL_CPP_INTERNAL_MESSAGE_TYPE_NAME_H_
#include <string>
#include <type_traits>
#include "absl/base/no_destructor.h"
#include "absl/strings/string_view.h"
#include "google/protobuf/message.h"
#include "google/protobuf/message_lite.h"
namespace cel::internal {
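// MessageTypeNameFor<T>() returns the fully qualified protobuf type name for
// T. The first overload handles lite messages (no descriptor available): the
// name is computed once from a default-constructed instance and cached in a
// NoDestructor static. The second overload handles full messages and reads
// the name straight from the descriptor.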
template <typename T>
std::enable_if_t<
std::conjunction_v<std::is_base_of<google::protobuf::MessageLite, T>,
std::negation<std::is_base_of<google::protobuf::Message, T>>>,
absl::string_view>
MessageTypeNameFor() {
static_assert(!std::is_const_v<T>, "T must not be const qualified");
static_assert(!std::is_volatile_v<T>, "T must not be volatile qualified");
static_assert(!std::is_reference_v<T>, "T must not be a reference");
static const absl::NoDestructor<std::string> kTypeName(T().GetTypeName());
return *kTypeName;
}
template <typename T>
std::enable_if_t<std::is_base_of_v<google::protobuf::Message, T>, absl::string_view>
MessageTypeNameFor() {
static_assert(!std::is_const_v<T>, "T must not be const qualified");
static_assert(!std::is_volatile_v<T>, "T must not be volatile qualified");
static_assert(!std::is_reference_v<T>, "T must not be a reference");
return T::descriptor()->full_name();
}
}
#endif | #include "internal/message_type_name.h"
#include "google/protobuf/any.pb.h"
#include "internal/testing.h"
namespace cel::internal {
namespace {
TEST(MessageTypeNameFor, Generated) {
EXPECT_EQ(MessageTypeNameFor<google::protobuf::Any>(), "google.protobuf.Any");
}
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/internal/message_type_name.h | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/internal/message_type_name_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
50dc4182-df60-4ae7-a9a7-df5628cbc0a3 | cpp | google/tensorstore | sync_flow_sender | tensorstore/util/execution/sync_flow_sender.h | tensorstore/util/execution/sync_flow_sender_test.cc | #ifndef TENSORSTORE_UTIL_EXECUTION_SYNC_FLOW_SENDER_H_
#define TENSORSTORE_UTIL_EXECUTION_SYNC_FLOW_SENDER_H_
#include <utility>
#include "absl/synchronization/mutex.h"
#include "tensorstore/util/execution/execution.h"
namespace tensorstore {
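// Wraps a flow receiver so that `set_value` calls, which a sender may issue
// concurrently from multiple threads, are serialized with a mutex. The
// terminal signals (set_done, set_error, set_stopping) are left unlocked;
// per the flow receiver contract they are issued only after all set_value
// calls have completed.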
template <typename Receiver>
struct SyncFlowReceiver {
SyncFlowReceiver() = default;
SyncFlowReceiver(Receiver receiver) : receiver(std::move(receiver)) {}
SyncFlowReceiver(SyncFlowReceiver&& other)
: receiver(std::move(other.receiver)) {}
SyncFlowReceiver& operator=(SyncFlowReceiver&& other) {
receiver = std::move(other.receiver);
return *this;
}
template <typename CancelReceiver>
friend void set_starting(SyncFlowReceiver& self, CancelReceiver cancel) {
execution::set_starting(self.receiver, std::move(cancel));
}
template <typename... V>
friend void set_value(SyncFlowReceiver& self, V... v) {
absl::MutexLock lock(&self.mutex);
execution::set_value(self.receiver, std::move(v)...);
}
friend void set_done(SyncFlowReceiver& self) {
execution::set_done(self.receiver);
}
template <typename E>
friend void set_error(SyncFlowReceiver& self, E e) {
execution::set_error(self.receiver, std::move(e));
}
friend void set_stopping(SyncFlowReceiver& self) {
execution::set_stopping(self.receiver);
}
Receiver receiver;
absl::Mutex mutex;
};
template <typename Sender>
struct SyncFlowSender {
Sender sender;
template <typename Receiver>
friend void submit(SyncFlowSender& self, Receiver receiver) {
execution::submit(self.sender,
SyncFlowReceiver<Receiver>{std::move(receiver)});
}
};
template <typename Sender>
SyncFlowSender<Sender> MakeSyncFlowSender(Sender sender) {
return {std::move(sender)};
}
}
#endif | #include "tensorstore/util/execution/sync_flow_sender.h"
#include <stddef.h>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/internal/thread/thread.h"
#include "tensorstore/util/execution/execution.h"
#include "tensorstore/util/execution/sender_testutil.h"
namespace {
struct ConcurrentSender {
size_t num_threads;
bool error;
template <typename Receiver>
void submit(Receiver receiver) {
tensorstore::execution::set_starting(receiver, [] {});
std::vector<tensorstore::internal::Thread> threads;
for (size_t i = 0; i < num_threads; ++i) {
threads.emplace_back(tensorstore::internal::Thread(
{"sender"},
[i, &receiver] { tensorstore::execution::set_value(receiver, i); }));
}
for (auto& thread : threads) thread.Join();
if (error) {
tensorstore::execution::set_error(receiver, 3);
} else {
tensorstore::execution::set_done(receiver);
}
tensorstore::execution::set_stopping(receiver);
}
};
TEST(SyncFlowSender, Values) {
std::vector<std::string> log;
const size_t num_threads = 10;
tensorstore::execution::submit(
tensorstore::MakeSyncFlowSender(
ConcurrentSender{num_threads, false}),
tensorstore::LoggingReceiver{&log});
ASSERT_EQ(num_threads + 3, log.size());
EXPECT_EQ("set_starting", log[0]);
EXPECT_EQ("set_done", log[log.size() - 2]);
EXPECT_EQ("set_stopping", log[log.size() - 1]);
EXPECT_THAT(
log, ::testing::UnorderedElementsAre(
"set_starting", "set_value: 0", "set_value: 1", "set_value: 2",
"set_value: 3", "set_value: 4", "set_value: 5", "set_value: 6",
"set_value: 7", "set_value: 8", "set_value: 9", "set_done",
"set_stopping"));
}
TEST(SyncFlowSender, Error) {
std::vector<std::string> log;
const size_t num_threads = 10;
tensorstore::execution::submit(
tensorstore::MakeSyncFlowSender(
ConcurrentSender{num_threads, true}),
tensorstore::LoggingReceiver{&log});
ASSERT_EQ(num_threads + 3, log.size());
EXPECT_EQ("set_starting", log[0]);
EXPECT_EQ("set_error: 3", log[log.size() - 2]);
EXPECT_EQ("set_stopping", log[log.size() - 1]);
EXPECT_THAT(
log, ::testing::UnorderedElementsAre(
"set_starting", "set_value: 0", "set_value: 1", "set_value: 2",
"set_value: 3", "set_value: 4", "set_value: 5", "set_value: 6",
"set_value: 7", "set_value: 8", "set_value: 9", "set_error: 3",
"set_stopping"));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/execution/sync_flow_sender.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/execution/sync_flow_sender_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
1e8ed1a7-a21b-45fb-94a1-dd7bf937e305 | cpp | tensorflow/tensorflow | node_builder | tensorflow/core/graph/node_builder.cc | tensorflow/core/graph/node_builder_test.cc | #include "tensorflow/core/graph/node_builder.h"
#include <unordered_map>
#include <vector>
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/framework/versions.pb.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
namespace tensorflow {
NodeBuilder::NodeOut::NodeOut(Node* n, int32_t i)
: node(n),
error(false),
name(node != nullptr ? node->name() : (error = true, "")),
index(i),
dt(SafeGetOutput(node, i, &error)) {}
NodeBuilder::NodeOut::NodeOut(OutputTensor t) : NodeOut(t.node, t.index) {}
NodeBuilder::NodeOut::NodeOut(StringPiece n, int32_t i, DataType t)
: node(nullptr), error(false), name(n), index(i), dt(t) {}
NodeBuilder::NodeOut::NodeOut()
: node(nullptr), error(true), index(0), dt(DT_FLOAT) {}
NodeBuilder::NodeBuilder(StringPiece name, StringPiece op_name,
const OpRegistryInterface* op_registry,
const NodeDebugInfo* debug)
: def_builder_(name, op_name, op_registry, debug) {}
NodeBuilder::NodeBuilder(StringPiece name, const OpDef* op_def)
: def_builder_(name, op_def) {}
NodeBuilder::NodeBuilder(const NodeDefBuilder& def_builder)
: def_builder_(def_builder) {}
NodeBuilder& NodeBuilder::Input(Node* src_node, int src_index) {
inputs_.emplace_back(src_node, src_index);
DataType dt;
if (GetOutputType(src_node, src_index, &dt)) {
def_builder_.Input(src_node->name(), src_index, dt);
}
return *this;
}
NodeBuilder& NodeBuilder::Input(NodeOut src) {
if (src.error) {
AddIndexError(src.node, src.index);
} else {
inputs_.emplace_back(src.node, src.index);
def_builder_.Input(src.name, src.index, src.dt);
}
return *this;
}
NodeBuilder& NodeBuilder::Input(absl::Span<const NodeOut> src_list) {
std::vector<NodeDefBuilder::NodeOut> srcs;
srcs.reserve(src_list.size());
for (const auto& node_out : src_list) {
if (node_out.error) {
AddIndexError(node_out.node, node_out.index);
} else {
srcs.emplace_back(node_out.name, node_out.index, node_out.dt);
inputs_.emplace_back(node_out.node, node_out.index);
}
}
def_builder_.Input(absl::Span<const NodeDefBuilder::NodeOut>(srcs));
return *this;
}
NodeBuilder& NodeBuilder::ControlInput(Node* src_node) {
control_inputs_.emplace_back(src_node);
def_builder_.ControlInput(src_node->name());
return *this;
}
NodeBuilder& NodeBuilder::ControlInputs(absl::Span<Node* const> src_nodes) {
control_inputs_.insert(control_inputs_.end(), src_nodes.begin(),
src_nodes.end());
for (const Node* src_node : src_nodes) {
def_builder_.ControlInput(src_node->name());
}
return *this;
}
NodeBuilder& NodeBuilder::Device(StringPiece device_spec) {
def_builder_.Device(device_spec);
return *this;
}
NodeBuilder& NodeBuilder::AssignedDevice(StringPiece device) {
assigned_device_ = string(device);
return *this;
}
NodeBuilder& NodeBuilder::XlaCluster(StringPiece xla_cluster) {
def_builder_.Attr("_XlaCluster", xla_cluster);
return *this;
}
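// Finalizes the builder: builds and validates the NodeDef against the OpDef,
// checks op deprecation against the graph's producer version, adds the node
// to `graph`, and wires up the recorded data and control edges. With
// `consume` set, the underlying NodeDefBuilder may be moved from, leaving
// this builder in an unspecified state.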
absl::StatusOr<Node*> NodeBuilder::Finalize(Graph* graph, bool consume) {
Node* out;
TF_RETURN_IF_ERROR(Finalize(graph, &out, consume));
return out;
}
Status NodeBuilder::Finalize(Graph* graph, Node** created_node, bool consume) {
if (created_node != nullptr) {
*created_node = nullptr;
}
if (!errors_.empty()) {
return errors::InvalidArgument(absl::StrJoin(errors_, "\n"));
}
NodeDef node_def;
TF_RETURN_IF_ERROR(def_builder_.Finalize(&node_def, consume));
TF_RETURN_IF_ERROR(ValidateNodeDef(node_def, def_builder_.op_def()));
TF_RETURN_IF_ERROR(
CheckOpDeprecation(def_builder_.op_def(), graph->versions().producer()));
TF_ASSIGN_OR_RETURN(Node * node, graph->AddNode(std::move(node_def)));
node->set_assigned_device_name(assigned_device_);
for (size_t i = 0; i < inputs_.size(); ++i) {
if (inputs_[i].node != nullptr) {
graph->AddEdge(inputs_[i].node, inputs_[i].index, node, i);
}
}
for (Node* control_input : control_inputs_) {
graph->AddControlEdge(control_input, node);
}
if (created_node != nullptr) *created_node = node;
return absl::OkStatus();
}
void NodeBuilder::AddIndexError(const Node* node, int i) {
if (node == nullptr) {
errors_.emplace_back(
strings::StrCat("Attempt to add nullptr Node to node with type ",
def_builder_.op_def().name()));
} else {
errors_.emplace_back(strings::StrCat(
"Attempt to add output ", i, " of ", node->name(), " not in range [0, ",
node->num_outputs(), ") to node with type ",
def_builder_.op_def().name(), ". Node: ", FormatNodeForError(*node)));
}
}
bool NodeBuilder::GetOutputType(const Node* node, int i, DataType* dt) {
bool error;
*dt = SafeGetOutput(node, i, &error);
if (error) AddIndexError(node, i);
return !error;
}
} | #include "tensorflow/core/graph/node_builder.h"
#include <string>
#include "tensorflow/core/framework/full_type.pb.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_def_builder.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
REGISTER_OP("Source").Output("o: out_types").Attr("out_types: list(type)");
REGISTER_OP("Sink").Input("i: T").Attr("T: type");
TEST(NodeBuilderTest, Simple) {
Graph graph(OpRegistry::Global());
Node* source_node;
TF_EXPECT_OK(NodeBuilder("source_op", "Source")
.Attr("out_types", {DT_INT32, DT_STRING})
.Finalize(&graph, &source_node));
ASSERT_TRUE(source_node != nullptr);
TF_EXPECT_OK(NodeBuilder("sink1", "Sink")
.Input(source_node)
.Finalize(&graph, nullptr));
TF_EXPECT_OK(NodeBuilder("sink2", "Sink")
.Input(source_node, 1)
.Finalize(&graph, nullptr));
EXPECT_FALSE(NodeBuilder("sink3", "Sink")
.Input(source_node, 2)
.Finalize(&graph, nullptr)
.ok());
EXPECT_FALSE(NodeBuilder("sink4", "Sink")
.Input(source_node, -1)
.Finalize(&graph, nullptr)
.ok());
EXPECT_FALSE(NodeBuilder("sink5", "Sink")
.Input({source_node, -1})
.Finalize(&graph, nullptr)
.ok());
EXPECT_FALSE(NodeBuilder("sink6", "Sink")
.Input(nullptr)
.Finalize(&graph, nullptr)
.ok());
EXPECT_FALSE(NodeBuilder("sink7", "Sink")
.Input(NodeBuilder::NodeOut(nullptr, 0))
.Finalize(&graph, nullptr)
.ok());
}
REGISTER_OP("FullTypeOpBasicType")
.Output("o1: out_type")
.Attr("out_type: type")
.SetTypeConstructor([](OpDef* op_def) {
FullTypeDef* tdef =
op_def->mutable_output_arg(0)->mutable_experimental_full_type();
tdef->set_type_id(TFT_ARRAY);
FullTypeDef* arg = tdef->add_args();
arg->set_type_id(TFT_VAR);
arg->set_s("out_type");
return absl::OkStatus();
});
TEST(NodeBuilderTest, TypeConstructorBasicType) {
Graph graph(OpRegistry::Global());
Node* node;
TF_EXPECT_OK(NodeBuilder("op", "FullTypeOpBasicType")
.Attr("out_type", DT_FLOAT)
.Finalize(&graph, &node));
ASSERT_TRUE(node->def().has_experimental_type());
const FullTypeDef& ft = node->def().experimental_type();
ASSERT_EQ(ft.type_id(), TFT_PRODUCT);
ASSERT_EQ(ft.args_size(), 1);
auto ot = ft.args(0);
ASSERT_EQ(ot.type_id(), TFT_ARRAY);
ASSERT_EQ(ot.args(0).type_id(), TFT_FLOAT);
ASSERT_EQ(ot.args(0).args().size(), 0);
}
REGISTER_OP("FullTypeOpListType")
.Output("o1: out_types")
.Attr("out_types: list(type)")
.SetTypeConstructor([](OpDef* op_def) {
FullTypeDef* tdef =
op_def->mutable_output_arg(0)->mutable_experimental_full_type();
tdef->set_type_id(TFT_ARRAY);
FullTypeDef* arg = tdef->add_args();
arg->set_type_id(TFT_VAR);
arg->set_s("out_types");
return absl::OkStatus();
});
TEST(NodeBuilderTest, TypeConstructorListType) {
Graph graph(OpRegistry::Global());
Node* node;
ASSERT_FALSE(NodeBuilder("op", "FullTypeOpListType")
.Attr("out_types", {DT_FLOAT, DT_INT32})
.Finalize(&graph, &node)
.ok());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/graph/node_builder.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/graph/node_builder_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
798ba46e-3eac-4fcd-aa56-8b804c33f805 | cpp | tensorflow/tensorflow | parallel_batch | tensorflow/core/grappler/optimizers/data/parallel_batch.cc | tensorflow/core/grappler/optimizers/data/parallel_batch_test.cc | #include "tensorflow/core/grappler/optimizers/data/parallel_batch.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/grappler/clusters/cluster.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/mutable_graph_view.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.h"
namespace tensorflow {
namespace grappler {
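// tf.data graph optimization that sets parallel_copy=true on every
// BatchDatasetV2 and PaddedBatchDatasetV2 node, so batch elements are copied
// into the output batch in parallel.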
Status ParallelBatch::OptimizeAndCollectStats(Cluster* cluster,
const GrapplerItem& item,
GraphDef* output,
OptimizationStats* stats) {
*output = item.graph;
MutableGraphView graph(output);
for (NodeDef& node : *output->mutable_node()) {
if (node.op() == "BatchDatasetV2" || node.op() == "PaddedBatchDatasetV2") {
(*node.mutable_attr())["parallel_copy"].set_b(true);
stats->num_changes++;
}
}
return absl::OkStatus();
}
REGISTER_GRAPH_OPTIMIZER_AS(ParallelBatch, "parallel_batch");
}
} | #include "tensorflow/core/grappler/optimizers/data/parallel_batch.h"
#include "tensorflow/core/framework/attr_value_util.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/optimizers/data/graph_test_utils.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace {
TEST(ParallelBatch, BatchDataset) {
using test::function::NDef;
GrapplerItem item;
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"start", "stop", "step"}, {}),
NDef("batch_size", "Const", {}, {{"value", 5}, {"dtype", DT_INT32}}),
NDef("drop_remainder", "Const", {},
{{"value", false}, {"dtype", DT_BOOL}}),
NDef("batch", "BatchDatasetV2",
{"range", "batch_size", "drop_remainder"}, {})});
ParallelBatch optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
EXPECT_TRUE(graph_utils::ContainsGraphNodeWithName("batch", output));
int index = graph_utils::FindGraphNodeWithName("batch", output);
EXPECT_TRUE(output.node(index).attr().at("parallel_copy").b());
}
TEST(ParallelBatch, PaddedBatchDataset) {
using test::function::NDef;
GrapplerItem item;
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"start", "stop", "step"}, {}),
NDef("batch_size", "Const", {}, {{"value", 5}, {"dtype", DT_INT32}}),
NDef("drop_remainder", "Const", {},
{{"value", false}, {"dtype", DT_BOOL}}),
NDef("batch", "PaddedBatchDatasetV2",
{"range", "batch_size", "drop_remainder"}, {})});
ParallelBatch optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
EXPECT_TRUE(graph_utils::ContainsGraphNodeWithName("batch", output));
int index = graph_utils::FindGraphNodeWithName("batch", output);
EXPECT_TRUE(output.node(index).attr().at("parallel_copy").b());
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/data/parallel_batch.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/data/parallel_batch_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
fed78beb-029c-47b3-aad3-c4c29d5b52fc | cpp | tensorflow/tensorflow | sort_thunk | third_party/xla/xla/backends/cpu/runtime/sort_thunk.cc | third_party/xla/xla/backends/cpu/runtime/sort_thunk_test.cc | #include "xla/backends/cpu/runtime/sort_thunk.h"
#include <algorithm>
#include <array>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <functional>
#include <iterator>
#include <memory>
#include <string>
#include <type_traits>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/base/dynamic_annotations.h"
#include "absl/base/optimization.h"
#include "absl/container/inlined_vector.h"
#include "absl/memory/memory.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "absl/synchronization/mutex.h"
#include "absl/types/span.h"
#include "xla/backends/cpu/runtime/thunk.h"
#include "xla/layout_util.h"
#include "xla/primitive_util.h"
#include "xla/runtime/buffer_use.h"
#include "xla/service/buffer_assignment.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/tsl/concurrency/async_value_ref.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
#include "tsl/profiler/lib/traceme.h"
namespace xla::cpu {
static absl::Status VerifySortInputs(absl::Span<const SortThunk::Input> inputs,
int64_t dimension) {
if (inputs.empty()) {
return Internal("Inputs must not be empty");
}
auto equal = Shape::Equal().IgnoreElementType();
const Shape& shape = inputs[0].shape;
for (const SortThunk::Input& input : inputs) {
if (!equal(shape, input.shape)) {
return Internal("Inputs must have the same shape");
}
}
int64_t sort_dimension =
dimension >= 0 ? dimension : shape.rank() + dimension;
if (shape.rank() <= sort_dimension) {
return Internal(
"Shape of dimensions [%s] can't be sorted along dimension %d",
absl::StrJoin(shape.dimensions(), ","), dimension);
}
return absl::OkStatus();
}
absl::StatusOr<std::unique_ptr<SortThunk>> SortThunk::Create(
Info info, absl::Span<const Input> inputs, int64_t dimension,
bool is_stable, LessThan less_than) {
TF_RETURN_IF_ERROR(VerifySortInputs(inputs, dimension));
return absl::WrapUnique(new SortThunk(std::move(info), inputs, dimension,
is_stable, std::move(less_than)));
}
absl::StatusOr<std::unique_ptr<SortThunk>> SortThunk::Create(
Info info, absl::Span<const Input> inputs, int64_t dimension,
bool is_stable, std::string comparator_name) {
TF_RETURN_IF_ERROR(VerifySortInputs(inputs, dimension));
return absl::WrapUnique(new SortThunk(std::move(info), inputs, dimension,
is_stable, std::move(comparator_name)));
}
SortThunk::SortThunk(Info info, absl::Span<const Input> inputs,
int64_t dimension, bool is_stable, LessThan less_than)
: Thunk(Kind::kSort, std::move(info)),
inputs_(inputs.begin(), inputs.end()),
dimension_(dimension),
is_stable_(is_stable),
less_than_(std::move(less_than)),
less_than_ptr_(&*less_than_) {}
SortThunk::SortThunk(Info info, absl::Span<const Input> inputs,
int64_t dimension, bool is_stable,
std::string comparator_name)
: Thunk(Kind::kSort, std::move(info)),
inputs_(inputs.begin(), inputs.end()),
dimension_(dimension),
is_stable_(is_stable),
comparator_name_(std::move(comparator_name)),
less_than_ptr_(nullptr) {}
namespace {
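// Sorting machinery for sorting N parallel arrays with a single comparator.
// std::sort needs value semantics, so one logical "element" (one entry from
// each of the N arrays) is modeled three ways: `Ref`/`DRef` reference the
// bytes in place, `Value`/`DValue` own a copy of them, and `Ptr`/`DPtr` plus
// `SortIterator` expose the arrays as a random access iterator range. The
// statically sized variants (template parameter `n`) avoid heap allocations;
// the `D`-prefixed variants handle arbitrary input counts with vectors.
// kMaxElementSize (16 bytes) is wide enough for the largest primitive type
// (complex128).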
static constexpr size_t kMaxElementSize = 16;
template <size_t n>
struct Ref;
struct DRef;
template <size_t n>
struct Value {
Value(const Ref<n>& ref);
const void* compared_value(size_t i) const { return value[i].data(); }
using ValueStorage = std::array<std::byte, kMaxElementSize>;
alignas(alignof(std::max_align_t)) std::array<ValueStorage, n> value;
std::array<uint8_t, n> value_sizes;
};
struct DValue {
DValue(const DRef& ref);
const void* compared_value(size_t i) const { return value[i].data(); }
using ValueStorage = std::array<std::byte, kMaxElementSize>;
std::vector<ValueStorage> value;
std::vector<uint8_t> value_sizes;
size_t n;
};
template <size_t n>
struct Ref {
Ref(std::array<std::byte*, n> ptr, std::array<uint8_t, n> ptr_sizes)
: ptr(ptr), ptr_sizes(ptr_sizes) {}
Ref& operator=(const Value<n>& value);
Ref& operator=(const Ref<n>& other);
const void* compared_value(size_t i) const { return ptr[i]; }
std::array<std::byte*, n> ptr;
std::array<uint8_t, n> ptr_sizes;
};
struct DRef {
DRef(std::vector<std::byte*> ptr, std::vector<uint8_t> ptr_sizes)
: ptr(ptr), ptr_sizes(ptr_sizes), n(ptr.size()) {}
DRef& operator=(const DValue& value);
DRef& operator=(const DRef& other);
const void* compared_value(size_t i) const { return ptr[i]; }
std::vector<std::byte*> ptr;
std::vector<uint8_t> ptr_sizes;
const size_t n;
};
template <size_t n>
Value<n>::Value(const Ref<n>& ref) : value_sizes(ref.ptr_sizes) {
for (size_t i = 0; i < n; ++i) {
std::memcpy(value[i].data(), ref.ptr[i], ref.ptr_sizes[i]);
}
}
DValue::DValue(const DRef& ref)
: value_sizes(ref.ptr_sizes), n(ref.ptr.size()) {
value.reserve(n);
for (size_t i = 0; i < n; ++i) {
value.emplace_back();
std::memcpy(value[i].data(), ref.ptr[i], ref.ptr_sizes[i]);
}
}
template <size_t n>
Ref<n>& Ref<n>::operator=(const Value<n>& value) {
DCHECK(ptr_sizes == value.value_sizes);
for (size_t i = 0; i < n; ++i) {
std::memcpy(ptr[i], value.value[i].data(), value.value_sizes[i]);
}
return *this;
}
DRef& DRef::operator=(const DValue& value) {
DCHECK(ptr_sizes == value.value_sizes);
for (size_t i = 0; i < n; ++i) {
std::memcpy(ptr[i], value.value[i].data(), value.value_sizes[i]);
}
return *this;
}
template <size_t n>
Ref<n>& Ref<n>::operator=(const Ref<n>& other) {
DCHECK(ptr_sizes == other.ptr_sizes);
for (size_t i = 0; i < n; ++i) {
std::memcpy(ptr[i], other.ptr[i], other.ptr_sizes[i]);
}
return *this;
}
DRef& DRef::operator=(const DRef& other) {
DCHECK(ptr_sizes == other.ptr_sizes);
const size_t n = other.ptr.size();
for (size_t i = 0; i < n; ++i) {
std::memcpy(ptr[i], other.ptr[i], other.ptr_sizes[i]);
}
return *this;
}
template <size_t n>
void swap(const Ref<n>& lhs, const Ref<n>& rhs) {
for (size_t i = 0; i < n; ++i) {
std::array<std::byte, kMaxElementSize> tmp;
std::memcpy(tmp.data(), lhs.ptr[i], lhs.ptr_sizes[i]);
std::memcpy(lhs.ptr[i], rhs.ptr[i], rhs.ptr_sizes[i]);
std::memcpy(rhs.ptr[i], tmp.data(), lhs.ptr_sizes[i]);
}
}
void swap(const DRef& lhs, const DRef& rhs) {
DCHECK(lhs.ptr_sizes == rhs.ptr_sizes);
const size_t n = lhs.ptr.size();
for (size_t i = 0; i < n; ++i) {
std::array<std::byte, kMaxElementSize> tmp;
std::memcpy(tmp.data(), lhs.ptr[i], lhs.ptr_sizes[i]);
std::memcpy(lhs.ptr[i], rhs.ptr[i], rhs.ptr_sizes[i]);
std::memcpy(rhs.ptr[i], tmp.data(), lhs.ptr_sizes[i]);
}
}
template <size_t n>
struct Ptr {
using difference_type = std::ptrdiff_t;
Ptr() = default;
Ptr(std::array<std::byte*, n> ptr, std::array<uint8_t, n> ptr_sizes)
: ptr(ptr), ptr_sizes(ptr_sizes) {}
Ref<n> operator*() const { return Ref<n>{ptr, ptr_sizes}; }
Ptr& operator+=(difference_type diff) {
for (size_t i = 0; i < n; ++i) ptr[i] += diff * ptr_sizes[i];
return *this;
}
Ptr& operator-=(difference_type diff) {
for (size_t i = 0; i < n; ++i) ptr[i] -= diff * ptr_sizes[i];
return *this;
}
Ptr operator+(difference_type diff) const {
std::array<std::byte*, n> upd;
for (size_t i = 0; i < n; ++i) upd[i] = ptr[i] + diff * ptr_sizes[i];
return Ptr{upd, ptr_sizes};
}
Ptr operator-(difference_type diff) const {
std::array<std::byte*, n> upd;
for (size_t i = 0; i < n; ++i) upd[i] = ptr[i] - diff * ptr_sizes[i];
return Ptr{upd, ptr_sizes};
}
difference_type operator-(const Ptr& rhs) const {
DCHECK(ptr_sizes == rhs.ptr_sizes);
return (ptr[0] - rhs.ptr[0]) / ptr_sizes[0];
}
bool operator==(const Ptr& rhs) const { return ptr[0] == rhs.ptr[0]; }
bool operator!=(const Ptr& rhs) const { return ptr[0] != rhs.ptr[0]; }
bool operator>(const Ptr& rhs) const { return ptr[0] > rhs.ptr[0]; }
bool operator<(const Ptr& rhs) const { return ptr[0] < rhs.ptr[0]; }
bool operator>=(const Ptr& rhs) const { return ptr[0] >= rhs.ptr[0]; }
bool operator<=(const Ptr& rhs) const { return ptr[0] <= rhs.ptr[0]; }
std::array<std::byte*, n> ptr;
std::array<uint8_t, n> ptr_sizes;
};
struct DPtr {
using difference_type = std::ptrdiff_t;
DPtr() = default;
DPtr(std::vector<std::byte*> ptr, std::vector<uint8_t> ptr_sizes)
: ptr(ptr), ptr_sizes(ptr_sizes), n(ptr.size()) {}
DRef operator*() const { return DRef{ptr, ptr_sizes}; }
DPtr& operator+=(difference_type diff) {
for (size_t i = 0; i < n; ++i) ptr[i] += diff * ptr_sizes[i];
return *this;
}
DPtr& operator-=(difference_type diff) {
for (size_t i = 0; i < n; ++i) ptr[i] -= diff * ptr_sizes[i];
return *this;
}
DPtr operator+(difference_type diff) const {
std::vector<std::byte*> upd(n);
for (size_t i = 0; i < n; ++i) upd[i] = ptr[i] + diff * ptr_sizes[i];
return DPtr{upd, ptr_sizes};
}
DPtr operator-(difference_type diff) const {
std::vector<std::byte*> upd(n);
for (size_t i = 0; i < n; ++i) upd[i] = ptr[i] - diff * ptr_sizes[i];
return DPtr{upd, ptr_sizes};
}
difference_type operator-(const DPtr& rhs) const {
DCHECK(ptr_sizes == rhs.ptr_sizes);
return (ptr[0] - rhs.ptr[0]) / ptr_sizes[0];
}
bool operator==(const DPtr& rhs) const { return ptr[0] == rhs.ptr[0]; }
bool operator!=(const DPtr& rhs) const { return ptr[0] != rhs.ptr[0]; }
bool operator>(const DPtr& rhs) const { return ptr[0] > rhs.ptr[0]; }
bool operator<(const DPtr& rhs) const { return ptr[0] < rhs.ptr[0]; }
bool operator>=(const DPtr& rhs) const { return ptr[0] >= rhs.ptr[0]; }
bool operator<=(const DPtr& rhs) const { return ptr[0] <= rhs.ptr[0]; }
std::vector<std::byte*> ptr;
std::vector<uint8_t> ptr_sizes;
size_t n;
};
template <class Value, class Ref, class Ptr>
class SortIterator {
public:
using iterator_category = std::random_access_iterator_tag;
using difference_type = std::ptrdiff_t;
using value_type = Value;
using reference = Ref;
using pointer = Ptr;
SortIterator() = default;
SortIterator(pointer ptr, difference_type stride)
: ptr_(ptr), stride_(stride) {}
SortIterator(const SortIterator& other) = default;
SortIterator& operator=(const SortIterator& other) = default;
SortIterator(SortIterator&& other) = default;
SortIterator& operator=(SortIterator&& other) = default;
reference operator*() const { return *ptr_; }
difference_type operator-(const SortIterator& rhs) const {
return (ptr_ - rhs.ptr_) / stride_;
}
SortIterator& operator+=(difference_type diff) {
ptr_ += diff * stride_;
return *this;
}
SortIterator& operator-=(difference_type diff) {
ptr_ -= diff * stride_;
return *this;
}
SortIterator& operator++() {
ptr_ += stride_;
return *this;
}
SortIterator& operator--() {
ptr_ -= stride_;
return *this;
}
SortIterator operator+(difference_type diff) const {
return SortIterator(ptr_ + diff * stride_, stride_);
}
SortIterator operator-(difference_type diff) const {
return SortIterator(ptr_ - diff * stride_, stride_);
}
bool operator==(const SortIterator& rhs) const { return ptr_ == rhs.ptr_; }
bool operator!=(const SortIterator& rhs) const { return ptr_ != rhs.ptr_; }
bool operator>(const SortIterator& rhs) const { return ptr_ > rhs.ptr_; }
bool operator<(const SortIterator& rhs) const { return ptr_ < rhs.ptr_; }
bool operator>=(const SortIterator& rhs) const { return ptr_ >= rhs.ptr_; }
bool operator<=(const SortIterator& rhs) const { return ptr_ <= rhs.ptr_; }
private:
pointer ptr_;
difference_type stride_ = 1;
};
struct SortDims {
int64_t outer_dim_size;
int64_t sort_dim_size;
int64_t inner_dim_size;
int64_t num_iterations;
};
}
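// Maps the logical sort dimension to the physical layout and collapses the
// physical shape into three components: everything major to the sort
// dimension (outer), the sort dimension itself, and everything minor to it
// (inner). Each of the outer_dim_size * inner_dim_size iterations then sorts
// one strided 1-D slice.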
static SortDims GetSortDims(const Shape& shape, int64_t dimension) {
int64_t sort_dimension =
dimension >= 0 ? dimension : shape.rank() + dimension;
Shape physical_shape =
ShapeUtil::MakeShapeWithDescendingLayoutAndSamePhysicalLayout(shape);
auto logical_to_physical = LayoutUtil::MakeLogicalToPhysical(shape.layout());
sort_dimension = logical_to_physical[sort_dimension];
auto product = [](absl::Span<const int64_t> dims) {
return absl::c_accumulate(dims, int64_t{1}, std::multiplies<>());
};
absl::Span<const int64_t> dimensions = physical_shape.dimensions();
int64_t outer_dim_size = product(dimensions.subspan(0, sort_dimension));
int64_t sort_dim_size = dimensions[sort_dimension];
int64_t inner_dim_size = product(dimensions.subspan(sort_dimension + 1));
int64_t num_iterations = outer_dim_size * inner_dim_size;
return SortDims{outer_dim_size, sort_dim_size, inner_dim_size,
num_iterations};
}
template <size_t n>
static void SortInplace(const SortDims& sort_dims, int64_t offset,
absl::Span<se::DeviceMemoryBase> data,
absl::Span<const Shape> shapes, bool is_stable,
SortThunk::LessThan* less_than) {
std::array<std::byte*, n> ptr;
std::array<uint8_t, n> ptr_sizes;
for (size_t i = 0; i < n; ++i) {
std::byte* base = reinterpret_cast<std::byte*>(data[i].opaque());
ptr_sizes[i] = primitive_util::ByteWidth(shapes[i].element_type());
ptr[i] = base + offset * ptr_sizes[i];
}
auto compare = [&](const auto& a, const auto& b) {
std::array<const void*, 2 * n> data;
for (size_t i = 0, j = 0; i < n; i += 1, j += 2) {
data[j] = a.compared_value(i);
data[j + 1] = b.compared_value(i);
}
return (*less_than)(data.data());
};
  SortIterator<Value<n>, Ref<n>, Ptr<n>> begin(Ptr<n>(ptr, ptr_sizes),
                                               sort_dims.inner_dim_size);
if (is_stable) {
std::stable_sort(begin, begin + sort_dims.sort_dim_size, compare);
} else {
std::sort(begin, begin + sort_dims.sort_dim_size, compare);
}
}
static void DSortInplace(const SortDims& sort_dims, int64_t offset,
absl::Span<se::DeviceMemoryBase> data,
absl::Span<const Shape> shapes, bool is_stable,
SortThunk::LessThan* less_than, size_t n) {
std::vector<std::byte*> ptr(n);
std::vector<uint8_t> ptr_sizes(n);
for (size_t i = 0; i < n; ++i) {
std::byte* base = reinterpret_cast<std::byte*>(data[i].opaque());
ptr_sizes[i] = primitive_util::ByteWidth(shapes[i].element_type());
ptr[i] = base + offset * ptr_sizes[i];
}
auto compare = [&](const auto& a, const auto& b) {
std::vector<const void*> data(2 * n);
for (size_t i = 0, j = 0; i < n; i += 1, j += 2) {
data[j] = a.compared_value(i);
data[j + 1] = b.compared_value(i);
}
return (*less_than)(data.data());
};
SortIterator<DValue, DRef, DPtr> begin(DPtr(ptr, ptr_sizes),
sort_dims.inner_dim_size);
if (is_stable) {
std::stable_sort(begin, begin + sort_dims.sort_dim_size, compare);
} else {
std::sort(begin, begin + sort_dims.sort_dim_size, compare);
}
}
static absl::Status SortInplace(absl::Span<se::DeviceMemoryBase> data,
absl::Span<const Shape> shapes,
int64_t dimension, bool is_stable,
SortThunk::LessThan* less_than) {
SortDims sort_dims = GetSortDims(shapes[0], dimension);
for (int64_t i = 0; i < sort_dims.num_iterations; ++i) {
int64_t inner_idx = i % sort_dims.inner_dim_size;
int64_t offset = inner_idx + (i - inner_idx) * sort_dims.sort_dim_size;
auto sort = [&](auto num_inputs) {
SortInplace<decltype(num_inputs)::value>(sort_dims, offset, data, shapes,
is_stable, less_than);
};
auto dsort = [&](size_t num_inputs) {
DSortInplace(sort_dims, offset, data, shapes, is_stable, less_than,
num_inputs);
};
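    // Dispatch on the number of sorted inputs: arities 1..25 go through the
    // statically sized SortInplace<n> (stack-allocated element storage),
    // larger arities fall back to the dynamic DSortInplace.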
switch (data.size()) {
case 1:
sort(std::integral_constant<size_t, 1>{});
break;
case 2:
sort(std::integral_constant<size_t, 2>{});
break;
case 3:
sort(std::integral_constant<size_t, 3>{});
break;
case 4:
sort(std::integral_constant<size_t, 4>{});
break;
case 5:
sort(std::integral_constant<size_t, 5>{});
break;
case 6:
sort(std::integral_constant<size_t, 6>{});
break;
case 7:
sort(std::integral_constant<size_t, 7>{});
break;
case 8:
sort(std::integral_constant<size_t, 8>{});
break;
case 9:
sort(std::integral_constant<size_t, 9>{});
break;
case 10:
sort(std::integral_constant<size_t, 10>{});
break;
case 11:
sort(std::integral_constant<size_t, 11>{});
break;
case 12:
sort(std::integral_constant<size_t, 12>{});
break;
case 13:
sort(std::integral_constant<size_t, 13>{});
break;
case 14:
sort(std::integral_constant<size_t, 14>{});
break;
case 15:
sort(std::integral_constant<size_t, 15>{});
break;
case 16:
sort(std::integral_constant<size_t, 16>{});
break;
case 17:
sort(std::integral_constant<size_t, 17>{});
break;
case 18:
sort(std::integral_constant<size_t, 18>{});
break;
case 19:
sort(std::integral_constant<size_t, 19>{});
break;
case 20:
sort(std::integral_constant<size_t, 20>{});
break;
case 21:
sort(std::integral_constant<size_t, 21>{});
break;
case 22:
sort(std::integral_constant<size_t, 22>{});
break;
case 23:
sort(std::integral_constant<size_t, 23>{});
break;
case 24:
sort(std::integral_constant<size_t, 24>{});
break;
case 25:
sort(std::integral_constant<size_t, 25>{});
break;
default:
dsort(data.size());
break;
}
}
return absl::OkStatus();
}
tsl::AsyncValueRef<SortThunk::ExecuteEvent> SortThunk::Execute(
const ExecuteParams& params) {
tsl::profiler::TraceMe trace([&] { return TraceMeEncode(); });
VLOG(3) << absl::StreamFormat(
"Sort %d inputs along dimension %d (is_stable=%v)", inputs_.size(),
dimension_, is_stable_);
absl::InlinedVector<se::DeviceMemoryBase, 8> data;
data.reserve(inputs_.size());
absl::InlinedVector<Shape, 8> shapes;
shapes.reserve(inputs_.size());
for (const Input& input : inputs_) {
size_t idx = data.size();
TF_ASSIGN_OR_RETURN(
data.emplace_back(),
params.buffer_allocations->GetDeviceAddress(input.slice));
shapes.push_back(input.shape);
ABSL_ANNOTATE_MEMORY_IS_INITIALIZED(data.back().opaque(),
data.back().size());
VLOG(3) << absl::StreamFormat(" sort input #%d: %s in slice %s (%p)", idx,
input.shape.ToString(true),
input.slice.ToString(), data.back().opaque());
}
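  // If the thunk was created with a comparator name, resolve it via the
  // function registry on first execution and cache the resulting functor
  // behind an atomic pointer so later executions skip the lookup.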
LessThan* less_than = less_than_ptr_.load();
if (ABSL_PREDICT_FALSE(less_than == nullptr)) {
TF_ASSIGN_OR_RETURN(
FunctionRegistry::Comparator comparator,
params.function_registry->FindComparator(comparator_name_));
absl::MutexLock lock(&mutex_);
less_than_ = [comparator](const void** data) {
bool result;
comparator(&result, nullptr, data, nullptr, nullptr, nullptr);
ABSL_ANNOTATE_MEMORY_IS_INITIALIZED(&result, sizeof(result));
return result;
};
less_than_ptr_.store(less_than = &*less_than_);
}
TF_RETURN_IF_ERROR(SortInplace(absl::MakeSpan(data), shapes, dimension_,
is_stable_, less_than));
return OkExecuteEvent();
}
SortThunk::BufferUses SortThunk::buffer_uses() const {
BufferUses buffer_uses;
buffer_uses.reserve(inputs_.size());
for (const Input& input : inputs_) {
buffer_uses.emplace_back(BufferUse::Write(input.slice));
}
return buffer_uses;
}
} | #include "xla/backends/cpu/runtime/sort_thunk.h"
#include <array>
#include <cstddef>
#include <cstdint>
#include <numeric>
#include <string_view>
#include <vector>
#include "absl/status/statusor.h"
#include "xla/backends/cpu/runtime/buffer_allocations.h"
#include "xla/backends/cpu/runtime/thunk.h"
#include "xla/layout.h"
#include "xla/layout_util.h"
#include "xla/service/buffer_assignment.h"
#include "xla/service/maybe_owning_device_memory.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/tsl/concurrency/async_value_ref.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
#include "tsl/platform/test_benchmark.h"
namespace xla::cpu {
namespace {
class SortThunkTest : public testing::TestWithParam<bool> {};
static bool LessThan(const void** data) {
auto* lhs = reinterpret_cast<const float*>(data[0]);
auto* rhs = reinterpret_cast<const float*>(data[1]);
return *lhs < *rhs;
}
class LessThanComparator : public Thunk::FunctionRegistry {
public:
static void LessThanWrapper(bool* result, const void*, const void** data,
const void*, const void*, const void*) {
*result = LessThan(data);
}
absl::StatusOr<Comparator> FindComparator(std::string_view name) final {
DCHECK_EQ(name, "less_than");
return LessThanWrapper;
}
};
TEST_P(SortThunkTest, Sort1D) {
bool is_stable = GetParam();
std::vector<MaybeOwningDeviceMemory> buffers;
std::vector<float> data = {2.0, 4.0, 1.0, 3.0};
std::vector<int32_t> indices = {0, 1, 2, 3};
size_t size_in_bytes = data.size() * sizeof(float);
buffers.emplace_back(se::DeviceMemoryBase(data.data(), size_in_bytes));
buffers.emplace_back(se::DeviceMemoryBase(indices.data(), size_in_bytes));
BufferAllocations allocations(buffers);
BufferAllocation alloc0(0, size_in_bytes, 0);
BufferAllocation alloc1(1, size_in_bytes, 0);
BufferAllocation::Slice slice0(&alloc0, 0, size_in_bytes);
BufferAllocation::Slice slice1(&alloc1, 0, size_in_bytes);
Shape data_shape = ShapeUtil::MakeShape(F32, {4});
Shape indices_shape = ShapeUtil::MakeShape(S32, {4});
TF_ASSERT_OK_AND_ASSIGN(
auto thunk, SortThunk::Create(
{"sort"}, {{slice0, data_shape}, {slice1, indices_shape}},
                    /*dimension=*/0, is_stable, LessThan));
Thunk::ExecuteParams params;
params.buffer_allocations = &allocations;
auto execute_event = thunk->Execute(params);
tsl::BlockUntilReady(execute_event);
ASSERT_FALSE(execute_event.IsError());
std::vector<float> expected_data = {1.0, 2.0, 3.0, 4.0};
std::vector<int32_t> expected_indices = {2, 0, 3, 1};
EXPECT_EQ(data, expected_data);
EXPECT_EQ(indices, expected_indices);
}
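// Exercises the dynamically-sized sort path: 35 inputs is more than the
// largest statically-specialized arity (25 above), so execution falls
// through to the `dsort` default case.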
TEST_P(SortThunkTest, DynamicSort1D) {
bool is_stable = GetParam();
constexpr int num_of_empty_slices = 33;
constexpr int total_num_of_slices = num_of_empty_slices + 2;
constexpr int data_size = 31;
constexpr float starting_value = 5.0f;
std::array<float, data_size> data{
17.0f, 16.0f, 5.0f, 10.0f, 30.0f, 8.0f, 9.0f, 21.0f,
14.0f, 32.0f, 29.0f, 28.0f, 19.0f, 12.0f, 25.0f, 22.0f,
18.0f, 35.0f, 34.0f, 23.0f, 7.0f, 13.0f, 26.0f, 33.0f,
15.0f, 24.0f, 20.0f, 31.0f, 6.0f, 27.0f, 11.0f};
std::array<int32_t, data_size> indices;
std::iota(indices.begin(), indices.end(), 0);
std::array<uint32_t, data_size * num_of_empty_slices> empty;
const size_t data_size_in_bytes = data.size() * sizeof(float);
const size_t ind_size_in_bytes = indices.size() * sizeof(int32_t);
const size_t empty_size_in_bytes = empty.size() * sizeof(uint32_t);
const BufferAllocation alloc0(0, data_size_in_bytes, 0);
const BufferAllocation alloc1(1, ind_size_in_bytes, 0);
const BufferAllocation rest(2, empty_size_in_bytes, 0);
const BufferAllocation::Slice slice0(&alloc0, 0, data_size_in_bytes);
const BufferAllocation::Slice slice1(&alloc1, 0, ind_size_in_bytes);
const Shape data_shape = ShapeUtil::MakeShape(F32, {data_size});
const Shape indices_shape = ShapeUtil::MakeShape(S32, {data_size});
const Shape rest_shape = ShapeUtil::MakeShape(U32, {data_size});
std::vector<MaybeOwningDeviceMemory> buffers;
buffers.emplace_back(se::DeviceMemoryBase(data.data(), data_size_in_bytes));
buffers.emplace_back(se::DeviceMemoryBase(indices.data(), ind_size_in_bytes));
buffers.emplace_back(se::DeviceMemoryBase(empty.data(), empty_size_in_bytes));
BufferAllocations allocations(buffers);
std::array<SortThunk::Input, total_num_of_slices> inputs{
{{slice0, data_shape}, {slice1, indices_shape}}};
for (int i = 0; i < num_of_empty_slices; ++i) {
constexpr size_t empty_slice_in_bytes = data_size * sizeof(uint32_t);
inputs[i + 2].slice = BufferAllocation::Slice(
&rest, i * empty_slice_in_bytes, empty_slice_in_bytes);
inputs[i + 2].shape = rest_shape;
}
TF_ASSERT_OK_AND_ASSIGN(
auto thunk, SortThunk::Create({"sort"}, inputs,
                                    /*dimension=*/0, is_stable, LessThan));
Thunk::ExecuteParams params;
params.buffer_allocations = &allocations;
auto execute_event = thunk->Execute(params);
tsl::BlockUntilReady(execute_event);
ASSERT_FALSE(execute_event.IsError());
std::array<float, data_size> expected_data;
std::iota(expected_data.begin(), expected_data.end(), starting_value);
const std::array<int32_t, data_size> expected_indices{
2, 28, 20, 5, 6, 3, 30, 13, 21, 8, 24, 1, 0, 16, 12, 26,
7, 15, 19, 25, 14, 22, 29, 11, 10, 4, 27, 9, 23, 18, 17};
EXPECT_EQ(data, expected_data);
EXPECT_EQ(indices, expected_indices);
}
TEST_P(SortThunkTest, Sort2D) {
bool is_stable = GetParam();
std::vector<MaybeOwningDeviceMemory> buffers;
std::vector<float> data = {2.0, 4.0, 1.0, 3.0};
std::vector<int32_t> indices = {0, 1, 2, 3};
size_t size_in_bytes = data.size() * sizeof(float);
buffers.emplace_back(se::DeviceMemoryBase(data.data(), size_in_bytes));
buffers.emplace_back(se::DeviceMemoryBase(indices.data(), size_in_bytes));
BufferAllocations allocations(buffers);
BufferAllocation alloc0(0, size_in_bytes, 0);
BufferAllocation alloc1(1, size_in_bytes, 0);
BufferAllocation::Slice slice0(&alloc0, 0, size_in_bytes);
BufferAllocation::Slice slice1(&alloc1, 0, size_in_bytes);
Shape data_shape = ShapeUtil::MakeShape(F32, {2, 2});
Shape indices_shape = ShapeUtil::MakeShape(S32, {2, 2});
TF_ASSERT_OK_AND_ASSIGN(
auto sort_dim0,
SortThunk::Create({"sort"},
{{slice0, data_shape}, {slice1, indices_shape}},
0, is_stable, "less_than"));
Thunk::ExecuteParams params;
params.buffer_allocations = &allocations;
LessThanComparator less_than_comparator;
params.function_registry = &less_than_comparator;
auto execute_event0 = sort_dim0->Execute(params);
tsl::BlockUntilReady(execute_event0);
ASSERT_FALSE(execute_event0.IsError());
std::vector<float> expected_data = {1.0, 3.0, 2.0, 4.0};
std::vector<int32_t> expected_indices = {2, 3, 0, 1};
EXPECT_EQ(data, expected_data);
EXPECT_EQ(indices, expected_indices);
data = {4.0, 3.0, 2.0, 1.0};
indices = {0, 1, 2, 3};
TF_ASSERT_OK_AND_ASSIGN(
auto sort_dim1,
SortThunk::Create({"sort"},
{{slice0, data_shape}, {slice1, indices_shape}},
                        /*dimension=*/1,
                        /*is_stable=*/false, "less_than"));
auto execute_event1 = sort_dim1->Execute(params);
tsl::BlockUntilReady(execute_event1);
ASSERT_FALSE(execute_event1.IsError());
expected_data = {3.0, 4.0, 1.0, 2.0};
expected_indices = {1, 0, 3, 2};
EXPECT_EQ(data, expected_data);
EXPECT_EQ(indices, expected_indices);
}
TEST_P(SortThunkTest, Sort2DWithLayout) {
bool is_stable = GetParam();
std::vector<MaybeOwningDeviceMemory> buffers;
std::vector<float> data = {4.0, 3.0, 2.0, 1.0};
std::vector<int32_t> indices = {0, 1, 2, 3};
size_t size_in_bytes = data.size() * sizeof(float);
buffers.emplace_back(se::DeviceMemoryBase(data.data(), size_in_bytes));
buffers.emplace_back(se::DeviceMemoryBase(indices.data(), size_in_bytes));
BufferAllocations allocations(buffers);
BufferAllocation alloc0(0, size_in_bytes, 0);
BufferAllocation alloc1(1, size_in_bytes, 0);
BufferAllocation::Slice slice0(&alloc0, 0, size_in_bytes);
BufferAllocation::Slice slice1(&alloc1, 0, size_in_bytes);
Shape data_shape = ShapeUtil::MakeShape(F32, {2, 2});
*data_shape.mutable_layout() = LayoutUtil::MakeLayout({0, 1});
Shape indices_shape = ShapeUtil::MakeShape(S32, {2, 2});
*indices_shape.mutable_layout() = LayoutUtil::MakeLayout({0, 1});
TF_ASSERT_OK_AND_ASSIGN(
auto sort_dim0,
SortThunk::Create({"sort"},
{{slice0, data_shape}, {slice1, indices_shape}},
0, is_stable, "less_than"));
Thunk::ExecuteParams params;
params.buffer_allocations = &allocations;
LessThanComparator less_than_comparator;
params.function_registry = &less_than_comparator;
auto execute_event0 = sort_dim0->Execute(params);
tsl::BlockUntilReady(execute_event0);
ASSERT_FALSE(execute_event0.IsError());
std::vector<float> expected_data = {3.0, 4.0, 1.0, 2.0};
std::vector<int32_t> expected_indices = {1, 0, 3, 2};
EXPECT_EQ(data, expected_data);
EXPECT_EQ(indices, expected_indices);
data = {2.0, 4.0, 1.0, 3.0};
indices = {0, 1, 2, 3};
TF_ASSERT_OK_AND_ASSIGN(
auto sort_dim1,
SortThunk::Create({"sort"},
{{slice0, data_shape}, {slice1, indices_shape}},
                        /*dimension=*/1,
                        /*is_stable=*/false, "less_than"));
auto execute_event1 = sort_dim1->Execute(params);
tsl::BlockUntilReady(execute_event1);
ASSERT_FALSE(execute_event1.IsError());
expected_data = {1.0, 3.0, 2.0, 4.0};
expected_indices = {2, 3, 0, 1};
EXPECT_EQ(data, expected_data);
EXPECT_EQ(indices, expected_indices);
}
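// Benchmarks the dynamic sort path for a varying number of input slices
// (state.range(0)); per-iteration buffer setup is excluded from the
// measurement via PauseTiming/ResumeTiming.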
void BM_DynamicSort1D(::testing::benchmark::State& state, bool is_stable) {
const int total_num_of_slices = state.range(0);
const int num_of_empty_slices = total_num_of_slices - 2;
constexpr int data_size = 31;
const std::array<float, data_size> data{
17.0f, 16.0f, 5.0f, 10.0f, 30.0f, 8.0f, 9.0f, 21.0f,
14.0f, 32.0f, 29.0f, 28.0f, 19.0f, 12.0f, 25.0f, 22.0f,
18.0f, 35.0f, 34.0f, 23.0f, 7.0f, 13.0f, 26.0f, 33.0f,
15.0f, 24.0f, 20.0f, 31.0f, 6.0f, 27.0f, 11.0f};
std::array<int32_t, data_size> indices;
std::iota(indices.begin(), indices.end(), 0);
std::vector<uint32_t> empty(data_size * num_of_empty_slices);
const size_t data_size_in_bytes = data.size() * sizeof(float);
const size_t ind_size_in_bytes = indices.size() * sizeof(int32_t);
const size_t empty_size_in_bytes = empty.size() * sizeof(uint32_t);
const BufferAllocation alloc0(0, data_size_in_bytes, 0);
const BufferAllocation alloc1(1, ind_size_in_bytes, 0);
const BufferAllocation rest(2, empty_size_in_bytes, 0);
const BufferAllocation::Slice slice0(&alloc0, 0, data_size_in_bytes);
const BufferAllocation::Slice slice1(&alloc1, 0, ind_size_in_bytes);
const Shape data_shape = ShapeUtil::MakeShape(F32, {data_size});
const Shape indices_shape = ShapeUtil::MakeShape(S32, {data_size});
const Shape rest_shape = ShapeUtil::MakeShape(U32, {data_size});
for (auto s : state) {
state.PauseTiming();
auto data_clone(data);
auto indices_clone(indices);
std::vector<MaybeOwningDeviceMemory> buffers;
buffers.emplace_back(
se::DeviceMemoryBase(data_clone.data(), data_size_in_bytes));
buffers.emplace_back(
se::DeviceMemoryBase(indices_clone.data(), ind_size_in_bytes));
buffers.emplace_back(
se::DeviceMemoryBase(empty.data(), empty_size_in_bytes));
BufferAllocations allocations(buffers);
std::vector<SortThunk::Input> inputs(total_num_of_slices);
inputs[0] = {slice0, data_shape};
inputs[1] = {slice1, indices_shape};
for (int i = 0; i < num_of_empty_slices; ++i) {
constexpr size_t empty_slice_in_bytes = data_size * sizeof(uint32_t);
inputs[i + 2].slice = BufferAllocation::Slice(
&rest, i * empty_slice_in_bytes, empty_slice_in_bytes);
inputs[i + 2].shape = rest_shape;
}
Thunk::ExecuteParams params;
params.buffer_allocations = &allocations;
state.ResumeTiming();
TF_ASSERT_OK_AND_ASSIGN(
auto thunk, SortThunk::Create({"sort"}, inputs,
                                      /*dimension=*/0, is_stable, LessThan));
auto execute_event = thunk->Execute(params);
tsl::BlockUntilReady(execute_event);
ASSERT_FALSE(execute_event.IsError());
}
}
void BM_StableDynamicSort1D(::testing::benchmark::State& state) {
BM_DynamicSort1D(state, true);
}
void BM_UnstableDynamicSort1D(::testing::benchmark::State& state) {
BM_DynamicSort1D(state, false);
}
BENCHMARK(BM_StableDynamicSort1D)
->MeasureProcessCPUTime()
->Arg(35)
->Arg(50)
->Arg(100);
BENCHMARK(BM_UnstableDynamicSort1D)
->MeasureProcessCPUTime()
->Arg(35)
->Arg(50)
->Arg(100);
INSTANTIATE_TEST_SUITE_P(SortThunk, SortThunkTest, testing::Bool(),
testing::PrintToStringParamName());
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/backends/cpu/runtime/sort_thunk.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/backends/cpu/runtime/sort_thunk_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
734ac35f-845b-4102-a28f-233a8b460dc1 | cpp | tensorflow/tensorflow | convolution_thunk | third_party/xla/xla/service/gpu/runtime/convolution_thunk.cc | third_party/xla/xla/backends/cpu/runtime/convolution_thunk_test.cc | #include "xla/service/gpu/runtime/convolution_thunk.h"
#include <cstdint>
#include <memory>
#include <optional>
#include <utility>
#include <vector>
#include "absl/container/inlined_vector.h"
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/synchronization/mutex.h"
#include "absl/types/span.h"
#include "xla/service/buffer_assignment.h"
#if TENSORFLOW_USE_ROCM
#include "xla/service/gpu/stream_executor_util.h"
#endif
#include "xla/service/gpu/gpu_conv_runner.h"
#include "xla/service/gpu/runtime/thunk.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/dnn.h"
#include "xla/stream_executor/scratch_allocator.h"
#include "xla/stream_executor/stream_executor.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
namespace xla {
namespace gpu {
ConvolutionThunk::ConvolutionThunk(
ThunkInfo thunk_info, GpuConvConfig config,
std::vector<BufferAllocation::Slice> operand_slices,
std::vector<BufferAllocation::Slice> result_slices,
BufferAllocation::Slice scratch_slice)
: Thunk(Kind::kConvolution, thunk_info),
operand_buffers_(std::move(operand_slices)),
result_buffers_(std::move(result_slices)),
scratch_buffer_(scratch_slice),
config_(std::move(config)) {}
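// Returns the cached GenericConvRunner for `stream`, creating one under the
// lock on first use; `runner_created` reports whether this call created it.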
GenericConvRunner& ConvolutionThunk::GetOrCreateRunner(
const stream_executor::Stream* stream, bool* runner_created) {
absl::MutexLock lock(&mu_);
auto it = runner_cache_.find(stream);
*runner_created = (it == runner_cache_.end());
if (*runner_created) {
it = runner_cache_
.insert({stream, std::make_unique<GenericConvRunner>(config_)})
.first;
}
return *it->second;
}
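// Resolves operand/result/scratch device buffers and dispatches to
// RunGpuConv. On ROCm, a freshly created runner first triggers MIOpen
// convolution algorithm discovery.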
absl::Status ConvolutionThunk::ExecuteOnStream(const ExecuteParams& params) {
const auto& buffer_allocations = *params.buffer_allocations;
std::vector<se::DeviceMemoryBase> operand_se_buffers, result_se_buffers;
operand_se_buffers.reserve(operand_buffers_.size());
for (BufferAllocation::Slice buffer : operand_buffers_) {
operand_se_buffers.push_back(buffer_allocations.GetDeviceAddress(buffer));
}
result_se_buffers.reserve(result_buffers_.size());
for (BufferAllocation::Slice buffer : result_buffers_) {
result_se_buffers.push_back(buffer_allocations.GetDeviceAddress(buffer));
}
se::DeviceMemoryBase scratch =
buffer_allocations.GetDeviceAddress(scratch_buffer_);
bool runner_created = false;
RunConvOptions opts;
opts.runner_cache = &GetOrCreateRunner(params.stream, &runner_created);
#if TENSORFLOW_USE_ROCM
if (runner_created) {
TF_ASSIGN_OR_RETURN(
GpuConvParams conv_params,
GetGpuConvParams(config_, operand_se_buffers, result_se_buffers));
TF_ASSIGN_OR_RETURN(se::dnn::ConvolutionKind kind,
GetDNNConvKindFromCudnnConvKind(config_.kind));
TF_ASSIGN_OR_RETURN(se::dnn::DataType input_type,
GetDNNDataTypeFromPrimitiveType(config_.input_type));
TF_ASSIGN_OR_RETURN(se::dnn::DataType output_type,
GetDNNDataTypeFromPrimitiveType(config_.output_type));
TF_ASSIGN_OR_RETURN(auto dnn,
se::dnn::internal::GetDnnFromStream(params.stream));
se::OwningScratchAllocator<> scratch_allocator(
buffer_allocations.device_ordinal(),
buffer_allocations.memory_allocator());
std::vector<se::dnn::ProfileResult> profile_results;
dnn->GetMIOpenConvolveAlgorithms(
kind, input_type, output_type, params.stream, config_.input_descriptor,
conv_params.input_buf, config_.filter_descriptor,
conv_params.filter_buf, config_.output_descriptor,
conv_params.output_buf, config_.conv_desc, &scratch_allocator,
&profile_results);
}
#endif
TF_RETURN_IF_ERROR(RunGpuConv(config_, absl::MakeSpan(operand_se_buffers),
absl::MakeSpan(result_se_buffers), scratch,
params.stream, opts));
if (!params.stream->ok()) {
return Internal("ConvolutionThunk::ExecuteOnStream failed.");
}
return absl::OkStatus();
}
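// Reorders an int8 convolution filter (and an optional float bias) into the
// cuDNN kOutputInputYX32 layout used by int8x32 vectorized convolutions.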
ConvolutionReorderThunk::ConvolutionReorderThunk(
ThunkInfo thunk_info, absl::Span<int64_t> filter_nchw,
absl::InlinedVector<BufferAllocation::Slice, 2> operand_slices,
absl::InlinedVector<BufferAllocation::Slice, 2> result_slices)
: Thunk(Kind::kConvolutionReorder, thunk_info),
filter_descriptor_(CreateFilterDescriptor(filter_nchw)),
operand_buffers_(operand_slices),
result_buffers_(result_slices) {}
absl::Status ConvolutionReorderThunk::ExecuteOnStream(
const ExecuteParams& params) {
bool has_bias = operand_buffers_.size() > 1;
CHECK_EQ(operand_buffers_.size(), result_buffers_.size());
const auto& buffer_allocations = *params.buffer_allocations;
auto filter_input = se::DeviceMemory<int8_t>(
buffer_allocations.GetDeviceAddress(operand_buffers_[0]));
auto filter_output = se::DeviceMemory<int8_t>(
buffer_allocations.GetDeviceAddress(result_buffers_[0]));
auto bias_input =
has_bias ? std::make_optional(se::DeviceMemory<float>(
buffer_allocations.GetDeviceAddress(operand_buffers_[1])))
: std::nullopt;
auto bias_output =
has_bias ? std::make_optional(se::DeviceMemory<float>(
buffer_allocations.GetDeviceAddress(result_buffers_[1])))
: std::nullopt;
auto dnn = params.stream->parent()->AsDnn();
if (dnn == nullptr) {
return absl::InternalError("No DNN for stream.");
}
return dnn->CudnnReorderConvolutionFilterAndBias(
params.stream, filter_descriptor_, filter_input, &filter_output,
std::move(bias_input), std::move(bias_output));
}
se::dnn::FilterDescriptor ConvolutionReorderThunk::CreateFilterDescriptor(
absl::Span<int64_t> filter_nchw) {
CHECK_EQ(filter_nchw.size(), 4);
se::dnn::FilterDescriptor filter_desc(2);
filter_desc.set_layout(se::dnn::FilterLayout::kOutputInputYX32);
filter_desc.set_output_feature_map_count(filter_nchw[0]);
filter_desc.set_input_feature_map_count(filter_nchw[1]);
filter_desc.set_input_filter_height(filter_nchw[2]);
filter_desc.set_input_filter_width(filter_nchw[3]);
return filter_desc;
}
}
} | #include "xla/backends/cpu/runtime/convolution_thunk.h"
#include <cstddef>
#include <cstdint>
#include <functional>
#include <memory>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/status/status.h"
#include "Eigen/Core"
#include "xla/backends/cpu/runtime/buffer_allocations.h"
#include "xla/backends/cpu/runtime/thunk.h"
#include "xla/primitive_util.h"
#include "xla/service/buffer_assignment.h"
#include "xla/service/maybe_owning_device_memory.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/tsl/concurrency/async_value_ref.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla::cpu {
namespace {
struct ConvolutionDimensions {
explicit ConvolutionDimensions(int convolution_rank = 2)
: convolution_rank(convolution_rank) {}
int convolution_rank = 2;
int batch_size = 1;
int input_size = 3;
int input_channels = 5;
int kernel_size = 3;
int output_channels = 3;
int output_size = input_size - kernel_size + 1;
};
template <typename T>
class ConvolutionThunkTypedTest : public ::testing::Test {};
using CorrectTypes = ::testing::Types<float, Eigen::half>;
TYPED_TEST_SUITE(ConvolutionThunkTypedTest, CorrectTypes);
std::vector<int64_t> MakeInputDims(
ConvolutionDimensions dims = ConvolutionDimensions()) {
std::vector<int64_t> input_dims = {dims.batch_size};
for (int i = 0; i < dims.convolution_rank; ++i) {
input_dims.push_back(dims.input_size);
}
input_dims.push_back(dims.input_channels);
return input_dims;
}
std::vector<int64_t> MakeKernelDims(
ConvolutionDimensions dims = ConvolutionDimensions()) {
std::vector<int64_t> kernel_dims = {};
for (int i = 0; i < dims.convolution_rank; ++i) {
kernel_dims.push_back(dims.kernel_size);
}
kernel_dims.push_back(dims.input_channels);
kernel_dims.push_back(dims.output_channels);
return kernel_dims;
}
std::vector<int64_t> MakeOutputDims(
ConvolutionDimensions dims = ConvolutionDimensions()) {
std::vector<int64_t> output_dims = {dims.batch_size};
for (int i = 0; i < dims.convolution_rank; ++i) {
output_dims.push_back(dims.output_size);
}
output_dims.push_back(dims.output_channels);
return output_dims;
}
template <typename ElementType>
std::vector<ElementType> MakeDataVector(const std::vector<int64_t>& dims) {
auto size = absl::c_accumulate(dims, 1, std::multiplies<int>());
return std::vector<ElementType>(size, ElementType(0.0));
}
template <typename ElementType>
std::vector<MaybeOwningDeviceMemory> MakeBuffers(
const std::vector<ElementType>& input,
const std::vector<ElementType>& kernel,
const std::vector<ElementType>& output) {
std::vector<MaybeOwningDeviceMemory> buffers;
size_t input_size_in_bytes = input.size() * sizeof(ElementType);
buffers.emplace_back(se::DeviceMemoryBase(input.data(), input_size_in_bytes));
size_t kernel_size_in_bytes = kernel.size() * sizeof(ElementType);
buffers.emplace_back(
se::DeviceMemoryBase(kernel.data(), kernel_size_in_bytes));
size_t output_size_in_bytes = output.size() * sizeof(ElementType);
buffers.emplace_back(
se::DeviceMemoryBase(output.data(), output_size_in_bytes));
return buffers;
}
ConvolutionThunk::Options MakeConvolutionOptions() {
ConvolutionThunk::Options options;
options.multi_threaded = false;
options.use_acl = false;
return options;
}
ConvolutionDimensionNumbers MakeConvolutionDimensionNumbers(
int convolution_rank) {
ConvolutionDimensionNumbers dnums;
int dim = 0;
dnums.set_input_batch_dimension(dim++);
for (int i = 0; i < convolution_rank; ++i) {
dnums.add_input_spatial_dimensions(dim++);
}
dnums.set_input_feature_dimension(dim++);
dim = 0;
for (int i = 0; i < convolution_rank; ++i) {
dnums.add_kernel_spatial_dimensions(dim++);
}
dnums.set_kernel_input_feature_dimension(dim++);
dnums.set_kernel_output_feature_dimension(dim++);
dim = 0;
dnums.set_output_batch_dimension(dim++);
for (int i = 0; i < convolution_rank; ++i) {
dnums.add_output_spatial_dimensions(dim++);
}
dnums.set_output_feature_dimension(dim++);
return dnums;
}
Window MakeWindow(int convolution_rank) {
Window window;
for (int i = 0; i < convolution_rank; ++i) {
WindowDimension* window_dim = window.add_dimensions();
window_dim->set_stride(1);
window_dim->set_padding_low(0);
window_dim->set_padding_high(0);
window_dim->set_window_dilation(1);
window_dim->set_base_dilation(1);
}
return window;
}
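// Test helper that owns the input/kernel/output host buffers and the
// matching BufferAllocations, and builds a ConvolutionThunk over them.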
template <typename ElementType>
class ConvolutionThunkBuilder {
public:
void SetOptions(ConvolutionThunk::Options options) {
options_ = std::move(options);
}
auto Build(ConvolutionDimensions dims = ConvolutionDimensions()) {
auto input_dims = MakeInputDims(dims);
auto kernel_dims = MakeKernelDims(dims);
auto output_dims = MakeOutputDims(dims);
return Build(input_dims, kernel_dims, output_dims);
}
auto Build(const std::vector<int64_t>& input_dims,
const std::vector<int64_t>& kernel_dims,
const std::vector<int64_t>& output_dims) {
int convolution_rank = input_dims.size() - 2;
input_ = MakeDataVector<ElementType>(input_dims);
kernel_ = MakeDataVector<ElementType>(kernel_dims);
output_ = MakeDataVector<ElementType>(output_dims);
size_t input_size_in_bytes = input_.size() * sizeof(ElementType);
buffers_.emplace_back(
se::DeviceMemoryBase(input_.data(), input_size_in_bytes));
size_t kernel_size_in_bytes = kernel_.size() * sizeof(ElementType);
buffers_.emplace_back(
se::DeviceMemoryBase(kernel_.data(), kernel_size_in_bytes));
size_t output_size_in_bytes = output_.size() * sizeof(ElementType);
buffers_.emplace_back(
se::DeviceMemoryBase(output_.data(), output_size_in_bytes));
allocations_ = std::make_unique<BufferAllocations>(buffers_);
input_alloc_ =
std::make_unique<BufferAllocation>(0, input_size_in_bytes, 0);
kernel_alloc_ =
std::make_unique<BufferAllocation>(1, kernel_size_in_bytes, 0);
output_alloc_ =
std::make_unique<BufferAllocation>(2, output_size_in_bytes, 0);
BufferAllocation::Slice input_slice(input_alloc_.get(), 0,
input_size_in_bytes);
BufferAllocation::Slice kernel_slice(kernel_alloc_.get(), 0,
kernel_size_in_bytes);
BufferAllocation::Slice output_slice(output_alloc_.get(), 0,
output_size_in_bytes);
auto primitive_type = primitive_util::NativeToPrimitiveType<ElementType>();
Shape input_shape = ShapeUtil::MakeShape(primitive_type, input_dims);
Shape kernel_shape = ShapeUtil::MakeShape(primitive_type, kernel_dims);
Shape output_shape = ShapeUtil::MakeShape(primitive_type, output_dims);
auto dnums = MakeConvolutionDimensionNumbers(convolution_rank);
auto window = MakeWindow(convolution_rank);
return ConvolutionThunk::Create(
{"convolution"}, options_, std::move(input_slice), input_shape,
std::move(kernel_slice), kernel_shape, std::move(output_slice),
output_shape, dnums, window,
        /*feature_group_count=*/1);
}
auto GetExecutionParams() {
return Thunk::ExecuteParams{nullptr, allocations_.get()};
}
private:
std::vector<ElementType> input_;
std::vector<ElementType> kernel_;
std::vector<ElementType> output_;
std::vector<MaybeOwningDeviceMemory> buffers_;
ConvolutionThunk::Options options_ = MakeConvolutionOptions();
std::unique_ptr<BufferAllocations> allocations_;
std::unique_ptr<BufferAllocation> input_alloc_;
std::unique_ptr<BufferAllocation> kernel_alloc_;
std::unique_ptr<BufferAllocation> output_alloc_;
};
template <typename ElementType>
void SuccessfulConvolution(int convolution_rank) {
ConvolutionThunkBuilder<ElementType> builder;
TF_ASSERT_OK_AND_ASSIGN(
auto thunk, builder.Build(ConvolutionDimensions(convolution_rank)));
Thunk::ExecuteParams params = builder.GetExecutionParams();
auto execute_event = thunk->Execute(params);
tsl::BlockUntilReady(execute_event);
ASSERT_FALSE(execute_event.IsError()) << execute_event.GetError();
}
TYPED_TEST(ConvolutionThunkTypedTest, SuccessfulConvolution1D) {
SuccessfulConvolution<TypeParam>(1);
}
TYPED_TEST(ConvolutionThunkTypedTest, SuccessfulConvolution2D) {
SuccessfulConvolution<TypeParam>(2);
}
TYPED_TEST(ConvolutionThunkTypedTest, SuccessfulConvolution3D) {
SuccessfulConvolution<TypeParam>(3);
}
TEST(ConvolutionThunkTest, CreationErrorOnUnsupportedType) {
ConvolutionThunkBuilder<int> builder;
auto status_or_thunk = builder.Build();
EXPECT_EQ(status_or_thunk.status().code(),
absl::StatusCode::kInvalidArgument);
EXPECT_THAT(status_or_thunk.status().message(),
::testing::HasSubstr("Unsupported element type (S32)"));
}
TEST(ConvolutionThunkTest, CreationErrorOnTooHighConvolutionRank) {
ConvolutionThunkBuilder<float> builder;
auto status_or_thunk =
      builder.Build(ConvolutionDimensions(/*convolution_rank=*/4));
EXPECT_EQ(status_or_thunk.status().code(),
absl::StatusCode::kInvalidArgument);
EXPECT_THAT(status_or_thunk.status().message(),
::testing::HasSubstr("Incorrect convolution rank (4)"));
}
TEST(ConvolutionThunkTest, CreationErrorOnTooLowConvolutionRank) {
ConvolutionThunkBuilder<float> builder;
auto status_or_thunk =
      builder.Build(ConvolutionDimensions(/*convolution_rank=*/0));
EXPECT_EQ(status_or_thunk.status().code(),
absl::StatusCode::kInvalidArgument);
EXPECT_THAT(status_or_thunk.status().message(),
::testing::HasSubstr("Incorrect convolution rank (0)"));
}
TEST(ConvolutionThunkTest, CreationErrorOnMismatchedKernelBufferRank) {
ConvolutionThunkBuilder<float> builder;
ConvolutionDimensions dims_2d(2);
auto input_dims = MakeInputDims(dims_2d);
auto output_dims = MakeOutputDims(dims_2d);
ConvolutionDimensions dims_3d(3);
auto kernel_dims = MakeKernelDims(dims_3d);
auto status_or_thunk = builder.Build(input_dims, kernel_dims, output_dims);
EXPECT_EQ(status_or_thunk.status().code(),
absl::StatusCode::kInvalidArgument);
EXPECT_THAT(status_or_thunk.status().message(),
::testing::HasSubstr("Buffer ranks mismatch. Input rank (4) vs "
"kernel rank (5) vs output rank (4)"));
}
TEST(ConvolutionThunkTest, CreationErrorOnMismatchedOutputBufferRank) {
ConvolutionThunkBuilder<float> builder;
ConvolutionDimensions dims_2d(2);
auto input_dims = MakeInputDims(dims_2d);
auto kernel_dims = MakeKernelDims(dims_2d);
ConvolutionDimensions dims_3d(3);
auto output_dims = MakeOutputDims(dims_3d);
auto status_or_thunk = builder.Build(input_dims, kernel_dims, output_dims);
EXPECT_EQ(status_or_thunk.status().code(),
absl::StatusCode::kInvalidArgument);
EXPECT_THAT(status_or_thunk.status().message(),
::testing::HasSubstr("Buffer ranks mismatch. Input rank (4) vs "
"kernel rank (4) vs output rank (5)"));
}
TEST(ConvolutionThunkTest, CreationErrorOnBatchSizeMismatch) {
ConvolutionThunkBuilder<float> builder;
ConvolutionDimensions dims;
dims.batch_size = 1;
auto input_dims = MakeInputDims(dims);
auto kernel_dims = MakeKernelDims(dims);
dims.batch_size = 2;
auto output_dims = MakeOutputDims(dims);
auto status_or_thunk = builder.Build(input_dims, kernel_dims, output_dims);
EXPECT_EQ(status_or_thunk.status().code(),
absl::StatusCode::kInvalidArgument);
EXPECT_THAT(status_or_thunk.status().message(),
::testing::HasSubstr(
"Batch sizes mismatch. Input batch (1) vs output batch (2)"));
}
TEST(ConvolutionThunkTest, CreationErrorOnOutputChannelsMismatch) {
ConvolutionThunkBuilder<float> builder;
ConvolutionDimensions dims;
dims.output_channels = 3;
auto input_dims = MakeInputDims(dims);
auto kernel_dims = MakeKernelDims(dims);
dims.output_channels = 4;
auto output_dims = MakeOutputDims(dims);
auto status_or_thunk = builder.Build(input_dims, kernel_dims, output_dims);
EXPECT_EQ(status_or_thunk.status().code(),
absl::StatusCode::kInvalidArgument);
EXPECT_THAT(
status_or_thunk.status().message(),
::testing::HasSubstr("Output channels mismatch. Kernel filters count (3) "
"should be the same as output channels count (4)"));
}
TEST(ConvolutionThunkTest,
ExecutionErrorOnMissingThreadPoolInMultiThreadedMode) {
ConvolutionThunkBuilder<float> builder;
auto options = MakeConvolutionOptions();
options.multi_threaded = true;
builder.SetOptions(options);
TF_ASSERT_OK_AND_ASSIGN(auto thunk, builder.Build(ConvolutionDimensions()));
Thunk::ExecuteParams params = builder.GetExecutionParams();
params.intra_op_threadpool = nullptr;
auto execute_event = thunk->Execute(params);
tsl::BlockUntilReady(execute_event);
ASSERT_TRUE(execute_event.IsError());
auto status = execute_event.GetError();
EXPECT_EQ(absl::StatusCode::kInternal, status.code());
EXPECT_EQ(
"Intra-op threadpool must be provided for ConvolutionThunk in "
"multi-threaded mode.",
status.message());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/runtime/convolution_thunk.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/backends/cpu/runtime/convolution_thunk_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
e59aac71-cf2e-45a8-bde2-70dce5b93419 | cpp | google/quiche | chacha20_poly1305_tls_decrypter | quiche/quic/core/crypto/chacha20_poly1305_tls_decrypter.cc | quiche/quic/core/crypto/chacha20_poly1305_tls_decrypter_test.cc | #include "quiche/quic/core/crypto/chacha20_poly1305_tls_decrypter.h"
#include "openssl/aead.h"
#include "openssl/tls1.h"
#include "quiche/quic/platform/api/quic_flag_utils.h"
#include "quiche/quic/platform/api/quic_flags.h"
namespace quic {
namespace {
const size_t kKeySize = 32;
const size_t kNonceSize = 12;
}
ChaCha20Poly1305TlsDecrypter::ChaCha20Poly1305TlsDecrypter()
: ChaChaBaseDecrypter(EVP_aead_chacha20_poly1305, kKeySize, kAuthTagSize,
kNonceSize,
                          /*use_ietf_nonce_construction=*/true) {
static_assert(kKeySize <= kMaxKeySize, "key size too big");
static_assert(kNonceSize <= kMaxNonceSize, "nonce size too big");
}
ChaCha20Poly1305TlsDecrypter::~ChaCha20Poly1305TlsDecrypter() {}
uint32_t ChaCha20Poly1305TlsDecrypter::cipher_id() const {
return TLS1_CK_CHACHA20_POLY1305_SHA256;
}
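// 2^36 packets: the AEAD integrity (forgery) limit given for
// ChaCha20-Poly1305 in RFC 9001.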
QuicPacketCount ChaCha20Poly1305TlsDecrypter::GetIntegrityLimit() const {
static_assert(kMaxIncomingPacketSize < 16384,
"This key limit requires limits on decryption payload sizes");
return 68719476736U;
}
} | #include "quiche/quic/core/crypto/chacha20_poly1305_tls_decrypter.h"
#include <memory>
#include <string>
#include "absl/strings/escaping.h"
#include "absl/strings/string_view.h"
#include "quiche/quic/core/quic_utils.h"
#include "quiche/quic/platform/api/quic_test.h"
#include "quiche/quic/test_tools/quic_test_utils.h"
#include "quiche/common/test_tools/quiche_test_utils.h"
namespace {
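// AEAD test vectors for ChaCha20-Poly1305, based on RFC 7539. A null `pt`
// means decryption is expected to fail: the second vector below corrupts the
// authentication tag and the third corrupts the associated data.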
struct TestVector {
const char* key;
const char* iv;
const char* fixed;
const char* aad;
const char* ct;
const char* pt;
};
const TestVector test_vectors[] = {
{"808182838485868788898a8b8c8d8e8f"
"909192939495969798999a9b9c9d9e9f",
"4041424344454647",
"07000000",
"50515253c0c1c2c3c4c5c6c7",
"d31a8d34648e60db7b86afbc53ef7ec2"
"a4aded51296e08fea9e2b5a736ee62d6"
"3dbea45e8ca9671282fafb69da92728b"
"1a71de0a9e060b2905d6a5b67ecd3b36"
"92ddbd7f2d778b8c9803aee328091b58"
"fab324e4fad675945585808b4831d7bc"
"3ff4def08e4b7a9de576d26586cec64b"
"6116"
"1ae10b594f09e26a7e902ecbd0600691",
"4c616469657320616e642047656e746c"
"656d656e206f662074686520636c6173"
"73206f66202739393a20496620492063"
"6f756c64206f6666657220796f75206f"
"6e6c79206f6e652074697020666f7220"
"746865206675747572652c2073756e73"
"637265656e20776f756c642062652069"
"742e"},
{"808182838485868788898a8b8c8d8e8f"
"909192939495969798999a9b9c9d9e9f",
"4041424344454647",
"07000000",
"50515253c0c1c2c3c4c5c6c7",
"d31a8d34648e60db7b86afbc53ef7ec2"
"a4aded51296e08fea9e2b5a736ee62d6"
"3dbea45e8ca9671282fafb69da92728b"
"1a71de0a9e060b2905d6a5b67ecd3b36"
"92ddbd7f2d778b8c9803aee328091b58"
"fab324e4fad675945585808b4831d7bc"
"3ff4def08e4b7a9de576d26586cec64b"
"6116"
"1ae10b594f09e26a7e902eccd0600691",
nullptr},
{"808182838485868788898a8b8c8d8e8f"
"909192939495969798999a9b9c9d9e9f",
"4041424344454647",
"07000000",
"60515253c0c1c2c3c4c5c6c7",
"d31a8d34648e60db7b86afbc53ef7ec2"
"a4aded51296e08fea9e2b5a736ee62d6"
"3dbea45e8ca9671282fafb69da92728b"
"1a71de0a9e060b2905d6a5b67ecd3b36"
"92ddbd7f2d778b8c9803aee328091b58"
"fab324e4fad675945585808b4831d7bc"
"3ff4def08e4b7a9de576d26586cec64b"
"6116"
"1ae10b594f09e26a7e902ecbd0600691",
nullptr},
{nullptr, nullptr, nullptr, nullptr, nullptr, nullptr}};
}
namespace quic {
namespace test {
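// Sets the decrypter's IV and attempts to decrypt `ciphertext`; returns the
// plaintext as QuicData, or nullptr if authentication/decryption fails.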
QuicData* DecryptWithNonce(ChaCha20Poly1305TlsDecrypter* decrypter,
absl::string_view nonce,
absl::string_view associated_data,
absl::string_view ciphertext) {
decrypter->SetIV(nonce);
std::unique_ptr<char[]> output(new char[ciphertext.length()]);
size_t output_length = 0;
const bool success =
decrypter->DecryptPacket(0, associated_data, ciphertext, output.get(),
&output_length, ciphertext.length());
if (!success) {
return nullptr;
}
return new QuicData(output.release(), output_length, true);
}
class ChaCha20Poly1305TlsDecrypterTest : public QuicTest {};
TEST_F(ChaCha20Poly1305TlsDecrypterTest, Decrypt) {
for (size_t i = 0; test_vectors[i].key != nullptr; i++) {
bool has_pt = test_vectors[i].pt;
std::string key;
std::string iv;
std::string fixed;
std::string aad;
std::string ct;
std::string pt;
ASSERT_TRUE(absl::HexStringToBytes(test_vectors[i].key, &key));
ASSERT_TRUE(absl::HexStringToBytes(test_vectors[i].iv, &iv));
ASSERT_TRUE(absl::HexStringToBytes(test_vectors[i].fixed, &fixed));
ASSERT_TRUE(absl::HexStringToBytes(test_vectors[i].aad, &aad));
ASSERT_TRUE(absl::HexStringToBytes(test_vectors[i].ct, &ct));
if (has_pt) {
ASSERT_TRUE(absl::HexStringToBytes(test_vectors[i].pt, &pt));
}
ChaCha20Poly1305TlsDecrypter decrypter;
ASSERT_TRUE(decrypter.SetKey(key));
std::unique_ptr<QuicData> decrypted(DecryptWithNonce(
&decrypter, fixed + iv,
absl::string_view(aad.length() ? aad.data() : nullptr, aad.length()),
ct));
if (!decrypted) {
EXPECT_FALSE(has_pt);
continue;
}
EXPECT_TRUE(has_pt);
EXPECT_EQ(16u, ct.size() - decrypted->length());
ASSERT_EQ(pt.length(), decrypted->length());
quiche::test::CompareCharArraysWithHexError(
"plaintext", decrypted->data(), pt.length(), pt.data(), pt.length());
}
}
TEST_F(ChaCha20Poly1305TlsDecrypterTest, GenerateHeaderProtectionMask) {
ChaCha20Poly1305TlsDecrypter decrypter;
std::string key;
std::string sample;
std::string expected_mask;
ASSERT_TRUE(absl::HexStringToBytes(
"6a067f432787bd6034dd3f08f07fc9703a27e58c70e2d88d948b7f6489923cc7",
&key));
ASSERT_TRUE(
absl::HexStringToBytes("1210d91cceb45c716b023f492c29e612", &sample));
ASSERT_TRUE(absl::HexStringToBytes("1cc2cd98dc", &expected_mask));
QuicDataReader sample_reader(sample.data(), sample.size());
ASSERT_TRUE(decrypter.SetHeaderProtectionKey(key));
std::string mask = decrypter.GenerateHeaderProtectionMask(&sample_reader);
quiche::test::CompareCharArraysWithHexError(
"header protection mask", mask.data(), mask.size(), expected_mask.data(),
expected_mask.size());
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/crypto/chacha20_poly1305_tls_decrypter.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/crypto/chacha20_poly1305_tls_decrypter_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
6fed1475-0ff7-47b4-b9ac-9fe5bccb633b | cpp | tensorflow/tensorflow | iterator_ops | tensorflow/core/kernels/data/iterator_ops.cc | tensorflow/core/kernels/data/iterator_ops_test.cc | #include "tensorflow/core/kernels/data/iterator_ops.h"
#include <cstdint>
#include <functional>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/status/status.h"
#include "absl/time/time.h"
#include "tensorflow/core/activity_watcher/activity.h"
#include "tensorflow/core/activity_watcher/activity_utils.h"
#include "tensorflow/core/common_runtime/graph_runner.h"
#include "tensorflow/core/common_runtime/renamed_device.h"
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/data/finalization_utils.h"
#include "tensorflow/core/data/metric_utils.h"
#include "tensorflow/core/data/serialization_utils.h"
#include "tensorflow/core/data/tf_data_memory_logger.h"
#include "tensorflow/core/data/tfdataz_metrics.h"
#include "tensorflow/core/framework/cancellation.h"
#include "tensorflow/core/framework/dataset_options.pb.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/model.h"
#include "tensorflow/core/framework/model.pb.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/variant_op_registry.h"
#include "tensorflow/core/framework/variant_tensor_data.h"
#include "tensorflow/core/kernels/data/optional_ops.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/refcount.h"
#include "tensorflow/core/lib/core/threadpool.h"
#include "tensorflow/core/lib/gtl/cleanup.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/refcount.h"
#include "tensorflow/core/platform/resource.h"
#include "tensorflow/core/platform/tstring.h"
#include "tensorflow/core/profiler/lib/traceme.h"
#include "tensorflow/core/profiler/lib/traceme_encode.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace data {
namespace {
const char kAnonymousIterator[] = "AnonymousIterator";
const char kAnonymousIteratorV2[] = "AnonymousIteratorV2";
const char kAnonymousIteratorV3[] = "AnonymousIteratorV3";
const char kIteratorVariantTypeName[] = "tensorflow::Iterator";
const char kOutputShapes[] = "output_shapes";
const char kOutputTypes[] = "output_types";
bool SymbolicCheckpointEnabled(const Options& options) {
return options.optional_symbolic_checkpoint_case() ==
Options::kSymbolicCheckpoint &&
options.symbolic_checkpoint();
}
}
constexpr const char* const
SerializeIteratorOp::kExternalStatePolicy;
IteratorResource::IteratorResource(
Env* env, const DataTypeVector& output_dtypes,
const std::vector<PartialTensorShape>& output_shapes,
std::unique_ptr<DeviceMgr> device_mgr,
std::unique_ptr<FunctionLibraryDefinition> flib_def,
std::unique_ptr<ProcessFunctionLibraryRuntime> pflr,
FunctionLibraryRuntime* flr)
: metrics_collector_(flr->device()->device_type(), *env),
unbounded_thread_pool_(env, "tf_data_iterator_resource"),
env_(*env),
device_mgr_(std::move(device_mgr)),
iterator_state_(std::make_shared<State>(std::move(flib_def),
std::move(pflr), flr,
                                              /*iterator=*/nullptr)),
output_dtypes_(output_dtypes),
output_shapes_(output_shapes) {
VLOG(2) << "creating iterator resource";
}
IteratorResource::~IteratorResource() {
TfDatazMetricsRegistry::Deregister(tf_dataz_metrics_collector_);
VLOG(2) << "destroying iterator resource";
}
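// Snapshots the current iterator state under a shared lock, builds an
// IteratorContext wired to this resource's cancellation manager and thread
// pool, fetches the next element, records latency metrics, and merges any
// symbolic checkpoint produced by the step.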
Status IteratorResource::GetNext(OpKernelContext* ctx,
std::vector<Tensor>* out_tensors,
bool* end_of_sequence) {
std::shared_ptr<State> captured_state;
{
tf_shared_lock l(mu_);
captured_state = iterator_state_;
}
auto iterator = captured_state->iterator();
if (!iterator) {
return errors::FailedPrecondition(
"GetNext() failed because the iterator has not been initialized. "
"Ensure that you have run the initializer operation for this iterator "
"before getting the next element.");
}
auto* dataset = captured_state->dataset();
IteratorContext::Params params(ctx);
params.cancellation_manager = captured_state->cancellation_manager();
params.flr = captured_state->flr();
params.function_handle_cache = captured_state->function_handle_cache();
params.resource_mgr = captured_state->resource_mgr();
params.symbolic_checkpoint = SymbolicCheckpointEnabled(dataset->options());
params.thread_factory = unbounded_thread_pool_.get_thread_factory();
params.thread_pool = &unbounded_thread_pool_;
params.id_registry = captured_state->id_registry();
params.warm_start = dataset->options().warm_start();
params.model = captured_state->model();
std::function<void()> deregister_fn;
TF_RETURN_IF_ERROR(RegisterCancellationCallback(
ctx->cancellation_manager(),
[cm = params.cancellation_manager]() { cm->StartCancel(); },
&deregister_fn));
auto cleanup = gtl::MakeCleanup(std::move(deregister_fn));
IteratorContext iter_ctx(std::move(params));
const absl::Time start_time = metrics_collector_.RecordStart();
auto status = iterator->GetNext(&iter_ctx, out_tensors, end_of_sequence);
metrics_collector_.RecordStop(start_time, *out_tensors);
const int64_t get_next_latency_micros =
env_.NowMicros() - absl::ToUnixMicros(start_time);
tf_dataz_metrics_collector_->RecordGetNextLatency(get_next_latency_micros);
captured_state->MergeCheckpoint(iter_ctx.checkpoint());
return status;
}
absl::Status IteratorResource::GetModelProto(std::string& model_proto) {
std::shared_ptr<State> captured_state;
{
tf_shared_lock l(mu_);
captured_state = iterator_state_;
}
auto iterator = captured_state->iterator();
if (!iterator) {
return absl::FailedPreconditionError(
"GetModelProto() failed because the iterator has not been initialized. "
"Ensure that you have run the initializer operation for this iterator "
"before getting the next element.");
}
model::ModelProto proto;
if (auto model = captured_state->model(); model) {
TF_RETURN_IF_ERROR(model->ToProto(&proto));
} else {
return absl::NotFoundError(
"Cannot find this iterator's analytical model. Did you disable "
"autotune for the dataset used to create this iterator? See more "
"information at "
"https:
"AutotuneOptions .");
}
model_proto = proto.SerializeAsString();
return absl::OkStatus();
}
Status IteratorResource::Save(OpKernelContext* ctx,
ExternalStatePolicy external_state_policy,
IteratorStateWriter* writer) {
std::shared_ptr<State> captured_state;
{
tf_shared_lock l(mu_);
captured_state = iterator_state_;
}
auto iterator = captured_state->iterator();
if (!iterator) {
return errors::FailedPrecondition(
"Save() failed because the iterator has not been initialized. Ensure "
"that you have run the initializer operation for this iterator before "
"saving it.");
}
auto* dataset = captured_state->dataset();
if (SymbolicCheckpointEnabled(dataset->options())) {
const auto& checkpoint = captured_state->checkpoint();
if (!checkpoint.GetStatus().ok()) {
LOG(WARNING) << "Symbolic checkpointing failed: "
<< checkpoint.GetStatus();
return checkpoint.GetStatus();
}
LOG(INFO) << "Saving symbolic checkpoint";
TF_RETURN_IF_ERROR(checkpoint.Save(writer));
return absl::OkStatus();
}
SerializationContext::Params params(ctx);
params.external_state_policy = external_state_policy;
params.symbolic_checkpoint = SymbolicCheckpointEnabled(dataset->options());
SerializationContext serialization_ctx(params);
return iterator->Save(&serialization_ctx, writer);
}
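// Cancels the currently running iterator, rebuilds a fresh iterator from the
// checkpoint `reader`, and atomically swaps it in under the lock.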
Status IteratorResource::Restore(OpKernelContext* ctx,
IteratorStateReader* reader) {
const DatasetBase* dataset;
std::shared_ptr<State> new_state;
const DatasetBase* input_dataset;
{
tf_shared_lock l(mu_);
auto iterator = iterator_state_->iterator();
if (!iterator) {
return errors::FailedPrecondition(
"Restore() failed because the iterator has not been initialized. "
"Ensure that you have run the initializer operation for this "
"iterator before restoring it.");
}
dataset = iterator->dataset();
dataset->Ref();
new_state =
std::make_shared<State>(iterator_state_->flib_def(),
iterator_state_->pflr(), iterator_state_->flr(),
                                /*iterator=*/nullptr);
input_dataset = iterator_state_->dataset();
iterator_state_->cancellation_manager()->StartCancel();
}
core::ScopedUnref scoped_unref(dataset);
IteratorContext::Params params(ctx);
params.cancellation_manager = new_state->cancellation_manager();
params.flr = new_state->flr();
params.function_handle_cache = new_state->function_handle_cache();
params.resource_mgr = new_state->resource_mgr();
params.symbolic_checkpoint =
SymbolicCheckpointEnabled(input_dataset->options());
params.thread_factory = unbounded_thread_pool_.get_thread_factory();
params.thread_pool = &unbounded_thread_pool_;
params.id_registry = new_state->id_registry();
params.warm_start = dataset->options().warm_start();
std::function<void()> deregister_fn;
TF_RETURN_IF_ERROR(RegisterCancellationCallback(
ctx->cancellation_manager(),
[cm = params.cancellation_manager]() { cm->StartCancel(); },
&deregister_fn));
auto cleanup = gtl::MakeCleanup(std::move(deregister_fn));
IteratorContext iter_ctx(IteratorContext(std::move(params)));
std::unique_ptr<IteratorBase> iterator_base;
TF_RETURN_IF_ERROR(dataset->MakeIteratorFromCheckpoint(
&iter_ctx, "Iterator", reader, &iterator_base));
new_state->DowncastAndSetIteratorAndDataset(std::move(iterator_base),
input_dataset);
new_state->MergeCheckpoint(iter_ctx.checkpoint());
mutex_lock l(mu_);
std::swap(iterator_state_, new_state);
return absl::OkStatus();
}
Status IteratorResource::SetIteratorFromDataset(OpKernelContext* ctx,
const DatasetBase* dataset) {
std::shared_ptr<State> new_state;
{
tf_shared_lock l(mu_);
new_state =
std::make_shared<State>(iterator_state_->flib_def(),
iterator_state_->pflr(), iterator_state_->flr(),
                                /*iterator=*/nullptr);
}
IteratorContext::Params params(ctx);
params.cancellation_manager = new_state->cancellation_manager();
params.flr = new_state->flr();
params.function_handle_cache = new_state->function_handle_cache();
params.resource_mgr = new_state->resource_mgr();
params.symbolic_checkpoint = SymbolicCheckpointEnabled(dataset->options());
params.thread_factory = unbounded_thread_pool_.get_thread_factory();
params.thread_pool = &unbounded_thread_pool_;
params.id_registry = new_state->id_registry();
params.warm_start = dataset->options().warm_start();
std::function<void()> deregister_fn;
TF_RETURN_IF_ERROR(RegisterCancellationCallback(
ctx->cancellation_manager(),
[cm = params.cancellation_manager]() { cm->StartCancel(); },
&deregister_fn));
auto cleanup = gtl::MakeCleanup(std::move(deregister_fn));
IteratorContext iter_ctx(IteratorContext(std::move(params)));
std::unique_ptr<IteratorBase> iterator;
if (ctx->function_library()->device()->device_type() == DEVICE_CPU) {
DatasetBase* finalized_dataset;
TF_ASSIGN_OR_RETURN(finalized_dataset, GetFinalizedDataset(ctx, dataset));
TF_RETURN_IF_ERROR(finalized_dataset->MakeIterator(&iter_ctx,
                                                       /*parent=*/nullptr,
"Iterator", &iterator));
} else {
TF_RETURN_IF_ERROR(dataset->MakeIterator(&iter_ctx,
nullptr, "Iterator",
&iterator));
}
TF_RETURN_IF_ERROR(
VerifyTypesMatch(output_dtypes_, iterator->output_dtypes()));
TF_RETURN_IF_ERROR(
VerifyShapesCompatible(output_shapes_, iterator->output_shapes()));
new_state->DowncastAndSetIteratorAndDataset(std::move(iterator), dataset);
new_state->SetModel(iter_ctx.model());
new_state->MergeCheckpoint(iter_ctx.checkpoint());
mutex_lock l(mu_);
std::swap(iterator_state_, new_state);
tf_dataz_metrics_collector_ = std::make_shared<TfDatazMetricsCollector>(
env_, iterator_state_->iterator(), iterator_state_->model());
EnsureIteratorMemoryLoggerStarted();
TfDatazMetricsRegistry::Register(tf_dataz_metrics_collector_);
return absl::OkStatus();
}
void IteratorResource::State::DowncastAndSetIteratorAndDataset(
std::unique_ptr<IteratorBase> it, const DatasetBase* dataset) {
iterator_.reset(static_cast<DatasetBaseIterator*>(it.release()));
if (dataset) {
dataset->Ref();
dataset_.reset(const_cast<DatasetBase*>(dataset));
}
}
void IteratorResource::State::MergeCheckpoint(MemoryCheckpoint* other) {
if (SymbolicCheckpointEnabled(dataset_->options())) {
checkpoint_.Merge(other);
}
}
void IteratorResource::State::SetModel(std::shared_ptr<model::Model> model) {
model_ = model;
}
namespace {
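// Bridges iterator state and variant tensors: it can capture an iterator's
// saved state as IteratorStateVariant values for serialization, or wrap a
// serialized tensor in a reader for restoring an iterator.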
class IteratorVariantSerializer {
public:
IteratorVariantSerializer() = default;
Status InitializeFromIterator(OpKernelContext* ctx,
ExternalStatePolicy external_state_policy,
IteratorResource* iterator_resource) {
VariantTensorDataWriter writer;
TF_RETURN_IF_ERROR(
iterator_resource->Save(ctx, external_state_policy, &writer));
std::vector<std::unique_ptr<VariantTensorData>> data;
writer.ReleaseData(&data);
variants_.clear();
variants_.reserve(data.size());
for (auto& it : data) {
IteratorStateVariant v;
TF_RETURN_IF_ERROR(v.InitializeFromVariantData(std::move(it)));
variants_.push_back(v);
}
num_tensors_ = variants_.size();
can_serialize_ = true;
return absl::OkStatus();
}
Status InitFromTensor(const Tensor* serialized_t) {
int64_t num_tensors = serialized_t->dim_size(0);
auto serialized_vec = serialized_t->vec<Variant>();
std::vector<const VariantTensorData*> data;
data.reserve(num_tensors);
for (int i = 0; i < num_tensors; ++i) {
auto* w = serialized_vec(i).get<IteratorStateVariant>();
if (!w) {
return errors::Internal(
"Cannot initialize an iterator from tensor ",
serialized_vec(i).DebugString(),
". Expected a variant tensor of type IteratorStateVariant");
}
data.push_back(w->GetData());
}
reader_ = std::make_unique<VariantTensorDataReader>(data);
num_tensors_ = data.size();
return absl::OkStatus();
}
int64_t NumTensors() { return num_tensors_; }
Status Serialize(Tensor* serialized) {
if (!can_serialize_) {
return errors::InvalidArgument(
"Please call InitializeFromIterator before calling Serialize.");
}
int64_t size = variants_.size();
for (int64_t i = 0; i < size; ++i) {
if (variants_[i].GetData() == nullptr) {
return errors::Internal(
"Cannot serialize an empty IteratorStateVariant");
}
serialized->vec<Variant>()(i) = variants_[i];
}
return absl::OkStatus();
}
IteratorStateReader* GetReader() { return reader_.get(); }
private:
bool can_serialize_ = false;
int64_t num_tensors_;
std::vector<IteratorStateVariant> variants_;
std::unique_ptr<IteratorStateReader> reader_;
};
}
IteratorHandleOp::IteratorHandleOp(OpKernelConstruction* ctx)
: OpKernel(ctx), graph_def_version_(ctx->graph_def_version()) {
OP_REQUIRES_OK(ctx, ctx->GetAttr(kOutputTypes, &output_dtypes_));
OP_REQUIRES_OK(ctx, ctx->GetAttr(kOutputShapes, &output_shapes_));
OP_REQUIRES_OK(ctx, ctx->GetAttr("shared_name", &name_));
}
IteratorHandleOp::~IteratorHandleOp() {
if (resource_ != nullptr) {
resource_->Unref();
if (cinfo_.resource_is_private_to_kernel()) {
if (!cinfo_.resource_manager()
->template Delete<IteratorResource>(cinfo_.container(),
cinfo_.name())
.ok()) {
}
}
}
}
void IteratorHandleOp::Compute(OpKernelContext* context)
TF_LOCKS_EXCLUDED(mu_) {
{
mutex_lock l(mu_);
if (resource_ == nullptr) {
FunctionLibraryRuntime* flr;
std::unique_ptr<DeviceMgr> device_mgr(nullptr);
std::unique_ptr<FunctionLibraryDefinition> flib_def(nullptr);
std::unique_ptr<ProcessFunctionLibraryRuntime> pflr(nullptr);
if (!name_.empty()) {
flr = CreatePrivateFLR(context, &device_mgr, &flib_def, &pflr);
} else {
OP_REQUIRES_OK(context, context->function_library()->Clone(
&flib_def, &pflr, &flr, true));
}
ResourceMgr* mgr = context->resource_manager();
OP_REQUIRES_OK(context, cinfo_.Init(mgr, def()));
IteratorResource* resource;
OP_REQUIRES_OK(
context,
mgr->LookupOrCreate<IteratorResource>(
cinfo_.container(), cinfo_.name(), &resource,
[context, flr, &device_mgr, &flib_def, &pflr,
this](IteratorResource** ret) TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
*ret = new IteratorResource(
context->env(), output_dtypes_, output_shapes_,
std::move(device_mgr), std::move(flib_def), std::move(pflr),
flr);
return absl::OkStatus();
}));
Status s = VerifyResource(resource);
if (TF_PREDICT_FALSE(!s.ok())) {
resource->Unref();
context->SetStatus(s);
return;
}
resource_ = resource;
}
}
OP_REQUIRES_OK(context, MakeResourceHandleToOutput(
context, 0, cinfo_.container(), cinfo_.name(),
TypeIndex::Make<IteratorResource>()));
}
Status IteratorHandleOp::VerifyResource(IteratorResource* resource) {
TF_RETURN_IF_ERROR(
VerifyTypesMatch(output_dtypes_, resource->output_dtypes()));
TF_RETURN_IF_ERROR(
VerifyShapesCompatible(output_shapes_, resource->output_shapes()));
return absl::OkStatus();
}
FunctionLibraryRuntime* IteratorHandleOp::CreatePrivateFLR(
OpKernelContext* ctx, std::unique_ptr<DeviceMgr>* device_mgr,
std::unique_ptr<FunctionLibraryDefinition>* flib_def,
std::unique_ptr<ProcessFunctionLibraryRuntime>* pflr) {
*device_mgr =
std::make_unique<StaticDeviceMgr>(RenamedDevice::NewRenamedDevice(
ctx->device()->name(), down_cast<Device*>(ctx->device()),
          false /* owns_underlying */, false /* isolate_session_state */));
*flib_def = std::make_unique<FunctionLibraryDefinition>(
*ctx->function_library()->GetFunctionLibraryDefinition());
const auto* config = ctx->function_library()->config_proto();
*pflr = std::make_unique<ProcessFunctionLibraryRuntime>(
device_mgr->get(), ctx->env(),
config, graph_def_version_, flib_def->get(),
config->graph_options().optimizer_options());
return (*pflr)->GetFLR(ctx->device()->name());
}
AnonymousIteratorHandleOp::AnonymousIteratorHandleOp(
OpKernelConstruction* context)
: AnonymousResourceOp<IteratorResource>(
context,
context->def().op() == kAnonymousIteratorV2 ||
context->def().op() == kAnonymousIteratorV3,
context->def().op() == kAnonymousIteratorV2),
graph_def_version_(context->graph_def_version()) {
OP_REQUIRES_OK(context, context->GetAttr(kOutputTypes, &output_dtypes_));
OP_REQUIRES_OK(context, context->GetAttr(kOutputShapes, &output_shapes_));
}
string AnonymousIteratorHandleOp::name() { return kAnonymousIterator; }
Status AnonymousIteratorHandleOp::CreateResource(
OpKernelContext* ctx, std::unique_ptr<FunctionLibraryDefinition> flib_def,
std::unique_ptr<ProcessFunctionLibraryRuntime> pflr,
FunctionLibraryRuntime* lib, IteratorResource** resource) {
std::unique_ptr<DeviceMgr> device_mgr(nullptr);
*resource = new IteratorResource(ctx->env(), output_dtypes_, output_shapes_,
std::move(device_mgr), std::move(flib_def),
std::move(pflr), lib);
return absl::OkStatus();
}
HybridAsyncOpKernel::HybridAsyncOpKernel(OpKernelConstruction* ctx,
const char* background_worker_name)
: AsyncOpKernel(ctx),
background_worker_(ctx->env(), background_worker_name) {}
void HybridAsyncOpKernel::ComputeAsync(OpKernelContext* ctx,
DoneCallback done) {
background_worker_.Schedule([this, ctx, done = std::move(done)]() {
ctx->SetStatus(DoCompute(ctx));
done();
});
}
void HybridAsyncOpKernel::Compute(OpKernelContext* ctx) {
ctx->SetStatus(DoCompute(ctx));
}
Status MakeIteratorOp::DoCompute(OpKernelContext* ctx) {
tensorflow::ResourceTagger tag(kTFDataResourceTag,
ctx->op_kernel().type_string());
DatasetBase* dataset;
TF_RETURN_IF_ERROR(GetDatasetFromVariantTensor(ctx->input(0), &dataset));
IteratorResource* iterator_resource;
TF_RETURN_IF_ERROR(
LookupResource(ctx, HandleFromInput(ctx, 1), &iterator_resource));
core::ScopedUnref unref_iterator(iterator_resource);
return iterator_resource->SetIteratorFromDataset(ctx, dataset);
}
Status DeleteIteratorOp::DoCompute(OpKernelContext* ctx) {
tensorflow::ResourceTagger tag(kTFDataResourceTag,
ctx->op_kernel().type_string());
const ResourceHandle& handle = ctx->input(0).flat<ResourceHandle>()(0);
return DeleteResource(ctx, handle);
}
namespace {
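// Evaluates a dataset to exactly one element: fails with InvalidArgument if
// the dataset is empty or yields more than one element.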
class ToSingleElementOp : public AsyncOpKernel {
public:
explicit ToSingleElementOp(OpKernelConstruction* ctx)
: AsyncOpKernel(ctx),
metrics_collector_(ctx->device()->attributes().device_type(),
*ctx->env()),
unbounded_threadpool_(ctx->env(), "tf_data_to_single_element") {
OP_REQUIRES_OK(ctx, ctx->GetAttr("output_types", &output_types_));
OP_REQUIRES_OK(ctx, ctx->GetAttr("output_shapes", &output_shapes_));
}
void ComputeAsync(OpKernelContext* ctx, DoneCallback done) override {
unbounded_threadpool_.Schedule([this, ctx, done = std::move(done)]() {
ctx->SetStatus(DoCompute(ctx));
done();
});
}
void Compute(OpKernelContext* ctx) override {
ctx->SetStatus(DoCompute(ctx));
}
private:
Status DoCompute(OpKernelContext* ctx) {
tsl::profiler::TraceMe traceme(
[&] {
return tsl::profiler::TraceMeEncode("ToSingleElementOp::DoCompute",
{{"id", ctx->step_id()}});
},
profiler::kInfo);
tensorflow::ResourceTagger tag(kTFDataResourceTag,
ctx->op_kernel().type_string());
metrics::RecordTFDataFetchOp("ToSingleElementOp");
DatasetBase* dataset;
TF_RETURN_IF_ERROR(GetDatasetFromVariantTensor(ctx->input(0), &dataset));
IteratorContext::Params params(ctx);
ResourceMgr resource_mgr;
params.resource_mgr = &resource_mgr;
CancellationManager cancellation_manager(ctx->cancellation_manager());
params.cancellation_manager = &cancellation_manager;
IteratorContext iter_ctx(std::move(params));
std::unique_ptr<IteratorBase> iterator;
TF_RETURN_IF_ERROR(dataset->MakeIterator(
&iter_ctx, nullptr, "SingleElementIterator", &iterator));
std::vector<Tensor> components;
components.reserve(dataset->output_dtypes().size());
bool end_of_sequence = false;
const absl::Time start_time = metrics_collector_.RecordStart();
TF_RETURN_IF_ERROR(
iterator->GetNext(&iter_ctx, &components, &end_of_sequence));
metrics_collector_.RecordStop(start_time, components);
if (end_of_sequence) {
return errors::InvalidArgument("Dataset was empty.");
}
TF_RETURN_IF_ERROR(VerifyTypesMatch(output_types_, components));
TF_RETURN_IF_ERROR(VerifyShapesCompatible(output_shapes_, components));
for (int i = 0; i < components.size(); ++i) {
ctx->set_output(i, components[i]);
}
components.clear();
TF_RETURN_IF_ERROR(
iterator->GetNext(&iter_ctx, &components, &end_of_sequence));
if (!end_of_sequence) {
return errors::InvalidArgument("Dataset had more than one element.");
}
return absl::OkStatus();
}
IteratorMetricsCollector metrics_collector_;
UnboundedThreadPool unbounded_threadpool_;
DataTypeVector output_types_;
std::vector<PartialTensorShape> output_shapes_;
};
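// OneShotIteratorOp runs the `dataset_factory` function at most once, on a
// background worker, to build the dataset and its IteratorResource.
// Invocations that arrive while initialization is in flight are queued in
// done_callbacks_ and completed from Init() once TryInit() resolves.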
class OneShotIteratorOp : public AsyncOpKernel {
public:
explicit OneShotIteratorOp(OpKernelConstruction* ctx)
: AsyncOpKernel(ctx),
background_worker_(ctx->env(), "tf_data_one_shot_iterator"),
        graph_def_version_(ctx->graph_def_version()) {
string shared_name;
OP_REQUIRES_OK(ctx, ctx->GetAttr("shared_name", &shared_name));
OP_REQUIRES(ctx, shared_name.empty(),
errors::InvalidArgument("OneShotIteratorOp does not currently "
"support the 'shared_name' attr."));
OP_REQUIRES_OK(ctx,
ctx->GetAttr("dataset_factory", &dataset_factory_func_));
OP_REQUIRES_OK(ctx, ctx->GetAttr(kOutputTypes, &output_dtypes_));
OP_REQUIRES_OK(ctx, ctx->GetAttr(kOutputShapes, &output_shapes_));
}
~OneShotIteratorOp() override {
if (iterator_resource_ != nullptr) {
iterator_resource_->Unref();
      if (!cinfo_.resource_manager()
               ->Delete<IteratorResource>(cinfo_.container(), cinfo_.name())
               .ok()) {
        // Intentionally ignored: the resource may already have been deleted
        // by an explicit DeleteIterator op or by container cleanup.
      }
}
}
void ComputeAsync(OpKernelContext* ctx, DoneCallback done) override {
tensorflow::ResourceTagger tag(kTFDataResourceTag,
ctx->op_kernel().type_string());
{
mutex_lock l(mu_);
if (iterator_resource_ == nullptr && initialization_status_.ok()) {
if (!initialization_started_) {
background_worker_.Schedule([this, ctx, done]() { Init(ctx, done); });
initialization_started_ = true;
} else {
done_callbacks_.emplace_back(ctx, std::move(done));
}
return;
}
}
ProduceOutput(ctx, done);
}
private:
void Init(OpKernelContext* ctx, const DoneCallback& done) {
IteratorResource* iterator = nullptr;
ContainerInfo cinfo;
Status s = TryInit(ctx, &iterator, &cinfo);
std::vector<std::pair<OpKernelContext*, DoneCallback>> callbacks_to_run;
{
mutex_lock l(mu_);
if (s.ok()) {
iterator_resource_ = iterator;
cinfo_ = cinfo;
}
initialization_status_ = s;
std::swap(done_callbacks_, callbacks_to_run);
}
for (auto&& ctx_done : callbacks_to_run) {
ProduceOutput(ctx_done.first, ctx_done.second);
}
ProduceOutput(ctx, done);
}
Status TryInit(OpKernelContext* ctx, IteratorResource** iterator,
ContainerInfo* cinfo) {
TF_RETURN_IF_ERROR(cinfo->Init(ctx->resource_manager(), def()));
FunctionLibraryRuntime* flr;
std::unique_ptr<FunctionLibraryDefinition> flib_def(nullptr);
std::unique_ptr<ProcessFunctionLibraryRuntime> pflr(nullptr);
TF_RETURN_IF_ERROR(
ctx->function_library()->Clone(&flib_def, &pflr, &flr, true));
TF_RETURN_IF_ERROR(
ctx->resource_manager()->LookupOrCreate<IteratorResource>(
cinfo->container(), cinfo->name(), iterator,
[ctx, flr, this, &flib_def, &pflr](IteratorResource** ret)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
*ret = new IteratorResource(
ctx->env(), output_dtypes_, output_shapes_,
nullptr, std::move(flib_def),
std::move(pflr), flr);
return absl::OkStatus();
}));
core::ScopedUnref unref_iterator(*iterator);
TF_RETURN_IF_ERROR(
VerifyTypesMatch(output_dtypes_, (*iterator)->output_dtypes()));
TF_RETURN_IF_ERROR(
VerifyShapesCompatible(output_shapes_, (*iterator)->output_shapes()));
FunctionLibraryRuntime::Handle f_handle;
TF_RETURN_IF_ERROR(ctx->function_library()->Instantiate(
dataset_factory_func_.name(), AttrSlice(&dataset_factory_func_.attr()),
&f_handle));
FunctionLibraryRuntime::Options opts;
opts.cancellation_manager = ctx->cancellation_manager();
ScopedStepContainer step_container(opts.step_id, [ctx](const string& name) {
ctx->resource_manager()->Cleanup(name).IgnoreError();
});
opts.step_container = &step_container;
opts.runner = ctx->runner();
opts.run_all_kernels_inline = ctx->run_all_kernels_inline();
std::vector<Tensor> return_values;
TF_RETURN_IF_ERROR(ctx->function_library()->RunSync(
std::move(opts), f_handle, {}, &return_values));
if (return_values.size() != 1 || return_values[0].dtype() != DT_VARIANT ||
!TensorShapeUtils::IsScalar(return_values[0].shape())) {
return errors::InvalidArgument(
"The `dataset_factory` function must return "
"a single scalar of dtype DT_VARIANT.");
}
DatasetBase* dataset;
TF_RETURN_IF_ERROR(GetDatasetFromVariantTensor(return_values[0], &dataset));
TF_RETURN_IF_ERROR((*iterator)->SetIteratorFromDataset(ctx, dataset));
(*iterator)->Ref();
return absl::OkStatus();
}
void ProduceOutput(OpKernelContext* ctx, const DoneCallback& done) {
Tensor* handle;
OP_REQUIRES_OK_ASYNC(ctx, ctx->allocate_output(0, TensorShape({}), &handle),
done);
Status s;
{
mutex_lock l(mu_);
s = initialization_status_;
if (s.ok()) {
handle->scalar<ResourceHandle>()() =
MakeResourceHandle<IteratorResource>(ctx, cinfo_.container(),
cinfo_.name());
}
}
OP_REQUIRES_OK_ASYNC(ctx, s, done);
done();
}
NameAttrList dataset_factory_func_;
DataTypeVector output_dtypes_;
std::vector<PartialTensorShape> output_shapes_;
BackgroundWorker background_worker_;
mutex mu_;
ContainerInfo cinfo_ TF_GUARDED_BY(mu_);
IteratorResource* iterator_resource_ TF_GUARDED_BY(mu_) = nullptr;
bool initialization_started_ TF_GUARDED_BY(mu_) = false;
Status initialization_status_ TF_GUARDED_BY(mu_);
std::vector<std::pair<OpKernelContext*, DoneCallback>> done_callbacks_
TF_GUARDED_BY(mu_);
const int graph_def_version_;
};
}
AsyncOpKernel* IteratorGetNextOp::AsAsync() {
return type_string() == "IteratorGetNextSync" ? nullptr : this;
}
void RecordElementSize(const std::vector<Tensor>& element,
                       tsl::profiler::TraceMe* traceme) {
traceme->AppendMetadata([&]() {
int64_t element_size = 0;
for (const auto& component : element) {
element_size += component.TotalBytes();
}
return tsl::profiler::TraceMeEncode({{"element_size", element_size}});
});
}
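// Fetches the next element from the iterator resource bound to input 0 and
// emits its components as the op outputs; end of sequence surfaces as an
// OutOfRange error.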
Status IteratorGetNextOp::DoCompute(OpKernelContext* ctx) {
VLOG(3) << "IteratorGetNextOp enter. iter_id=" << ctx->frame_iter().iter_id;
auto cleanup = gtl::MakeCleanup([ctx] {
VLOG(3) << "IteratorGetNextOp exit. iter_id=" << ctx->frame_iter().iter_id;
});
activity_watcher::ActivityScope activity_scope([ctx = ctx]() {
return activity_watcher::ActivityFromContext(
ctx, "IteratorGetNextOp::DoCompute",
activity_watcher::ActivityCategory::kDatasetOp);
});
tsl::profiler::TraceMe traceme(
[&] {
return tsl::profiler::TraceMeEncode(
"IteratorGetNextOp::DoCompute",
{{"id", ctx->step_id()}, {"iter_num", ctx->frame_iter().iter_id}});
},
profiler::kInfo);
tensorflow::ResourceTagger tag(kTFDataResourceTag,
ctx->op_kernel().type_string());
metrics::RecordTFDataFetchOp("IteratorGetNextOp");
IteratorResource* iterator;
TF_RETURN_IF_ERROR(LookupResource(ctx, HandleFromInput(ctx, 0), &iterator));
core::ScopedUnref unref_iterator(iterator);
std::vector<Tensor> components;
bool end_of_sequence = false;
TF_RETURN_IF_ERROR(iterator->GetNext(ctx, &components, &end_of_sequence));
if (end_of_sequence) {
return errors::OutOfRange("End of sequence");
}
TF_RETURN_IF_ERROR(VerifyTypesMatch(output_types_, components));
TF_RETURN_IF_ERROR(VerifyShapesCompatible(output_shapes_, components));
RecordElementSize(components, &traceme);
for (int i = 0; i < components.size(); ++i) {
ctx->set_output(i, components[i]);
}
return absl::OkStatus();
}
Status IteratorGetModelProtoOp::DoCompute(OpKernelContext* ctx) {
IteratorResource* iterator = nullptr;
TF_RETURN_IF_ERROR(LookupResource(ctx, HandleFromInput(ctx, 0), &iterator));
core::ScopedUnref unref_iterator(iterator);
std::string model_proto;
TF_RETURN_IF_ERROR(iterator->GetModelProto(model_proto));
Tensor* model_proto_result;
TF_RETURN_IF_ERROR(
ctx->allocate_output(0, TensorShape({}), &model_proto_result));
model_proto_result->scalar<tstring>()() = model_proto;
return absl::OkStatus();
}
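// Like IteratorGetNextOp, but wraps the element in an optional: end of
// sequence yields an empty optional instead of an OutOfRange error.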
Status IteratorGetNextAsOptionalOp::DoCompute(OpKernelContext* ctx) {
VLOG(3) << "IteratorGetNextAsOptionalOp enter. iter_id="
<< ctx->frame_iter().iter_id;
auto cleanup = gtl::MakeCleanup([ctx] {
VLOG(3) << "IteratorGetNextAsOptionalOp exit. iter_id="
<< ctx->frame_iter().iter_id;
});
activity_watcher::ActivityScope activity_scope([ctx = ctx]() {
return activity_watcher::ActivityFromContext(
ctx, "IteratorGetNextAsOptionalOp::DoCompute",
activity_watcher::ActivityCategory::kDatasetOp);
});
tsl::profiler::TraceMe traceme(
[&] {
return tsl::profiler::TraceMeEncode(
"IteratorGetNextAsOptionalOp::DoCompute",
{{"id", ctx->step_id()}, {"iter_num", ctx->frame_iter().iter_id}});
},
profiler::kInfo);
tensorflow::ResourceTagger tag(kTFDataResourceTag,
ctx->op_kernel().type_string());
metrics::RecordTFDataFetchOp("IteratorGetNextAsOptionalOp");
IteratorResource* iterator;
TF_RETURN_IF_ERROR(LookupResource(ctx, HandleFromInput(ctx, 0), &iterator));
core::ScopedUnref unref_iterator(iterator);
std::vector<Tensor> components;
bool end_of_sequence = false;
TF_RETURN_IF_ERROR(iterator->GetNext(ctx, &components, &end_of_sequence));
if (end_of_sequence) {
return WriteOptionalNoneToOutput(ctx, 0);
} else {
RecordElementSize(components, &traceme);
for (int i = 0; i < components.size(); ++i) {
if (components[i].dtype() != output_types_[i]) {
return errors::InvalidArgument(
"The given optional does not match the expected type for "
"component ",
i, ". Expected: ", DataTypeString(output_types_[i]),
". Actual: ", DataTypeString(components[i].dtype()), ".");
}
if (!output_shapes_[i].IsCompatibleWith(components[i].shape())) {
return errors::InvalidArgument(
"The given optional does not match the expected shape "
"for component ",
i, ". Expected: ", output_shapes_[i].DebugString(),
". Actual: ", components[i].shape().DebugString(), ".");
}
}
return WriteOptionalWithValueToOutput(ctx, 0, std::move(components));
}
}
void IteratorToStringHandleOp::Compute(OpKernelContext* ctx) {
const Tensor& resource_handle_t = ctx->input(0);
OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(resource_handle_t.shape()),
errors::InvalidArgument("resource_handle must be a scalar"));
  IteratorResource* iterator_resource;
  OP_REQUIRES_OK(
      ctx, LookupResource(ctx, HandleFromInput(ctx, 0), &iterator_resource));
  // The lookup only validates that the handle refers to a live iterator
  // resource; the reference itself is not needed, so release it immediately.
  iterator_resource->Unref();
Tensor* string_handle_t;
OP_REQUIRES_OK(ctx,
ctx->allocate_output(0, TensorShape({}), &string_handle_t));
string_handle_t->scalar<tstring>()() =
resource_handle_t.scalar<ResourceHandle>()().SerializeAsString();
}
IteratorFromStringHandleOp::IteratorFromStringHandleOp(
OpKernelConstruction* ctx)
: OpKernel(ctx) {
OP_REQUIRES_OK(ctx, ctx->GetAttr(kOutputTypes, &output_dtypes_));
OP_REQUIRES_OK(ctx, ctx->GetAttr(kOutputShapes, &output_shapes_));
OP_REQUIRES(
ctx,
output_dtypes_.empty() || output_shapes_.empty() ||
output_dtypes_.size() == output_shapes_.size(),
errors::InvalidArgument("If both 'output_types' and 'output_shapes' "
"are set, they must have the same length."));
}
void IteratorFromStringHandleOp::Compute(OpKernelContext* ctx) {
const Tensor& string_handle_t = ctx->input(0);
OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(string_handle_t.shape()),
errors::InvalidArgument("string_handle must be a scalar"));
ResourceHandle resource_handle;
OP_REQUIRES(
ctx, resource_handle.ParseFromString(string_handle_t.scalar<tstring>()()),
errors::InvalidArgument(
"Could not parse string_handle as a valid ResourceHandle"));
OP_REQUIRES(
ctx, resource_handle.device() == ctx->device()->attributes().name(),
errors::InvalidArgument("Attempted create an iterator on device \"",
ctx->device()->attributes().name(),
"\" from handle defined on device \"",
resource_handle.device(), "\""));
IteratorResource* iterator_resource;
OP_REQUIRES_OK(ctx, LookupResource(ctx, resource_handle, &iterator_resource));
core::ScopedUnref unref_iterator(iterator_resource);
if (!output_dtypes_.empty()) {
OP_REQUIRES_OK(ctx, VerifyTypesMatch(output_dtypes_,
iterator_resource->output_dtypes()));
}
if (!output_shapes_.empty()) {
OP_REQUIRES_OK(ctx,
VerifyShapesCompatible(output_shapes_,
iterator_resource->output_shapes()));
}
Tensor* resource_handle_t;
OP_REQUIRES_OK(ctx,
ctx->allocate_output(0, TensorShape({}), &resource_handle_t));
resource_handle_t->scalar<ResourceHandle>()() = resource_handle;
}
SerializeIteratorOp::SerializeIteratorOp(OpKernelConstruction* ctx)
: OpKernel(ctx) {
if (ctx->HasAttr(kExternalStatePolicy)) {
int64_t external_state_policy;
OP_REQUIRES_OK(ctx,
ctx->GetAttr(kExternalStatePolicy, &external_state_policy));
external_state_policy_ = ExternalStatePolicy(external_state_policy);
}
}
void SerializeIteratorOp::Compute(OpKernelContext* ctx) {
tensorflow::ResourceTagger tag(kTFDataResourceTag,
ctx->op_kernel().type_string());
const Tensor& resource_handle_t = ctx->input(0);
OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(resource_handle_t.shape()),
errors::InvalidArgument("resource_handle must be a scalar"));
IteratorResource* iterator_resource;
OP_REQUIRES_OK(
ctx, LookupResource(ctx, HandleFromInput(ctx, 0), &iterator_resource));
core::ScopedUnref unref_iterator(iterator_resource);
IteratorVariantSerializer serializer;
OP_REQUIRES_OK(ctx, serializer.InitializeFromIterator(
ctx, external_state_policy_, iterator_resource));
Tensor* serialized_t;
OP_REQUIRES_OK(ctx,
ctx->allocate_output(0, TensorShape({serializer.NumTensors()}),
&serialized_t));
OP_REQUIRES_OK(ctx, serializer.Serialize(serialized_t));
}
void DeserializeIteratorOp::Compute(OpKernelContext* ctx) {
tensorflow::ResourceTagger tag(kTFDataResourceTag,
ctx->op_kernel().type_string());
IteratorResource* iterator_resource;
OP_REQUIRES_OK(
ctx, LookupResource(ctx, HandleFromInput(ctx, 0), &iterator_resource));
core::ScopedUnref unref_iterator(iterator_resource);
const Tensor* serialized_t;
OP_REQUIRES_OK(ctx, ctx->input("serialized", &serialized_t));
IteratorVariantSerializer serializer;
OP_REQUIRES_OK(ctx, serializer.InitFromTensor(serialized_t));
Status s = iterator_resource->Restore(ctx, serializer.GetReader());
if (!s.ok()) {
OP_REQUIRES_OK(
ctx,
errors::CreateWithUpdatedMessage(
s, absl::StrCat(
"Failed to restore dataset iterator from checkpoint: ",
s.message(),
". Make sure the dataset definition has not changed between "
"the process that saved the checkpoint and the process that "
"is restoring it.")));
}
}
namespace {
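// Kernel registrations. Where both CPU and GPU kernels exist, the CPU kernel
// is registered with the higher priority.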
REGISTER_KERNEL_BUILDER(Name("Iterator").Device(DEVICE_CPU), IteratorHandleOp);
REGISTER_KERNEL_BUILDER(Name("IteratorV2").Device(DEVICE_CPU).Priority(2),
IteratorHandleOp);
REGISTER_KERNEL_BUILDER(Name("IteratorV2").Device(DEVICE_GPU).Priority(1),
IteratorHandleOp);
REGISTER_KERNEL_BUILDER(Name("MakeIterator").Device(DEVICE_CPU).Priority(2),
MakeIteratorOp);
REGISTER_KERNEL_BUILDER(
Name("MakeIterator").Device(DEVICE_GPU).Priority(1).HostMemory("dataset"),
MakeIteratorOp);
REGISTER_KERNEL_BUILDER(Name("DeleteIterator").Device(DEVICE_CPU).Priority(2),
DeleteIteratorOp);
REGISTER_KERNEL_BUILDER(Name("DeleteIterator").Device(DEVICE_GPU).Priority(1),
DeleteIteratorOp);
REGISTER_KERNEL_BUILDER(
Name("AnonymousIterator").Device(DEVICE_CPU).Priority(2),
AnonymousIteratorHandleOp);
REGISTER_KERNEL_BUILDER(
Name("AnonymousIterator").Device(DEVICE_GPU).Priority(1),
AnonymousIteratorHandleOp);
REGISTER_KERNEL_BUILDER(
Name("AnonymousIteratorV2").Device(DEVICE_CPU).Priority(2),
AnonymousIteratorHandleOp);
REGISTER_KERNEL_BUILDER(
Name("AnonymousIteratorV2").Device(DEVICE_GPU).Priority(1),
AnonymousIteratorHandleOp);
REGISTER_KERNEL_BUILDER(
Name("AnonymousIteratorV3").Device(DEVICE_CPU).Priority(2),
AnonymousIteratorHandleOp);
REGISTER_KERNEL_BUILDER(
Name("AnonymousIteratorV3").Device(DEVICE_GPU).Priority(1),
AnonymousIteratorHandleOp);
REGISTER_KERNEL_BUILDER(Name("DatasetToSingleElement").Device(DEVICE_CPU),
ToSingleElementOp);
REGISTER_KERNEL_BUILDER(Name("OneShotIterator").Device(DEVICE_CPU),
OneShotIteratorOp);
REGISTER_KERNEL_BUILDER(Name("IteratorGetNext").Device(DEVICE_CPU).Priority(2),
IteratorGetNextOp);
REGISTER_KERNEL_BUILDER(Name("IteratorGetNext").Device(DEVICE_GPU).Priority(1),
IteratorGetNextOp);
REGISTER_KERNEL_BUILDER(
Name("IteratorGetNextSync").Device(DEVICE_CPU).Priority(2),
IteratorGetNextOp);
REGISTER_KERNEL_BUILDER(
Name("IteratorGetNextSync").Device(DEVICE_GPU).Priority(1),
IteratorGetNextOp);
REGISTER_KERNEL_BUILDER(
Name("IteratorGetNextAsOptional").Device(DEVICE_CPU).Priority(2),
IteratorGetNextAsOptionalOp);
REGISTER_KERNEL_BUILDER(
Name("IteratorGetNextAsOptional").Device(DEVICE_GPU).Priority(1),
IteratorGetNextAsOptionalOp);
REGISTER_KERNEL_BUILDER(
Name("IteratorToStringHandle").Device(DEVICE_CPU).Priority(2),
IteratorToStringHandleOp);
REGISTER_KERNEL_BUILDER(Name("IteratorToStringHandle")
.Device(DEVICE_GPU)
.HostMemory("string_handle")
.Priority(1),
IteratorToStringHandleOp);
REGISTER_KERNEL_BUILDER(Name("IteratorFromStringHandle").Device(DEVICE_CPU),
IteratorFromStringHandleOp);
REGISTER_KERNEL_BUILDER(
Name("IteratorFromStringHandleV2").Device(DEVICE_CPU).Priority(2),
IteratorFromStringHandleOp);
REGISTER_KERNEL_BUILDER(Name("IteratorFromStringHandleV2")
.Device(DEVICE_GPU)
.HostMemory("string_handle")
.Priority(1),
IteratorFromStringHandleOp);
REGISTER_KERNEL_BUILDER(Name("SerializeIterator").Device(DEVICE_CPU),
SerializeIteratorOp);
REGISTER_KERNEL_BUILDER(Name("DeserializeIterator").Device(DEVICE_CPU),
DeserializeIteratorOp);
REGISTER_KERNEL_BUILDER(Name("IteratorGetModelProto").Device(DEVICE_CPU),
IteratorGetModelProtoOp);
}
}
} | #include "tensorflow/core/kernels/data/iterator_ops.h"
#include <cstdint>
#include <utility>
#include <vector>
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/common_runtime/process_function_library_runtime.h"
#include "tensorflow/core/data/dataset_test_base.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/monitoring/cell_reader.h"
#include "tensorflow/core/lib/monitoring/test_utils.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/refcount.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace data {
namespace {
using ::tensorflow::monitoring::testing::CellReader;
using ::tensorflow::monitoring::testing::Histogram;
class IteratorOpsTest : public DatasetOpsTestBase {
public:
absl::StatusOr<core::RefCountPtr<IteratorResource>> GetIteratorResource() {
FunctionLibraryRuntime* flr = nullptr;
std::unique_ptr<DeviceMgr> device_mgr;
std::unique_ptr<FunctionLibraryDefinition> flib_def;
    std::unique_ptr<ProcessFunctionLibraryRuntime> pflr;
    TF_RETURN_IF_ERROR(dataset_ctx_->function_library()->Clone(
        &flib_def, &pflr, &flr, true));
    core::RefCountPtr<IteratorResource> iter_resource(
        new IteratorResource(dataset_ctx_->env(), dataset_->output_dtypes(),
                             dataset_->output_shapes(), std::move(device_mgr),
                             std::move(flib_def), std::move(pflr), flr));
TF_RETURN_IF_ERROR(
iter_resource->SetIteratorFromDataset(dataset_ctx_.get(), dataset_));
return iter_resource;
}
absl::StatusOr<std::vector<std::vector<Tensor>>> GetIteratorOutput(
IteratorResource& iterator) {
std::vector<std::vector<Tensor>> output;
for (bool end_of_sequence = false; !end_of_sequence;) {
std::vector<Tensor> tensors;
TF_RETURN_IF_ERROR(
iterator.GetNext(dataset_ctx_.get(), &tensors, &end_of_sequence));
if (end_of_sequence) {
break;
}
output.push_back(std::move(tensors));
}
return output;
}
};
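// RangeDatasetParams(0, 10, 3) yields the 4 elements {0, 3, 6, 9}, so
// GetIteratorOutput() calls GetNext() 5 times (the last call reports
// end-of-sequence), which is what the latency and gap histograms record.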
TEST_F(IteratorOpsTest, CollectMetrics) {
CellReader<Histogram> latency("/tensorflow/data/getnext_duration");
CellReader<Histogram> iterator_gap("/tensorflow/data/iterator_gap");
CellReader<int64_t> throughput("/tensorflow/data/bytes_fetched");
CellReader<int64_t> iterator_lifetime("/tensorflow/data/iterator_lifetime");
CellReader<int64_t> iterator_busy("/tensorflow/data/iterator_busy");
EXPECT_FLOAT_EQ(latency.Delta().num(), 0.0);
EXPECT_FLOAT_EQ(iterator_gap.Delta().num(), 0.0);
EXPECT_EQ(throughput.Delta(), 0.0);
EXPECT_EQ(iterator_lifetime.Delta(), 0.0);
EXPECT_EQ(iterator_busy.Delta(), 0.0);
RangeDatasetParams dataset_params = RangeDatasetParams(0, 10, 3);
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK_AND_ASSIGN(core::RefCountPtr<IteratorResource> iter_resource,
GetIteratorResource());
TF_ASSERT_OK_AND_ASSIGN(std::vector<std::vector<Tensor>> output,
GetIteratorOutput(*iter_resource));
EXPECT_EQ(output.size(), 4);
Histogram latency_histogram = latency.Delta();
EXPECT_FLOAT_EQ(latency_histogram.num(), 5.0);
EXPECT_GT(latency_histogram.sum(), 0.0);
Histogram iterator_gap_histogram = iterator_gap.Delta();
EXPECT_FLOAT_EQ(iterator_gap_histogram.num(), 5.0);
EXPECT_GT(iterator_gap_histogram.sum(), 0.0);
EXPECT_GT(throughput.Delta(), 0);
EXPECT_GT(iterator_lifetime.Delta(), 0);
EXPECT_GT(iterator_busy.Delta(), 0.0);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/iterator_ops.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/iterator_ops_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b1d8c80a-0c7a-41ea-a5ca-6806d5cb43e9 | cpp | tensorflow/tensorflow | stablehlo_add | tensorflow/lite/kernels/stablehlo_add.cc | tensorflow/lite/kernels/stablehlo_add_test.cc | #include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/stablehlo_elementwise.h"
namespace tflite::ops::builtin {
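// Registers the StableHLO add kernel. Prepare and eval are shared across the
// StableHLO elementwise ops; only the ComputationType template argument
// differs.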
TfLiteRegistration* Register_STABLEHLO_ADD() {
static TfLiteRegistration r = {nullptr, nullptr, ElementwisePrepare,
ElementwiseEval<ComputationType::kAdd>};
return &r;
}
} | #include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/core/subgraph.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace {
using ::testing::ElementsAre;
class AddOpModel : public SingleOpModel {
public:
AddOpModel(const TensorData& input1, const TensorData& input2,
const TensorData& output) {
input1_ = AddInput(input1);
input2_ = AddInput(input2);
output_ = AddOutput(output);
SetBuiltinOp(BuiltinOperator_STABLEHLO_ADD, BuiltinOptions_NONE, 0);
SetBypassDefaultDelegates();
BuildInterpreter({GetShape(input1_), GetShape(input2_)});
}
int input1() { return input1_; }
int input2() { return input2_; }
std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
protected:
int input1_;
int input2_;
int output_;
};
TEST(StablehloElementwise, AddWorks) {
AddOpModel model({TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {}});
model.PopulateTensor<float>(model.input1(), {-2.0, 0.2, 0.7, 0.8});
model.PopulateTensor<float>(model.input2(), {0.1, 0.2, 0.3, 0.5});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(), ElementsAre(-1.9, 0.4, 1.0, 1.3));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/stablehlo_add.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/stablehlo_add_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
add2a9f3-d5e8-40f7-9af8-1c492ad3a6c9 | cpp | google/arolla | dispatch_operator | arolla/expr/operator_loader/dispatch_operator.cc | arolla/expr/operator_loader/dispatch_operator_test.cc | #include "arolla/expr/operator_loader/dispatch_operator.h"
#include <cstddef>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/base/nullability.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/escaping.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "arolla/expr/basic_expr_operator.h"
#include "arolla/expr/expr.h"
#include "arolla/expr/expr_attributes.h"
#include "arolla/expr/expr_node.h"
#include "arolla/expr/expr_operator.h"
#include "arolla/expr/expr_operator_signature.h"
#include "arolla/expr/operator_loader/generic_operator_overload_condition.h"
#include "arolla/expr/qtype_utils.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/tuple_qtype.h"
#include "arolla/util/fingerprint.h"
#include "arolla/util/repr.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla::operator_loader {
using ::arolla::expr::ExprAttributes;
using ::arolla::expr::ExprNode;
using ::arolla::expr::ExprNodePtr;
using ::arolla::expr::ExprOperatorPtr;
using ::arolla::expr::GetPlaceholderKeys;
using ::arolla::expr::ValidateDepsCount;
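// Creates a dispatch operator from a signature, a list of overloads, and a
// readiness condition that determines when enough input qtypes are known to
// attempt dispatch. Each overload pairs an operator with a condition
// evaluated over the tuple of input qtypes (exposed to the condition
// expressions as the leaf `input_tuple_qtype`). Overload conditions must not
// contain placeholders, and parameters with default values are rejected.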
absl::StatusOr<ExprOperatorPtr> DispatchOperator::Make(
absl::string_view name, expr::ExprOperatorSignature signature,
std::vector<Overload> overloads,
expr::ExprNodePtr dispatch_readiness_condition) {
RETURN_IF_ERROR(ValidateSignature(signature));
for (const auto& overload : overloads) {
const auto& placeholder_keys = GetPlaceholderKeys(overload.condition);
if (!placeholder_keys.empty()) {
return absl::InvalidArgumentError(
"placeholders are not supported "
"in dispatch operator overload conditions");
}
}
for (const auto& param : signature.parameters) {
if (param.default_value.has_value()) {
return absl::InvalidArgumentError(
"signatures with the default values are not supported "
"in dispatch operator; "
"got signature: " +
GetExprOperatorSignatureSpec(signature));
}
}
std::vector<ExprNodePtr> overload_conditions;
overload_conditions.reserve(overloads.size() + 1);
for (const auto& overload : overloads) {
overload_conditions.push_back(overload.condition);
}
overload_conditions.push_back(dispatch_readiness_condition);
ASSIGN_OR_RETURN(auto overloads_condition_fn,
MakeGenericOperatorOverloadConditionFn(overload_conditions));
FingerprintHasher hasher("::arolla::operator_loader::DispatchOperator");
hasher.Combine(name, signature, dispatch_readiness_condition->fingerprint(),
overloads.size());
for (const auto& overload : overloads) {
hasher.Combine(overload.name, overload.op->fingerprint(),
overload.condition->fingerprint());
}
return std::make_shared<DispatchOperator>(
PrivateConstructorTag{}, name, std::move(signature), std::move(overloads),
std::move(overloads_condition_fn),
std::move(dispatch_readiness_condition), std::move(hasher).Finish());
}
DispatchOperator::DispatchOperator(
PrivateConstructorTag, absl::string_view name,
expr::ExprOperatorSignature signature, std::vector<Overload> overloads,
GenericOperatorOverloadConditionFn overloads_condition_fn,
expr::ExprNodePtr dispatch_readiness_condition, Fingerprint fingerprint)
: ExprOperatorWithFixedSignature(name, signature, "", fingerprint),
overloads_(std::move(overloads)),
overloads_condition_fn_(std::move(overloads_condition_fn)),
dispatch_readiness_condition_(std::move(dispatch_readiness_condition)) {}
absl::StatusOr<expr::ExprAttributes> DispatchOperator::InferAttributes(
absl::Span<const expr::ExprAttributes> inputs) const {
ASSIGN_OR_RETURN(const auto* overload, LookupImpl(inputs));
if (overload == nullptr) {
return ExprAttributes{};
}
ASSIGN_OR_RETURN(expr::ExprAttributes attr,
overload->op->InferAttributes(inputs),
_ << "in " << absl::Utf8SafeCHexEscape(overload->name)
<< " overload of DispatchOperator");
return attr;
}
absl::StatusOr<expr::ExprNodePtr> DispatchOperator::ToLowerLevel(
const expr::ExprNodePtr& node) const {
ASSIGN_OR_RETURN(const auto* overload,
LookupImpl(GetExprAttrs(node->node_deps())));
if (overload == nullptr) {
return node;
}
auto expr = ExprNode::UnsafeMakeOperatorNode(ExprOperatorPtr(overload->op),
std::vector(node->node_deps()),
ExprAttributes(node->attr()));
ASSIGN_OR_RETURN(expr::ExprNodePtr lowered, expr->op()->ToLowerLevel(expr),
_ << "in " << absl::Utf8SafeCHexEscape(overload->name)
<< " overload of DispatchOperator");
return lowered;
}
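// Evaluates every overload condition against the tuple of input qtypes,
// mapping missing qtypes to NOTHING. Returns the unique matching overload;
// nullptr when the readiness condition is not yet satisfied; or an error if
// the inputs are fully typed but the operator is broken, or if zero or
// several overload conditions match.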
absl::StatusOr<absl::Nullable<const DispatchOperator::Overload*>>
DispatchOperator::LookupImpl(absl::Span<const ExprAttributes> inputs) const {
RETURN_IF_ERROR(ValidateDepsCount(signature(), inputs.size(),
absl::StatusCode::kInvalidArgument));
auto input_qtypes = GetAttrQTypes(inputs);
for (auto& input_qtype : input_qtypes) {
if (input_qtype == nullptr) {
input_qtype = GetNothingQType();
}
}
ASSIGN_OR_RETURN(auto is_condition_passed,
overloads_condition_fn_(MakeTupleQType(input_qtypes)));
if (is_condition_passed.size() != overloads_.size() + 1) {
return absl::InternalError("the state of DispatchOperator is invalid");
}
bool ready_to_dispatch = is_condition_passed.back();
if (!ready_to_dispatch) {
if (HasAllAttrQTypes(inputs)) {
return absl::FailedPreconditionError(
absl::StrFormat("the operator is broken for argument types %s",
FormatTypeVector(input_qtypes)));
}
return nullptr;
}
std::vector<size_t> matching_ids;
for (size_t i = 0; i < overloads_.size(); ++i) {
if (is_condition_passed[i]) {
matching_ids.push_back(i);
}
}
if (matching_ids.size() > 1) {
return absl::FailedPreconditionError(absl::StrFormat(
"constraints of the multiple overloads (%s) passed for argument "
"types %s",
absl::StrJoin(matching_ids, ", ",
[&](std::string* out, size_t id) {
absl::StrAppend(
out, absl::Utf8SafeCHexEscape(overloads_[id].name));
}),
FormatTypeVector(input_qtypes)));
}
if (matching_ids.empty()) {
return absl::InvalidArgumentError(
absl::StrFormat("no suitable overload for argument types %s",
FormatTypeVector(input_qtypes)));
}
return &overloads_[matching_ids[0]];
}
ReprToken DispatchOperator::GenReprToken() const {
return {absl::StrFormat(
"<DispatchOperator: name='%s', signature='%s', cases=['%s']>",
absl::Utf8SafeCHexEscape(display_name()),
GetExprOperatorSignatureSpec(signature()),
absl::StrJoin(
overloads_, "', '", [](std::string* out, const auto& overload) {
absl::StrAppend(out, absl::Utf8SafeCHexEscape(overload.name));
}))};
}
absl::string_view DispatchOperator::py_qvalue_specialization_key() const {
return "::arolla::operator_loader::DispatchOperator";
}
} | #include "arolla/expr/operator_loader/dispatch_operator.h"
#include <cstdint>
#include <memory>
#include <utility>
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/status/status.h"
#include "absl/status/status_matchers.h"
#include "absl/status/statusor.h"
#include "arolla/expr/expr.h"
#include "arolla/expr/expr_attributes.h"
#include "arolla/expr/expr_node.h"
#include "arolla/expr/expr_operator_signature.h"
#include "arolla/expr/lambda_expr_operator.h"
#include "arolla/expr/operator_loader/qtype_constraint.h"
#include "arolla/expr/operator_loader/restricted_lambda_operator.h"
#include "arolla/expr/testing/testing.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/qtype/typed_value.h"
#include "arolla/util/bytes.h"
#include "arolla/util/testing/repr_token_eq.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla::operator_loader {
namespace {
using ::absl_testing::IsOkAndHolds;
using ::absl_testing::StatusIs;
using ::arolla::GetNothingQType;
using ::arolla::expr::CallOp;
using ::arolla::expr::ExprOperatorSignature;
using ::arolla::expr::LambdaOperator;
using ::arolla::expr::Leaf;
using ::arolla::expr::Literal;
using ::arolla::expr::Placeholder;
using ::arolla::testing::EqualsAttr;
using ::arolla::testing::EqualsExpr;
using ::arolla::testing::ReprTokenEq;
using ::arolla::testing::WithQTypeAnnotation;
using ::testing::HasSubstr;
using Attr = ::arolla::expr::ExprAttributes;
class DispatchOperatorTest : public ::testing::Test {
protected:
static absl::StatusOr<expr::ExprNodePtr> arg_first() {
return CallOp("core.get_nth", {Leaf("input_tuple_qtype"), Literal(0)});
}
static absl::StatusOr<expr::ExprNodePtr> arg_second() {
return CallOp("core.get_nth", {Leaf("input_tuple_qtype"), Literal(1)});
}
static absl::StatusOr<expr::ExprNodePtr> arg_first_qtype() {
return CallOp("qtype.get_field_qtype",
{Leaf("input_tuple_qtype"), Literal(0)});
}
static absl::StatusOr<expr::ExprNodePtr> args_from_second_qtype() {
return CallOp("qtype.slice_tuple_qtype",
{Leaf("input_tuple_qtype"), Literal(1), Literal(-1)});
}
static absl::StatusOr<std::shared_ptr<const LambdaOperator>>
MakeBaseBinaryOp() {
return expr::MakeLambdaOperator(
"with_name", ExprOperatorSignature{{"x"}, {"name"}},
SuppressUnusedWarning("name", Placeholder("x")),
"doc-string-for-lambda");
}
static absl::StatusOr<QTypeConstraint> MakeBaseBinaryQTypeConstraint() {
ASSIGN_OR_RETURN(auto predicate_expr,
CallOp("core.equal",
{Placeholder("name"), Literal(GetQType<Bytes>())}));
return QTypeConstraint{predicate_expr,
"expected name to be bytes, got {name}"};
}
static absl::StatusOr<std::shared_ptr<const RestrictedLambdaOperator>>
MakeBinaryOp() {
ASSIGN_OR_RETURN(auto lambda_op, MakeBaseBinaryOp());
ASSIGN_OR_RETURN(auto qtype_constraint, MakeBaseBinaryQTypeConstraint());
ASSIGN_OR_RETURN(auto restricted_lambda_op,
RestrictedLambdaOperator::Make(
std::move(lambda_op), {std::move(qtype_constraint)}));
return std::dynamic_pointer_cast<const RestrictedLambdaOperator>(
restricted_lambda_op);
}
static absl::StatusOr<std::shared_ptr<const LambdaOperator>>
MakeBaseUnaryOp() {
return expr::MakeLambdaOperator("noop", ExprOperatorSignature{{"x"}},
Placeholder("x"),
"doc-string-for-unary-case");
}
static absl::StatusOr<QTypeConstraint> MakeBaseUnaryQTypeConstraint() {
ASSIGN_OR_RETURN(auto predicate_expr,
CallOp("qtype.is_numeric_qtype", {Placeholder("x")}));
return QTypeConstraint{predicate_expr, "expected x to be numeric, got {x}"};
}
static absl::StatusOr<std::shared_ptr<const RestrictedLambdaOperator>>
MakeUnaryOp() {
ASSIGN_OR_RETURN(auto lambda_op, MakeBaseUnaryOp());
ASSIGN_OR_RETURN(auto qtype_constraint, MakeBaseUnaryQTypeConstraint());
ASSIGN_OR_RETURN(auto restricted_lambda_op,
RestrictedLambdaOperator::Make(
std::move(lambda_op), {std::move(qtype_constraint)}));
return std::dynamic_pointer_cast<const RestrictedLambdaOperator>(
restricted_lambda_op);
}
static absl::StatusOr<expr::ExprNodePtr> MakeUnaryCondition() {
auto one_argument =
CallOp("core.equal",
{CallOp("qtype.get_field_count", {args_from_second_qtype()}),
Literal(0)});
auto is_numeric = CallOp("qtype.is_scalar_qtype", {arg_first_qtype()});
return CallOp("core.presence_and", {one_argument, is_numeric});
}
static absl::StatusOr<expr::ExprNodePtr> MakeDispatchReadinessCondition(
const std::vector<int64_t> ids) {
auto expr = CallOp("core.not_equal",
{Leaf("input_tuple_qtype"), Literal(GetNothingQType())});
for (auto id : ids) {
auto additional_expr = CallOp(
"core.not_equal", {CallOp("qtype.get_field_qtype",
{Leaf("input_tuple_qtype"), Literal(id)}),
Literal(GetNothingQType())});
expr = CallOp("core.presence_and", {expr, additional_expr});
}
return expr;
}
static absl::StatusOr<std::shared_ptr<const DispatchOperator>> MakeOp() {
ASSIGN_OR_RETURN(auto binary_op, MakeBinaryOp());
ASSIGN_OR_RETURN(auto unary_op, MakeUnaryOp());
ASSIGN_OR_RETURN(auto unary_condition, MakeUnaryCondition());
ASSIGN_OR_RETURN(auto not_unary_condition,
CallOp("core.presence_not", {unary_condition}));
ASSIGN_OR_RETURN(auto readiness_condition,
MakeDispatchReadinessCondition({0}));
ASSIGN_OR_RETURN(
auto dispatch_op,
DispatchOperator::Make("op.name",
expr::ExprOperatorSignature{
{"x"},
{.name = "args",
.kind = ExprOperatorSignature::Parameter::
Kind::kVariadicPositional}},
{{.name = "unary\tcase",
.op = std::move(unary_op),
.condition = std::move(unary_condition)},
{.name = "default",
.op = std::move(binary_op),
.condition = std::move(not_unary_condition)}},
readiness_condition));
return std::dynamic_pointer_cast<const DispatchOperator>(dispatch_op);
}
static absl::StatusOr<std::shared_ptr<const DispatchOperator>>
MakeOpNoDefault() {
ASSIGN_OR_RETURN(auto no_op, MakeBaseUnaryOp());
ASSIGN_OR_RETURN(auto unary_op, MakeUnaryOp());
ASSIGN_OR_RETURN(auto unary_condition, MakeUnaryCondition());
ASSIGN_OR_RETURN(auto readiness_condition,
MakeDispatchReadinessCondition({0}));
ASSIGN_OR_RETURN(
auto dispatch_op,
DispatchOperator::Make("op.name",
expr::ExprOperatorSignature{
{"x"},
{.name = "args",
.kind = ExprOperatorSignature::Parameter::
Kind::kVariadicPositional}},
{{.name = "unary\tcase",
.op = std::move(unary_op),
.condition = std::move(unary_condition)}},
readiness_condition));
return std::dynamic_pointer_cast<const DispatchOperator>(dispatch_op);
}
static absl::StatusOr<std::shared_ptr<const DispatchOperator>>
MakeDuplicatedOp() {
ASSIGN_OR_RETURN(auto binary_op, MakeBinaryOp());
ASSIGN_OR_RETURN(auto unary_op_a, MakeUnaryOp());
ASSIGN_OR_RETURN(auto unary_op_b, MakeUnaryOp());
ASSIGN_OR_RETURN(auto unary_condition_a, MakeUnaryCondition());
ASSIGN_OR_RETURN(auto unary_condition_b, MakeUnaryCondition());
ASSIGN_OR_RETURN(auto not_unary_condition,
CallOp("core.presence_not", {unary_condition_a}));
ASSIGN_OR_RETURN(auto readiness_condition,
MakeDispatchReadinessCondition({0}));
ASSIGN_OR_RETURN(
auto dispatch_op,
DispatchOperator::Make("op.name",
expr::ExprOperatorSignature{
{"x"},
{.name = "args",
.kind = ExprOperatorSignature::Parameter::
Kind::kVariadicPositional}},
{{.name = "unary_case_a",
.op = std::move(unary_op_a),
.condition = std::move(unary_condition_a)},
{.name = "unary_case_b",
.op = std::move(unary_op_b),
.condition = std::move(unary_condition_b)},
{.name = "binary_case",
.op = std::move(binary_op),
.condition = std::move(not_unary_condition)}},
readiness_condition));
return std::dynamic_pointer_cast<const DispatchOperator>(dispatch_op);
}
};
}
TEST_F(DispatchOperatorTest, PublicProperties) {
ASSERT_OK_AND_ASSIGN(auto op, MakeOp());
EXPECT_EQ(op->display_name(), "op.name");
EXPECT_EQ(op->doc(), "");
}
TEST_F(DispatchOperatorTest, InferAttributes) {
ASSERT_OK_AND_ASSIGN(auto op, MakeOp());
EXPECT_THAT(op->InferAttributes({Attr{}}), IsOkAndHolds(EqualsAttr(nullptr)));
EXPECT_THAT(op->InferAttributes({Attr{}, Attr{}}),
IsOkAndHolds(EqualsAttr(nullptr)));
EXPECT_THAT(op->InferAttributes({Attr{}, Attr{}, Attr{}}),
IsOkAndHolds(EqualsAttr(nullptr)));
EXPECT_THAT(op->InferAttributes({Attr(GetQType<int>()), Attr{}, Attr{}}),
StatusIs(absl::StatusCode::kInvalidArgument,
"incorrect number of dependencies passed to an "
"operator node: expected 2 but got 3; in default "
"overload of DispatchOperator"));
EXPECT_THAT(op->InferAttributes({}),
StatusIs(absl::StatusCode::kInvalidArgument,
"incorrect number of dependencies passed to an "
"operator node: expected 1 but got 0"));
EXPECT_THAT(op->InferAttributes({Attr(GetQType<Bytes>())}),
StatusIs(absl::StatusCode::kInvalidArgument,
"expected x to be numeric, got BYTES; in unary\\tcase "
"overload of DispatchOperator"));
EXPECT_THAT(op->InferAttributes({Attr(GetQType<int>())}),
IsOkAndHolds(EqualsAttr(GetQType<int>())));
EXPECT_THAT(op->InferAttributes({Attr(GetQType<int>()), Attr{}}),
IsOkAndHolds(EqualsAttr(nullptr)));
EXPECT_THAT(
op->InferAttributes({Attr(GetQType<int>()), Attr(GetQType<Bytes>())}),
IsOkAndHolds(EqualsAttr(GetQType<int>())));
}
TEST_F(DispatchOperatorTest, InferAttributesNoDefault) {
ASSERT_OK_AND_ASSIGN(auto op, MakeOpNoDefault());
EXPECT_THAT(op->InferAttributes({Attr{}}), IsOkAndHolds(EqualsAttr(nullptr)));
EXPECT_THAT(op->InferAttributes({Attr{}, Attr{}}),
IsOkAndHolds(EqualsAttr(nullptr)));
EXPECT_THAT(
op->InferAttributes({Attr(GetQType<int>()), Attr{}}),
StatusIs(absl::StatusCode::kInvalidArgument,
"no suitable overload for argument types (INT32,NOTHING)"));
EXPECT_THAT(op->InferAttributes({}),
StatusIs(absl::StatusCode::kInvalidArgument,
"incorrect number of dependencies passed to an "
"operator node: expected 1 but got 0"));
EXPECT_THAT(op->InferAttributes({Attr(GetQType<Bytes>())}),
StatusIs(absl::StatusCode::kInvalidArgument,
"expected x to be numeric, got BYTES; in unary\\tcase "
"overload of DispatchOperator"));
EXPECT_THAT(op->InferAttributes({Attr(GetQType<int>())}),
IsOkAndHolds(EqualsAttr(GetQType<int>())));
}
TEST_F(DispatchOperatorTest, SignatureWithDefaultValues) {
ASSERT_OK_AND_ASSIGN(auto binary_op, MakeBinaryOp());
ASSERT_OK_AND_ASSIGN(auto unary_op, MakeUnaryOp());
ASSERT_OK_AND_ASSIGN(auto readiness_condition,
MakeDispatchReadinessCondition({}));
ASSERT_OK_AND_ASSIGN(auto predicate_expr_xx,
CallOp("core.equal", {arg_first(), arg_first()}));
EXPECT_THAT(
DispatchOperator::Make("op",
expr::ExprOperatorSignature{
{.name = "x",
.default_value = TypedValue::FromValue(false),
.kind = ExprOperatorSignature::Parameter::
Kind::kPositionalOrKeyword}},
{{.name = "foo",
.op = std::move(binary_op),
.condition = std::move(predicate_expr_xx)}},
readiness_condition),
StatusIs(absl::StatusCode::kInvalidArgument,
"signatures with the default values are not supported in "
"dispatch operator; got signature: x="));
}
TEST_F(DispatchOperatorTest, ToLowerLevel) {
auto leaf = Leaf("leaf");
ASSERT_OK_AND_ASSIGN(auto leaf_with_qtype,
WithQTypeAnnotation(Leaf("leaf"), GetQType<float>()));
ASSERT_OK_AND_ASSIGN(auto leaf_with_nothing_qtype,
WithQTypeAnnotation(Leaf("leaf"), GetNothingQType()));
auto name_literal = Literal(Bytes("name"));
auto name_placeholder = Placeholder("name");
{
ASSERT_OK_AND_ASSIGN(auto expr, CallOp(MakeOp(), {leaf}));
ASSERT_OK_AND_ASSIGN(auto noop_leaf, CallOp(MakeUnaryOp(), {leaf}));
EXPECT_EQ(expr->qtype(), nullptr);
EXPECT_THAT(ToLowest(expr), IsOkAndHolds(EqualsExpr(expr)));
}
{
ASSERT_OK_AND_ASSIGN(auto expr, CallOp(MakeOp(), {leaf, name_placeholder}));
ASSERT_OK_AND_ASSIGN(auto binary_op,
CallOp(MakeBinaryOp(), {leaf, name_placeholder}));
EXPECT_EQ(expr->qtype(), nullptr);
EXPECT_THAT(ToLowest(expr), IsOkAndHolds(EqualsExpr(expr)));
}
{
ASSERT_OK_AND_ASSIGN(auto expr, CallOp(MakeOp(), {leaf, name_literal}));
ASSERT_OK_AND_ASSIGN(auto binary_op,
CallOp(MakeBinaryOp(), {leaf, name_literal}));
EXPECT_EQ(expr->qtype(), nullptr);
EXPECT_THAT(ToLowest(expr), IsOkAndHolds(EqualsExpr(expr)));
}
{
ASSERT_OK_AND_ASSIGN(auto expr, CallOp(MakeOp(), {leaf_with_qtype}));
EXPECT_EQ(expr->qtype(), GetQType<float>());
EXPECT_THAT(ToLowest(expr), IsOkAndHolds(EqualsExpr(leaf_with_qtype)));
}
{
ASSERT_OK_AND_ASSIGN(auto expr,
CallOp(MakeOp(), {leaf_with_qtype, name_placeholder}));
ASSERT_OK_AND_ASSIGN(
auto binary_op,
CallOp(MakeBinaryOp(), {leaf_with_qtype, name_placeholder}));
EXPECT_EQ(expr->qtype(), nullptr);
EXPECT_THAT(ToLowest(expr), IsOkAndHolds(EqualsExpr(binary_op)));
}
{
ASSERT_OK_AND_ASSIGN(auto expr,
CallOp(MakeOp(), {leaf_with_qtype, name_literal}));
EXPECT_EQ(expr->qtype(), GetQType<float>());
EXPECT_THAT(ToLowest(expr), IsOkAndHolds(EqualsExpr(leaf_with_qtype)));
}
{
ASSERT_OK_AND_ASSIGN(
auto expr, CallOp(MakeDuplicatedOp(), {leaf_with_qtype, name_literal}));
EXPECT_EQ(expr->qtype(), GetQType<float>());
EXPECT_THAT(ToLowest(expr), IsOkAndHolds(EqualsExpr(leaf_with_qtype)));
}
{
EXPECT_THAT(
CallOp(MakeDuplicatedOp(), {leaf_with_qtype}),
StatusIs(
absl::StatusCode::kFailedPrecondition,
HasSubstr("constraints of the multiple overloads (unary_case_a, "
"unary_case_b) passed for argument types (FLOAT32)")));
}
{
EXPECT_THAT(CallOp(MakeOp(), {}),
StatusIs(absl::StatusCode::kInvalidArgument,
"missing 1 required argument: 'x'; while binding "
"operator 'op.name'"));
}
{
EXPECT_THAT(
CallOp(MakeOp(), {leaf_with_nothing_qtype}),
StatusIs(
absl::StatusCode::kFailedPrecondition,
HasSubstr("the operator is broken for argument types (NOTHING)")));
}
{
ASSERT_OK_AND_ASSIGN(auto expr,
CallOp(MakeOp(), {leaf_with_nothing_qtype, leaf}));
ASSERT_OK_AND_ASSIGN(auto binary_op,
CallOp(MakeBinaryOp(), {leaf, name_literal}));
EXPECT_EQ(expr->qtype(), nullptr);
EXPECT_THAT(ToLowest(expr), IsOkAndHolds(EqualsExpr(expr)));
}
{
EXPECT_THAT(CallOp(MakeOp(), {leaf_with_nothing_qtype, leaf_with_qtype}),
StatusIs(absl::StatusCode::kFailedPrecondition,
HasSubstr("the operator is broken for argument types "
"(NOTHING,FLOAT32)")));
}
}
TEST_F(DispatchOperatorTest, Repr) {
ASSERT_OK_AND_ASSIGN(auto op, MakeOp());
EXPECT_THAT(op->GenReprToken(),
ReprTokenEq("<DispatchOperator: name='op.name', signature='x, "
"*args', cases=['unary\\tcase', 'default']>"));
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/expr/operator_loader/dispatch_operator.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/expr/operator_loader/dispatch_operator_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
8a82f1ac-bf7f-4b52-9cb6-b9539c798c42 | cpp | google/tensorstore | block_queue | tensorstore/internal/container/block_queue.h | tensorstore/internal/container/block_queue_test.cc | #ifndef TENSORSTORE_INTERNAL_THREAD_BLOCK_QUEUE_H_
#define TENSORSTORE_INTERNAL_THREAD_BLOCK_QUEUE_H_
#include <stddef.h>
#include <stdint.h>
#include <algorithm>
#include <cassert>
#include <memory>
#include <utility>
#include "absl/base/attributes.h"
#include "absl/log/absl_check.h"
#include "tensorstore/internal/container/item_traits.h"
namespace tensorstore {
namespace internal_container {
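// BlockQueue is a FIFO queue backed by a singly-linked list of heap-allocated
// blocks whose capacities double from kMin up to kMax. Elements are
// constructed and destroyed through the (possibly stateful) Allocator.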
template <typename T, size_t kMin = 1024, size_t kMax = 1024,
typename Allocator = std::allocator<T>>
class BlockQueue;
template <typename T, typename Allocator>
class SQBlock {
private:
using BlockAllocator =
typename std::allocator_traits<Allocator>::template rebind_alloc<SQBlock>;
using ByteAllocator =
typename std::allocator_traits<Allocator>::template rebind_alloc<char>;
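  // The block header and its item array share one allocation. start_offset()
  // is the byte offset of the first item after the header (padded so items of
  // type T are properly aligned), and start_items() is that offset expressed
  // in whole items, rounded up.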
constexpr static ptrdiff_t start_offset() {
struct X {
SQBlock array;
T item[1];
};
return offsetof(X, item);
}
constexpr static size_t start_items() {
return (start_offset() + sizeof(T) - 1) / sizeof(T);
}
struct private_t {
private:
friend class SQBlock;
private_t() = default;
};
public:
  static SQBlock* New(int64_t c, Allocator* alloc) {
    // Small blocks reserve the header plus two item slots; larger blocks fold
    // the header into the first start_items() slots (the comma expression
    // shrinks the requested capacity accordingly).
    size_t allocation_bytes =
        (c <= start_items() + 2)
            ? (start_offset() + 2 * sizeof(T))
            : (c -= start_items(), ((c + start_items()) * sizeof(T)));
ByteAllocator byte_alloc(*alloc);
void* mem = std::allocator_traits<ByteAllocator>::allocate(
byte_alloc, allocation_bytes);
auto* as_array = static_cast<SQBlock*>(mem);
BlockAllocator array_alloc(*alloc);
std::allocator_traits<BlockAllocator>::construct(array_alloc, as_array,
private_t{}, c);
return as_array;
}
static void Delete(SQBlock* ptr, Allocator* alloc) {
const size_t allocation_bytes =
(ptr->capacity() == 2) ? (start_offset() + 2 * sizeof(T))
: (start_items() + ptr->capacity()) * sizeof(T);
BlockAllocator block_alloc(*alloc);
std::allocator_traits<BlockAllocator>::destroy(block_alloc, ptr);
void* mem = ptr;
ByteAllocator byte_alloc(*alloc);
std::allocator_traits<ByteAllocator>::deallocate(
byte_alloc, static_cast<char*>(mem), allocation_bytes);
}
SQBlock(private_t, size_t c) : end_(begin() + c), next_(nullptr) {}
SQBlock* next() const { return next_; }
void set_next(SQBlock* b) { next_ = b; }
T* begin() {
return reinterpret_cast<T*>(reinterpret_cast<char*>(this) + start_offset());
}
T* end() { return end_; }
size_t capacity() { return end() - begin(); }
private:
T* end_;
SQBlock* next_;
};
template <typename T, size_t kMin, size_t kMax, typename Allocator>
class BlockQueue {
using Block = SQBlock<T, Allocator>;
using TransferTraits = ItemTraits<T>;
static constexpr bool kDestroyIsTrivial =
TransferTraits::template destroy_is_trivial<Allocator>();
static_assert(kMin > 0);
static_assert(kMin <= kMax);
struct Cursor {
Cursor(Block* b) : block(b), ptr(b->begin()), end(b->end()) {}
Cursor() : block(nullptr), ptr(nullptr), end(nullptr) {}
Block* block;
T* ptr;
T* end;
};
public:
BlockQueue() : BlockQueue(Allocator()) {}
explicit BlockQueue(Allocator alloc)
: allocator_(std::move(alloc)), head_(), tail_(), size_(0) {}
~BlockQueue() {
Block* b = head_.block;
while (b) {
Block* next = b->next();
ClearBlock(b);
Block::Delete(b, &allocator_);
b = next;
}
}
BlockQueue(const BlockQueue&) = delete;
BlockQueue& operator=(const BlockQueue&) = delete;
size_t size() const { return size_; }
bool empty() const { return !size(); }
T& front() {
ABSL_CHECK(!empty());
return *head_.ptr;
}
const T& front() const {
ABSL_CHECK(!empty());
return *head_.ptr;
}
T& back() {
ABSL_CHECK(!empty());
return *((tail_.ptr) - 1);
}
const T& back() const {
ABSL_CHECK(!empty());
return *((tail_.ptr) - 1);
}
void push_back(const T& val) { emplace_back(val); }
void push_back(T&& val) { emplace_back(std::move(val)); }
template <typename... A>
T& emplace_back(A&&... args) {
auto* storage = emplace_back_raw();
TransferTraits::construct(&allocator_, storage, std::forward<A>(args)...);
return *storage;
}
void pop_front() {
ABSL_CHECK(!empty());
ABSL_CHECK(head_.block);
TransferTraits::destroy(&allocator_, head_.ptr);
++head_.ptr;
--size_;
if (empty()) {
ABSL_CHECK_EQ(head_.block, tail_.block);
head_.ptr = tail_.ptr = head_.block->begin();
return;
}
if (head_.ptr == head_.end) {
Block* n = head_.block->next();
Block::Delete(head_.block, &allocator_);
head_ = Cursor(n);
}
}
void clear() {
Block* b = head_.block;
if (!b) {
ABSL_CHECK(empty());
return;
}
while (b) {
Block* next = b->next();
ClearBlock(b);
if (head_.block != b) {
Block::Delete(b, &allocator_);
}
b = next;
}
b = head_.block;
b->set_next(nullptr);
head_ = tail_ = Cursor(b);
size_ = 0;
}
private:
T* emplace_back_raw() {
if (tail_.ptr == tail_.end) {
size_t capacity = kMin;
if (tail_.block) {
capacity = 2 * tail_.block->capacity();
if (capacity > kMax) capacity = kMax;
}
auto* b = Block::New(capacity, &allocator_);
if (!head_.block) {
ABSL_CHECK(tail_.block == nullptr);
head_ = Cursor(b);
} else {
ABSL_CHECK(head_.block != nullptr);
tail_.block->set_next(b);
}
tail_ = Cursor(b);
}
++size_;
return tail_.ptr++;
}
void ClearBlock(Block* b) {
auto* begin = b == head_.block ? head_.ptr : b->begin();
auto* end = b == tail_.block ? tail_.ptr : b->end();
if constexpr (!kDestroyIsTrivial) {
for (; begin != end; ++begin) {
TransferTraits::destroy(&allocator_, begin);
}
}
}
ABSL_ATTRIBUTE_NO_UNIQUE_ADDRESS Allocator allocator_;
Cursor head_;
Cursor tail_;
size_t size_;
};
}
}
#endif | #include "tensorstore/internal/container/block_queue.h"
#include <stdint.h>
#include <memory>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
namespace {
using ::tensorstore::internal_container::BlockQueue;
TEST(BlockQueue, Basic) {
BlockQueue<int64_t> q;
EXPECT_TRUE(q.empty());
EXPECT_THAT(q.size(), 0);
q.push_back(10);
EXPECT_FALSE(q.empty());
EXPECT_EQ(q.front(), 10);
EXPECT_EQ(q.back(), 10);
q.pop_front();
EXPECT_TRUE(q.empty());
q.clear();
EXPECT_TRUE(q.empty());
}
TEST(BlockQueue, PushPop) {
BlockQueue<int64_t> q;
for (int i = 0; i < 4096; i++) {
q.push_back(i);
if (i & 0x08) {
q.pop_front();
}
}
EXPECT_FALSE(q.empty());
q.clear();
EXPECT_TRUE(q.empty());
}
class OnlyConstructibleByAllocator {
explicit OnlyConstructibleByAllocator(int i) : i_(i) {}
public:
OnlyConstructibleByAllocator(const OnlyConstructibleByAllocator &other)
: i_(other.i_) {}
OnlyConstructibleByAllocator &operator=(
const OnlyConstructibleByAllocator &other) {
i_ = other.i_;
return *this;
}
int Get() const { return i_; }
bool operator==(int i) const { return i_ == i; }
private:
template <typename T>
friend class OnlyConstructibleAllocator;
int i_;
};
template <typename T = OnlyConstructibleByAllocator>
class OnlyConstructibleAllocator : public std::allocator<T> {
public:
OnlyConstructibleAllocator() = default;
template <class U>
explicit OnlyConstructibleAllocator(const OnlyConstructibleAllocator<U> &) {}
void construct(OnlyConstructibleByAllocator *p, int i) {
new (p) OnlyConstructibleByAllocator(i);
}
template <class U>
struct rebind {
using other = OnlyConstructibleAllocator<U>;
};
};
TEST(BlockQueue, OnlyConstructibleByAllocator) {
BlockQueue<OnlyConstructibleByAllocator, 1024, 1024,
OnlyConstructibleAllocator<>>
q;
EXPECT_TRUE(q.empty());
EXPECT_THAT(q.size(), 0);
q.emplace_back(10);
EXPECT_FALSE(q.empty());
EXPECT_EQ(q.front().Get(), 10);
EXPECT_EQ(q.back().Get(), 10);
q.pop_front();
EXPECT_TRUE(q.empty());
q.clear();
EXPECT_TRUE(q.empty());
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/container/block_queue.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/container/block_queue_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
b716890b-3e9c-405c-930e-85296b06a003 | cpp | google/leveldb | skiplist | db/skiplist.h | db/skiplist_test.cc | #ifndef STORAGE_LEVELDB_DB_SKIPLIST_H_
#define STORAGE_LEVELDB_DB_SKIPLIST_H_
#include <atomic>
#include <cassert>
#include <cstdlib>
#include "util/arena.h"
#include "util/random.h"
namespace leveldb {
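// Thread safety
// -------------
//
// Writes require external synchronization, most likely a mutex.
// Reads require a guarantee that the SkipList will not be destroyed
// while the read is in progress.  Apart from that, reads progress
// without any internal locking or synchronization.
//
// Invariants:
//
// (1) Allocated nodes are never deleted until the SkipList is
// destroyed.  This is trivially guaranteed by the code since we
// never delete any skip list nodes.
//
// (2) The contents of a Node except for the next/prev pointers are
// immutable after the Node has been linked into the SkipList.
// Only Insert() modifies the list, and it is careful to initialize
// a node and to use release-stores to publish the nodes in one or
// more lists.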
template <typename Key, class Comparator>
class SkipList {
private:
struct Node;
public:
explicit SkipList(Comparator cmp, Arena* arena);
SkipList(const SkipList&) = delete;
SkipList& operator=(const SkipList&) = delete;
void Insert(const Key& key);
bool Contains(const Key& key) const;
class Iterator {
public:
explicit Iterator(const SkipList* list);
bool Valid() const;
const Key& key() const;
void Next();
void Prev();
void Seek(const Key& target);
void SeekToFirst();
void SeekToLast();
private:
const SkipList* list_;
Node* node_;
};
private:
enum { kMaxHeight = 12 };
inline int GetMaxHeight() const {
return max_height_.load(std::memory_order_relaxed);
}
Node* NewNode(const Key& key, int height);
int RandomHeight();
bool Equal(const Key& a, const Key& b) const { return (compare_(a, b) == 0); }
  // Return true if key is greater than the data stored in "n".
  bool KeyIsAfterNode(const Key& key, Node* n) const;
  // Return the earliest node at or after key (nullptr if none).  If prev is
  // non-null, fills prev[level] with the predecessor node at each level.
  Node* FindGreaterOrEqual(const Key& key, Node** prev) const;
  // Return the latest node with a key < key (head_ if there is no such node).
  Node* FindLessThan(const Key& key) const;
  // Return the last node in the list (head_ if list is empty).
  Node* FindLast() const;
  // Immutable after construction.
  Comparator const compare_;
  Arena* const arena_;  // Arena used for allocations of nodes.
  Node* const head_;
  // Modified only by Insert().  Read racily by readers, but stale
  // values are ok.
  std::atomic<int> max_height_;  // Height of the entire list.
  // Read/written only by Insert().
  Random rnd_;
};
template <typename Key, class Comparator>
struct SkipList<Key, Comparator>::Node {
explicit Node(const Key& k) : key(k) {}
Key const key;
  // Accessors/mutators for links.  Wrapped in methods so we can add
  // the appropriate barriers as necessary.
  Node* Next(int n) {
    assert(n >= 0);
    // Use an 'acquire load' so that we observe a fully initialized
    // version of the returned Node.
    return next_[n].load(std::memory_order_acquire);
  }
  void SetNext(int n, Node* x) {
    assert(n >= 0);
    // Use a 'release store' so that anybody who reads through this
    // pointer observes a fully initialized version of the inserted node.
    next_[n].store(x, std::memory_order_release);
  }

  // No-barrier variants that can be safely used in a few locations.
  Node* NoBarrier_Next(int n) {
    assert(n >= 0);
    return next_[n].load(std::memory_order_relaxed);
  }
  void NoBarrier_SetNext(int n, Node* x) {
    assert(n >= 0);
    next_[n].store(x, std::memory_order_relaxed);
  }
 private:
  // Array of length equal to the node height.  next_[0] is lowest level link.
  std::atomic<Node*> next_[1];
};
template <typename Key, class Comparator>
typename SkipList<Key, Comparator>::Node* SkipList<Key, Comparator>::NewNode(
const Key& key, int height) {
char* const node_memory = arena_->AllocateAligned(
sizeof(Node) + sizeof(std::atomic<Node*>) * (height - 1));
return new (node_memory) Node(key);
}
template <typename Key, class Comparator>
inline SkipList<Key, Comparator>::Iterator::Iterator(const SkipList* list) {
list_ = list;
node_ = nullptr;
}
template <typename Key, class Comparator>
inline bool SkipList<Key, Comparator>::Iterator::Valid() const {
return node_ != nullptr;
}
template <typename Key, class Comparator>
inline const Key& SkipList<Key, Comparator>::Iterator::key() const {
assert(Valid());
return node_->key;
}
template <typename Key, class Comparator>
inline void SkipList<Key, Comparator>::Iterator::Next() {
assert(Valid());
node_ = node_->Next(0);
}
template <typename Key, class Comparator>
inline void SkipList<Key, Comparator>::Iterator::Prev() {
  assert(Valid());
  // Instead of using explicit "prev" links, we just search for the
  // last node that falls before key.
  node_ = list_->FindLessThan(node_->key);
if (node_ == list_->head_) {
node_ = nullptr;
}
}
template <typename Key, class Comparator>
inline void SkipList<Key, Comparator>::Iterator::Seek(const Key& target) {
node_ = list_->FindGreaterOrEqual(target, nullptr);
}
template <typename Key, class Comparator>
inline void SkipList<Key, Comparator>::Iterator::SeekToFirst() {
node_ = list_->head_->Next(0);
}
template <typename Key, class Comparator>
inline void SkipList<Key, Comparator>::Iterator::SeekToLast() {
node_ = list_->FindLast();
if (node_ == list_->head_) {
node_ = nullptr;
}
}
template <typename Key, class Comparator>
int SkipList<Key, Comparator>::RandomHeight() {
  // Increase height with probability 1 in kBranching.
  static const unsigned int kBranching = 4;
int height = 1;
while (height < kMaxHeight && rnd_.OneIn(kBranching)) {
height++;
}
assert(height > 0);
assert(height <= kMaxHeight);
return height;
}
template <typename Key, class Comparator>
bool SkipList<Key, Comparator>::KeyIsAfterNode(const Key& key, Node* n) const {
return (n != nullptr) && (compare_(n->key, key) < 0);
}
template <typename Key, class Comparator>
typename SkipList<Key, Comparator>::Node*
SkipList<Key, Comparator>::FindGreaterOrEqual(const Key& key,
Node** prev) const {
Node* x = head_;
int level = GetMaxHeight() - 1;
while (true) {
Node* next = x->Next(level);
if (KeyIsAfterNode(key, next)) {
x = next;
} else {
if (prev != nullptr) prev[level] = x;
if (level == 0) {
return next;
} else {
level--;
}
}
}
}
template <typename Key, class Comparator>
typename SkipList<Key, Comparator>::Node*
SkipList<Key, Comparator>::FindLessThan(const Key& key) const {
Node* x = head_;
int level = GetMaxHeight() - 1;
while (true) {
assert(x == head_ || compare_(x->key, key) < 0);
Node* next = x->Next(level);
if (next == nullptr || compare_(next->key, key) >= 0) {
if (level == 0) {
return x;
} else {
level--;
}
} else {
x = next;
}
}
}
template <typename Key, class Comparator>
typename SkipList<Key, Comparator>::Node* SkipList<Key, Comparator>::FindLast()
const {
Node* x = head_;
int level = GetMaxHeight() - 1;
while (true) {
Node* next = x->Next(level);
if (next == nullptr) {
if (level == 0) {
return x;
} else {
level--;
}
} else {
x = next;
}
}
}
template <typename Key, class Comparator>
SkipList<Key, Comparator>::SkipList(Comparator cmp, Arena* arena)
: compare_(cmp),
arena_(arena),
      head_(NewNode(0, kMaxHeight)),
max_height_(1),
rnd_(0xdeadbeef) {
for (int i = 0; i < kMaxHeight; i++) {
head_->SetNext(i, nullptr);
}
}
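// Insert assumes external synchronization among writers; readers may run
// concurrently because new links are published with release stores below.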
template <typename Key, class Comparator>
void SkipList<Key, Comparator>::Insert(const Key& key) {
Node* prev[kMaxHeight];
Node* x = FindGreaterOrEqual(key, prev);
assert(x == nullptr || !Equal(key, x->key));
int height = RandomHeight();
if (height > GetMaxHeight()) {
for (int i = GetMaxHeight(); i < height; i++) {
prev[i] = head_;
}
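    // A relaxed store suffices here: a concurrent reader that observes the
    // new height before the new node is linked simply falls through the
    // head's nullptr links at the extra levels.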
max_height_.store(height, std::memory_order_relaxed);
}
x = NewNode(key, height);
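  // Link bottom-up. The new node's own pointers need no barrier because the
  // release store in prev[i]->SetNext publishes the fully initialized node.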
for (int i = 0; i < height; i++) {
x->NoBarrier_SetNext(i, prev[i]->NoBarrier_Next(i));
prev[i]->SetNext(i, x);
}
}
template <typename Key, class Comparator>
bool SkipList<Key, Comparator>::Contains(const Key& key) const {
Node* x = FindGreaterOrEqual(key, nullptr);
if (x != nullptr && Equal(key, x->key)) {
return true;
} else {
return false;
}
}
}
#endif | #include "db/skiplist.h"
#include <atomic>
#include <set>
#include "gtest/gtest.h"
#include "leveldb/env.h"
#include "port/port.h"
#include "port/thread_annotations.h"
#include "util/arena.h"
#include "util/hash.h"
#include "util/random.h"
#include "util/testutil.h"
namespace leveldb {
typedef uint64_t Key;
struct Comparator {
int operator()(const Key& a, const Key& b) const {
if (a < b) {
return -1;
} else if (a > b) {
return +1;
} else {
return 0;
}
}
};
TEST(SkipTest, Empty) {
Arena arena;
Comparator cmp;
SkipList<Key, Comparator> list(cmp, &arena);
ASSERT_TRUE(!list.Contains(10));
SkipList<Key, Comparator>::Iterator iter(&list);
ASSERT_TRUE(!iter.Valid());
iter.SeekToFirst();
ASSERT_TRUE(!iter.Valid());
iter.Seek(100);
ASSERT_TRUE(!iter.Valid());
iter.SeekToLast();
ASSERT_TRUE(!iter.Valid());
}
TEST(SkipTest, InsertAndLookup) {
const int N = 2000;
const int R = 5000;
Random rnd(1000);
std::set<Key> keys;
Arena arena;
Comparator cmp;
SkipList<Key, Comparator> list(cmp, &arena);
for (int i = 0; i < N; i++) {
Key key = rnd.Next() % R;
if (keys.insert(key).second) {
list.Insert(key);
}
}
for (int i = 0; i < R; i++) {
if (list.Contains(i)) {
ASSERT_EQ(keys.count(i), 1);
} else {
ASSERT_EQ(keys.count(i), 0);
}
}
{
SkipList<Key, Comparator>::Iterator iter(&list);
ASSERT_TRUE(!iter.Valid());
iter.Seek(0);
ASSERT_TRUE(iter.Valid());
ASSERT_EQ(*(keys.begin()), iter.key());
iter.SeekToFirst();
ASSERT_TRUE(iter.Valid());
ASSERT_EQ(*(keys.begin()), iter.key());
iter.SeekToLast();
ASSERT_TRUE(iter.Valid());
ASSERT_EQ(*(keys.rbegin()), iter.key());
}
for (int i = 0; i < R; i++) {
SkipList<Key, Comparator>::Iterator iter(&list);
iter.Seek(i);
std::set<Key>::iterator model_iter = keys.lower_bound(i);
for (int j = 0; j < 3; j++) {
if (model_iter == keys.end()) {
ASSERT_TRUE(!iter.Valid());
break;
} else {
ASSERT_TRUE(iter.Valid());
ASSERT_EQ(*model_iter, iter.key());
++model_iter;
iter.Next();
}
}
}
{
SkipList<Key, Comparator>::Iterator iter(&list);
iter.SeekToLast();
for (std::set<Key>::reverse_iterator model_iter = keys.rbegin();
model_iter != keys.rend(); ++model_iter) {
ASSERT_TRUE(iter.Valid());
ASSERT_EQ(*model_iter, iter.key());
iter.Prev();
}
ASSERT_TRUE(!iter.Valid());
}
}
class ConcurrentTest {
private:
static constexpr uint32_t K = 4;
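  // Keys are synthesized as (key << 40) | (generation << 8) | hash: a single
  // uint64_t encodes which of the K "columns" it belongs to, an insertion
  // generation, and an 8-bit checksum of the other two fields.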
static uint64_t key(Key key) { return (key >> 40); }
static uint64_t gen(Key key) { return (key >> 8) & 0xffffffffu; }
static uint64_t hash(Key key) { return key & 0xff; }
static uint64_t HashNumbers(uint64_t k, uint64_t g) {
uint64_t data[2] = {k, g};
return Hash(reinterpret_cast<char*>(data), sizeof(data), 0);
}
static Key MakeKey(uint64_t k, uint64_t g) {
static_assert(sizeof(Key) == sizeof(uint64_t), "");
assert(k <= K);
assert(g <= 0xffffffffu);
return ((k << 40) | (g << 8) | (HashNumbers(k, g) & 0xff));
}
static bool IsValidKey(Key k) {
return hash(k) == (HashNumbers(key(k), gen(k)) & 0xff);
}
static Key RandomTarget(Random* rnd) {
switch (rnd->Next() % 10) {
case 0:
return MakeKey(0, 0);
case 1:
return MakeKey(K, 0);
default:
return MakeKey(rnd->Next() % K, 0);
}
}
struct State {
std::atomic<int> generation[K];
void Set(int k, int v) {
generation[k].store(v, std::memory_order_release);
}
int Get(int k) { return generation[k].load(std::memory_order_acquire); }
State() {
for (int k = 0; k < K; k++) {
Set(k, 0);
}
}
};
State current_;
Arena arena_;
SkipList<Key, Comparator> list_;
public:
ConcurrentTest() : list_(Comparator(), &arena_) {}
void WriteStep(Random* rnd) {
const uint32_t k = rnd->Next() % K;
const intptr_t g = current_.Get(k) + 1;
const Key key = MakeKey(k, g);
list_.Insert(key);
current_.Set(k, g);
}
void ReadStep(Random* rnd) {
State initial_state;
for (int k = 0; k < K; k++) {
initial_state.Set(k, current_.Get(k));
}
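    // Snapshot the generation counters first; any key observed later with a
    // larger generation must have been inserted concurrently with this read.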
Key pos = RandomTarget(rnd);
SkipList<Key, Comparator>::Iterator iter(&list_);
iter.Seek(pos);
while (true) {
Key current;
if (!iter.Valid()) {
current = MakeKey(K, 0);
} else {
current = iter.key();
ASSERT_TRUE(IsValidKey(current)) << current;
}
ASSERT_LE(pos, current) << "should not go backwards";
while (pos < current) {
ASSERT_LT(key(pos), K) << pos;
ASSERT_TRUE((gen(pos) == 0) ||
(gen(pos) > static_cast<Key>(initial_state.Get(key(pos)))))
<< "key: " << key(pos) << "; gen: " << gen(pos)
<< "; initgen: " << initial_state.Get(key(pos));
if (key(pos) < key(current)) {
pos = MakeKey(key(pos) + 1, 0);
} else {
pos = MakeKey(key(pos), gen(pos) + 1);
}
}
if (!iter.Valid()) {
break;
}
if (rnd->Next() % 2) {
iter.Next();
pos = MakeKey(key(pos), gen(pos) + 1);
} else {
Key new_target = RandomTarget(rnd);
if (new_target > pos) {
pos = new_target;
iter.Seek(new_target);
}
}
}
}
};
constexpr uint32_t ConcurrentTest::K;
TEST(SkipTest, ConcurrentWithoutThreads) {
ConcurrentTest test;
Random rnd(test::RandomSeed());
for (int i = 0; i < 10000; i++) {
test.ReadStep(&rnd);
test.WriteStep(&rnd);
}
}
class TestState {
public:
ConcurrentTest t_;
int seed_;
std::atomic<bool> quit_flag_;
enum ReaderState { STARTING, RUNNING, DONE };
explicit TestState(int s)
: seed_(s), quit_flag_(false), state_(STARTING), state_cv_(&mu_) {}
void Wait(ReaderState s) LOCKS_EXCLUDED(mu_) {
mu_.Lock();
while (state_ != s) {
state_cv_.Wait();
}
mu_.Unlock();
}
void Change(ReaderState s) LOCKS_EXCLUDED(mu_) {
mu_.Lock();
state_ = s;
state_cv_.Signal();
mu_.Unlock();
}
private:
port::Mutex mu_;
ReaderState state_ GUARDED_BY(mu_);
port::CondVar state_cv_ GUARDED_BY(mu_);
};
static void ConcurrentReader(void* arg) {
TestState* state = reinterpret_cast<TestState*>(arg);
Random rnd(state->seed_);
int64_t reads = 0;
state->Change(TestState::RUNNING);
while (!state->quit_flag_.load(std::memory_order_acquire)) {
state->t_.ReadStep(&rnd);
++reads;
}
state->Change(TestState::DONE);
}
static void RunConcurrent(int run) {
const int seed = test::RandomSeed() + (run * 100);
Random rnd(seed);
const int N = 1000;
const int kSize = 1000;
for (int i = 0; i < N; i++) {
if ((i % 100) == 0) {
std::fprintf(stderr, "Run %d of %d\n", i, N);
}
TestState state(seed + 1);
Env::Default()->Schedule(ConcurrentReader, &state);
state.Wait(TestState::RUNNING);
for (int i = 0; i < kSize; i++) {
state.t_.WriteStep(&rnd);
}
state.quit_flag_.store(true, std::memory_order_release);
state.Wait(TestState::DONE);
}
}
TEST(SkipTest, Concurrent1) { RunConcurrent(1); }
TEST(SkipTest, Concurrent2) { RunConcurrent(2); }
TEST(SkipTest, Concurrent3) { RunConcurrent(3); }
TEST(SkipTest, Concurrent4) { RunConcurrent(4); }
TEST(SkipTest, Concurrent5) { RunConcurrent(5); }
} | https://github.com/google/leveldb/blob/23e35d792b9154f922b8b575b12596a4d8664c65/db/skiplist.h | https://github.com/google/leveldb/blob/23e35d792b9154f922b8b575b12596a4d8664c65/db/skiplist_test.cc | 23e35d792b9154f922b8b575b12596a4d8664c65 |
19ac5ec1-6718-4849-b3c2-d2aff780b06c | cpp | google/tensorstore | translate_op | tensorstore/index_space/internal/translate_op.cc | tensorstore/index_space/translate_op_test.cc | #include "tensorstore/index_space/internal/translate_op.h"
#include <algorithm>
#include "absl/status/status.h"
#include "tensorstore/internal/integer_overflow.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal_index_space {
namespace {
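// Folds per-input-dimension offsets into the transform's output index maps:
// single-input-dimension maps absorb offset * stride into their constant
// term, and index-array maps are rebased by shifting their element pointer.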
absl::Status TranslateOutputOffsetsUsingInputOffsets(
TransformRep* transform, const Index* input_offsets) {
const DimensionIndex output_rank = transform->output_rank;
const DimensionIndex input_rank = transform->input_rank;
span<OutputIndexMap> maps = transform->output_index_maps().first(output_rank);
for (DimensionIndex output_dim = 0; output_dim < output_rank; ++output_dim) {
auto& map = maps[output_dim];
switch (map.method()) {
case OutputIndexMethod::single_input_dimension: {
const DimensionIndex input_dim = map.input_dimension();
const Index offset_change = input_offsets[input_dim];
Index new_offset;
if (internal::MulOverflow(offset_change, map.stride(), &new_offset) ||
internal::SubOverflow(map.offset(), new_offset, &map.offset())) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Integer overflow computing output offset for dimension ",
output_dim, "."));
}
break;
}
case OutputIndexMethod::array: {
auto& index_array_data = map.index_array_data();
index_array_data.element_pointer = AddByteOffset(
std::move(index_array_data.element_pointer),
-IndexInnerProduct(input_rank, index_array_data.byte_strides,
input_offsets));
break;
}
case OutputIndexMethod::constant:
break;
}
}
return absl::OkStatus();
}
}
Result<IndexTransform<>> ApplyTranslate(IndexTransform<> transform,
DimensionIndexBuffer* dimensions,
IndexVectorOrScalarView offsets,
TranslateOpKind kind,
bool domain_only) {
const DimensionIndex num_dims = dimensions->size();
const DimensionIndex input_rank = transform.input_rank();
TENSORSTORE_RETURN_IF_ERROR(CheckIndexVectorSize(offsets, num_dims));
TransformRep::Ptr<> rep = MutableRep(
TransformAccess::rep_ptr<container>(std::move(transform)), domain_only);
const auto input_domain = rep->input_domain(input_rank);
Index input_offsets[kMaxRank];
std::fill_n(&input_offsets[0], input_rank, static_cast<Index>(0));
for (DimensionIndex i = 0; i < num_dims; ++i) {
const DimensionIndex input_dim = (*dimensions)[i];
Index offset = offsets[i];
if (offset == kImplicit) continue;
const IndexInterval old_interval = input_domain[input_dim];
IndexInterval new_interval;
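    // kTranslateTo moves the dimension's origin to `offset`, kTranslateBy
    // shifts it by `offset`, and kTranslateBackwardBy shifts by `-offset`
    // (note the fallthrough below).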
switch (kind) {
case TranslateOpKind::kTranslateTo: {
TENSORSTORE_ASSIGN_OR_RETURN(new_interval,
ShiftIntervalTo(old_interval, offset));
offset = new_interval.inclusive_min() - old_interval.inclusive_min();
break;
}
case TranslateOpKind::kTranslateBackwardBy: {
offset = -offset;
}
[[fallthrough]];
case TranslateOpKind::kTranslateBy: {
TENSORSTORE_ASSIGN_OR_RETURN(new_interval,
ShiftInterval(old_interval, offset));
break;
}
}
input_domain[input_dim] = new_interval;
input_offsets[input_dim] = offset;
}
TENSORSTORE_RETURN_IF_ERROR(
TranslateOutputOffsetsUsingInputOffsets(rep.get(), &input_offsets[0]));
internal_index_space::DebugCheckInvariants(rep.get());
return TransformAccess::Make<IndexTransform<>>(std::move(rep));
}
}
} | #include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/index_space/dim_expression.h"
#include "tensorstore/index_space/index_domain_builder.h"
#include "tensorstore/index_space/index_transform_builder.h"
#include "tensorstore/index_space/internal/dim_expression_testutil.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::AllDims;
using ::tensorstore::DimensionIndex;
using ::tensorstore::Dims;
using ::tensorstore::Index;
using ::tensorstore::IndexDomainBuilder;
using ::tensorstore::IndexTransformBuilder;
using ::tensorstore::kImplicit;
using ::tensorstore::kInfIndex;
using ::tensorstore::kInfSize;
using ::tensorstore::kMaxFiniteIndex;
using ::tensorstore::kMinFiniteIndex;
using ::tensorstore::MakeArray;
using ::tensorstore::MatchesStatus;
using ::tensorstore::span;
using ::tensorstore::internal_index_space::EquivalentIndices;
using ::tensorstore::internal_index_space::TestDimExpression;
using ::tensorstore::internal_index_space::TestDimExpressionError;
TEST(TranslateByTest, Example) {
const auto original_transform = IndexTransformBuilder<3, 3>()
.input_origin({1, 2, 3})
.input_shape({3, 4, 2})
.input_labels({"x", "y", "z"})
.output_identity_transform()
.Finalize()
.value();
const auto expected_new_transform =
IndexTransformBuilder<3, 3>()
.input_origin({11, 2, 23})
.input_shape({3, 4, 2})
.input_labels({"x", "y", "z"})
.output_single_input_dimension(0, -10, 1, 0)
.output_single_input_dimension(1, 1)
.output_single_input_dimension(2, -20, 1, 2)
.Finalize()
.value();
const EquivalentIndices equivalent_indices = {
{{2, 3, 3}, {12, 3, 23}},
};
TestDimExpression(original_transform,
Dims(0, 2).TranslateBy({10, 20}),
{0, 2},
expected_new_transform,
expected_new_transform,
equivalent_indices);
TestDimExpression(original_transform,
Dims("x", "z").TranslateBy({10, 20}),
{0, 2},
expected_new_transform,
expected_new_transform,
equivalent_indices);
}
TEST(TranslateBackwardByTest, Example) {
const auto original_transform = IndexTransformBuilder<3, 3>()
.input_origin({1, 2, 3})
.input_shape({3, 4, 2})
.input_labels({"x", "y", "z"})
.output_identity_transform()
.Finalize()
.value();
const auto expected_new_transform =
IndexTransformBuilder<3, 3>()
.input_origin({-9, 2, -17})
.input_shape({3, 4, 2})
.input_labels({"x", "y", "z"})
.output_single_input_dimension(0, 10, 1, 0)
.output_single_input_dimension(1, 1)
.output_single_input_dimension(2, 20, 1, 2)
.Finalize()
.value();
const EquivalentIndices equivalent_indices = {
{{2, 3, 3}, {-8, 3, -17}},
};
TestDimExpression(original_transform,
Dims(0, 2).TranslateBackwardBy({10, 20}),
{0, 2},
expected_new_transform,
expected_new_transform,
equivalent_indices);
TestDimExpression(original_transform,
Dims("x", "z").TranslateBackwardBy({10, 20}),
{0, 2},
expected_new_transform,
expected_new_transform,
equivalent_indices);
}
TEST(TranslateToTest, Example) {
const auto original_transform = IndexTransformBuilder<3, 3>()
.input_origin({1, 2, 3})
.input_shape({3, 4, 2})
.input_labels({"x", "y", "z"})
.output_identity_transform()
.Finalize()
.value();
const auto expected_new_transform =
IndexTransformBuilder<3, 3>()
.input_origin({10, 2, 20})
.input_shape({3, 4, 2})
.input_labels({"x", "y", "z"})
.output_single_input_dimension(0, -9, 1, 0)
.output_single_input_dimension(1, 1)
.output_single_input_dimension(2, -17, 1, 2)
.Finalize()
.value();
const EquivalentIndices equivalent_indices = {
{{2, 3, 3}, {11, 3, 20}},
};
TestDimExpression(original_transform,
Dims(0, 2).TranslateTo({10, 20}),
{0, 2},
expected_new_transform,
expected_new_transform,
equivalent_indices);
TestDimExpression(original_transform,
                    Dims("x", "z").TranslateTo({10, 20}),
{0, 2},
expected_new_transform,
expected_new_transform,
equivalent_indices);
}
TEST(TranslateByTest, OneDimensionalConstant) {
TestDimExpression(
IndexTransformBuilder<1, 1>()
.output_constant(0, 2)
.Finalize()
.value(),
AllDims().TranslateBy(5),
{0},
IndexTransformBuilder<1, 1>()
.output_single_input_dimension(0, -5, 1, 0)
.Finalize()
.value(),
IndexTransformBuilder<1, 1>()
.output_constant(0, 2)
.Finalize()
.value(),
{{{4}, {9}}});
}
TEST(TranslateByTest, OneDimensionalSingleInputDimension) {
TestDimExpression(
IndexTransformBuilder<1, 1>()
.output_single_input_dimension(0, 2, 3, 0)
.Finalize()
.value(),
AllDims().TranslateBy(5),
{0},
IndexTransformBuilder<1, 1>()
.output_single_input_dimension(0, -5, 1, 0)
.Finalize()
.value(),
IndexTransformBuilder<1, 1>()
.output_single_input_dimension(0, 2 - 3 * 5, 3, 0)
.Finalize()
.value(),
{{{4}, {9}}});
}
TEST(TranslateByTest, OneDimensionalSingleInputDimensionImplicit) {
TestDimExpression(
IndexTransformBuilder<1, 1>()
.output_single_input_dimension(0, 2, 3, 0)
.Finalize()
.value(),
AllDims().TranslateBy(kImplicit),
{0},
IndexTransformBuilder<1, 1>()
.output_identity_transform()
.Finalize()
.value(),
IndexTransformBuilder<1, 1>()
.output_single_input_dimension(0, 2, 3, 0)
.Finalize()
.value(),
{{{4}, {4}}});
}
TEST(TranslateByTest, OneDimensionalIndexArray) {
TestDimExpression(
IndexTransformBuilder<1, 1>()
.input_origin({-2})
.input_shape({5})
.output_index_array(0, 2, 3, MakeArray<Index>({6, 7, 8, 9, 10}))
.Finalize()
.value(),
AllDims().TranslateBy(5),
{0},
IndexTransformBuilder<1, 1>()
.input_origin({3})
.input_shape({5})
.output_single_input_dimension(0, -5, 1, 0)
.Finalize()
.value(),
IndexTransformBuilder<1, 1>()
.input_origin({3})
.input_shape({5})
.output_index_array(0, 2, 3, MakeArray<Index>({6, 7, 8, 9, 10}))
.Finalize()
.value(),
{{{1}, {6}}});
}
TEST(TranslateByTest, AllDimsUniform) {
TestDimExpression(
IndexTransformBuilder<3, 5>()
.input_origin({-kInfIndex, 5, -kInfIndex})
.input_shape({kInfSize, 30, kInfIndex + 10})
.output_single_input_dimension(0, 1, 4, 0)
.output_single_input_dimension(1, 2, 5, 0)
.output_constant(2, 3)
.output_single_input_dimension(3, 4, 7, 1)
.output_single_input_dimension(4, 5, 8, 2)
.Finalize()
.value(),
AllDims().TranslateBy(5),
{0, 1, 2},
IndexTransformBuilder<3, 3>()
.input_origin({-kInfIndex, 10, -kInfIndex})
.input_shape({kInfSize, 30, kInfIndex + 15})
.output_single_input_dimension(0, -5, 1, 0)
.output_single_input_dimension(1, -5, 1, 1)
.output_single_input_dimension(2, -5, 1, 2)
.Finalize()
.value(),
IndexTransformBuilder<3, 5>()
.input_origin({-kInfIndex, 10, -kInfIndex})
.input_shape({kInfSize, 30, kInfIndex + 15})
.output_single_input_dimension(0, 1 - 4 * 5, 4, 0)
.output_single_input_dimension(1, 2 - 5 * 5, 5, 0)
.output_constant(2, 3)
.output_single_input_dimension(3, 4 - 7 * 5, 7, 1)
.output_single_input_dimension(4, 5 - 8 * 5, 8, 2)
.Finalize()
.value(),
{{{4, 5, 6}, {4 + 5, 5 + 5, 6 + 5}}});
}
TEST(TranslateByTest, ErrorHandling) {
TestDimExpressionError(
IndexTransformBuilder<1, 1>().Finalize().value(),
AllDims().TranslateBy(span<const Index>({1, 2})),
absl::StatusCode::kInvalidArgument,
"Number of dimensions \\(1\\) does not match number of "
"indices \\(2\\)");
TestDimExpressionError(IndexTransformBuilder<1, 1>()
.input_origin({kMinFiniteIndex})
.input_shape({10})
.Finalize()
.value(),
AllDims().TranslateBy(-kInfIndex),
absl::StatusCode::kInvalidArgument,
".* is outside valid range .*");
TestDimExpressionError(IndexTransformBuilder<1, 1>()
.input_origin({kMinFiniteIndex})
.input_shape({10})
.Finalize()
.value(),
AllDims().TranslateBy(-1),
absl::StatusCode::kInvalidArgument,
".* is outside valid range .*");
TestDimExpressionError(IndexTransformBuilder<1, 1>()
.input_origin({kMaxFiniteIndex - 1})
.input_shape({2})
.Finalize()
.value(),
AllDims().TranslateBy(1),
absl::StatusCode::kInvalidArgument,
".* is outside valid range .*");
TestDimExpressionError(IndexTransformBuilder<1, 1>()
.output_single_input_dimension(
0, std::numeric_limits<Index>::min(), 1, 0)
.Finalize()
.value(),
AllDims().TranslateBy(1),
absl::StatusCode::kInvalidArgument,
"Integer overflow computing output offset .*");
}
TEST(TranslateByTest, DimSubsetUniform) {
TestDimExpression(IndexTransformBuilder<3, 2>()
.input_origin({1, 2, -kInfIndex})
.input_shape({4, 5, kInfIndex + 7})
.output_single_input_dimension(0, 1, 1, 1)
.output_single_input_dimension(1, 2, 2, 2)
.Finalize()
.value(),
Dims(0, 2).TranslateBy(5),
{0, 2},
IndexTransformBuilder<3, 3>()
.input_origin({6, 2, -kInfIndex})
.input_shape({4, 5, kInfIndex + 7 + 5})
.output_single_input_dimension(0, -5, 1, 0)
.output_single_input_dimension(1, 1)
.output_single_input_dimension(2, -5, 1, 2)
.Finalize()
.value(),
IndexTransformBuilder<3, 2>()
.input_origin({6, 2, -kInfIndex})
.input_shape({4, 5, kInfIndex + 7 + 5})
.output_single_input_dimension(0, 1, 1, 1)
.output_single_input_dimension(1, 2 - 2 * 5, 2, 2)
.Finalize()
.value(),
{{{4, 5, 6}, {4 + 5, 5, 6 + 5}}});
}
TEST(TranslateByTest, DimSubsetNonUniform) {
TestDimExpression(IndexTransformBuilder<3, 2>()
.input_origin({1, 2, -kInfIndex})
.input_shape({4, 5, kInfIndex + 7})
.output_single_input_dimension(0, 1, 1, 1)
.output_single_input_dimension(1, 2, 2, 2)
.Finalize()
.value(),
Dims(0, 2).TranslateBy({5, 6}),
{0, 2},
IndexTransformBuilder<3, 3>()
.input_origin({6, 2, -kInfIndex})
.input_shape({4, 5, kInfIndex + 7 + 6})
.output_single_input_dimension(0, -5, 1, 0)
.output_single_input_dimension(1, 1)
.output_single_input_dimension(2, -6, 1, 2)
.Finalize()
.value(),
IndexTransformBuilder<3, 2>()
.input_origin({6, 2, -kInfIndex})
.input_shape({4, 5, kInfIndex + 7 + 6})
.output_single_input_dimension(0, 1, 1, 1)
.output_single_input_dimension(1, 2 - 2 * 6, 2, 2)
.Finalize()
.value(),
{{{3, 4, 5}, {3 + 5, 4, 5 + 6}}});
}
TEST(TranslateToTest, OneDimensionalConstant) {
TestDimExpression(IndexTransformBuilder<1, 1>()
.input_origin({5})
.input_shape({10})
.output_constant(0, 2)
.Finalize()
.value(),
AllDims().TranslateTo(8),
{0},
IndexTransformBuilder<1, 1>()
.input_origin({8})
.input_shape({10})
.output_single_input_dimension(0, -3, 1, 0)
.Finalize()
.value(),
IndexTransformBuilder<1, 1>()
.input_origin({8})
.input_shape({10})
.output_constant(0, 2)
.Finalize()
.value(),
{{{7}, {10}}});
}
TEST(TranslateToTest, OneDimensionalSingleInputDimension) {
TestDimExpression(IndexTransformBuilder<1, 1>()
.input_origin({4})
.input_shape({10})
.output_single_input_dimension(0, 2, 3, 0)
.Finalize()
.value(),
AllDims().TranslateTo(5),
{0},
IndexTransformBuilder<1, 1>()
.input_origin({5})
.input_shape({10})
.output_single_input_dimension(0, -1, 1, 0)
.Finalize()
.value(),
IndexTransformBuilder<1, 1>()
.input_origin({5})
.input_shape({10})
.output_single_input_dimension(0, 2 - 3, 3, 0)
.Finalize()
.value(),
{{{6}, {7}}});
}
TEST(TranslateToTest, OneDimensionalSingleInputDimensionImplicit) {
TestDimExpression(IndexTransformBuilder<1, 1>()
.input_origin({4})
.input_shape({10})
.output_single_input_dimension(0, 2, 3, 0)
.Finalize()
.value(),
AllDims().TranslateTo(kImplicit),
{0},
IndexTransformBuilder<1, 1>()
.input_origin({4})
.input_shape({10})
.output_single_input_dimension(0, 0)
.Finalize()
.value(),
IndexTransformBuilder<1, 1>()
.input_origin({4})
.input_shape({10})
.output_single_input_dimension(0, 2, 3, 0)
.Finalize()
.value(),
{{{6}, {6}}});
}
TEST(TranslateToTest, TwoDimensionalSingleInputDimensionOneImplicit) {
TestDimExpression(IndexTransformBuilder<2, 2>()
.input_origin({4, 5})
.input_shape({10, 11})
.output_single_input_dimension(0, 2, 3, 0)
.output_single_input_dimension(1, 4, 5, 1)
.Finalize()
.value(),
AllDims().TranslateTo({kImplicit, 10}),
{0, 1},
IndexTransformBuilder<2, 2>()
.input_origin({4, 10})
.input_shape({10, 11})
.output_single_input_dimension(0, 0)
.output_single_input_dimension(1, -5, 1, 1)
.Finalize()
.value(),
IndexTransformBuilder<2, 2>()
.input_origin({4, 10})
.input_shape({10, 11})
.output_single_input_dimension(0, 2, 3, 0)
.output_single_input_dimension(1, -25 + 4, 5, 1)
.Finalize()
.value(),
{{{6, 7}, {6, 12}}});
}
TEST(TranslateToTest, ErrorHandling) {
TestDimExpressionError(IndexTransformBuilder<1, 1>().Finalize().value(),
AllDims().TranslateTo(1),
absl::StatusCode::kInvalidArgument,
"Interval \\(-inf, \\+inf\\) is not bounded below");
TestDimExpressionError(
IndexTransformBuilder<1, 1>()
.input_origin({-5})
.input_shape({10})
.Finalize()
.value(),
AllDims().TranslateTo(std::numeric_limits<Index>::max()),
absl::StatusCode::kOutOfRange, "Origin [0-9]+ is outside valid range .*");
}
TEST(TranslateToTest, IndexDomain) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto domain,
IndexDomainBuilder<3>().origin({1, 2, 3}).shape({6, 7, 8}).Finalize());
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto translated_domain,
IndexDomainBuilder<3>().origin({4, 5, 6}).shape({6, 7, 8}).Finalize());
EXPECT_THAT(domain | AllDims().TranslateTo({4, 5, 6}),
::testing::Optional(translated_domain));
}
TEST(TranslateToTest, IndexDomainOverflow) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto transform,
IndexTransformBuilder(1, 1)
.input_shape({10})
.output_single_input_dimension(0, kMaxFiniteIndex, kMaxFiniteIndex, 0)
.Finalize());
auto domain = transform.domain();
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto translated_domain,
IndexDomainBuilder(1).origin({-5}).shape({10}).Finalize());
EXPECT_THAT(transform | AllDims().TranslateTo({-5}),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(domain | AllDims().TranslateTo({-5}),
::testing::Optional(translated_domain));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/index_space/internal/translate_op.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/index_space/translate_op_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
95f724dc-8304-472f-bfdf-85d4e1c4bdcf | cpp | tensorflow/tensorflow | hlo_cse | third_party/xla/xla/service/hlo_cse.cc | third_party/xla/xla/service/hlo_cse_test.cc | #include "xla/service/hlo_cse.h"
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/literal.h"
#include "xla/service/hlo_domain_map.h"
#include "xla/shape_util.h"
#include "tsl/platform/errors.h"
namespace xla {
namespace {
template <bool kIsLayoutSensitive>
struct ConstantKey {
template <typename H>
friend H AbslHashValue(H h, const ConstantKey& key) {
h = H::combine(std::move(h), key.domain);
return Literal::Hash<H, kIsLayoutSensitive, 64>(
std::move(h), key.hlo->literal());
}
friend bool operator==(const ConstantKey& lhs, const ConstantKey& rhs) {
return lhs.domain == rhs.domain &&
(kIsLayoutSensitive ? Shape::Equal()
: Shape::Equal().IgnoreLayout())(
lhs.hlo->shape(), rhs.hlo->shape()) &&
lhs.hlo->literal().Equal(rhs.hlo->literal(), kIsLayoutSensitive);
}
HloConstantInstruction* hlo;
int64_t domain;
};
template <bool kIsLayoutSensitive>
absl::StatusOr<bool> CombineConstants(HloComputation* computation,
bool only_scalars) {
std::unique_ptr<HloDomainMap> domain_map;
if (absl::c_any_of(computation->instructions(),
[&](const HloInstruction* instr) {
return instr->opcode() == HloOpcode::kDomain;
})) {
TF_ASSIGN_OR_RETURN(domain_map, HloDomainMap::Create(computation, ""));
}
absl::flat_hash_set<ConstantKey<kIsLayoutSensitive>> constants;
int64_t combined = 0;
auto inst_it = computation->instructions().begin();
while (inst_it != computation->instructions().end()) {
HloInstruction* instruction = *inst_it;
++inst_it;
if (only_scalars && !ShapeUtil::IsScalar(instruction->shape())) {
continue;
}
HloInstruction* match = nullptr;
if (auto* constant_inst = DynCast<HloConstantInstruction>(instruction)) {
auto insert_result = constants.insert(ConstantKey<kIsLayoutSensitive>{
constant_inst,
(domain_map != nullptr ? domain_map->GetDomainId(instruction) : 0)});
if (!insert_result.second) {
match = insert_result.first->hlo;
}
}
if (match != nullptr) {
TF_CHECK_OK(instruction->ReplaceAllUsesWith(match));
TF_CHECK_OK(computation->RemoveInstruction(instruction));
++combined;
}
}
VLOG(4) << "Combined " << combined << " constants and iotas in "
<< computation->name() << " computation";
return combined > 0;
}
struct CseKey {
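  // Hashes an instruction by opcode, shape dimensions, operand ids (sorted
  // for commutative binary ops, with iota operands canonicalized so that
  // structurally equal iotas hash alike), the root opcodes of called
  // computations, and opcode-specific attributes such as slice bounds,
  // padding, windows, and dot/convolution dimension numbers.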
template <typename H>
friend H AbslHashValue(H h, const CseKey& key) {
auto instruction = key.hlo;
h = H::combine(std::move(h), instruction->opcode(),
instruction->shape().dimensions());
auto window_hash = [](H h, const Window& window) {
const auto& window_dims = window.dimensions();
for (const auto& window_dim : window_dims) {
h = H::combine(std::move(h), window_dim.size(), window_dim.stride(),
window_dim.padding_low(), window_dim.padding_high(),
window_dim.window_dilation(), window_dim.base_dilation(),
window_dim.window_reversal());
}
return H::combine(std::move(h), window_dims.size());
};
if (HloOpcodeIsBinaryCommutative(instruction->opcode())) {
CHECK_EQ(instruction->operand_count(), 2);
auto id0 = instruction->operand(0)->unique_id();
if (instruction->operand(0)->opcode() == HloOpcode::kIota) {
id0 = 0;
}
auto id1 = instruction->operand(1)->unique_id();
if (instruction->operand(1)->opcode() == HloOpcode::kIota) {
id1 = 0;
}
if (id0 > id1) {
std::swap(id0, id1);
}
h = H::combine(std::move(h), id0, id1);
} else {
for (auto operand : instruction->operands()) {
if (operand->opcode() == HloOpcode::kIota) {
continue;
}
h = H::combine(std::move(h), operand->unique_id());
}
}
for (auto c : instruction->called_computations()) {
h = H::combine(std::move(h), c->root_instruction()->opcode());
}
switch (instruction->opcode()) {
case HloOpcode::kSlice:
return H::combine(std::move(h), instruction->slice_starts(),
instruction->slice_strides());
case HloOpcode::kPad: {
const auto& padding_dims = instruction->padding_config().dimensions();
for (const auto& padding_dim : padding_dims) {
h = H::combine(std::move(h), padding_dim.edge_padding_low(),
padding_dim.edge_padding_high(),
padding_dim.interior_padding());
}
h = H::combine(std::move(h), padding_dims.size());
return std::move(h);
}
case HloOpcode::kDot: {
const auto& dot_dimension_numbers =
instruction->dot_dimension_numbers();
h = H::combine(
std::move(h),
absl::MakeSpan(dot_dimension_numbers.lhs_contracting_dimensions()),
absl::MakeSpan(dot_dimension_numbers.rhs_contracting_dimensions()),
absl::MakeSpan(dot_dimension_numbers.lhs_batch_dimensions()),
absl::MakeSpan(dot_dimension_numbers.rhs_batch_dimensions()));
return std::move(h);
}
case HloOpcode::kConvolution: {
const auto& conv_dimension_numbers =
instruction->convolution_dimension_numbers();
h = H::combine(
std::move(h), conv_dimension_numbers.input_batch_dimension(),
conv_dimension_numbers.input_feature_dimension(),
absl::MakeSpan(conv_dimension_numbers.input_spatial_dimensions()),
conv_dimension_numbers.kernel_input_feature_dimension(),
conv_dimension_numbers.kernel_output_feature_dimension(),
absl::MakeSpan(conv_dimension_numbers.kernel_spatial_dimensions()),
conv_dimension_numbers.output_batch_dimension(),
conv_dimension_numbers.output_feature_dimension(),
absl::MakeSpan(conv_dimension_numbers.output_spatial_dimensions()));
return window_hash(std::move(h), instruction->window());
}
case HloOpcode::kReduceWindow:
return window_hash(std::move(h), instruction->window());
case HloOpcode::kConcatenate:
case HloOpcode::kBroadcast:
case HloOpcode::kTranspose:
case HloOpcode::kReduce:
return H::combine(std::move(h), instruction->dimensions());
case HloOpcode::kGetTupleElement:
return H::combine(std::move(h), instruction->tuple_index());
case HloOpcode::kCompare:
return H::combine(
std::move(h),
Cast<HloCompareInstruction>(instruction)->direction());
default:
return std::move(h);
}
}
HloInstruction* hlo;
};
}
absl::StatusOr<bool> HloCSE::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
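  // Operands are compared by identity, except iota instructions, which
  // compare equal when their iota dimension and shape match; this lets CSE
  // merge users of distinct but identical iotas.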
const auto eq_instructions = [&](const HloInstruction* a,
const HloInstruction* b) {
if (a == b) {
return true;
}
if (a->opcode() != b->opcode() || a->opcode() != HloOpcode::kIota) {
return false;
}
return a->dimensions(0) == b->dimensions(0) &&
(is_layout_sensitive_
? ShapeUtil::Equal(a->shape(), b->shape())
: ShapeUtil::Compatible(a->shape(), b->shape()));
};
const auto eq_computations = [](const HloComputation* lhs,
const HloComputation* rhs) {
return *lhs == *rhs;
};
auto cse_equal = [&](const CseKey& lhs, const CseKey& rhs) {
return lhs.hlo->IdenticalIgnoringCommutativeOperandOrder(
*rhs.hlo, eq_instructions, eq_computations, is_layout_sensitive_,
        /*sharding_sensitive=*/true);
};
for (auto* computation : module->computations(execution_threads)) {
if (only_fusion_computations_ && !computation->IsFusionComputation()) {
continue;
}
TF_ASSIGN_OR_RETURN(
bool combined,
is_layout_sensitive_
? CombineConstants<true>(computation, only_scalars_)
: CombineConstants<false>(computation, only_scalars_));
changed |= combined;
absl::flat_hash_set<CseKey, absl::Hash<CseKey>, decltype(cse_equal)>
representatives(computation->instruction_count() + 1,
absl::Hash<CseKey>{}, cse_equal);
for (auto instruction : computation->MakeInstructionPostOrder()) {
if (instruction->operand_count() == 0 &&
instruction->opcode() != HloOpcode::kPartitionId &&
instruction->opcode() != HloOpcode::kReplicaId) {
continue;
}
if (instruction->HasSideEffect()) {
continue;
}
if (only_scalars_ && !ShapeUtil::IsScalar(instruction->shape())) {
continue;
}
auto pair = representatives.insert(CseKey{instruction});
if (!pair.second) {
HloInstruction* equivalent_instruction = pair.first->hlo;
TF_RETURN_IF_ERROR(
instruction->ReplaceAllUsesWith(equivalent_instruction));
TF_RETURN_IF_ERROR(computation->RemoveInstructionAndUnusedOperands(
            instruction, /*cleanup=*/std::nullopt,
            /*ignore_control_dependencies=*/ignore_control_dependencies_));
VLOG(4) << "Replaced " << instruction->name() << " with "
<< equivalent_instruction->name();
changed = true;
continue;
}
for (int64_t i = 0; i < instruction->operand_count(); ++i) {
HloInstruction* a = instruction->mutable_operand(i);
if (a->opcode() != HloOpcode::kIota) {
continue;
}
for (int64_t j = i + 1; j < instruction->operand_count(); ++j) {
HloInstruction* b = instruction->mutable_operand(j);
if (a == b || !eq_instructions(a, b)) {
continue;
}
TF_RETURN_IF_ERROR(instruction->ReplaceOperandWith(j, a));
changed = true;
if (b->IsDead()) {
TF_RETURN_IF_ERROR(computation->RemoveInstruction(b));
}
}
}
}
if (auto fusion = computation->FusionInstruction()) {
if (fusion->IsMultiOutputFusion()) {
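      // If CSE collapsed duplicate fusion roots, redirect each
      // get-tuple-element user to a canonical root index so that all users
      // of equal outputs share one tuple element.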
absl::flat_hash_map<const HloInstruction*, int64_t>
root_to_unique_index;
int64_t root_index = 0;
HloInstruction* root = computation->root_instruction();
for (const HloInstruction* hlo : root->operands()) {
if (root_to_unique_index.find(hlo) == root_to_unique_index.end()) {
          root_to_unique_index[hlo] = root_index;
}
++root_index;
}
if (root_to_unique_index.size() < root->operand_count()) {
for (HloInstruction* user : fusion->users()) {
if (user->opcode() == HloOpcode::kGetTupleElement) {
const HloInstruction* fusion_root =
root->operand(user->tuple_index());
user->set_tuple_index(root_to_unique_index[fusion_root]);
}
}
}
}
}
}
return changed;
}
} | #include "xla/service/hlo_cse.h"
#include <memory>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include "absl/algorithm/container.h"
#include "absl/strings/substitute.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/layout_util.h"
#include "xla/literal.h"
#include "xla/service/hlo_parser.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/shape_util.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/literal_test_util.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
namespace xla {
namespace {
namespace op = xla::testing::opcode_matchers;
namespace m = xla::match;
class HloCseTest : public HloTestBase {
protected:
HloCseTest() {}
};
TEST_F(HloCseTest, CombineTwoConstants) {
auto builder = HloComputation::Builder(TestName());
auto constant1 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f)));
auto constant2 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f)));
builder.AddInstruction(HloInstruction::CreateBinary(
constant1->shape(), HloOpcode::kAdd, constant1, constant2));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_EQ(3, computation->instruction_count());
HloCSE cse(false);
EXPECT_TRUE(cse.Run(module.get()).value());
EXPECT_EQ(2, computation->instruction_count());
HloInstruction* constant = *computation->instructions().begin();
EXPECT_EQ(42.0f, constant->literal().Get<float>({}));
auto result = ExecuteAndTransfer(module->Clone(), {});
auto expected = LiteralUtil::CreateR0<float>(84.0);
EXPECT_TRUE(LiteralTestUtil::Near(expected, result, ErrorSpec(1e-4)));
}
TEST_F(HloCseTest, CombineTwoConstantsDifferentLayouts) {
auto builder = HloComputation::Builder(TestName());
auto constant1 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR2WithLayout<float>(
{{1.0, 2.0}, {3.0, 4.0}}, LayoutUtil::MakeLayout({0, 1}))));
auto constant2 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR2WithLayout<float>(
{{1.0, 2.0}, {3.0, 4.0}}, LayoutUtil::MakeLayout({1, 0}))));
auto add = builder.AddInstruction(HloInstruction::CreateBinary(
constant1->shape(), HloOpcode::kAdd, constant1, constant2));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_EQ(3, computation->instruction_count());
EXPECT_THAT(add, op::Add(constant1, constant2));
HloCSE cse(true);
EXPECT_FALSE(cse.Run(module.get()).value());
EXPECT_EQ(3, computation->instruction_count());
EXPECT_THAT(add, op::Add(constant1, constant2));
auto result = ExecuteAndTransfer(module->Clone(), {});
auto expected = LiteralUtil::CreateR2<float>({{2.0, 4.0}, {6.0, 8.0}});
EXPECT_TRUE(LiteralTestUtil::Near(expected, result, ErrorSpec(1e-4)));
}
TEST_F(HloCseTest, ConstantsSameValueDifferentType) {
auto builder = HloComputation::Builder(TestName());
std::vector<HloInstruction*> constants;
constants.push_back(builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<uint32_t>(42))));
constants.push_back(builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(42))));
constants.push_back(builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<uint64_t>(42.0))));
constants.push_back(builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int64_t>(42.0))));
constants.push_back(builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<double>(42.0))));
constants.push_back(builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f))));
constants.push_back(builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f))));
const Shape shape_r0 = ShapeUtil::MakeShape(F32, {});
for (int64_t i = 0; i < constants.size(); ++i) {
constants[i] = builder.AddInstruction(
HloInstruction::CreateConvert(shape_r0, constants[i]));
}
HloInstruction* root = builder.AddInstruction(HloInstruction::CreateBinary(
shape_r0, HloOpcode::kAdd, constants[0], constants[1]));
for (int64_t i = 2; i < constants.size(); ++i) {
root = builder.AddInstruction(HloInstruction::CreateBinary(
shape_r0, HloOpcode::kAdd, root, constants[i]));
}
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_EQ(20, computation->instruction_count());
HloCSE cse(false);
EXPECT_TRUE(cse.Run(module.get()).value());
EXPECT_EQ(18, computation->instruction_count());
}
TEST_F(HloCseTest, NonscalarConstants) {
auto builder = HloComputation::Builder(TestName());
auto common_constant1 = builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}})));
auto common_constant2 = builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}})));
auto uncommon_constant =
builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR2<float>({{2.0, 4.0}, {6.0, 8.0}})));
auto tuple = builder.AddInstruction(HloInstruction::CreateTuple(
{common_constant1, common_constant2, uncommon_constant}));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_EQ(4, computation->instruction_count());
EXPECT_THAT(tuple,
op::Tuple(common_constant1, common_constant2, uncommon_constant));
HloCSE cse(false);
EXPECT_TRUE(cse.Run(module.get()).value());
EXPECT_EQ(3, computation->instruction_count());
auto first_operand = tuple->operand(0);
EXPECT_THAT(first_operand,
::testing::AnyOf(common_constant1, common_constant2));
EXPECT_THAT(tuple,
op::Tuple(first_operand, first_operand, uncommon_constant));
}
TEST_F(HloCseTest, IdenticalInstructions) {
auto builder = HloComputation::Builder(TestName());
auto constant = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0)));
auto exp1 = builder.AddInstruction(HloInstruction::CreateUnary(
constant->shape(), HloOpcode::kExp, constant));
auto exp2 = builder.AddInstruction(HloInstruction::CreateUnary(
constant->shape(), HloOpcode::kExp, constant));
auto exp3 = builder.AddInstruction(HloInstruction::CreateUnary(
constant->shape(), HloOpcode::kExp, constant));
auto tuple =
builder.AddInstruction(HloInstruction::CreateTuple({exp1, exp2, exp3}));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_EQ(5, computation->instruction_count());
EXPECT_THAT(tuple, op::Tuple(exp1, exp2, exp3));
HloCSE cse(true);
EXPECT_TRUE(cse.Run(module.get()).value());
EXPECT_EQ(3, computation->instruction_count());
auto first_operand = tuple->operand(0);
EXPECT_THAT(first_operand, ::testing::AnyOf(exp1, exp2, exp3));
EXPECT_THAT(tuple, op::Tuple(first_operand, first_operand, first_operand));
}
TEST_F(HloCseTest, WhileLoopsIdenticalConditionsAndBodiesSameInput) {
const char* const hlo_string = R"(
HloModule WhileLoopsIdenticalConditionsAndBodiesSameInput
%body (param: (f32[], f32[])) -> (f32[], f32[]) {
%param = (f32[], f32[]) parameter(0)
%gte0 = get-tuple-element(%param), index=0
%gte1 = get-tuple-element(%param), index=1
%add = add(%gte0, %gte1)
ROOT %tuple = tuple(%gte0, %add)
}
%condition {
%param.1 = (f32[], f32[]) parameter(0)
ROOT %constant = pred[] constant(false)
}
%condition.1 {
%param.2 = (f32[], f32[]) parameter(0)
ROOT %constant.1 = pred[] constant(false)
}
ENTRY %WhileLoopsIdenticalConditionsAndBodiesSameInput {
%c0 = f32[] constant(1)
%c1 = f32[] constant(2)
%t = tuple(c0, c1)
%while = while(%t), condition=%condition, body=%body
%while.1 = while(%t), condition=%condition.1, body=%body
ROOT r = tuple(while, while.1)
})";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string));
auto computation = m->entry_computation();
EXPECT_EQ(6, computation->instruction_count());
HloCSE cse(true);
EXPECT_TRUE(cse.Run(m.get()).value());
EXPECT_EQ(5, computation->instruction_count());
}
TEST_F(HloCseTest, WhileLoopsIdenticalConditionsSameInputAndDifferentBodies) {
const char* const hlo_string = R"(
HloModule WhileLoopsIdenticalConditionsSameInputAndDifferentBodies
%body {
%param = (f32[], f32[]) parameter(0)
%get-tuple-element = get-tuple-element(%param), index=0
%get-tuple-element.1 = get-tuple-element(%param), index=1
%add = add(%get-tuple-element, %get-tuple-element.1)
ROOT %tuple = tuple(%get-tuple-element, %add)
}
%body2 {
%param.1 = (f32[], f32[]) parameter(0)
%get-tuple-element.2 = get-tuple-element(%param.1), index=0
%get-tuple-element.3 = get-tuple-element(%param.1), index=1
%sub = subtract(%get-tuple-element.2, %get-tuple-element.3)
ROOT %tuple.2 = tuple(%get-tuple-element.2, %sub)
}
%condition (param.2: (f32[], f32[])) -> pred[] {
%param.2 = (f32[], f32[]) parameter(0)
ROOT %constant = pred[] constant(false)
}
%condition.1 (param.3: (f32[], f32[])) -> pred[] {
%param.3 = (f32[], f32[]) parameter(0)
ROOT %constant.1 = pred[] constant(false)
}
ENTRY %WhileLoopsIdenticalConditionsSameInputAndDifferentBodies {
%constant.2 = f32[] constant(1)
%constant.3 = f32[] constant(2)
%tuple.1 = tuple(f32[] %constant.2, f32[] %constant.3)
%while = while(%tuple.1), condition=%condition, body=%body
ROOT %while.1 = while(%tuple.1), condition=%condition.1, body=%body2
})";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string));
auto computation = m->entry_computation();
EXPECT_EQ(5, computation->instruction_count());
HloCSE cse(true);
EXPECT_FALSE(cse.Run(m.get()).value());
EXPECT_EQ(5, computation->instruction_count());
}
TEST_F(HloCseTest, WhileLoopsIdenticalBodiesAndInputDifferentConditions) {
const char* const hlo_string = R"(
HloModule WhileLoopsIdenticalBodiesAndInputDifferentConditions
%body {
%param = (f32[], f32[]) parameter(0)
%get-tuple-element = get-tuple-element(%param), index=0
%get-tuple-element.1 = get-tuple-element((f32[], f32[]) %param), index=1
%add = add(%get-tuple-element, %get-tuple-element.1)
ROOT %tuple = tuple(%get-tuple-element, %add)
}
%condition {
%param.1 = (f32[], f32[]) parameter(0)
ROOT %constant = pred[] constant(false)
}
%condition.1 {
%param.2 = (f32[], f32[]) parameter(0)
ROOT %constant.1 = pred[] constant(true)
}
ENTRY %WhileLoopsIdenticalBodiesAndInputDifferentConditions {
%constant.2 = f32[] constant(1)
%constant.3 = f32[] constant(2)
%tuple.1 = tuple(%constant.2, %constant.3)
%while = while(%tuple.1), condition=%condition, body=%body
ROOT %while.1 = while(%tuple.1), condition=%condition.1, body=%body
})";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string));
auto computation = m->entry_computation();
EXPECT_EQ(5, computation->instruction_count());
HloCSE cse(true);
EXPECT_FALSE(cse.Run(m.get()).value());
EXPECT_EQ(5, computation->instruction_count());
}
TEST_F(HloCseTest, IdenticalInstructionsDifferentLayoutsSensitive) {
auto builder = HloComputation::Builder(TestName());
auto constant = builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}})));
auto exp1 = builder.AddInstruction(HloInstruction::CreateUnary(
constant->shape(), HloOpcode::kExp, constant));
*exp1->mutable_shape()->mutable_layout() = LayoutUtil::MakeLayout({0, 1});
auto exp2 = builder.AddInstruction(HloInstruction::CreateUnary(
constant->shape(), HloOpcode::kExp, constant));
*exp2->mutable_shape()->mutable_layout() = LayoutUtil::MakeLayout({1, 0});
auto tuple =
builder.AddInstruction(HloInstruction::CreateTuple({exp1, exp2}));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_EQ(4, computation->instruction_count());
EXPECT_THAT(tuple, op::Tuple(exp1, exp2));
HloCSE cse(true);
EXPECT_FALSE(cse.Run(module.get()).value());
EXPECT_EQ(4, computation->instruction_count());
EXPECT_THAT(tuple, op::Tuple(exp1, exp2));
}
TEST_F(HloCseTest, IdenticalInstructionsDifferentLayoutsInsensitive) {
auto builder = HloComputation::Builder(TestName());
auto constant = builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}})));
auto exp1 = builder.AddInstruction(HloInstruction::CreateUnary(
constant->shape(), HloOpcode::kExp, constant));
*exp1->mutable_shape()->mutable_layout() = LayoutUtil::MakeLayout({0, 1});
auto exp2 = builder.AddInstruction(HloInstruction::CreateUnary(
constant->shape(), HloOpcode::kExp, constant));
*exp2->mutable_shape()->mutable_layout() = LayoutUtil::MakeLayout({1, 0});
auto tuple =
builder.AddInstruction(HloInstruction::CreateTuple({exp1, exp2}));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_EQ(4, computation->instruction_count());
EXPECT_THAT(tuple, op::Tuple(exp1, exp2));
HloCSE cse(false);
EXPECT_TRUE(cse.Run(module.get()).value());
EXPECT_EQ(3, computation->instruction_count());
auto first_operand = tuple->operand(0);
EXPECT_THAT(first_operand, ::testing::AnyOf(exp1, exp2));
EXPECT_THAT(tuple, op::Tuple(first_operand, first_operand));
}
TEST_F(HloCseTest, FusionInternalCSE) {
auto module = CreateNewVerifiedModule();
auto builder = HloComputation::Builder(TestName());
const Shape shape_r0 = ShapeUtil::MakeShape(F32, {});
auto param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, shape_r0, "p0"));
auto param1 = builder.AddInstruction(
HloInstruction::CreateParameter(1, shape_r0, "p1"));
auto add1 = builder.AddInstruction(
HloInstruction::CreateBinary(shape_r0, HloOpcode::kAdd, param0, param1));
auto add2 = builder.AddInstruction(
HloInstruction::CreateBinary(shape_r0, HloOpcode::kAdd, param0, param1));
auto mul = builder.AddInstruction(
HloInstruction::CreateBinary(shape_r0, HloOpcode::kMultiply, add1, add2));
auto computation = module->AddEntryComputation(builder.Build());
auto fused_computation =
computation
->CreateFusionInstruction({mul, add1, add2},
HloInstruction::FusionKind::kLoop)
->fused_instructions_computation();
EXPECT_EQ(5, fused_computation->instruction_count());
HloCSE cse(false);
EXPECT_TRUE(cse.Run(module.get()).value());
EXPECT_EQ(4, fused_computation->instruction_count());
auto root = fused_computation->root_instruction();
EXPECT_THAT(root, op::Multiply(root->operand(0), root->operand(0)));
}
TEST_F(HloCseTest, IdenticalExpressions) {
auto builder = HloComputation::Builder(TestName());
auto constant = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0)));
auto negate1 = builder.AddInstruction(HloInstruction::CreateUnary(
constant->shape(), HloOpcode::kNegate, constant));
auto exp1 = builder.AddInstruction(HloInstruction::CreateUnary(
constant->shape(), HloOpcode::kExp, constant));
auto add1 = builder.AddInstruction(HloInstruction::CreateBinary(
constant->shape(), HloOpcode::kAdd, negate1, exp1));
auto negate2 = builder.AddInstruction(HloInstruction::CreateUnary(
constant->shape(), HloOpcode::kNegate, constant));
auto exp2 = builder.AddInstruction(HloInstruction::CreateUnary(
constant->shape(), HloOpcode::kExp, constant));
auto add2 = builder.AddInstruction(HloInstruction::CreateBinary(
constant->shape(), HloOpcode::kAdd, negate2, exp2));
auto tuple =
builder.AddInstruction(HloInstruction::CreateTuple({add1, add2}));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_EQ(8, computation->instruction_count());
EXPECT_THAT(tuple, op::Tuple(op::Add(negate1, exp1), op::Add(negate2, exp2)));
HloCSE cse(false);
EXPECT_TRUE(cse.Run(module.get()).value());
EXPECT_EQ(5, computation->instruction_count());
auto operand = tuple->operand(0);
EXPECT_THAT(tuple, op::Tuple(operand, operand));
EXPECT_THAT(operand, op::Add(op::Negate(), op::Exp()));
}
TEST_F(HloCseTest, DoNotCombineRng) {
auto builder = HloComputation::Builder(TestName());
auto constant1 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.0f)));
auto constant2 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0f)));
auto rng1 = builder.AddInstruction(HloInstruction::CreateRng(
ShapeUtil::MakeShape(F32, {}), RandomDistribution::RNG_UNIFORM,
{constant1, constant2}));
auto rng2 = builder.AddInstruction(HloInstruction::CreateRng(
ShapeUtil::MakeShape(F32, {}), RandomDistribution::RNG_UNIFORM,
{constant1, constant2}));
builder.AddInstruction(HloInstruction::CreateBinary(
constant1->shape(), HloOpcode::kAdd, rng1, rng2));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
HloInstruction* root = computation->root_instruction();
EXPECT_THAT(root, op::Add(rng1, rng2));
uint32_t count_before = computation->instruction_count();
HloCSE cse(false);
EXPECT_FALSE(cse.Run(module.get()).value());
uint32_t count_after = computation->instruction_count();
EXPECT_EQ(count_before, count_after);
root = computation->root_instruction();
EXPECT_THAT(root, op::Add(rng1, rng2));
}
TEST_F(HloCseTest, DoNotCombineOpsWithDifferentShardings) {
const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
constant.68 = s32[1]{0} constant({0})
custom-call.82 = s32[1]{0} custom-call(constant.68), custom_call_target="Sharding", sharding={replicated}
custom-call.1343 = s32[1]{0} custom-call(constant.68), custom_call_target="Sharding", sharding={manual}
custom-call.1344 = s32[8]{0} custom-call(custom-call.1343), custom_call_target="SPMDShardToFullShape", sharding={devices=[8]0,1,2,3,4,5,6,7}
ROOT tuple = (s32[1]{0}, s32[8]{0}) tuple(custom-call.82, custom-call.1344)
})";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string));
HloCSE cse(false);
EXPECT_FALSE(cse.Run(m.get()).value());
}
TEST_F(HloCseTest, DoNotCombineCallsToImpureFunctions) {
auto module = CreateNewVerifiedModule();
HloComputation* rng_function = nullptr;
{
Shape scalar_shape = ShapeUtil::MakeShape(F32, {});
auto builder = HloComputation::Builder(TestName() + "_rng_fun");
auto constant1 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.0f)));
auto constant2 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0f)));
auto rng = builder.AddInstruction(HloInstruction::CreateRng(
scalar_shape, RandomDistribution::RNG_UNIFORM, {constant1, constant2}));
auto param = builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {}), "param"));
builder.AddInstruction(HloInstruction::CreateBinary(
scalar_shape, HloOpcode::kAdd, rng, param));
rng_function = module->AddEmbeddedComputation(builder.Build());
}
HloComputation* computation = nullptr;
{
auto builder = HloComputation::Builder(TestName());
auto constant = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR1<float>({5.0f})));
auto rng1 = builder.AddInstruction(
HloInstruction::CreateMap(constant->shape(), {constant}, rng_function));
auto rng2 = builder.AddInstruction(
HloInstruction::CreateMap(constant->shape(), {constant}, rng_function));
builder.AddInstruction(HloInstruction::CreateBinary(
constant->shape(), HloOpcode::kAdd, rng1, rng2));
computation = module->AddEntryComputation(builder.Build());
}
EXPECT_EQ(4, computation->instruction_count());
HloInstruction* root = computation->root_instruction();
EXPECT_THAT(root, op::Add(op::Map(), op::Map()));
VLOG(3) << "before: " << module->ToString();
HloCSE cse(false);
EXPECT_FALSE(cse.Run(module.get()).value());
VLOG(3) << "after: " << module->ToString();
EXPECT_EQ(4, computation->instruction_count());
root = computation->root_instruction();
EXPECT_THAT(root, op::Add(op::Map(op::Constant()), op::Map(op::Constant())));
}
TEST_F(HloCseTest, CompareComputations) {
const char* const hlo_string = R"(
HloModule m
add_computation {
add_lhs = f32[] parameter(0)
add_rhs = f32[] parameter(1)
ROOT add_root = add(add_lhs, add_rhs)
}
add_computation2 {
add_lhs2 = f32[] parameter(0)
add_rhs2 = f32[] parameter(1)
ROOT add_root2 = add(add_lhs2, add_rhs2)
}
ENTRY entry {
p = f32[10]{0} parameter(0)
c = f32[] constant(0)
r1 = reduce(p, c), dimensions={0}, to_apply=add_computation
r2 = reduce(p, c), dimensions={0}, to_apply=add_computation2
ROOT f2 = tuple(r1, r2)
})";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string));
HloCSE cse(false);
EXPECT_TRUE(cse.Run(m.get()).value());
HloInstruction* root = m->entry_computation()->root_instruction();
EXPECT_EQ(root->operand(0), root->operand(1));
}
TEST_F(HloCseTest, Domain) {
const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
%param = f32[] parameter(0), sharding={maximal device=0}
%domain.0 = f32[] domain(%param),
domain={kind="sharding", entry={maximal device=0}, exit={maximal device=1}}
%domain.1 = f32[] domain(%param),
domain={kind="sharding", entry={maximal device=0}, exit={maximal device=1}}
%domain.2 = f32[] domain(%param),
domain={kind="sharding", entry={maximal device=0}, exit={maximal device=2}}
%negate.0 = f32[] negate(%domain.0)
%negate.1 = f32[] negate(%domain.1)
%negate.2 = f32[] negate(%domain.2)
%domain.3 = f32[] domain(%negate.0),
domain={kind="sharding", entry={maximal device=1}, exit={maximal device=0}}
%domain.4 = f32[] domain(%negate.1),
domain={kind="sharding", entry={maximal device=1}, exit={maximal device=0}}
%domain.5 = f32[] domain(%negate.2),
domain={kind="sharding", entry={maximal device=2}, exit={maximal device=0}}
%add = f32[] add(%domain.3, %domain.4)
ROOT %sub = f32[] subtract(%add, %domain.5)
})";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string));
HloCSE cse(false);
EXPECT_TRUE(cse.Run(m.get()).value());
const HloInstruction* sub = m->entry_computation()->root_instruction();
const HloInstruction* add = sub->operand(0);
EXPECT_EQ(add->operand(0), add->operand(1));
EXPECT_NE(add->operand(0), sub->operand(1));
EXPECT_NE(add->operand(1), sub->operand(1));
}
TEST_F(HloCseTest, Iota) {
const char* const hlo_string = R"(
HloModule m
ENTRY entry {
i1 = s64[16,16] iota(), iota_dimension=0
i2 = s64[16,16] iota(), iota_dimension=0
i3 = s64[17,16] iota(), iota_dimension=0
i4 = s64[16,16] iota(), iota_dimension=1
ROOT root = (s64[16,16], s64[16,16], s64[17,16], s64[16,16]) tuple(i1, i2, i3, i4)
})";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string));
HloCSE cse(false);
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&cse, m.get()));
EXPECT_TRUE(changed);
HloInstruction* root = m->entry_computation()->root_instruction();
EXPECT_EQ(root->operand(0), root->operand(1));
EXPECT_NE(root->operand(0), root->operand(2));
EXPECT_NE(root->operand(0), root->operand(3));
}
TEST_F(HloCseTest, OptimizationBarrier) {
const char* const hlo_string = R"(
HloModule m
ENTRY entry {
%param.0 = f32[] parameter(0)
%param.1 = f32[] parameter(1)
%add.0 = f32[] add(%param.0, %param.1)
%cse_tmp.0 = (f32[], f32[], f32[]) tuple(%param.0, %param.1, %add.0)
%cse_tmp.1 = (f32[], f32[], f32[]) opt-barrier(%cse_tmp.0)
%param.0.1 = f32[] get-tuple-element(%cse_tmp.1), index=0
%param.1.1 = f32[] get-tuple-element(%cse_tmp.1), index=1
%add.0.1 = f32[] get-tuple-element(%cse_tmp.1), index=2
%add.1 = f32[] add(%param.0.1, %param.1.1)
ROOT %add.2 = f32[] add(%add.1, %add.0.1)
})";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string));
HloCSE cse(false);
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&cse, m.get()));
EXPECT_FALSE(changed);
}
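// With the only_scalars flag (assumed to be the fourth constructor argument)
// set, the duplicate f32[] constants merge but the f32[2] constants do not,
// leaving three constants in total.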
TEST_F(HloCseTest, OnlyScalar) {
const char* const hlo_string = R"(
HloModule m
ENTRY entry {
%const1 = f32[] constant(1)
%const2 = f32[] constant(1)
%const3 = f32[2] constant({1,2})
%const4 = f32[2] constant({1,2})
%add.0 = f32[] add(%const1, %const2)
%add.1 = f32[2] add(%const3, %const4)
ROOT out = (f32[], f32[2]) tuple(%add.0, %add.1)
})";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string));
HloCSE cse(/*is_layout_sensitive=*/false, /*only_fusion_computations=*/false,
/*ignore_control_dependencies=*/false, /*only_scalars=*/true);
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&cse, m.get()));
EXPECT_TRUE(changed);
EXPECT_EQ(absl::c_count_if(m->entry_computation()->instructions(),
[](const HloInstruction* instruction) {
return instruction->IsConstant();
}),
3);
}
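// Parameterized over (op1, op2, should_cse): op0 and op1 are always
// identical and must merge; whether op2 joins them depends on the
// custom-call attribute being varied.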
class HloCseCustomCallTest
: public HloCseTest,
public ::testing::WithParamInterface<std::tuple<
std::string /*op1*/, std::string /*op2*/, bool /*should_cse*/>> {};
TEST_P(HloCseCustomCallTest, DoIt) {
std::string op1 = std::get<0>(GetParam());
std::string op2 = std::get<1>(GetParam());
bool should_cse = std::get<2>(GetParam());
const char* const hlo_string_tmpl = R"(
HloModule m
ENTRY entry {
p0 = f32[1,1,1] parameter(0)
op0 = $0
op1 = $0
op2 = $1
ROOT root = tuple(op0, op1, op2)
}
)";
std::string hlo_string = absl::Substitute(hlo_string_tmpl, op1, op2);
SCOPED_TRACE(absl::StrCat("Module before CSE:\n", hlo_string));
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string));
HloCSE cse(false);
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&cse, m.get()));
SCOPED_TRACE(absl::StrCat("Module after CSE:\n", m->ToString()));
EXPECT_EQ(changed, true);
HloInstruction* root = m->entry_computation()->root_instruction();
EXPECT_EQ(root->operand(0), root->operand(1))
<< "Identical ops should be CSE'ed";
if (should_cse) {
EXPECT_EQ(root->operand(0), root->operand(2)) << "Ops should be CSE'ed";
} else {
EXPECT_NE(root->operand(0), root->operand(2)) << "Ops should not be CSE'ed";
}
}
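// The build() helper pairs two custom-calls that differ in exactly one
// attribute and are therefore expected not to be CSE'ed (should_cse=false).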
static std::vector<
std::tuple<std::string /*op1*/, std::string /*op2*/, bool /*should_cse*/>>
CustomCallTests() {
auto build = [](absl::string_view args1, absl::string_view args2) {
absl::string_view prefix =
"f32[] custom-call(p0), custom_call_target=\"foo\", ";
return std::make_tuple(absl::StrCat(prefix, args1),
absl::StrCat(prefix, args2), false);
};
return {
{
"f32[] custom-call(p0), custom_call_target=\"foo\"",
"f32[] custom-call(p0), custom_call_target=\"foo\", "
"metadata={op_name=\"bar\"}",
true,
},
{
"f32[] custom-call(p0), custom_call_target=\"foo\"",
"f32[] custom-call(p0, p0), custom_call_target=\"foo\"",
false,
},
{
"f32[1] custom-call(p0), custom_call_target=\"foo\"",
"f32[2] custom-call(p0), custom_call_target=\"foo\"",
false,
},
{
"f32[] custom-call(p0), custom_call_target=\"foo\"",
"f32[] custom-call(p0), custom_call_target=\"bar\"",
false,
},
build("window={size=1}", "window={size=2}"),
build("dim_labels=b0f_0oi->b0f", "dim_labels=b0f_0oi->bf0"),
build("backend_config=\"foo\"", "backend_config=\"bar\""),
build("literal=s32[] 0", "literal=s32[] 1"),
build("literal=s32[] 0", "literal=f32[] 0"),
build("operand_precision={high,default}",
"operand_precision={high, high}"),
build("api_version=API_VERSION_STATUS_RETURNING",
"api_version=API_VERSION_ORIGINAL"),
build("feature_group_count=0", "feature_group_count=1"),
};
}
INSTANTIATE_TEST_SUITE_P(HloCseCustomCallTestSuite, HloCseCustomCallTest,
::testing::ValuesIn(CustomCallTests()));
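// Custom-calls with different called_computations lists are treated as
// distinct even when every entry names the same computation.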
TEST_F(HloCseTest, CustomCallCalledComputations) {
const char* const hlo_string = R"(
HloModule m
comp {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT maximum = f32[] maximum(lhs, rhs)
}
ENTRY entry {
p0 = f32[] parameter(0)
op0 = f32[] custom-call(p0), custom_call_target="foo", called_computations={comp}
op1 = f32[] custom-call(p0), custom_call_target="foo", called_computations={comp, comp}
ROOT root = tuple(op0, op1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string));
HloCSE cse(false);
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&cse, m.get()));
SCOPED_TRACE(absl::StrCat("Module after CSE:\n", m->ToString()));
EXPECT_EQ(changed, false);
}
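// Custom-calls marked custom_call_has_side_effect=true must never be
// deduplicated, even when they are textually identical.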
TEST_F(HloCseTest, CustomCallSideEffects) {
const char* const hlo_string = R"(
HloModule m
ENTRY entry {
p0 = f32[] parameter(0)
op0 = f32[] custom-call(p0), custom_call_target="foo", custom_call_has_side_effect=true
op1 = f32[] custom-call(p0), custom_call_target="foo", custom_call_has_side_effect=true
ROOT root = tuple(op0, op1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string));
HloCSE cse(false);
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&cse, m.get()));
SCOPED_TRACE(absl::StrCat("Module after CSE:\n", m->ToString()));
EXPECT_EQ(changed, false);
}
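// With ignore_control_dependencies (assumed to be the third constructor
// argument) set, ar0 and ar2 should merge despite the control chain
// ar0 -> ar1 -> ar2 between them.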
TEST_F(HloCseTest, IgnoreControlDependencies) {
const char* const hlo_string = R"(
HloModule m
%add {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT x = f32[] add(p0, p1)
}
ENTRY entry {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ar0 = f32[] all-reduce(p0), replica_groups={}, to_apply=%add
ar1 = f32[] all-reduce(p1), replica_groups={}, to_apply=%add, control-predecessors={ar0}
ar2 = f32[] all-reduce(p0), replica_groups={}, to_apply=%add, control-predecessors={ar1}
ROOT root = tuple(ar0, ar1, ar2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string));
HloCSE cse(/*is_layout_sensitive=*/false, /*only_fusion_computations=*/false,
/*ignore_control_dependencies=*/true);
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&cse, m.get()));
SCOPED_TRACE(absl::StrCat("Module after CSE:\n", m->ToString()));
EXPECT_EQ(changed, true);
}
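// CSE inside a fusion computation: the duplicate adds in f collapse first,
// after which the two get-tuple-elements in the entry become identical and
// merge as well.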
TEST_F(HloCseTest, MultiOutputFusion) {
const char* const hlo_string = R"(
HloModule m
f {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
add.0 = f32[] add(p0, p1)
add.1 = f32[] add(p0, p1)
ROOT res = (f32[], f32[]) tuple(add.0, add.1)
}
ENTRY entry {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
fusion = (f32[], f32[]) fusion(p0, p1), kind=kLoop, calls=f
gte0 = f32[] get-tuple-element(fusion), index=0
gte1 = f32[] get-tuple-element(fusion), index=1
ROOT res = (f32[], f32[]) tuple(gte0, gte1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo_string));
HloCSE cse(false);
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&cse, m.get()));
SCOPED_TRACE(absl::StrCat("Module after CSE:\n", m->ToString()));
EXPECT_EQ(changed, true);
HloInstruction* root = m->entry_computation()->root_instruction();
HloInstruction* add0;
HloInstruction* add1;
HloInstruction* gte0;
HloInstruction* gte1;
ASSERT_THAT(root, GmockMatch(m::Tuple(m::GetTupleElement(>e0),
m::GetTupleElement(>e1))));
EXPECT_EQ(gte0, gte1);
EXPECT_EQ(gte0->tuple_index(), 0);
const HloInstruction* fusion = gte0->operand(0);
ASSERT_THAT(
fusion->fused_expression_root(),
GmockMatch(m::Tuple(m::Add(&add0, m::Parameter(0), m::Parameter(1)),
m::Add(&add1, m::Parameter(0), m::Parameter(1)))));
EXPECT_EQ(add0, add1);
}
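// For each commutative opcode in the instantiation below, op(p0, p1) and
// op(p1, p0) are expected to be recognized as the same value and merged.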
class HloCseCommutativeOpTest
: public HloCseTest,
public ::testing::WithParamInterface<std::string /*op*/> {};
TEST_P(HloCseCommutativeOpTest, DoIt) {
std::string op = GetParam();
const char* kModuleStr = R"(
HloModule m
ENTRY test {
p0 = s32[10] parameter(0)
p1 = s32[10] parameter(1)
op1 = s32[10] $0(p0, p1)
op2 = s32[10] $0(p1, p0)
ROOT t = tuple(op1, op2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(
absl::Substitute(kModuleStr, op)));
ASSERT_TRUE(HloCSE(false).Run(module.get()).value());
SCOPED_TRACE(module->ToString());
const HloInstruction* op0;
const HloInstruction* op1;
ASSERT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Tuple(m::Op(&op0), m::Op(&op1))));
EXPECT_EQ(op0, op1);
}
INSTANTIATE_TEST_SUITE_P(AlgebraicSimplifierCanonicalizeCommutativeTestSuite,
HloCseCommutativeOpTest,
::testing::Values("add", "multiply", "and", "or",
"xor", "minimum", "maximum"));
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_cse.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_cse_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
c351440d-fff6-4367-a193-998c9dc9bbaf | cpp | tensorflow/tensorflow | control_flow_ops | tensorflow/core/ops/control_flow_ops.cc | tensorflow/core/ops/control_flow_ops_test.cc | #include <vector>
#include "tensorflow/core/framework/common_shape_fns.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/shape_inference.h"
namespace tensorflow {
using shape_inference::InferenceContext;
using shape_inference::ShapeHandle;
namespace {
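// Switch forwards the data shape (plus any resource handle shape/type
// information) to both branch outputs after checking that the predicate is
// a scalar.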
Status SwitchShape(InferenceContext* c) {
ShapeHandle unused;
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 0, &unused));
ShapeHandle out = c->input(0);
c->set_output(0, out);
c->set_output(1, out);
auto* handle_data = c->input_handle_shapes_and_types(0);
if (handle_data != nullptr) {
c->set_output_handle_shapes_and_types(0, *handle_data);
c->set_output_handle_shapes_and_types(1, *handle_data);
}
return absl::OkStatus();
}
Status SwitchNShape(InferenceContext* c) {
ShapeHandle unused;
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 0, &unused));
ShapeHandle out = c->input(0);
int num_outs;
TF_RETURN_IF_ERROR(c->GetAttr("num_outs", &num_outs));
for (int i = 0; i < num_outs; i++) {
c->set_output(i, out);
}
auto* handle_data = c->input_handle_shapes_and_types(0);
if (handle_data != nullptr) {
for (int i = 0; i < num_outs; i++) {
c->set_output_handle_shapes_and_types(i, *handle_data);
}
}
return absl::OkStatus();
}
}
REGISTER_OP("Switch")
.Input("data: T")
.Input("pred: bool")
.Output("output_false: T")
.Output("output_true: T")
.Attr("T: type")
.SetForwardTypeFn(full_type::ReplicateInput(0, 2))
.SetShapeFn(SwitchShape);
REGISTER_OP("RefSwitch")
.Input("data: Ref(T)")
.Input("pred: bool")
.Output("output_false: Ref(T)")
.Output("output_true: Ref(T)")
.Attr("T: type")
.SetAllowsUninitializedInput()
.SetShapeFn(SwitchShape);
REGISTER_OP("_SwitchN")
.Input("data: T")
.Input("output_index: int32")
.Output("outputs: num_outs * T")
.Attr("num_outs: int >= 1")
.Attr("T: type")
.SetShapeFn(SwitchNShape);
REGISTER_OP("RefSelect")
.Input("index: int32")
.Input("inputs: Ref(N * T)")
.Output("output: Ref(T)")
.Attr("T: type")
.Attr("N: int >= 1")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle unused;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 0, &unused));
ShapeHandle first_input = c->input(1);
if (!c->FullyDefined(first_input)) {
c->set_output(0, c->UnknownShape());
return absl::OkStatus();
}
for (int i = 2; i < c->num_inputs(); ++i) {
ShapeHandle input = c->input(i);
if (!c->FullyDefined(input) ||
!c->Merge(first_input, input, &unused).ok()) {
c->set_output(0, c->UnknownShape());
return absl::OkStatus();
}
}
c->set_output(0, first_input);
return absl::OkStatus();
});
namespace {
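// Merge keeps a dimension only where every input agrees on rank and extent;
// e.g. inputs [2,1];[2,1];[3,1] infer to [?,1] (see the unit test), and any
// rank mismatch degrades the output to an unknown shape.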
Status MergeShape(InferenceContext* c) {
ShapeHandle out = c->input(0);
if (!c->RankKnown(out)) {
out = c->UnknownShape();
} else {
int32_t rank = c->Rank(out);
for (int i = 1; i < c->num_inputs(); ++i) {
ShapeHandle input = c->input(i);
if (!c->RankKnown(input) || c->Rank(input) != rank) {
out = c->UnknownShape();
break;
}
for (int d = 0; d < rank; ++d) {
if (c->Value(c->Dim(input, d)) != c->Value(c->Dim(out, d))) {
TF_RETURN_IF_ERROR(c->ReplaceDim(out, d, c->UnknownDim(), &out));
}
}
}
}
c->set_output(0, out);
c->set_output(1, c->Scalar());
return absl::OkStatus();
}
TypeInferenceFn MergeTypeFn() {
std::vector<TypeInferenceFn> func_list{full_type::Merge(),
full_type::Tensor(TFT_INT32)};
return full_type::Tuple(func_list);
}
}
REGISTER_OP("Merge")
.Input("inputs: N * T")
.Output("output: T")
.Output("value_index: int32")
.Attr("T: type")
.Attr("N: int >= 1")
.SetForwardTypeFn(MergeTypeFn())
.SetShapeFn(MergeShape);
REGISTER_OP("RefMerge")
.Input("inputs: Ref(N * T)")
.Output("output: Ref(T)")
.Output("value_index: int32")
.Attr("T: type")
.Attr("N: int >= 1")
.SetShapeFn(MergeShape);
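// Enter reports an unknown output shape unless is_constant is set, in which
// case the input shape is forwarded as-is.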
REGISTER_OP("Enter")
.Input("data: T")
.Output("output: T")
.Attr("T: type")
.Attr("frame_name: string")
.Attr("is_constant: bool = false")
.Attr("parallel_iterations: int = 10")
.SetForwardTypeFn(full_type::ReplicateInput())
.SetShapeFn([](InferenceContext* c) {
c->set_output(0, c->UnknownShape());
auto* handle_data = c->input_handle_shapes_and_types(0);
if (handle_data != nullptr) {
c->set_output_handle_shapes_and_types(0, *handle_data);
}
bool is_constant;
TF_RETURN_IF_ERROR(c->GetAttr("is_constant", &is_constant));
if (is_constant) {
c->set_output(0, c->input(0));
}
return absl::OkStatus();
});
REGISTER_OP("RefEnter")
.Input("data: Ref(T)")
.Output("output: Ref(T)")
.Attr("T: type")
.Attr("frame_name: string")
.Attr("is_constant: bool = false")
.Attr("parallel_iterations: int = 10")
.SetShapeFn(shape_inference::UnchangedShape);
REGISTER_OP("Exit")
.Input("data: T")
.Output("output: T")
.Attr("T: type")
.SetForwardTypeFn(full_type::ReplicateInput())
.SetShapeFn(shape_inference::UnchangedShape);
REGISTER_OP("RefExit")
.Input("data: Ref(T)")
.Output("output: Ref(T)")
.Attr("T: type")
.SetShapeFn(shape_inference::UnchangedShape);
REGISTER_OP("NextIteration")
.Input("data: T")
.Output("output: T")
.Attr("T: type")
.SetForwardTypeFn(full_type::ReplicateInput())
.SetShapeFn(shape_inference::UnchangedShape);
REGISTER_OP("RefNextIteration")
.Input("data: Ref(T)")
.Output("output: Ref(T)")
.Attr("T: type")
.SetShapeFn(shape_inference::UnchangedShape);
REGISTER_OP("LoopCond")
.Input("input: bool")
.Output("output: bool")
.SetShapeFn([](InferenceContext* c) {
return shape_inference::UnchangedShapeWithRank(c, 0);
});
REGISTER_OP("ControlTrigger").SetShapeFn(shape_inference::NoOutputs);
REGISTER_OP("Abort")
.Attr("error_msg: string = ''")
.Attr("exit_without_error: bool = false")
.SetShapeFn(shape_inference::NoOutputs);
} | #include <memory>
#include "tensorflow/core/common_runtime/type_inference.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/shape_inference_testutil.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
TEST(ControlFlowOpsTest, Merge_ShapeFn) {
ShapeInferenceTestOp op("Merge");
int n = 3;
std::vector<NodeDefBuilder::NodeOut> src_list;
src_list.reserve(n);
for (int i = 0; i < n; ++i) src_list.emplace_back("a", 0, DT_FLOAT);
TF_ASSERT_OK(NodeDefBuilder("test", "Merge")
.Input(src_list)
.Attr("N", n)
.Finalize(&op.node_def));
INFER_OK(op, "?;?;?", "?;[]");
INFER_OK(op, "[2,1];?;[2,1]", "?;[]");
INFER_OK(op, "[2,1];[2,1];?", "?;[]");
INFER_OK(op, "[2,1];[2,1];[3,1,2]", "?;[]");
INFER_OK(op, "[2,1];[2,1];[3,1]", "[?,d0_1];[]");
INFER_OK(op, "[2,1];[2,2];[3,1]", "[?,?];[]");
INFER_OK(op, "[2,1];[2,1];[2,1]", "in0;[]");
}
TEST(ControlFlowOpsTest, SwitchN_ShapeFn) {
ShapeInferenceTestOp op("_SwitchN");
int n = 5;
TF_ASSERT_OK(NodeDefBuilder("test", "_SwitchN")
.Input({"d", 0, DT_FLOAT})
.Input({"bi", 0, DT_INT32})
.Attr("num_outs", n)
.Finalize(&op.node_def));
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "?;[2]");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "?;[1]");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "?;[?]");
INFER_OK(op, "?;?", "in0;in0;in0;in0;in0");
INFER_OK(op, "[2,?];?", "in0;in0;in0;in0;in0");
INFER_OK(op, "[2,?];[]", "in0;in0;in0;in0;in0");
INFER_OK(op, "[2,3];[]", "in0;in0;in0;in0;in0");
}
TEST(ControlFlowOpsTest, RefSelect_ShapeFn) {
ShapeInferenceTestOp op("RefSelect");
int n = 3;
std::vector<NodeDefBuilder::NodeOut> src_list;
src_list.reserve(n);
for (int i = 0; i < n; ++i) src_list.emplace_back("a", 1, DT_FLOAT_REF);
TF_ASSERT_OK(NodeDefBuilder("test", "RefSelect")
.Input("index", 0, DT_INT32)
.Input(src_list)
.Attr("N", n)
.Finalize(&op.node_def));
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "[2];?;?;?");
INFER_OK(op, "?;?;?;?", "?");
INFER_OK(op, "[];?;?;?", "?");
INFER_OK(op, "[];[1,2,3];?;?", "?");
INFER_OK(op, "[];[1,2,3];[1,2,?];[1,2,3]", "?");
INFER_OK(op, "[];[1,2,3];[1,2];[1,2,3]", "?");
INFER_OK(op, "[];[1,2,3];[1,2,4];[1,2,3]", "?");
INFER_OK(op, "[];[1,2,3];[1,2,3];[1,2,3]", "in1");
}
static Status type_inference(Graph& graph) {
GraphOptimizationPassOptions opt_options;
std::unique_ptr<Graph> graph_ptr(new Graph(OpRegistry::Global()));
graph_ptr->Copy(graph);
opt_options.graph = &graph_ptr;
opt_options.flib_def = graph.mutable_flib_def();
TypeInferencePass pass;
return pass.Run(opt_options);
}
REGISTER_OP("ControlFlowOpsTest>ConstTypeCtor")
.Output("output: dtype")
.Attr("value: tensor")
.Attr("dtype: type")
.SetTypeConstructor(full_type::Unary(TFT_TENSOR, "dtype"))
.SetShapeFn(shape_inference::UnknownShape);
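// Checks that Merge's forward type function (full_type::Merge plus a
// TFT_INT32 tensor for value_index) produces a TFT_PRODUCT of a float
// tensor and an int32 tensor.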
TEST(ControlFlowOpsTest, Merge_TypeInfrnc) {
Graph graph(OpRegistry::Global());
Node* input_tensor_op1;
TensorProto tensor_proto1;
TF_EXPECT_OK(
NodeBuilder("input_tensor_op1", "ControlFlowOpsTest>ConstTypeCtor")
.Attr("value", tensor_proto1)
.Attr("dtype", DT_FLOAT)
.Finalize(&graph, &input_tensor_op1));
Node* input_tensor_op2;
TensorProto tensor_proto2;
TF_EXPECT_OK(
NodeBuilder("input_tensor_op2", "ControlFlowOpsTest>ConstTypeCtor")
.Attr("value", tensor_proto2)
.Attr("dtype", DT_FLOAT)
.Finalize(&graph, &input_tensor_op2));
Node* shape_op;
TF_EXPECT_OK(NodeBuilder("merge_op", "Merge")
.Input({input_tensor_op1, input_tensor_op2})
.Attr("T", DT_FLOAT)
.Finalize(&graph, &shape_op));
TF_EXPECT_OK(type_inference(graph));
FullTypeDef expected_shape_op_t;
protobuf::TextFormat::Parser parser;
CHECK(parser.ParseFromString(
R"pb(type_id: TFT_PRODUCT
args {
type_id: TFT_TENSOR
args { type_id: TFT_FLOAT }
}
args {
type_id: TFT_TENSOR
args { type_id: TFT_INT32 }
})pb",
&expected_shape_op_t));
EXPECT_TRUE(full_type::IsEqual(shape_op->def().experimental_type(),
expected_shape_op_t))
<< "fulltype is\n"
<< shape_op->def().experimental_type().DebugString() << "\nexpected\n"
<< expected_shape_op_t.DebugString();
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/ops/control_flow_ops.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/ops/control_flow_ops_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
26620278-c5dd-4003-a0f9-3124c44bf565 | cpp | tensorflow/tensorflow | list_stack | tensorflow/lite/kernels/variants/list_kernels/list_stack.cc | tensorflow/lite/kernels/variants/list_kernels/list_stack_test.cc | #include <cstring>
#include <utility>
#include "tensorflow/lite/array.h"
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/kernels/variants/list_ops_util.h"
#include "tensorflow/lite/kernels/variants/tensor_array.h"
namespace tflite {
namespace variants {
namespace ops {
namespace {
constexpr int kListInput = 0;
constexpr int kShapeInput = 1;
constexpr int kTensorOutput = 0;
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
const TfLiteTensor* list_input;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kListInput, &list_input));
TF_LITE_ENSURE_TYPES_EQ(context, list_input->type, kTfLiteVariant);
const TfLiteTensor* shape_input;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kShapeInput, &shape_input));
TF_LITE_ENSURE_TYPES_EQ(context, shape_input->type, kTfLiteInt32);
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kTensorOutput, &output));
SetTensorToDynamic(output);
return kTfLiteOk;
}
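// Eval resolves the stacked element shape by merging three sources: the
// shape input tensor, the list's declared element shape, and the shape
// shared by all present elements; the merge must yield a fully defined
// shape or the kernel fails.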
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* list_input;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kListInput, &list_input));
TF_LITE_ENSURE_EQ(context, list_input->allocation_type, kTfLiteVariantObject);
TensorArray* arr = static_cast<TensorArray*>(
static_cast<VariantData*>(list_input->data.data));
const TfLiteTensor* shape_input;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kShapeInput, &shape_input));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kTensorOutput, &output));
TF_LITE_ENSURE_TYPES_EQ(context, output->type, arr->ElementType());
IntArrayUniquePtr cur_shape_suffix;
TF_LITE_ENSURE_OK(context, GetShapeIfAllEqual(*arr, cur_shape_suffix));
cur_shape_suffix = MergeShapesOrNull(
MergeShapesOrNull(TensorAsShape(*shape_input),
BuildTfLiteArray(*arr->ElementShape())),
std::move(cur_shape_suffix));
TF_LITE_ENSURE_MSG(
context,
cur_shape_suffix != nullptr && IsShapeFullyDefined(*cur_shape_suffix),
"Shapes from input, list and elements are not compatible "
"or do not resolve to fully defined shape.");
IntArrayUniquePtr final_output_shape;
const bool suffix_is_not_scalar =
!(cur_shape_suffix->size == 0 ||
(cur_shape_suffix->size == 1 && cur_shape_suffix->data[0] == 1));
if (suffix_is_not_scalar) {
final_output_shape = BuildTfLiteArray(cur_shape_suffix->size + 1);
memcpy(final_output_shape->data + 1, cur_shape_suffix->data,
cur_shape_suffix->size * sizeof(int));
final_output_shape->data[0] = arr->NumElements();
} else {
final_output_shape = BuildTfLiteArray({arr->NumElements()});
}
context->ResizeTensor(context, output, final_output_shape.release());
const auto num_elements = static_cast<int>(NumElements(output));
if (num_elements == 0) {
TfLiteTensorDataFree(output);
return kTfLiteOk;
}
const int element_num_elements = num_elements / output->dims->data[0];
const size_t bytes_per_element =
element_num_elements * TfLiteTypeGetSize(output->type);
char* raw_data_offset = output->data.raw;
for (int i = 0; i < arr->NumElements(); ++i) {
if (arr->At(i) == nullptr) {
memset(raw_data_offset, 0, bytes_per_element);
} else {
memcpy(raw_data_offset, arr->At(i)->data.data, bytes_per_element);
}
raw_data_offset = raw_data_offset + bytes_per_element;
}
return kTfLiteOk;
}
}
TfLiteRegistration* Register_LIST_STACK() {
static TfLiteRegistration r = {nullptr, nullptr, Prepare, Eval};
return &r;
}
}
}
} | #include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/kernels/variants/list_kernels/test_util.h"
#include "tensorflow/lite/kernels/variants/list_ops_lib.h"
#include "tensorflow/lite/schema/schema_generated.h"
using ::testing::ElementsAreArray;
using ::tflite::variants::ops::Register_LIST_STACK;
namespace tflite {
namespace {
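// Thin wrapper that wires a variant list input, an int32 shape input, and
// the custom ListStack kernel so the tests below can populate lists
// directly.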
class ListStackModel : public ListOpModel {
public:
explicit ListStackModel(TensorData output_data) {
tensor_id_ = AddOutput(output_data);
list_id_ = AddInput({TensorType_VARIANT, {}});
shape_id_ = AddInput({TensorType_INT32, {1}});
SetCustomOp("ListStack", {}, Register_LIST_STACK);
BuildInterpreter({{}, {1}});
}
ListStackModel(TensorData output_data, TensorData shape_input_data) {
tensor_id_ = AddOutput(output_data);
list_id_ = AddInput({TensorType_VARIANT, {}});
shape_id_ = AddInput(shape_input_data);
SetCustomOp("ListStack", {}, Register_LIST_STACK);
BuildInterpreter({{}, shape_input_data.shape});
}
const TfLiteTensor* GetOutputTensor(int tensor_id) {
return interpreter_->tensor(tensor_id);
}
int tensor_id_;
int shape_id_;
int list_id_;
};
TEST(ListStackTest, MismatchedListShapeInputShape_Fails) {
ListStackModel m({TensorType_INT32, {2, 2}});
m.PopulateListTensor(m.list_id_, {1}, 2, kTfLiteInt32);
m.PopulateTensor(m.shape_id_, {3});
ASSERT_EQ(m.Invoke(), kTfLiteError);
}
TEST(ListStackTest, MismatchedShapeOfElementsAndInput_Fails) {
ListStackModel m({TensorType_INT32, {2, 2}});
m.PopulateListTensor(m.list_id_, {}, 4, kTfLiteInt32);
m.PopulateTensor(m.shape_id_, {2});
m.ListSetItem(m.list_id_, 0, {1}, kTfLiteInt32, std::vector<int>{0}.data());
m.ListSetItem(m.list_id_, 1, {1}, kTfLiteInt32, std::vector<int>{1}.data());
ASSERT_EQ(m.Invoke(), kTfLiteError);
}
TEST(ListStackTest, ElementsNotSameShape_Fails) {
ListStackModel m({TensorType_INT32, {2, 2}});
m.PopulateListTensor(m.list_id_, {}, 2, kTfLiteInt32);
m.PopulateTensor(m.shape_id_, {2});
m.ListSetItem(m.list_id_, 0, {2}, kTfLiteInt32,
std::vector<int>{2, 2}.data());
m.ListSetItem(m.list_id_, 1, {1}, kTfLiteInt32, std::vector<int>{3}.data());
ASSERT_EQ(m.Invoke(), kTfLiteError);
}
TEST(ListStackTest, NoElementsNoShape_Fails) {
ListStackModel m({TensorType_INT32, {4}});
m.PopulateListTensor(m.list_id_, {}, 2, kTfLiteInt32);
m.PopulateTensor<int>(m.shape_id_, {-1});
EXPECT_EQ(m.Invoke(), kTfLiteError);
}
TEST(ListStackTest, ListElementTypeNotEqualOutputType_Fails) {
ListStackModel m({TensorType_INT32, {4}});
m.PopulateListTensor(m.list_id_, {}, 0, kTfLiteInt64);
m.PopulateTensor<int>(m.shape_id_, {-1});
EXPECT_EQ(m.Invoke(), kTfLiteError);
}
TEST(ListStackTest, ScalarElementShape_FullList_Returns1D) {
ListStackModel m({TensorType_INT32, {2}});
m.PopulateListTensor(m.list_id_, {}, 2, kTfLiteInt32);
m.PopulateTensor(m.shape_id_, {1});
m.ListSetItem(m.list_id_, 0, {1}, kTfLiteInt32, std::vector<int>{2}.data());
m.ListSetItem(m.list_id_, 1, {1}, kTfLiteInt32, std::vector<int>{3}.data());
ASSERT_EQ(m.Invoke(), kTfLiteOk);
const TfLiteTensor* output = m.GetOutputTensor(m.tensor_id_);
ASSERT_THAT(output, DimsAre({2}));
ASSERT_THAT(output->type, kTfLiteInt32);
EXPECT_THAT(std::vector<int>(output->data.i32, output->data.i32 + 2),
ElementsAreArray({2, 3}));
EXPECT_EQ(output->allocation_type, kTfLiteDynamic);
}
TEST(ListStackTest, ScalarElementShape_PartialFilledList_Returns1DWithZeroed) {
ListStackModel m({TensorType_INT32, {2}});
m.PopulateListTensor(m.list_id_, {}, 2, kTfLiteInt32);
m.PopulateTensor(m.shape_id_, {1});
m.ListSetItem(m.list_id_, 0, {1}, kTfLiteInt32, std::vector<int>{2}.data());
ASSERT_EQ(m.Invoke(), kTfLiteOk);
const TfLiteTensor* output = m.GetOutputTensor(m.tensor_id_);
ASSERT_THAT(output, DimsAre({2}));
ASSERT_THAT(output->type, kTfLiteInt32);
EXPECT_THAT(std::vector<int>(output->data.i32, output->data.i32 + 2),
ElementsAreArray({2, 0}));
EXPECT_EQ(output->allocation_type, kTfLiteDynamic);
}
TEST(ListStackTest, ScalarElementShape_EmptyList_Returns1DAllZeroed) {
ListStackModel m({TensorType_INT32, {2}});
m.PopulateListTensor(m.list_id_, {}, 2, kTfLiteInt32);
m.PopulateTensor(m.shape_id_, {1});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
const TfLiteTensor* output = m.GetOutputTensor(m.tensor_id_);
ASSERT_THAT(output, DimsAre({2}));
ASSERT_THAT(output->type, kTfLiteInt32);
EXPECT_THAT(std::vector<int>(output->data.i32, output->data.i32 + 2),
ElementsAreArray({0, 0}));
EXPECT_EQ(output->allocation_type, kTfLiteDynamic);
}
TEST(ListStackTest, VectorElementShape_FilledList_Returns2D) {
ListStackModel m({TensorType_INT32, {2, 2}});
m.PopulateListTensor(m.list_id_, {}, 2, kTfLiteInt32);
m.PopulateTensor<int>(m.shape_id_, {2});
m.ListSetItem(m.list_id_, 0, {2}, kTfLiteInt32,
std::vector<int>{2, 2}.data());
m.ListSetItem(m.list_id_, 1, {2}, kTfLiteInt32,
std::vector<int>{3, 3}.data());
ASSERT_EQ(m.Invoke(), kTfLiteOk);
const TfLiteTensor* output = m.GetOutputTensor(m.tensor_id_);
ASSERT_THAT(output, DimsAre({2, 2}));
EXPECT_THAT(std::vector<int>(output->data.i32, output->data.i32 + 4),
ElementsAreArray({2, 2, 3, 3}));
EXPECT_EQ(output->allocation_type, kTfLiteDynamic);
}
TEST(ListStackTest, VectorElementShape_PartialFilledList_Returns2DWithZeroed) {
ListStackModel m({TensorType_INT32, {2, 2}});
m.PopulateListTensor(m.list_id_, {}, 2, kTfLiteInt32);
m.PopulateTensor<int>(m.shape_id_, {2});
m.ListSetItem(m.list_id_, 0, {2}, kTfLiteInt32,
std::vector<int>{2, 2}.data());
ASSERT_EQ(m.Invoke(), kTfLiteOk);
const TfLiteTensor* output = m.GetOutputTensor(m.tensor_id_);
ASSERT_THAT(output, DimsAre({2, 2}));
EXPECT_THAT(std::vector<int>(output->data.i32, output->data.i32 + 4),
ElementsAreArray({2, 2, 0, 0}));
EXPECT_EQ(output->allocation_type, kTfLiteDynamic);
}
TEST(ListStackTest, VectorElementShape_EmptyList_Returns2DAllZeroed) {
ListStackModel m({TensorType_INT32, {2, 2}});
m.PopulateListTensor(m.list_id_, {}, 2, kTfLiteInt32);
m.PopulateTensor<int>(m.shape_id_, {2});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
const TfLiteTensor* output = m.GetOutputTensor(m.tensor_id_);
ASSERT_THAT(output, DimsAre({2, 2}));
EXPECT_THAT(std::vector<int>(output->data.i32, output->data.i32 + 4),
ElementsAreArray({0, 0, 0, 0}));
EXPECT_EQ(output->allocation_type, kTfLiteDynamic);
}
TEST(ListStackTest, NoShapeArguments_ZeroSizeList_InfersShapeFromElements) {
ListStackModel m({TensorType_INT32, {2, 2}});
m.PopulateListTensor(m.list_id_, {}, 2, kTfLiteInt32);
m.PopulateTensor<int>(m.shape_id_, {-1});
m.ListSetItem(m.list_id_, 0, {2}, kTfLiteInt32,
std::vector<int>{2, 2}.data());
EXPECT_EQ(m.Invoke(), kTfLiteOk);
const TfLiteTensor* output = m.GetOutputTensor(m.tensor_id_);
ASSERT_THAT(output, DimsAre({2, 2}));
EXPECT_THAT(std::vector<int>(output->data.i32, output->data.i32 + 4),
ElementsAreArray({2, 2, 0, 0}));
EXPECT_EQ(output->allocation_type, kTfLiteDynamic);
}
TEST(ListStackTest, ListFirstDimZero_ReturnsEmptyTensor) {
ListStackModel m({TensorType_INT32, {0, 2}});
m.PopulateListTensor(m.list_id_, {}, 0, kTfLiteInt32);
m.PopulateTensor<int>(m.shape_id_, {2});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
const TfLiteTensor* output = m.GetOutputTensor(m.tensor_id_);
EXPECT_THAT(output, DimsAre({0, 2}));
}
TEST(ListStackTest, MismatchedOutput_ReturnsResizedOutput1D) {
ListStackModel m({TensorType_INT32, {2}});
m.PopulateListTensor(m.list_id_, {}, 4, kTfLiteInt32);
m.PopulateTensor<int>(m.shape_id_, {1});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
const TfLiteTensor* output = m.GetOutputTensor(m.tensor_id_);
EXPECT_THAT(output, DimsAre({4}));
}
TEST(ListStackTest, MismatchedOutput_ReturnsResizedOutput2D) {
ListStackModel m({TensorType_INT32, std::vector<int>{}});
m.PopulateListTensor(m.list_id_, {}, 2, kTfLiteInt32);
m.PopulateTensor<int>(m.shape_id_, {2});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
const TfLiteTensor* output = m.GetOutputTensor(m.tensor_id_);
EXPECT_THAT(output, DimsAre({2, 2}));
}
TEST(ListStackTest, Trailing0DimInElementShape1D_NonZeroLen_Returns2DNoData) {
ListStackModel m({TensorType_INT32, std::vector<int>{}});
m.PopulateListTensor(m.list_id_, {}, 2, kTfLiteInt32);
m.PopulateTensor<int>(m.shape_id_, {0});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
const TfLiteTensor* output = m.GetOutputTensor(m.tensor_id_);
ASSERT_THAT(output, DimsAre({2, 0}));
EXPECT_EQ(output->bytes, 0);
}
TEST(ListStackTest, Trailing0DimInElementShape2D_NonZeroLen_Returns3DNoData) {
ListStackModel m({TensorType_INT32, {}}, {TensorType_INT32, {2}});
m.PopulateListTensor(m.list_id_, {}, 2, kTfLiteInt32);
m.PopulateTensor<int>(m.shape_id_, {2, 0});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
const TfLiteTensor* output = m.GetOutputTensor(m.tensor_id_);
ASSERT_THAT(output, DimsAre({2, 2, 0}));
EXPECT_EQ(output->bytes, 0);
}
TEST(ListStackTest, Trailing0DimInElementShape1D_ZeroLen_Returns2DNoData) {
ListStackModel m({TensorType_INT32, {}}, {TensorType_INT32, {1}});
m.PopulateListTensor(m.list_id_, {}, 0, kTfLiteInt32);
m.PopulateTensor<int>(m.shape_id_, {0});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
const TfLiteTensor* output = m.GetOutputTensor(m.tensor_id_);
ASSERT_THAT(output, DimsAre({0, 0}));
EXPECT_EQ(output->bytes, 0);
}
TEST(ListStackTest, Trailing0DimInElementShape2D_ZeroLen_Returns3DNoData) {
ListStackModel m({TensorType_INT32, {}}, {TensorType_INT32, {2}});
m.PopulateListTensor(m.list_id_, {}, 0, kTfLiteInt32);
m.PopulateTensor<int>(m.shape_id_, {2, 0});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
const TfLiteTensor* output = m.GetOutputTensor(m.tensor_id_);
ASSERT_THAT(output, DimsAre({0, 2, 0}));
EXPECT_EQ(output->bytes, 0);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/variants/list_kernels/list_stack.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/variants/list_kernels/list_stack_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |