ID | Language | Repository Name | File Name | File Path in Repository | File Path for Unit Test | Code | Unit Test - (Ground Truth) | Code Url | Test Code Url | Commit Hash
---|---|---|---|---|---|---|---|---|---|---|
d80a51ca-5726-403e-8f99-66364c3b3663 | cpp | google/tensorstore | utils | tensorstore/internal/grpc/utils.cc | tensorstore/internal/grpc/utils_test.cc | #include "tensorstore/internal/grpc/utils.h"
#include <grpcpp/support/status.h>
#include <string>
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "tensorstore/internal/source_location.h"
#include "tensorstore/util/status.h"
#define TENSORSTORE_STATUS_ASSERT(x, y) \
static_assert(static_cast<int>(grpc::StatusCode::x) == \
static_cast<int>(absl::StatusCode::y))
TENSORSTORE_STATUS_ASSERT(CANCELLED, kCancelled);
TENSORSTORE_STATUS_ASSERT(UNKNOWN, kUnknown);
TENSORSTORE_STATUS_ASSERT(INVALID_ARGUMENT, kInvalidArgument);
TENSORSTORE_STATUS_ASSERT(DEADLINE_EXCEEDED, kDeadlineExceeded);
TENSORSTORE_STATUS_ASSERT(NOT_FOUND, kNotFound);
TENSORSTORE_STATUS_ASSERT(ALREADY_EXISTS, kAlreadyExists);
TENSORSTORE_STATUS_ASSERT(PERMISSION_DENIED, kPermissionDenied);
TENSORSTORE_STATUS_ASSERT(RESOURCE_EXHAUSTED, kResourceExhausted);
TENSORSTORE_STATUS_ASSERT(FAILED_PRECONDITION, kFailedPrecondition);
TENSORSTORE_STATUS_ASSERT(ABORTED, kAborted);
TENSORSTORE_STATUS_ASSERT(OUT_OF_RANGE, kOutOfRange);
TENSORSTORE_STATUS_ASSERT(UNIMPLEMENTED, kUnimplemented);
TENSORSTORE_STATUS_ASSERT(INTERNAL, kInternal);
TENSORSTORE_STATUS_ASSERT(UNAVAILABLE, kUnavailable);
TENSORSTORE_STATUS_ASSERT(DATA_LOSS, kDataLoss);
TENSORSTORE_STATUS_ASSERT(UNAUTHENTICATED, kUnauthenticated);
#undef TENSORSTORE_STATUS_ASSERT
namespace tensorstore {
namespace internal {
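// Converts a grpc::Status to an absl::Status, preserving the error message,
// attaching `loc` as the source location, and carrying any error details as
// a "grpc.Status.details" payload.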
absl::Status GrpcStatusToAbslStatus(grpc::Status s, SourceLocation loc) {
if (s.ok()) return absl::OkStatus();
auto absl_code = static_cast<absl::StatusCode>(s.error_code());
absl::Status status(absl_code, s.error_message());
MaybeAddSourceLocation(status, loc);
if (!s.error_details().empty()) {
status.SetPayload("grpc.Status.details", absl::Cord(s.error_details()));
}
return status;
}
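// Converts an absl::Status to a grpc::Status. Only the code and message are
// preserved; payloads are not propagated.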
grpc::Status AbslStatusToGrpcStatus(const absl::Status& status) {
if (status.ok()) return grpc::Status::OK;
auto grpc_code = static_cast<grpc::StatusCode>(status.code());
return grpc::Status(grpc_code, std::string(status.message()));
}
}  // namespace internal
} | #include "tensorstore/internal/grpc/utils.h"
#include <grpcpp/support/status.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
namespace {
using ::tensorstore::internal::AbslStatusToGrpcStatus;
using ::tensorstore::internal::GrpcStatusToAbslStatus;
TEST(StatusToGrpcStatus, Basic) {
EXPECT_EQ(grpc::Status::OK.error_code(),
AbslStatusToGrpcStatus(absl::OkStatus()).error_code());
}
TEST(GrpcStatusToStatus, Basic) {
EXPECT_EQ(absl::OkStatus(), GrpcStatusToAbslStatus(grpc::Status::OK));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/grpc/utils.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/grpc/utils_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
d202f695-4abf-4358-aa4f-6952eaf62e6d | cpp | tensorflow/tensorflow | validate_utils | tensorflow/core/data/service/client/validate_utils.cc | tensorflow/core/data/service/client/validate_utils_test.cc | #include "tensorflow/core/data/service/client/validate_utils.h"
#include "tensorflow/core/data/service/client/common.h"
#include "tensorflow/core/data/service/common.h"
#include "tensorflow/core/data/service/common.pb.h"
#include "tensorflow/core/data/service/worker_impl.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/protobuf/data_service.pb.h"
namespace tensorflow {
namespace data {
namespace {
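// Validates requests that target local workers: local reads require at least
// one local tf.data worker and are incompatible with coordinated reads.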
Status ValidateLocalWorkers(const DataServiceParams& data_service_params) {
if (data_service_params.target_workers != TARGET_WORKERS_LOCAL) {
return absl::OkStatus();
}
if (LocalWorkers::Empty()) {
if (IsStaticShard(data_service_params.processing_mode)) {
return errors::InvalidArgument(
"Static sharding policy <",
ProcessingModeDef::ShardingPolicy_Name(
data_service_params.processing_mode.sharding_policy()),
"> requires local tf.data workers, but no local worker is found. "
"You need to run local tf.data service workers in your training "
"workers. Static sharding also requires a fixed worker pool and "
"a list of worker addresses in the DispatcherConfig. See the "
"\"Processing Modes\" section in the module doc for details.");
}
return errors::InvalidArgument(
"Local reads require local tf.data workers, but no local worker "
"is found. You need to run local tf.data service workers in your "
"training workers.");
}
if (data_service_params.num_consumers.has_value()) {
return errors::InvalidArgument(
"Coordinated reads require non-local workers, but `target_workers` "
"is \"LOCAL\".");
}
return absl::OkStatus();
}
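// Validates cross-trainer cache requests: caching requires a named job, an
// infinite input dataset, a single repetition, and no coordinated reads.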
Status ValidateCrossTrainerCache(const DataServiceParams& data_service_params) {
if (!data_service_params.cross_trainer_cache_options.has_value()) {
return absl::OkStatus();
}
if (data_service_params.job_name.empty()) {
return errors::InvalidArgument(
"Cross-trainer caching requires named jobs. Got empty `job_name`.");
}
if (data_service_params.metadata.cardinality() >= 0) {
return errors::InvalidArgument(
"Cross-trainer caching requires the input dataset to be infinite. "
"Got input with cardinality ",
data_service_params.metadata.cardinality());
}
if (data_service_params.repetition > 1) {
return errors::InvalidArgument(
"Cross-trainer caching requires infinite datasets and disallows "
"multiple repetitions of the same dataset. Got repetition ",
data_service_params.repetition);
}
if (data_service_params.num_consumers.has_value()) {
return errors::InvalidArgument(
"Cross-trainer caching does not support coordinated reads. "
"Got number of coordinated consumers: ",
data_service_params.num_consumers.value());
}
return absl::OkStatus();
}
}  // namespace
Status ValidateDataServiceParams(const DataServiceParams& data_service_params) {
TF_RETURN_IF_ERROR(ValidateLocalWorkers(data_service_params));
TF_RETURN_IF_ERROR(ValidateCrossTrainerCache(data_service_params));
return absl::OkStatus();
}
}  // namespace data
} | #include "tensorflow/core/data/service/client/validate_utils.h"
#include <memory>
#include "tensorflow/core/data/service/client/common.h"
#include "tensorflow/core/data/service/common.pb.h"
#include "tensorflow/core/data/service/worker_impl.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/status_matchers.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/data_service.pb.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
#include "tensorflow/core/protobuf/service_config.pb.h"
namespace tensorflow {
namespace data {
namespace {
using ::tensorflow::testing::StatusIs;
using ::testing::HasSubstr;
DataServiceParams GetDefaultParams() {
DataServiceParams params;
params.dataset_id = "dataset_id";
params.processing_mode.set_sharding_policy(ProcessingModeDef::OFF);
params.address = "localhost";
params.protocol = "grpc";
params.data_transfer_protocol = "grpc";
params.metadata.set_cardinality(kUnknownCardinality);
return params;
}
std::shared_ptr<DataServiceWorkerImpl> GetLocalWorker() {
experimental::WorkerConfig config;
config.set_protocol("grpc");
config.set_dispatcher_address("localhost");
config.set_worker_address("localhost");
return std::make_shared<DataServiceWorkerImpl>(config);
}
TEST(ValidateUtilsTest, DefaultParams) {
TF_EXPECT_OK(ValidateDataServiceParams(GetDefaultParams()));
}
TEST(ValidateUtilsTest, LocalWorkerSuccess) {
DataServiceParams params = GetDefaultParams();
LocalWorkers::Add("localhost", GetLocalWorker());
params.target_workers = TARGET_WORKERS_LOCAL;
TF_EXPECT_OK(ValidateDataServiceParams(params));
LocalWorkers::Remove("localhost");
}
TEST(ValidateUtilsTest, NoLocalWorker) {
DataServiceParams params = GetDefaultParams();
params.target_workers = TARGET_WORKERS_LOCAL;
EXPECT_THAT(
ValidateDataServiceParams(params),
StatusIs(
error::INVALID_ARGUMENT,
HasSubstr(
"Local reads require local tf.data workers, but no local worker "
"is found.")));
}
TEST(ValidateUtilsTest, NoLocalWorkerStaticSharding) {
DataServiceParams params = GetDefaultParams();
params.processing_mode.set_sharding_policy(ProcessingModeDef::FILE_OR_DATA);
params.target_workers = TARGET_WORKERS_LOCAL;
EXPECT_THAT(
ValidateDataServiceParams(params),
StatusIs(
error::INVALID_ARGUMENT,
HasSubstr(
"Static sharding policy <FILE_OR_DATA> requires local tf.data "
"workers, but no local worker is found.")));
}
TEST(ValidateUtilsTest, LocalReadDisallowsCoordinatedRead) {
DataServiceParams params = GetDefaultParams();
LocalWorkers::Add("localhost", GetLocalWorker());
params.num_consumers = 1;
params.consumer_index = 0;
params.target_workers = TARGET_WORKERS_LOCAL;
EXPECT_THAT(
ValidateDataServiceParams(params),
StatusIs(error::INVALID_ARGUMENT,
HasSubstr("Coordinated reads require non-local workers, but "
"`target_workers` is \"LOCAL\".")));
LocalWorkers::Remove("localhost");
}
TEST(ValidateUtilsTest, CrossTrainerCacheSuccess) {
DataServiceParams params = GetDefaultParams();
params.job_name = "job_name";
params.repetition = 1;
params.metadata.set_cardinality(kInfiniteCardinality);
params.cross_trainer_cache_options.emplace();
params.cross_trainer_cache_options->set_trainer_id("trainer ID");
TF_EXPECT_OK(ValidateDataServiceParams(params));
}
TEST(ValidateUtilsTest, CrossTrainerCacheRequiresJobName) {
DataServiceParams params = GetDefaultParams();
params.repetition = 1;
params.metadata.set_cardinality(kInfiniteCardinality);
params.cross_trainer_cache_options.emplace();
params.cross_trainer_cache_options->set_trainer_id("trainer ID");
EXPECT_THAT(
ValidateDataServiceParams(params),
StatusIs(
error::INVALID_ARGUMENT,
"Cross-trainer caching requires named jobs. Got empty `job_name`."));
}
TEST(ValidateUtilsTest, CrossTrainerCacheRequiresInfiniteDataset) {
DataServiceParams params = GetDefaultParams();
params.job_name = "job_name";
params.repetition = 1;
params.metadata.set_cardinality(10);
params.cross_trainer_cache_options.emplace();
params.cross_trainer_cache_options->set_trainer_id("trainer ID");
EXPECT_THAT(ValidateDataServiceParams(params),
StatusIs(error::INVALID_ARGUMENT,
HasSubstr("Cross-trainer caching requires the input "
"dataset to be infinite.")));
}
TEST(ValidateUtilsTest, CrossTrainerCacheDisallowsRepetition) {
DataServiceParams params = GetDefaultParams();
params.job_name = "job_name";
params.repetition = 5;
params.metadata.set_cardinality(kInfiniteCardinality);
params.cross_trainer_cache_options.emplace();
params.cross_trainer_cache_options->set_trainer_id("trainer ID");
EXPECT_THAT(
ValidateDataServiceParams(params),
StatusIs(
error::INVALID_ARGUMENT,
HasSubstr(
"Cross-trainer caching requires infinite datasets and disallows "
"multiple repetitions of the same dataset.")));
}
TEST(ValidateUtilsTest, CrossTrainerCacheDisallowsCoordinatedRead) {
DataServiceParams params = GetDefaultParams();
params.job_name = "job_name";
params.repetition = 1;
params.num_consumers = 1;
params.consumer_index = 0;
params.metadata.set_cardinality(kInfiniteCardinality);
params.cross_trainer_cache_options.emplace();
params.cross_trainer_cache_options->set_trainer_id("trainer ID");
EXPECT_THAT(
ValidateDataServiceParams(params),
StatusIs(
error::INVALID_ARGUMENT,
HasSubstr(
"Cross-trainer caching does not support coordinated reads.")));
}
}  // namespace
}  // namespace data
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/service/client/validate_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/service/client/validate_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
75ef4a16-796c-4902-b0f9-b8a1804aa18c | cpp | tensorflow/tensorflow | fingerprinting | tensorflow/cc/saved_model/fingerprinting.cc | tensorflow/cc/saved_model/fingerprinting_test.cc | #include "tensorflow/cc/saved_model/fingerprinting.h"
#include <cstdint>
#include <string>
#include "absl/container/btree_map.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/strings/strip.h"
#include "tensorflow/cc/saved_model/constants.h"
#include "tensorflow/core/framework/versions.pb.h"
#include "tensorflow/core/graph/regularization/simple_delete.h"
#include "tensorflow/core/graph/regularization/util.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/file_system_helper.h"
#include "tensorflow/core/platform/fingerprint.h"
#include "tensorflow/core/platform/path.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/protobuf/fingerprint.pb.h"
#include "tensorflow/core/protobuf/meta_graph.pb.h"
#include "tensorflow/core/protobuf/saved_model.pb.h"
#include "tensorflow/core/protobuf/saved_object_graph.pb.h"
#include "tensorflow/core/util/tensor_bundle/naming.h"
#if !defined(PLATFORM_WINDOWS) && !defined(__APPLE__)
#include "tensorflow/cc/saved_model/fingerprinting_utils.h"
#include "tensorflow/tools/proto_splitter/cc/util.h"
#endif
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace tensorflow::saved_model::fingerprinting {
namespace {
using ::tensorflow::protobuf::Map;
using ::tensorflow::protobuf::io::CodedOutputStream;
using ::tensorflow::protobuf::io::StringOutputStream;
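// Fingerprints the checkpoint index file under `model_dir`, returning 0 if
// the file cannot be read.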
uint64_t HashCheckpointIndexFile(absl::string_view model_dir) {
std::string meta_filename = MetaFilename(io::JoinPath(
model_dir, kSavedModelVariablesDirectory, kSavedModelVariablesFilename));
std::string data;
absl::Status read_status =
ReadFileToString(Env::Default(), meta_filename, &data);
if (read_status.ok()) {
return tensorflow::Fingerprint64(data);
} else {
LOG(WARNING) << "Failed to read checkpoint file: " << read_status;
return 0;
}
}
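// Fingerprints a deterministic serialization of the entire SavedModel proto.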
uint64_t HashSavedModel(const SavedModel& saved_model) {
std::string saved_model_serialized;
{
StringOutputStream stream(&saved_model_serialized);
CodedOutputStream output(&stream);
output.SetSerializationDeterministic(true);
saved_model.SerializeToCodedStream(&output);
}
return tensorflow::Fingerprint64(saved_model_serialized);
}
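// Hashes the SignatureDef map in sorted key order so the result does not
// depend on map iteration order.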
uint64_t RegularizeAndHashSignatureDefs(
const Map<std::string, SignatureDef>& signature_def_map) {
absl::btree_map<std::string, SignatureDef> sorted_signature_defs;
sorted_signature_defs.insert(signature_def_map.begin(),
signature_def_map.end());
uint64_t result_hash = 0;
for (const auto& item : sorted_signature_defs) {
result_hash =
FingerprintCat64(result_hash, tensorflow::Fingerprint64(item.first));
std::string signature_def_serialized;
{
StringOutputStream stream(&signature_def_serialized);
CodedOutputStream output(&stream);
output.SetSerializationDeterministic(true);
item.second.SerializeToCodedStream(&output);
}
result_hash = FingerprintCat64(
result_hash, tensorflow::Fingerprint64(signature_def_serialized));
}
return result_hash;
}
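// Hashes concrete functions in ascending UID order; the UID suffix is
// stripped from each function name before hashing.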
absl::StatusOr<uint64_t> RegularizeAndHashSavedObjectGraph(
const SavedObjectGraph& object_graph_def) {
absl::btree_map<int64_t, std::string> uid_to_function_names;
for (const auto& [name, concrete_function] :
object_graph_def.concrete_functions()) {
TF_ASSIGN_OR_RETURN(int64_t uid, graph_regularization::GetSuffixUID(name));
uid_to_function_names.insert({uid, name});
}
uint64_t result_hash = 0;
for (const auto& [uid, function_name] : uid_to_function_names) {
result_hash = FingerprintCat64(result_hash,
tensorflow::Fingerprint64(absl::StripSuffix(
function_name, std::to_string(uid))));
std::string concrete_function_serialized;
{
StringOutputStream stream(&concrete_function_serialized);
CodedOutputStream output(&stream);
output.SetSerializationDeterministic(true);
object_graph_def.concrete_functions()
.at(function_name)
.SerializeToCodedStream(&output);
}
result_hash = FingerprintCat64(
result_hash, tensorflow::Fingerprint64(concrete_function_serialized));
}
return result_hash;
}
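// Computes the fingerprint of a SavedModel read from a single saved_model.pb
// file: a whole-proto checksum plus regularized hashes of the graph,
// signatures, object graph, and checkpoint.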
absl::StatusOr<FingerprintDef> CreateFingerprintDefPb(
absl::string_view export_dir, std::string pb_file) {
const int kFingerprintProducer = 1;
SavedModel saved_model;
TF_RETURN_IF_ERROR(ReadBinaryProto(Env::Default(), pb_file, &saved_model));
FingerprintDef fingerprint_def;
MetaGraphDef* metagraph = saved_model.mutable_meta_graphs(0);
fingerprint_def.set_saved_model_checksum(HashSavedModel(saved_model));
graph_regularization::SimpleDelete(*metagraph->mutable_graph_def());
fingerprint_def.set_graph_def_program_hash(
graph_regularization::ComputeHash(metagraph->graph_def()));
fingerprint_def.set_signature_def_hash(
RegularizeAndHashSignatureDefs(metagraph->signature_def()));
TF_ASSIGN_OR_RETURN(
uint64_t object_graph_hash,
RegularizeAndHashSavedObjectGraph(metagraph->object_graph_def()));
fingerprint_def.set_saved_object_graph_hash(object_graph_hash);
fingerprint_def.set_checkpoint_hash(HashCheckpointIndexFile(export_dir));
VersionDef* version = fingerprint_def.mutable_version();
version->set_producer(kFingerprintProducer);
return fingerprint_def;
}
}  // namespace
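// Creates the fingerprint for the SavedModel in `export_dir`, dispatching to
// the chunked (.cpb) or single-file (.pb) path where proto-splitter support
// is available.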
absl::StatusOr<FingerprintDef> CreateFingerprintDef(
absl::string_view export_dir) {
std::string prefix = io::JoinPath(export_dir, kSavedModelFilenamePrefix);
#if !defined(PLATFORM_WINDOWS) && !defined(__APPLE__)
TF_ASSIGN_OR_RETURN(bool only_contains_pb,
tools::proto_splitter::OnlyContainsPb(prefix));
if (only_contains_pb) {
return CreateFingerprintDefPb(export_dir, absl::StrCat(prefix, ".pb"));
}
return CreateFingerprintDefCpb(export_dir, absl::StrCat(prefix, ".cpb"));
#else
return CreateFingerprintDefPb(export_dir, absl::StrCat(prefix, ".pb"));
#endif
}
absl::StatusOr<FingerprintDef> ReadSavedModelFingerprint(
absl::string_view export_dir) {
const std::string fingerprint_pb_path =
io::JoinPath(export_dir, kFingerprintFilenamePb);
TF_RETURN_IF_ERROR(Env::Default()->FileExists(fingerprint_pb_path));
FingerprintDef fingerprint_proto;
absl::Status result =
ReadBinaryProto(Env::Default(), fingerprint_pb_path, &fingerprint_proto);
if (!result.ok()) return result;
return fingerprint_proto;
}
std::string Singleprint(uint64_t graph_def_program_hash,
uint64_t signature_def_hash,
uint64_t saved_object_graph_hash,
uint64_t checkpoint_hash) {
return std::to_string(graph_def_program_hash) + "/" +
std::to_string(signature_def_hash) + "/" +
std::to_string(saved_object_graph_hash) + "/" +
std::to_string(checkpoint_hash);
}
std::string Singleprint(const FingerprintDef& fingerprint) {
return Singleprint(
fingerprint.graph_def_program_hash(), fingerprint.signature_def_hash(),
fingerprint.saved_object_graph_hash(), fingerprint.checkpoint_hash());
}
absl::StatusOr<std::string> Singleprint(absl::string_view export_dir) {
TF_ASSIGN_OR_RETURN(const FingerprintDef fingerprint_def,
ReadSavedModelFingerprint(export_dir));
return Singleprint(fingerprint_def);
}
} | #include "tensorflow/cc/saved_model/fingerprinting.h"
#include <string>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/versions.pb.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/path.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/fingerprint.pb.h"
#include "tensorflow/core/protobuf/saved_model.pb.h"
#include "tsl/platform/statusor.h"
namespace tensorflow::saved_model::fingerprinting {
namespace {
absl::StatusOr<SavedModel> ReadSavedModel(absl::string_view file_dir) {
std::string file_path = io::JoinPath(file_dir, "saved_model.pb");
std::string serialized_saved_model;
auto status =
ReadFileToString(Env::Default(), file_path, &serialized_saved_model);
if (!status.ok()) {
return status;
}
SavedModel saved_model_pb;
saved_model_pb.ParseFromString(serialized_saved_model);
return saved_model_pb;
}
TEST(FingerprintingTest, TestCreateFingerprint) {
const std::string export_dir =
io::JoinPath(testing::TensorFlowSrcRoot(), "cc/saved_model/testdata",
"VarsAndArithmeticObjectGraph");
TF_ASSERT_OK_AND_ASSIGN(SavedModel saved_model_pb,
ReadSavedModel(export_dir));
TF_ASSERT_OK_AND_ASSIGN(FingerprintDef fingerprint_def,
CreateFingerprintDef(export_dir));
EXPECT_GT(fingerprint_def.saved_model_checksum(), 0);
EXPECT_EQ(fingerprint_def.graph_def_program_hash(), 10127142238652115842U);
EXPECT_EQ(fingerprint_def.signature_def_hash(), 15570736222402453744U);
EXPECT_EQ(fingerprint_def.saved_object_graph_hash(), 3678101440349108924U);
EXPECT_GT(fingerprint_def.checkpoint_hash(), 0);
}
TEST(FingerprintingTest, TestCompareFingerprintForTwoModelSavedTwice) {
const std::string export_dir = io::JoinPath(
testing::TensorFlowSrcRoot(), "cc/saved_model/testdata", "bert1");
TF_ASSERT_OK_AND_ASSIGN(SavedModel saved_model_pb,
ReadSavedModel(export_dir));
TF_ASSERT_OK_AND_ASSIGN(FingerprintDef fingerprint_def,
CreateFingerprintDef(export_dir));
const std::string export_dir2 = io::JoinPath(
testing::TensorFlowSrcRoot(), "cc/saved_model/testdata", "bert2");
TF_ASSERT_OK_AND_ASSIGN(SavedModel saved_model_pb2,
ReadSavedModel(export_dir2));
TF_ASSERT_OK_AND_ASSIGN(FingerprintDef fingerprint_def2,
CreateFingerprintDef(export_dir2));
EXPECT_GT(fingerprint_def.saved_model_checksum(), 0);
EXPECT_GT(fingerprint_def2.saved_model_checksum(), 0);
EXPECT_EQ(fingerprint_def.graph_def_program_hash(),
fingerprint_def2.graph_def_program_hash());
EXPECT_EQ(fingerprint_def.signature_def_hash(),
fingerprint_def2.signature_def_hash());
EXPECT_EQ(fingerprint_def.saved_object_graph_hash(),
fingerprint_def2.saved_object_graph_hash());
}
TEST(FingerprintingTest, TestFingerprintComputationDoesNotMutateModel) {
const std::string export_dir = io::JoinPath(
testing::TensorFlowSrcRoot(), "cc/saved_model/testdata", "bert1");
TF_ASSERT_OK_AND_ASSIGN(SavedModel saved_model_pb,
ReadSavedModel(export_dir));
TF_ASSERT_OK_AND_ASSIGN(FingerprintDef fingerprint_def,
CreateFingerprintDef(export_dir));
TF_ASSERT_OK_AND_ASSIGN(FingerprintDef fingerprint_def2,
CreateFingerprintDef(export_dir));
EXPECT_EQ(fingerprint_def.saved_model_checksum(),
fingerprint_def2.saved_model_checksum());
}
TEST(FingerprintingTest, TestFingerprintHasVersion) {
const std::string export_dir = io::JoinPath(
testing::TensorFlowSrcRoot(), "cc/saved_model/testdata", "bert1");
TF_ASSERT_OK_AND_ASSIGN(SavedModel saved_model_pb,
ReadSavedModel(export_dir));
TF_ASSERT_OK_AND_ASSIGN(FingerprintDef fingerprint_def,
CreateFingerprintDef(export_dir));
EXPECT_EQ(fingerprint_def.version().producer(), 1);
}
TEST(FingerprintingTest, TestHashCheckpointForModelWithNoVariables) {
const std::string export_dir = io::JoinPath(
testing::TensorFlowSrcRoot(), "cc/saved_model/testdata", "bert1");
TF_ASSERT_OK_AND_ASSIGN(SavedModel saved_model_pb,
ReadSavedModel(export_dir));
TF_ASSERT_OK_AND_ASSIGN(FingerprintDef fingerprint_def,
CreateFingerprintDef(export_dir));
EXPECT_EQ(fingerprint_def.checkpoint_hash(), 0);
}
TEST(FingerprintingTest, TestReadValidFingerprint) {
const std::string export_dir =
io::JoinPath(testing::TensorFlowSrcRoot(), "cc/saved_model/testdata",
"VarsAndArithmeticObjectGraph");
TF_ASSERT_OK_AND_ASSIGN(FingerprintDef fingerprint_pb,
ReadSavedModelFingerprint(export_dir));
EXPECT_EQ(fingerprint_pb.saved_model_checksum(), 15788619162413586750u);
}
TEST(FingerprintingTest, TestReadNonexistentFingerprint) {
const std::string export_dir = io::JoinPath(
testing::TensorFlowSrcRoot(), "cc/saved_model/testdata", "AssetModule");
EXPECT_EQ(ReadSavedModelFingerprint(export_dir).status().code(),
absl::StatusCode::kNotFound);
}
TEST(FingerprintingTest, TestSingleprint) {
const std::string export_dir =
io::JoinPath(testing::TensorFlowSrcRoot(), "cc/saved_model/testdata",
"VarsAndArithmeticObjectGraph");
const std::string const_singleprint =
"706963557435316516/5693392539583495303/12074714563970609759/"
"10788359570789890102";
TF_ASSERT_OK_AND_ASSIGN(std::string singleprint, Singleprint(export_dir));
EXPECT_EQ(singleprint, const_singleprint);
TF_ASSERT_OK_AND_ASSIGN(FingerprintDef fingerprint_pb,
ReadSavedModelFingerprint(export_dir));
EXPECT_EQ(Singleprint(fingerprint_pb), const_singleprint);
EXPECT_EQ(Singleprint(fingerprint_pb.graph_def_program_hash(),
fingerprint_pb.signature_def_hash(),
fingerprint_pb.saved_object_graph_hash(),
fingerprint_pb.checkpoint_hash()),
const_singleprint);
}
}  // namespace
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/cc/saved_model/fingerprinting.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/cc/saved_model/fingerprinting_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
7beea957-2f27-4d00-9869-9d10b6996c6c | cpp | google/quiche | simple_buffer_allocator | quiche/common/simple_buffer_allocator.cc | quiche/common/simple_buffer_allocator_test.cc | #include "quiche/common/simple_buffer_allocator.h"
namespace quiche {
char* SimpleBufferAllocator::New(size_t size) { return new char[size]; }
char* SimpleBufferAllocator::New(size_t size, bool /*unused*/) {
return New(size);
}
void SimpleBufferAllocator::Delete(char* buffer) { delete[] buffer; }
} | #include "quiche/common/simple_buffer_allocator.h"
#include <utility>
#include "quiche/common/platform/api/quiche_test.h"
namespace quiche {
namespace {
TEST(SimpleBufferAllocatorTest, NewDelete) {
SimpleBufferAllocator alloc;
char* buf = alloc.New(4);
EXPECT_NE(nullptr, buf);
alloc.Delete(buf);
}
TEST(SimpleBufferAllocatorTest, DeleteNull) {
SimpleBufferAllocator alloc;
alloc.Delete(nullptr);
}
TEST(SimpleBufferAllocatorTest, MoveBuffersConstructor) {
SimpleBufferAllocator alloc;
QuicheBuffer buffer1(&alloc, 16);
EXPECT_NE(buffer1.data(), nullptr);
EXPECT_EQ(buffer1.size(), 16u);
QuicheBuffer buffer2(std::move(buffer1));
EXPECT_EQ(buffer1.data(), nullptr);
EXPECT_EQ(buffer1.size(), 0u);
EXPECT_NE(buffer2.data(), nullptr);
EXPECT_EQ(buffer2.size(), 16u);
}
TEST(SimpleBufferAllocatorTest, MoveBuffersAssignment) {
SimpleBufferAllocator alloc;
QuicheBuffer buffer1(&alloc, 16);
QuicheBuffer buffer2;
EXPECT_NE(buffer1.data(), nullptr);
EXPECT_EQ(buffer1.size(), 16u);
EXPECT_EQ(buffer2.data(), nullptr);
EXPECT_EQ(buffer2.size(), 0u);
buffer2 = std::move(buffer1);
EXPECT_EQ(buffer1.data(), nullptr);
EXPECT_EQ(buffer1.size(), 0u);
EXPECT_NE(buffer2.data(), nullptr);
EXPECT_EQ(buffer2.size(), 16u);
}
TEST(SimpleBufferAllocatorTest, CopyBuffer) {
SimpleBufferAllocator alloc;
const absl::string_view original = "Test string";
QuicheBuffer copy = QuicheBuffer::Copy(&alloc, original);
EXPECT_EQ(copy.AsStringView(), original);
}
}  // namespace
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/common/simple_buffer_allocator.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/common/simple_buffer_allocator_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
e680641a-da49-449a-bb3e-3767970a1e11 | cpp | tensorflow/tensorflow | grpc_eager_client | tensorflow/core/distributed_runtime/rpc/eager/grpc_eager_client.cc | tensorflow/core/distributed_runtime/rpc/eager/grpc_eager_client_test.cc | #include "tensorflow/core/distributed_runtime/rpc/eager/grpc_eager_client.h"
#include <cstdint>
#include <string>
#include "grpcpp/generic/generic_stub.h"
#include "xla/tsl/distributed_runtime/call_options.h"
#include "tensorflow/core/distributed_runtime/call_options.h"
#include "tensorflow/core/distributed_runtime/rpc/eager/grpc_eager_service.h"
#include "tensorflow/core/distributed_runtime/rpc/grpc_client_cq_tag.h"
#include "tensorflow/core/distributed_runtime/rpc/grpc_state.h"
#include "tensorflow/core/distributed_runtime/rpc/grpc_util.h"
#include "tensorflow/core/framework/metrics.h"
#include "tensorflow/core/lib/core/refcount.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/error_payloads.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/protobuf/core_platform_payloads.pb.h"
#include "tensorflow/core/protobuf/eager_service.pb.h"
#include "tensorflow/core/util/env_var.h"
namespace tensorflow {
namespace eager {
namespace {
bool EnableStreaming() {
bool result;
TF_CHECK_OK(ReadBoolFromEnvVar("TF_ENABLE_EAGER_CLIENT_STREAMING_ENQUEUE",
true, &result));
return result;
}
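// Ref-counted thread that polls a gRPC completion queue and runs completion
// callbacks; once it holds the last reference it shuts the queue down and
// schedules its own unref.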
class GrpcEagerClientThread : public core::RefCounted {
public:
GrpcEagerClientThread() {
Ref();
thread_.reset(Env::Default()->StartThread(
ThreadOptions(), "eager_client_thread", [this]() {
void* tag;
bool ok;
while (completion_queue_.Next(&tag, &ok)) {
VLOG(4) << "GrpcEagerClientThread got next tag";
GrpcClientCQTag* callback_tag = static_cast<GrpcClientCQTag*>(tag);
callback_tag->OnCompleted(ok);
VLOG(4) << "GrpcEagerClientThread blocking for next tag";
if (RefCountIsOne()) {
break;
}
}
VLOG(4) << "GrpcEagerClientThread exiting";
completion_queue_.Shutdown();
Env::Default()->SchedClosure([this]() { this->Unref(); });
}));
}
~GrpcEagerClientThread() override {}
::grpc::CompletionQueue* completion_queue() { return &completion_queue_; }
private:
::grpc::CompletionQueue completion_queue_;
std::unique_ptr<Thread> thread_;
};
class GrpcEagerClient : public EagerClient {
public:
GrpcEagerClient(const tensorflow::SharedGrpcChannelPtr& channel,
GrpcEagerClientThread* thread, const string& target)
: stub_(channel), thread_(thread), target_(target) {
thread_->Ref();
cq_ = thread->completion_queue();
}
~GrpcEagerClient() override { thread_->Unref(); }
bool allow_multiple_pending_requests() const override {
return EnableStreaming();
}
#define CLIENT_METHOD(method) \
void method##Async(const method##Request* request, \
method##Response* response, StatusCallback done) \
override { \
StatusCallback done_wrapped = callback_wrapper(std::move(done)); \
new RPCState<protobuf::Message>( \
&stub_, cq_, "/tensorflow.eager.EagerService/" #method, *request, \
        response, std::move(done_wrapped), nullptr, nullptr, 0, true,        \
        &target_);                                                           \
}
CLIENT_METHOD(CreateContext);
CLIENT_METHOD(UpdateContext);
CLIENT_METHOD(WaitQueueDone);
CLIENT_METHOD(KeepAlive);
#undef CLIENT_METHOD
#define CLIENT_METHOD_WITH_TIMEOUT_AND_RETRIES(method) \
void method##Async(const method##Request* request, \
method##Response* response, StatusCallback done, \
int64_t init_timeout_in_ms, int retries) override { \
CallOptions* call_ops = nullptr; \
StatusCallback done_wrapped; \
if (init_timeout_in_ms > 0) { \
call_ops = new CallOptions; \
call_ops->SetTimeout(init_timeout_in_ms); \
auto new_done = [call_ops, done = std::move(done)](const Status& s) { \
done(s); \
delete call_ops; \
}; \
done_wrapped = callback_wrapper(new_done); \
} else { \
done_wrapped = callback_wrapper(std::move(done)); \
} \
new RPCState<protobuf::Message>( \
&stub_, cq_, "/tensorflow.eager.EagerService/" #method, *request, \
        response, std::move(done_wrapped), call_ops, nullptr, retries, true, \
        &target_);                                                           \
}
CLIENT_METHOD_WITH_TIMEOUT_AND_RETRIES(CreateContext);
#undef CLIENT_METHOD_WITH_TIMEOUT_AND_RETRIES
#define CLIENT_CANCELABLE_METHOD(method) \
void method##Async(CallOptions* call_opts, const method##Request* request, \
method##Response* response, StatusCallback done) \
override { \
StatusCallback done_wrapped = callback_wrapper(std::move(done)); \
new RPCState<protobuf::Message>( \
&stub_, cq_, "/tensorflow.eager.EagerService/" #method, *request, \
        response, std::move(done_wrapped), call_opts, nullptr, 0, true,      \
        &target_);                                                           \
}
CLIENT_CANCELABLE_METHOD(Enqueue);
CLIENT_CANCELABLE_METHOD(RunComponentFunction);
#undef CLIENT_CANCELABLE_METHOD
void CloseContextAsync(const CloseContextRequest* request,
CloseContextResponse* response,
StatusCallback done) override {
StatusCallback done_wrapped = callback_wrapper(std::move(done));
new RPCState<protobuf::Message>(
&stub_, cq_, "/tensorflow.eager.EagerService/CloseContext", *request,
        response, std::move(done_wrapped), nullptr, nullptr, 0, true,
        &target_);
VLOG(1) << "Sending RPC to close remote eager context "
<< request->DebugString();
mutex_lock l(mu_);
const auto& it = enqueue_dispatchers_.find(request->context_id());
if (it != enqueue_dispatchers_.end()) {
it->second.CancelCall();
enqueue_dispatchers_.erase(it);
} else if (EnableStreaming()) {
LOG(ERROR) << "Remote EagerContext with id " << request->context_id()
<< " does not seem to exist.";
}
}
void StreamingEnqueueAsync(bool enable_streaming_enqueue,
CallOptions* call_opts,
const EnqueueRequest* request,
EnqueueResponse* response,
StatusCallback done) override {
StatusCallback done_wrapped = callback_wrapper(std::move(done));
if (EnableStreaming() && enable_streaming_enqueue) {
mutex_lock l(mu_);
auto it = enqueue_dispatchers_.find(request->context_id());
if (it == enqueue_dispatchers_.end()) {
auto it_and_bool = enqueue_dispatchers_.emplace(
std::piecewise_construct,
std::forward_as_tuple(request->context_id()),
std::forward_as_tuple(
&stub_, cq_,
"/tensorflow.eager.EagerService/StreamingEnqueue"));
it = it_and_bool.first;
}
it->second.SendNextRequest(*request, response, std::move(done_wrapped));
} else {
Notification n;
Status status;
EnqueueAsync(call_opts, request, response,
[&n, &status](const Status& s) {
status.Update(s);
n.Notify();
});
n.WaitForNotification();
done_wrapped(status);
}
}
private:
::grpc::GenericStub stub_;
const GrpcEagerClientThread* thread_;
const string target_;
::grpc::CompletionQueue* cq_;
mutable mutex mu_;
std::unordered_map<uint64, StreamingRPCDispatcher<EnqueueResponse>>
enqueue_dispatchers_ TF_GUARDED_BY(mu_);
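  // Wraps `done` so this client stays alive until the callback has run and
  // eager client error metrics are recorded when the status is not OK.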
StatusCallback callback_wrapper(StatusCallback done) {
Ref();
return [this, done = std::move(done)](const Status& status) {
done(status);
this->Unref();
if (TF_PREDICT_FALSE(!status.ok())) {
auto error_source_payload = status.GetPayload(kErrorSource);
if (error_source_payload.has_value()) {
tensorflow::core::platform::ErrorSourceProto error_source_proto;
error_source_proto.ParseFromString(
std::string(*error_source_payload));
metrics::UpdateEagerClientErrorCounter(
error_source_proto.ErrorSource_Name(
error_source_proto.error_source()),
absl::StatusCodeToString(status.code()));
} else {
metrics::UpdateEagerClientErrorCounter(
"unknown", absl::StatusCodeToString(status.code()));
}
}
};
}
};
class GrpcEagerClientCache : public EagerClientCache {
public:
explicit GrpcEagerClientCache(
std::shared_ptr<tensorflow::GrpcChannelCache> cache)
: next_round_robin_assignment_(0), cache_(cache), threads_(4) {
for (int i = 0, end = threads_.size(); i < end; i++) {
threads_[i].reset(new GrpcEagerClientThread());
}
}
~GrpcEagerClientCache() override { threads_.clear(); }
Status GetClient(const string& target,
core::RefCountPtr<EagerClient>* client) override {
mutex_lock l(clients_mu_);
auto it = clients_.find(target);
if (it == clients_.end()) {
tensorflow::SharedGrpcChannelPtr shared =
cache_->FindWorkerChannel(target);
if (shared == nullptr) {
return errors::InvalidArgument("Client for target ", target,
" not found.");
}
int assigned_index = AssignClientToThread(target);
GrpcEagerClientThread* thread = threads_[assigned_index].get();
core::RefCountPtr<EagerClient> worker(
new GrpcEagerClient(shared, thread, target));
it = clients_.emplace(target, std::move(worker)).first;
}
it->second->Ref();
client->reset(it->second.get());
return absl::OkStatus();
}
private:
mutex assignment_mu_;
std::unordered_map<std::string, size_t> target_assignments_
TF_GUARDED_BY(assignment_mu_);
size_t next_round_robin_assignment_ TF_GUARDED_BY(assignment_mu_);
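  // Assigns targets to completion-queue threads round-robin; repeated
  // lookups of the same target reuse the previously assigned thread.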
size_t AssignClientToThread(const string& target) {
mutex_lock lock(assignment_mu_);
auto it = target_assignments_.find(target);
if (it == target_assignments_.end()) {
it = target_assignments_
.insert(std::make_pair(
target, (next_round_robin_assignment_++) % threads_.size()))
.first;
}
return it->second;
}
std::shared_ptr<tensorflow::GrpcChannelCache> cache_;
mutable mutex clients_mu_;
std::unordered_map<string, core::RefCountPtr<EagerClient>> clients_
TF_GUARDED_BY(clients_mu_);
std::vector<core::RefCountPtr<GrpcEagerClientThread>> threads_;
};
}  // namespace
EagerClientCache* NewGrpcEagerClientCache(
std::shared_ptr<tensorflow::GrpcChannelCache> channel) {
return new GrpcEagerClientCache(channel);
}
}  // namespace eager
} | #include "tensorflow/core/distributed_runtime/rpc/eager/grpc_eager_client.h"
#include "tensorflow/core/distributed_runtime/rpc/grpc_channel.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/blocking_counter.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace eager {
TEST(GrpcEagerClientCache, TestGetClientThreadSafety) {
GrpcChannelSpec spec;
TF_ASSERT_OK(spec.AddHostPortsJob("worker", {{0, "a:1"},
{1, "b:2"},
{2, "c:3"},
{3, "d:4"},
{4, "e:5"},
{5, "f:6"}}));
ChannelCreationFunction channel_func =
ConvertToChannelCreationFunction(NewHostPortGrpcChannel);
auto channel_cache = std::shared_ptr<GrpcChannelCache>(
NewGrpcChannelCache(spec, channel_func));
std::unique_ptr<EagerClientCache> client_cache(
NewGrpcEagerClientCache(channel_cache));
const int num_calls = 10;
BlockingCounter counter(num_calls);
for (int i = 0; i < num_calls; i++) {
Env::Default()->SchedClosure([&client_cache, i, &counter]() {
string target = strings::StrCat("/job:worker/replica:0/task:", i);
core::RefCountPtr<EagerClient> eager_client;
Status s = client_cache->GetClient(target, &eager_client);
error::Code expected_code = i <= 5 ? error::OK : error::INVALID_ARGUMENT;
EXPECT_EQ(expected_code, s.code());
counter.DecrementCount();
});
}
counter.Wait();
}
}  // namespace eager
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/distributed_runtime/rpc/eager/grpc_eager_client.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/distributed_runtime/rpc/eager/grpc_eager_client_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
bc72dbe1-77c6-4f82-b5f5-b5b2a1d20887 | cpp | tensorflow/tensorflow | mirror_pad_op | tensorflow/compiler/tf2xla/kernels/mirror_pad_op.cc | tensorflow/core/kernels/image/mirror_pad_op_test.cc | #include "absl/status/statusor.h"
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "xla/hlo/builder/lib/constants.h"
#include "xla/hlo/builder/xla_builder.h"
#include "xla/literal.h"
#include "xla/shape.h"
#include "xla/status_macros.h"
#include "xla/util.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/op_requires.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/util/mirror_pad_mode.h"
namespace tensorflow {
namespace {
class MirrorPadOp : public XlaOpKernel {
public:
explicit MirrorPadOp(OpKernelConstruction* context) : XlaOpKernel(context) {}
absl::StatusOr<xla::XlaOp> DoMirrorPad(const xla::XlaOp t,
const xla::Shape& original_shape,
const xla::LiteralSlice& pad_literal,
const MirrorPadMode mode,
xla::XlaBuilder* b) {
int64_t excluded_edges = mode == MirrorPadMode::REFLECT ? 1 : 0;
xla::XlaOp accum = t;
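    // For each dimension, concatenate mirrored slices onto both sides of the
    // accumulated result. REFLECT mode (excluded_edges == 1) leaves the edge
    // element out of the mirrored region; SYMMETRIC includes it.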
for (int64_t dimno = original_shape.rank() - 1; dimno >= 0; --dimno) {
auto t_rev = xla::Rev(accum, {dimno});
int64_t lhs_padding = pad_literal.Get<int64_t>({dimno, 0});
int64_t rhs_padding = pad_literal.Get<int64_t>({dimno, 1});
int64_t dim_size = original_shape.dimensions(dimno);
TF_RET_CHECK(lhs_padding >= 0 &&
lhs_padding <= dim_size - excluded_edges);
TF_RET_CHECK(rhs_padding >= 0 &&
rhs_padding <= dim_size - excluded_edges);
auto lhs_pad =
xla::SliceInDim(t_rev, dim_size - excluded_edges - lhs_padding,
dim_size - excluded_edges, 1, dimno);
auto rhs_pad = xla::SliceInDim(t_rev, excluded_edges,
excluded_edges + rhs_padding, 1, dimno);
accum = xla::ConcatInDim(b, {lhs_pad, accum, rhs_pad}, dimno);
}
return accum;
}
void Compile(XlaOpKernelContext* ctx) override {
const TensorShape input_shape = ctx->InputShape("input");
const TensorShape pad_shape = ctx->InputShape("paddings");
MirrorPadMode mode;
OP_REQUIRES_OK(ctx, GetNodeAttr(def(), "mode", &mode));
OP_REQUIRES(
ctx, mode == MirrorPadMode::REFLECT || mode == MirrorPadMode::SYMMETRIC,
xla::Unimplemented("Unsupported MirrorPad mode. Only SYMMETRIC and "
"REFLECT modes are currently supported"));
const int dims = input_shape.dims();
OP_REQUIRES(
ctx,
TensorShapeUtils::IsMatrix(pad_shape) && pad_shape.dim_size(1) == 2,
errors::InvalidArgument("paddings must be a matrix with 2 columns: ",
pad_shape.DebugString()));
OP_REQUIRES(
ctx, dims == pad_shape.dim_size(0),
errors::InvalidArgument(
"The first dimension of paddings must be the rank of inputs",
pad_shape.DebugString(), " ", input_shape.DebugString()));
xla::Literal pad_literal;
OP_REQUIRES_OK(ctx,
ctx->ConstantInputAsInt64Literal("paddings", &pad_literal));
xla::XlaBuilder* b = ctx->builder();
auto in0 = ctx->Input("input");
absl::StatusOr<xla::Shape> in0_shape = b->GetShape(in0);
OP_REQUIRES(ctx, in0_shape.ok(), in0_shape.status());
absl::StatusOr<xla::XlaOp> accum_status =
DoMirrorPad(in0, in0_shape.value(), pad_literal, mode, b);
OP_REQUIRES_OK(ctx, accum_status.status());
ctx->SetOutput(0, accum_status.value());
}
private:
MirrorPadOp(const MirrorPadOp&) = delete;
void operator=(const MirrorPadOp&) = delete;
};
REGISTER_XLA_OP(Name("MirrorPad").CompileTimeConstantInput("paddings"),
MirrorPadOp);
class MirrorPadGradOp : public XlaOpKernel {
public:
explicit MirrorPadGradOp(OpKernelConstruction* context)
: XlaOpKernel(context) {}
absl::StatusOr<xla::XlaOp> DoMirrorPadGrad(
const xla::XlaOp t, const xla::Shape& original_shape,
const xla::LiteralSlice& pad_literal, const MirrorPadMode mode,
xla::XlaBuilder* b) {
int64_t excluded_edges = mode == MirrorPadMode::REFLECT ? 1 : 0;
xla::XlaOp grad = t;
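    // For each dimension, fold the gradient of each padded region back onto
    // the interior: reverse the pad slice, zero-pad it to the result size,
    // and add it to the unpadded core.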
for (int64_t dimno = original_shape.rank() - 1; dimno >= 0; --dimno) {
int64_t lhs_padding = pad_literal.Get<int64_t>({dimno, 0});
int64_t rhs_padding = pad_literal.Get<int64_t>({dimno, 1});
int64_t dim_size = original_shape.dimensions(dimno);
int64_t result_dim_size = dim_size - lhs_padding - rhs_padding;
TF_RET_CHECK(lhs_padding >= 0 &&
lhs_padding <= dim_size - excluded_edges);
TF_RET_CHECK(rhs_padding >= 0 &&
rhs_padding <= dim_size - excluded_edges);
xla::XlaOp lhs_pad = xla::SliceInDim(grad, 0, lhs_padding, 1, dimno);
xla::XlaOp reverse_lhs_pad = xla::Rev(lhs_pad, {dimno});
xla::XlaOp padded_lhs_pad = xla::PadInDim(
reverse_lhs_pad, xla::ScalarLike(reverse_lhs_pad, 0), dimno,
excluded_edges,
result_dim_size - lhs_padding - excluded_edges);
xla::XlaOp rhs_pad =
xla::SliceInDim(grad, dim_size - rhs_padding, dim_size, 1, dimno);
xla::XlaOp reverse_rhs_pad = xla::Rev(rhs_pad, {dimno});
xla::XlaOp padded_rhs_pad = xla::PadInDim(
reverse_rhs_pad, xla::ScalarLike(reverse_rhs_pad, 0), dimno,
result_dim_size - rhs_padding - excluded_edges,
excluded_edges);
xla::XlaOp grad_core =
xla::SliceInDim(grad, lhs_padding, dim_size - rhs_padding, 1, dimno);
grad = padded_lhs_pad + grad_core + padded_rhs_pad;
}
return grad;
}
void Compile(XlaOpKernelContext* ctx) override {
const TensorShape input_shape = ctx->InputShape("input");
const TensorShape pad_shape = ctx->InputShape("paddings");
MirrorPadMode mode;
OP_REQUIRES_OK(ctx, GetNodeAttr(def(), "mode", &mode));
OP_REQUIRES(
ctx, mode == MirrorPadMode::REFLECT || mode == MirrorPadMode::SYMMETRIC,
xla::Unimplemented("Unsupported MirrorPadGrad mode. Only SYMMETRIC and "
"REFLECT modes are currently supported"));
const int dims = input_shape.dims();
OP_REQUIRES(
ctx,
TensorShapeUtils::IsMatrix(pad_shape) && pad_shape.dim_size(1) == 2,
errors::InvalidArgument("paddings must be a matrix with 2 columns: ",
pad_shape.DebugString()));
OP_REQUIRES(
ctx, dims == pad_shape.dim_size(0),
errors::InvalidArgument(
"The first dimension of paddings must be the rank of inputs",
pad_shape.DebugString(), " ", input_shape.DebugString()));
xla::Literal pad_literal;
OP_REQUIRES_OK(ctx,
ctx->ConstantInputAsInt64Literal("paddings", &pad_literal));
xla::XlaBuilder* b = ctx->builder();
auto in0 = ctx->Input("input");
absl::StatusOr<xla::Shape> in0_shape = b->GetShape(in0);
OP_REQUIRES(ctx, in0_shape.ok(), in0_shape.status());
absl::StatusOr<xla::XlaOp> accum_status =
DoMirrorPadGrad(in0, in0_shape.value(), pad_literal, mode, b);
OP_REQUIRES_OK(ctx, accum_status.status());
ctx->SetOutput(0, accum_status.value());
}
private:
MirrorPadGradOp(const MirrorPadGradOp&) = delete;
void operator=(const MirrorPadGradOp&) = delete;
};
REGISTER_XLA_OP(Name("MirrorPadGrad").CompileTimeConstantInput("paddings"),
MirrorPadGradOp);
}  // namespace
} | #include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/tensor_util.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
class MirrorPadOpTest : public OpsTestBase {
protected:
template <typename T>
void MakeOp(const string& mode) {
TF_EXPECT_OK(NodeDefBuilder("mirror_pad_op", "MirrorPad")
.Input(FakeInput(DataTypeToEnum<T>::value))
.Input(FakeInput(DT_INT32))
.Attr("mode", mode)
.Finalize(node_def()));
TF_EXPECT_OK(InitOp());
}
};
#define REGISTER_TEST(T) \
TEST_F(MirrorPadOpTest, TestMirrorPadReflect##T) { \
MakeOp<T>("REFLECT"); \
AddInputFromArray<T>(TensorShape({1, 2, 3, 1}), {1, 2, 3, 4, 5, 6}); \
AddInputFromArray<int32>(TensorShape({4, 2}), {0, 0, 1, 1, 2, 2, 0, 0}); \
TF_ASSERT_OK(RunOpKernel()); \
\
Tensor expected(allocator(), DataTypeToEnum<T>::value, \
TensorShape({1, 4, 7, 1})); \
test::FillValues<T>(&expected, \
{6, 5, 4, 5, 6, 5, 4, 3, 2, 1, 2, 3, 2, 1, \
6, 5, 4, 5, 6, 5, 4, 3, 2, 1, 2, 3, 2, 1}); \
test::ExpectTensorEqual<T>(expected, *GetOutput(0)); \
} \
\
TEST_F(MirrorPadOpTest, TestMirrorPadSymmetric##T) { \
MakeOp<T>("SYMMETRIC"); \
AddInputFromArray<T>(TensorShape({1, 2, 1, 3}), {1, 2, 3, 4, 5, 6}); \
AddInputFromArray<int32>(TensorShape({4, 2}), {1, 1, 0, 0, 0, 0, 2, 2}); \
TF_ASSERT_OK(RunOpKernel()); \
\
Tensor expected(allocator(), DataTypeToEnum<T>::value, \
TensorShape({3, 2, 1, 7})); \
test::FillValues<T>( \
&expected, \
{2, 1, 1, 2, 3, 3, 2, 5, 4, 4, 5, 6, 6, 5, 2, 1, 1, 2, 3, 3, 2, \
5, 4, 4, 5, 6, 6, 5, 2, 1, 1, 2, 3, 3, 2, 5, 4, 4, 5, 6, 6, 5}); \
test::ExpectTensorEqual<T>(expected, *GetOutput(0)); \
}
REGISTER_TEST(float)
REGISTER_TEST(double)
REGISTER_TEST(quint8)
REGISTER_TEST(qint8)
REGISTER_TEST(qint32)
REGISTER_TEST(uint8)
REGISTER_TEST(uint16)
REGISTER_TEST(int8)
REGISTER_TEST(int16)
REGISTER_TEST(int32)
REGISTER_TEST(int64_t)
#undef REGISTER_TEST
TEST_F(MirrorPadOpTest, TestMirrorPadReflectLargeInput) {
MakeOp<float>("REFLECT");
const int kInput = 1000;
const int kPad = 10;
const int kOutput = kInput + 2 * kPad;
AddInput<float>(TensorShape({1, kInput, kInput, 1}),
[=](int i) -> float { return i % kInput; });
AddInputFromArray<int32>(TensorShape({4, 2}),
{0, 0, kPad, kPad, kPad, kPad, 0, 0});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, kOutput, kOutput, 1}));
test::FillFn<float>(&expected, [=](int i) -> float {
i = i % kOutput;
if (0 <= i && i < kPad)
return kPad - i;
else if (kPad <= i && i < kInput + kPad)
return i - kPad;
else if (kInput + kPad <= i && i < kOutput)
return 2 * kInput + kPad - 2 - i;
else
return -1;
});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_F(MirrorPadOpTest, TestMirrorPadSymmetricLargeInput) {
MakeOp<float>("SYMMETRIC");
const int kInput = 1000;
const int kPad = 10;
const int kOutput = kInput + 2 * kPad;
AddInput<float>(TensorShape({1, kInput, kInput, 1}),
[=](int i) -> float { return i % kInput; });
AddInputFromArray<int32>(TensorShape({4, 2}),
{0, 0, kPad, kPad, kPad, kPad, 0, 0});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, kOutput, kOutput, 1}));
test::FillFn<float>(&expected, [=](int i) -> float {
i = i % kOutput;
if (0 <= i && i < kPad)
return kPad - i - 1;
else if (kPad <= i && i < kInput + kPad)
return i - kPad;
else if (kInput + kPad <= i && i < kOutput)
return 2 * kInput + kPad - 1 - i;
else
return -1;
});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
class MirrorPadGradOpTest : public OpsTestBase {
protected:
template <typename T>
void MakeOp(const string& mode) {
TF_EXPECT_OK(NodeDefBuilder("mirror_pad_grad_op", "MirrorPadGrad")
.Input(FakeInput(DataTypeToEnum<T>::value))
.Input(FakeInput(DT_INT32))
.Attr("mode", mode)
.Finalize(node_def()));
TF_EXPECT_OK(InitOp());
}
};
#define REGISTER_TEST(T) \
TEST_F(MirrorPadGradOpTest, TestMirrorPadGradReflect##T) { \
MakeOp<T>("REFLECT"); \
AddInput<T>(TensorShape({1, 4, 7, 1}), [](int i) -> T { return i % 7; }); \
AddInputFromArray<int32>(TensorShape({4, 2}), {0, 0, 1, 1, 2, 2, 0, 0}); \
TF_ASSERT_OK(RunOpKernel()); \
\
Tensor expected(allocator(), DataTypeToEnum<T>::value, \
TensorShape({1, 2, 3, 1})); \
test::FillValues<T>(&expected, {16, 18, 8, 16, 18, 8}); \
test::ExpectTensorEqual<T>(expected, *GetOutput(0)); \
} \
\
TEST_F(MirrorPadGradOpTest, TestMirrorPadGradSymmetric##T) { \
MakeOp<T>("SYMMETRIC"); \
AddInput<T>(TensorShape({3, 2, 1, 7}), [](int i) -> T { return i % 7; }); \
AddInputFromArray<int32>(TensorShape({4, 2}), {1, 1, 0, 0, 0, 0, 2, 2}); \
TF_ASSERT_OK(RunOpKernel()); \
\
Tensor expected(allocator(), DataTypeToEnum<T>::value, \
TensorShape({1, 2, 1, 3})); \
test::FillValues<T>(&expected, {9, 27, 27, 9, 27, 27}); \
test::ExpectTensorEqual<T>(expected, *GetOutput(0)); \
}
REGISTER_TEST(float)
REGISTER_TEST(double)
REGISTER_TEST(uint8)
REGISTER_TEST(uint16)
REGISTER_TEST(int8)
REGISTER_TEST(int16)
REGISTER_TEST(int32)
REGISTER_TEST(int64_t)
#undef REGISTER_TEST
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/kernels/mirror_pad_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/image/mirror_pad_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
60aad745-e11c-4fce-abc6-3248231d3e2a | cpp | tensorflow/tensorflow | whole_graph_manual_pass | third_party/xla/xla/service/spmd/whole_graph_manual_pass.cc | third_party/xla/xla/service/spmd/whole_graph_manual_pass_test.cc | #include "xla/service/spmd/whole_graph_manual_pass.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_sharding.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
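// Returns true if `inst` should have its sharding cleared. Parameters, root
// instructions, partition-ids, collectives, and side-effecting ops are kept
// and marked manual by RunOnComputation instead.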
bool ShouldClearInstruction(HloInstruction* inst) {
return inst->opcode() != HloOpcode::kParameter &&
inst != inst->parent()->root_instruction() &&
inst->opcode() != HloOpcode::kPartitionId &&
DynCast<HloCollectiveInstruction>(inst) == nullptr &&
!inst->HasSideEffectNoRecurse();
}
absl::StatusOr<bool> RunOnComputation(HloComputation* computation) {
bool changed = false;
for (HloInstruction* inst : computation->instructions()) {
if (ShouldClearInstruction(inst)) {
inst->clear_sharding();
changed = true;
continue;
}
if (inst->shape().IsTuple()) {
inst->set_sharding(
HloSharding::SingleTuple(inst->shape(), HloSharding::Manual()));
changed = true;
} else {
inst->set_sharding(HloSharding::Manual());
changed = true;
}
}
return changed;
}
}  // namespace
absl::StatusOr<bool> WholeGraphManualPass::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
for (auto* comp : module->computations()) {
TF_ASSIGN_OR_RETURN(bool comp_changed, RunOnComputation(comp));
changed |= comp_changed;
}
return changed;
}
} | #include "xla/service/spmd/whole_graph_manual_pass.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/pass/hlo_pass_pipeline.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace spmd {
namespace {
using ::testing::_;
using ::testing::AllOf;
namespace op = xla::testing::opcode_matchers;
class WholeGraphManualPassTest : public HloTestBase {
public:
absl::StatusOr<std::unique_ptr<HloModule>> RunPass(
absl::string_view hlo_module) {
TF_ASSIGN_OR_RETURN(
auto module,
ParseAndReturnVerifiedModule(
hlo_module,
GetModuleConfigForTest(1, 4)));
HloPassPipeline pipeline("whole-graph-manual-pass");
pipeline.AddPass<WholeGraphManualPass>();
TF_RETURN_IF_ERROR(pipeline.Run(module.get()).status());
return absl::StatusOr<std::unique_ptr<HloModule>>(std::move(module));
}
absl::Status RunPassOnModule(HloModule* module,
int64_t distance_threshold = 100) {
HloPassPipeline pipeline("all-gather-cse");
pipeline.AddPass<WholeGraphManualPass>();
TF_RETURN_IF_ERROR(pipeline.Run(module).status());
return absl::OkStatus();
}
};
TEST_F(WholeGraphManualPassTest, SimpleRewrite) {
absl::string_view hlo_string = R"(
HloModule module
body {
p_body = (f32[2], f32[2], f32[2], s32[]) parameter(0)
val.0 = f32[2] get-tuple-element(p_body), index=0
val.1 = f32[2] get-tuple-element(p_body), index=1
add = f32[2] add(val.0, val.1)
const = s32[] constant(-1)
ROOT root = (f32[2], f32[2], f32[2], s32[]) tuple(val.0, val.1, add, const)
}
condition {
p_cond = (f32[2], f32[2], f32[2], s32[]) parameter(0)
gte = s32[] get-tuple-element(p_cond), index=3
const = s32[] constant(42)
ROOT result = pred[] compare(gte, const), direction=EQ
}
ENTRY entry {
param0 = (s32[8]{0}, s32[8]{0}) parameter(0)
g1 = s32[8]{0} get-tuple-element(param0), index=0
g2 = s32[8]{0} get-tuple-element(param0), index=1
resh1 = s32[1,8]{1,0} reshape(g1)
resh2 = s32[1,8]{1,0} reshape(g2)
param1 = f32[2] parameter(1)
param2 = s32[] parameter(2)
while_init = (f32[2], f32[2], f32[2], s32[]) tuple(param1, param1, param1, param2)
while = (f32[2], f32[2], f32[2], s32[]) while(while_init), condition=condition, body=body
g3 = f32[2] get-tuple-element(while), index=0
ROOT t = (s32[1,8]{1,0}, s32[1,8]{1,0}, f32[2]) tuple(resh1, resh2, g3), sharding={{devices=[1,4]0,1,2,3}, {devices=[1,4]0,1,2,3}, {replicated}}
})";
auto module_status = RunPass(hlo_string);
EXPECT_TRUE(module_status.status().ok());
auto module = std::move(module_status).value();
for (auto* i : module->entry_computation()->instructions()) {
if (module->entry_computation()->root_instruction() == i) {
EXPECT_THAT(i, op::Sharding("{{manual}, "
"{manual}, {manual}}"));
} else if (i->opcode() == HloOpcode::kParameter) {
EXPECT_THAT(i, AnyOf(op::Sharding("{manual}"),
op::Sharding("{{manual},{manual}}")));
}
}
}
TEST_F(WholeGraphManualPassTest, SimplePartitionIdCollectives) {
absl::string_view hlo_string = R"(
HloModule module
body {
p_body = (f32[2], f32[2], f32[2], s32[]) parameter(0)
val.0 = f32[2] get-tuple-element(p_body), index=0
val.1 = f32[2] get-tuple-element(p_body), index=1
t = token[] after-all()
p = u32[] partition-id()
ag = f32[8] all-gather(val.1), dimensions={0}, replica_groups={{0,1,2,3}}, use_global_device_ids=true, channel_id=1
s = (f32[8], s32[], token[]) send(ag, t), channel_id=2
sd = token[] send-done(s), channel_id=2
add = f32[2] add(val.0, val.1)
const = s32[] constant(-1)
ROOT root = (f32[2], f32[2], f32[2], s32[]) tuple(val.0, val.1, add, const)
}
condition {
p_cond = (f32[2], f32[2], f32[2], s32[]) parameter(0)
gte = s32[] get-tuple-element(p_cond), index=3
const = s32[] constant(42)
ROOT result = pred[] compare(gte, const), direction=EQ
}
ENTRY entry {
param0 = (s32[8]{0}, s32[8]{0}) parameter(0)
g1 = s32[8]{0} get-tuple-element(param0), index=0
g2 = s32[8]{0} get-tuple-element(param0), index=1
resh1 = s32[1,8]{1,0} reshape(g1)
resh2 = s32[1,8]{1,0} reshape(g2)
param1 = f32[2] parameter(1)
param2 = s32[] parameter(2)
while_init = (f32[2], f32[2], f32[2], s32[]) tuple(param1, param1, param1, param2)
while = (f32[2], f32[2], f32[2], s32[]) while(while_init), condition=condition, body=body
g3 = f32[2] get-tuple-element(while), index=0
ROOT t = (s32[1,8]{1,0}, s32[1,8]{1,0}, f32[2]) tuple(resh1, resh2, g3), sharding={{devices=[1,4]0,1,2,3}, {devices=[1,4]0,1,2,3}, {replicated}}
})";
auto module_status = RunPass(hlo_string);
EXPECT_TRUE(module_status.status().ok());
auto module = std::move(module_status).value();
for (auto* c : module->computations()) {
for (auto* i : c->instructions()) {
if (c->root_instruction() == i) {
EXPECT_THAT(
i, AnyOf(op::Sharding("{manual}"),
op::Sharding("{{manual},{manual},{manual}}"),
op::Sharding("{{manual}, {manual}, {manual}, {manual}}")));
} else if (i->opcode() == HloOpcode::kParameter) {
EXPECT_THAT(
i,
AnyOf(op::Sharding("{manual}"), op::Sharding("{{manual},{manual}}"),
op::Sharding("{{manual},{manual},{manual},{manual}}")));
} else if (i->opcode() == HloOpcode::kPartitionId ||
i->opcode() == HloOpcode::kAllGather ||
i->opcode() == HloOpcode::kSendDone) {
EXPECT_THAT(i, op::Sharding("{manual}"));
} else if (i->opcode() == HloOpcode::kSend) {
EXPECT_THAT(i, op::Sharding("{{manual},{manual},{manual}}"));
} else {
EXPECT_FALSE(i->has_sharding());
}
}
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/spmd/whole_graph_manual_pass.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/spmd/whole_graph_manual_pass_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
bcbda2a7-93ed-47d7-b14f-5c2b1c80a977 | cpp | tensorflow/tensorflow | host_offloading_prepare | third_party/xla/xla/service/host_offloading_prepare.cc | third_party/xla/xla/service/host_offloading_prepare_test.cc | #include "xla/service/host_offloading_prepare.h"
#include <memory>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/call_graph.h"
#include "xla/service/host_memory_offload_annotations.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
using xla::host_memory_offload_annotations::kMoveToHostCustomCallTarget;
bool IsHostAsyncStart(const HloInstruction* instruction) {
return instruction->opcode() == HloOpcode::kAsyncStart &&
instruction->async_execution_thread() == HloInstruction::kHostThread &&
instruction->async_wrapped_instruction()->opcode() == HloOpcode::kCall;
}
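// Rewires `async_start` so that any MoveToHost custom-call feeding it is
// bypassed (the custom-call's single operand is consumed directly) and
// removed from the computation. Returns true if at least one call was elided.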
absl::StatusOr<bool> RemoveSurroundingMoveCustomCalls(
HloInstruction* async_start) {
bool removed = false;
for (HloInstruction* operand : async_start->operands()) {
if (operand->IsCustomCall(kMoveToHostCustomCallTarget)) {
CHECK_EQ(operand->operands().size(), 1);
VLOG(1) << "Replacing " << operand->ToString() << " with "
<< operand->operands().at(0)->ToString();
TF_RETURN_IF_ERROR(
operand->ReplaceAllUsesWith(operand->mutable_operand(0)));
TF_RETURN_IF_ERROR(async_start->parent()->RemoveInstruction(operand));
removed = true;
}
}
return removed;
}
absl::StatusOr<bool> ElideMoveCustomCalls(HloModule* module) {
bool changed = false;
std::unique_ptr<CallGraph> call_graph = CallGraph::Build(module);
for (HloComputation* computation : module->computations()) {
if (computation->execution_thread() != HloInstruction::kHostThread) {
continue;
}
std::vector<HloInstruction*> callers =
call_graph->GetComputationCallers(computation);
for (HloInstruction* caller : callers) {
VLOG(2) << "Hlo computation " << computation->name()
<< " is offloaded to host and has caller " << caller->ToString();
if (caller->parent()->execution_thread() == HloInstruction::kHostThread) {
VLOG(3) << "Nested host computation, must be a async-wrapper";
continue;
}
VLOG(2) << "Going to adjust before and after " << caller->name();
}
}
for (HloComputation* computation : module->computations()) {
for (HloInstruction* instruction : computation->instructions()) {
if (IsHostAsyncStart(instruction)) {
VLOG(2) << "Found async start of host computation: "
<< instruction->ToString() << " done must be "
<< instruction->users().at(0)->ToString();
TF_ASSIGN_OR_RETURN(bool removed,
RemoveSurroundingMoveCustomCalls(instruction));
changed = changed || removed;
}
}
}
return changed;
}
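// For every host async-start, replaces the wrapped kCall with an equivalent
// "HostExecute" custom-call on the same operands and called computation,
// carrying over the output/operand aliasing.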
absl::StatusOr<bool> ConvertToCustomCall(HloModule* module) {
bool changed = false;
for (HloComputation* computation : module->computations()) {
for (HloInstruction* instruction : computation->instructions()) {
if (IsHostAsyncStart(instruction)) {
auto* call_start = Cast<HloAsyncInstruction>(instruction);
auto* call = call_start->async_wrapped_instruction();
auto custom_call = HloInstruction::CreateCustomCall(
call->shape(), call->operands(), call->called_computations().at(0),
"HostExecute");
custom_call->set_output_to_operand_aliasing(
call->output_operand_aliasing());
HloComputation* async_computation =
call_start->async_wrapped_computation();
async_computation->set_root_instruction(
async_computation->AddInstruction(std::move(custom_call)));
TF_RETURN_IF_ERROR(async_computation->RemoveInstruction(call));
changed = true;
}
}
}
return changed;
}
}
absl::StatusOr<bool> HostOffloadingPrepare::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
switch (rewrite_) {
case Rewrite::kElideMoveToHost:
return ElideMoveCustomCalls(module);
case Rewrite::kConvertToCustomCall:
return ConvertToCustomCall(module);
}
}
} | #include "xla/service/host_offloading_prepare.h"
#include <string>
#include <vector>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/host_memory_offload_annotations.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
using Rewrite = HostOffloadingPrepare::Rewrite;
class HostOffloadingPrepareTest : public HloTestBase {
protected:
absl::StatusOr<bool> RunRewrite(HloModule* module, Rewrite rewrite) {
TF_EXPECT_OK(verifier().Run(module).status());
if (module->has_schedule()) {
return absl::InternalError("Expected a non-scheduled module");
}
HostOffloadingPrepare pass(rewrite);
TF_ASSIGN_OR_RETURN(bool changed, pass.Run(module));
return changed;
}
std::vector<const HloInstruction*> GetHostOffloadAsyncStartInstructions(
const HloModule* module) {
std::vector<const HloInstruction*> result;
for (const HloComputation* computation : module->computations()) {
for (const HloInstruction* instruction : computation->instructions()) {
if (instruction->opcode() == HloOpcode::kAsyncStart &&
instruction->async_execution_thread() ==
HloInstruction::kHostThread) {
result.push_back(instruction);
}
}
}
return result;
}
};
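// The two rewrite modes exercised below, as implemented in the pass source:
// Rewrite::kElideMoveToHost strips MoveToHost custom-calls that feed a host
// async-start (MoveToDevice inputs are deliberately left alone, so those
// tests expect no change), while Rewrite::kConvertToCustomCall swaps the
// wrapped call for a "HostExecute" custom-call inside the async computation.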
TEST_F(HostOffloadingPrepareTest, SingleInputHasMoveToHost) {
const std::string& hlo_string = R"(
HloModule my_module, entry_computation_layout={(s32[32]{0:T(128)})->s32[32]{0:T(128)}}
host_computation {
Arg_0.0 = s32[32]{0} parameter(0)
ROOT multiply.0 = s32[32]{0} multiply(Arg_0.0, Arg_0.0)
}, execution_thread="host"
async_computation {
param_0 = s32[32]{0} parameter(0)
ROOT call = s32[32]{0} call(param_0), to_apply=host_computation, frontend_attributes={_xla_compute_type="host"}
}, execution_thread="host"
ENTRY main {
Arg_0.1 = s32[32]{0:T(128)} parameter(0)
constant.2 = s32[]{:T(128)} constant(2)
broadcast.3 = s32[32]{0:T(128)} broadcast(constant.2), dimensions={}
multiply.4 = s32[32]{0:T(128)} multiply(Arg_0.1, broadcast.3)
move_to_host = s32[32]{0:T(128)} custom-call(multiply.4), custom_call_target="MoveToHost"
start = ((s32[32]{0:T(128)}), s32[32]{0:T(128)}, u32[]{:T(128)}) async-start(move_to_host), async_execution_thread="host", calls=async_computation
ROOT done = s32[32]{0:T(128)} async-done(start), frontend_attributes={_xla_compute_type="host"}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed,
RunRewrite(module.get(), Rewrite::kElideMoveToHost));
EXPECT_TRUE(changed);
for (const HloInstruction* instruction :
GetHostOffloadAsyncStartInstructions(module.get())) {
for (const HloInstruction* operand : instruction->operands()) {
EXPECT_FALSE(operand->IsCustomCall(
{host_memory_offload_annotations::kMoveToHostCustomCallTarget}));
}
for (const HloInstruction* user : instruction->users()) {
EXPECT_FALSE(user->IsCustomCall(
{host_memory_offload_annotations::kMoveToDeviceCustomCallTarget}));
}
}
}
TEST_F(HostOffloadingPrepareTest, MultipleInputHasOneMoveToHost) {
const std::string& hlo_string = R"(
HloModule my_module, entry_computation_layout={(s32[32]{0:T(128)})->s32[32]{0:T(128)}}
host_computation {
Arg_0.0 = s32[32]{0} parameter(0)
Arg_0.1 = s32[32]{0} parameter(1)
ROOT multiply.0 = s32[32]{0} multiply(Arg_0.0, Arg_0.1)
}, execution_thread="host"
async_computation {
param_0 = s32[32]{0} parameter(0)
param_1 = s32[32]{0} parameter(1)
ROOT call = s32[32]{0} call(param_0, param_1), to_apply=host_computation, frontend_attributes={_xla_compute_type="host"}
}, execution_thread="host"
ENTRY main {
Arg_0.1 = s32[32]{0:T(128)} parameter(0)
constant.2 = s32[]{:T(128)} constant(2)
broadcast.3 = s32[32]{0:T(128)} broadcast(constant.2), dimensions={}
multiply.4 = s32[32]{0:T(128)} multiply(Arg_0.1, broadcast.3)
move_to_host = s32[32]{0:T(128)} custom-call(multiply.4), custom_call_target="MoveToHost"
start = ((s32[32]{0:T(128)}, s32[32]{0:T(128)}), s32[32]{0:T(128)}, u32[]{:T(128)}) async-start(move_to_host, move_to_host), async_execution_thread="host", calls=async_computation
ROOT done = s32[32]{0:T(128)} async-done(start), frontend_attributes={_xla_compute_type="host"}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed,
RunRewrite(module.get(), Rewrite::kElideMoveToHost));
EXPECT_TRUE(changed);
for (const HloInstruction* instruction :
GetHostOffloadAsyncStartInstructions(module.get())) {
for (const HloInstruction* operand : instruction->operands()) {
EXPECT_FALSE(operand->IsCustomCall(
{host_memory_offload_annotations::kMoveToHostCustomCallTarget}));
}
for (const HloInstruction* user : instruction->users()) {
EXPECT_FALSE(user->IsCustomCall(
{host_memory_offload_annotations::kMoveToDeviceCustomCallTarget}));
}
}
}
TEST_F(HostOffloadingPrepareTest, MultipleInputHasMultipleMoveToHost) {
const std::string& hlo_string = R"(
HloModule my_module, entry_computation_layout={(s32[32]{0:T(128)})->s32[32]{0:T(128)}}
host_computation {
Arg_0.0 = s32[32]{0} parameter(0)
Arg_0.1 = s32[32]{0} parameter(1)
ROOT multiply.0 = s32[32]{0} multiply(Arg_0.0, Arg_0.1)
}, execution_thread="host"
async_computation {
param_0 = s32[32]{0} parameter(0)
param_1 = s32[32]{0} parameter(1)
ROOT call = s32[32]{0} call(param_0, param_1), to_apply=host_computation, frontend_attributes={_xla_compute_type="host"}
}, execution_thread="host"
ENTRY main {
Arg_0.1 = s32[32]{0:T(128)} parameter(0)
constant.2 = s32[]{:T(128)} constant(2)
broadcast.3 = s32[32]{0:T(128)} broadcast(constant.2), dimensions={}
multiply.4 = s32[32]{0:T(128)} multiply(Arg_0.1, broadcast.3)
move_to_host.1 = s32[32]{0:T(128)} custom-call(multiply.4), custom_call_target="MoveToHost"
move_to_host.2 = s32[32]{0:T(128)} custom-call(multiply.4), custom_call_target="MoveToHost"
start = ((s32[32]{0:T(128)}, s32[32]{0:T(128)}), s32[32]{0:T(128)}, u32[]{:T(128)}) async-start(move_to_host.1, move_to_host.2), async_execution_thread="host", calls=async_computation
ROOT done = s32[32]{0:T(128)} async-done(start), frontend_attributes={_xla_compute_type="host"}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed,
RunRewrite(module.get(), Rewrite::kElideMoveToHost));
EXPECT_TRUE(changed);
for (const HloInstruction* instruction :
GetHostOffloadAsyncStartInstructions(module.get())) {
for (const HloInstruction* operand : instruction->operands()) {
EXPECT_FALSE(operand->IsCustomCall(
{host_memory_offload_annotations::kMoveToHostCustomCallTarget}));
}
for (const HloInstruction* user : instruction->users()) {
EXPECT_FALSE(user->IsCustomCall(
{host_memory_offload_annotations::kMoveToDeviceCustomCallTarget}));
}
}
}
TEST_F(HostOffloadingPrepareTest, SingleInputHasMoveToDevice) {
const std::string& hlo_string = R"(
HloModule my_module, entry_computation_layout={(s32[32]{0:T(128)})->s32[32]{0:T(128)}}
host_computation {
Arg_0.0 = s32[32]{0} parameter(0)
ROOT multiply.0 = s32[32]{0} multiply(Arg_0.0, Arg_0.0)
}, execution_thread="host"
async_computation {
param_0 = s32[32]{0} parameter(0)
ROOT call = s32[32]{0} call(param_0), to_apply=host_computation, frontend_attributes={_xla_compute_type="host"}
}, execution_thread="host"
ENTRY main {
Arg_0.1 = s32[32]{0:T(128)} parameter(0)
constant.2 = s32[]{:T(128)} constant(2)
broadcast.3 = s32[32]{0:T(128)} broadcast(constant.2), dimensions={}
multiply.4 = s32[32]{0:T(128)} multiply(Arg_0.1, broadcast.3)
move_to_device = s32[32]{0:T(128)} custom-call(multiply.4), custom_call_target="MoveToDevice"
start = ((s32[32]{0:T(128)}), s32[32]{0:T(128)}, u32[]{:T(128)}) async-start(move_to_device), async_execution_thread="host", calls=async_computation
ROOT done = s32[32]{0:T(128)} async-done(start), frontend_attributes={_xla_compute_type="host"}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed,
RunRewrite(module.get(), Rewrite::kElideMoveToHost));
EXPECT_FALSE(changed);
}
TEST_F(HostOffloadingPrepareTest, MultipleInputHasOneMoveToDevice) {
const std::string& hlo_string = R"(
HloModule my_module, entry_computation_layout={(s32[32]{0:T(128)})->s32[32]{0:T(128)}}
host_computation {
Arg_0.0 = s32[32]{0} parameter(0)
Arg_0.1 = s32[32]{0} parameter(1)
ROOT multiply.0 = s32[32]{0} multiply(Arg_0.0, Arg_0.1)
}, execution_thread="host"
async_computation {
param_0 = s32[32]{0} parameter(0)
param_1 = s32[32]{0} parameter(1)
ROOT call = s32[32]{0} call(param_0, param_1), to_apply=host_computation, frontend_attributes={_xla_compute_type="host"}
}, execution_thread="host"
ENTRY main {
Arg_0.1 = s32[32]{0:T(128)} parameter(0)
constant.2 = s32[]{:T(128)} constant(2)
broadcast.3 = s32[32]{0:T(128)} broadcast(constant.2), dimensions={}
multiply.4 = s32[32]{0:T(128)} multiply(Arg_0.1, broadcast.3)
move_to_device = s32[32]{0:T(128)} custom-call(multiply.4), custom_call_target="MoveToDevice"
custom-call.cloned.call-start = ((s32[32]{0:T(128)}, s32[32]{0:T(128)}), s32[32]{0:T(128)}, u32[]{:T(128)}) async-start(move_to_device, move_to_device), async_execution_thread="host", calls=async_computation
ROOT custom-call.cloned.call-done = s32[32]{0:T(128)} async-done(custom-call.cloned.call-start), frontend_attributes={_xla_compute_type="host"}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed,
RunRewrite(module.get(), Rewrite::kElideMoveToHost));
EXPECT_FALSE(changed);
}
TEST_F(HostOffloadingPrepareTest, MultipleInputHasMultipleMoveToDevice) {
const std::string& hlo_string = R"(
HloModule my_module, entry_computation_layout={(s32[32]{0:T(128)})->s32[32]{0:T(128)}}
host_computation {
Arg_0.0 = s32[32]{0} parameter(0)
Arg_0.1 = s32[32]{0} parameter(1)
ROOT multiply.0 = s32[32]{0} multiply(Arg_0.0, Arg_0.1)
}, execution_thread="host"
async_computation {
param_0 = s32[32]{0} parameter(0)
param_1 = s32[32]{0} parameter(1)
ROOT call = s32[32]{0} call(param_0, param_1), to_apply=host_computation, frontend_attributes={_xla_compute_type="host"}
}, execution_thread="host"
ENTRY main {
Arg_0.1 = s32[32]{0:T(128)} parameter(0)
constant.2 = s32[]{:T(128)} constant(2)
broadcast.3 = s32[32]{0:T(128)} broadcast(constant.2), dimensions={}
multiply.4 = s32[32]{0:T(128)} multiply(Arg_0.1, broadcast.3)
move_to_device.1 = s32[32]{0:T(128)} custom-call(multiply.4), custom_call_target="MoveToDevice"
move_to_device.2 = s32[32]{0:T(128)} custom-call(multiply.4), custom_call_target="MoveToDevice"
start = ((s32[32]{0:T(128)}, s32[32]{0:T(128)}), s32[32]{0:T(128)}, u32[]{:T(128)}) async-start(move_to_device.1, move_to_device.2), async_execution_thread="host", calls=async_computation
ROOT done = s32[32]{0:T(128)} async-done(start), frontend_attributes={_xla_compute_type="host"}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed,
RunRewrite(module.get(), Rewrite::kElideMoveToHost));
EXPECT_FALSE(changed);
}
TEST_F(HostOffloadingPrepareTest, ConvertToCustomCall) {
const char* hlo = R"(
HloModule my_module
host_computation {
Arg_0.0 = s32[32] parameter(0)
ROOT multiply.0 = s32[32] multiply(Arg_0.0, Arg_0.0)
}, execution_thread="host"
async_computation {
param_0 = s32[32] parameter(0)
ROOT call = s32[32] call(param_0), to_apply=host_computation
}, execution_thread="host"
ENTRY main {
Arg_0.1 = s32[32] parameter(0)
start = ((s32[32]), s32[32], u32[]) async-start(Arg_0.1),
async_execution_thread="host", calls=async_computation
ROOT done = s32[32] async-done(start)
}
)";
const char* expected = R"(
)";
RunAndFilecheckHloRewrite(
hlo, HostOffloadingPrepare(Rewrite::kConvertToCustomCall), expected);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/host_offloading_prepare.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/host_offloading_prepare_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
7c7e80e4-5c2e-4c2d-b596-3d9fceb6252c | cpp | tensorflow/tensorflow | simple_opaque_delegate | tensorflow/lite/delegates/utils/simple_opaque_delegate.cc | tensorflow/lite/delegates/utils/simple_opaque_delegate_test.cc | #include "tensorflow/lite/delegates/utils/simple_opaque_delegate.h"
#include <stddef.h>
#include <stdint.h>
#include <memory>
#include <vector>
#include "tensorflow/lite/array.h"
#include "tensorflow/lite/builtin_ops.h"
#include "tensorflow/lite/c/c_api.h"
#include "tensorflow/lite/c/c_api_opaque.h"
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
namespace tflite {
namespace {
TfLiteOperator* CreateDelegateKernelRegistration(
SimpleOpaqueDelegateInterface* delegate) {
TfLiteOperator* kernel_registration =
TfLiteOperatorCreate(kTfLiteBuiltinDelegate, delegate->Name(),
/*version=*/1, /*user_data=*/nullptr);
TfLiteOperatorSetFreeWithData(
kernel_registration,
[](void* user_data, TfLiteOpaqueContext* context, void* buffer) -> void {
delete reinterpret_cast<SimpleOpaqueDelegateInterface*>(buffer);
});
TfLiteOperatorSetInitWithData(
kernel_registration,
[](void* user_data, TfLiteOpaqueContext* context, const char* buffer,
size_t length) -> void* {
const TfLiteOpaqueDelegateParams* params =
reinterpret_cast<const TfLiteOpaqueDelegateParams*>(buffer);
if (params == nullptr) {
return nullptr;
}
auto* delegate_data = reinterpret_cast<SimpleOpaqueDelegateInterface*>(
params->delegate_data);
std::unique_ptr<SimpleOpaqueDelegateKernelInterface> delegate_kernel(
delegate_data->CreateDelegateKernelInterface());
if (delegate_kernel->Init(context, params) != kTfLiteOk) {
return nullptr;
}
return delegate_kernel.release();
});
TfLiteOperatorSetPrepareWithData(
kernel_registration,
[](void* user_data, TfLiteOpaqueContext* context,
TfLiteOpaqueNode* opaque_node) -> TfLiteStatus {
SimpleOpaqueDelegateKernelInterface* delegate_kernel =
reinterpret_cast<SimpleOpaqueDelegateKernelInterface*>(
TfLiteOpaqueNodeGetUserData(opaque_node));
return delegate_kernel->Prepare(context, opaque_node);
});
TfLiteOperatorSetInvokeWithData(
kernel_registration,
[](void* user_data, TfLiteOpaqueContext* context,
TfLiteOpaqueNode* opaque_node) -> TfLiteStatus {
SimpleOpaqueDelegateKernelInterface* delegate_kernel =
reinterpret_cast<SimpleOpaqueDelegateKernelInterface*>(
TfLiteOpaqueNodeGetUserData(opaque_node));
TFLITE_DCHECK(delegate_kernel != nullptr);
return delegate_kernel->Eval(context, opaque_node);
});
return kernel_registration;
}
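// Entry point invoked when the delegate is applied to a graph: collects the
// nodes the SimpleOpaqueDelegateInterface claims support for and hands them
// to a single delegate kernel built by the registration above.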
TfLiteStatus DelegatePrepare(TfLiteOpaqueContext* opaque_context,
TfLiteOpaqueDelegate* opaque_delegate,
void* data) {
auto* simple_opaque_delegate =
reinterpret_cast<SimpleOpaqueDelegateInterface*>(data);
TF_LITE_ENSURE_STATUS(simple_opaque_delegate->Initialize(opaque_context));
std::vector<int> supported_nodes;
TfLiteIntArray* execution_plan;
TF_LITE_ENSURE_STATUS(
TfLiteOpaqueContextGetExecutionPlan(opaque_context, &execution_plan));
IntArrayUniquePtr plan(TfLiteIntArrayCopy(execution_plan));
for (int i = 0; i < plan->size; ++i) {
const int node_id = plan->data[i];
TfLiteOpaqueNode* opaque_node;
TfLiteOperator* registration_external;
TfLiteOpaqueContextGetNodeAndRegistration(
opaque_context, node_id, &opaque_node, ®istration_external);
if (simple_opaque_delegate->IsNodeSupportedByDelegate(
registration_external, opaque_node, opaque_context)) {
supported_nodes.push_back(node_id);
}
}
TfLiteOperator* delegate_kernel_registration =
CreateDelegateKernelRegistration(simple_opaque_delegate);
return TfLiteOpaqueContextReplaceNodeSubsetsWithDelegateKernels(
opaque_context, delegate_kernel_registration,
BuildTfLiteArray(supported_nodes).get(), opaque_delegate);
}
}
TfLiteOpaqueDelegate* TfLiteOpaqueDelegateFactory::CreateSimpleDelegate(
std::unique_ptr<SimpleOpaqueDelegateInterface> simple_delegate,
int64_t flags) {
if (simple_delegate == nullptr) {
return {};
}
TfLiteOpaqueDelegateBuilder opaque_delegate_builder{};
opaque_delegate_builder.Prepare = &DelegatePrepare;
opaque_delegate_builder.flags = flags;
opaque_delegate_builder.data = simple_delegate.release();
opaque_delegate_builder.CopyFromBufferHandle =
[](TfLiteOpaqueContext* context, TfLiteOpaqueDelegate* delegate,
void* data, TfLiteBufferHandle buffer_handle,
TfLiteOpaqueTensor* tensor) {
auto* simple_delegate =
reinterpret_cast<SimpleOpaqueDelegateInterface*>(data);
return simple_delegate->CopyFromBufferHandle(context, buffer_handle,
tensor);
};
opaque_delegate_builder.CopyToBufferHandle =
[](TfLiteOpaqueContext* context, TfLiteOpaqueDelegate* delegate,
void* data, TfLiteBufferHandle buffer_handle,
TfLiteOpaqueTensor* tensor) {
auto* simple_delegate =
reinterpret_cast<SimpleOpaqueDelegateInterface*>(data);
return simple_delegate->CopyToBufferHandle(context, buffer_handle,
tensor);
};
opaque_delegate_builder.FreeBufferHandle =
[](TfLiteOpaqueContext* context, TfLiteOpaqueDelegate* delegate,
void* data, TfLiteBufferHandle* buffer_handle) {
auto* simple_delegate =
reinterpret_cast<SimpleOpaqueDelegateInterface*>(data);
simple_delegate->FreeBufferHandle(context, buffer_handle);
};
return TfLiteOpaqueDelegateCreate(&opaque_delegate_builder);
}
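// Releases a delegate built by CreateSimpleDelegate: recovers the
// SimpleOpaqueDelegateInterface stored as the opaque delegate's data and
// frees both objects.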
void TfLiteOpaqueDelegateFactory::DeleteSimpleDelegate(
TfLiteOpaqueDelegate* opaque_delegate) {
if (!opaque_delegate) return;
auto* simple_delegate = reinterpret_cast<SimpleOpaqueDelegateInterface*>(
TfLiteOpaqueDelegateGetData(opaque_delegate));
delete simple_delegate;
TfLiteOpaqueDelegateDelete(opaque_delegate);
}
} | #include "tensorflow/lite/delegates/utils/simple_opaque_delegate.h"
#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <array>
#include <memory>
#include <utility>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/lite/builtin_ops.h"
#include "tensorflow/lite/c/c_api.h"
#include "tensorflow/lite/c/c_api_opaque.h"
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/delegates/delegate_test_util.h"
#include "tensorflow/lite/delegates/utils/experimental/sample_stable_delegate/sample_stable_delegate.h"
#include "tensorflow/lite/interpreter.h"
#include "tensorflow/lite/interpreter_builder.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/kernels/register.h"
#include "tensorflow/lite/model_builder.h"
namespace tflite {
class TestDelegate : public ::testing::Test {};
TEST_F(TestDelegate, TestDataAddBin_SingleInputSingleOutput_FullyDelegated) {
TfLiteOpaqueDelegateUniquePtr my_opaque_delegate =
TfLiteOpaqueDelegateFactory::Create(
std::make_unique<example::SampleStableDelegate>());
TfLiteModel* model =
TfLiteModelCreateFromFile("tensorflow/lite/testdata/add.bin");
ASSERT_NE(model, nullptr);
TfLiteInterpreterOptions* options = TfLiteInterpreterOptionsCreate();
ASSERT_NE(options, nullptr);
TfLiteInterpreterOptionsSetNumThreads(options, 2);
TfLiteInterpreterOptionsAddDelegate(options, my_opaque_delegate.get());
TfLiteInterpreter* interpreter = TfLiteInterpreterCreate(model, options);
ASSERT_NE(interpreter, nullptr);
TfLiteInterpreterOptionsDelete(options);
ASSERT_EQ(TfLiteInterpreterAllocateTensors(interpreter), kTfLiteOk);
ASSERT_EQ(TfLiteInterpreterGetInputTensorCount(interpreter), 1);
ASSERT_EQ(TfLiteInterpreterGetOutputTensorCount(interpreter), 1);
TfLiteTensor* input_tensor =
TfLiteInterpreterGetInputTensor(interpreter, 0);
ASSERT_NE(input_tensor, nullptr);
EXPECT_EQ(TfLiteTensorType(input_tensor), kTfLiteFloat32);
EXPECT_NE(TfLiteTensorData(input_tensor), nullptr);
EXPECT_STREQ(TfLiteTensorName(input_tensor), "input");
TfLiteQuantizationParams input_params =
TfLiteTensorQuantizationParams(input_tensor);
EXPECT_EQ(input_params.scale, 0.f);
EXPECT_EQ(input_params.zero_point, 0);
const float kTensorCellValue = 3.f;
int64_t n = tflite::NumElements(input_tensor);
std::vector<float> input(n, kTensorCellValue);
ASSERT_EQ(TfLiteTensorCopyFromBuffer(input_tensor, input.data(),
input.size() * sizeof(float)),
kTfLiteOk);
ASSERT_EQ(TfLiteInterpreterInvoke(interpreter), kTfLiteOk);
const TfLiteTensor* output_tensor =
TfLiteInterpreterGetOutputTensor(interpreter, 0);
ASSERT_NE(output_tensor, nullptr);
EXPECT_EQ(TfLiteTensorType(output_tensor), kTfLiteFloat32);
EXPECT_NE(TfLiteTensorData(output_tensor), nullptr);
EXPECT_STREQ(TfLiteTensorName(output_tensor), "output");
TfLiteQuantizationParams output_params =
TfLiteTensorQuantizationParams(output_tensor);
EXPECT_EQ(output_params.scale, 0.f);
EXPECT_EQ(output_params.zero_point, 0);
std::vector<float> output(n, 0);
ASSERT_EQ(TfLiteTensorCopyToBuffer(output_tensor, output.data(),
output.size() * sizeof(float)),
kTfLiteOk);
for (int i = 0; i < output.size(); ++i) {
EXPECT_EQ(output[i], kTensorCellValue * 3);
}
TfLiteInterpreterDelete(interpreter);
TfLiteModelDelete(model);
}
TEST(DelegateTest,
TestDataAddBin_SingleInputSingleOutput_FullyDelegated_ResizeInputTensors) {
TfLiteOpaqueDelegateUniquePtr my_opaque_delegate =
TfLiteOpaqueDelegateFactory::Create(
std::make_unique<example::SampleStableDelegate>());
TfLiteModel* model =
TfLiteModelCreateFromFile("tensorflow/lite/testdata/add.bin");
ASSERT_NE(model, nullptr);
TfLiteInterpreterOptions* options = TfLiteInterpreterOptionsCreate();
ASSERT_NE(options, nullptr);
TfLiteInterpreterOptionsSetNumThreads(options, 2);
TfLiteInterpreterOptionsAddDelegate(options, my_opaque_delegate.get());
TfLiteInterpreter* interpreter = TfLiteInterpreterCreate(model, options);
ASSERT_NE(interpreter, nullptr);
TfLiteInterpreterOptionsDelete(options);
ASSERT_EQ(TfLiteInterpreterAllocateTensors(interpreter), kTfLiteOk);
ASSERT_EQ(TfLiteInterpreterGetInputTensorCount(interpreter), 1);
ASSERT_EQ(TfLiteInterpreterGetOutputTensorCount(interpreter), 1);
std::array<int, 1> input_dims = {2};
ASSERT_EQ(TfLiteInterpreterResizeInputTensor(
interpreter, 0, input_dims.data(), input_dims.size()),
kTfLiteOk);
ASSERT_EQ(TfLiteInterpreterAllocateTensors(interpreter), kTfLiteOk);
TfLiteTensor* input_tensor =
TfLiteInterpreterGetInputTensor(interpreter, 0);
ASSERT_NE(input_tensor, nullptr);
EXPECT_EQ(TfLiteTensorType(input_tensor), kTfLiteFloat32);
EXPECT_EQ(TfLiteTensorNumDims(input_tensor), 1);
EXPECT_EQ(TfLiteTensorDim(input_tensor, 0), 2);
EXPECT_EQ(TfLiteTensorByteSize(input_tensor), sizeof(float) * 2);
EXPECT_NE(TfLiteTensorData(input_tensor), nullptr);
EXPECT_STREQ(TfLiteTensorName(input_tensor), "input");
TfLiteQuantizationParams input_params =
TfLiteTensorQuantizationParams(input_tensor);
EXPECT_EQ(input_params.scale, 0.f);
EXPECT_EQ(input_params.zero_point, 0);
std::array<float, 2> input = {1.f, 3.f};
ASSERT_EQ(TfLiteTensorCopyFromBuffer(input_tensor, input.data(),
input.size() * sizeof(float)),
kTfLiteOk);
ASSERT_EQ(TfLiteInterpreterInvoke(interpreter), kTfLiteOk);
const TfLiteTensor* output_tensor =
TfLiteInterpreterGetOutputTensor(interpreter, 0);
ASSERT_NE(output_tensor, nullptr);
EXPECT_EQ(TfLiteTensorType(output_tensor), kTfLiteFloat32);
EXPECT_EQ(TfLiteTensorNumDims(output_tensor), 1);
EXPECT_EQ(TfLiteTensorDim(output_tensor, 0), 2);
EXPECT_EQ(TfLiteTensorByteSize(output_tensor), sizeof(float) * 2);
EXPECT_NE(TfLiteTensorData(output_tensor), nullptr);
EXPECT_STREQ(TfLiteTensorName(output_tensor), "output");
TfLiteQuantizationParams output_params =
TfLiteTensorQuantizationParams(output_tensor);
EXPECT_EQ(output_params.scale, 0.f);
EXPECT_EQ(output_params.zero_point, 0);
std::array<float, 2> output;
ASSERT_EQ(TfLiteTensorCopyToBuffer(output_tensor, output.data(),
output.size() * sizeof(float)),
kTfLiteOk);
EXPECT_EQ(output[0], 3.f);
EXPECT_EQ(output[1], 9.f);
TfLiteInterpreterDelete(interpreter);
TfLiteModelDelete(model);
}
TEST(DelegateTest, TestDataMultiAddBin_MultiInputMultiOutput_FullyDelegated) {
TfLiteOpaqueDelegateUniquePtr my_opaque_delegate =
TfLiteOpaqueDelegateFactory::Create(
std::make_unique<example::SampleStableDelegate>());
TfLiteModel* model = TfLiteModelCreateFromFile(
"tensorflow/lite/testdata/multi_add.bin");
ASSERT_NE(model, nullptr);
TfLiteInterpreterOptions* options = TfLiteInterpreterOptionsCreate();
ASSERT_NE(options, nullptr);
TfLiteInterpreterOptionsSetNumThreads(options, 2);
TfLiteInterpreterOptionsAddDelegate(options, my_opaque_delegate.get());
TfLiteInterpreter* interpreter = TfLiteInterpreterCreate(model, options);
ASSERT_NE(interpreter, nullptr);
TfLiteInterpreterOptionsDelete(options);
ASSERT_EQ(TfLiteInterpreterAllocateTensors(interpreter), kTfLiteOk);
ASSERT_EQ(TfLiteInterpreterGetInputTensorCount(interpreter), 4);
ASSERT_EQ(TfLiteInterpreterGetOutputTensorCount(interpreter), 2);
TfLiteTensor* input_tensor0 =
TfLiteInterpreterGetInputTensor(interpreter, 0);
TfLiteTensor* input_tensor1 =
TfLiteInterpreterGetInputTensor(interpreter, 1);
TfLiteTensor* input_tensor2 =
TfLiteInterpreterGetInputTensor(interpreter, 2);
TfLiteTensor* input_tensor3 =
TfLiteInterpreterGetInputTensor(interpreter, 3);
std::vector<TfLiteTensor*> input_tensors{input_tensor0, input_tensor1,
input_tensor2, input_tensor3};
for (TfLiteTensor* input_tensor : input_tensors) {
const float kTensorCellValue = 1.f;
int64_t n = tflite::NumElements(input_tensor);
std::vector<float> input(n, kTensorCellValue);
ASSERT_EQ(TfLiteTensorCopyFromBuffer(input_tensor, input.data(),
input.size() * sizeof(float)),
kTfLiteOk);
}
ASSERT_EQ(TfLiteInterpreterInvoke(interpreter), kTfLiteOk);
const TfLiteTensor* output_tensor0 =
TfLiteInterpreterGetOutputTensor(interpreter, 0);
const TfLiteTensor* output_tensor1 =
TfLiteInterpreterGetOutputTensor(interpreter, 1);
std::vector<const TfLiteTensor*> output_tensors{output_tensor0,
output_tensor1};
for (const TfLiteTensor* output_tensor : output_tensors) {
int64_t n = tflite::NumElements(output_tensor);
std::vector<float> output_tensor_values(n, 0);
ASSERT_EQ(
TfLiteTensorCopyToBuffer(output_tensor, output_tensor_values.data(),
output_tensor_values.size() * sizeof(float)),
kTfLiteOk);
for (int i = 0; i < n; ++i) {
EXPECT_EQ(output_tensor_values[i], 3.f);
}
}
TfLiteInterpreterDelete(interpreter);
TfLiteModelDelete(model);
}
TfLiteOperator* CreateDelegateKernelRegistrationImpl(
SimpleOpaqueDelegateInterface* delegate) {
TfLiteOperator* kernel_registration = TfLiteOperatorCreate(
kTfLiteBuiltinDelegate, delegate->Name(), /*version=*/1,
/*user_data=*/nullptr);
TfLiteOperatorSetFreeWithData(
kernel_registration,
[](void* user_data, TfLiteOpaqueContext* context, void* buffer) -> void {
delete reinterpret_cast<SimpleOpaqueDelegateInterface*>(buffer);
});
TfLiteOperatorSetInitWithData(
kernel_registration,
[](void* user_data, TfLiteOpaqueContext* context, const char* buffer,
size_t length) -> void* {
auto* params =
reinterpret_cast<const TfLiteOpaqueDelegateParams*>(buffer);
if (params == nullptr) {
return nullptr;
}
auto* simple_delegate =
reinterpret_cast<SimpleOpaqueDelegateInterface*>(
params->delegate_data);
std::unique_ptr<SimpleOpaqueDelegateKernelInterface> delegate_kernel(
simple_delegate->CreateDelegateKernelInterface());
if (delegate_kernel->Init(context, params) != kTfLiteOk) {
return nullptr;
}
return delegate_kernel.release();
});
TfLiteOperatorSetPrepareWithData(
kernel_registration,
[](void* user_data, TfLiteOpaqueContext* context,
TfLiteOpaqueNode* opaque_node) -> TfLiteStatus {
SimpleOpaqueDelegateKernelInterface* delegate_kernel =
reinterpret_cast<SimpleOpaqueDelegateKernelInterface*>(
TfLiteOpaqueNodeGetUserData(opaque_node));
return delegate_kernel->Prepare(context, opaque_node);
});
TfLiteOperatorSetInvokeWithData(
kernel_registration,
[](void* user_data, TfLiteOpaqueContext* context,
TfLiteOpaqueNode* opaque_node) -> TfLiteStatus {
SimpleOpaqueDelegateKernelInterface* delegate_kernel =
reinterpret_cast<SimpleOpaqueDelegateKernelInterface*>(
TfLiteOpaqueNodeGetUserData(opaque_node));
TFLITE_DCHECK(delegate_kernel != nullptr);
return delegate_kernel->Eval(context, opaque_node);
});
return kernel_registration;
}
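// Mirrors CreateDelegateKernelRegistration from the implementation file so
// the tests below can assemble their own TfLiteOpaqueDelegateBuilder (see
// the SetBufferHandle test) without going through the factory.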
using ::tflite::delegates::test_utils::TestFP16Delegation;
TEST_F(TestFP16Delegation, MultipleDelegateKernels) {
auto my_simple_delegate = std::make_unique<example::SampleStableDelegate>();
TfLiteOpaqueDelegate* opaque_delegate =
TfLiteOpaqueDelegateFactory::CreateSimpleDelegate(
std::move(my_simple_delegate));
ASSERT_EQ(interpreter_->ModifyGraphWithDelegate(
reinterpret_cast<TfLiteDelegate*>(opaque_delegate)),
kTfLiteOk);
ASSERT_EQ(interpreter_->execution_plan().size(), 7);
VerifyInvoke();
TfLiteOpaqueDelegateFactory::DeleteSimpleDelegate(opaque_delegate);
}
class MySimpleOpaqueDelegateWithBufferHandleSupport
: public example::SampleStableDelegate {
public:
static constexpr int kDelegateOutputValue = 42;
TfLiteStatus CopyFromBufferHandle(TfLiteOpaqueContext* context,
TfLiteBufferHandle buffer_handle,
TfLiteOpaqueTensor* tensor) override {
auto* output = reinterpret_cast<float*>(TfLiteOpaqueTensorData(tensor));
std::vector<float> test_output(
example::helpers::CalculateNumElements(tensor), kDelegateOutputValue);
memcpy(output, test_output.data(), test_output.size() * sizeof(float));
return kTfLiteOk;
}
void FreeBufferHandle(TfLiteOpaqueContext* context,
TfLiteBufferHandle* handle) override {
recorded_buffer_handle_ = *handle;
free_buffer_handle_called_ = true;
}
int recorded_buffer_handle_ = -1;
bool free_buffer_handle_called_ = false;
};
TEST_F(TestDelegate, SetBufferHandle) {
MySimpleOpaqueDelegateWithBufferHandleSupport my_simple_delegate;
TfLiteOpaqueDelegateBuilder opaque_delegate_builder{};
opaque_delegate_builder.Prepare = [](TfLiteOpaqueContext* opaque_context,
TfLiteOpaqueDelegate* opaque_delegate,
void* data) {
auto* simple_opaque_delegate =
reinterpret_cast<SimpleOpaqueDelegateInterface*>(data);
TF_LITE_ENSURE_STATUS(simple_opaque_delegate->Initialize(opaque_context));
TfLiteIntArray* execution_plan;
TF_LITE_ENSURE_STATUS(
TfLiteOpaqueContextGetExecutionPlan(opaque_context, &execution_plan));
TfLiteOperator* delegate_kernel_registration =
CreateDelegateKernelRegistrationImpl(simple_opaque_delegate);
return TfLiteOpaqueContextReplaceNodeSubsetsWithDelegateKernels(
opaque_context, delegate_kernel_registration, execution_plan,
opaque_delegate);
};
opaque_delegate_builder.flags = kTfLiteDelegateFlagsNone;
opaque_delegate_builder.data = &my_simple_delegate;
opaque_delegate_builder.CopyFromBufferHandle =
[](TfLiteOpaqueContext* context, TfLiteOpaqueDelegate* delegate,
void* data, TfLiteBufferHandle buffer_handle,
TfLiteOpaqueTensor* tensor) -> TfLiteStatus {
auto* simple_opaque_delegate =
reinterpret_cast<MySimpleOpaqueDelegateWithBufferHandleSupport*>(data);
simple_opaque_delegate->CopyFromBufferHandle(context, buffer_handle,
tensor);
return kTfLiteOk;
};
opaque_delegate_builder.FreeBufferHandle = [](TfLiteOpaqueContext* context,
TfLiteOpaqueDelegate* delegate,
void* data,
TfLiteBufferHandle* handle) {
auto* simple_opaque_delegate =
reinterpret_cast<MySimpleOpaqueDelegateWithBufferHandleSupport*>(data);
simple_opaque_delegate->FreeBufferHandle(context, handle);
};
TfLiteDelegate tflite_delegate{};
tflite_delegate.opaque_delegate_builder = &opaque_delegate_builder;
std::unique_ptr<tflite::FlatBufferModel> model =
tflite::FlatBufferModel::BuildFromFile(
"tensorflow/lite/testdata/add.bin");
ASSERT_NE(model, nullptr);
tflite::ops::builtin::BuiltinOpResolver resolver;
tflite::InterpreterBuilder builder(*model, resolver);
builder.AddDelegate(&tflite_delegate);
std::unique_ptr<tflite::Interpreter> interpreter;
builder(&interpreter);
ASSERT_NE(interpreter, nullptr);
ASSERT_EQ(interpreter->AllocateTensors(), kTfLiteOk);
constexpr int kTensorDimensions = 1 * 8 * 8 * 3;
std::vector<float> floats(kTensorDimensions, 1);
memcpy(interpreter->typed_input_tensor<float>(0), floats.data(),
floats.size() * sizeof(float));
EXPECT_FALSE(my_simple_delegate.free_buffer_handle_called_);
int first_buffer_handle = 1;
const int kOutputTensorIndex = 2;
interpreter->SetBufferHandle(
kOutputTensorIndex, first_buffer_handle,
reinterpret_cast<TfLiteDelegate*>(&tflite_delegate));
TfLiteTensor* output_t = interpreter->output_tensor(0);
output_t->data_is_stale = true;
EXPECT_FALSE(my_simple_delegate.free_buffer_handle_called_);
EXPECT_NE(my_simple_delegate.recorded_buffer_handle_, first_buffer_handle);
ASSERT_EQ(interpreter->Invoke(), kTfLiteOk);
std::vector<float> outputs(kTensorDimensions, 0);
memcpy(outputs.data(), interpreter->typed_output_tensor<float>(0),
outputs.size() * sizeof(float));
for (int i = 0; i < outputs.size(); ++i) {
EXPECT_EQ(
outputs[i],
MySimpleOpaqueDelegateWithBufferHandleSupport::kDelegateOutputValue);
}
int next_buffer_handle = first_buffer_handle + 1;
interpreter->SetBufferHandle(kOutputTensorIndex, next_buffer_handle,
&tflite_delegate);
EXPECT_TRUE(my_simple_delegate.free_buffer_handle_called_);
EXPECT_EQ(my_simple_delegate.recorded_buffer_handle_, first_buffer_handle);
my_simple_delegate.free_buffer_handle_called_ = false;
my_simple_delegate.recorded_buffer_handle_ = first_buffer_handle = -1;
interpreter.reset();
EXPECT_TRUE(my_simple_delegate.free_buffer_handle_called_);
EXPECT_EQ(my_simple_delegate.recorded_buffer_handle_, next_buffer_handle);
}
TEST(DelegateTest,
TestDataConvHugeIm2ColBin_MultiInputSingleOutput_PartiallyDelegated) {
TfLiteOpaqueDelegateUniquePtr my_opaque_delegate =
TfLiteOpaqueDelegateFactory::Create(
std::make_unique<example::SampleStableDelegate>());
TfLiteModel* model = TfLiteModelCreateFromFile(
"tensorflow/lite/testdata/conv_huge_im2col.bin");
ASSERT_NE(model, nullptr);
TfLiteInterpreterOptions* options = TfLiteInterpreterOptionsCreate();
ASSERT_NE(options, nullptr);
TfLiteInterpreterOptionsSetNumThreads(options, 2);
TfLiteInterpreterOptionsAddDelegate(options, my_opaque_delegate.get());
TfLiteInterpreter* interpreter = TfLiteInterpreterCreate(model, options);
ASSERT_NE(interpreter, nullptr);
TfLiteInterpreterOptionsDelete(options);
ASSERT_EQ(TfLiteInterpreterAllocateTensors(interpreter), kTfLiteOk);
ASSERT_EQ(TfLiteInterpreterGetInputTensorCount(interpreter), 4);
ASSERT_EQ(TfLiteInterpreterGetOutputTensorCount(interpreter), 1);
TfLiteTensor* input_tensor0 =
TfLiteInterpreterGetInputTensor(interpreter, 0);
TfLiteTensor* input_tensor1 =
TfLiteInterpreterGetInputTensor(interpreter, 1);
TfLiteTensor* input_tensor2 =
TfLiteInterpreterGetInputTensor(interpreter, 2);
TfLiteTensor* input_tensor3 =
TfLiteInterpreterGetInputTensor(interpreter, 3);
std::vector<TfLiteTensor*> input_tensors{input_tensor0, input_tensor1,
input_tensor2, input_tensor3};
for (TfLiteTensor* input_tensor : input_tensors) {
const float kTensorCellValue = 4.f;
int64_t n = tflite::NumElements(input_tensor);
std::vector<float> input(n, kTensorCellValue);
ASSERT_EQ(TfLiteTensorCopyFromBuffer(input_tensor, input.data(),
input.size() * sizeof(float)),
kTfLiteOk);
}
ASSERT_EQ(TfLiteInterpreterInvoke(interpreter), kTfLiteOk);
const TfLiteTensor* output_tensor =
TfLiteInterpreterGetOutputTensor(interpreter, 0);
ASSERT_NE(output_tensor, nullptr);
EXPECT_EQ(TfLiteTensorType(output_tensor), kTfLiteFloat32);
EXPECT_NE(TfLiteTensorData(output_tensor), nullptr);
TfLiteQuantizationParams output_params =
TfLiteTensorQuantizationParams(output_tensor);
EXPECT_EQ(output_params.scale, 0.f);
EXPECT_EQ(output_params.zero_point, 0);
int64_t n = tflite::NumElements(output_tensor);
std::vector<float> output(n, 0);
ASSERT_EQ(TfLiteTensorCopyToBuffer(output_tensor, output.data(),
output.size() * sizeof(float)),
kTfLiteOk);
for (int i = 0; i < n; ++i) {
EXPECT_EQ(output[i], 4);
}
TfLiteInterpreterDelete(interpreter);
TfLiteModelDelete(model);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/utils/simple_opaque_delegate.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/utils/simple_opaque_delegate_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
d02217b8-66ec-4437-93a2-5f7cdff94d3b | cpp | tensorflow/tensorflow | cl_device | tensorflow/lite/delegates/gpu/cl/cl_device.cc | tensorflow/lite/delegates/gpu/cl/cl_device_test.cc | #include "tensorflow/lite/delegates/gpu/cl/cl_device.h"
#include <algorithm>
#include <string>
#include <utility>
#include <vector>
#include "absl/strings/ascii.h"
#include "absl/strings/numbers.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_split.h"
#include "tensorflow/lite/delegates/gpu/cl/opencl_wrapper.h"
#include "tensorflow/lite/delegates/gpu/cl/util.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/experimental/acceleration/compatibility/android_info.h"
namespace tflite {
namespace gpu {
namespace cl {
void ParseQualcommOpenClCompilerVersion(
const std::string& cl_driver_version,
AdrenoInfo::OpenClCompilerVersion* result) {
const std::string start = "Compiler E031.";
size_t position = cl_driver_version.find(start);
if (position == std::string::npos) {
return;
}
const size_t main_part_length = 8;
if (position + start.length() + main_part_length >
cl_driver_version.length()) {
return;
}
const std::string main_part =
cl_driver_version.substr(position + start.length(), main_part_length);
if (!absl::ascii_isdigit(main_part[0]) ||
!absl::ascii_isdigit(main_part[1]) || main_part[2] != '.' ||
!absl::ascii_isdigit(main_part[3]) ||
!absl::ascii_isdigit(main_part[4]) || main_part[5] != '.' ||
!absl::ascii_isdigit(main_part[6]) ||
!absl::ascii_isdigit(main_part[7])) {
return;
}
result->major = (main_part[0] - '0') * 10 + (main_part[1] - '0');
result->minor = (main_part[3] - '0') * 10 + (main_part[4] - '0');
result->patch = (main_part[6] - '0') * 10 + (main_part[7] - '0');
}
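// Example accepted by the parser above (mirrored in this row's unit tests):
// "... Compiler E031.79.53.41" yields major=79, minor=53, patch=41; any
// string not matching the two-digit NN.NN.NN shape leaves `result` untouched.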
static void ParsePowerVRDriverVersion(const std::string& cl_driver_version,
PowerVRInfo::DriverVersion& result) {
size_t position = cl_driver_version.find('@');
if (position == std::string::npos) {
return;
}
int main = 0;
size_t curpos = 0;
while (curpos < position && absl::ascii_isdigit(cl_driver_version[curpos])) {
main = main * 10 + cl_driver_version[curpos] - '0';
++curpos;
}
++curpos;
int minor = 0;
while (curpos < position) {
minor = minor * 10 + cl_driver_version[curpos] - '0';
++curpos;
}
curpos = position + 1;
int id = 0;
while (curpos < cl_driver_version.length()) {
id = id * 10 + cl_driver_version[curpos] - '0';
++curpos;
}
result.branch_main = main;
result.branch_minor = minor;
result.id = id;
}
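// A sketch of the format assumed above (inferred from the parsing logic, not
// from PowerVR documentation): "<main>.<minor>@<id>", e.g. "1.13@5776728"
// would give branch_main=1, branch_minor=13, id=5776728.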
template <>
std::string GetDeviceInfo<std::string>(cl_device_id id, cl_device_info info) {
size_t size;
cl_int error = clGetDeviceInfo(id, info, 0, nullptr, &size);
if (error != CL_SUCCESS) {
return "";
}
std::string result(size - 1, 0);
error = clGetDeviceInfo(id, info, size, &result[0], nullptr);
if (error != CL_SUCCESS) {
return "";
}
return result;
}
namespace {
template <typename T>
T GetPlatformInfo(cl_platform_id id, cl_platform_info info) {
T result;
cl_int error = clGetPlatformInfo(id, info, sizeof(T), &result, nullptr);
if (error != CL_SUCCESS) {
return -1;
}
return result;
}
std::string GetPlatformInfo(cl_platform_id id, cl_platform_info info) {
size_t size;
cl_int error = clGetPlatformInfo(id, info, 0, nullptr, &size);
if (error != CL_SUCCESS) {
return "";
}
std::string result(size - 1, 0);
error = clGetPlatformInfo(id, info, size, &result[0], nullptr);
if (error != CL_SUCCESS) {
return "";
}
return result;
}
void GetDeviceWorkDimsSizes(cl_device_id id, int3* result) {
int dims_count =
GetDeviceInfo<cl_uint>(id, CL_DEVICE_MAX_WORK_ITEM_DIMENSIONS);
if (dims_count < 3) {
return;
}
std::vector<size_t> limits(dims_count);
cl_int error =
clGetDeviceInfo(id, CL_DEVICE_MAX_WORK_ITEM_SIZES,
sizeof(size_t) * dims_count, limits.data(), nullptr);
if (error != CL_SUCCESS) {
return;
}
result->x = limits[0];
result->y = limits[1];
result->z = limits[2];
}
OpenClVersion ParseCLVersion(const std::string& version) {
const auto first_dot_pos = version.find_first_of('.');
if (first_dot_pos == std::string::npos) {
return OpenClVersion::kCl1_0;
}
const int major = version[first_dot_pos - 1] - '0';
const int minor = version[first_dot_pos + 1] - '0';
if (major == 1) {
if (minor == 2) {
return OpenClVersion::kCl1_2;
} else if (minor == 1) {
return OpenClVersion::kCl1_1;
} else {
return OpenClVersion::kCl1_0;
}
} else if (major == 2) {
if (minor == 2) {
return OpenClVersion::kCl2_2;
} else if (minor == 1) {
return OpenClVersion::kCl2_1;
} else {
return OpenClVersion::kCl2_0;
}
} else if (major == 3) {
return OpenClVersion::kCl3_0;
} else {
return OpenClVersion::kCl1_0;
}
}
bool IsGPUVersionInRange(int gpu_version, int min_version, int max_version) {
return gpu_version >= min_version && gpu_version < max_version;
}
GpuInfo GpuInfoFromDeviceID(cl_device_id id, cl_platform_id platform_id) {
GpuInfo info;
info.opencl_info.platform_version =
GetPlatformInfo(platform_id, CL_PLATFORM_VERSION);
info.opencl_info.device_name = GetDeviceInfo<std::string>(id, CL_DEVICE_NAME);
info.opencl_info.vendor_name =
GetDeviceInfo<std::string>(id, CL_DEVICE_VENDOR);
info.opencl_info.opencl_c_version =
GetDeviceInfo<std::string>(id, CL_DEVICE_OPENCL_C_VERSION);
info.opencl_info.driver_version =
GetDeviceInfo<std::string>(id, CL_DRIVER_VERSION);
const std::string gpu_description = absl::StrCat(
info.opencl_info.device_name, " ", info.opencl_info.vendor_name, " ",
info.opencl_info.opencl_c_version);
GetGpuInfoFromDeviceDescription(gpu_description, GpuApi::kOpenCl, &info);
info.opencl_info.cl_version =
ParseCLVersion(info.opencl_info.opencl_c_version);
info.opencl_info.extensions =
absl::StrSplit(GetDeviceInfo<std::string>(id, CL_DEVICE_EXTENSIONS), ' ');
const std::vector<std::string> unsupported_extensions =
GetUnsupportedExtensions();
for (const auto& unsupported_extension : unsupported_extensions) {
for (auto it = info.opencl_info.extensions.begin();
it != info.opencl_info.extensions.end();) {
if (*it == unsupported_extension) {
it = info.opencl_info.extensions.erase(it);
} else {
++it;
}
}
}
info.opencl_info.supports_fp16 = false;
info.opencl_info.supports_image3d_writes = false;
for (const auto& ext : info.opencl_info.extensions) {
if (ext == "cl_khr_fp16") {
info.opencl_info.supports_fp16 = true;
}
if (ext == "cl_khr_3d_image_writes") {
info.opencl_info.supports_image3d_writes = true;
}
}
info.opencl_info.supports_images =
GetDeviceInfo<cl_bool>(id, CL_DEVICE_IMAGE_SUPPORT);
cl_device_fp_config f32_config =
GetDeviceInfo<cl_device_fp_config>(id, CL_DEVICE_SINGLE_FP_CONFIG);
info.opencl_info.supports_fp32_rtn = f32_config & CL_FP_ROUND_TO_NEAREST;
if (info.opencl_info.supports_fp16) {
cl_device_fp_config f16_config;
auto status = GetDeviceInfo<cl_device_fp_config>(
id, CL_DEVICE_HALF_FP_CONFIG, &f16_config);
if (status.ok() && !info.IsAMD()) {
info.opencl_info.supports_fp16_rtn = f16_config & CL_FP_ROUND_TO_NEAREST;
} else {
f16_config = f32_config;
info.opencl_info.supports_fp16_rtn = info.opencl_info.supports_fp32_rtn;
}
} else {
info.opencl_info.supports_fp16_rtn = false;
}
if (info.IsPowerVR()) {
if (!info.powervr_info.IsBetterThan(PowerVRGpu::kRogueGm9xxx)) {
info.opencl_info.supports_fp16 = false;
} else if (!info.opencl_info.supports_fp16) {
info.opencl_info.supports_fp16 = true;
info.opencl_info.supports_fp16_rtn = info.opencl_info.supports_fp32_rtn;
}
}
if (!info.opencl_info.supports_image3d_writes &&
((info.IsAdreno() && info.adreno_info.IsAdreno4xx()) ||
info.IsNvidia())) {
info.opencl_info.supports_image3d_writes = true;
}
info.opencl_info.compute_units_count =
GetDeviceInfo<cl_uint>(id, CL_DEVICE_MAX_COMPUTE_UNITS);
info.opencl_info.image2d_max_width =
GetDeviceInfo<size_t>(id, CL_DEVICE_IMAGE2D_MAX_WIDTH);
info.opencl_info.image2d_max_height =
GetDeviceInfo<size_t>(id, CL_DEVICE_IMAGE2D_MAX_HEIGHT);
info.opencl_info.buffer_max_size =
GetDeviceInfo<cl_ulong>(id, CL_DEVICE_MAX_MEM_ALLOC_SIZE);
info.opencl_info.max_allocation_size =
GetDeviceInfo<cl_ulong>(id, CL_DEVICE_MAX_MEM_ALLOC_SIZE);
if (info.opencl_info.cl_version >= OpenClVersion::kCl1_2) {
info.opencl_info.image_buffer_max_size =
GetDeviceInfo<size_t>(id, CL_DEVICE_IMAGE_MAX_BUFFER_SIZE);
info.opencl_info.image_array_max_layers =
GetDeviceInfo<size_t>(id, CL_DEVICE_IMAGE_MAX_ARRAY_SIZE);
}
info.opencl_info.image3d_max_width =
GetDeviceInfo<size_t>(id, CL_DEVICE_IMAGE3D_MAX_WIDTH);
info.opencl_info.image3d_max_height =
GetDeviceInfo<size_t>(id, CL_DEVICE_IMAGE3D_MAX_HEIGHT);
info.opencl_info.image3d_max_depth =
GetDeviceInfo<size_t>(id, CL_DEVICE_IMAGE3D_MAX_DEPTH);
int3 max_work_group_sizes;
GetDeviceWorkDimsSizes(id, &max_work_group_sizes);
info.opencl_info.max_work_group_size_x = max_work_group_sizes.x;
info.opencl_info.max_work_group_size_y = max_work_group_sizes.y;
info.opencl_info.max_work_group_size_z = max_work_group_sizes.z;
info.opencl_info.max_work_group_total_size =
GetDeviceInfo<size_t>(id, CL_DEVICE_MAX_WORK_GROUP_SIZE);
info.opencl_info.dedicated_local_memory =
(GetDeviceInfo<cl_device_local_mem_type>(id, CL_DEVICE_LOCAL_MEM_TYPE) ==
CL_LOCAL);
if (info.IsCL30OrHigher()) {
info.opencl_info.preferred_work_group_size_multiple =
GetDeviceInfo<size_t>(id, CL_DEVICE_PREFERRED_WORK_GROUP_SIZE_MULTIPLE);
} else {
info.opencl_info.preferred_work_group_size_multiple = 0;
}
info.opencl_info.base_addr_align_in_bits =
GetDeviceInfo<cl_uint>(id, CL_DEVICE_MEM_BASE_ADDR_ALIGN);
info.opencl_info.image_pitch_alignment = 0;
if (info.opencl_info.cl_version == OpenClVersion::kCl2_0 ||
info.opencl_info.cl_version == OpenClVersion::kCl2_1 ||
info.opencl_info.cl_version == OpenClVersion::kCl2_2) {
info.opencl_info.image_pitch_alignment =
GetDeviceInfo<cl_uint>(id, CL_DEVICE_IMAGE_PITCH_ALIGNMENT);
info.opencl_info.image_base_address_alignment =
GetDeviceInfo<cl_uint>(id, CL_DEVICE_IMAGE_BASE_ADDRESS_ALIGNMENT);
} else if (info.SupportsExtension("cl_khr_image2d_from_buffer")) {
cl_uint result = 0;
auto status =
GetDeviceInfo(id, CL_DEVICE_IMAGE_PITCH_ALIGNMENT_KHR, &result);
if (status.ok()) {
info.opencl_info.image_pitch_alignment = result;
}
result = 0;
status =
GetDeviceInfo(id, CL_DEVICE_IMAGE_BASE_ADDRESS_ALIGNMENT_KHR, &result);
if (status.ok()) {
info.opencl_info.image_base_address_alignment = result;
}
}
if (info.SupportsExtension("cl_arm_scheduling_controls")) {
auto capabilities =
GetDeviceInfo<cl_device_scheduling_controls_capabilities_arm>(
id, CL_DEVICE_SCHEDULING_CONTROLS_CAPABILITIES_ARM);
info.opencl_info.supports_register_allocation_arm =
capabilities & CL_DEVICE_SCHEDULING_REGISTER_ALLOCATION_ARM;
}
if (info.SupportsExtension("cl_intel_required_subgroup_size")) {
size_t sub_groups_ret_size;
cl_int status =
clGetDeviceInfo(id, 0x4108 /*CL_DEVICE_SUB_GROUP_SIZES_INTEL*/, 0,
nullptr, &sub_groups_ret_size);
if (status == CL_SUCCESS) {
size_t sub_groups_count = sub_groups_ret_size / sizeof(size_t);
std::vector<size_t> sub_group_sizes(sub_groups_count);
status =
clGetDeviceInfo(id, 0x4108 /*CL_DEVICE_SUB_GROUP_SIZES_INTEL*/,
sub_groups_ret_size, sub_group_sizes.data(), nullptr);
if (status == CL_SUCCESS) {
for (int i = 0; i < sub_groups_count; ++i) {
info.supported_subgroup_sizes.push_back(sub_group_sizes[i]);
}
}
}
}
if (info.IsAdreno()) {
ParseQualcommOpenClCompilerVersion(info.opencl_info.driver_version,
&info.adreno_info.cl_compiler_version);
} else if (info.IsPowerVR()) {
ParsePowerVRDriverVersion(info.opencl_info.driver_version,
info.powervr_info.driver_version);
}
return info;
}
}
CLDevice::CLDevice(cl_device_id id, cl_platform_id platform_id)
: info_(GpuInfoFromDeviceID(id, platform_id)),
id_(id),
platform_id_(platform_id) {
if (info_.IsAdreno() &&
info_.adreno_info.adreno_gpu == AdrenoGpu::kAdreno630) {
acceleration::AndroidInfo android_info;
if (acceleration::RequestAndroidInfo(&android_info).ok()) {
info_.adreno_info.compiler_bugs_in_a6xx =
android_info.android_sdk_version == "26";
}
}
}
CLDevice::CLDevice(const CLDevice& device)
: info_(device.info_), id_(device.id_), platform_id_(device.platform_id_) {}
CLDevice& CLDevice::operator=(const CLDevice& device) {
if (this != &device) {
info_ = device.info_;
id_ = device.id_;
platform_id_ = device.platform_id_;
}
return *this;
}
CLDevice::CLDevice(CLDevice&& device)
: info_(std::move(device.info_)),
id_(device.id_),
platform_id_(device.platform_id_) {
device.id_ = nullptr;
device.platform_id_ = nullptr;
}
CLDevice& CLDevice::operator=(CLDevice&& device) {
if (this != &device) {
id_ = nullptr;
platform_id_ = nullptr;
info_ = std::move(device.info_);
std::swap(id_, device.id_);
std::swap(platform_id_, device.platform_id_);
}
return *this;
}
std::string CLDevice::GetPlatformVersion() const {
return GetPlatformInfo(platform_id_, CL_PLATFORM_VERSION);
}
void CLDevice::DisableOneLayerTextureArray() {
info_.adreno_info.support_one_layer_texture_array = false;
}
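// Selects the first GPU on the first available OpenCL platform. A minimal
// calling sketch (assuming the OpenCL symbols are already loaded):
//   CLDevice device;
//   absl::Status status = CreateDefaultGPUDevice(&device);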
absl::Status CreateDefaultGPUDevice(CLDevice* result) {
cl_uint num_platforms;
cl_int status = clGetPlatformIDs(0, nullptr, &num_platforms);
if (status != CL_SUCCESS) {
return absl::UnknownError(
absl::StrFormat("clGetPlatformIDs returned %d", status));
}
if (num_platforms == 0) {
return absl::UnknownError("No supported OpenCL platform.");
}
std::vector<cl_platform_id> platforms(num_platforms);
status = clGetPlatformIDs(num_platforms, platforms.data(), nullptr);
if (status != CL_SUCCESS) {
return absl::UnknownError(
absl::StrFormat("clGetPlatformIDs returned %d", status));
}
cl_platform_id platform_id = platforms[0];
cl_uint num_devices;
status =
clGetDeviceIDs(platform_id, CL_DEVICE_TYPE_GPU, 0, nullptr, &num_devices);
if (status != CL_SUCCESS) {
return absl::UnknownError(
absl::StrFormat("clGetDeviceIDs returned %d", status));
}
if (num_devices == 0) {
return absl::UnknownError("No GPU on current platform.");
}
std::vector<cl_device_id> devices(num_devices);
status = clGetDeviceIDs(platform_id, CL_DEVICE_TYPE_GPU, num_devices,
devices.data(), nullptr);
if (status != CL_SUCCESS) {
return absl::UnknownError(
absl::StrFormat("clGetDeviceIDs returned %d", status));
}
*result = CLDevice(devices[0], platform_id);
LoadOpenCLFunctionExtensions(platform_id);
return absl::OkStatus();
}
}
}
} | #include "tensorflow/lite/delegates/gpu/cl/cl_device.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
namespace tflite {
namespace gpu {
namespace cl {
TEST(QualcommOpenClCompilerVersionParsing, Base) {
AdrenoInfo::OpenClCompilerVersion result;
ParseQualcommOpenClCompilerVersion("random text Compiler E031.79.53.41",
&result);
EXPECT_EQ(result.major, 79);
EXPECT_EQ(result.minor, 53);
EXPECT_EQ(result.patch, 41);
}
TEST(QualcommOpenClCompilerVersionParsing, WrongFormat0) {
AdrenoInfo::OpenClCompilerVersion result;
ParseQualcommOpenClCompilerVersion("random text Assembler A337.79.53.41",
&result);
EXPECT_EQ(result.major, 0);
EXPECT_EQ(result.minor, 0);
EXPECT_EQ(result.patch, 0);
}
TEST(QualcommOpenClCompilerVersionParsing, WrongFormat1) {
AdrenoInfo::OpenClCompilerVersion result;
ParseQualcommOpenClCompilerVersion("random text Compiler E031.79.53.4",
&result);
EXPECT_EQ(result.major, 0);
EXPECT_EQ(result.minor, 0);
EXPECT_EQ(result.patch, 0);
}
TEST(QualcommOpenClCompilerVersionParsing, WrongFormat2) {
AdrenoInfo::OpenClCompilerVersion result;
ParseQualcommOpenClCompilerVersion("random text Compiler E031:79:53:41",
&result);
EXPECT_EQ(result.major, 0);
EXPECT_EQ(result.minor, 0);
EXPECT_EQ(result.patch, 0);
}
TEST(QualcommOpenClCompilerVersionParsing, WrongFormat3) {
AdrenoInfo::OpenClCompilerVersion result;
ParseQualcommOpenClCompilerVersion("random text Compiler E031.79.x53.41",
&result);
EXPECT_EQ(result.major, 0);
EXPECT_EQ(result.minor, 0);
EXPECT_EQ(result.patch, 0);
}
TEST(QualcommOpenClCompilerVersionParsing, WrongFormat4) {
AdrenoInfo::OpenClCompilerVersion result;
ParseQualcommOpenClCompilerVersion("random text Compiler E031.a9.53.41",
&result);
EXPECT_EQ(result.major, 0);
EXPECT_EQ(result.minor, 0);
EXPECT_EQ(result.patch, 0);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/cl/cl_device.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/cl/cl_device_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
76ef3c68-b50c-4783-b4da-7df0f63675ba | cpp | tensorflow/tensorflow | saved_model_import | tensorflow/compiler/mlir/quantization/stablehlo/cc/saved_model_import.cc | tensorflow/compiler/mlir/quantization/stablehlo/cc/saved_model_import_test.cc | #include "tensorflow/compiler/mlir/quantization/stablehlo/cc/saved_model_import.h"
#include <memory>
#include <string>
#include <unordered_set>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/base/attributes.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OwningOpRef.h"
#include "tensorflow/cc/saved_model/loader.h"
#include "tensorflow/cc/saved_model/reader.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/cc/types.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/quantization_config.pb.h"
#include "tensorflow/compiler/mlir/quantization/tensorflow/quantize_preprocess.h"
#include "tensorflow/compiler/mlir/tensorflow/translate/mlir_import_options.h"
#include "tensorflow/compiler/mlir/tensorflow/translate/tf_mlir_translate.h"
#include "tensorflow/core/protobuf/meta_graph.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace mlir::quant::stablehlo {
using ::stablehlo::quantization::QuantizationConfig;
using ::tensorflow::MLIRImportOptions;
using ::tensorflow::SavedModelBundle;
using ::tensorflow::SavedModelSignatureDefsToMlirImport;
using ::tensorflow::quantization::PreprocessAndFreezeGraph;
absl::StatusOr<ImportedMlirModuleOp> SavedModelToMlirModuleOp(
const absl::string_view saved_model_path,
const std::unordered_set<std::string>& tags,
const std::vector<std::string>& signature_keys,
MLIRContext& ctx ABSL_ATTRIBUTE_LIFETIME_BOUND) {
MLIRImportOptions import_options;
import_options.upgrade_legacy = true;
import_options.lift_variables = false;
import_options.include_variables_in_initializers = true;
auto bundle = std::make_unique<SavedModelBundle>();
std::vector<std::string> exported_names = signature_keys;
absl::StatusOr<OwningOpRef<ModuleOp>> module_op =
SavedModelSignatureDefsToMlirImport(saved_model_path, tags,
absl::MakeSpan(exported_names), &ctx,
import_options, &bundle);
if (!module_op.status().ok()) {
return absl::InternalError(absl::StrCat("Failed to import SavedModel: ",
module_op.status().ToString()));
}
return std::make_pair(std::move(*module_op), std::move(bundle));
}
absl::StatusOr<absl::flat_hash_map<FunctionName, FunctionAlias>>
GetFunctionAliases(absl::string_view saved_model_path,
const std::unordered_set<std::string>& tags) {
tensorflow::MetaGraphDef meta_graph;
TF_RETURN_IF_ERROR(tensorflow::ReadMetaGraphDefFromSavedModel(
saved_model_path, tags, &meta_graph));
absl::flat_hash_map<FunctionName, FunctionAlias> function_aliases(
meta_graph.meta_info_def().function_aliases().begin(),
meta_graph.meta_info_def().function_aliases().end());
return function_aliases;
}
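// Re-keys the alias map from original TF function names to the imported MLIR
// function names (matched via the `tf._original_func_name` attribute), then
// drops aliases whose functions no longer exist in the module.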
void UpdateFunctionAliases(
absl::flat_hash_map<FunctionName, FunctionAlias>& function_aliases,
ModuleOp module_op) {
absl::flat_hash_set<FunctionName> existing_func_names;
module_op->walk([&](func::FuncOp func_op) {
FunctionName func_name = func_op.getSymName().str();
existing_func_names.insert(func_name);
auto original_func_name =
func_op->getAttrOfType<StringAttr>("tf._original_func_name");
if (original_func_name) {
if (auto alias_itr = function_aliases.find(original_func_name.str());
alias_itr != function_aliases.end()) {
const FunctionAlias alias = alias_itr->second;
function_aliases[func_name] = alias;
}
}
});
absl::erase_if(function_aliases, [&existing_func_names](const auto& item) {
return !existing_func_names.contains(item.first);
});
}
absl::StatusOr<OwningOpRef<ModuleOp>> ImportSavedModel(
const absl::string_view saved_model_path,
const std::vector<std::string>& signature_keys,
const std::unordered_set<std::string>& tags,
const QuantizationConfig& quantization_config,
const absl::string_view mlir_dump_file_prefix,
absl::flat_hash_map<FunctionName, FunctionAlias>& function_aliases,
MLIRContext& ctx ABSL_ATTRIBUTE_LIFETIME_BOUND) {
TF_ASSIGN_OR_RETURN(
ImportedMlirModuleOp imported_module,
SavedModelToMlirModuleOp(saved_model_path, tags, signature_keys, ctx));
auto [module_op, saved_model_bundle] = std::move(imported_module);
UpdateFunctionAliases(function_aliases, *module_op);
absl::flat_hash_set<std::string> aliased_function_names;
absl::c_for_each(function_aliases, [&](const auto& aliases) {
return aliased_function_names.insert(aliases.first);
});
TF_RETURN_IF_ERROR(PreprocessAndFreezeGraph(
      mlir_dump_file_prefix, /*is_inliner_run=*/true,
      /*noinline_functions=*/aliased_function_names, *module_op, &ctx,
      saved_model_bundle == nullptr ? nullptr
                                    : saved_model_bundle->GetSession(),
      /*run_tf_to_stablehlo=*/true, /*deserialize_xla_call_module=*/false));
return std::move(module_op);
}
} | #include "tensorflow/compiler/mlir/quantization/stablehlo/cc/saved_model_import.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/container/flat_hash_map.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/OwningOpRef.h"
#include "tensorflow/compiler/mlir/quantization/common/test_base.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/cc/types.h"
namespace mlir::quant::stablehlo {
namespace {
using ::testing::IsEmpty;
using ::testing::Pair;
using ::testing::UnorderedElementsAre;
using UpdateFunctionAliasesTest = ::mlir::quant::QuantizationTestBase;
TEST_F(UpdateFunctionAliasesTest, NoAliasesReturnsEmptyMap) {
OwningOpRef<ModuleOp> module_op = ParseModuleOpString(R"mlir(
func.func private @main(%arg: tensor<1x2xf32>) -> (tensor<1x2xf32>) attributes {tf._original_func_name = "main_original"} {
return %arg : tensor<1x2xf32>
}
)mlir");
ASSERT_TRUE(module_op);
absl::flat_hash_map<FunctionName, FunctionAlias> function_aliases;
UpdateFunctionAliases(function_aliases, *module_op);
EXPECT_THAT(function_aliases, IsEmpty());
}
TEST_F(UpdateFunctionAliasesTest, AliasUpdatedByMlirFunctionName) {
OwningOpRef<ModuleOp> module_op = ParseModuleOpString(R"mlir(
func.func private @main(%arg: tensor<1x2xf32>) -> (tensor<1x2xf32>) attributes {tf._original_func_name = "main_original"} {
return %arg : tensor<1x2xf32>
}
)mlir");
ASSERT_TRUE(module_op);
absl::flat_hash_map<FunctionName, FunctionAlias> function_aliases{
{"main_original", "main_alias"}};
UpdateFunctionAliases(function_aliases, *module_op);
EXPECT_THAT(function_aliases,
UnorderedElementsAre(Pair("main", "main_alias")));
}
TEST_F(UpdateFunctionAliasesTest, IgnoresUnmatchedFunctions) {
OwningOpRef<ModuleOp> module_op = ParseModuleOpString(R"mlir(
func.func private @main(%arg: tensor<1x2xf32>) -> (tensor<1x2xf32>) attributes {tf._original_func_name = "main_original"} {
return %arg : tensor<1x2xf32>
}
)mlir");
ASSERT_TRUE(module_op);
absl::flat_hash_map<FunctionName, FunctionAlias> function_aliases{
{"not_main", "not_main_alias"}};
UpdateFunctionAliases(function_aliases, *module_op);
EXPECT_THAT(function_aliases, IsEmpty());
}
TEST_F(UpdateFunctionAliasesTest,
SkipsFunctionsWithNoOriginalFuncNameAttribute) {
OwningOpRef<ModuleOp> module_op = ParseModuleOpString(R"mlir(
func.func private @main(%arg: tensor<1x2xf32>) -> (tensor<1x2xf32>) {
return %arg : tensor<1x2xf32>
}
)mlir");
ASSERT_TRUE(module_op);
absl::flat_hash_map<FunctionName, FunctionAlias> function_aliases{
{"main_original", "main_alias"}};
UpdateFunctionAliases(function_aliases, *module_op);
EXPECT_THAT(function_aliases, IsEmpty());
}
TEST_F(UpdateFunctionAliasesTest, FunctionNameNotChanged) {
OwningOpRef<ModuleOp> module_op = ParseModuleOpString(R"mlir(
func.func private @main_original(%arg: tensor<1x2xf32>) -> (tensor<1x2xf32>) {
return %arg : tensor<1x2xf32>
}
)mlir");
ASSERT_TRUE(module_op);
absl::flat_hash_map<FunctionName, FunctionAlias> function_aliases{
{"main_original", "main_alias"}};
UpdateFunctionAliases(function_aliases, *module_op);
EXPECT_THAT(function_aliases,
UnorderedElementsAre(Pair("main_original", "main_alias")));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/stablehlo/cc/saved_model_import.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/stablehlo/cc/saved_model_import_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
286832a6-e242-4090-a201-48cb5d2db23c | cpp | google/tensorstore | json_absl_flag | tensorstore/util/json_absl_flag.h | tensorstore/util/json_absl_flag_test.cc | #ifndef TENSORSTORE_UTIL_JSON_ABSL_FLAG_H_
#define TENSORSTORE_UTIL_JSON_ABSL_FLAG_H_
#include <string>
#include <string_view>
#include <type_traits>
#include "absl/flags/marshalling.h"
#include "absl/status/status.h"
#include "absl/strings/str_format.h"
#include <nlohmann/json.hpp>
#include "tensorstore/internal/json_binding/bindable.h"
#include "tensorstore/json_serialization_options_base.h"
#include "tensorstore/util/result.h"
namespace tensorstore {
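// Wraps a JSON-bindable type `T` so that it can be used as an ABSL flag
// value. Parsing an empty flag string resets the value to a
// default-constructed T; otherwise the string is parsed as JSON and bound
// through the default JSON binder.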
template <typename T>
struct JsonAbslFlag {
T value;
JsonAbslFlag() = default;
template <typename... U,
typename = std::enable_if_t<std::is_constructible_v<T, U&&...>>>
JsonAbslFlag(U&&... arg) : value(std::forward<U>(arg)...) {}
friend std::string AbslUnparseFlag(const JsonAbslFlag& json_flag) {
TENSORSTORE_ASSIGN_OR_RETURN(
auto j, internal_json_binding::ToJson(json_flag.value), "");
if (j.is_discarded()) return {};
return absl::UnparseFlag(j.dump());
}
friend bool AbslParseFlag(std::string_view in, JsonAbslFlag* out,
std::string* error) {
if (in.empty()) {
out->value = {};
return true;
}
::nlohmann::json j = ::nlohmann::json::parse(in, nullptr, false);
if (j.is_discarded()) {
*error = absl::StrFormat("Failed to parse JSON: '%s'", in);
return false;
}
T new_value = {};
absl::Status status = internal_json_binding::DefaultBinder<>(
std::true_type{}, internal_json_binding::NoOptions{}, &new_value, &j);
if (!status.ok()) {
*error = absl::StrFormat("Failed to bind JSON: %s", status.message());
return false;
}
out->value = std::move(new_value);
return true;
}
};
}
#endif | #include "tensorstore/util/json_absl_flag.h"
#include <cstdint>
#include <string>
#include <gtest/gtest.h>
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/kvstore/spec.h"
namespace {
TEST(JsonAbslFlag, IntFlag) {
tensorstore::JsonAbslFlag<int64_t> flag = {};
std::string default_value = AbslUnparseFlag(flag);
std::string error;
EXPECT_TRUE(AbslParseFlag(default_value, &flag, &error));
EXPECT_TRUE(error.empty());
}
TEST(JsonAbslFlag, KvStoreSpecFlag) {
tensorstore::JsonAbslFlag<tensorstore::kvstore::Spec> flag = {};
std::string default_value = AbslUnparseFlag(flag);
std::string error;
EXPECT_TRUE(AbslParseFlag(default_value, &flag, &error))
<< "value: " << default_value;
EXPECT_TRUE(error.empty()) << error;
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/json_absl_flag.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/json_absl_flag_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
543a1e52-6a80-4a21-8635-769d455ec6d8 | cpp | tensorflow/tensorflow | tpu_cross_replica_ops | tensorflow/core/ops/tpu_cross_replica_ops.cc | tensorflow/core/ops/tpu_cross_replica_ops_test.cc | #include "tensorflow/core/framework/common_shape_fns.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/shape_inference.h"
namespace tensorflow {
using shape_inference::DimensionHandle;
using shape_inference::InferenceContext;
using shape_inference::ShapeHandle;
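// AllToAll's output shape is the input shape with `concat_dimension`
// multiplied by `split_count` and `split_dimension` divided by it; the shape
// function below validates group_assignment and the divisibility constraint.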
REGISTER_OP("AllToAll")
.Input("input: T")
.Input("group_assignment: int32")
.Output("output: T")
.Attr("T: {numbertype, bool}")
.Attr("concat_dimension: int")
.Attr("split_dimension: int")
.Attr("split_count: int")
.SetIsStateful()
.SetShapeFn([](InferenceContext* c) {
ShapeHandle input = c->input(0);
ShapeHandle group_assignment = c->input(1);
if (!c->RankKnown(input)) {
c->set_output(0, c->UnknownShape());
return absl::OkStatus();
}
int64_t rank = c->Rank(input);
int concat_dimension;
int split_dimension;
int split_count;
TF_RETURN_IF_ERROR(c->GetAttr("split_count", &split_count));
if (split_count < 1) {
return errors::InvalidArgument("split_count ", split_count,
" must at least be one.");
}
if (c->RankKnown(group_assignment) && c->Rank(group_assignment) != 2) {
return errors::InvalidArgument("group_assignment must have rank 2.");
}
DimensionHandle num_replicas_per_group = c->Dim(group_assignment, 1);
if (c->ValueKnown(num_replicas_per_group) &&
(c->Value(num_replicas_per_group) != split_count)) {
return errors::InvalidArgument(
"split_count ", split_count,
" must equal the size of the second dimension of group_assignment ",
c->Value(num_replicas_per_group));
}
TF_RETURN_IF_ERROR(c->GetAttr("concat_dimension", &concat_dimension));
if (concat_dimension < 0 || concat_dimension >= rank) {
return errors::InvalidArgument("concat_dimension ", concat_dimension,
" is out of range of input rank ", rank);
}
TF_RETURN_IF_ERROR(c->GetAttr("split_dimension", &split_dimension));
if (split_dimension < 0 || split_dimension >= rank) {
return errors::InvalidArgument("split_dimension ", split_dimension,
" is out of range of input rank ", rank);
}
if (!c->ValueKnown(c->Dim(input, concat_dimension)) ||
!c->ValueKnown(c->Dim(input, split_dimension))) {
c->set_output(0, c->UnknownShape());
return absl::OkStatus();
}
std::vector<DimensionHandle> dims;
dims.resize(rank);
for (int32_t i = 0; i < rank; ++i) {
dims[i] = c->Dim(input, i);
if (i == concat_dimension) {
dims[i] = c->MakeDim(c->Value(dims[i]) * split_count);
}
if (i == split_dimension) {
if (c->ValueKnown(dims[i]) &&
(c->Value(dims[i]) % split_count != 0)) {
return errors::InvalidArgument(
"input dimension ", c->Value(dims[i]),
" not divisible by split_count ", split_count);
}
dims[i] = c->MakeDim(c->Value(dims[i]) / split_count);
}
}
c->set_output(0, c->MakeShape(dims));
return absl::OkStatus();
});
REGISTER_OP("CrossReplicaSum")
.Input("input: T")
.Input("group_assignment: int32")
.Output("output: T")
.Attr("T: {half, bfloat16, float, float64, int32, uint32}")
.SetIsStateful()
.SetShapeFn(shape_inference::UnchangedShape);
REGISTER_OP("CollectivePermute")
.Input("input: T")
.Input("source_target_pairs: int32")
.Output("output: T")
.Attr("T: numbertype")
.SetIsStateful()
.SetShapeFn(shape_inference::UnchangedShape);
} | #include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/shape_inference_testutil.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
TEST(AllToAll, UnknownRank) {
ShapeInferenceTestOp op("AllToAll");
op.input_tensors.resize(2);
INFER_OK(op, "?;?", "?");
}
TEST(AllToAll, KnownRankUnknownDims) {
ShapeInferenceTestOp op("AllToAll");
op.input_tensors.resize(2);
AddNodeAttr("concat_dimension", 0, &op.node_def);
AddNodeAttr("split_count", 1, &op.node_def);
AddNodeAttr("split_dimension", 1, &op.node_def);
INFER_OK(op, "[?,1];[?,?]", "?");
INFER_OK(op, "[1,?];[?,?]", "?");
INFER_OK(op, "[?,?];[?,?]", "?");
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/ops/tpu_cross_replica_ops.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/ops/tpu_cross_replica_ops_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
7d89dd65-9a99-458f-9936-e05ead4d4730 | cpp | google/cel-cpp | string_wrapper_type | common/types/string_wrapper_type.h | common/types/string_wrapper_type_test.cc | #ifndef THIRD_PARTY_CEL_CPP_COMMON_TYPES_STRING_WRAPPER_TYPE_H_
#define THIRD_PARTY_CEL_CPP_COMMON_TYPES_STRING_WRAPPER_TYPE_H_
#include <ostream>
#include <string>
#include <utility>
#include "absl/strings/string_view.h"
#include "common/type_kind.h"
namespace cel {
class Type;
class TypeParameters;
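// `StringWrapperType` models the `google.protobuf.StringValue` wrapper type.
// It carries no state, so all instances compare equal and hash identically.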
class StringWrapperType final {
public:
static constexpr TypeKind kKind = TypeKind::kStringWrapper;
static constexpr absl::string_view kName = "google.protobuf.StringValue";
StringWrapperType() = default;
StringWrapperType(const StringWrapperType&) = default;
StringWrapperType(StringWrapperType&&) = default;
StringWrapperType& operator=(const StringWrapperType&) = default;
StringWrapperType& operator=(StringWrapperType&&) = default;
static TypeKind kind() { return kKind; }
static absl::string_view name() { return kName; }
static TypeParameters GetParameters();
static std::string DebugString() { return std::string(name()); }
constexpr void swap(StringWrapperType&) noexcept {}
};
inline constexpr void swap(StringWrapperType& lhs,
StringWrapperType& rhs) noexcept {
lhs.swap(rhs);
}
inline constexpr bool operator==(StringWrapperType, StringWrapperType) {
return true;
}
inline constexpr bool operator!=(StringWrapperType lhs, StringWrapperType rhs) {
return !operator==(lhs, rhs);
}
template <typename H>
H AbslHashValue(H state, StringWrapperType) {
return std::move(state);
}
inline std::ostream& operator<<(std::ostream& out,
const StringWrapperType& type) {
return out << type.DebugString();
}
}
#endif | #include <sstream>
#include "absl/hash/hash.h"
#include "common/type.h"
#include "internal/testing.h"
namespace cel {
namespace {
TEST(StringWrapperType, Kind) {
EXPECT_EQ(StringWrapperType().kind(), StringWrapperType::kKind);
EXPECT_EQ(Type(StringWrapperType()).kind(), StringWrapperType::kKind);
}
TEST(StringWrapperType, Name) {
EXPECT_EQ(StringWrapperType().name(), StringWrapperType::kName);
EXPECT_EQ(Type(StringWrapperType()).name(), StringWrapperType::kName);
}
TEST(StringWrapperType, DebugString) {
{
std::ostringstream out;
out << StringWrapperType();
EXPECT_EQ(out.str(), StringWrapperType::kName);
}
{
std::ostringstream out;
out << Type(StringWrapperType());
EXPECT_EQ(out.str(), StringWrapperType::kName);
}
}
TEST(StringWrapperType, Hash) {
EXPECT_EQ(absl::HashOf(StringWrapperType()),
absl::HashOf(StringWrapperType()));
}
TEST(StringWrapperType, Equal) {
EXPECT_EQ(StringWrapperType(), StringWrapperType());
EXPECT_EQ(Type(StringWrapperType()), StringWrapperType());
EXPECT_EQ(StringWrapperType(), Type(StringWrapperType()));
EXPECT_EQ(Type(StringWrapperType()), Type(StringWrapperType()));
}
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/common/types/string_wrapper_type.h | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/common/types/string_wrapper_type_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
3beedbbb-f1a8-4a93-bcca-28005e3b2a6e | cpp | google/quiche | web_transport_fingerprint_proof_verifier | quiche/quic/core/crypto/web_transport_fingerprint_proof_verifier.cc | quiche/quic/core/crypto/web_transport_fingerprint_proof_verifier_test.cc | #include "quiche/quic/core/crypto/web_transport_fingerprint_proof_verifier.h"
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/strings/escaping.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_replace.h"
#include "absl/strings/string_view.h"
#include "openssl/sha.h"
#include "quiche/quic/core/crypto/certificate_view.h"
#include "quiche/quic/core/quic_time.h"
#include "quiche/quic/core/quic_types.h"
#include "quiche/quic/core/quic_utils.h"
#include "quiche/quic/platform/api/quic_bug_tracker.h"
#include "quiche/quic/platform/api/quic_logging.h"
#include "quiche/common/quiche_text_utils.h"
namespace quic {
namespace {
constexpr size_t kFingerprintLength = SHA256_DIGEST_LENGTH * 3 - 1;
bool IsNormalizedHexDigit(char c) {
return (c >= '0' && c <= '9') || (c >= 'a' && c <= 'f');
}
void NormalizeFingerprint(CertificateFingerprint& fingerprint) {
fingerprint.fingerprint =
quiche::QuicheTextUtils::ToLower(fingerprint.fingerprint);
}
}
constexpr char CertificateFingerprint::kSha256[];
constexpr char WebTransportHash::kSha256[];
ProofVerifyDetails* WebTransportFingerprintProofVerifier::Details::Clone()
const {
return new Details(*this);
}
WebTransportFingerprintProofVerifier::WebTransportFingerprintProofVerifier(
const QuicClock* clock, int max_validity_days)
: clock_(clock),
max_validity_days_(max_validity_days),
max_validity_(
QuicTime::Delta::FromSeconds(max_validity_days * 86400 + 1)) {}
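// Validates a fingerprint of the form "AB:CD:...": the algorithm must be
// SHA-256 and the value must be 32 colon-separated hex byte pairs
// (kFingerprintLength characters). Stores the decoded 32 raw hash bytes.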
bool WebTransportFingerprintProofVerifier::AddFingerprint(
CertificateFingerprint fingerprint) {
NormalizeFingerprint(fingerprint);
if (!absl::EqualsIgnoreCase(fingerprint.algorithm,
CertificateFingerprint::kSha256)) {
QUIC_DLOG(WARNING) << "Algorithms other than SHA-256 are not supported";
return false;
}
if (fingerprint.fingerprint.size() != kFingerprintLength) {
QUIC_DLOG(WARNING) << "Invalid fingerprint length";
return false;
}
for (size_t i = 0; i < fingerprint.fingerprint.size(); i++) {
char current = fingerprint.fingerprint[i];
if (i % 3 == 2) {
if (current != ':') {
QUIC_DLOG(WARNING)
<< "Missing colon separator between the bytes of the hash";
return false;
}
} else {
if (!IsNormalizedHexDigit(current)) {
QUIC_DLOG(WARNING) << "Fingerprint must be in hexadecimal";
return false;
}
}
}
std::string normalized =
absl::StrReplaceAll(fingerprint.fingerprint, {{":", ""}});
std::string normalized_bytes;
if (!absl::HexStringToBytes(normalized, &normalized_bytes)) {
QUIC_DLOG(WARNING) << "Fingerprint hexadecimal is invalid";
return false;
}
hashes_.push_back(
WebTransportHash{fingerprint.algorithm, std::move(normalized_bytes)});
return true;
}
bool WebTransportFingerprintProofVerifier::AddFingerprint(
WebTransportHash hash) {
if (hash.algorithm != CertificateFingerprint::kSha256) {
QUIC_DLOG(WARNING) << "Algorithms other than SHA-256 are not supported";
return false;
}
if (hash.value.size() != SHA256_DIGEST_LENGTH) {
QUIC_DLOG(WARNING) << "Invalid fingerprint length";
return false;
}
hashes_.push_back(std::move(hash));
return true;
}
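// VerifyProof() belongs to the legacy QUIC crypto handshake, which
// WebTransport does not use; it is intentionally unsupported and always
// fails.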
QuicAsyncStatus WebTransportFingerprintProofVerifier::VerifyProof(
    const std::string& /*hostname*/, const uint16_t /*port*/,
    const std::string& /*server_config*/,
    QuicTransportVersion /*transport_version*/,
    absl::string_view /*chlo_hash*/,
    const std::vector<std::string>& /*certs*/,
    const std::string& /*cert_sct*/, const std::string& /*signature*/,
    const ProofVerifyContext* /*context*/,
    std::string* error_details, std::unique_ptr<ProofVerifyDetails>* details,
    std::unique_ptr<ProofVerifierCallback> /*callback*/) {
*error_details =
"QUIC crypto certificate verification is not supported in "
"WebTransportFingerprintProofVerifier";
QUIC_BUG(quic_bug_10879_1) << *error_details;
*details = std::make_unique<Details>(Status::kInternalError);
return QUIC_FAILURE;
}
QuicAsyncStatus WebTransportFingerprintProofVerifier::VerifyCertChain(
    const std::string& /*hostname*/, const uint16_t /*port*/,
    const std::vector<std::string>& certs,
    const std::string& /*ocsp_response*/,
    const std::string& /*cert_sct*/, const ProofVerifyContext* /*context*/,
    std::string* error_details, std::unique_ptr<ProofVerifyDetails>* details,
    uint8_t* /*out_alert*/,
    std::unique_ptr<ProofVerifierCallback> /*callback*/) {
if (certs.empty()) {
*details = std::make_unique<Details>(Status::kInternalError);
*error_details = "No certificates provided";
return QUIC_FAILURE;
}
if (!HasKnownFingerprint(certs[0])) {
*details = std::make_unique<Details>(Status::kUnknownFingerprint);
*error_details = "Certificate does not match any fingerprint";
return QUIC_FAILURE;
}
std::unique_ptr<CertificateView> view =
CertificateView::ParseSingleCertificate(certs[0]);
if (view == nullptr) {
*details = std::make_unique<Details>(Status::kCertificateParseFailure);
*error_details = "Failed to parse the certificate";
return QUIC_FAILURE;
}
if (!HasValidExpiry(*view)) {
*details = std::make_unique<Details>(Status::kExpiryTooLong);
*error_details =
absl::StrCat("Certificate expiry exceeds the configured limit of ",
max_validity_days_, " days");
return QUIC_FAILURE;
}
if (!IsWithinValidityPeriod(*view)) {
*details = std::make_unique<Details>(Status::kExpired);
*error_details =
"Certificate has expired or has validity listed in the future";
return QUIC_FAILURE;
}
if (!IsKeyTypeAllowedByPolicy(*view)) {
*details = std::make_unique<Details>(Status::kDisallowedKeyAlgorithm);
*error_details =
absl::StrCat("Certificate uses a disallowed public key type (",
PublicKeyTypeToString(view->public_key_type()), ")");
return QUIC_FAILURE;
}
*details = std::make_unique<Details>(Status::kValidCertificate);
return QUIC_SUCCESS;
}
std::unique_ptr<ProofVerifyContext>
WebTransportFingerprintProofVerifier::CreateDefaultContext() {
return nullptr;
}
bool WebTransportFingerprintProofVerifier::HasKnownFingerprint(
absl::string_view der_certificate) {
const std::string hash = RawSha256(der_certificate);
for (const WebTransportHash& reference : hashes_) {
if (reference.algorithm != WebTransportHash::kSha256) {
QUIC_BUG(quic_bug_10879_2) << "Unexpected non-SHA-256 hash";
continue;
}
if (hash == reference.value) {
return true;
}
}
return false;
}
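// A certificate is acceptable only if its validity window is non-empty and
// no longer than the configured maximum (max_validity_days_, plus one second
// of slack added in the constructor).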
bool WebTransportFingerprintProofVerifier::HasValidExpiry(
const CertificateView& certificate) {
if (!certificate.validity_start().IsBefore(certificate.validity_end())) {
return false;
}
const QuicTime::Delta duration_seconds =
certificate.validity_end() - certificate.validity_start();
return duration_seconds <= max_validity_;
}
bool WebTransportFingerprintProofVerifier::IsWithinValidityPeriod(
const CertificateView& certificate) {
QuicWallTime now = clock_->WallNow();
return now.IsAfter(certificate.validity_start()) &&
now.IsBefore(certificate.validity_end());
}
bool WebTransportFingerprintProofVerifier::IsKeyTypeAllowedByPolicy(
const CertificateView& certificate) {
switch (certificate.public_key_type()) {
case PublicKeyType::kP256:
case PublicKeyType::kP384:
case PublicKeyType::kEd25519:
return true;
case PublicKeyType::kRsa:
return true;
default:
return false;
}
}
} | #include "quiche/quic/core/crypto/web_transport_fingerprint_proof_verifier.h"
#include <memory>
#include <string>
#include "absl/strings/escaping.h"
#include "absl/strings/string_view.h"
#include "quiche/quic/core/quic_types.h"
#include "quiche/quic/core/quic_utils.h"
#include "quiche/quic/platform/api/quic_test.h"
#include "quiche/quic/test_tools/mock_clock.h"
#include "quiche/quic/test_tools/test_certificates.h"
namespace quic {
namespace test {
namespace {
using ::testing::HasSubstr;
constexpr QuicTime::Delta kValidTime = QuicTime::Delta::FromSeconds(1580560556);
struct VerifyResult {
QuicAsyncStatus status;
WebTransportFingerprintProofVerifier::Status detailed_status;
std::string error;
};
class WebTransportFingerprintProofVerifierTest : public QuicTest {
public:
WebTransportFingerprintProofVerifierTest() {
clock_.AdvanceTime(kValidTime);
verifier_ = std::make_unique<WebTransportFingerprintProofVerifier>(
        &clock_, /*max_validity_days=*/365);
AddTestCertificate();
}
protected:
VerifyResult Verify(absl::string_view certificate) {
VerifyResult result;
std::unique_ptr<ProofVerifyDetails> details;
uint8_t tls_alert;
    result.status = verifier_->VerifyCertChain(
        /*hostname=*/"", /*port=*/0,
        std::vector<std::string>{std::string(certificate)},
        /*ocsp_response=*/"",
        /*cert_sct=*/"",
        /*context=*/nullptr, &result.error, &details, &tls_alert,
        /*callback=*/nullptr);
result.detailed_status =
static_cast<WebTransportFingerprintProofVerifier::Details*>(
details.get())
->status();
return result;
}
void AddTestCertificate() {
EXPECT_TRUE(verifier_->AddFingerprint(WebTransportHash{
WebTransportHash::kSha256, RawSha256(kTestCertificate)}));
}
MockClock clock_;
std::unique_ptr<WebTransportFingerprintProofVerifier> verifier_;
};
TEST_F(WebTransportFingerprintProofVerifierTest, Sha256Fingerprint) {
EXPECT_EQ(absl::BytesToHexString(RawSha256(kTestCertificate)),
"f2e5465e2bf7ecd6f63066a5a37511734aa0eb7c4701"
"0e86d6758ed4f4fa1b0f");
}
TEST_F(WebTransportFingerprintProofVerifierTest, SimpleFingerprint) {
VerifyResult result = Verify(kTestCertificate);
EXPECT_EQ(result.status, QUIC_SUCCESS);
EXPECT_EQ(result.detailed_status,
WebTransportFingerprintProofVerifier::Status::kValidCertificate);
result = Verify(kWildcardCertificate);
EXPECT_EQ(result.status, QUIC_FAILURE);
EXPECT_EQ(result.detailed_status,
WebTransportFingerprintProofVerifier::Status::kUnknownFingerprint);
result = Verify("Some random text");
EXPECT_EQ(result.status, QUIC_FAILURE);
}
TEST_F(WebTransportFingerprintProofVerifierTest, Validity) {
constexpr QuicTime::Delta kStartTime =
QuicTime::Delta::FromSeconds(1580324400);
clock_.Reset();
clock_.AdvanceTime(kStartTime);
VerifyResult result = Verify(kTestCertificate);
EXPECT_EQ(result.status, QUIC_FAILURE);
EXPECT_EQ(result.detailed_status,
WebTransportFingerprintProofVerifier::Status::kExpired);
clock_.AdvanceTime(QuicTime::Delta::FromSeconds(86400));
result = Verify(kTestCertificate);
EXPECT_EQ(result.status, QUIC_SUCCESS);
EXPECT_EQ(result.detailed_status,
WebTransportFingerprintProofVerifier::Status::kValidCertificate);
clock_.AdvanceTime(QuicTime::Delta::FromSeconds(4 * 86400));
result = Verify(kTestCertificate);
EXPECT_EQ(result.status, QUIC_FAILURE);
EXPECT_EQ(result.detailed_status,
WebTransportFingerprintProofVerifier::Status::kExpired);
}
TEST_F(WebTransportFingerprintProofVerifierTest, MaxValidity) {
verifier_ = std::make_unique<WebTransportFingerprintProofVerifier>(
      &clock_, /*max_validity_days=*/2);
AddTestCertificate();
VerifyResult result = Verify(kTestCertificate);
EXPECT_EQ(result.status, QUIC_FAILURE);
EXPECT_EQ(result.detailed_status,
WebTransportFingerprintProofVerifier::Status::kExpiryTooLong);
EXPECT_THAT(result.error, HasSubstr("limit of 2 days"));
verifier_ = std::make_unique<WebTransportFingerprintProofVerifier>(
      &clock_, /*max_validity_days=*/4);
AddTestCertificate();
result = Verify(kTestCertificate);
EXPECT_EQ(result.status, QUIC_SUCCESS);
EXPECT_EQ(result.detailed_status,
WebTransportFingerprintProofVerifier::Status::kValidCertificate);
}
TEST_F(WebTransportFingerprintProofVerifierTest, InvalidCertificate) {
constexpr absl::string_view kInvalidCertificate = "Hello, world!";
ASSERT_TRUE(verifier_->AddFingerprint(WebTransportHash{
WebTransportHash::kSha256, RawSha256(kInvalidCertificate)}));
VerifyResult result = Verify(kInvalidCertificate);
EXPECT_EQ(result.status, QUIC_FAILURE);
EXPECT_EQ(
result.detailed_status,
WebTransportFingerprintProofVerifier::Status::kCertificateParseFailure);
}
TEST_F(WebTransportFingerprintProofVerifierTest, AddCertificate) {
verifier_ = std::make_unique<WebTransportFingerprintProofVerifier>(
      &clock_, /*max_validity_days=*/365);
EXPECT_TRUE(verifier_->AddFingerprint(CertificateFingerprint{
CertificateFingerprint::kSha256,
"F2:E5:46:5E:2B:F7:EC:D6:F6:30:66:A5:A3:75:11:73:4A:A0:EB:"
"7C:47:01:0E:86:D6:75:8E:D4:F4:FA:1B:0F"}));
EXPECT_EQ(Verify(kTestCertificate).detailed_status,
WebTransportFingerprintProofVerifier::Status::kValidCertificate);
EXPECT_FALSE(verifier_->AddFingerprint(CertificateFingerprint{
"sha-1", "00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00"}));
EXPECT_FALSE(verifier_->AddFingerprint(
CertificateFingerprint{CertificateFingerprint::kSha256, "00:00:00:00"}));
EXPECT_FALSE(verifier_->AddFingerprint(CertificateFingerprint{
CertificateFingerprint::kSha256,
"00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00."
"00.00.00.00.00.00.00.00.00.00.00.00.00"}));
EXPECT_FALSE(verifier_->AddFingerprint(CertificateFingerprint{
CertificateFingerprint::kSha256,
"zz:zz:zz:zz:zz:zz:zz:zz:zz:zz:zz:zz:zz:zz:zz:zz:zz:zz:zz:"
"zz:zz:zz:zz:zz:zz:zz:zz:zz:zz:zz:zz:zz"}));
}
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/crypto/web_transport_fingerprint_proof_verifier.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/crypto/web_transport_fingerprint_proof_verifier_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
8485cc72-be10-4d28-b9e1-e103725c5321 | cpp | tensorflow/tensorflow | computation_partitioner | third_party/xla/xla/service/gpu/fusions/mlir/computation_partitioner.cc | third_party/xla/xla/service/gpu/fusions/mlir/computation_partitioner_test.cc | #include "xla/service/gpu/fusions/mlir/computation_partitioner.h"
#include <cstdint>
#include <functional>
#include <iterator>
#include <optional>
#include <sstream>
#include <string>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/node_hash_map.h"
#include "absl/log/check.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/types/span.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/LLVMIR/LLVMAttrs.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/ImplicitLocOpBuilder.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/Types.h"
#include "mlir/IR/Value.h"
#include "mlir/Interfaces/DataLayoutInterfaces.h"
#include "mlir/Support/LLVM.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/gpu/fusions/fusion_emitter.h"
#include "xla/service/gpu/fusions/mlir/type_util.h"
#include "xla/service/gpu/hlo_fusion_analysis.h"
#include "xla/service/gpu/model/indexing_analysis.h"
#include "xla/service/gpu/model/indexing_map.h"
#include "xla/service/llvm_ir/llvm_util.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
namespace xla {
namespace gpu {
namespace mlir_converter {
namespace {
int Arity(const Shape& shape) {
return shape.IsTuple() ? shape.tuple_shapes_size() : 1;
}
const Shape& TupleShape(const Shape& shape, int index) {
return shape.IsTuple() ? shape.tuple_shapes(index) : shape;
}
}
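// Builds an epilogue for a single root whose output is written with an
// identity indexing map over the root's own shape.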
EpilogueSpecification EpilogueSpecification::FromIdentityIndexing(
const HloInstruction* hero, const HloInstruction* root,
mlir::MLIRContext* mlir_context) {
EpilogueSpecification result;
absl::c_copy(root->shape().dimensions(),
std::back_inserter(result.index_ranges));
result.roots.push_back(root);
result.root_indexing.push_back(
CreateIdentityMap(root->shape(), mlir_context));
result.heroes.push_back(hero);
return result;
}
EpilogueSpecification EpilogueSpecification::FromOutputIndexing(
const HloFusionAnalysis& analysis,
const std::vector<const HloInstruction*>& heroes,
const std::vector<const HloInstruction*>& roots,
const KernelFusionInterface& fusion, mlir::MLIRContext* mlir_context) {
EpilogueSpecification result;
absl::flat_hash_map<const HloInstruction*, const HloInstruction*>
root_to_hero;
for (auto [root, hero] :
llvm::zip(analysis.fusion_roots(), analysis.fusion_heroes())) {
root_to_hero[&root.instruction()] = &hero.instruction();
}
absl::flat_hash_map<const HloInstruction*, int> root_to_index;
for (auto [index, root] : llvm::enumerate(analysis.fusion_roots())) {
root_to_index[&root.instruction()] = root_to_index.size();
}
result.root_indexing.reserve(roots.size());
for (auto* root : roots) {
auto indexing = fusion.ComputeThreadIdToOutputIndexing(root_to_index[root],
mlir_context);
if (result.index_ranges.empty()) {
result.index_ranges.reserve(indexing->GetDimensionCount() +
indexing->GetSymbolCount());
for (const auto& dim : indexing->GetDimensionBounds()) {
result.index_ranges.push_back(dim.upper + 1);
}
for (const auto& sym : indexing->GetSymbolBounds()) {
result.index_ranges.push_back(sym.upper + 1);
}
}
auto* hero = root_to_hero[root];
auto epilogue_indexing = ComputeEpilogueInputToOutputIndexing(
{*hero, &analysis.fusion()}, {*root, &analysis.fusion()}, mlir_context);
result.root_indexing.push_back(
ComposeIndexingMaps(*indexing, epilogue_indexing));
}
result.heroes = heroes;
result.roots = roots;
return result;
}
std::string PartitionedComputation::Subgraph::ToString(int indentation) const {
std::string indent(indentation, ' ');
std::ostringstream ss;
ss << indent << "SUBGRAPH " << name << " {\n";
for (auto* instr :
(*instructions.begin())->parent()->MakeInstructionPostOrder()) {
if (!instructions.contains(instr)) continue;
ss << indent << " ";
if (absl::c_linear_search(roots, instr)) {
ss << "ROOT ";
}
ss << instr->ToString() << "\n";
}
ss << indent << "}";
return ss.str();
}
std::string PartitionedComputation::ToString(int indentation) const {
std::ostringstream ss;
ss << "PartitionedComputation " << computation_->name() << ":";
for (const Subgraph& subgraph : subgraphs_) {
ss << "\n" << subgraph.ToString(indentation);
}
return ss.str();
}
std::string PartitionedComputations::ToString() const {
std::ostringstream ss;
ss << "PartitionedComputations:";
for (const auto& partitioned_computation : partitioned_computations_) {
ss << "\n" << partitioned_computation.ToString();
}
return ss.str();
}
template <typename C, typename F>
bool AllIdentical(const C& c, F&& f) {
auto begin = std::begin(c);
auto end = std::end(c);
if (begin == end || begin + 1 == end) {
return true;
}
auto v = f(*begin);
++begin;
for (; begin != end; ++begin) {
if (f(*begin) != v) {
return false;
}
}
return true;
}
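// Heuristic for instructions whose result may be read more than once per
// output element (e.g. as multi-component gather indices, or as a repeated
// concatenate operand). Such instructions are given their own function so
// the value is computed once instead of being re-evaluated at each use.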
bool IsEvaluatedMoreThanOnce(const HloInstruction* instr) {
return absl::c_any_of(instr->users(), [&](const HloInstruction* user) {
if (user->opcode() == HloOpcode::kGather &&
absl::c_linear_search(user->OperandIndices(instr), 1) &&
instr->shape().rank() >= 2 && instr->shape().dimensions(1) > 1) {
return true;
}
if (user->opcode() == HloOpcode::kConcatenate &&
user->OperandIndices(instr).size() > 1) {
return true;
}
return false;
});
}
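// Partitions the computation by walking it in reverse post-order: an
// instruction is merged into its users' function when all users live in the
// same function with consistent indexing and are elementwise; otherwise
// (roots, multiple users, or re-evaluated values) it starts a new function.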
PartitionedComputation::PartitionedComputation(
const HloComputation* computation, mlir::MLIRContext* mlir_context,
std::function<bool(const HloInstruction*)> is_subgraph_root)
: computation_(computation) {
CHECK_NE(computation, nullptr);
int next_function_id = 0;
int next_indexing_id = 0;
auto pre_order = computation->MakeInstructionPostOrder();
absl::c_reverse(pre_order);
absl::flat_hash_map<const HloInstruction*, int> instr_indices;
for (auto [i, instr] : llvm::enumerate(pre_order)) {
instr_indices[instr] = i;
}
std::vector<std::pair<int, int>> ids(pre_order.size());
auto allocate_new_function = [&](const HloInstruction* instr) {
ids[instr_indices[instr]] = {next_function_id++, next_indexing_id++};
};
for (auto [instr_index, instr] : llvm::enumerate(pre_order)) {
bool is_root = instr->user_count() == 0 || is_subgraph_root(instr);
bool users_have_consistent_indexing = AllIdentical(
instr->users(),
[&](const HloInstruction* user) { return ids[instr_indices[user]]; });
bool all_users_elementwise =
absl::c_all_of(instr->users(), [&](const HloInstruction* user) {
return HloInstruction::IsOpElementwise(user->opcode());
});
if (!is_root && users_have_consistent_indexing && all_users_elementwise) {
ids[instr_index] = ids[instr_indices[instr->users().front()]];
} else if (is_root || instr->user_count() > 1 ||
IsEvaluatedMoreThanOnce(instr)) {
allocate_new_function(instr);
} else {
ids[instr_index] = ids[instr_indices[instr->users().front()]];
ids[instr_index].second = next_indexing_id++;
}
}
std::vector<std::vector<const HloInstruction*>> functions(next_function_id);
for (auto [id, instr] : llvm::reverse(llvm::zip(ids, pre_order))) {
functions[id.first].push_back(instr);
}
subgraphs_.reserve(functions.size());
for (auto&& [function_id, instructions] : llvm::enumerate(functions)) {
auto is_different_function = [&, function_id = function_id](auto* user) {
return ids[instr_indices[user]].first != function_id;
};
std::vector<const HloInstruction*> roots;
std::vector<IndexingMap> root_indexing;
const xla::Shape* first_root_shape = nullptr;
for (auto* instruction : instructions) {
if (instruction->user_count() == 0 ||
absl::c_any_of(instruction->users(), is_different_function)) {
roots.push_back(instruction);
if (first_root_shape) {
CHECK(!instruction->shape().IsTuple())
<< "Internal tuples are not supported";
if (ShapeUtil::EqualIgnoringElementType(*first_root_shape,
instruction->shape())) {
root_indexing.push_back(root_indexing.front());
} else {
root_indexing.push_back(GetBitcastMap(
*first_root_shape, instruction->shape(), mlir_context));
}
} else {
first_root_shape = &instruction->shape();
while (first_root_shape->IsTuple()) {
first_root_shape = &first_root_shape->tuple_shapes()[0];
}
root_indexing.push_back(
CreateIdentityMap(*first_root_shape, mlir_context));
}
}
}
std::vector<int64_t> ranges{first_root_shape->dimensions().begin(),
first_root_shape->dimensions().end()};
CHECK(!roots.empty()) << "No roots found";
std::string name = llvm_ir::SanitizeFunctionName(absl::StrCat(
roots.front()->parent()->name(), "_",
absl::StrJoin(roots, "_", [](std::string* out, const auto* root) {
absl::StrAppend(out, root->name());
})));
subgraphs_.push_back(Subgraph{
std::move(name),
{instructions.begin(), instructions.end()},
std::move(roots),
std::move(ranges),
std::move(root_indexing)});
}
for (const auto& subgraph : subgraphs_) {
for (const auto* instruction : subgraph.instructions) {
instructions_to_subgraphs_[instruction] = &subgraph;
}
}
}
PartitionedComputation::Subgraph PartitionedComputation::Subgraph::ForEpilogue(
const EpilogueSpecification& epilogue) {
if (epilogue.roots.empty()) return {};
const auto* computation = epilogue.heroes.front()->parent();
PartitionedComputation::Subgraph subgraph;
subgraph.name = llvm_ir::SanitizeFunctionName(
absl::StrCat(computation->name(), "__epilogue__",
absl::StrJoin(epilogue.roots, "_",
[](std::string* out, const auto* root) {
absl::StrAppend(out, root->name());
})));
subgraph.roots = epilogue.roots;
int index = 0;
for (auto* hero : epilogue.heroes) {
if (subgraph.injected_value_starts.insert({hero, index}).second) {
index += Arity(hero->shape());
}
}
subgraph.num_injected_values = index;
absl::flat_hash_set<const HloInstruction*> seen;
std::function<void(const HloInstruction*)> visit;
visit = [&](const HloInstruction* instruction) {
if (subgraph.injected_value_starts.contains(instruction)) return;
if (!seen.insert(instruction).second) return;
for (auto [index, operand] : llvm::enumerate(instruction->operands())) {
visit(operand);
}
};
visit(computation->root_instruction());
subgraph.instructions = std::move(seen);
subgraph.index_ranges = epilogue.index_ranges;
subgraph.root_indexing = epilogue.root_indexing;
return subgraph;
}
PartitionedComputations::PartitionedComputations(
const HloComputation* fusion, mlir::MLIRContext* mlir_context,
std::vector<EpilogueSpecification> epilogues)
: fusion_(fusion) {
absl::flat_hash_set<const HloComputation*> seen;
std::vector<const HloComputation*> computations;
std::function<void(const HloComputation*)> visit;
visit = [&](const HloComputation* computation) {
if (!seen.insert(computation).second) return;
computations.push_back(computation);
for (auto* instr : computation->instructions()) {
absl::c_for_each(instr->called_computations(), visit);
}
};
visit(fusion);
absl::flat_hash_set<const HloInstruction*> roots;
epilogues_.reserve(epilogues.size());
for (const auto& epilogue : epilogues) {
epilogues_.push_back(
PartitionedComputation::Subgraph::ForEpilogue(epilogue));
roots.insert(epilogue.heroes.begin(), epilogue.heroes.end());
for (auto* instruction : epilogue.heroes) {
roots.insert(instruction->operands().begin(),
instruction->operands().end());
}
}
auto is_root = [&](const HloInstruction* instruction) {
return roots.contains(instruction);
};
partitioned_computations_.reserve(computations.size());
for (auto* computation : computations) {
computation_to_partitioning_[computation] =
&partitioned_computations_.emplace_back(
PartitionedComputation{computation, mlir_context, is_root});
}
}
absl::flat_hash_map<const PartitionedComputation::Subgraph*, mlir::func::FuncOp>
PartitionedComputations::DeclareFunctions(mlir::ModuleOp module) const {
absl::flat_hash_map<const PartitionedComputation::Subgraph*,
mlir::func::FuncOp>
mapping;
mlir::ImplicitLocOpBuilder builder(module.getLoc(), module->getContext());
builder.setInsertionPointToEnd(module.getBody());
auto create_funcs =
[&](absl::Span<const PartitionedComputation::Subgraph> subgraphs) {
for (const auto& subgraph : subgraphs) {
if (subgraph.roots.empty()) continue;
auto func_op = CreateSubgraphMlirFunction(subgraph, builder);
func_op->setAttr("llvm.linkage", mlir::LLVM::LinkageAttr::get(
module->getContext(),
mlir::LLVM::Linkage::Internal));
mapping[&subgraph] = func_op;
}
};
for (const auto& computation : partitioned_computations_) {
create_funcs(computation.subgraphs());
}
create_funcs(epilogues_);
return mapping;
}
const PartitionedComputation::Subgraph& PartitionedComputations::FindSubgraph(
const HloInstruction* instr) const {
return FindPartitionedComputation(instr->parent()).FindSubgraph(instr);
}
CallTargetProvider PartitionedComputations::CreateCallTargetProvider(
const absl::flat_hash_map<const PartitionedComputation::Subgraph*,
mlir::func::FuncOp>& subgraph_to_func) const {
return [&, this](const HloInstruction* instr) {
const auto& subgraph = FindSubgraph(instr);
CHECK(subgraph_to_func.contains(&subgraph))
<< "No function found for subgraph with instruction "
<< instr->ToString();
return subgraph_to_func.at(&subgraph);
};
}
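// Declares (without defining) a private func.func for a subgraph. For fusion
// and entry computations the arguments are the parameter tensors, one index
// argument per entry of `index_ranges` (annotated with an `xla.range`
// attribute), and one scalar per injected hero value; other computations
// take their parameters as scalars.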
mlir::func::FuncOp CreateSubgraphMlirFunction(
const PartitionedComputation::Subgraph& subgraph,
mlir::ImplicitLocOpBuilder& b) {
auto* computation = subgraph.roots.front()->parent();
llvm::SmallVector<mlir::Type> parameter_types;
llvm::SmallVector<mlir::Type> result_types;
auto element_type = [&](const auto& shape) {
return PrimitiveTypeToMlirType(shape.element_type(), b);
};
for (auto* root : subgraph.roots) {
for (auto ty : ShapeToMlirTypes(root->shape(), b)) {
result_types.push_back(
mlir::cast<mlir::RankedTensorType>(ty).getElementType());
}
}
llvm::SmallVector<mlir::DictionaryAttr> arg_attrs;
if (computation->IsFusionComputation() || computation->IsEntryComputation()) {
for (auto* param : computation->parameter_instructions()) {
parameter_types.push_back(TensorShapeToMlirType(param->shape(), b));
arg_attrs.emplace_back();
}
for (int64_t size : subgraph.index_ranges) {
parameter_types.push_back(b.getIndexType());
arg_attrs.emplace_back(mlir::DictionaryAttr::get(
b.getContext(),
{b.getNamedAttr("xla.range", b.getIndexArrayAttr({0, size - 1}))}));
}
int operand_offset = parameter_types.size();
parameter_types.resize(operand_offset + subgraph.num_injected_values);
arg_attrs.resize(parameter_types.size());
for (auto [value, start] : subgraph.injected_value_starts) {
for (int index = 0; index < Arity(value->shape()); ++index) {
parameter_types[operand_offset + start + index] =
element_type(TupleShape(value->shape(), index));
}
}
} else {
for (auto* param : computation->parameter_instructions()) {
parameter_types.push_back(element_type(param->shape()));
}
}
auto ty = b.getFunctionType(parameter_types, result_types);
auto func_op = b.create<mlir::func::FuncOp>(
subgraph.name, ty,
      /*attrs=*/llvm::ArrayRef<mlir::NamedAttribute>{}, arg_attrs);
func_op.setPrivate();
return func_op;
}
}
}
} | #include "xla/service/gpu/fusions/mlir/computation_partitioner.h"
#include <string>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "llvm/Support/raw_ostream.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/ImplicitLocOpBuilder.h"
#include "mlir/IR/Location.h"
#include "mlir/IR/MLIRContext.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/gpu/model/indexing_analysis.h"
#include "xla/tests/hlo_test_base.h"
namespace xla {
namespace gpu {
namespace mlir_converter {
namespace {
using ::testing::ElementsAre;
using ::testing::SizeIs;
using ::testing::UnorderedElementsAre;
class ComputationPartitionerTest : public HloTestBase {
protected:
ComputationPartitionerTest() {
mlir_context_.loadDialect<mlir::func::FuncDialect>();
}
mlir::MLIRContext mlir_context_;
};
std::string PrintAndErase(mlir::func::FuncOp func) {
std::string out;
llvm::raw_string_ostream os(out);
os << func;
func.erase();
return out;
}
TEST_F(ComputationPartitionerTest, PartitionDiamonds) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test_module
fused_computation {
%param = f32[6] parameter(0)
%slice0.1 = f32[5] slice(f32[6]{0} %param), slice={[0:5]}
%slice0.2 = f32[5] slice(f32[6]{0} %param), slice={[1:6]}
%add0 = f32[5] add(f32[5]{0} %slice0.1, f32[5]{0} %slice0.2)
%slice1.1 = f32[4] slice(f32[5]{0} %add0), slice={[0:4]}
%slice1.2 = f32[4] slice(f32[5]{0} %add0), slice={[1:5]}
%add1 = f32[4] add(f32[4]{0} %slice1.1, f32[4]{0} %slice1.2)
%slice2.1 = f32[3] slice(f32[4]{0} %add1), slice={[0:3]}
%slice2.2 = f32[3] slice(f32[4]{0} %add1), slice={[1:4]}
%add2 = f32[3] add(f32[3]{0} %slice2.1, f32[3]{0} %slice2.2)
%slice3.1 = f32[2] slice(f32[3]{0} %add2), slice={[0:2]}
%slice3.2 = f32[2] slice(f32[3]{0} %add2), slice={[1:3]}
ROOT %add3 = f32[2] add(f32[2]{0} %slice3.1, f32[2]{0} %slice3.2)
})")
.value();
auto* fusion = module->GetComputationWithName("fused_computation");
ASSERT_NE(fusion, nullptr);
PartitionedComputation computation(fusion, &mlir_context_);
constexpr auto kExpected = R"(PartitionedComputation fused_computation:
SUBGRAPH fused_computation_add3 {
%slice3.1 = f32[2]{0} slice(f32[3]{0} %add2), slice={[0:2]}
%slice3.2 = f32[2]{0} slice(f32[3]{0} %add2), slice={[1:3]}
ROOT %add3 = f32[2]{0} add(f32[2]{0} %slice3.1, f32[2]{0} %slice3.2)
}
SUBGRAPH fused_computation_add2 {
%slice2.1 = f32[3]{0} slice(f32[4]{0} %add1), slice={[0:3]}
%slice2.2 = f32[3]{0} slice(f32[4]{0} %add1), slice={[1:4]}
ROOT %add2 = f32[3]{0} add(f32[3]{0} %slice2.1, f32[3]{0} %slice2.2)
}
SUBGRAPH fused_computation_add1 {
%slice1.1 = f32[4]{0} slice(f32[5]{0} %add0), slice={[0:4]}
%slice1.2 = f32[4]{0} slice(f32[5]{0} %add0), slice={[1:5]}
ROOT %add1 = f32[4]{0} add(f32[4]{0} %slice1.1, f32[4]{0} %slice1.2)
}
SUBGRAPH fused_computation_add0 {
%slice0.1 = f32[5]{0} slice(f32[6]{0} %param), slice={[0:5]}
%slice0.2 = f32[5]{0} slice(f32[6]{0} %param), slice={[1:6]}
ROOT %add0 = f32[5]{0} add(f32[5]{0} %slice0.1, f32[5]{0} %slice0.2)
}
SUBGRAPH fused_computation_param {
ROOT %param = f32[6]{0} parameter(0)
})";
EXPECT_EQ(computation.ToString(6), kExpected);
}
TEST_F(ComputationPartitionerTest, SimpleConcatenate) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test_module
fused_computation {
%param1 = f32[6] parameter(0)
%param2 = f32[3] parameter(1)
%neg = f32[6] negate(%param1)
%exp = f32[3] exponential(%param2)
ROOT %concat = f32[9] concatenate(%neg, %exp), dimensions={0}
})")
.value();
auto* fusion = module->GetComputationWithName("fused_computation");
ASSERT_NE(fusion, nullptr);
PartitionedComputation computation(fusion, &mlir_context_);
EXPECT_THAT(computation.subgraphs(), SizeIs(1));
}
TEST_F(ComputationPartitionerTest, DiamondConcatenate) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test_module
fused_computation {
%param1 = f32[6] parameter(0)
%param2 = f32[6] parameter(1)
%log = f32[6] log(%param1)
%add = f32[6] add(%log, %param2)
%neg = f32[6] negate(%log)
%exp = f32[6] exponential(%add)
ROOT %concat = f32[12] concatenate(%neg, %exp), dimensions={0}
})")
.value();
auto* fusion = module->GetComputationWithName("fused_computation");
ASSERT_NE(fusion, nullptr);
PartitionedComputation computation(fusion, &mlir_context_);
constexpr auto kExpected = R"(PartitionedComputation fused_computation:
SUBGRAPH fused_computation_concat {
%neg = f32[6]{0} negate(f32[6]{0} %log)
%param2 = f32[6]{0} parameter(1)
%add = f32[6]{0} add(f32[6]{0} %log, f32[6]{0} %param2)
%exp = f32[6]{0} exponential(f32[6]{0} %add)
ROOT %concat = f32[12]{0} concatenate(f32[6]{0} %neg, f32[6]{0} %exp), dimensions={0}
}
SUBGRAPH fused_computation_log {
%param1 = f32[6]{0} parameter(0)
ROOT %log = f32[6]{0} log(f32[6]{0} %param1)
})";
EXPECT_EQ(computation.ToString(6), kExpected);
}
TEST_F(ComputationPartitionerTest, TupleRoot) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test_module
fused_computation {
%p0 = f32[6] parameter(0)
%p1 = f32[6] parameter(1)
%add = f32[6] add(p0, p1)
%sub = f32[6] subtract(p0, p1)
ROOT %root = (f32[6], f32[6]) tuple(%add, %sub)
})")
.value();
auto* fusion = module->GetComputationWithName("fused_computation");
ASSERT_NE(fusion, nullptr);
PartitionedComputation computation(fusion, &mlir_context_);
constexpr auto kExpected = R"(PartitionedComputation fused_computation:
SUBGRAPH fused_computation_root {
%add = f32[6]{0} add(f32[6]{0} %p0, f32[6]{0} %p1)
%sub = f32[6]{0} subtract(f32[6]{0} %p0, f32[6]{0} %p1)
ROOT %root = (f32[6]{0}, f32[6]{0}) tuple(f32[6]{0} %add, f32[6]{0} %sub)
}
SUBGRAPH fused_computation_p1 {
ROOT %p1 = f32[6]{0} parameter(1)
}
SUBGRAPH fused_computation_p0 {
ROOT %p0 = f32[6]{0} parameter(0)
})";
EXPECT_EQ(computation.ToString(6), kExpected);
}
TEST_F(ComputationPartitionerTest, Epilogue) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test_module
add {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT add = f32[] add(p0, p1)
}
fused_computation {
p0 = f32[4] parameter(0)
c0 = f32[] constant(0)
reduce = f32[] reduce(p0, c0), dimensions={0}, to_apply=add
bitcast = f32[1] bitcast(reduce)
abs = f32[1] abs(bitcast)
log = f32[1] log(abs)
sign = f32[1] sign(bitcast)
ROOT tuple = (f32[1], f32[1]) tuple(log, sign)
})")
.value();
auto* fused_computation = module->GetComputationWithName("fused_computation");
EpilogueSpecification epilogue{
{fused_computation->GetInstructionWithName("reduce")},
{fused_computation->GetInstructionWithName("log"),
fused_computation->GetInstructionWithName("sign")},
{1, 42},
{CreateIdentityMap(
fused_computation->root_instruction()->shape().tuple_shapes(0),
&mlir_context_)}};
PartitionedComputations fusion(fused_computation, &mlir_context_, {epilogue});
mlir::ImplicitLocOpBuilder builder(mlir::UnknownLoc::get(&mlir_context_),
&mlir_context_);
EXPECT_EQ(
PrintAndErase(
CreateSubgraphMlirFunction(fusion.epilogues().front(), builder)),
"func.func private @fused_computation__epilogue__log_sign(tensor<4xf32>, "
"index {xla.range = [0 : index, 0 : index]}, "
"index {xla.range = [0 : index, 41 : index]}, "
"f32) -> (f32, f32)");
}
TEST_F(ComputationPartitionerTest, TransposeAsRoot) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test_module
fused_computation {
%p0 = f32[64, 32] parameter(0)
%p1 = f32[64, 32] parameter(1)
%add = f32[64, 32] add(p0, p1)
%transpose = f32[32, 64] transpose(%add), dimensions={1, 0}
%exp = f32[32, 64] exponential(%transpose)
ROOT %root = f32[32, 64] tanh(%exp)
})")
.value();
auto* fusion = module->GetComputationWithName("fused_computation");
ASSERT_NE(fusion, nullptr);
PartitionedComputation computation(
fusion, &mlir_context_, [](const HloInstruction* instr) {
return instr->opcode() == HloOpcode::kTranspose;
});
ASSERT_THAT(computation.subgraphs(), SizeIs(2));
EXPECT_THAT(computation.GetRootSubgraph().roots, SizeIs(1));
EXPECT_THAT(computation.GetRootSubgraph().instructions, SizeIs(2));
}
TEST_F(ComputationPartitionerTest, PartiallyMergable) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test_module
fused_computation {
%p0 = f32[10,10] parameter(0)
%p1 = f32[10,10] parameter(1)
%add = f32[10,10] add(%p0, %p1)
%transpose = f32[10,10] transpose(%add), dimensions={1,0}
ROOT %sub = f32[10,10] subtract(%add, %transpose)
})")
.value();
auto* fusion = module->GetComputationWithName("fused_computation");
ASSERT_NE(fusion, nullptr);
PartitionedComputation computation(fusion, &mlir_context_);
auto transpose = fusion->GetInstructionWithName("transpose");
auto sub = fusion->GetInstructionWithName("sub");
ASSERT_THAT(computation.subgraphs(), SizeIs(2));
EXPECT_THAT(computation.GetRootSubgraph().instructions,
UnorderedElementsAre(transpose, sub));
}
TEST_F(ComputationPartitionerTest, SubgraphSignatures) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test_module
add {
%p0 = f32[] parameter(0)
%p1 = f32[] parameter(1)
ROOT %add = f32[] add(%p0, %p1)
}
fusion {
%p0 = f32[10,10]{0,1} parameter(0)
%p1 = f32[10,10]{1,0} parameter(1)
%c0 = f32[] constant(2)
%bc = f32[10,10]{0,1} bitcast(%p1)
%add = f32[10,10] add(%p0, %bc)
ROOT %reduce = f32[10] reduce(%add, %c0), dimensions={1}, to_apply=add
}
ENTRY main {
%p0 = f32[10,10] parameter(0)
%p1 = f32[10,10] parameter(1)
ROOT %fusion = f32[10] fusion(%p0, %p1), kind=kLoop, calls=fusion
})")
.value();
mlir::MLIRContext context;
context.loadDialect<mlir::func::FuncDialect>();
mlir::ImplicitLocOpBuilder builder(mlir::UnknownLoc::get(&context), &context);
PartitionedComputation fusion(module->GetComputationWithName("fusion"),
&mlir_context_);
EXPECT_EQ(
PrintAndErase(
CreateSubgraphMlirFunction(fusion.GetRootSubgraph(), builder)),
"func.func private @fusion_reduce(tensor<10x10xf32, dense<[0, 1]> : "
"tensor<2xi64>>, tensor<10x10xf32>, index {xla.range = [0 : index, 9 : "
"index]}) -> f32");
PartitionedComputation add(module->GetComputationWithName("add"),
&mlir_context_);
EXPECT_EQ(
PrintAndErase(CreateSubgraphMlirFunction(add.GetRootSubgraph(), builder)),
"func.func private @add_add(f32, f32) -> f32");
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/fusions/mlir/computation_partitioner.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/fusions/mlir/computation_partitioner_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
f054a731-cf43-403c-8e10-e90641ca41dd | cpp | tensorflow/tensorflow | cupti_error_manager | third_party/xla/xla/backends/profiler/gpu/cupti_error_manager.cc | third_party/xla/xla/backends/profiler/gpu/cupti_error_manager_test.cc | #include "xla/backends/profiler/gpu/cupti_error_manager.h"
#include <utility>
#include "absl/debugging/leak_check.h"
#include "tsl/platform/logging.h"
namespace xla {
namespace profiler {
using tsl::mutex_lock;
CuptiErrorManager::CuptiErrorManager(std::unique_ptr<CuptiInterface> interface)
: interface_(std::move(interface)), disabled_(0), undo_disabled_(false) {}
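// Short-circuits the CUPTI call with CUPTI_ERROR_DISABLED once a previous
// error has disabled this manager.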
#define IGNORE_CALL_IF_DISABLED \
if (disabled_) { \
LOG(ERROR) << "cupti" << __func__ << ": ignored due to a previous error."; \
return CUPTI_ERROR_DISABLED; \
} \
VLOG(1) << "cupti" << __func__;
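// Treats the given result code as expected and non-fatal: it is returned to
// the caller without disabling the manager.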
#define ALLOW_ERROR(e, ERROR) \
if (e == ERROR) { \
VLOG(1) << "cupti" << __func__ << ": error " << static_cast<int>(e) \
<< ": " << ResultString(e) << " (allowed)"; \
return e; \
}
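// On any unexpected error, logs it, rolls back prior state changes, and
// permanently disables the manager.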
#define LOG_AND_DISABLE_IF_ERROR(e) \
if (e != CUPTI_SUCCESS) { \
LOG(ERROR) << "cupti" << __func__ << ": error " << static_cast<int>(e) \
<< ": " << ResultString(e); \
UndoAndDisable(); \
}
void CuptiErrorManager::RegisterUndoFunction(
const CuptiErrorManager::UndoFunction& func) {
mutex_lock lock(undo_stack_mu_);
undo_stack_.push_back(func);
}
CUptiResult CuptiErrorManager::ActivityDisable(CUpti_ActivityKind kind) {
IGNORE_CALL_IF_DISABLED;
CUptiResult error = interface_->ActivityDisable(kind);
LOG_AND_DISABLE_IF_ERROR(error);
return error;
}
CUptiResult CuptiErrorManager::ActivityEnable(CUpti_ActivityKind kind) {
IGNORE_CALL_IF_DISABLED;
CUptiResult error = interface_->ActivityEnable(kind);
if (error == CUPTI_SUCCESS) {
auto f = std::bind(&CuptiErrorManager::ActivityDisable, this, kind);
RegisterUndoFunction(f);
}
LOG_AND_DISABLE_IF_ERROR(error);
return error;
}
CUptiResult CuptiErrorManager::ActivityFlushAll(uint32_t flag) {
CUptiResult error = interface_->ActivityFlushAll(flag);
LOG_AND_DISABLE_IF_ERROR(error);
return error;
}
CUptiResult CuptiErrorManager::ActivityGetNextRecord(
uint8_t* buffer, size_t valid_buffer_size_bytes, CUpti_Activity** record) {
IGNORE_CALL_IF_DISABLED;
CUptiResult error = interface_->ActivityGetNextRecord(
buffer, valid_buffer_size_bytes, record);
ALLOW_ERROR(error, CUPTI_ERROR_MAX_LIMIT_REACHED);
ALLOW_ERROR(error, CUPTI_ERROR_INVALID_KIND);
LOG_AND_DISABLE_IF_ERROR(error);
return error;
}
CUptiResult CuptiErrorManager::ActivityGetNumDroppedRecords(CUcontext context,
uint32_t stream_id,
size_t* dropped) {
IGNORE_CALL_IF_DISABLED;
CUptiResult error =
interface_->ActivityGetNumDroppedRecords(context, stream_id, dropped);
LOG_AND_DISABLE_IF_ERROR(error);
return error;
}
CUptiResult CuptiErrorManager::ActivityConfigureUnifiedMemoryCounter(
CUpti_ActivityUnifiedMemoryCounterConfig* config, uint32_t count) {
IGNORE_CALL_IF_DISABLED;
CUptiResult error =
interface_->ActivityConfigureUnifiedMemoryCounter(config, count);
return error;
}
CUptiResult CuptiErrorManager::ActivityRegisterCallbacks(
CUpti_BuffersCallbackRequestFunc func_buffer_requested,
CUpti_BuffersCallbackCompleteFunc func_buffer_completed) {
IGNORE_CALL_IF_DISABLED;
absl::LeakCheckDisabler disabler;
CUptiResult error = interface_->ActivityRegisterCallbacks(
func_buffer_requested, func_buffer_completed);
LOG_AND_DISABLE_IF_ERROR(error);
return error;
}
CUptiResult CuptiErrorManager::ActivityUsePerThreadBuffer() {
IGNORE_CALL_IF_DISABLED;
CUptiResult error = interface_->ActivityUsePerThreadBuffer();
return error;
}
CUptiResult CuptiErrorManager::SetActivityFlushPeriod(uint32_t period_ms) {
IGNORE_CALL_IF_DISABLED;
CUptiResult error = interface_->SetActivityFlushPeriod(period_ms);
LOG_AND_DISABLE_IF_ERROR(error);
return error;
}
CUptiResult CuptiErrorManager::GetDeviceId(CUcontext context,
uint32_t* device_id) {
IGNORE_CALL_IF_DISABLED;
CUptiResult error = interface_->GetDeviceId(context, device_id);
LOG_AND_DISABLE_IF_ERROR(error);
return error;
}
CUptiResult CuptiErrorManager::GetTimestamp(uint64_t* timestamp) {
IGNORE_CALL_IF_DISABLED;
CUptiResult error = interface_->GetTimestamp(timestamp);
LOG_AND_DISABLE_IF_ERROR(error);
return error;
}
CUptiResult CuptiErrorManager::Finalize() {
IGNORE_CALL_IF_DISABLED;
CUptiResult error = interface_->Finalize();
ALLOW_ERROR(error, CUPTI_ERROR_API_NOT_IMPLEMENTED);
LOG_AND_DISABLE_IF_ERROR(error);
return error;
}
CUptiResult CuptiErrorManager::EnableCallback(uint32_t enable,
CUpti_SubscriberHandle subscriber,
CUpti_CallbackDomain domain,
CUpti_CallbackId callback_id) {
IGNORE_CALL_IF_DISABLED;
CUptiResult error =
interface_->EnableCallback(enable, subscriber, domain, callback_id);
if (error == CUPTI_SUCCESS) {
if (enable == 1) {
auto f = std::bind(&CuptiErrorManager::EnableCallback, this,
                         /*enable=*/0, subscriber, domain, callback_id);
RegisterUndoFunction(f);
}
} else {
LOG(ERROR) << "cupti" << __func__
<< ": error with domain:" << static_cast<int>(domain)
<< " and callback_id:" << static_cast<int>(callback_id);
}
LOG_AND_DISABLE_IF_ERROR(error);
return error;
}
CUptiResult CuptiErrorManager::EnableDomain(uint32_t enable,
CUpti_SubscriberHandle subscriber,
CUpti_CallbackDomain domain) {
IGNORE_CALL_IF_DISABLED;
CUptiResult error = interface_->EnableDomain(enable, subscriber, domain);
if (error == CUPTI_SUCCESS) {
if (enable == 1) {
auto f = std::bind(&CuptiErrorManager::EnableDomain, this,
                         /*enable=*/0, subscriber, domain);
RegisterUndoFunction(f);
}
}
LOG_AND_DISABLE_IF_ERROR(error);
return error;
}
CUptiResult CuptiErrorManager::Subscribe(CUpti_SubscriberHandle* subscriber,
CUpti_CallbackFunc callback,
void* userdata) {
IGNORE_CALL_IF_DISABLED;
absl::LeakCheckDisabler disabler;
CUptiResult error = interface_->Subscribe(subscriber, callback, userdata);
if (error == CUPTI_SUCCESS) {
auto f = std::bind(&CuptiErrorManager::Unsubscribe, this, *subscriber);
RegisterUndoFunction(f);
}
LOG_AND_DISABLE_IF_ERROR(error);
return error;
}
CUptiResult CuptiErrorManager::Unsubscribe(CUpti_SubscriberHandle subscriber) {
IGNORE_CALL_IF_DISABLED;
CUptiResult error = interface_->Unsubscribe(subscriber);
LOG_AND_DISABLE_IF_ERROR(error);
return error;
}
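// Runs the registered undo functions in LIFO order and then disables the
// manager; undo_disabled_ guards against re-entry while they execute.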
void CuptiErrorManager::UndoAndDisable() {
if (undo_disabled_) {
return;
}
mutex_lock lock(undo_stack_mu_);
undo_disabled_ = true;
while (!undo_stack_.empty()) {
LOG(ERROR) << "CuptiErrorManager is disabling profiling automatically.";
undo_stack_.back()();
undo_stack_.pop_back();
}
undo_disabled_ = false;
disabled_ = 1;
}
CUptiResult CuptiErrorManager::GetResultString(CUptiResult result,
const char** str) {
IGNORE_CALL_IF_DISABLED;
CUptiResult error = interface_->GetResultString(result, str);
LOG_AND_DISABLE_IF_ERROR(error);
return error;
}
CUptiResult CuptiErrorManager::GetContextId(CUcontext context,
uint32_t* context_id) {
IGNORE_CALL_IF_DISABLED;
CUptiResult error = interface_->GetContextId(context, context_id);
LOG_AND_DISABLE_IF_ERROR(error);
return error;
}
CUptiResult CuptiErrorManager::GetStreamIdEx(CUcontext context, CUstream stream,
uint8_t per_thread_stream,
uint32_t* stream_id) {
IGNORE_CALL_IF_DISABLED;
CUptiResult error =
interface_->GetStreamIdEx(context, stream, per_thread_stream, stream_id);
LOG_AND_DISABLE_IF_ERROR(error);
return error;
}
CUptiResult CuptiErrorManager::GetGraphId(CUgraph graph, uint32_t* graph_id) {
IGNORE_CALL_IF_DISABLED;
CUptiResult error = interface_->GetGraphId(graph, graph_id);
LOG_AND_DISABLE_IF_ERROR(error);
return error;
}
CUptiResult CuptiErrorManager::GetGraphExecId(CUgraphExec graph_exec,
uint32_t* graph_id) {
IGNORE_CALL_IF_DISABLED;
CUptiResult error = interface_->GetGraphExecId(graph_exec, graph_id);
LOG_AND_DISABLE_IF_ERROR(error);
return error;
}
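// Drops the registered undo functions without executing them.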
void CuptiErrorManager::CleanUp() {
if (undo_disabled_) {
return;
}
mutex_lock lock(undo_stack_mu_);
undo_disabled_ = true;
while (!undo_stack_.empty()) {
undo_stack_.pop_back();
}
undo_disabled_ = false;
}
std::string CuptiErrorManager::ResultString(CUptiResult error) const {
const char* error_message = nullptr;
if (interface_->GetResultString(error, &error_message) == CUPTI_SUCCESS &&
error_message != nullptr) {
return error_message;
}
return "";
}
}  // namespace profiler
} | #if GOOGLE_CUDA
#include "xla/backends/profiler/gpu/cupti_error_manager.h"
#include <cstdint>
#include <memory>
#include <utility>
#include "absl/memory/memory.h"
#include "xla/backends/profiler/gpu/cuda_test.h"
#include "xla/backends/profiler/gpu/cupti_interface.h"
#include "xla/backends/profiler/gpu/cupti_tracer.h"
#include "xla/backends/profiler/gpu/cupti_wrapper.h"
#include "xla/backends/profiler/gpu/mock_cupti.h"
#include "xla/tsl/profiler/utils/time_utils.h"
#include "tsl/platform/test.h"
namespace xla {
namespace profiler {
namespace test {
using xla::profiler::CuptiInterface;
using xla::profiler::CuptiTracer;
using xla::profiler::CuptiTracerCollectorOptions;
using xla::profiler::CuptiTracerOptions;
using xla::profiler::CuptiWrapper;
using ::testing::_;
using ::testing::Invoke;
using ::testing::Return;
using ::testing::Sequence;
using ::testing::StrictMock;
class TestableCuptiTracer : public CuptiTracer {
public:
explicit TestableCuptiTracer(CuptiInterface* cupti_interface)
: CuptiTracer(cupti_interface) {}
};
class CuptiErrorManagerTest : public ::testing::Test {
protected:
CuptiErrorManagerTest() {}
void SetUp() override {
ASSERT_GT(CuptiTracer::NumGpus(), 0) << "No devices found";
auto mock_cupti = std::make_unique<StrictMock<MockCupti>>();
mock_ = mock_cupti.get();
cupti_error_manager_ =
std::make_unique<CuptiErrorManager>(std::move(mock_cupti));
cupti_tracer_ =
std::make_unique<TestableCuptiTracer>(cupti_error_manager_.get());
cupti_wrapper_ = std::make_unique<CuptiWrapper>();
CuptiTracerCollectorOptions collector_options;
collector_options.num_gpus = CuptiTracer::NumGpus();
uint64_t start_gputime_ns = CuptiTracer::GetTimestamp();
uint64_t start_walltime_ns = tsl::profiler::GetCurrentTimeNanos();
cupti_collector_ = CreateCuptiCollector(
collector_options, start_walltime_ns, start_gputime_ns);
}
void EnableProfiling(const CuptiTracerOptions& option) {
cupti_tracer_->Enable(option, cupti_collector_.get());
}
void DisableProfiling() { cupti_tracer_->Disable(); }
bool CuptiDisabled() const { return cupti_error_manager_->Disabled(); }
void RunGpuApp() {
MemCopyH2D();
PrintfKernel(10);
Synchronize();
MemCopyD2H();
}
StrictMock<MockCupti>* mock_;
std::unique_ptr<TestableCuptiTracer> cupti_tracer_ = nullptr;
std::unique_ptr<CuptiInterface> cupti_error_manager_;
std::unique_ptr<CuptiWrapper> cupti_wrapper_;
std::unique_ptr<xla::profiler::CuptiTraceCollector> cupti_collector_;
};
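// Simulates ActivityEnable failing with CUPTI_ERROR_UNKNOWN and verifies that
// the error manager unsubscribes its callbacks and stays disabled afterwards.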
TEST_F(CuptiErrorManagerTest, GpuTraceActivityEnableTest) {
Sequence s1;
EXPECT_CALL(*mock_, Subscribe(_, _, _))
.InSequence(s1)
.WillOnce(Invoke(cupti_wrapper_.get(), &CuptiWrapper::Subscribe));
const int cb_enable_times = IsCudaNewEnoughForGraphTraceTest() ? 4 : 1;
EXPECT_CALL(*mock_, EnableCallback(1, _, _, _))
.Times(cb_enable_times)
.InSequence(s1)
.WillRepeatedly(
Invoke(cupti_wrapper_.get(), &CuptiWrapper::EnableCallback));
EXPECT_CALL(*mock_, ActivityUsePerThreadBuffer())
.InSequence(s1)
.WillOnce(Invoke(cupti_wrapper_.get(),
&CuptiWrapper::ActivityUsePerThreadBuffer));
EXPECT_CALL(*mock_, ActivityRegisterCallbacks(_, _))
.InSequence(s1)
.WillOnce(Invoke(cupti_wrapper_.get(),
&CuptiWrapper::ActivityRegisterCallbacks));
EXPECT_CALL(*mock_, ActivityEnable(CUPTI_ACTIVITY_KIND_KERNEL))
.InSequence(s1)
.WillOnce(Return(CUPTI_ERROR_UNKNOWN));
EXPECT_CALL(*mock_, GetResultString(CUPTI_ERROR_UNKNOWN, _))
.InSequence(s1)
.WillOnce(Invoke(cupti_wrapper_.get(), &CuptiWrapper::GetResultString));
EXPECT_CALL(*mock_, EnableCallback(0, _, _, _))
.Times(cb_enable_times)
.InSequence(s1)
.WillRepeatedly(
Invoke(cupti_wrapper_.get(), &CuptiWrapper::EnableCallback));
EXPECT_CALL(*mock_, Unsubscribe(_))
.InSequence(s1)
.WillOnce(Invoke(cupti_wrapper_.get(), &CuptiWrapper::Unsubscribe));
EXPECT_FALSE(CuptiDisabled());
CuptiTracerOptions options;
options.activities_selected.push_back(CUPTI_ACTIVITY_KIND_KERNEL);
options.cbids_selected.push_back(CUPTI_DRIVER_TRACE_CBID_cuLaunchKernel);
EnableProfiling(options);
EXPECT_TRUE(CuptiDisabled());
RunGpuApp();
EXPECT_TRUE(CuptiDisabled());
DisableProfiling();
EXPECT_TRUE(CuptiDisabled());
}
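// Fails the second ActivityEnable call and verifies that the already enabled
// MEMCPY activity, callback domain, and subscription are rolled back.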
TEST_F(CuptiErrorManagerTest, GpuTraceAutoEnableTest) {
EXPECT_FALSE(CuptiDisabled());
Sequence s1;
EXPECT_CALL(*mock_, Subscribe(_, _, _))
.InSequence(s1)
.WillOnce(Invoke(cupti_wrapper_.get(), &CuptiWrapper::Subscribe));
const int cb_enable_times = IsCudaNewEnoughForGraphTraceTest() ? 3 : 0;
if (cb_enable_times > 0) {
EXPECT_CALL(*mock_, EnableCallback(1, _, _, _))
.Times(cb_enable_times)
.InSequence(s1)
.WillRepeatedly(
Invoke(cupti_wrapper_.get(), &CuptiWrapper::EnableCallback));
}
EXPECT_CALL(*mock_, EnableDomain(1, _, _))
.InSequence(s1)
.WillOnce(Invoke(cupti_wrapper_.get(), &CuptiWrapper::EnableDomain));
EXPECT_CALL(*mock_, ActivityUsePerThreadBuffer())
.InSequence(s1)
.WillOnce(Invoke(cupti_wrapper_.get(),
&CuptiWrapper::ActivityUsePerThreadBuffer));
EXPECT_CALL(*mock_, ActivityRegisterCallbacks(_, _))
.InSequence(s1)
.WillOnce(Invoke(cupti_wrapper_.get(),
&CuptiWrapper::ActivityRegisterCallbacks));
EXPECT_CALL(*mock_, ActivityEnable(CUPTI_ACTIVITY_KIND_MEMCPY))
.InSequence(s1)
.WillOnce(Invoke(cupti_wrapper_.get(), &CuptiWrapper::ActivityEnable));
EXPECT_CALL(*mock_, ActivityEnable(CUPTI_ACTIVITY_KIND_MEMCPY2))
.InSequence(s1)
.WillOnce(Return(CUPTI_ERROR_UNKNOWN));
EXPECT_CALL(*mock_, GetResultString(CUPTI_ERROR_UNKNOWN, _))
.InSequence(s1)
.WillOnce(Invoke(cupti_wrapper_.get(), &CuptiWrapper::GetResultString));
EXPECT_CALL(*mock_, ActivityDisable(CUPTI_ACTIVITY_KIND_MEMCPY))
.InSequence(s1)
.WillOnce(Invoke(cupti_wrapper_.get(), &CuptiWrapper::ActivityDisable));
EXPECT_CALL(*mock_, EnableDomain(0, _, _))
.InSequence(s1)
.WillOnce(Invoke(cupti_wrapper_.get(), &CuptiWrapper::EnableDomain));
if (cb_enable_times > 0) {
EXPECT_CALL(*mock_, EnableCallback(0, _, _, _))
.Times(cb_enable_times)
.InSequence(s1)
.WillRepeatedly(
Invoke(cupti_wrapper_.get(), &CuptiWrapper::EnableCallback));
}
EXPECT_CALL(*mock_, Unsubscribe(_))
.InSequence(s1)
.WillOnce(Invoke(cupti_wrapper_.get(), &CuptiWrapper::Unsubscribe));
EXPECT_FALSE(CuptiDisabled());
CuptiTracerOptions options;
options.activities_selected.push_back(CUPTI_ACTIVITY_KIND_MEMCPY);
options.activities_selected.push_back(CUPTI_ACTIVITY_KIND_MEMCPY2);
options.activities_selected.push_back(CUPTI_ACTIVITY_KIND_KERNEL);
EnableProfiling(options);
EXPECT_TRUE(CuptiDisabled());
RunGpuApp();
EXPECT_TRUE(CuptiDisabled());
DisableProfiling();
EXPECT_TRUE(CuptiDisabled());
}
}  // namespace test
}  // namespace profiler
}  // namespace xla
#endif | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/backends/profiler/gpu/cupti_error_manager.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/backends/profiler/gpu/cupti_error_manager_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
6a26331c-d8b1-4846-aa6d-ed53d0ece25a | cpp | tensorflow/tensorflow | matrix_diag | tensorflow/lite/kernels/matrix_diag.cc | tensorflow/lite/kernels/matrix_diag_test.cc | #include <stdint.h>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h"
#include "tensorflow/lite/kernels/internal/reference/reference_ops.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace matrix_diag {
constexpr int kInputTensor = 0;
constexpr int kOutputTensor = 0;
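// The output shape is the input shape with the innermost dimension repeated:
// [..., N] -> [..., N, N].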
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
TfLiteIntArray* input_dims = input->dims;
int input_dims_size = input_dims->size;
TF_LITE_ENSURE(context, input_dims_size >= 1);
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
TfLiteIntArray* output_shape = TfLiteIntArrayCreate(input_dims_size + 1);
for (int i = 0; i < input_dims_size; i++) {
output_shape->data[i] = input_dims->data[i];
}
output_shape->data[input_dims_size] = input_dims->data[input_dims_size - 1];
output->type = input->type;
TF_LITE_ENSURE_OK(context,
context->ResizeTensor(context, output, output_shape));
return kTfLiteOk;
}
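// Writes consecutive input values onto the main diagonal of each batch's
// row_size x col_size matrix and zero-fills all off-diagonal entries.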
template <typename T>
void FillDiagImpl(const T* in, T* out, const int batch_size, const int row_size,
const int col_size) {
int idx = 0;
for (int b = 0; b < batch_size; b++) {
for (int i = 0; i < row_size; i++) {
for (int j = 0; j < col_size; ++j) {
if (i == j) {
out[i * col_size + j] = in[idx];
idx++;
} else {
out[i * col_size + j] = 0;
}
}
}
out += row_size * col_size;
}
}
template <typename T>
void FillDiag(const TfLiteTensor* input, TfLiteTensor* output,
const int batch_size, const int row_size, const int col_size) {
FillDiagImpl<T>(GetTensorData<T>(input), GetTensorData<T>(output), batch_size,
row_size, col_size);
}
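// Folds all leading output dimensions (all but the last two) into the batch
// size and dispatches on the element type, defaulting to float.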
void FillDiagHelper(const TfLiteTensor* input, TfLiteTensor* output) {
const int num_output_dims = output->dims->size;
int batch_size = 1;
for (int i = 0; i < num_output_dims - 2; ++i) {
batch_size *= output->dims->data[i];
}
const int row_size = output->dims->data[num_output_dims - 2];
const int col_size = output->dims->data[num_output_dims - 1];
switch (output->type) {
case kTfLiteInt64: {
return FillDiag<int64_t>(input, output, batch_size, row_size, col_size);
}
case kTfLiteInt32: {
return FillDiag<int32_t>(input, output, batch_size, row_size, col_size);
}
case kTfLiteInt16: {
return FillDiag<int16_t>(input, output, batch_size, row_size, col_size);
}
case kTfLiteInt8: {
return FillDiag<int8_t>(input, output, batch_size, row_size, col_size);
}
case kTfLiteUInt8: {
return FillDiag<uint8_t>(input, output, batch_size, row_size, col_size);
}
default:
return FillDiag<float>(input, output, batch_size, row_size, col_size);
}
}
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
FillDiagHelper(input, output);
return kTfLiteOk;
}
}  // namespace matrix_diag
TfLiteRegistration* Register_MATRIX_DIAG() {
static TfLiteRegistration r = {nullptr, nullptr, matrix_diag::Prepare,
matrix_diag::Eval};
return &r;
}
}  // namespace builtin
}  // namespace ops
} | #include <stdint.h>
#include <memory>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/core/interpreter.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace {
using ::testing::ElementsAre;
using ::testing::ElementsAreArray;
template <typename T>
class MatrixDiagOpModel : public SingleOpModel {
public:
explicit MatrixDiagOpModel(const TensorData& input) {
input_ = AddInput(input);
output_ = AddOutput({input.type, {}});
SetBuiltinOp(BuiltinOperator_MATRIX_DIAG, BuiltinOptions_MatrixDiagOptions,
CreateMatrixDiagOptions(builder_).Union());
BuildInterpreter({GetShape(input_)});
}
int input() { return input_; }
std::vector<T> GetOutput() { return ExtractVector<T>(output_); }
std::vector<int> GetOutputShape() { return GetTensorShape(output_); }
TfLiteType GetOutputType() {
TfLiteTensor* t = interpreter_->tensor(output_);
return t->type;
}
private:
int input_;
int output_;
};
template <typename T>
class MatrixDiagOpTest : public ::testing::Test {};
using TypesUnderTest =
::testing::Types<TypeUnion<int32_t>, TypeUnion<float>, TypeUnion<int16_t>,
TypeUnion<int8_t>, TypeUnion<uint8_t>>;
TYPED_TEST_SUITE(MatrixDiagOpTest, TypesUnderTest);
TYPED_TEST(MatrixDiagOpTest, ThreeByThreeDiag) {
MatrixDiagOpModel<typename TypeParam::ScalarType> model(
{TypeParam::tensor_type, {3}});
model.template PopulateTensor<typename TypeParam::ScalarType>(model.input(),
{1, 2, 3});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(3, 3));
EXPECT_THAT(model.GetOutput(), ElementsAreArray({1, 0, 0,
0, 2, 0,
0, 0, 3}));
EXPECT_THAT(model.GetOutputType(), TypeParam::tflite_type);
}
TEST(MatrixDiagTest, Int32TestTwoDimDiag) {
MatrixDiagOpModel<int32_t> model({TensorType_INT32, {2, 4}});
model.PopulateTensor<int32_t>(model.input(), {1, 2, 3, 4, 5, 6, 7, 8});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(2, 4, 4));
EXPECT_THAT(model.GetOutput(), ElementsAreArray({1, 0, 0, 0,
0, 2, 0, 0,
0, 0, 3, 0,
0, 0, 0, 4,
5, 0, 0, 0,
0, 6, 0, 0,
0, 0, 7, 0,
0, 0, 0, 8}));
EXPECT_THAT(model.GetOutputType(), TfLiteType::kTfLiteInt32);
}
TEST(MatrixDiagTest, DegenerateCase) {
MatrixDiagOpModel<uint8_t> model({TensorType_UINT8, {1}});
model.PopulateTensor<uint8_t>(model.input(), {1});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 1));
EXPECT_THAT(model.GetOutput(), ElementsAreArray({1}));
EXPECT_THAT(model.GetOutputType(), TfLiteType::kTfLiteUInt8);
}
}  // namespace
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/matrix_diag.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/matrix_diag_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
c14d0579-faed-4c33-bc1a-ab53d3937844 | cpp | tensorflow/tensorflow | aggregate_profile | third_party/xla/xla/python/aggregate_profile.cc | third_party/xla/xla/python/aggregate_profile_test.cc | #include "xla/python/aggregate_profile.h"
#include <algorithm>
#include <string>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/types/span.h"
#include "xla/python/xplane_to_profile_instructions.h"
namespace xla {
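// Pools each instruction's costs across all profiles and keeps, per
// instruction, the cost at the requested percentile; the index into the
// sorted durations is truncated toward zero.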
void AggregateProfiledInstructionsProto(
absl::Span<const tensorflow::profiler::ProfiledInstructionsProto> profiles,
int percentile,
tensorflow::profiler::ProfiledInstructionsProto *result_profile) {
if (percentile < 0 || percentile > 100) return;
absl::flat_hash_map<std::string, HloLatencyInfo> hlo_latency_info;
for (const auto &profile : profiles) {
for (const auto &cost : profile.costs()) {
hlo_latency_info[cost.name()].durations.emplace_back(cost.cost_us());
}
}
for (const auto &iter : hlo_latency_info) {
auto *cost = result_profile->add_costs();
std::vector<double> durations = iter.second.durations;
int index = 0;
if (durations.size() > 1) {
std::sort(durations.begin(), durations.end());
index = percentile / 100.0 * (durations.size() - 1);
}
cost->set_cost_us(durations[index]);
cost->set_name(iter.first);
}
}
} | #include "xla/python/aggregate_profile.h"
#include <map>
#include <string>
#include <vector>
#include "absl/types/span.h"
#include "tsl/platform/test.h"
#include "tsl/profiler/protobuf/profiled_instructions.pb.h"
namespace xla {
namespace {
using tensorflow::profiler::ProfiledInstructionsProto;
TEST(AggregateProfiledInstructionsProtoTest, aggregateAndGetPercentile) {
tensorflow::profiler::ProfiledInstructionsProto profile_a;
{
auto *cost_a = profile_a.add_costs();
cost_a->set_cost_us(10);
cost_a->set_name("reduce");
}
{
auto *cost_a = profile_a.add_costs();
cost_a->set_cost_us(30);
cost_a->set_name("copy");
}
tensorflow::profiler::ProfiledInstructionsProto profile_c;
{
auto *cost_c = profile_c.add_costs();
cost_c->set_cost_us(30);
cost_c->set_name("reduce");
}
std::vector<tensorflow::profiler::ProfiledInstructionsProto> profiles = {
profile_a, profile_c};
std::vector<int> custom_call_costs = {0, 10, 20, 30, 40, 50,
60, 70, 80, 90, 100};
for (int cost : custom_call_costs) {
tensorflow::profiler::ProfiledInstructionsProto profile_custom_call;
{
auto *cost_c = profile_custom_call.add_costs();
cost_c->set_cost_us(cost);
cost_c->set_name("custom-call");
}
profiles.push_back(profile_custom_call);
}
tensorflow::profiler::ProfiledInstructionsProto result_90th;
AggregateProfiledInstructionsProto(
absl::Span<const tensorflow::profiler::ProfiledInstructionsProto>(
profiles.data(), profiles.size()),
90, &result_90th);
EXPECT_EQ(result_90th.costs_size(), 3);
std::map<std::string, float> costs;
for (const auto &cost : result_90th.costs()) {
costs[cost.name()] = cost.cost_us();
}
EXPECT_EQ(costs["copy"], 30);
EXPECT_EQ(costs["custom-call"], 90);
EXPECT_EQ(costs["reduce"], 10);
tensorflow::profiler::ProfiledInstructionsProto result_10th;
AggregateProfiledInstructionsProto(
absl::Span<const tensorflow::profiler::ProfiledInstructionsProto>(
profiles.data(), profiles.size()),
10, &result_10th);
EXPECT_EQ(result_10th.costs_size(), 3);
for (const auto &cost : result_10th.costs()) {
costs[cost.name()] = cost.cost_us();
}
EXPECT_EQ(costs["copy"], 30);
EXPECT_EQ(costs["custom-call"], 10);
EXPECT_EQ(costs["reduce"], 10);
}
TEST(AggregateProfiledInstructionsProtoTest, getIncorrectPercentile) {
tensorflow::profiler::ProfiledInstructionsProto profile_a;
{
auto *cost_a = profile_a.add_costs();
cost_a->set_cost_us(10);
cost_a->set_name("reduce");
}
std::vector<tensorflow::profiler::ProfiledInstructionsProto> profiles = {
profile_a};
tensorflow::profiler::ProfiledInstructionsProto result;
AggregateProfiledInstructionsProto(
absl::Span<const tensorflow::profiler::ProfiledInstructionsProto>(
profiles.data(), profiles.size()),
-1, &result);
EXPECT_EQ(result.costs_size(), 0);
AggregateProfiledInstructionsProto(
absl::Span<const tensorflow::profiler::ProfiledInstructionsProto>(
profiles.data(), profiles.size()),
101, &result);
EXPECT_EQ(result.costs_size(), 0);
AggregateProfiledInstructionsProto(
absl::Span<const tensorflow::profiler::ProfiledInstructionsProto>(
profiles.data(), profiles.size()),
100, &result);
EXPECT_EQ(result.costs_size(), 1);
}
}  // namespace
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/python/aggregate_profile.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/python/aggregate_profile_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
986cfe0f-77b0-4b65-9c58-58db0f70470c | cpp | tensorflow/tensorflow | softmax1x1 | tensorflow/lite/delegates/gpu/common/tasks/softmax1x1.cc | tensorflow/lite/delegates/gpu/cl/kernels/softmax1x1_test.cc | #include "tensorflow/lite/delegates/gpu/common/tasks/softmax1x1.h"
#include <string>
#include <utility>
#include <vector>
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/task/util.h"
namespace tflite {
namespace gpu {
namespace {
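// Emits the accumulation statement for the given reduction op; ops other than
// ADD and MAXIMUM leave the accumulator unchanged.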
std::string MakeAccOp(OperationType op_type, const std::string& a,
const std::string& b) {
if (op_type == OperationType::ADD) {
return a + " = " + a + " + " + b;
} else if (op_type == OperationType::MAXIMUM) {
return a + " = max(" + a + ", " + b + ")";
} else {
return a;
}
}
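// Generates a staged tree reduction through local memory;
// group_reduction_size is decomposed into per-stage factors
// (e.g. 128 -> 8 * 4 * 4).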
std::string GetReduceCode(const std::string& value, OperationType op_type,
int group_reduction_size) {
std::vector<int> stages;
if (group_reduction_size == 1024) {
stages = {8, 8, 4, 4};
} else if (group_reduction_size == 512) {
stages = {8, 8, 8};
} else if (group_reduction_size == 256) {
stages = {8, 8, 4};
} else if (group_reduction_size == 128) {
stages = {8, 4, 4};
} else if (group_reduction_size == 64) {
stages = {8, 8};
} else if (group_reduction_size == 32) {
stages = {8, 4};
} else if (group_reduction_size == 16) {
stages = {4, 4};
} else if (group_reduction_size <= 8) {
stages = {group_reduction_size};
}
std::string c;
c += " LOCAL_MEM_BARRIER;\n";
c += " loc_mem[tid] = " + value + ";\n";
int stride = 1;
for (int i = 0; i < stages.size(); ++i) {
const bool last_stage = i == stages.size() - 1;
const std::string condition =
last_stage ? "tid == 0"
: "tid % " + std::to_string(stride * stages[i]) + " == 0";
const std::string location = last_stage ? "loc_mem[0]" : "loc_mem[tid]";
c += " LOCAL_MEM_BARRIER;\n";
c += " if (" + condition + ") {\n";
for (int j = 1; j < stages[i]; ++j) {
c += " " +
MakeAccOp(op_type, value,
"loc_mem[tid + " + std::to_string(stride * j) + "]") +
";\n";
}
c += " " + location + " = " + value + ";\n";
c += " }\n";
stride *= stages[i];
}
c += " LOCAL_MEM_BARRIER;\n";
c += " " + value + " = loc_mem[0];\n";
return c;
}
}  // namespace
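// The work group size starts from a per-vendor default and is halved until it
// fits both the number of channel slices and the device's X-dimension limit.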
Softmax1x1::Softmax1x1(const OperationDef& definition, const GpuInfo& gpu_info,
const BHWC& shape)
: GPUOperation(definition) {
if (gpu_info.IsAdreno() && gpu_info.adreno_info.IsAdreno7xx()) {
work_group_size_ = int3(512, 1, 1);
} else if (gpu_info.IsMali()) {
work_group_size_ = int3(1024, 1, 1);
} else {
work_group_size_ = int3(128, 1, 1);
}
const int slices = DivideRoundUp(shape.c, 4);
while (work_group_size_.x >= slices * 2) {
work_group_size_.x /= 2;
}
while (work_group_size_.x >= gpu_info.GetMaxWorkGroupSizeForX()) {
work_group_size_.x /= 2;
}
code_ = GetSoftmaxKernelCode(definition_);
}
Softmax1x1::Softmax1x1(Softmax1x1&& kernel) : GPUOperation(std::move(kernel)) {}
Softmax1x1& Softmax1x1::operator=(Softmax1x1&& kernel) {
if (this != &kernel) {
GPUOperation::operator=(std::move(kernel));
}
return *this;
}
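// One work group handles one (X, Y) location: threads cooperatively reduce
// the channel-wise maximum and the sum of exponentials, then each thread
// normalizes and writes its own slice.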
std::string Softmax1x1::GetSoftmaxKernelCode(const OperationDef& op_def) {
AddSrcTensor("src_tensor", op_def.src_tensors[0]);
AddDstTensor("dst_tensor", op_def.dst_tensors[0]);
args_.AddFloat("mask_x");
args_.AddFloat("mask_y");
args_.AddFloat("mask_z");
args_.AddFloat("mask_w");
std::string c;
c += "MAIN_FUNCTION($0) {\n";
if (op_def.dst_tensors[0].HasAxis(Axis::BATCH)) {
c += " int linear_id = GROUP_ID_1;\n";
c += " int X = linear_id / args.dst_tensor.Batch();\n";
c += " int B = linear_id % args.dst_tensor.Batch();\n";
c += " if (B >= args.dst_tensor.Batch()) return;\n";
c += " args.src_tensor.SetBatchRef(B);\n";
c += " args.dst_tensor.SetBatchRef(B);\n";
} else {
c += " int X = GROUP_ID_1;\n";
}
c += " int Y = GROUP_ID_2;\n";
c += " if (X >= args.dst_tensor.Width()) return;\n";
c += " if (Y >= args.dst_tensor.Height()) return;\n";
c += " float4 mask = INIT_FLOAT4v4(args.mask_x, args.mask_y, args.mask_z, "
"args.mask_w);\n";
c +=
" float4 maxx4 = INIT_FLOAT4(args.src_tensor.Read<float>(X, Y, 0).x);\n";
c += " int tid = LOCAL_ID_0;\n";
const int group_reduction_size = work_group_size_.x;
c += " for (int s = tid; s < args.src_tensor.Slices(); s += " +
std::to_string(group_reduction_size) + ") {\n";
c += " float4 mask_a = s == args.src_tensor.Slices() - 1 ? mask : "
"INIT_FLOAT4(1.0f);\n";
c += " float4 mask_b = INIT_FLOAT4(1.0f) - mask_a;\n";
c += " float4 src = args.src_tensor.Read<float>(X, Y, s);\n";
c += " src = src * mask_a + mask_b * src.x;\n";
c += " maxx4 = max(maxx4, src);\n";
c += " }\n";
c += " float maximum = max(maxx4.x, maxx4.y);\n";
c += " maximum = max(maximum, maxx4.z);\n";
c += " maximum = max(maximum, maxx4.w);\n";
c += " __local float loc_mem[" + std::to_string(group_reduction_size) +
"];\n";
c += GetReduceCode("maximum", OperationType::MAXIMUM, group_reduction_size);
c += " float sum = 0.0f;\n";
c += " for (int s = tid; s < args.src_tensor.Slices(); s += " +
std::to_string(group_reduction_size) + ") {\n";
c += " float4 mask_temp = s == args.src_tensor.Slices() - 1 ? mask : "
"INIT_FLOAT4(1.0f);\n";
c += " float4 src = args.src_tensor.Read<float>(X, Y, s) - "
"INIT_FLOAT4(maximum);\n";
c += " sum += dot(mask_temp, exp(src));\n";
c += " }\n";
c += GetReduceCode("sum", OperationType::ADD, group_reduction_size);
c += " sum = 1.0f / sum;\n";
c += " int dst_s = GLOBAL_ID_0;\n";
c += " if (dst_s < args.dst_tensor.Slices()) {\n";
c += " float4 src = args.src_tensor.Read<float>(X, Y, dst_s) - "
"INIT_FLOAT4(maximum);\n";
c += " FLT4 res = TO_FLT4(exp(src) * sum);\n";
c += " args.dst_tensor.Write(res, X, Y, dst_s);\n";
c += " }\n";
c += "}\n";
return c;
}
absl::Status Softmax1x1::BindArguments(ArgumentsBinder* args) {
float4 mask = GetMaskForLastPlane(src_[0]->Channels());
RETURN_IF_ERROR(args->SetFloat("mask_x", mask.x));
RETURN_IF_ERROR(args->SetFloat("mask_y", mask.y));
RETURN_IF_ERROR(args->SetFloat("mask_z", mask.z));
RETURN_IF_ERROR(args->SetFloat("mask_w", mask.w));
return absl::OkStatus();
}
int3 Softmax1x1::GetGridSize() const {
return int3(dst_[0]->Slices(), dst_[0]->Width() * dst_[0]->Batch(),
dst_[0]->Height());
}
Softmax1x1 CreateSoftmax1x1(const OperationDef& definition,
const GpuInfo& gpu_info, const BHWC& shape) {
return Softmax1x1(definition, gpu_info, shape);
}
}  // namespace gpu
} | #include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/gpu/cl/kernels/cl_test.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/tasks/softmax_test_util.h"
namespace tflite {
namespace gpu {
namespace cl {
namespace {
TEST_F(OpenCLOperationTest, Softmax1x1) {
auto status = Softmax1x1Test(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, Softmax1x1BigNumber) {
auto status = Softmax1x1BigNumberTest(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
}  // namespace
}  // namespace cl
}  // namespace gpu
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/common/tasks/softmax1x1.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/cl/kernels/softmax1x1_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
4b460faa-3e02-4fe9-9e44-a87764cff48c | cpp | google/tensorstore | status | tensorstore/serialization/status.cc | tensorstore/serialization/status_test.cc | #include "absl/status/status.h"
#include "tensorstore/serialization/serialization.h"
#include "tensorstore/serialization/status.h"
namespace tensorstore {
namespace serialization {
bool ErrorStatusSerializer::Encode(EncodeSink& sink,
const absl::Status& status) {
assert(!status.ok());
return serialization::Encode(sink, status);
}
bool ErrorStatusSerializer::Decode(DecodeSource& source, absl::Status& status) {
if (!serialization::Decode(source, status)) return false;
if (status.ok()) {
source.Fail(absl::DataLossError("Expected error status"));
return false;
}
return true;
}
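// Wire format: the status code; for non-OK statuses, the message followed by
// (true, payload, url) tuples and a terminating false flag.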
bool Serializer<absl::Status>::Encode(EncodeSink& sink,
const absl::Status& value) {
if (!serialization::Encode(sink, value.code())) return false;
if (value.ok()) return true;
if (!serialization::Encode(sink, value.message())) return false;
bool ok = true;
value.ForEachPayload([&](std::string_view url, const absl::Cord& payload) {
if (!ok) return;
ok = serialization::EncodeTuple(sink, true, payload, url);
});
if (!ok) return false;
return serialization::Encode(sink, false);
}
bool Serializer<absl::Status>::Decode(DecodeSource& source,
absl::Status& value) {
absl::StatusCode code;
if (!serialization::Decode(source, code)) return false;
if (code == absl::StatusCode::kOk) {
value = absl::OkStatus();
return true;
}
std::string_view message;
if (!serialization::Decode(source, message)) return false;
value = absl::Status(code, message);
while (true) {
bool has_payload;
if (!serialization::Decode(source, has_payload)) return false;
if (!has_payload) break;
absl::Cord payload;
std::string_view url;
if (!serialization::DecodeTuple(source, payload, url)) return false;
value.SetPayload(url, payload);
}
return true;
}
}  // namespace serialization
} | #include "tensorstore/serialization/status.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "tensorstore/serialization/serialization.h"
#include "tensorstore/serialization/test_util.h"
namespace {
using ::tensorstore::serialization::TestSerializationRoundTrip;
TEST(StatusTest, SimpleRoundTrip) {
TestSerializationRoundTrip(absl::OkStatus());
TestSerializationRoundTrip(absl::InvalidArgumentError("abc"));
}
TEST(StatusTest, PayloadRoundTrip) {
auto status = absl::InternalError("xyz");
status.SetPayload("a", absl::Cord("b"));
TestSerializationRoundTrip(status);
status.SetPayload("c", absl::Cord("d"));
TestSerializationRoundTrip(status);
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/serialization/status.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/serialization/status_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
3a36959a-e048-406a-8180-3a5c2b174527 | cpp | google/tensorstore | encode_time | tensorstore/proto/encode_time.cc | tensorstore/proto/encode_time_test.cc | #include "tensorstore/proto/encode_time.h"
#include "google/protobuf/duration.pb.h"
#include "google/protobuf/timestamp.pb.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal {
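// Infinite future/past map onto sentinel int64 seconds values (max/min);
// finite times are split into Unix seconds plus nanoseconds.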
void AbslTimeToProto(absl::Time t, google::protobuf::Timestamp* proto) {
if (t == absl::InfiniteFuture()) {
proto->set_seconds(0x7FFFFFFFFFFFFFFFll);
proto->set_nanos(0);
} else if (t == absl::InfinitePast()) {
proto->set_seconds(0x8000000000000000ll);
proto->set_nanos(0);
} else {
const int64_t s = absl::ToUnixSeconds(t);
const int64_t n = (t - absl::FromUnixSeconds(s)) / absl::Nanoseconds(1);
proto->set_seconds(s);
proto->set_nanos(n);
}
}
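// Rejects timestamps outside the protobuf-specified range
// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59Z unless they match the
// infinity sentinels above.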
tensorstore::Result<absl::Time> ProtoToAbslTime(
const google::protobuf::Timestamp& proto) {
const auto sec = proto.seconds();
const auto ns = proto.nanos();
if (sec == 0x7FFFFFFFFFFFFFFFll) {
return absl::InfiniteFuture();
}
if (sec == 0x8000000000000000ll) {
return absl::InfinitePast();
}
if (sec < -62135596800 || sec > 253402300799) {
return absl::InvalidArgumentError(tensorstore::StrCat("seconds=", sec));
}
if (ns < 0 || ns > 999999999) {
return absl::InvalidArgumentError(tensorstore::StrCat("nanos=", ns));
}
return absl::FromUnixSeconds(sec) + absl::Nanoseconds(ns);
}
void AbslDurationToProto(absl::Duration d, google::protobuf::Duration* proto) {
if (d == absl::InfiniteDuration()) {
proto->set_seconds(0x7FFFFFFFFFFFFFFFll);
proto->set_nanos(0);
} else if (d == -absl::InfiniteDuration()) {
proto->set_seconds(0x8000000000000000ll);
proto->set_nanos(0);
} else {
const int64_t s = absl::IDivDuration(d, absl::Seconds(1), &d);
const int64_t n = absl::IDivDuration(d, absl::Nanoseconds(1), &d);
proto->set_seconds(s);
proto->set_nanos(n);
}
}
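// Enforces the protobuf Duration limits (+/-315,576,000,000 seconds, roughly
// 10,000 years) and requires seconds and nanos to agree in sign.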
Result<absl::Duration> ProtoToAbslDuration(
const google::protobuf::Duration& proto) {
const auto sec = proto.seconds();
if (sec == 0x7FFFFFFFFFFFFFFFll) {
return absl::InfiniteDuration();
}
if (sec == 0x8000000000000000ll) {
return -absl::InfiniteDuration();
}
const auto ns = proto.nanos();
if (sec < -315576000000 || sec > 315576000000) {
return absl::InvalidArgumentError(tensorstore::StrCat("seconds=", sec));
}
if (ns < -999999999 || ns > 999999999) {
return absl::InvalidArgumentError(tensorstore::StrCat("nanos=", ns));
}
if ((sec < 0 && ns > 0) || (sec > 0 && ns < 0)) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Sign mismatch between seconds=", sec, ", nanos=", ns));
}
return absl::Seconds(sec) + absl::Nanoseconds(ns);
}
}  // namespace internal
} | #include "tensorstore/proto/encode_time.h"
#include "google/protobuf/duration.pb.h"
#include "google/protobuf/timestamp.pb.h"
#include <gtest/gtest.h>
#include "tensorstore/util/status_testutil.h"
#include "tensorstore/util/str_cat.h"
namespace {
using ::tensorstore::internal::AbslDurationToProto;
using ::tensorstore::internal::AbslTimeToProto;
using ::tensorstore::internal::ProtoToAbslDuration;
using ::tensorstore::internal::ProtoToAbslTime;
TEST(EncodeTimestamp, Basic) {
auto roundtrip = [](absl::Time ts) {
google::protobuf::Timestamp proto;
AbslTimeToProto(ts, &proto);
return ProtoToAbslTime(proto);
};
tensorstore::Result<absl::Time> result;
result = roundtrip(absl::InfinitePast());
TENSORSTORE_ASSERT_OK(result);
EXPECT_EQ(absl::InfinitePast(), *result);
result = roundtrip(absl::InfiniteFuture());
TENSORSTORE_ASSERT_OK(result);
EXPECT_EQ(absl::InfiniteFuture(), *result);
auto now = absl::Now();
result = roundtrip(now);
TENSORSTORE_ASSERT_OK(result);
EXPECT_EQ(now, *result);
}
TEST(EncodeDuration, Basic) {
auto roundtrip = [](absl::Duration d) {
google::protobuf::Duration proto;
AbslDurationToProto(d, &proto);
return ProtoToAbslDuration(proto);
};
auto test_roundtrip = [&](absl::Duration d) {
SCOPED_TRACE(tensorstore::StrCat("duration=", d));
EXPECT_THAT(roundtrip(d), ::testing::Optional(d));
};
test_roundtrip(absl::InfiniteDuration());
test_roundtrip(-absl::InfiniteDuration());
test_roundtrip(absl::Seconds(5));
test_roundtrip(absl::Seconds(-5));
test_roundtrip(absl::ZeroDuration());
test_roundtrip(absl::Milliseconds(12345));
test_roundtrip(absl::Milliseconds(-12345));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/proto/encode_time.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/proto/encode_time_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
59a59525-5523-4242-bd39-1cd2892685de | cpp | tensorflow/tensorflow | redzone_allocator | third_party/xla/xla/stream_executor/gpu/redzone_allocator.cc | third_party/xla/xla/stream_executor/gpu/redzone_allocator_test.cc | #include "xla/stream_executor/gpu/redzone_allocator.h"
#include <algorithm>
#include <array>
#include <cstdint>
#include <cstring>
#include <memory>
#include <string>
#include <utility>
#include "absl/container/fixed_array.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/device_memory_handle.h"
#include "xla/stream_executor/gpu/gpu_asm_opts.h"
#include "xla/stream_executor/gpu/redzone_allocator_kernel.h"
#include "xla/stream_executor/kernel.h"
#include "xla/stream_executor/launch_dim.h"
#include "xla/stream_executor/stream.h"
#include "xla/stream_executor/stream_executor.h"
#include "xla/tsl/framework/allocator.h"
#include "xla/tsl/lib/math/math_util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace stream_executor {
template <typename T>
static T RoundUpToNearest(T value, T divisor) {
return tsl::MathUtil::CeilOfRatio(value, divisor) * divisor;
}
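// The user buffer is padded ("slop") to a multiple of this alignment so that
// the right-hand redzone starts on a 4-byte boundary.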
constexpr int64_t kRhsRedzoneAlign = 4;
using RedzoneCheckStatus = RedzoneAllocator::RedzoneCheckStatus;
RedzoneAllocator::RedzoneAllocator(Stream* stream,
DeviceMemoryAllocator* memory_allocator,
const GpuAsmOpts& gpu_compilation_opts,
int64_t memory_limit, int64_t redzone_size,
uint8_t redzone_pattern)
: device_ordinal_(stream->parent()->device_ordinal()),
stream_(stream),
memory_limit_(memory_limit),
redzone_size_(RoundUpToNearest(
redzone_size,
static_cast<int64_t>(tsl::Allocator::kAllocatorAlignment))),
redzone_pattern_(redzone_pattern),
memory_allocator_(memory_allocator),
gpu_compilation_opts_(gpu_compilation_opts) {}
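// Each allocation is laid out as [LHS redzone | user buffer | slop | RHS
// redzone]; the redzones and the slop are filled with redzone_pattern_.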
absl::StatusOr<DeviceMemory<uint8_t>> RedzoneAllocator::AllocateBytes(
int64_t byte_size) {
  CHECK_GE(byte_size, 0) << "byte_size must be non-negative.";
if (byte_size > GetMemoryLimitInBytes()) {
return absl::ResourceExhaustedError(absl::StrFormat(
"Allocating %d bytes exceeds the memory limit of %d bytes.", byte_size,
GetMemoryLimitInBytes()));
}
int64_t rhs_slop = RoundUpToNearest(byte_size, kRhsRedzoneAlign) - byte_size;
TF_ASSIGN_OR_RETURN(
OwningDeviceMemory allocated_buffer,
memory_allocator_->Allocate(device_ordinal_,
byte_size + 2 * redzone_size_ + rhs_slop,
                                  /*retry_on_failure=*/false));
allocated_bytes_excluding_redzones_ += byte_size;
static_assert(sizeof(uint8_t) == 1, "Unexpected size");
DeviceMemory<uint8_t> allocated_buffer_memory(*allocated_buffer);
DeviceMemory<uint8_t> lhs_redzone =
allocated_buffer_memory.GetSlice(0, redzone_size_);
DeviceMemory<uint8_t> data_chunk =
allocated_buffer_memory.GetSlice(redzone_size_, byte_size);
DeviceMemory<uint8_t> rhs_redzone_slop =
allocated_buffer_memory.GetSlice(redzone_size_ + byte_size, rhs_slop);
DeviceMemory<uint8_t> rhs_redzone_nonslop = allocated_buffer_memory.GetSlice(
redzone_size_ + byte_size + rhs_slop, redzone_size_);
uint8_t pattern_arr[] = {redzone_pattern_, redzone_pattern_, redzone_pattern_,
redzone_pattern_};
uint32_t pattern32;
std::memcpy(&pattern32, pattern_arr, sizeof(pattern32));
TF_RETURN_IF_ERROR(stream_->Memset32(&lhs_redzone, pattern32, redzone_size_));
if (rhs_slop != 0) {
TF_RETURN_IF_ERROR(
stream_->Memcpy(&rhs_redzone_slop, &pattern32, rhs_slop));
}
TF_RETURN_IF_ERROR(
stream_->Memset32(&rhs_redzone_nonslop, pattern32, redzone_size_));
allocated_buffers_.emplace_back(std::move(allocated_buffer), byte_size);
return data_chunk;
}
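// Host-side fallback: copies the redzone back and scans it eight bytes at a
// time to locate the first byte that differs from the pattern.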
static absl::StatusOr<RedzoneCheckStatus> CheckRedzoneHost(
DeviceMemoryBase redzone, DeviceMemoryBase user_allocation,
absl::string_view name, Stream* stream, uint8_t redzone_pattern) {
uint64_t size = redzone.size();
auto redzone_data = std::make_unique<uint8_t[]>(size);
TF_RETURN_IF_ERROR(stream->Memcpy(redzone_data.get(), redzone, size));
TF_RETURN_IF_ERROR(stream->BlockHostUntilDone());
std::array<uint8_t, sizeof(uint64_t)> pattern_arr;
pattern_arr.fill(redzone_pattern);
uint64_t pattern64;
std::memcpy(&pattern64, pattern_arr.data(), sizeof(uint64_t));
int64_t i;
for (i = 0; i + 7 < size; i += sizeof(uint64_t)) {
uint64_t rz_value = *reinterpret_cast<uint64_t*>(&redzone_data[i]);
if (rz_value != pattern64) {
return RedzoneCheckStatus(name, user_allocation.opaque(), i, pattern64,
rz_value);
}
}
for (; i < size; ++i) {
uint8_t rz_value = redzone_data[i];
if (rz_value != redzone_pattern) {
return RedzoneCheckStatus(name, user_allocation.opaque(), i,
redzone_pattern, rz_value);
}
}
return RedzoneCheckStatus::OK();
}
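// Launches the device comparison kernel over the redzone; mismatch counts are
// accumulated into out_param.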
static absl::Status RunRedzoneChecker(
Stream* stream, const DeviceMemory<uint8_t>& redzone,
uint8_t redzone_pattern, const DeviceMemory<uint64_t>& out_param,
const ComparisonKernel& comparison_kernel) {
StreamExecutor* executor = stream->parent();
if (redzone.size() == 0) {
return absl::OkStatus();
}
int64_t num_elements = redzone.size();
int64_t threads_per_block = std::min(
executor->GetDeviceDescription().threads_per_block_limit(), num_elements);
int64_t block_count =
tsl::MathUtil::CeilOfRatio(num_elements, threads_per_block);
TF_RETURN_IF_ERROR(stream->ThenLaunch(
ThreadDim(threads_per_block), BlockDim(block_count), comparison_kernel,
redzone, redzone_pattern, redzone.size(), out_param));
return absl::OkStatus();
}
static absl::Status ReinitializeRedzone(Stream* stream,
DeviceMemoryBase redzone,
uint8_t redzone_pattern) {
absl::FixedArray<uint8_t> redzone_array(redzone.size());
redzone_array.fill(redzone_pattern);
TF_RETURN_IF_ERROR(
stream->Memcpy(&redzone, redzone_array.data(), redzone.size()));
TF_RETURN_IF_ERROR(stream->BlockHostUntilDone());
return absl::OkStatus();
}
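// Checks both redzones of one allocation on the device; on a mismatch,
// re-checks on the host to pinpoint the failure and restores the redzones for
// subsequent checks.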
static absl::StatusOr<RedzoneCheckStatus> CheckRedzonesForBuffer(
Stream* stream, DeviceMemoryBase memory,
const DeviceMemory<uint64_t>& out_param,
const ComparisonKernel& comparison_kernel, int64_t user_allocation_size,
uint64_t redzone_size, uint8_t redzone_pattern) {
int64_t rhs_slop =
RoundUpToNearest<int64_t>(user_allocation_size, kRhsRedzoneAlign) -
user_allocation_size;
CHECK_EQ(memory.size(), user_allocation_size + rhs_slop + 2 * redzone_size);
DeviceMemory<uint8_t> buffer_uint8(memory);
DeviceMemory<uint8_t> lhs_redzone =
      buffer_uint8.GetSlice(0, redzone_size);
DeviceMemory<uint8_t> user_allocation =
      buffer_uint8.GetSlice(redzone_size, user_allocation_size);
DeviceMemory<uint8_t> rhs_redzone =
buffer_uint8.GetSlice(redzone_size + user_allocation_size,
redzone_size + rhs_slop);
TF_RETURN_IF_ERROR(RunRedzoneChecker(stream, lhs_redzone, redzone_pattern,
out_param, comparison_kernel));
TF_RETURN_IF_ERROR(RunRedzoneChecker(stream, rhs_redzone, redzone_pattern,
out_param, comparison_kernel));
int64_t result;
CHECK_EQ(out_param.size(), sizeof(result));
TF_RETURN_IF_ERROR(stream->Memcpy(&result, out_param, sizeof(result)));
TF_RETURN_IF_ERROR(stream->BlockHostUntilDone());
if (result != 0) {
TF_ASSIGN_OR_RETURN(RedzoneCheckStatus lhs_check,
CheckRedzoneHost(lhs_redzone, user_allocation, "LHS",
stream, redzone_pattern));
TF_ASSIGN_OR_RETURN(RedzoneCheckStatus rhs_check,
CheckRedzoneHost(rhs_redzone, user_allocation, "RHS",
stream, redzone_pattern));
CHECK(!lhs_check.ok() || !rhs_check.ok())
<< "Mismatched results with host and device comparison";
TF_RETURN_IF_ERROR(
ReinitializeRedzone(stream, lhs_redzone, redzone_pattern));
TF_RETURN_IF_ERROR(
ReinitializeRedzone(stream, rhs_redzone, redzone_pattern));
return !lhs_check.ok() ? lhs_check : rhs_check;
}
return RedzoneCheckStatus::OK();
}
absl::StatusOr<RedzoneCheckStatus> RedzoneAllocator::CheckRedzones() const {
StreamExecutor* executor = stream_->parent();
TF_ASSIGN_OR_RETURN(
const ComparisonKernel* kernel,
GetComparisonKernel(stream_->parent(), gpu_compilation_opts_));
stream_executor::DeviceMemoryHandle out_param(
executor, executor->AllocateScalar<uint64_t>());
TF_RETURN_IF_ERROR(
stream_->MemZero(out_param.memory_ptr(), sizeof(uint64_t)));
for (const auto& buf_and_size : allocated_buffers_) {
TF_ASSIGN_OR_RETURN(
RedzoneCheckStatus redzone_status,
CheckRedzonesForBuffer(stream_, *buf_and_size.first,
DeviceMemory<uint64_t>(out_param.memory()),
*kernel, buf_and_size.second, redzone_size_,
redzone_pattern_));
if (!redzone_status.ok()) {
return redzone_status;
}
}
return RedzoneCheckStatus::OK();
}
std::string RedzoneCheckStatus::RedzoneFailureMsg() const {
return absl::StrFormat(
"Redzone mismatch in %s redzone of buffer %p at offset %d; "
"expected %08x but was %08x.",
buffer_name, user_buffer_address, offset, expected_value, actual_value);
}
}  // namespace stream_executor | #include "xla/stream_executor/gpu/redzone_allocator.h"
#include <cstdint>
#include <vector>
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/device_memory_allocator.h"
#include "xla/stream_executor/gpu/gpu_asm_opts.h"
#include "xla/stream_executor/gpu/gpu_init.h"
#include "xla/stream_executor/platform.h"
#include "xla/stream_executor/platform_manager.h"
#include "xla/stream_executor/stream_executor_memory_allocator.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace stream_executor {
namespace gpu {
using RedzoneCheckStatus = RedzoneAllocator::RedzoneCheckStatus;
static void EXPECT_REDZONE_OK(absl::StatusOr<RedzoneCheckStatus> status) {
EXPECT_TRUE(status.ok());
EXPECT_TRUE(status.value().ok());
}
static void EXPECT_REDZONE_VIOLATION(
absl::StatusOr<RedzoneCheckStatus> status) {
EXPECT_TRUE(status.ok());
EXPECT_FALSE(status.value().ok());
}
TEST(RedzoneAllocatorTest, WriteToRedzone) {
constexpr int64_t kRedzoneSize = 1 << 23;
constexpr uint8_t kRedzonePattern = 0x7e;
constexpr int64_t kAllocSize = (1 << 25) + 1;
Platform* platform =
PlatformManager::PlatformWithName(GpuPlatformName()).value();
StreamExecutor* stream_exec = platform->ExecutorForDevice(0).value();
GpuAsmOpts opts;
StreamExecutorMemoryAllocator se_allocator(platform, {stream_exec});
TF_ASSERT_OK_AND_ASSIGN(auto stream, stream_exec->CreateStream());
RedzoneAllocator allocator(stream.get(), &se_allocator, opts,
                             /*memory_limit=*/(1LL << 32),
                             /*redzone_size=*/kRedzoneSize,
                             /*redzone_pattern=*/kRedzonePattern);
TF_ASSERT_OK_AND_ASSIGN(DeviceMemory<uint8_t> buf,
allocator.AllocateBytes(kAllocSize));
EXPECT_REDZONE_OK(allocator.CheckRedzones());
char* buf_addr = reinterpret_cast<char*>(buf.opaque());
DeviceMemoryBase lhs_redzone(buf_addr - kRedzoneSize, kRedzoneSize);
DeviceMemoryBase rhs_redzone(buf_addr + kAllocSize, kRedzoneSize);
auto check_redzone = [&](DeviceMemoryBase redzone, absl::string_view name) {
std::vector<uint8_t> host_buf(kRedzoneSize);
TF_ASSERT_OK(stream->Memcpy(host_buf.data(), redzone, kRedzoneSize));
TF_ASSERT_OK(stream->BlockHostUntilDone());
const int64_t kMaxMismatches = 16;
int64_t mismatches = 0;
for (int64_t i = 0; i < host_buf.size(); ++i) {
if (mismatches == kMaxMismatches) {
ADD_FAILURE() << "Hit max number of mismatches; skipping others.";
break;
}
if (host_buf[i] != kRedzonePattern) {
++mismatches;
EXPECT_EQ(host_buf[i], kRedzonePattern)
<< "at index " << i << " of " << name << " redzone";
}
}
};
check_redzone(lhs_redzone, "lhs");
check_redzone(rhs_redzone, "rhs");
auto modify_redzone = [&](DeviceMemoryBase redzone, int64_t offset,
absl::string_view name) {
SCOPED_TRACE(absl::StrCat(name, ", offset=", offset));
DeviceMemoryBase redzone_at_offset(
reinterpret_cast<char*>(redzone.opaque()) + offset, 1);
char old_redzone_value = 0;
    EXPECT_REDZONE_OK(allocator.CheckRedzones());
TF_ASSERT_OK(stream->Memcpy(&old_redzone_value, redzone_at_offset, 1));
TF_ASSERT_OK(stream->MemZero(&redzone_at_offset, 1));
EXPECT_REDZONE_VIOLATION(allocator.CheckRedzones());
EXPECT_REDZONE_OK(allocator.CheckRedzones());
};
modify_redzone(lhs_redzone, 0, "lhs");
modify_redzone(lhs_redzone, kRedzoneSize - 1, "lhs");
modify_redzone(rhs_redzone, 0, "rhs");
modify_redzone(rhs_redzone, kRedzoneSize - 1, "rhs");
}
TEST(RedzoneAllocatorTest, VeryLargeRedzone) {
constexpr int64_t kRedzoneSize = 65535 * 1024 + 1;
Platform* platform =
PlatformManager::PlatformWithName(GpuPlatformName()).value();
StreamExecutor* stream_exec = platform->ExecutorForDevice(0).value();
GpuAsmOpts opts;
StreamExecutorMemoryAllocator se_allocator(platform, {stream_exec});
TF_ASSERT_OK_AND_ASSIGN(auto stream, stream_exec->CreateStream());
RedzoneAllocator allocator(stream.get(), &se_allocator, opts,
                             /*memory_limit=*/(1LL << 32),
                             /*redzone_size=*/kRedzoneSize,
                             /*redzone_pattern=*/-1);
(void)allocator.AllocateBytes(1);
EXPECT_REDZONE_OK(allocator.CheckRedzones());
}
}  // namespace gpu
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/stream_executor/gpu/redzone_allocator.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/stream_executor/gpu/redzone_allocator_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b556b66a-2f27-4a98-b434-a184b2ab59b1 | cpp | google/arolla | strings_buffer | arolla/memory/strings_buffer.cc | arolla/memory/strings_buffer_test.cc | #include "arolla/memory/strings_buffer.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <limits>
#include <tuple>
#include <utility>
#include "absl/log/check.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "arolla/memory/optional_value.h"
#include "arolla/memory/raw_buffer_factory.h"
#include "arolla/memory/simple_buffer.h"
#include "arolla/util/fingerprint.h"
namespace arolla {
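// Offsets and characters share a single raw buffer: the offsets array comes
// first, followed by character storage initially sized at 16 bytes per string
// and grown on demand by ResizeCharacters.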
StringsBuffer::Builder::Builder(int64_t max_size, RawBufferFactory* factory)
: factory_(factory) {
size_t initial_char_buffer_size = max_size * 16;
DCHECK_LT(initial_char_buffer_size, std::numeric_limits<offset_type>::max());
size_t offsets_size = max_size * sizeof(Offsets);
InitDataPointers(
factory->CreateRawBuffer(offsets_size + initial_char_buffer_size),
max_size, initial_char_buffer_size);
std::memset(offsets_.data(), 0, offsets_size);
}
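// When a non-empty default value is supplied, it is appended after the
// existing characters and every offset initially points at it; otherwise the
// offsets start zeroed.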
StringsBuffer::ReshuffleBuilder::ReshuffleBuilder(
int64_t max_size, const StringsBuffer& buffer,
const OptionalValue<absl::string_view>& default_value,
RawBufferFactory* buf_factory)
: offsets_bldr_(max_size, buf_factory),
old_offsets_(buffer.offsets()),
characters_(buffer.characters()),
base_offset_(buffer.base_offset()) {
if (default_value.present && !default_value.value.empty()) {
int64_t def_value_size = default_value.value.size();
offsets_bldr_.SetNConst(
0, max_size, {characters_.size(), def_value_size + characters_.size()});
SimpleBuffer<char>::Builder chars_bldr(characters_.size() + def_value_size,
buf_factory);
char* data = chars_bldr.GetMutableSpan().data();
std::memcpy(data, characters_.begin(), characters_.size());
std::memcpy(data + characters_.size(), default_value.value.begin(),
def_value_size);
characters_ = std::move(chars_bldr).Build();
} else {
std::memset(offsets_bldr_.GetMutableSpan().begin(), 0,
max_size * sizeof(Offsets));
}
}
StringsBuffer StringsBuffer::Builder::Build(int64_t size) && {
DCHECK_LE(size, offsets_.size());
if (num_chars_ != characters_.size()) {
ResizeCharacters(num_chars_);
}
SimpleBuffer<Offsets> offsets(buf_, offsets_.subspan(0, size));
SimpleBuffer<char> characters(std::move(buf_),
characters_.subspan(0, num_chars_));
return StringsBuffer(std::move(offsets), std::move(characters));
}
void StringsBuffer::Builder::ResizeCharacters(size_t new_size) {
DCHECK_LT(new_size, std::numeric_limits<offset_type>::max());
size_t offsets_size = offsets_.size() * sizeof(Offsets);
InitDataPointers(factory_->ReallocRawBuffer(std::move(buf_), offsets_.begin(),
offsets_size + characters_.size(),
offsets_size + new_size),
offsets_.size(), new_size);
}
void StringsBuffer::Builder::InitDataPointers(
std::tuple<RawBufferPtr, void*>&& buf, int64_t offsets_count,
int64_t characters_size) {
buf_ = std::move(std::get<0>(buf));
void* data = std::get<1>(buf);
offsets_ =
absl::Span<Offsets>(reinterpret_cast<Offsets*>(data), offsets_count);
characters_ = absl::Span<char>(
reinterpret_cast<char*>(data) + offsets_count * sizeof(Offsets),
characters_size);
}
StringsBuffer::StringsBuffer(SimpleBuffer<StringsBuffer::Offsets> offsets,
SimpleBuffer<char> characters,
offset_type base_offset)
: offsets_(std::move(offsets)),
characters_(std::move(characters)),
base_offset_(base_offset) {
for (int64_t i = 0; i < offsets_.size(); ++i) {
DCHECK_LE(base_offset_, offsets_[i].start);
DCHECK_LE(offsets_[i].start, offsets_[i].end);
DCHECK_LE(offsets_[i].end, base_offset_ + characters_.size());
}
}
bool StringsBuffer::operator==(const StringsBuffer& other) const {
if (this == &other) {
return true;
}
if (size() != other.size()) {
return false;
}
return std::equal(begin(), end(), other.begin());
}
StringsBuffer StringsBuffer::Slice(int64_t offset, int64_t count) const& {
if (count == 0) {
return StringsBuffer{};
}
return StringsBuffer{offsets_.Slice(offset, count), characters_,
base_offset_};
}
StringsBuffer StringsBuffer::Slice(int64_t offset, int64_t count) && {
if (count == 0) {
return StringsBuffer{};
}
return StringsBuffer{std::move(offsets_).Slice(offset, count),
std::move(characters_), base_offset_};
}
StringsBuffer StringsBuffer::ShallowCopy() const {
return StringsBuffer(offsets_.ShallowCopy(), characters_.ShallowCopy(),
base_offset_);
}
StringsBuffer StringsBuffer::DeepCopy(RawBufferFactory* buffer_factory) const {
if (size() == 0) {
return StringsBuffer{};
}
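  // Copy only the character range actually referenced by the offsets, and
  // rebase the copy at min_offset so a deep-copied slice does not drag along
  // the full original character storage.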
offset_type min_offset = offsets_[0].start;
offset_type max_offset = offsets_[0].end;
for (int64_t i = 1; i < size(); ++i) {
min_offset = std::min(min_offset, offsets_[i].start);
max_offset = std::max(max_offset, offsets_[i].end);
}
auto characters_slice =
characters_.Slice(min_offset - base_offset_, max_offset - min_offset);
return StringsBuffer(offsets_.DeepCopy(buffer_factory),
characters_slice.DeepCopy(buffer_factory), min_offset);
}
void FingerprintHasherTraits<StringsBuffer>::operator()(
FingerprintHasher* hasher, const StringsBuffer& value) const {
hasher->Combine(value.size());
if (!value.empty()) {
auto offsets_span = value.offsets().span();
hasher->CombineRawBytes(offsets_span.data(),
offsets_span.size() * sizeof(offsets_span[0]));
hasher->CombineSpan(value.characters().span());
}
}
} | #include <array>
#include <cstddef>
#include <initializer_list>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/hash/hash_testing.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "arolla/memory/buffer.h"
#include "arolla/util/fingerprint.h"
namespace arolla {
namespace {
using ::testing::ElementsAre;
using ::testing::ElementsAreArray;
using ::testing::IsEmpty;
using ::testing::Not;
class StringsBufferTest : public ::testing::Test {
public:
Buffer<std::string> CreateTestBuffer(int num_rows) {
std::vector<std::string> values(num_rows);
for (int i = 0; i < num_rows; i++) {
values[i] = absl::StrFormat("str%d", i);
}
return Buffer<std::string>::Create(values.begin(), values.end());
}
template <typename T>
Buffer<std::string> CreateTestBuffer(std::initializer_list<T> values) {
return Buffer<std::string>::Create(values.begin(), values.end());
}
};
TEST_F(StringsBufferTest, Simple) {
Buffer<std::string> buffer = CreateTestBuffer(4);
EXPECT_TRUE(buffer.is_owner());
EXPECT_THAT(buffer, ElementsAre("str0", "str1", "str2", "str3"));
EXPECT_EQ(buffer[0], "str0");
EXPECT_EQ(buffer[3], "str3");
}
TEST_F(StringsBufferTest, Empty) {
Buffer<std::string> buffer1 = CreateTestBuffer(0);
EXPECT_THAT(buffer1, IsEmpty());
Buffer<std::string> buffer2 = buffer1.DeepCopy();
EXPECT_THAT(buffer2, IsEmpty());
Buffer<std::string> buffer3;
EXPECT_THAT(buffer3, IsEmpty());
}
TEST_F(StringsBufferTest, Move) {
size_t num_rows = 4;
Buffer<std::string> buffer = CreateTestBuffer(num_rows);
EXPECT_TRUE(buffer.is_owner());
Buffer<std::string> buffer2 = std::move(buffer);
EXPECT_TRUE(buffer2.is_owner());
EXPECT_FALSE(buffer.is_owner());
EXPECT_THAT(buffer2, ElementsAre("str0", "str1", "str2", "str3"));
Buffer<std::string> buffer3;
EXPECT_TRUE(buffer3.is_owner());
buffer3 = std::move(buffer2);
EXPECT_TRUE(buffer3.is_owner());
EXPECT_FALSE(buffer2.is_owner());
EXPECT_THAT(buffer3, ElementsAre("str0", "str1", "str2", "str3"));
}
TEST_F(StringsBufferTest, MemoryUsage) {
EXPECT_EQ(sizeof(Buffer<StringsBuffer::Offsets>), 4 * sizeof(void*));
EXPECT_EQ(sizeof(Buffer<char>), 4 * sizeof(void*));
EXPECT_EQ(sizeof(Buffer<std::string>),
sizeof(Buffer<StringsBuffer::Offsets>) + sizeof(Buffer<char>) + 8);
for (size_t sz = 0; sz < 10; sz += 1) {
const size_t chars = sz * 4;
const size_t offsets = sz * sizeof(StringsBuffer::Offsets);
Buffer<std::string> buffer = CreateTestBuffer(sz);
EXPECT_EQ(chars + offsets, buffer.memory_usage());
}
}
TEST_F(StringsBufferTest, MoveSlice) {
size_t num_rows = 10;
Buffer<std::string> buffer = CreateTestBuffer(num_rows);
EXPECT_TRUE(buffer.is_owner());
buffer = std::move(buffer).Slice(0, 5);
EXPECT_TRUE(buffer.is_owner());
EXPECT_THAT(buffer, ElementsAre("str0", "str1", "str2", "str3", "str4"));
Buffer<std::string> buffer2 = std::move(buffer).Slice(2, 3);
EXPECT_TRUE(buffer2.is_owner());
EXPECT_FALSE(buffer.is_owner());
EXPECT_THAT(buffer2, ElementsAre("str2", "str3", "str4"));
}
TEST_F(StringsBufferTest, ShallowCopy) {
size_t num_rows = 10;
Buffer<std::string> buffer = CreateTestBuffer(num_rows);
Buffer<std::string> buffer_copy1 = buffer.ShallowCopy();
EXPECT_FALSE(buffer_copy1.is_owner());
EXPECT_EQ(buffer.begin(), buffer_copy1.begin());
EXPECT_EQ(buffer.end(), buffer_copy1.end());
EXPECT_THAT(buffer, ElementsAreArray(buffer_copy1));
Buffer<std::string> buffer_copy2 = buffer.Slice(5, 5);
EXPECT_THAT(buffer, Not(ElementsAreArray(buffer_copy2)));
EXPECT_TRUE(buffer_copy2.is_owner());
EXPECT_EQ(buffer[5], buffer_copy2[0]);
}
TEST_F(StringsBufferTest, DeepCopy) {
size_t num_rows = 5;
Buffer<std::string> buffer = CreateTestBuffer(num_rows);
Buffer<std::string> buffer_copy = buffer.DeepCopy();
Buffer<std::string> buffer_slice_copy = buffer.Slice(1, 3).DeepCopy();
buffer = Buffer<std::string>();
EXPECT_TRUE(buffer_copy.is_owner());
EXPECT_THAT(buffer_copy, ElementsAre("str0", "str1", "str2", "str3", "str4"));
EXPECT_TRUE(buffer_slice_copy.is_owner());
EXPECT_THAT(buffer_slice_copy, ElementsAre("str1", "str2", "str3"));
buffer_copy = buffer.DeepCopy();
EXPECT_THAT(buffer_copy, IsEmpty());
}
TEST_F(StringsBufferTest, EmptySlice) {
size_t num_rows = 10;
Buffer<std::string> buffer = CreateTestBuffer(num_rows);
Buffer<std::string> copy = buffer.Slice(3, 0);
EXPECT_THAT(copy, IsEmpty());
buffer = std::move(buffer).Slice(3, 0);
EXPECT_THAT(buffer, IsEmpty());
copy = buffer.Slice(0, 0);
EXPECT_THAT(copy, IsEmpty());
}
TEST_F(StringsBufferTest, HugeString) {
StringsBuffer::Builder builder(2);
builder.Set(0, "small string");
std::string huge_string;
for (int i = 0; i < 1000; ++i) huge_string.append("huge string; ");
builder.Set(1, huge_string);
StringsBuffer buffer = std::move(builder).Build(2);
EXPECT_EQ(buffer.size(), 2);
EXPECT_EQ(buffer[0], "small string");
EXPECT_EQ(buffer[1], huge_string);
}
TEST_F(StringsBufferTest, SupportsAbslHash) {
StringsBuffer empty;
std::array<absl::string_view, 5> values = {"one", "two", "three", "four",
"five"};
StringsBuffer test1 = StringsBuffer::Create(values.begin(), values.end());
StringsBuffer test2 = StringsBuffer::Create(values.rbegin(), values.rend());
EXPECT_TRUE(
absl::VerifyTypeImplementsAbslHashCorrectly({empty, test1, test2}));
}
TEST_F(StringsBufferTest, Fingerprint) {
std::array<absl::string_view, 5> values = {"one", "two", "three", "four",
"five"};
StringsBuffer test1 = StringsBuffer::Create(values.begin(), values.end());
StringsBuffer test2 = StringsBuffer::Create(values.begin(), values.end());
StringsBuffer test3 = StringsBuffer::Create(values.rbegin(), values.rend());
Fingerprint f1 = FingerprintHasher("salt").Combine(test1).Finish();
Fingerprint f2 = FingerprintHasher("salt").Combine(test2).Finish();
Fingerprint f3 = FingerprintHasher("salt").Combine(test3).Finish();
EXPECT_EQ(f1, f2);
EXPECT_NE(f1, f3);
}
TEST(StringsBufferBuilder, Inserter) {
Buffer<std::string>::Builder builder(10);
auto inserter = builder.GetInserter(1);
for (int i = 0; i < 4; ++i) inserter.Add(absl::StrFormat("str%d", i));
builder.Set(0, "aba");
auto buffer = std::move(builder).Build(inserter);
EXPECT_THAT(buffer, ElementsAre("aba", "str0", "str1", "str2", "str3"));
}
TEST(StringsBufferBuilder, InserterCord) {
Buffer<std::string>::Builder builder(10);
auto inserter = builder.GetInserter(1);
for (int i = 0; i < 4; ++i) {
inserter.Add(absl::Cord(absl::StrFormat("str%d", i)));
}
builder.Set(0, "aba");
auto buffer = std::move(builder).Build(inserter);
EXPECT_THAT(buffer, ElementsAre("aba", "str0", "str1", "str2", "str3"));
}
TEST(StringsBufferBuilder, Generator) {
Buffer<std::string>::Builder builder(10);
builder.SetNConst(0, 10, "default");
int i = 0;
builder.SetN(2, 3, [&]() { return absl::StrFormat("str%d", ++i); });
auto buffer = std::move(builder).Build(6);
EXPECT_THAT(buffer, ElementsAre("default", "default", "str1", "str2", "str3",
"default"));
}
TEST(StringsBufferBuilder, RandomAccess) {
Buffer<std::string>::Builder builder(10);
builder.Set(4, "s1");
builder.Set(2, "s2");
builder.Set(1, "s3");
builder.Set(0, "s4");
builder.Set(3, "s5");
builder.Set(1, "s6");
auto buffer = std::move(builder).Build(5);
EXPECT_THAT(buffer, ElementsAre("s4", "s6", "s2", "s5", "s1"));
}
TEST(StringsBufferBuilder, RandomAccessCord) {
Buffer<std::string>::Builder builder(10);
builder.Set(4, absl::Cord("s1"));
builder.Set(2, absl::Cord("s2"));
builder.Set(1, absl::Cord("s3"));
builder.Set(0, absl::Cord("s4"));
builder.Set(3, absl::Cord("s5"));
builder.Set(1, absl::Cord("s6"));
auto buffer = std::move(builder).Build(5);
EXPECT_THAT(buffer, ElementsAre("s4", "s6", "s2", "s5", "s1"));
}
TEST(StringsBufferBuilder, ReshuffleBuilder) {
auto buf = CreateBuffer<std::string>({"5v", "4ab", "3", "2", "1"});
{
Buffer<std::string>::ReshuffleBuilder bldr(7, buf, std::nullopt);
bldr.CopyValue(3, 1);
bldr.CopyValue(1, 2);
bldr.CopyValue(2, 0);
bldr.CopyValueToRange(4, 7, 0);
auto res = std::move(bldr).Build();
EXPECT_THAT(res, ElementsAre("", "3", "5v", "4ab", "5v", "5v", "5v"));
EXPECT_EQ(res.characters().begin(), buf.characters().begin());
}
{
Buffer<std::string>::ReshuffleBuilder bldr(4, buf, {true, ""});
bldr.CopyValue(3, 1);
bldr.CopyValue(1, 2);
bldr.CopyValue(2, 0);
auto res = std::move(bldr).Build();
EXPECT_THAT(res, ElementsAre("", "3", "5v", "4ab"));
EXPECT_EQ(res.characters().begin(), buf.characters().begin());
}
{
Buffer<std::string>::ReshuffleBuilder bldr(4, buf, {true, "0abc"});
bldr.CopyValue(3, 1);
bldr.CopyValue(1, 2);
bldr.CopyValue(2, 0);
auto res = std::move(bldr).Build();
EXPECT_THAT(res, ElementsAre("0abc", "3", "5v", "4ab"));
}
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/memory/strings_buffer.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/memory/strings_buffer_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
822ba1e5-6daf-48a0-96e7-6ab374d53667 | cpp | tensorflow/tensorflow | sanitize_constant_names | third_party/xla/xla/service/gpu/transforms/sanitize_constant_names.cc | third_party/xla/xla/service/gpu/transforms/sanitize_constant_names_test.cc | #include "xla/service/gpu/transforms/sanitize_constant_names.h"
#include <string>
#include "absl/container/flat_hash_set.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/llvm_ir/buffer_assignment_util.h"
#include "xla/service/name_uniquer.h"
#include "tsl/platform/logging.h"
namespace xla {
namespace gpu {
absl::StatusOr<bool> SanitizeConstantNames::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
NameUniquer instr_name_uniquer("_");
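  // First pass: reserve every non-constant instruction name so sanitized
  // constant names cannot collide with them.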
for (HloComputation* computation : module->computations(execution_threads)) {
for (HloInstruction* instr : computation->instructions()) {
if (instr->opcode() == HloOpcode::kConstant) {
continue;
}
instr_name_uniquer.GetUniqueName(instr->name());
}
}
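  // Second pass: rename constants to buffer-assignment-compatible names,
  // uniquified against everything reserved above and the module-level uniquer.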
for (HloComputation* computation : module->computations(execution_threads)) {
for (HloInstruction* instr : computation->instructions()) {
if (instr->opcode() != HloOpcode::kConstant) {
continue;
}
std::string sanitized_name = llvm_ir::SanitizeConstantName(*instr);
instr->SetAndSanitizeName(sanitized_name);
instr->UniquifyName(&instr_name_uniquer);
module->instruction_name_uniquer().GetUniqueName(instr->name());
changed = true;
}
}
return changed;
}
}
} | #include "xla/service/gpu/transforms/sanitize_constant_names.h"
#include <cstdint>
#include <memory>
#include <utility>
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/literal_util.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla {
namespace gpu {
namespace {
namespace m = ::xla::match;
using SanitizeConstantNamesTest = HloTestBase;
TEST_F(SanitizeConstantNamesTest, InstructionNameWithHyphenSanitized) {
const char *const kHloString = R"(
HloModule HyphenInInstructionName
ENTRY kernelEntry {
ROOT equal-to = s32[2]{0} constant({42, 73})
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloString));
EXPECT_TRUE(SanitizeConstantNames().Run(module.get()).value());
HloInstruction *root = module->entry_computation()->root_instruction();
EXPECT_EQ(root->name(), "equal_to");
}
TEST_F(SanitizeConstantNamesTest, InstructionNameWithDotSanitized) {
const char *const kHloString = R"(
HloModule HyphenInInstructionName
ENTRY kernelEntry {
ROOT equal.to = s32[2]{0} constant({42, 73})
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloString));
EXPECT_TRUE(SanitizeConstantNames().Run(module.get()).value());
HloInstruction *root = module->entry_computation()->root_instruction();
EXPECT_EQ(root->name(), "equal_to");
}
TEST_F(SanitizeConstantNamesTest, NewInstructionNameRegisteredWithModule) {
const char *const kHloString = R"(
HloModule HyphenInInstructionName
ENTRY kernelEntry {
ROOT equal.to = s32[2]{0} constant({42, 73})
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloString));
EXPECT_TRUE(SanitizeConstantNames().Run(module.get()).value());
HloInstruction *root = module->entry_computation()->root_instruction();
EXPECT_EQ(root->name(), "equal_to");
auto constant_instr =
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(1));
constant_instr->SetAndSanitizeName("equal_to");
module->entry_computation()->AddInstruction(std::move(constant_instr));
EXPECT_THAT(FindInstruction(module.get(), "equal_to.1"),
GmockMatch(m::Constant()));
}
TEST_F(SanitizeConstantNamesTest, BufferSanitizedNameCollisionResolved) {
const char *const kHloString = R"(
HloModule BufferSanitizedName
ENTRY kernelEntry {
equal.to = s32[2]{0} constant({42, 73})
equal-to = s32[2]{0} constant({67, 3})
ROOT equal_to = s32[2]{0} add(equal.to, equal-to)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloString));
EXPECT_TRUE(SanitizeConstantNames().Run(module.get()).value());
EXPECT_THAT(FindInstruction(module.get(), "equal_to_1"),
GmockMatch(m::Constant()));
EXPECT_THAT(FindInstruction(module.get(), "equal_to_2"),
GmockMatch(m::Constant()));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/sanitize_constant_names.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/sanitize_constant_names_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
229b23b1-a820-4bd3-ae84-ec9d912fadd4 | cpp | tensorflow/tensorflow | infeed_thunk | third_party/xla/xla/service/gpu/runtime/infeed_thunk.cc | third_party/xla/xla/backends/cpu/runtime/infeed_thunk_test.cc | #include "xla/service/gpu/runtime/infeed_thunk.h"
#include <cstddef>
#include <utility>
#include <vector>
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "xla/service/gpu/buffer_allocations.h"
#include "xla/service/gpu/gpu_transfer_manager.h"
#include "xla/service/gpu/infeed_manager.h"
#include "xla/service/gpu/runtime/thunk.h"
#include "xla/shape.h"
#include "xla/shape_tree.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/device_memory_handle.h"
#include "xla/stream_executor/stream_executor.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
namespace xla {
namespace gpu {
InfeedThunk::InfeedThunk(ThunkInfo thunk_info,
std::vector<ShapedSlice> dest_slices)
: Thunk(Kind::kInfeed, thunk_info), dest_slices_(std::move(dest_slices)) {}
absl::Status InfeedThunk::ExecuteOnStream(const ExecuteParams& params) {
se::Stream& stream = *params.stream;
const BufferAllocations& buffer_allocations = *params.buffer_allocations;
VLOG(2) << "Infeeding to GPU";
ShapeTree<se::DeviceMemoryHandle> source_buffers =
GpuTransferManager::GetOrCreateInfeedManager(stream.parent())
->BlockingGetNextDestination();
size_t index = 0;
for (auto& source : source_buffers.leaves()) {
const ShapeIndex& shape_index = source.first;
se::DeviceMemoryHandle& buffer = source.second;
const Shape& source_shape =
ShapeUtil::GetSubshape(source_buffers.shape(), shape_index);
TF_RET_CHECK(
ShapeUtil::ReshapeIsBitcast(dest_slices_[index].shape, source_shape))
<< "Mismatch between infeed source buffer shape "
<< ShapeUtil::HumanStringWithLayout(source_shape)
<< " and infeed dest buffer shape "
<< ShapeUtil::HumanStringWithLayout(dest_slices_[index].shape);
se::DeviceMemoryBase dest_address =
buffer_allocations.GetDeviceAddress(dest_slices_[index++].slice);
TF_RETURN_IF_ERROR(
stream.Memcpy(&dest_address, buffer.memory(), buffer.memory().size()));
}
CHECK_EQ(index, dest_slices_.size())
<< "Infeed did not populate all destination buffers";
absl::Status block_status = stream.BlockHostUntilDone();
if (!block_status.ok()) {
return Internal("Failed to complete data transfer on stream %p: %s",
&stream, block_status.message());
}
VLOG(2) << "Infeeding to GPU complete";
return absl::OkStatus();
}
}
} | #include "xla/backends/cpu/runtime/infeed_thunk.h"
#include <memory>
#include "xla/backends/cpu/runtime/resource_use.h"
#include "xla/backends/cpu/runtime/thunk.h"
#include "xla/runtime/buffer_use.h"
#include "xla/service/buffer_assignment.h"
#include "xla/shape_util.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla::cpu {
namespace {
TEST(InfeedThunkTest, BufferAndResourceUses) {
BufferAllocation alloc(0, 1024, 0);
BufferAllocation::Slice infeed_slice(&alloc, 10, 40);
InfeedThunk::InfeedBuffer infeed_buffer = {
infeed_slice,
ShapeUtil::MakeShape(F32, {10}),
};
auto consume_token = Resource::Create(Resource::kToken);
auto produce_token = Resource::Create(Resource::kToken);
TF_ASSERT_OK_AND_ASSIGN(auto thunk,
InfeedThunk::Create({"infeed"}, {infeed_buffer},
{consume_token, produce_token}));
EXPECT_EQ(thunk->buffer_uses().size(), 1);
EXPECT_EQ(thunk->buffer_uses()[0], BufferUse::Write(infeed_slice));
EXPECT_EQ(thunk->resource_uses().size(), 2);
EXPECT_EQ(thunk->resource_uses()[0], ResourceUse::Read(consume_token));
EXPECT_EQ(thunk->resource_uses()[1], ResourceUse::Write(produce_token));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/runtime/infeed_thunk.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/backends/cpu/runtime/infeed_thunk_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
272d3b74-3a55-4eba-a682-59190d83978f | cpp | tensorflow/tensorflow | distribution_sampler | third_party/xla/xla/tsl/lib/random/distribution_sampler.cc | third_party/xla/xla/tsl/lib/random/distribution_sampler_test.cc | #include "xla/tsl/lib/random/distribution_sampler.h"
#include <memory>
#include <vector>
#include "absl/types/span.h"
namespace tsl {
namespace random {
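// Builds a Walker/Vose alias table: every bucket i stores a probability
// prob(i) and an alternate index alt(i), so sampling is O(1) — draw a bucket
// uniformly, then return either i or alt(i) according to prob(i).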
DistributionSampler::DistributionSampler(
const absl::Span<const float> weights) {
DCHECK(!weights.empty());
int n = weights.size();
num_ = n;
data_.reset(new std::pair<float, int>[n]);
std::unique_ptr<double[]> pr(new double[n]);
double sum = 0.0;
for (int i = 0; i < n; i++) {
sum += weights[i];
set_alt(i, -1);
}
std::vector<int> high;
high.reserve(n);
std::vector<int> low;
low.reserve(n);
for (int i = 0; i < n; i++) {
double p = (weights[i] * n) / sum;
pr[i] = p;
if (p < 1.0) {
low.push_back(i);
} else {
high.push_back(i);
}
}
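  // Pair underfull buckets (scaled p < 1) with overfull ones: the overfull
  // bucket becomes the underfull bucket's alternate, and its leftover mass
  // decides which worklist it goes back onto.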
while (!high.empty() && !low.empty()) {
int l = low.back();
low.pop_back();
int h = high.back();
high.pop_back();
set_alt(l, h);
DCHECK_GE(pr[h], 1.0);
double remaining = pr[h] - (1.0 - pr[l]);
pr[h] = remaining;
if (remaining < 1.0) {
low.push_back(h);
} else {
high.push_back(h);
}
}
for (int i = 0; i < n; i++) {
set_prob(i, pr[i]);
}
for (size_t i = 0; i < high.size(); i++) {
int idx = high[i];
set_prob(idx, 1.0);
set_alt(idx, idx);
}
for (size_t i = 0; i < low.size(); i++) {
int idx = low[i];
set_prob(idx, 1.0);
set_alt(idx, idx);
}
}
}
} | #include "xla/tsl/lib/random/distribution_sampler.h"
#include <string.h>
#include <memory>
#include <vector>
#include "xla/tsl/lib/random/simple_philox.h"
#include "tsl/platform/macros.h"
#include "tsl/platform/test.h"
#include "tsl/platform/test_benchmark.h"
#include "tsl/platform/types.h"
namespace tsl {
namespace random {
class DistributionSamplerTest : public ::testing::Test {
protected:
float TestWeights(const std::vector<float>& weights, int trials_per_bin) {
int iters = weights.size() * trials_per_bin;
std::unique_ptr<float[]> counts(new float[weights.size()]);
memset(counts.get(), 0, sizeof(float) * weights.size());
DistributionSampler sampler(weights);
PhiloxRandom philox(testing::RandomSeed(), 17);
SimplePhilox random(&philox);
for (int i = 0; i < iters; i++) {
int r = sampler.Sample(&random);
EXPECT_LT(r, weights.size());
EXPECT_GE(r, 0);
counts[r] += 1.0;
}
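    // Chi-squared-style distance between the empirical frequencies and the
    // target weights; small values mean the sampler tracks the distribution.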
float chi2 = 0.0;
for (size_t i = 0; i < weights.size(); i++) {
counts[i] /= iters;
float err = (counts[i] - weights[i]);
chi2 += (err * err) / weights[i];
}
return chi2;
}
void TestDistribution(float* arr, int n) {
std::vector<float> w;
w.reserve(n);
for (int i = 0; i < n; i++) {
w.push_back(arr[i]);
}
float var = TestWeights(w, 1000);
if (var < 0.001) return;
var = TestWeights(w, 100000);
if (var < 0.001) return;
EXPECT_TRUE(false) << "Chi2 is " << var << " in " << n * 100000
<< "iterations";
}
};
TEST_F(DistributionSamplerTest, KnownDistribution) {
float kEven2[] = {0.5, 0.5};
float kEven3[] = {0.33333333, 0.33333333, 0.33333333};
float kEven4[] = {0.25, 0.25, 0.25, 0.25};
float kDist1[] = {0.8, 0.15, 0.05};
TestDistribution(kEven2, TF_ARRAYSIZE(kEven2));
TestDistribution(kEven3, TF_ARRAYSIZE(kEven3));
TestDistribution(kEven4, TF_ARRAYSIZE(kEven4));
TestDistribution(kDist1, TF_ARRAYSIZE(kDist1));
}
static void BM_DistributionSampler(::testing::benchmark::State& state) {
const int n = state.range(0);
PhiloxRandom philox(173, 371);
SimplePhilox rand(&philox);
std::vector<float> weights(n, 0);
for (int i = 0; i < n; i++) {
weights[i] = rand.Uniform(100);
}
DistributionSampler picker(weights);
int r = 0;
for (auto s : state) {
r |= picker.Sample(&rand);
}
CHECK_NE(r, kint32max);
}
BENCHMARK(BM_DistributionSampler)->Arg(10)->Arg(100)->Arg(1000);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/lib/random/distribution_sampler.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/lib/random/distribution_sampler_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
82cb7774-5af3-4c61-8b54-eb605a8ae3f4 | cpp | tensorflow/tensorflow | triton_emitter_constraints | third_party/xla/xla/service/gpu/model/triton_emitter_constraints.cc | third_party/xla/xla/service/gpu/model/triton_emitter_constraints_test.cc | #include "xla/service/gpu/model/triton_emitter_constraints.h"
#include <cstdint>
#include <memory>
#include <optional>
#include <utility>
#include <vector>
#include "absl/log/check.h"
#include "absl/memory/memory.h"
#include "absl/status/statusor.h"
#include "absl/types/span.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/MathExtras.h"
#include "mlir/IR/AffineMap.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/gpu/hlo_traversal.h"
#include "xla/service/gpu/model/affine_map_evaluator.h"
#include "xla/service/gpu/model/indexing_analysis.h"
#include "xla/service/gpu/model/indexing_map.h"
#include "xla/service/gpu/model/symbolic_tile.h"
#include "xla/service/gpu/model/symbolic_tile_analysis.h"
#include "xla/service/gpu/model/symbolic_tiled_hlo_instruction.h"
#include "xla/stream_executor/device_description.h"
namespace xla {
namespace gpu {
namespace {
constexpr int64_t kMaxTensorNumElements = 1048576;
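// Triton pads every tile dimension up to the next power of two, so the
// constraint checks below are evaluated on the padded sizes rather than the
// requested ones.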
llvm::SmallVector<int64_t> GetPaddedTileSizes(
llvm::SmallVector<int64_t> tile_sizes) {
llvm::SmallVector<int64_t> result;
result.reserve(tile_sizes.size());
for (int64_t value : tile_sizes) {
result.push_back(llvm::PowerOf2Ceil(value));
}
return result;
}
}
std::vector<TritonEmitterConstraints::CustomConstraints>
TritonEmitterConstraints::DeriveCustomConstraints(
const std::vector<std::unique_ptr<SymbolicTiledHloInstruction>>&
instructions,
const HloFusionAdaptor& fusion_adaptor) {
std::vector<CustomConstraints> result;
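  // Reshapes and bitcasts restrict which tile shapes can propagate through
  // them; derive those constraints from the symbolic tile of the op's
  // output-to-input indexing map and record them against the op's size map.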
for (const auto& instruction : instructions) {
const HloInstruction* hlo = instruction->hlo();
if (hlo->opcode() == HloOpcode::kReshape ||
hlo->opcode() == HloOpcode::kBitcast) {
if (!fusion_adaptor.ContainsInstruction(hlo)) {
continue;
}
mlir::MLIRContext* ctx =
instruction->symbolic_tile().size_map().getContext();
IndexingMap reshape_indexing_map =
*ComputeOutputToInputIndexing(hlo, 0, ctx)
.indexing_maps[0]
.begin();
std::optional<SymbolicTile> reshape_symbolic_tile =
SymbolicTile::FromIndexingMap(reshape_indexing_map);
CHECK(reshape_symbolic_tile.has_value());
ConstraintExpression reshape_constraints =
reshape_symbolic_tile->constraints();
result.push_back(
CustomConstraints{instruction->symbolic_tile().size_map(),
std::move(reshape_constraints)});
}
}
return result;
}
EmitterSpecificConstraintsBuilder
TritonEmitterConstraints::GetBuilder(
const se::DeviceDescription& device_description) {
return [=](const std::vector<std::unique_ptr<SymbolicTiledHloInstruction>>&
instructions,
const HloFusionAdaptor& fusion_adaptor) {
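    // Many tiled instructions share the same affine size map; deduplicate so
    // each unique map is checked only once per candidate tiling.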
llvm::DenseSet<mlir::AffineMap> unique_tile_size_maps;
for (const auto& tiled_hlo_instruction : instructions) {
unique_tile_size_maps.insert(
tiled_hlo_instruction->symbolic_tile().size_map());
}
std::vector<CustomConstraints> custom_constraints =
DeriveCustomConstraints(instructions, fusion_adaptor);
llvm::SmallVector<mlir::AffineMap, 4> tile_size_maps(
unique_tile_size_maps.begin(), unique_tile_size_maps.end());
return std::unique_ptr<TritonEmitterConstraints>(
absl::WrapUnique(new TritonEmitterConstraints(
std::move(tile_size_maps), std::move(custom_constraints),
instructions.back()->hlo()->shape(),
device_description)));
};
}
absl::StatusOr<bool> TritonEmitterConstraints::ParametersSatisfyConstraints(
absl::Span<const int64_t> tile_parameters) const {
for (const auto& tile_size_map : tile_size_maps_) {
int64_t tile_size = 1;
for (auto expr : tile_size_map.getResults()) {
tile_size *= llvm::PowerOf2Ceil(
EvaluateAffineExpr(expr, tile_parameters));
}
if (tile_size > kMaxTensorNumElements) {
return false;
}
}
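  // The launch grid must also fit the device: the total tile count
  // (ceil-division per dimension) has to stay strictly below the hardware
  // block-dimension limit.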
int64_t num_tiles = 1;
for (auto [dim_size, tile_size] :
llvm::zip(root_shape_.dimensions(), tile_parameters)) {
num_tiles *= (dim_size + tile_size - 1) / tile_size;
}
if (num_tiles >= device_info_.block_dim_limit().x) {
return false;
}
for (const auto& custom_constraint : custom_constraints_) {
llvm::SmallVector<int64_t> transformed_tile_parameters =
EvaluateAffineMap(custom_constraint.tile_parameters_transform,
tile_parameters);
if (!custom_constraint.constraints.IsSatisfiedBy(
GetPaddedTileSizes(transformed_tile_parameters))) {
return false;
}
}
return true;
}
}
} | #include "xla/service/gpu/model/triton_emitter_constraints.h"
#include <memory>
#include <optional>
#include <utility>
#include <variant>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/log/log.h"
#include "mlir/IR/MLIRContext.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/gpu/gpu_device_info_for_tests.h"
#include "xla/service/gpu/hlo_traversal.h"
#include "xla/service/gpu/model/symbolic_tile_analysis.h"
#include "xla/service/instruction_fusion.h"
#include "xla/stream_executor/device_description.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/verified_hlo_module.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla {
namespace gpu {
namespace {
using ::tsl::testing::IsOkAndHolds;
class TritonEmitterConstraintsTest : public HloTestBase {
public:
std::optional<SymbolicTileAnalysis> TryAnalyzeModule(
HloModule* module, bool with_triton_emitter_specific_constraints = true) {
EmitterSpecificConstraintsBuilder constraints_builder = nullptr;
if (with_triton_emitter_specific_constraints) {
constraints_builder =
TritonEmitterConstraints::GetBuilder(device_description_);
}
SymbolicTileAnalysisOrError analysis_or_error =
SymbolicTileAnalysis::AnalyzeComputation(
*module->entry_computation()
->root_instruction()
->fused_instructions_computation(),
&mlir_context_, constraints_builder);
if (std::holds_alternative<SymbolicTileAnalysis>(analysis_or_error)) {
return std::get<SymbolicTileAnalysis>(std::move(analysis_or_error));
}
VLOG(1) << "Cannot analyze module: "
<< std::get<FusionDecision>(analysis_or_error).Explain();
return std::nullopt;
}
mlir::MLIRContext mlir_context_;
se::DeviceDescription device_description_ =
TestGpuDeviceInfo::RTXA6000DeviceInfo();
};
TEST_F(TritonEmitterConstraintsTest, TooBigTileSizesConstraintIsEnforced) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
HloModule m
max_computation {
param_0 = f32[] parameter(0)
param_1 = f32[] parameter(1)
ROOT maximum = f32[] maximum(param_0, param_1)
}
fused_computation {
param_0 = f32[8192,50304] parameter(0)
constant = f32[] constant(-inf)
reduce = f32[8192] reduce(param_0, constant), dimensions={1}, to_apply=max_computation
broadcast = f32[8192,50304] broadcast(reduce), dimensions={0}
ROOT subtract = f32[8192,50304] subtract(param_0, broadcast)
}
ENTRY entry_computation {
param_0 = f32[8192,50304] parameter(0)
ROOT fusion = f32[8192,50304] fusion(param_0), kind=kCustom, calls=fused_computation, backend_config={"fusion_backend_config":{"kind":"__triton"}}
}
)"));
std::optional<SymbolicTileAnalysis> analysis = TryAnalyzeModule(module.get());
ASSERT_TRUE(analysis.has_value());
EXPECT_THAT(analysis->ParametersSatisfyConstraints({8, 128}),
IsOkAndHolds(true));
EXPECT_THAT(analysis->ParametersSatisfyConstraints({18, 50304}),
IsOkAndHolds(false));
EXPECT_THAT(analysis->ParametersSatisfyConstraints({1024, 1}),
IsOkAndHolds(false));
}
TEST_F(TritonEmitterConstraintsTest, TooManyBlocksConstraintIsEnforced) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
HloModule m
max_computation {
param_0 = f32[] parameter(0)
param_1 = f32[] parameter(1)
ROOT maximum = f32[] maximum(param_0, param_1)
}
fused_computation {
param_0 = f32[65536,65536] parameter(0)
ROOT log = f32[65536,65536] log(param_0)
}
ENTRY entry_computation {
param_0 = f32[65536,65536] parameter(0)
ROOT fusion = f32[65536,65536] fusion(param_0), kind=kCustom, calls=fused_computation, backend_config={"fusion_backend_config":{"kind":"__triton"}}
}
)"));
std::optional<SymbolicTileAnalysis> analysis = TryAnalyzeModule(module.get());
ASSERT_TRUE(analysis.has_value());
EXPECT_THAT(analysis->ParametersSatisfyConstraints({128, 128}),
IsOkAndHolds(true));
EXPECT_THAT(analysis->ParametersSatisfyConstraints({1, 1}),
IsOkAndHolds(false));
}
TEST_F(TritonEmitterConstraintsTest, CustomReshapeConstraintsAreEnforced) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
triton_computation {
p = s8[36] parameter(0)
ROOT bitcast = s8[6,6] bitcast(p)
}
ENTRY entry_computation {
p = s8[36] parameter(0)
ROOT fusion = s8[6,6] fusion(p), kind=kCustom, calls=triton_computation
})"));
std::optional<SymbolicTileAnalysis> analysis_without_triton_constraints =
      TryAnalyzeModule(module.get(),
                       /*with_triton_emitter_specific_constraints=*/false);
ASSERT_TRUE(analysis_without_triton_constraints.has_value());
EXPECT_THAT(
analysis_without_triton_constraints->ParametersSatisfyConstraints({2, 6}),
IsOkAndHolds(true));
std::optional<SymbolicTileAnalysis> analysis_with_triton_constraints =
      TryAnalyzeModule(module.get(),
                       /*with_triton_emitter_specific_constraints=*/true);
ASSERT_TRUE(analysis_with_triton_constraints.has_value());
EXPECT_THAT(
analysis_with_triton_constraints->ParametersSatisfyConstraints({2, 6}),
IsOkAndHolds(false));
EXPECT_THAT(
analysis_with_triton_constraints->ParametersSatisfyConstraints({1, 6}),
IsOkAndHolds(true));
}
TEST_F(TritonEmitterConstraintsTest,
ReshapeConstraintsAreNotDerivedForFusionOperands) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
triton_computation {
p = s8[6,6] parameter(0)
ROOT add = s8[6,6] add(p, p)
}
ENTRY entry_computation {
p = s8[36] parameter(0)
bitcast = s8[6,6] bitcast(p)
ROOT fusion = s8[6,6] fusion(bitcast),
kind=kCustom, calls=triton_computation
})"));
std::optional<SymbolicTileAnalysis> analysis = TryAnalyzeModule(module.get());
ASSERT_TRUE(analysis.has_value());
const HloComputation* triton_computation =
FindComputation(module.get(), "triton_computation");
std::unique_ptr<EmitterSpecificConstraints> constraints =
TritonEmitterConstraints::GetBuilder(device_description_)(
analysis->GetSymbolicTiledHloComputation(),
*HloFusionAdaptor::ForComputation(triton_computation));
EXPECT_FALSE(reinterpret_cast<TritonEmitterConstraints*>(constraints.get())
->HasCustomConstraints());
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/model/triton_emitter_constraints.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/model/triton_emitter_constraints_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ba632a46-406b-400f-a370-41f10b15368d | cpp | tensorflow/tensorflow | device_resolver_local | tensorflow/core/common_runtime/device_resolver_local.cc | tensorflow/core/common_runtime/device_resolver_local_test.cc | #include "tensorflow/core/common_runtime/device_resolver_local.h"
#include "absl/status/status.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/platform/errors.h"
namespace tensorflow {
Status DeviceResolverLocal::GetDeviceAttributes(const string& device,
DeviceAttributes* attributes) {
Device* dev;
Status s = dev_mgr_->LookupDevice(device, &dev);
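  // DeviceMgr reports an unknown device name as InvalidArgument; remap it to
  // NotFound so callers can distinguish a missing device from a bad request.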
if (absl::IsInvalidArgument(s)) {
return errors::NotFound(device, " not found");
} else if (!s.ok()) {
return s;
}
*attributes = dev->attributes();
return absl::OkStatus();
}
Status DeviceResolverLocal::GetAllDeviceAttributes(
const string& task, std::vector<DeviceAttributes>* attributes) {
return errors::Internal(
"GetTaskCached is not supposed to be called in local collectives");
}
Status DeviceResolverLocal::UpdateDeviceAttributes(
const std::vector<DeviceAttributes>& attributes) {
return errors::Internal(
"UpdateDeviceAttributes shouldn't be called with local collectives");
}
} | #include "tensorflow/core/common_runtime/device_resolver_local.h"
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/common_runtime/device_factory.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/public/session_options.h"
namespace tensorflow {
namespace {
#define NUM_DEVS 3
class DeviceResolverLocalTest : public ::testing::Test {
protected:
DeviceResolverLocalTest() {
SessionOptions options;
string task_name = "/job:localhost/replica:0/task:0";
auto* device_count = options.config.mutable_device_count();
device_count->insert({"CPU", NUM_DEVS});
std::vector<std::unique_ptr<Device>> devices;
TF_CHECK_OK(DeviceFactory::AddDevices(options, task_name, &devices));
device_mgr_ = std::make_unique<StaticDeviceMgr>(std::move(devices));
drl_.reset(new DeviceResolverLocal(device_mgr_.get()));
}
std::unique_ptr<DeviceMgr> device_mgr_;
std::unique_ptr<DeviceResolverLocal> drl_;
};
TEST_F(DeviceResolverLocalTest, GetDeviceAttributesKnown) {
DeviceAttributes attributes;
TF_EXPECT_OK(drl_->GetDeviceAttributes(
"/job:localhost/replica:0/task:0/device:CPU:1", &attributes));
EXPECT_EQ(attributes.name(), "/job:localhost/replica:0/task:0/device:CPU:1");
}
TEST_F(DeviceResolverLocalTest, GetDeviceAttributesUnknown) {
DeviceAttributes attributes;
EXPECT_TRUE(errors::IsNotFound(drl_->GetDeviceAttributes(
"/job:localhost/replica:0/task:0/device:CPU:9", &attributes)));
}
TEST_F(DeviceResolverLocalTest, GetAllDeviceAttributes) {
std::vector<DeviceAttributes> attributes;
EXPECT_TRUE(errors::IsInternal(
      drl_->GetAllDeviceAttributes(/*task=*/"", &attributes)));
}
TEST_F(DeviceResolverLocalTest, UpdateDeviceAttributes) {
std::vector<DeviceAttributes> attributes;
EXPECT_TRUE(errors::IsInternal(drl_->UpdateDeviceAttributes(attributes)));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/device_resolver_local.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/device_resolver_local_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
eb2df994-4722-456c-b7b4-40583fe8b690 | cpp | google/tsl | stringpiece | tsl/platform/stringpiece.h | tsl/platform/stringpiece_test.cc | #ifndef TENSORFLOW_TSL_PLATFORM_STRINGPIECE_H_
#define TENSORFLOW_TSL_PLATFORM_STRINGPIECE_H_
#include "absl/base/macros.h"
#include "absl/strings/string_view.h"
#ifndef ABSL_DEPRECATE_AND_INLINE
#define ABSL_DEPRECATE_AND_INLINE()
#endif
namespace tsl {
using StringPiece ABSL_DEPRECATE_AND_INLINE() = absl::string_view;
}
#endif | #include "tsl/platform/stringpiece.h"
#include <unordered_map>
#include "tsl/platform/test.h"
namespace tsl {
TEST(StringPiece, Ctor) {
{
const char* hello = "hello";
absl::string_view s20(hello);
EXPECT_TRUE(s20.data() == hello);
EXPECT_EQ(5, s20.size());
absl::string_view s21(hello, 4);
EXPECT_TRUE(s21.data() == hello);
EXPECT_EQ(4, s21.size());
absl::string_view s22(hello, 6);
EXPECT_TRUE(s22.data() == hello);
EXPECT_EQ(6, s22.size());
}
{
string hola = "hola";
absl::string_view s30(hola);
EXPECT_TRUE(s30.data() == hola.data());
EXPECT_EQ(4, s30.size());
hola.push_back('\0');
hola.append("h2");
hola.push_back('\0');
absl::string_view s31(hola);
EXPECT_TRUE(s31.data() == hola.data());
EXPECT_EQ(8, s31.size());
}
}
TEST(StringPiece, ConversionToString) {
EXPECT_EQ("", string(absl::string_view("")));
EXPECT_EQ("foo", string(absl::string_view("foo")));
}
} | https://github.com/google/tsl/blob/6d708fdcdd4f40537b7fa273371215a6fa3d4423/tsl/platform/stringpiece.h | https://github.com/google/tsl/blob/6d708fdcdd4f40537b7fa273371215a6fa3d4423/tsl/platform/stringpiece_test.cc | 6d708fdcdd4f40537b7fa273371215a6fa3d4423 |
ed7f002c-4012-480a-8b57-224f7c1db26b | cpp | tensorflow/tensorflow | enable_gradient_descent | tensorflow/core/grappler/optimizers/data/enable_gradient_descent.cc | tensorflow/core/grappler/optimizers/data/enable_gradient_descent_test.cc | #include "tensorflow/core/grappler/optimizers/data/enable_gradient_descent.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/grappler/clusters/cluster.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/mutable_graph_view.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/platform/protobuf.h"
namespace tensorflow {
namespace grappler {
namespace {
constexpr char kAlgorithm[] = "algorithm";
constexpr char kModelDataset[] = "ModelDataset";
constexpr int64_t HILL_CLIMB = 0;
constexpr int64_t GRADIENT_DESCENT = 1;
}
Status EnableGradientDescent::OptimizeAndCollectStats(
Cluster* cluster, const GrapplerItem& item, GraphDef* output,
OptimizationStats* stats) {
*output = item.graph;
if (!autotune_) {
VLOG(1) << "The optimization enable_gradient_descent is not applied if "
"autotune is off.";
return absl::OkStatus();
}
MutableGraphView graph(output);
if (graph_utils::IsItemDerivedFromFunctionDef(item, graph))
return absl::OkStatus();
int index = graph_utils::FindGraphNodeWithOp(kModelDataset, *output);
NodeDef& model_node = *(output->mutable_node(index));
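  // Flip the autotuning algorithm on the ModelDataset node from the default
  // hill-climb (0) to gradient descent (1); leave it untouched otherwise.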
if (model_node.attr().at(kAlgorithm).i() == HILL_CLIMB) {
(*model_node.mutable_attr())[kAlgorithm].set_i(GRADIENT_DESCENT);
stats->num_changes++;
}
return absl::OkStatus();
}
REGISTER_GRAPH_OPTIMIZER_AS(EnableGradientDescent, "enable_gradient_descent");
}
} | #include "tensorflow/core/grappler/optimizers/data/enable_gradient_descent.h"
#include "tensorflow/core/framework/attr_value_util.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/optimizers/data/graph_test_utils.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace {
Status OptimizeWithEnableGradientDescent(const GrapplerItem &item,
GraphDef *output, bool autotune) {
EnableGradientDescent optimizer;
RewriterConfig_CustomGraphOptimizer config;
if (autotune) {
(*config.mutable_parameter_map())["autotune"].set_s("true");
} else {
(*config.mutable_parameter_map())["autotune"].set_s("false");
}
TF_RETURN_IF_ERROR(optimizer.Init(&config));
return optimizer.Optimize(nullptr, item, output);
}
class SimpleRewrite
: public ::testing::TestWithParam<std::tuple<bool, int64_t, string>> {};
TEST_P(SimpleRewrite, EnableGradientDescentTest) {
const bool autotune = std::get<0>(GetParam());
const int64_t algorithm_index = std::get<1>(GetParam());
const string op = std::get<2>(GetParam());
using test::function::NDef;
GrapplerItem item;
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"start", "stop", "step"}, {}),
NDef("batch_size", "Const", {}, {{"value", 5}, {"dtype", DT_INT32}}),
NDef("batch", "BatchDataset", {"range", "batch_size"}, {}),
NDef("model", "ModelDataset", {"batch"},
{{"algorithm", algorithm_index}}),
NDef("Sink", op, {"model"}, {})});
item.fetch.push_back("Sink");
GraphDef output;
TF_ASSERT_OK(OptimizeWithEnableGradientDescent(item, &output, autotune));
EXPECT_EQ(item.graph.node().size(), output.node().size());
NodeDef model_node =
output.node(graph_utils::FindGraphNodeWithName("model", output));
EXPECT_EQ(model_node.attr().at("algorithm").i(),
(autotune && op != "_Retval") ? 1 : algorithm_index);
}
INSTANTIATE_TEST_SUITE_P(
Test, SimpleRewrite,
::testing::Combine(::testing::Values(false, true), ::testing::Values(0, 1),
::testing::Values("Identity", "_Retval")));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/data/enable_gradient_descent.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/data/enable_gradient_descent_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
0777f39b-1bf7-4e05-bb83-f4957bcc97b6 | cpp | tensorflow/tensorflow | pgle_accuracy_checker | third_party/xla/xla/service/gpu/transforms/pgle_accuracy_checker.cc | third_party/xla/xla/service/gpu/transforms/pgle_accuracy_checker_test.cc | #include "xla/service/gpu/transforms/pgle_accuracy_checker.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_module.h"
#include "tsl/platform/errors.h"
namespace xla::gpu {
absl::StatusOr<bool> PGLEAccuracyChecker::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
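  // Pure validation pass: CheckAccuracy surfaces instructions missing from
  // the FDO profile as an error; the module itself is never modified.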
TF_RETURN_IF_ERROR(pgle_estimator_.CheckAccuracy(*module));
return false;
}
} | #include "xla/service/gpu/transforms/pgle_accuracy_checker.h"
#include <memory>
#include <string>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/gpu/gpu_latency_hiding_scheduler.h"
#include "xla/service/latency_hiding_scheduler.h"
#include "xla/service/profile_guided_latency_estimator.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/protobuf.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
namespace xla::gpu {
namespace {
using PGLEAccuracyCheckerTest = HloTestBase;
using ::tensorflow::profiler::ProfiledInstructionsProto;
using ::tsl::protobuf::TextFormat;
using ::tsl::testing::StatusIs;
std::unique_ptr<ProfileGuidedLatencyEstimator> GetProfileGuidedLatencyEstimator(
ProfiledInstructionsProto& profile) {
auto gpu_latency_estimator =
std::make_unique<GpuLatencyEstimator>(8);
SchedulerConfig config;
auto aggregator = std::make_unique<GPUProfileStatisticsAggregator>();
return std::make_unique<ProfileGuidedLatencyEstimator>(
config, std::move(gpu_latency_estimator), profile, std::move(aggregator));
}
TEST_F(PGLEAccuracyCheckerTest,
ReturnsOkAndNoIRChangeIfAllInstructionsAreFoundInTheProfile) {
const absl::string_view kHloString = R"(
HloModule m
apply_op {
x = f32[] parameter(0)
y = f32[] parameter(1)
ROOT apply_op = f32[] add(x, y)
}
ENTRY ar {
p0 = f32[32] parameter(0)
p1 = f32[32,32] parameter(1)
p2 = f32[32,32] parameter(2)
p3 = f32[32] parameter(3)
dot0 = f32[32,32]{1,0} custom-call(p1, p2), custom_call_target="__cublas$gemm"
dot1 = f32[32,32]{1,0} custom-call(p1, p2), custom_call_target="__cublas$gemm"
add0 = f32[32,32] add(dot0, dot1)
ar-start = f32[32] all-reduce-start(p0), to_apply=apply_op
ar-done = f32[32] all-reduce-done(ar-start)
ar-start1 = f32[32] all-reduce-start(p3), to_apply=apply_op
ar-done1 = f32[32] all-reduce-done(ar-start1)
ROOT _ = (f32[32],f32[32],f32[32,32]) tuple(ar-done, ar-done1, add0)
})";
const std::string kProfileString = R"pb(
costs { name: "dot0" cost_us: 1.0 }
costs { name: "dot1" cost_us: 1.0 }
costs { name: "add0" cost_us: 1.0 }
costs { name: "ar-start" cost_us: 1.0 }
costs { name: "ar-start1" cost_us: 1.0 }
)pb";
ProfiledInstructionsProto profile;
ASSERT_TRUE(TextFormat::ParseFromString(kProfileString, &profile));
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloString));
*module->mutable_config().mutable_fdo_profile() = kProfileString;
auto pgle_estimator = GetProfileGuidedLatencyEstimator(profile);
PGLEAccuracyChecker pgle_accuracy_checker(*pgle_estimator);
TF_ASSERT_OK_AND_ASSIGN(bool changed,
pgle_accuracy_checker.Run(module.get()));
EXPECT_FALSE(changed);
}
TEST_F(PGLEAccuracyCheckerTest,
ReturnsInvalidArgumentIfThereAreMissingInstructionsFromTheProfile) {
const absl::string_view kHloString = R"(
HloModule m
apply_op {
x = f32[] parameter(0)
y = f32[] parameter(1)
ROOT apply_op = f32[] add(x, y)
}
ENTRY ar {
p0 = f32[32] parameter(0)
p1 = f32[32,32] parameter(1)
p2 = f32[32,32] parameter(2)
p3 = f32[32] parameter(3)
dot0 = f32[32,32]{1,0} custom-call(p1, p2), custom_call_target="__cublas$gemm"
dot1 = f32[32,32]{1,0} custom-call(p1, p2), custom_call_target="__cublas$gemm"
add0 = f32[32,32] add(dot0, dot1)
ar-start = f32[32] all-reduce-start(p0), to_apply=apply_op
ar-done = f32[32] all-reduce-done(ar-start)
ar-start1 = f32[32] all-reduce-start(p3), to_apply=apply_op
ar-done1 = f32[32] all-reduce-done(ar-start1)
ROOT _ = (f32[32],f32[32],f32[32,32]) tuple(ar-done, ar-done1, add0)
})";
const std::string kProfileString = R"pb(
costs { name: "dot0" cost_us: 1.0 }
costs { name: "add0" cost_us: 1.0 }
costs { name: "ar-start1" cost_us: 1.0 }
)pb";
ProfiledInstructionsProto profile;
ASSERT_TRUE(TextFormat::ParseFromString(kProfileString, &profile));
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloString));
*module->mutable_config().mutable_fdo_profile() = kProfileString;
auto pgle_estimator = GetProfileGuidedLatencyEstimator(profile);
PGLEAccuracyChecker pgle_accuracy_checker(*pgle_estimator);
EXPECT_THAT(pgle_accuracy_checker.Run(module.get()),
StatusIs(absl::StatusCode::kInvalidArgument));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/pgle_accuracy_checker.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/pgle_accuracy_checker_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
6bc61fe2-d66b-43c8-afde-73c141c7c124 | cpp | tensorflow/tensorflow | preprocess_xplane | third_party/xla/xla/tsl/profiler/utils/preprocess_xplane.cc | third_party/xla/xla/tsl/profiler/utils/preprocess_xplane_test.cc | #include "xla/tsl/profiler/utils/preprocess_xplane.h"
#include <cstdint>
#include <memory>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "xla/tsl/profiler/utils/xplane_builder.h"
#include "xla/tsl/profiler/utils/xplane_schema.h"
#include "tsl/profiler/lib/context_types.h"
#include "tsl/profiler/protobuf/xplane.pb.h"
namespace tsl {
namespace profiler {
namespace {
using ::tsl::profiler::HostEventType;
using ::tsl::profiler::StatType;
using ::tsl::profiler::XEventBuilder;
using ::tsl::profiler::XLineBuilder;
using ::tsl::profiler::XPlane;
using ::tsl::profiler::XPlaneBuilder;
using ::tsl::profiler::XSpace;
void MutateXPlane(XPlane& plane,
const std::vector<std::unique_ptr<XplaneEventMutatorFactory>>&
mutator_factories) {
XPlaneBuilder plane_builder(&plane);
absl::flat_hash_map<int64_t, std::vector<std::unique_ptr<XplaneEventMutator>>>
mutators_from_event_metadata_id;
std::vector<std::unique_ptr<XplaneEventMutator>> line_mutators;
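  // Group mutators by the event metadata id they target so each event is
  // matched with a single hash lookup; mutators without event metadata
  // operate on whole lines instead.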
for (const auto& mutator_factory : mutator_factories) {
auto mutators = mutator_factory->CreateMutators(plane_builder);
for (auto& mutator : mutators) {
if (mutator->event_metadata()) {
auto id = mutator->event_metadata()->id();
mutators_from_event_metadata_id[id].push_back(std::move(mutator));
} else {
line_mutators.push_back(std::move(mutator));
}
}
}
if (mutators_from_event_metadata_id.empty() && line_mutators.empty()) {
return;
}
plane_builder.ForEachLine([&](XLineBuilder line_builder) {
for (const auto& mutator : line_mutators) {
mutator->MutateEventsInLine(line_builder);
}
if (mutators_from_event_metadata_id.empty()) return;
line_builder.ForEachEvent([&](XEventBuilder event_builder) {
auto event_mutators =
mutators_from_event_metadata_id.find(event_builder.MetadataId());
if (event_mutators != mutators_from_event_metadata_id.end()) {
for (const auto& mutator : event_mutators->second) {
mutator->Mutate(event_builder);
}
}
});
});
}
std::vector<std::unique_ptr<XplaneEventMutatorFactory>>
CreateMutatorFactories() {
std::vector<std::unique_ptr<XplaneEventMutatorFactory>> mutator_factories;
mutator_factories.push_back(ThreadpoolLineMutatorFactory::CreateFactory());
mutator_factories.push_back(XplaneRootEventMutatorFactory::CreateFactory(
HostEventType::kProcessBatch, 2));
mutator_factories.push_back(XplaneRootEventMutatorFactory::CreateFactory(
HostEventType::kBatchingSessionRun, 1));
mutator_factories.push_back(
XplaneConnectedEventMutatorFactory<
HostEventType::kExecutorStateProcess,
HostEventType::kTpuExecuteOp, ContextType::kLegacy,
false,
XContextStatsAccessor<uint64_t, StatType::kStepId>,
XContextStatsAccessor<uint64_t,
StatType::kIterNum>>::CreateFactory());
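  // Connect each TPU enqueue event to its dequeue-side counterpart, keyed by
  // the request id and queue address stats carried on both events.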
#define ADD_QUEUE_CONNECTION(__enque_event__, __deque_event__) \
mutator_factories.push_back( \
XplaneConnectedEventMutatorFactory< \
HostEventType::__enque_event__, HostEventType::__deque_event__, \
ContextType::kTpuStream, true, \
XContextStatsAccessor<uint64, StatType::kRequestId>, \
XContextStatsAccessor<uint64, \
StatType::kQueueAddr>>::CreateFactory())
ADD_QUEUE_CONNECTION(kEnqueueRequestLocked, kRunProgramRequest);
ADD_QUEUE_CONNECTION(kEnqueueRequestLocked, kHostCallbackRequest);
ADD_QUEUE_CONNECTION(kEnqueueRequestLocked, kTransferH2DRequest);
ADD_QUEUE_CONNECTION(kEnqueueRequestLocked, kTransferPreprocessedH2DRequest);
ADD_QUEUE_CONNECTION(kEnqueueRequestLocked, kTransferD2HRequest);
ADD_QUEUE_CONNECTION(kEnqueueRequestLocked, kOnDeviceSendRequest);
ADD_QUEUE_CONNECTION(kEnqueueRequestLocked, kOnDeviceRecvRequest);
ADD_QUEUE_CONNECTION(kEnqueueRequestLocked, kOnDeviceSendRecvLocalRequest);
ADD_QUEUE_CONNECTION(kEnqueueRequestLocked, kCustomWait);
ADD_QUEUE_CONNECTION(kEnqueueRequestLocked, kOnDeviceSendRequestMulti);
ADD_QUEUE_CONNECTION(kEnqueueRequestLocked, kOnDeviceRecvRequestMulti);
ADD_QUEUE_CONNECTION(kEnqueueRequestLocked, kPjrtAsyncWait);
#undef ADD_QUEUE_CONNECTION
mutator_factories.push_back(
HostRunIdMutatorFactory<
HostEventType::kDoEnqueueProgram>::CreateFactory());
mutator_factories.push_back(
HostRunIdMutatorFactory<
HostEventType::kCompleteCallbacks>::CreateFactory());
mutator_factories.push_back(
HostRunIdMutatorFactory<
HostEventType::kDoEnqueueContinuationProgram>::CreateFactory());
mutator_factories.push_back(
XplaneConnectedEventMutatorFactory<
HostEventType::kDoEnqueueProgram,
HostEventType::kCompleteCallbacks,
ContextType::kTpuLaunch,
true,
XContextStatsAccessor<uint64_t, StatType::kDeviceOrdinal>,
XContextStatsAccessor<uint64_t, StatType::kQueueId>,
XContextStatsAccessor<uint64_t, StatType::kRunId>,
XContextStatsAccessorWithDefault<uint64_t, StatType::kCoreType,
0ULL>>::CreateFactory());
mutator_factories.push_back(TpuModuleLineMutatorFactory::CreateFactory());
return mutator_factories;
}
}
void PreprocessXPlane(XPlane* plane) {
if (plane == nullptr) return;
auto mutator_factories = CreateMutatorFactories();
MutateXPlane(*plane, mutator_factories);
}
void PreprocessXSpace(XSpace* space) {
if (space == nullptr) return;
auto mutator_factories = CreateMutatorFactories();
for (XPlane& plane : *space->mutable_planes()) {
MutateXPlane(plane, mutator_factories);
}
}
}
} | #include "xla/tsl/profiler/utils/preprocess_xplane.h"
#include <cstdint>
#include <memory>
#include <optional>
#include "absl/container/flat_hash_map.h"
#include "absl/hash/hash.h"
#include "xla/tsl/profiler/utils/tf_xplane_visitor.h"
#include "xla/tsl/profiler/utils/xplane_builder.h"
#include "xla/tsl/profiler/utils/xplane_schema.h"
#include "xla/tsl/profiler/utils/xplane_test_utils.h"
#include "xla/tsl/profiler/utils/xplane_visitor.h"
#include "tsl/platform/test.h"
#include "tsl/profiler/lib/connected_traceme.h"
#include "tsl/profiler/protobuf/xplane.pb.h"
namespace tsl {
namespace profiler {
namespace {
using ::tsl::profiler::CreateTfXPlaneVisitor;
using ::tsl::profiler::CreateXEvent;
using ::tsl::profiler::GetHostEventTypeStr;
using ::tsl::profiler::HostEventType;
using ::tsl::profiler::StatType;
using ::tsl::profiler::XEventVisitor;
using ::tsl::profiler::XLineVisitor;
using ::tsl::profiler::XPlane;
using ::tsl::profiler::XPlaneBuilder;
using ::tsl::profiler::XPlaneVisitor;
using ::tsl::profiler::XSpace;
TEST(PreprocessXPlane, IsRootStatsTest) {
XSpace space;
XPlane* plane = space.add_planes();
XPlaneBuilder plane_builder(plane);
plane_builder.ReserveLines(1);
auto line_builder = plane_builder.GetOrCreateLine(0);
CreateXEvent(&plane_builder, &line_builder,
GetHostEventTypeStr(HostEventType::kProcessBatch), 100, 100);
CreateXEvent(&plane_builder, &line_builder,
GetHostEventTypeStr(HostEventType::kBatchingSessionRun), 200,
100);
PreprocessXSpace(&space);
XPlaneVisitor plane_visitor = CreateTfXPlaneVisitor(plane);
plane_visitor.ForEachLine([&](const XLineVisitor& line) {
line.ForEachEvent([&](const XEventVisitor& event) {
ASSERT_TRUE(event.GetStat(StatType::kIsRoot).has_value());
int64_t is_root = event.GetStat(StatType::kIsRoot)->IntValue();
if (event.Type() == HostEventType::kBatchingSessionRun) {
EXPECT_EQ(is_root, 1);
} else if (event.Type() == HostEventType::kProcessBatch) {
EXPECT_EQ(is_root, 2);
} else {
CHECK(false);
}
});
});
}
TEST(PreprocessXPlane, ProducerConsumerTest) {
XSpace space;
XPlane* plane = space.add_planes();
XPlaneBuilder plane_builder(plane);
plane_builder.ReserveLines(2);
auto line_builder = plane_builder.GetOrCreateLine(0);
CreateXEvent(
&plane_builder, &line_builder,
GetHostEventTypeStr(HostEventType::kExecutorStateProcess), 100, 100,
{{StatType::kStepId, int64_t{123}}, {StatType::kIterNum, int64_t{456}}});
line_builder = plane_builder.GetOrCreateLine(1);
CreateXEvent(
&plane_builder, &line_builder,
GetHostEventTypeStr(HostEventType::kTpuExecuteOp), 200, 100,
{{StatType::kStepId, int64_t{123}}, {StatType::kIterNum, int64_t{456}}});
PreprocessXSpace(&space);
std::optional<uint64_t> producer_context_id, consumer_context_id;
XPlaneVisitor plane_visitor = CreateTfXPlaneVisitor(plane);
plane_visitor.ForEachLine([&](const XLineVisitor& line) {
line.ForEachEvent([&](const XEventVisitor& event) {
if (event.Type() == HostEventType::kExecutorStateProcess) {
auto producer_type = event.GetStat(StatType::kProducerType);
ASSERT_TRUE(producer_type.has_value());
EXPECT_EQ(producer_type->IntValue(),
static_cast<int64_t>(ContextType::kLegacy));
auto producer_id = event.GetStat(StatType::kProducerId);
ASSERT_TRUE(producer_id.has_value());
producer_context_id = producer_id->IntOrUintValue();
} else if (event.Type() == HostEventType::kTpuExecuteOp) {
auto consumer_type = event.GetStat(StatType::kConsumerType);
ASSERT_TRUE(consumer_type.has_value());
EXPECT_EQ(consumer_type->IntValue(),
static_cast<int64_t>(ContextType::kLegacy));
auto consumer_id = event.GetStat(StatType::kConsumerId);
ASSERT_TRUE(consumer_id.has_value());
consumer_context_id = consumer_id->IntOrUintValue();
} else {
CHECK(false);
}
});
});
ASSERT_TRUE(producer_context_id && consumer_context_id);
ASSERT_EQ(*producer_context_id, *consumer_context_id);
}
TEST(PreprocessXPlane, ProducerConsumerNotMatchedTest) {
XSpace space;
XPlane* plane = space.add_planes();
XPlaneBuilder plane_builder(plane);
plane_builder.ReserveLines(2);
auto line_builder = plane_builder.GetOrCreateLine(0);
CreateXEvent(&plane_builder, &line_builder,
GetHostEventTypeStr(HostEventType::kExecutorStateProcess), 100,
100,
{{StatType::kStepId, int64_t{123}},
{StatType::kIterNum, int64_t{456}},
{StatType::kDeviceOrdinal, int64_t{789}}});
line_builder = plane_builder.GetOrCreateLine(1);
CreateXEvent(
&plane_builder, &line_builder,
GetHostEventTypeStr(HostEventType::kTpuExecuteOp), 200, 100,
{{StatType::kStepId, int64_t{123}}, {StatType::kIterNum, int64_t{789}}});
PreprocessXSpace(&space);
std::optional<uint64_t> producer_context_id, consumer_context_id;
XPlaneVisitor plane_visitor = CreateTfXPlaneVisitor(plane);
plane_visitor.ForEachLine([&](const XLineVisitor& line) {
line.ForEachEvent([&](const XEventVisitor& event) {
if (event.Type() == HostEventType::kExecutorStateProcess) {
auto producer_type = event.GetStat(StatType::kProducerType);
ASSERT_TRUE(producer_type.has_value());
EXPECT_EQ(producer_type->IntValue(),
static_cast<int64_t>(ContextType::kLegacy));
auto producer_id = event.GetStat(StatType::kProducerId);
ASSERT_TRUE(producer_id.has_value());
producer_context_id = producer_id->IntOrUintValue();
} else if (event.Type() == HostEventType::kTpuExecuteOp) {
auto consumer_type = event.GetStat(StatType::kConsumerType);
ASSERT_TRUE(consumer_type.has_value());
EXPECT_EQ(consumer_type->IntValue(),
static_cast<int64_t>(ContextType::kLegacy));
auto consumer_id = event.GetStat(StatType::kConsumerId);
ASSERT_TRUE(consumer_id.has_value());
consumer_context_id = consumer_id->IntOrUintValue();
} else {
CHECK(false);
}
});
});
ASSERT_TRUE(producer_context_id && consumer_context_id);
ASSERT_NE(*producer_context_id, *consumer_context_id);
}
TEST(PreprocessXPlane, MissingLegacyStatTest) {
XSpace space;
XPlane* plane = space.add_planes();
XPlaneBuilder plane_builder(plane);
plane_builder.ReserveLines(2);
auto line_builder = plane_builder.GetOrCreateLine(0);
CreateXEvent(&plane_builder, &line_builder,
GetHostEventTypeStr(HostEventType::kExecutorStateProcess), 100,
100, {{StatType::kStepId, int64_t{123}}});
line_builder = plane_builder.GetOrCreateLine(1);
CreateXEvent(&plane_builder, &line_builder,
GetHostEventTypeStr(HostEventType::kTpuExecuteOp), 200, 100,
{{StatType::kStepId, int64_t{123}}});
PreprocessXSpace(&space);
XPlaneVisitor plane_visitor = CreateTfXPlaneVisitor(plane);
plane_visitor.ForEachLine([&](const XLineVisitor& line) {
line.ForEachEvent([&](const XEventVisitor& event) {
if (event.Type() == HostEventType::kExecutorStateProcess) {
auto producer_type = event.GetStat(StatType::kProducerType);
ASSERT_FALSE(producer_type.has_value());
auto producer_id = event.GetStat(StatType::kProducerId);
ASSERT_FALSE(producer_id.has_value());
} else if (event.Type() == HostEventType::kTpuExecuteOp) {
auto consumer_type = event.GetStat(StatType::kConsumerType);
ASSERT_FALSE(consumer_type.has_value());
auto consumer_id = event.GetStat(StatType::kConsumerId);
ASSERT_FALSE(consumer_id.has_value());
} else {
CHECK(false);
}
});
});
}
TEST(PreprocessXPlane, HostRunIdPreprocessorTest) {
XSpace space;
XPlane* plane = space.add_planes();
XPlaneBuilder plane_builder(plane);
plane_builder.ReserveLines(2);
auto line_builder = plane_builder.GetOrCreateLine(0);
int64_t host_run_id = int64_t{582974244};
int64_t device_run_id = int64_t{46103332};
CreateXEvent(
&plane_builder, &line_builder,
GetHostEventTypeStr(HostEventType::kDoEnqueueContinuationProgram), 100,
100, {});
CreateXEvent(&plane_builder, &line_builder,
GetHostEventTypeStr(HostEventType::kDoEnqueueProgram), 100, 100,
{{StatType::kRunId, int64_t{host_run_id}}});
CreateXEvent(&plane_builder, &line_builder,
GetHostEventTypeStr(HostEventType::kTpuExecuteOp), 200, 100,
{{StatType::kRunId, int64_t{device_run_id}}});
CreateXEvent(&plane_builder, &line_builder,
GetHostEventTypeStr(HostEventType::kCompleteCallbacks), 300, 100,
{{StatType::kRunId, int64_t{host_run_id}}});
line_builder = plane_builder.GetOrCreateLine(1);
PreprocessXSpace(&space);
XPlaneVisitor plane_visitor = CreateTfXPlaneVisitor(plane);
plane_visitor.ForEachLine([&](const XLineVisitor& line) {
line.ForEachEvent([&](const XEventVisitor& event) {
if (event.Type() == HostEventType::kDoEnqueueContinuationProgram) {
auto run_id = event.GetStat(StatType::kRunId);
ASSERT_FALSE(run_id.has_value());
} else if (event.Type() == HostEventType::kDoEnqueueProgram) {
auto run_id = event.GetStat(StatType::kRunId);
ASSERT_TRUE(run_id.has_value());
ASSERT_EQ(run_id->IntValue(), device_run_id);
} else if (event.Type() == HostEventType::kTpuExecuteOp) {
auto run_id = event.GetStat(StatType::kRunId);
ASSERT_TRUE(run_id.has_value());
ASSERT_EQ(run_id->IntValue(), device_run_id);
} else if (event.Type() == HostEventType::kCompleteCallbacks) {
auto run_id = event.GetStat(StatType::kRunId);
ASSERT_TRUE(run_id.has_value());
ASSERT_EQ(run_id->IntValue(), device_run_id);
} else {
CHECK(false);
}
});
});
}
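// The threadpool preprocessor should merge the start/stop region markers into
// a single kThreadpoolListenerRegion event that spans both and carries the
// consumer id.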
TEST(PreprocessXPlane, ThreadPoolPreprocessorTest) {
XSpace space;
XPlane* plane = space.add_planes();
XPlaneBuilder plane_builder(plane);
auto main_line = plane_builder.GetOrCreateLine(0);
CreateXEvent(&plane_builder, &main_line, kThreadpoolListenerRecord, 100, 100,
{{StatType::kProducerType,
static_cast<int64_t>(ContextType::kThreadpoolEvent)},
{StatType::kProducerId, int64_t{123}}});
auto thread_pool_line = plane_builder.GetOrCreateLine(1);
CreateXEvent(&plane_builder, &thread_pool_line,
kThreadpoolListenerStartRegion, 200, 0,
{{StatType::kConsumerType,
static_cast<int64_t>(ContextType::kThreadpoolEvent)},
{StatType::kConsumerId, int64_t{123}}});
CreateXEvent(&plane_builder, &thread_pool_line, kThreadpoolListenerStopRegion,
300, 0,
{{StatType::kConsumerType,
static_cast<int64_t>(ContextType::kThreadpoolEvent)},
{StatType::kConsumerId, int64_t{123}}});
bool new_event_added = false;
PreprocessXSpace(&space);
XPlaneVisitor plane_visitor = CreateTfXPlaneVisitor(plane);
plane_visitor.ForEachLine([&](const XLineVisitor& line) {
line.ForEachEvent([&](const XEventVisitor& event) {
if (event.Name() == kThreadpoolListenerRegion) {
new_event_added = true;
EXPECT_EQ(event.DurationPs(), 100);
EXPECT_EQ(event.TimestampPs(), 200);
auto stat = event.GetStat(StatType::kConsumerId);
EXPECT_TRUE(stat.has_value());
EXPECT_EQ(stat->IntOrUintValue(), 123);
}
});
});
EXPECT_TRUE(new_event_added);
}
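// Accessor initialization must fail cleanly (Initialize returns false and
// GetStat yields nullopt) when the plane defines none of the requested stats.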
TEST(PreprocessXPlane, XContextStatsAccessorNPETest) {
auto xplane = std::make_unique<XPlane>();
XPlaneBuilder xplane_builder(xplane.get());
XLine xline;
XLineBuilder xline_builder(&xline, &xplane_builder);
XEvent xevent;
XEventBuilder xevent_builder(&xline, &xplane_builder, &xevent);
XContextStatsAccessor<int64_t, StatType::kRunId> run_id_accessor;
ASSERT_FALSE(run_id_accessor.Initialize(xplane_builder));
EXPECT_EQ(run_id_accessor.GetStat(xevent_builder), std::nullopt);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/profiler/utils/preprocess_xplane.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/profiler/utils/preprocess_xplane_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
866239b7-d9d3-4781-9af5-36f32ea4978f | cpp | google/tensorstore | diagonal_op | tensorstore/index_space/internal/diagonal_op.cc | tensorstore/index_space/diagonal_op_test.cc | #include "tensorstore/index_space/internal/diagonal_op.h"
#include <algorithm>
namespace tensorstore {
namespace internal_index_space {
namespace {
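// Shifts the elements of `range` forward by one position (range[i] becomes
// range[i - 1]), making room at index 0 for the diagonal dimension, which the
// caller fills in.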
template <typename R>
void ShiftRangeForwardByOne(R range) {
for (DimensionIndex i = range.size() - 1; i > 0; --i) {
range[i] = range[i - 1];
}
}
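// Collapses the input dimensions listed in `dimensions` of `original` into a
// single diagonal dimension (new input dimension 0) and writes the result to
// `result`.  The diagonal's domain is the intersection of the selected
// dimensions' domains.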
void ExtractDiagonal(TransformRep* original, TransformRep* result,
DimensionIndexBuffer* dimensions, bool domain_only) {
const DimensionIndex orig_input_rank = original->input_rank;
const DimensionIndex output_rank = domain_only ? 0 : original->output_rank;
const DimensionIndex new_input_rank =
orig_input_rank - dimensions->size() + 1;
assert(result->input_rank_capacity >= new_input_rank);
const DimensionIndex diag_input_dim = 0;
DimensionIndex orig_to_new_input_dim[kMaxRank];
std::fill_n(&orig_to_new_input_dim[0], orig_input_rank,
static_cast<DimensionIndex>(-1));
bool lower_diagonal_bound_implicit = true,
upper_diagonal_bound_implicit = true;
IndexInterval diagonal_bounds;
for (DimensionIndex orig_input_dim : *dimensions) {
orig_to_new_input_dim[orig_input_dim] = diag_input_dim;
const auto d = original->input_dimension(orig_input_dim);
diagonal_bounds = Intersect(diagonal_bounds, d.domain());
if (!d.implicit_lower_bound()) {
lower_diagonal_bound_implicit = false;
}
if (!d.implicit_upper_bound()) {
upper_diagonal_bound_implicit = false;
}
}
for (DimensionIndex orig_input_dim = 0, new_input_dim = 1;
orig_input_dim < orig_input_rank; ++orig_input_dim) {
if (orig_to_new_input_dim[orig_input_dim] == -1) {
orig_to_new_input_dim[orig_input_dim] = new_input_dim++;
}
}
const bool domain_is_explicitly_empty = !lower_diagonal_bound_implicit &&
!upper_diagonal_bound_implicit &&
diagonal_bounds.empty();
span<const OutputIndexMap> orig_maps =
original->output_index_maps().first(output_rank);
span<OutputIndexMap> result_maps =
result->output_index_maps().first(output_rank);
for (DimensionIndex output_dim = 0; output_dim < output_rank; ++output_dim) {
const auto& orig_map = orig_maps[output_dim];
auto& result_map = result_maps[output_dim];
result_map.stride() = orig_map.stride();
result_map.offset() = orig_map.offset();
switch (orig_map.method()) {
case OutputIndexMethod::constant:
result_map.SetConstant();
break;
case OutputIndexMethod::single_input_dimension: {
const DimensionIndex orig_input_dim = orig_map.input_dimension();
assert(orig_input_dim >= 0 && orig_input_dim < orig_input_rank);
const DimensionIndex new_input_dim =
orig_to_new_input_dim[orig_input_dim];
result_map.SetSingleInputDimension(new_input_dim);
break;
}
case OutputIndexMethod::array: {
if (domain_is_explicitly_empty) {
result_map.SetConstant();
result_map.stride() = 0;
result_map.offset() = 0;
break;
}
auto& result_index_array = result_map.SetArrayIndexing(new_input_rank);
const auto& orig_index_array = orig_map.index_array_data();
assert(orig_index_array.rank_capacity >= orig_input_rank);
Index diag_byte_stride = 0;
for (DimensionIndex orig_input_dim : *dimensions) {
diag_byte_stride += orig_index_array.byte_strides[orig_input_dim];
}
for (DimensionIndex orig_input_dim = 0;
orig_input_dim < orig_input_rank; ++orig_input_dim) {
const DimensionIndex new_input_dim =
orig_to_new_input_dim[orig_input_dim];
if (new_input_dim == diag_input_dim) continue;
assert(new_input_dim - 1 <= orig_input_dim);
result_index_array.byte_strides[new_input_dim - 1] =
orig_index_array.byte_strides[orig_input_dim];
}
ShiftRangeForwardByOne(
span(result_index_array.byte_strides, new_input_rank));
result_index_array.byte_strides[diag_input_dim] = diag_byte_stride;
result_index_array.index_range = orig_index_array.index_range;
result_index_array.element_pointer =
orig_index_array.element_pointer.pointer();
break;
}
}
}
for (DimensionIndex orig_input_dim = 0; orig_input_dim < orig_input_rank;
++orig_input_dim) {
const DimensionIndex new_input_dim = orig_to_new_input_dim[orig_input_dim];
if (new_input_dim == diag_input_dim) continue;
assert(new_input_dim - 1 <= orig_input_dim);
result->input_dimension(new_input_dim - 1) =
original->input_dimension(orig_input_dim);
}
ShiftRangeForwardByOne(result->all_input_dimensions(new_input_rank));
{
const auto d = result->input_dimension(diag_input_dim);
d.domain() = diagonal_bounds;
d.implicit_lower_bound() = lower_diagonal_bound_implicit;
d.implicit_upper_bound() = upper_diagonal_bound_implicit;
d.SetEmptyLabel();
}
result->input_rank = new_input_rank;
result->output_rank = output_rank;
dimensions->clear();
dimensions->push_back(diag_input_dim);
NormalizeImplicitBounds(*result);
}
}
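// Applies the diagonal operation to `transform`: the dimensions in
// `*dimensions` are replaced by one new input dimension, and `dimensions` is
// updated to refer to it.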
Result<IndexTransform<>> ApplyDiagonal(IndexTransform<> transform,
DimensionIndexBuffer* dimensions,
bool domain_only) {
TransformRep* rep = TransformAccess::rep(transform);
const DimensionIndex new_input_rank =
rep->input_rank - dimensions->size() + 1;
TransformRep::Ptr<> new_rep =
NewOrMutableRep(rep, new_input_rank, rep->output_rank, domain_only);
ExtractDiagonal(rep, new_rep.get(), dimensions, domain_only);
internal_index_space::DebugCheckInvariants(new_rep.get());
return TransformAccess::Make<IndexTransform<>>(std::move(new_rep));
}
}
} | #include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/index_space/dim_expression.h"
#include "tensorstore/index_space/index_transform_builder.h"
#include "tensorstore/index_space/internal/dim_expression_testutil.h"
namespace {
using ::tensorstore::Dims;
using ::tensorstore::Index;
using ::tensorstore::IndexTransformBuilder;
using ::tensorstore::kInfIndex;
using ::tensorstore::kInfSize;
using ::tensorstore::MakeArray;
using ::tensorstore::internal_index_space::EquivalentIndices;
using ::tensorstore::internal_index_space::TestDimExpression;
TEST(DiagonalTest, Example) {
const auto original_transform = IndexTransformBuilder<3, 3>()
.input_origin({1, 2, 3})
.input_shape({5, 4, 5})
.input_labels({"x", "y", "z"})
.output_identity_transform()
.Finalize()
.value();
const auto expected_new_transform = IndexTransformBuilder<2, 3>()
.input_origin({3, 2})
.input_shape({3, 4})
.input_labels({"", "y"})
.output_single_input_dimension(0, 0)
.output_single_input_dimension(1, 1)
.output_single_input_dimension(2, 0)
.Finalize()
.value();
const EquivalentIndices equivalent_indices = {
{{4, 3, 4}, {4, 3}},
};
TestDimExpression(original_transform,
Dims(0, 2).Diagonal(),
{0},
expected_new_transform,
expected_new_transform,
equivalent_indices);
TestDimExpression(original_transform,
Dims("x", "z").Diagonal(),
{0},
expected_new_transform,
expected_new_transform,
equivalent_indices);
}
TEST(DiagonalTest, ZeroDimensional) {
TestDimExpression(IndexTransformBuilder<2, 2>()
.input_origin({1, 2})
.input_shape({5, 4})
.input_labels({"x", "y"})
.output_single_input_dimension(0, 5, 1, 0)
.output_single_input_dimension(1, 1)
.Finalize()
.value(),
Dims().Diagonal(),
{0},
IndexTransformBuilder<3, 2>()
.input_origin({-kInfIndex, 1, 2})
.input_shape({kInfSize, 5, 4})
.implicit_lower_bounds({1, 0, 0})
.implicit_upper_bounds({1, 0, 0})
.input_labels({"", "x", "y"})
.output_single_input_dimension(0, 1)
.output_single_input_dimension(1, 2)
.Finalize()
.value(),
IndexTransformBuilder<3, 2>()
.input_origin({-kInfIndex, 1, 2})
.input_shape({kInfSize, 5, 4})
.implicit_lower_bounds({1, 0, 0})
.implicit_upper_bounds({1, 0, 0})
.input_labels({"", "x", "y"})
.output_single_input_dimension(0, 5, 1, 1)
.output_single_input_dimension(1, 2)
.Finalize()
.value(),
{{{3, 4}, {8, 3, 4}}},
false);
}
TEST(DiagonalTest, OneDimensional) {
TestDimExpression(IndexTransformBuilder<3, 3>()
.input_origin({1, 2, 3})
.input_shape({5, 4, 5})
.input_labels({"x", "y", "z"})
.output_single_input_dimension(0, 5, 1, 0)
.output_single_input_dimension(1, 1)
.output_single_input_dimension(2, 2)
.Finalize()
.value(),
Dims(1).Diagonal(),
{0},
IndexTransformBuilder<3, 3>()
.input_origin({2, 1, 3})
.input_shape({4, 5, 5})
.input_labels({"", "x", "z"})
.output_single_input_dimension(0, 1)
.output_single_input_dimension(1, 0)
.output_single_input_dimension(2, 2)
.Finalize()
.value(),
IndexTransformBuilder<3, 3>()
.input_origin({2, 1, 3})
.input_shape({4, 5, 5})
.input_labels({"", "x", "z"})
.output_single_input_dimension(0, 5, 1, 1)
.output_single_input_dimension(1, 0)
.output_single_input_dimension(2, 2)
.Finalize()
.value(),
{{{4, 3, 5}, {3, 4, 5}}});
}
TEST(DiagonalTest, TwoDimensionalSimple) {
TestDimExpression(IndexTransformBuilder<3, 3>()
.input_origin({5, 6, 7})
.input_shape({10, 9, 15})
.output_single_input_dimension(0, 1, 1, 1)
.output_single_input_dimension(1, 2, 2, 0)
.output_single_input_dimension(2, 3, 3, 2)
.Finalize()
.value(),
Dims(2, 0).Diagonal(),
{0},
IndexTransformBuilder<2, 3>()
.input_origin({7, 6})
.input_shape({8, 9})
.output_single_input_dimension(0, 0)
.output_single_input_dimension(1, 1)
.output_single_input_dimension(2, 0)
.Finalize()
.value(),
IndexTransformBuilder<2, 3>()
.input_origin({7, 6})
.input_shape({8, 9})
.output_single_input_dimension(0, 1, 1, 1)
.output_single_input_dimension(1, 2, 2, 0)
.output_single_input_dimension(2, 3, 3, 0)
.Finalize()
.value(),
{{{10, 11, 10}, {10, 11}}});
}
TEST(DiagonalTest, TwoDimensionalSimpleImplicitLower) {
TestDimExpression(
IndexTransformBuilder<3, 3>()
.input_origin({5, 6, 7})
.input_shape({10, 9, 15})
.implicit_lower_bounds({1, 0, 1})
.output_single_input_dimension(0, 1, 1, 1)
.output_single_input_dimension(1, 2, 2, 0)
.output_single_input_dimension(2, 3, 3, 2)
.Finalize()
.value(),
Dims(2, 0).Diagonal(),
{0},
IndexTransformBuilder<2, 3>()
.input_origin({7, 6})
.input_shape({8, 9})
.implicit_lower_bounds({1, 0})
.output_single_input_dimension(0, 0)
.output_single_input_dimension(1, 1)
.output_single_input_dimension(2, 0)
.Finalize()
.value(),
IndexTransformBuilder<2, 3>()
.input_origin({7, 6})
.input_shape({8, 9})
.implicit_lower_bounds({1, 0})
.output_single_input_dimension(0, 1, 1, 1)
.output_single_input_dimension(1, 2, 2, 0)
.output_single_input_dimension(2, 3, 3, 0)
.Finalize()
.value(),
{{{10, 11, 10}, {10, 11}}});
}
TEST(DiagonalTest, TwoDimensionalSimpleImplicitUpper) {
TestDimExpression(
IndexTransformBuilder<3, 3>()
.input_origin({5, 6, 7})
.input_shape({10, 9, 15})
.implicit_upper_bounds({1, 0, 1})
.output_single_input_dimension(0, 1, 1, 1)
.output_single_input_dimension(1, 2, 2, 0)
.output_single_input_dimension(2, 3, 3, 2)
.Finalize()
.value(),
Dims(2, 0).Diagonal(),
{0},
IndexTransformBuilder<2, 3>()
.input_origin({7, 6})
.input_shape({8, 9})
.implicit_upper_bounds({1, 0})
.output_single_input_dimension(0, 0)
.output_single_input_dimension(1, 1)
.output_single_input_dimension(2, 0)
.Finalize()
.value(),
IndexTransformBuilder<2, 3>()
.input_origin({7, 6})
.input_shape({8, 9})
.implicit_upper_bounds({1, 0})
.output_single_input_dimension(0, 1, 1, 1)
.output_single_input_dimension(1, 2, 2, 0)
.output_single_input_dimension(2, 3, 3, 0)
.Finalize()
.value(),
{{{10, 11, 10}, {10, 11}}});
}
TEST(DiagonalTest, IndexArray) {
TestDimExpression(
IndexTransformBuilder<3, 2>()
.input_origin({5, 6, 6})
.input_shape({4, 5, 2})
.output_index_array(
0, 2, 3,
MakeArray<Index>(
{{{1, 4}}, {{2, 5}}, {{3, 6}}, {{4, 7}}}))
.output_constant(1, 0)
.Finalize()
.value(),
Dims(0, 2).Diagonal(),
{0},
IndexTransformBuilder<2, 3>()
.input_origin({6, 6})
.input_shape({2, 5})
.output_single_input_dimension(0, 0)
.output_single_input_dimension(1, 1)
.output_single_input_dimension(2, 0)
.Finalize()
.value(),
IndexTransformBuilder<2, 2>()
.input_origin({6, 6})
.input_shape({2, 5})
.output_index_array(0, 2, 3,
MakeArray<Index>({{2}, {6}}))
.output_constant(1, 0)
.Finalize()
.value(),
{{{6, 8, 6}, {6, 8}}});
}
TEST(DiagonalTest, IndexArrayZeroSize) {
TestDimExpression(
IndexTransformBuilder<2, 2>()
.input_shape({0, 2})
.implicit_upper_bounds({1, 0})
.output_single_input_dimension(0, 0)
.output_index_array(1, 0, 1, MakeArray<Index>({{1, 2}}))
.Finalize()
.value(),
Dims(0, 1).Diagonal(),
{0},
IndexTransformBuilder<1, 2>()
.input_shape({0})
.output_single_input_dimension(0, 0)
.output_single_input_dimension(1, 0)
.Finalize()
.value(),
IndexTransformBuilder<1, 2>()
.input_shape({0})
.output_single_input_dimension(0, 0)
.output_constant(1, 0)
.Finalize()
.value(),
{});
}
TEST(DiagonalTest, Labeled) {
TestDimExpression(
IndexTransformBuilder<3, 2>()
.input_origin({5, 6, 6})
.input_shape({4, 5, 2})
.input_labels({"a", "b", "c"})
.output_index_array(
0, 2, 3,
MakeArray<Index>(
{{{1, 4}}, {{2, 5}}, {{3, 6}}, {{4, 7}}}))
.output_constant(1, 0)
.Finalize()
.value(),
Dims(0, 2).Diagonal().Label("diag"),
{0},
IndexTransformBuilder<2, 3>()
.input_origin({6, 6})
.input_shape({2, 5})
.input_labels({"diag", "b"})
.output_single_input_dimension(0, 0)
.output_single_input_dimension(1, 1)
.output_single_input_dimension(2, 0)
.Finalize()
.value(),
IndexTransformBuilder<2, 2>()
.input_origin({6, 6})
.input_shape({2, 5})
.input_labels({"diag", "b"})
.output_index_array(0, 2, 3,
MakeArray<Index>({{2}, {6}}))
.output_constant(1, 0)
.Finalize()
.value(),
{{{6, 8, 6}, {6, 8}}});
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/index_space/internal/diagonal_op.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/index_space/diagonal_op_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
3d7f0d8b-ca02-430c-84bd-cbb68e3a0073 | cpp | tensorflow/tensorflow | unpack | tensorflow/lite/kernels/unpack.cc | tensorflow/lite/kernels/unpack_test.cc | #include <stdint.h>
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/reference/reference_ops.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/internal/types.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace unpack {
namespace {
constexpr int kInputTensor = 0;
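// Validates the single input and the unpack axis, then resizes each of the
// `num` outputs to the input shape with the unpack axis removed.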
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
const TfLiteUnpackParams* data =
reinterpret_cast<TfLiteUnpackParams*>(node->builtin_data);
TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), data->num);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
TF_LITE_ENSURE(context, NumElements(input) > 0);
int axis = data->axis;
if (axis < 0) {
axis += NumDimensions(input);
}
TF_LITE_ENSURE(context, 0 <= axis && axis < NumDimensions(input));
if (input->type != kTfLiteInt32 && input->type != kTfLiteFloat32 &&
input->type != kTfLiteUInt8 && input->type != kTfLiteInt8 &&
input->type != kTfLiteInt16 && input->type != kTfLiteBool) {
TF_LITE_KERNEL_LOG(context, "Type '%s' is not supported by unpack.",
TfLiteTypeGetName(input->type));
return kTfLiteError;
}
const TfLiteIntArray* input_shape = input->dims;
// Check num against the axis size before allocating output_shape so that a
// mismatch cannot leak the allocation.
TF_LITE_ENSURE_EQ(context, data->num, input_shape->data[axis]);
TfLiteIntArray* output_shape = TfLiteIntArrayCreate(NumDimensions(input) - 1);
int o = 0;
for (int index = 0; index < NumDimensions(input); ++index) {
if (index != axis) {
output_shape->data[o++] = input_shape->data[index];
}
}
for (int i = 0; i < data->num; ++i) {
TfLiteIntArray* copied_output_shape = TfLiteIntArrayCopy(output_shape);
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, i, &output));
TF_LITE_ENSURE_TYPES_EQ(context, output->type, input->type);
TF_LITE_ENSURE_EQ(context, input->params.zero_point,
output->params.zero_point);
TF_LITE_ENSURE_EQ(context, input->params.scale, output->params.scale);
TF_LITE_ENSURE_OK(
context, context->ResizeTensor(context, output, copied_output_shape));
}
TfLiteIntArrayFree(output_shape);
return kTfLiteOk;
}
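// Copies the slices along `axis` into the per-slice output tensors via the
// reference Unpack implementation.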
template <typename T>
void UnpackImpl(TfLiteContext* context, TfLiteNode* node,
const TfLiteTensor* input, int output_count, int axis) {
tflite::UnpackParams op_params;
op_params.axis = axis;
op_params.num_split = output_count;
VectorOfTensors<T> all_outputs(*context, *node->outputs);
reference_ops::Unpack<T>(op_params, GetTensorShape(input),
GetTensorData<T>(input), **all_outputs.shapes(),
all_outputs.data());
}
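// Dispatches to UnpackImpl for the input tensor's type.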
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteUnpackParams* data =
reinterpret_cast<TfLiteUnpackParams*>(node->builtin_data);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
switch (input->type) {
case kTfLiteFloat32: {
UnpackImpl<float>(context, node, input, data->num, data->axis);
break;
}
case kTfLiteInt32: {
UnpackImpl<int32_t>(context, node, input, data->num, data->axis);
break;
}
case kTfLiteUInt8: {
UnpackImpl<uint8_t>(context, node, input, data->num, data->axis);
break;
}
case kTfLiteInt8: {
UnpackImpl<int8_t>(context, node, input, data->num, data->axis);
break;
}
case kTfLiteBool: {
UnpackImpl<bool>(context, node, input, data->num, data->axis);
break;
}
case kTfLiteInt16: {
UnpackImpl<int16_t>(context, node, input, data->num, data->axis);
break;
}
default: {
TF_LITE_KERNEL_LOG(context, "Type '%s' is not supported by unpack.",
TfLiteTypeGetName(input->type));
return kTfLiteError;
}
}
return kTfLiteOk;
}
}
}
TfLiteRegistration* Register_UNPACK() {
static TfLiteRegistration r = {nullptr, nullptr, unpack::Prepare,
unpack::Eval};
return &r;
}
}
}
} | #include <stdint.h>
#include <initializer_list>
#include <iostream>
#include <type_traits>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace {
using ::testing::ElementsAreArray;
template <typename T>
class UnpackOpModel : public SingleOpModel {
public:
UnpackOpModel(const TensorData& input, int axis) {
if (axis < 0) {
axis += input.shape.size();
}
const int num_outputs = input.shape[axis];
input_ = AddInput(input);
for (int i = 0; i < num_outputs; ++i) {
outputs_.push_back(AddOutput(input.type));
}
SetBuiltinOp(BuiltinOperator_UNPACK, BuiltinOptions_UnpackOptions,
CreateUnpackOptions(builder_, num_outputs, axis).Union());
BuildInterpreter({GetShape(input_)});
}
void SetInput(std::initializer_list<T> data) {
PopulateTensor<T>(input_, data);
}
std::vector<std::vector<T>> GetOutputDatas() {
std::vector<std::vector<T>> output_datas;
for (const int output : outputs_) {
std::cerr << "the output is " << output << std::endl;
output_datas.push_back(ExtractVector<T>(output));
}
return output_datas;
}
std::vector<std::vector<int>> GetOutputShapes() {
std::vector<std::vector<int>> output_shapes;
for (const int output : outputs_) {
output_shapes.push_back(GetTensorShape(output));
}
return output_shapes;
}
private:
int input_;
std::vector<int> outputs_;
};
template <typename T>
void Check(int axis, const std::initializer_list<int>& input_shape,
const std::initializer_list<T>& input_data,
const std::vector<std::vector<int>>& exp_output_shape,
const std::vector<std::vector<T>>& exp_output_data,
const TensorType& type = TensorType_FLOAT32) {
UnpackOpModel<T> m({type, input_shape}, axis);
m.SetInput(input_data);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShapes(), ElementsAreArray(exp_output_shape));
EXPECT_THAT(m.GetOutputDatas(), ElementsAreArray(exp_output_data));
}
template <typename InputType>
struct UnpackOpTest : public ::testing::Test {
using TypeToTest = InputType;
TensorType TENSOR_TYPE =
(std::is_same<InputType, int16_t>::value
? TensorType_INT16
: (std::is_same<InputType, uint8_t>::value
? TensorType_UINT8
: (std::is_same<InputType, int8_t>::value
? TensorType_INT8
: (std::is_same<InputType, int32_t>::value
? TensorType_INT32
: TensorType_FLOAT32))));
};
using TestTypes = testing::Types<float, int32_t, int8_t, uint8_t, int16_t>;
TYPED_TEST_SUITE(UnpackOpTest, TestTypes);
TYPED_TEST(UnpackOpTest, ThreeOutputs) {
Check<typename TestFixture::TypeToTest>(
0, {3, 2},
{1, 2, 3, 4, 5, 6},
{{2}, {2}, {2}},
{{1, 2}, {3, 4}, {5, 6}}, TestFixture::TENSOR_TYPE);
}
TYPED_TEST(UnpackOpTest, ThreeOutputsAxisOne) {
Check<typename TestFixture::TypeToTest>(
1, {3, 2},
{1, 2, 3, 4, 5, 6},
{{3}, {3}},
{{1, 3, 5}, {2, 4, 6}}, TestFixture::TENSOR_TYPE);
}
TYPED_TEST(UnpackOpTest, ThreeOutputsNegativeAxisOne) {
Check<typename TestFixture::TypeToTest>(
-1, {3, 2},
{1, 2, 3, 4, 5, 6},
{{3}, {3}},
{{1, 3, 5}, {2, 4, 6}}, TestFixture::TENSOR_TYPE);
}
TYPED_TEST(UnpackOpTest, OneOutput) {
Check<typename TestFixture::TypeToTest>(
0, {1, 6},
{1, 2, 3, 4, 5, 6},
{{6}},
{{1, 2, 3, 4, 5, 6}}, TestFixture::TENSOR_TYPE);
}
TYPED_TEST(UnpackOpTest, ThreeDimensionsOutputs) {
Check<typename TestFixture::TypeToTest>(
2, {2, 2, 2},
{1, 2, 3, 4, 5, 6, 7, 8},
{{2, 2}, {2, 2}},
{{1, 3, 5, 7}, {2, 4, 6, 8}},
TestFixture::TENSOR_TYPE);
}
TYPED_TEST(UnpackOpTest, FiveDimensionsOutputs) {
Check<typename TestFixture::TypeToTest>(
2, {2, 2, 2, 2, 1},
{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16},
{{2, 2, 2, 1}, {2, 2, 2, 1}},
{{1, 2, 5, 6, 9, 10, 13, 14}, {3, 4, 7, 8, 11, 12, 15, 16}},
TestFixture::TENSOR_TYPE);
}
TYPED_TEST(UnpackOpTest, VectorToScalar) {
Check<typename TestFixture::TypeToTest>(
0, {5},
{1, 2, 3, 4, 5},
{{}, {}, {}, {}, {}},
{{1}, {2}, {3}, {4}, {5}}, TestFixture::TENSOR_TYPE);
}
TEST(UnpackOpTestBool, BoolThreeOutputs) {
Check<bool>(
0, {3, 2},
{true, false, true, false, true, false},
{{2}, {2}, {2}},
{{true, false}, {true, false}, {true, false}},
TensorType_BOOL);
}
TEST(UnpackOpTestBool, BoolThreeOutputsAxisOne) {
Check<bool>(
1, {3, 2},
{true, false, true, false, true, false},
{{3}, {3}},
{{true, true, true}, {false, false, false}},
TensorType_BOOL);
}
TEST(UnpackOpTestBool, BoolThreeOutputsNegativeAxisOne) {
Check<bool>(
-1, {3, 2},
{true, false, true, false, true, false},
{{3}, {3}},
{{true, true, true}, {false, false, false}},
TensorType_BOOL);
}
TEST(UnpackOpTestBool, BoolThreeOutputsNegativeAxisTwo) {
Check<bool>(
-2, {3, 2},
{true, false, true, false, true, false},
{{2}, {2}, {2}},
{{true, false}, {true, false}, {true, false}},
TensorType_BOOL);
}
TEST(UnpackOpTestBool, BoolOneOutput) {
Check<bool>(
0, {1, 6},
{true, false, true, false, true, false},
{{6}},
{{true, false, true, false, true, false}},
TensorType_BOOL);
}
TEST(UnpackOpTestBool, BoolThreeDimensionsOutputs) {
Check<bool>(
2, {2, 2, 2},
{true, false, true, false, true, false, true, false},
{{2, 2}, {2, 2}},
{{true, true, true, true}, {false, false, false, false}},
TensorType_BOOL);
}
TEST(UnpackOpTest, BoolFiveDimensionsOutputs) {
Check<bool>(
2, {2, 2, 2, 2, 1},
{true, false, true, false, true, false, true, false, true, true, true,
true, true, true, true, true},
{{2, 2, 2, 1}, {2, 2, 2, 1}},
{{true, false, true, false, true, true, true, true},
{true, false, true, false, true, true, true, true}},
TensorType_BOOL);
}
TEST(UnpackOpTestBool, BoolVectorToScalar) {
Check<bool>(0, {5},
{true, false, true, false, true},
{{}, {}, {}, {}, {}},
{{true}, {false}, {true}, {false}, {true}},
TensorType_BOOL);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/unpack.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/unpack_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
daaa7d5d-62e4-4907-b147-eab5061f05d7 | cpp | tensorflow/tensorflow | quantized_batch_norm_op | tensorflow/core/kernels/quantized_batch_norm_op.cc | tensorflow/core/kernels/quantized_batch_norm_op_test.cc | #define EIGEN_USE_THREADS
#include "unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/numeric_op.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/kernels/quantization_utils.h"
namespace tensorflow {
namespace {
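// Float reference implementation: dequantizes every value, runs batch norm in
// float, and makes two passes -- the first to find the output min/max, the
// second to quantize results into that range.  Kept for comparison; the op
// below calls FixedPointBatchNorm instead.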
template <typename T1, typename T2>
void ReferenceBatchNorm(const Tensor& input, const float input_min,
const float input_max, const Tensor& mean,
float mean_min, float mean_max, const Tensor& var,
float var_min, float var_max, const Tensor& beta,
float beta_min, float beta_max, const Tensor& gamma,
float gamma_min, float gamma_max,
float variance_epsilon, bool scale_after_normalization,
Tensor* output, float* output_min, float* output_max) {
auto input_flat = input.flat<T1>();
auto mean_flat = mean.flat<T1>();
auto var_flat = var.flat<T1>();
auto beta_flat = beta.flat<T1>();
auto gamma_flat = gamma.flat<T1>();
auto output_flat = output->flat<T2>();
const int depth = mean.dim_size(0);
const int row_count = input_flat.size() / depth;
*output_min = std::numeric_limits<float>::max();
*output_max = std::numeric_limits<float>::lowest();
for (int pass = 0; pass < 2; ++pass) {
const bool is_range_pass = (pass == 0);
for (int row_index = 0; row_index < row_count; ++row_index) {
for (int channel = 0; channel < depth; ++channel) {
const int input_index = (row_index * depth) + channel;
const float input_value =
QuantizedToFloat(input_flat(input_index), input_min, input_max);
const float mean_value =
QuantizedToFloat(mean_flat(channel), mean_min, mean_max);
const float var_value =
QuantizedToFloat(var_flat(channel), var_min, var_max);
const float beta_value =
QuantizedToFloat(beta_flat(channel), beta_min, beta_max);
const float gamma_value =
QuantizedToFloat(gamma_flat(channel), gamma_min, gamma_max);
float output_value;
if (scale_after_normalization) {
output_value = (((input_value - mean_value) /
sqrtf(var_value + variance_epsilon)) *
gamma_value) +
beta_value;
} else {
output_value = ((input_value - mean_value) /
sqrtf(var_value + variance_epsilon)) +
beta_value;
}
if (is_range_pass) {
*output_min = std::min(output_value, *output_min);
*output_max = std::max(output_value, *output_max);
} else {
output_flat(input_index) =
FloatToQuantized<T2>(output_value, *output_min, *output_max);
}
}
}
}
}
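// Fixed-point implementation: folds mean, variance, gamma, and beta into
// per-channel scale and offset values, then applies them with quantized
// arithmetic.  The output range is fixed at +/-(1 << 20) so it is known
// before any values are computed.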
template <typename T1, typename T2>
void FixedPointBatchNorm(const Tensor& input, const float input_min,
const float input_max, const Tensor& mean,
float mean_min, float mean_max, const Tensor& var,
float var_min, float var_max, const Tensor& beta,
float beta_min, float beta_max, const Tensor& gamma,
float gamma_min, float gamma_max,
float variance_epsilon, bool scale_after_normalization,
Tensor* output, float* output_min, float* output_max) {
auto input_flat = input.flat<T1>();
auto mean_flat = mean.flat<T1>();
auto var_flat = var.flat<T1>();
auto beta_flat = beta.flat<T1>();
auto gamma_flat = gamma.flat<T1>();
auto output_flat = output->flat<T2>();
const int depth = mean.dim_size(0);
const int row_count = input_flat.size() / depth;
*output_min = -(1 << 20);
*output_max = (1 << 20);
Tensor scale_tensor(DataTypeToEnum<T2>::v(), {depth});
auto scale_flat = scale_tensor.flat<T2>();
Tensor offset_tensor(DataTypeToEnum<T2>::v(), {depth});
auto offset_flat = offset_tensor.flat<T2>();
for (int channel = 0; channel < depth; ++channel) {
const float mean_value =
QuantizedToFloat(mean_flat(channel), mean_min, mean_max);
const float var_value =
QuantizedToFloat(var_flat(channel), var_min, var_max);
const float beta_value =
QuantizedToFloat(beta_flat(channel), beta_min, beta_max);
const float gamma_value =
QuantizedToFloat(gamma_flat(channel), gamma_min, gamma_max);
float scale_value;
if (scale_after_normalization) {
scale_value = (1.0f / sqrtf(var_value + variance_epsilon)) * gamma_value;
} else {
scale_value = (1.0f / sqrtf(var_value + variance_epsilon));
}
const float offset_value = (-mean_value * scale_value) + beta_value;
scale_flat(channel) =
FloatToQuantized<T2>(scale_value, *output_min, *output_max);
offset_flat(channel) =
FloatToQuantized<T2>(offset_value, *output_min, *output_max);
}
const T2 one_in_output_space =
FloatToQuantized<T2>(1.0f, *output_min, *output_max);
for (int row_index = 0; row_index < row_count; ++row_index) {
for (int channel = 0; channel < depth; ++channel) {
const int input_index = (row_index * depth) + channel;
const T2 input_value =
RequantizeInNewRange<T1, T2>(input_flat(input_index), input_min,
input_max, *output_min, *output_max);
const T2 scale_value = scale_flat(channel);
const T2 offset_value = offset_flat(channel);
const T2 output_value =
((input_value * scale_value) / one_in_output_space) + offset_value;
output_flat(input_index) = output_value;
}
}
}
}
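// Quantized version of batch normalization with global normalization: every
// tensor input arrives with explicit float min/max bounds, and the op emits
// the output tensor together with its computed range.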
template <typename T1, typename T2>
class QuantizedBatchNormOp : public OpKernel {
public:
explicit QuantizedBatchNormOp(OpKernelConstruction* context)
: OpKernel(context) {
OP_REQUIRES_OK(context,
context->GetAttr("variance_epsilon", &variance_epsilon_));
OP_REQUIRES_OK(context, context->GetAttr("scale_after_normalization",
&scale_after_normalization_));
}
void Compute(OpKernelContext* context) override {
const Tensor& input = context->input(0);
const auto& input_min_tensor = context->input(1);
OP_REQUIRES(context, input_min_tensor.NumElements() == 1,
errors::InvalidArgument("input_min must have 1 element"));
const float input_min = input_min_tensor.flat<float>()(0);
const auto& input_max_tensor = context->input(2);
OP_REQUIRES(context, input_max_tensor.NumElements() == 1,
errors::InvalidArgument("input_max must have 1 element"));
const float input_max = input_max_tensor.flat<float>()(0);
const Tensor& mean = context->input(3);
const auto& mean_min_tensor = context->input(4);
OP_REQUIRES(context, mean_min_tensor.NumElements() == 1,
errors::InvalidArgument("mean_min must have 1 element"));
const float mean_min = mean_min_tensor.flat<float>()(0);
const auto& mean_max_tensor = context->input(5);
OP_REQUIRES(context, mean_max_tensor.NumElements() == 1,
errors::InvalidArgument("mean_max must have 1 element"));
const float mean_max = mean_max_tensor.flat<float>()(0);
const Tensor& var = context->input(6);
const auto& var_min_tensor = context->input(7);
OP_REQUIRES(context, var_min_tensor.NumElements() == 1,
errors::InvalidArgument("var_min must have 1 element"));
const float var_min = var_min_tensor.flat<float>()(0);
const auto& var_max_tensor = context->input(8);
OP_REQUIRES(context, var_max_tensor.NumElements() == 1,
errors::InvalidArgument("var_max must have 1 element"));
const float var_max = var_max_tensor.flat<float>()(0);
const Tensor& beta = context->input(9);
const auto& beta_min_tensor = context->input(10);
OP_REQUIRES(context, beta_min_tensor.NumElements() == 1,
errors::InvalidArgument("beta_min must have 1 element"));
const float beta_min = beta_min_tensor.flat<float>()(0);
const auto& beta_max_tensor = context->input(11);
OP_REQUIRES(context, beta_max_tensor.NumElements() == 1,
errors::InvalidArgument("beta_max must have 1 element"));
const float beta_max = beta_max_tensor.flat<float>()(0);
const Tensor& gamma = context->input(12);
const auto& gamma_min_tensor = context->input(13);
OP_REQUIRES(context, gamma_min_tensor.NumElements() == 1,
errors::InvalidArgument("gamma_min must have 1 element"));
const float gamma_min = gamma_min_tensor.flat<float>()(0);
const auto& gamma_max_tensor = context->input(14);
OP_REQUIRES(context, gamma_max_tensor.NumElements() == 1,
errors::InvalidArgument("gamma_max must have 1 element"));
const float gamma_max = gamma_max_tensor.flat<float>()(0);
OP_REQUIRES(context, input.dims() == 4,
errors::InvalidArgument("input must be 4-dimensional",
input.shape().DebugString()));
OP_REQUIRES(context, mean.dims() == 1,
errors::InvalidArgument("mean must be 1-dimensional",
mean.shape().DebugString()));
OP_REQUIRES(context, var.dims() == 1,
errors::InvalidArgument("var must be 1-dimensional",
var.shape().DebugString()));
OP_REQUIRES(context, beta.dims() == 1,
errors::InvalidArgument("beta must be 1-dimensional",
beta.shape().DebugString()));
OP_REQUIRES(context, gamma.dims() == 1,
errors::InvalidArgument("gamma must be 1-dimensional",
gamma.shape().DebugString()));
OP_REQUIRES(context, mean.NumElements() > 1,
errors::InvalidArgument("Must have at least a mean value: ",
mean.shape().DebugString()));
const auto last_dim = input.shape().dims() - 1;
OP_REQUIRES(context,
mean.shape().dim_size(0) == input.shape().dim_size(last_dim),
errors::InvalidArgument("Must provide as many means as the "
"last dimension of the input tensor: ",
mean.shape().DebugString(), " vs. ",
input.shape().DebugString()));
OP_REQUIRES(
context, mean.shape().dim_size(0) == var.shape().dim_size(0),
errors::InvalidArgument(
"Mean and variance tensors must have the same shape: ",
mean.shape().DebugString(), " vs. ", var.shape().DebugString()));
OP_REQUIRES(
context, mean.shape().dim_size(0) == beta.shape().dim_size(0),
errors::InvalidArgument(
"Mean and beta tensors must have the same shape: ",
mean.shape().DebugString(), " vs. ", beta.shape().DebugString()));
OP_REQUIRES(
context, mean.shape().dim_size(0) == gamma.shape().dim_size(0),
errors::InvalidArgument(
"Mean and gamma tensors must have the same shape: ",
mean.shape().DebugString(), " vs. ", gamma.shape().DebugString()));
Tensor* output = nullptr;
OP_REQUIRES_OK(context,
context->allocate_output(0, input.shape(), &output));
float output_min;
float output_max;
FixedPointBatchNorm<T1, T2>(input, input_min, input_max, mean, mean_min,
mean_max, var, var_min, var_max, beta, beta_min,
beta_max, gamma, gamma_min, gamma_max,
variance_epsilon_, scale_after_normalization_,
output, &output_min, &output_max);
Tensor* output_min_tensor = nullptr;
OP_REQUIRES_OK(context,
context->allocate_output(1, {}, &output_min_tensor));
output_min_tensor->flat<float>()(0) = output_min;
Tensor* output_max_tensor = nullptr;
OP_REQUIRES_OK(context,
context->allocate_output(2, {}, &output_max_tensor));
output_max_tensor->flat<float>()(0) = output_max;
}
private:
float variance_epsilon_;
bool scale_after_normalization_;
};
REGISTER_KERNEL_BUILDER(Name("QuantizedBatchNormWithGlobalNormalization")
.Device(DEVICE_CPU)
.TypeConstraint<quint8>("Tinput")
.TypeConstraint<qint32>("out_type"),
QuantizedBatchNormOp<quint8, qint32>);
} | #define EIGEN_USE_THREADS
#include "unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/kernels/batch_norm_op.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/kernels/quantization_utils.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/core/threadpool.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
using QuantizedBatchNormOpTest = OpsTestBase;
TEST_F(QuantizedBatchNormOpTest, Simple) {
TF_EXPECT_OK(NodeDefBuilder("quantized_batch_norm_op",
"QuantizedBatchNormWithGlobalNormalization")
.Input(FakeInput(DT_QUINT8))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_QUINT8))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_QUINT8))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_QUINT8))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_QUINT8))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Attr("scale_after_normalization", false)
.Attr("variance_epsilon", 0.001)
.Attr("Tinput", DT_QUINT8)
.Attr("out_type", DT_QINT32)
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
const float input_min = -128.0f;
const float input_max = 127.0f;
const int input_batch = 1;
const int input_height = 1;
const int input_width = 6;
const int input_depth = 2;
Tensor input_float(DT_FLOAT,
{input_batch, input_height, input_width, input_depth});
test::FillValues<float>(&input_float,
{1, 4, 2, 5, 3, 6, -1, -4, -2, -5, -3, -6});
Tensor input_quantized =
FloatTensorToQuantized<quint8>(input_float, input_min, input_max);
const float mean_min = 0.0f;
const float mean_max = 20.0f;
Tensor mean_float(DT_FLOAT, {input_depth});
test::FillValues<float>(&mean_float, {10, 20});
Tensor mean_quantized =
FloatTensorToQuantized<quint8>(mean_float, mean_min, mean_max);
const float variance_min = 0.0f;
const float variance_max = 1.0f;
Tensor variance_float(DT_FLOAT, {input_depth});
test::FillValues<float>(&variance_float, {0.25, 0.5});
Tensor variance_quantized = FloatTensorToQuantized<quint8>(
variance_float, variance_min, variance_max);
const float beta_min = 0.0f;
const float beta_max = 1.0f;
Tensor beta_float(DT_FLOAT, {input_depth});
test::FillValues<float>(&beta_float, {0.1, 0.6});
Tensor beta_quantized =
FloatTensorToQuantized<quint8>(beta_float, beta_min, beta_max);
const float gamma_min = 0.0f;
const float gamma_max = 1.0f;
Tensor gamma_float(DT_FLOAT, {input_depth});
test::FillValues<float>(&gamma_float, {0.0, 0.0});
Tensor gamma_quantized =
FloatTensorToQuantized<quint8>(gamma_float, gamma_min, gamma_max);
AddInputFromArray<quint8>(input_quantized.shape(),
input_quantized.flat<quint8>());
AddInputFromArray<float>(TensorShape({1}), {input_min});
AddInputFromArray<float>(TensorShape({1}), {input_max});
AddInputFromArray<quint8>(mean_quantized.shape(),
mean_quantized.flat<quint8>());
AddInputFromArray<float>(TensorShape({1}), {mean_min});
AddInputFromArray<float>(TensorShape({1}), {mean_max});
AddInputFromArray<quint8>(variance_quantized.shape(),
variance_quantized.flat<quint8>());
AddInputFromArray<float>(TensorShape({1}), {variance_min});
AddInputFromArray<float>(TensorShape({1}), {variance_max});
AddInputFromArray<quint8>(beta_quantized.shape(),
beta_quantized.flat<quint8>());
AddInputFromArray<float>(TensorShape({1}), {beta_min});
AddInputFromArray<float>(TensorShape({1}), {beta_max});
AddInputFromArray<quint8>(gamma_quantized.shape(),
gamma_quantized.flat<quint8>());
AddInputFromArray<float>(TensorShape({1}), {gamma_min});
AddInputFromArray<float>(TensorShape({1}), {gamma_max});
TF_ASSERT_OK(RunOpKernel());
Tensor expected_float(
allocator(), DT_FLOAT,
TensorShape({input_batch, input_height, input_width, input_depth}));
test::FillValues<float>(
&expected_float, {-17.86, -22.00, -15.87, -20.59, -13.87, -19.18, -21.86,
-33.31, -23.85, -34.72, -25.85, -36.13});
const Tensor& output_quantized = *GetOutput(0);
const float output_min = GetOutput(1)->flat<float>()(0);
const float output_max = GetOutput(2)->flat<float>()(0);
Tensor output_float =
QuantizedTensorToFloat<qint32>(output_quantized, output_min, output_max);
test::ExpectTensorNear<float>(expected_float, output_float, 0.1);
}
TEST_F(QuantizedBatchNormOpTest, SameAsFloat) {
TF_EXPECT_OK(NodeDefBuilder("quantized_batch_norm_op",
"QuantizedBatchNormWithGlobalNormalization")
.Input(FakeInput(DT_QUINT8))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_QUINT8))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_QUINT8))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_QUINT8))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_QUINT8))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Attr("scale_after_normalization", false)
.Attr("variance_epsilon", 0.001)
.Attr("Tinput", DT_QUINT8)
.Attr("out_type", DT_QINT32)
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
const float input_min = -128.0f;
const float input_max = 127.0f;
const int input_batch = 1;
const int input_height = 1;
const int input_width = 6;
const int input_depth = 2;
Tensor input_float(DT_FLOAT,
{input_batch, input_height, input_width, input_depth});
test::FillValues<float>(&input_float,
{1, 4, 2, 5, 3, 6, -1, -4, -2, -5, -3, -6});
Tensor input_quantized =
FloatTensorToQuantized<quint8>(input_float, input_min, input_max);
const float mean_min = 0.0f;
const float mean_max = 20.0f;
Tensor mean_float(DT_FLOAT, {input_depth});
test::FillValues<float>(&mean_float, {10, 20});
Tensor mean_quantized =
FloatTensorToQuantized<quint8>(mean_float, mean_min, mean_max);
const float variance_min = 0.0f;
const float variance_max = 1.0f;
Tensor variance_float(DT_FLOAT, {input_depth});
test::FillValues<float>(&variance_float, {0.25, 0.5});
Tensor variance_quantized = FloatTensorToQuantized<quint8>(
variance_float, variance_min, variance_max);
const float beta_min = 0.0f;
const float beta_max = 1.0f;
Tensor beta_float(DT_FLOAT, {input_depth});
test::FillValues<float>(&beta_float, {0.1, 0.6});
Tensor beta_quantized =
FloatTensorToQuantized<quint8>(beta_float, beta_min, beta_max);
const float gamma_min = 0.0f;
const float gamma_max = 1.0f;
Tensor gamma_float(DT_FLOAT, {input_depth});
test::FillValues<float>(&gamma_float, {0.0, 0.0});
Tensor gamma_quantized =
FloatTensorToQuantized<quint8>(gamma_float, gamma_min, gamma_max);
AddInputFromArray<quint8>(input_quantized.shape(),
input_quantized.flat<quint8>());
AddInputFromArray<float>(TensorShape({1}), {input_min});
AddInputFromArray<float>(TensorShape({1}), {input_max});
AddInputFromArray<quint8>(mean_quantized.shape(),
mean_quantized.flat<quint8>());
AddInputFromArray<float>(TensorShape({1}), {mean_min});
AddInputFromArray<float>(TensorShape({1}), {mean_max});
AddInputFromArray<quint8>(variance_quantized.shape(),
variance_quantized.flat<quint8>());
AddInputFromArray<float>(TensorShape({1}), {variance_min});
AddInputFromArray<float>(TensorShape({1}), {variance_max});
AddInputFromArray<quint8>(beta_quantized.shape(),
beta_quantized.flat<quint8>());
AddInputFromArray<float>(TensorShape({1}), {beta_min});
AddInputFromArray<float>(TensorShape({1}), {beta_max});
AddInputFromArray<quint8>(gamma_quantized.shape(),
gamma_quantized.flat<quint8>());
AddInputFromArray<float>(TensorShape({1}), {gamma_min});
AddInputFromArray<float>(TensorShape({1}), {gamma_max});
TF_ASSERT_OK(RunOpKernel());
Tensor expected_float(
allocator(), DT_FLOAT,
TensorShape({input_batch, input_height, input_width, input_depth}));
thread::ThreadPool threadpool(Env::Default(), "test", 1);
Eigen::ThreadPoolDevice eigen_cpu_device(threadpool.AsEigenThreadPool(), 1);
const Tensor& const_input_float = input_float;
const Tensor& const_mean_float = mean_float;
const Tensor& const_variance_float = variance_float;
const Tensor& const_beta_float = beta_float;
const Tensor& const_gamma_float = gamma_float;
functor::BatchNorm<Eigen::ThreadPoolDevice, float>()(
eigen_cpu_device, const_input_float.tensor<float, 4>(),
const_mean_float.vec<float>(), const_variance_float.vec<float>(),
const_beta_float.vec<float>(), const_gamma_float.vec<float>(), 0.001,
false, expected_float.tensor<float, 4>());
const Tensor& output_quantized = *GetOutput(0);
const float output_min = GetOutput(1)->flat<float>()(0);
const float output_max = GetOutput(2)->flat<float>()(0);
Tensor output_float =
QuantizedTensorToFloat<qint32>(output_quantized, output_min, output_max);
test::ExpectTensorNear<float>(expected_float, output_float, 0.1);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/quantized_batch_norm_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/quantized_batch_norm_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
862266e5-6eee-42ce-88d0-6997e0b227b4 | cpp | tensorflow/tensorflow | split | tensorflow/lite/delegates/gpu/common/tasks/split.cc | tensorflow/lite/delegates/xnnpack/split_test.cc | #include "tensorflow/lite/delegates/gpu/common/tasks/split.h"
#include <map>
#include <string>
#include <vector>
namespace tflite {
namespace gpu {
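// Splits the source tensor into N destinations along one axis.  The channels
// axis needs specialized code because four channels are packed per slice.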
Split::Split(const GpuInfo& gpu_info, const OperationDef& definition,
const SplitAttributes& attr, const std::vector<int>& channels)
: GPUOperation(definition), attr_(attr) {
work_group_size_ = int3(8, 4, 1);
code_ = attr.axis == Axis::CHANNELS ? GetSplitChannelsCode(gpu_info, channels)
: GetSplitCode();
}
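// Codegen for all axes except channels: each destination copies a contiguous
// run of the source along the split axis, advancing a shared source counter.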
std::string Split::GetSplitCode() {
AddSrcTensor("src_tensor", definition_.src_tensors[0]);
for (int i = 0; i < definition_.dst_tensors.size(); ++i) {
AddDstTensor("dst_tensor_" + std::to_string(i), definition_.dst_tensors[i]);
}
const std::string task_width =
attr_.axis == Axis::WIDTH ? "1" : "args.src_tensor.Width()";
const std::string task_height =
attr_.axis == Axis::HEIGHT ? "1" : "args.src_tensor.Height()";
const std::string task_depth =
attr_.axis == Axis::DEPTH ? "1" : "args.src_tensor.Depth()";
const std::string task_batch =
attr_.axis == Axis::BATCH ? "1" : "args.src_tensor.Batch()";
const std::string task_slices =
attr_.axis == Axis::CHANNELS ? "1" : "args.src_tensor.Slices()";
std::map<Axis, std::string> axis_to_selector = {
{Axis::WIDTH, "Width"}, {Axis::HEIGHT, "Height"},
{Axis::DEPTH, "Depth"}, {Axis::CHANNELS, "Slices"},
{Axis::BATCH, "Batch"},
};
std::map<Axis, std::string> axis_to_coord = {
{Axis::WIDTH, "X"}, {Axis::HEIGHT, "Y"}, {Axis::DEPTH, "D"},
{Axis::CHANNELS, "S"}, {Axis::BATCH, "B"},
};
std::string c;
c += "MAIN_FUNCTION($0) {\n";
if (definition_.src_tensors[0].HasAxis(Axis::BATCH)) {
c += " int linear_id = GLOBAL_ID_0;\n";
c += " int X = linear_id / " + task_batch + ";\n";
c += " int B = linear_id % " + task_batch + ";\n";
c += " if (X >= " + task_width + ") return;\n";
} else {
c += " int X = GLOBAL_ID_0;\n";
c += " if (X >= " + task_width + ") return;\n";
}
if (definition_.src_tensors[0].HasAxis(Axis::DEPTH)) {
c += " int linear_id = GLOBAL_ID_1;\n";
c += " int Y = linear_id % " + task_height + ";\n";
c += " int D = linear_id / " + task_height + ";\n";
c += " if (D >= " + task_depth + ") return;\n";
} else {
c += " int Y = GLOBAL_ID_1;\n";
c += " if (Y >= " + task_height + ") return;\n";
}
c += " int S = GLOBAL_ID_2;\n";
c += " if (S >= " + task_slices + ") return;\n";
c += " int src_counter = 0;\n";
std::vector<std::string> src_coords;
for (auto axis :
{Axis::WIDTH, Axis::HEIGHT, Axis::DEPTH, Axis::CHANNELS, Axis::BATCH}) {
if (definition_.src_tensors[0].HasAxis(axis)) {
const std::string coord_name =
attr_.axis == axis ? "src_counter" : axis_to_coord[axis];
src_coords.push_back(coord_name);
}
}
std::string src_coords_str = src_coords[0];
for (int i = 1; i < src_coords.size(); ++i) {
src_coords_str += ", " + src_coords[i];
}
for (int i = 0; i < definition_.dst_tensors.size(); ++i) {
std::vector<std::string> dst_coords;
for (auto axis : {Axis::WIDTH, Axis::HEIGHT, Axis::DEPTH, Axis::CHANNELS,
Axis::BATCH}) {
if (definition_.dst_tensors[i].HasAxis(axis)) {
const std::string coord_name =
attr_.axis == axis ? "i" : axis_to_coord[axis];
dst_coords.push_back(coord_name);
}
}
std::string dst_coords_str = dst_coords[0];
for (int j = 1; j < dst_coords.size(); ++j) {
dst_coords_str += ", " + dst_coords[j];
}
const std::string dst_name = "args.dst_tensor_" + std::to_string(i);
c += " for (int i = 0; i < " + dst_name + "." +
axis_to_selector[attr_.axis] + "(); ++i, src_counter++) {\n";
c += " args.src_tensor::type result = args.src_tensor.Read(" +
src_coords_str + ");\n";
c += " " + dst_name + ".Write(result, " + dst_coords_str + ");\n";
c += " }\n";
}
c += "}\n";
return c;
}
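// Channel-split kernel: channels are repacked element by element so that
// destination channel counts do not have to be multiples of the slice width 4.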
std::string Split::GetSplitChannelsCode(const GpuInfo& gpu_info,
const std::vector<int>& channels) {
AddSrcTensor("src_tensor", definition_.src_tensors[0]);
for (int i = 0; i < definition_.dst_tensors.size(); ++i) {
AddDstTensor("dst_tensor_" + std::to_string(i), definition_.dst_tensors[i]);
}
const std::string batch_coord =
definition_.src_tensors[0].HasAxis(Axis::BATCH) ? ", B" : "";
std::string coords = "X, Y";
std::string c;
c += "MAIN_FUNCTION($0) {\n";
if (definition_.src_tensors[0].HasAxis(Axis::BATCH)) {
c += " int linear_id = GLOBAL_ID_0;\n";
c += " int X = linear_id / args.src_tensor.Batch();\n";
c += " int B = linear_id % args.src_tensor.Batch();\n";
c += " if (X >= args.src_tensor.Width()) return;\n";
} else {
c += " int X = GLOBAL_ID_0;\n";
c += " if (X >= args.src_tensor.Width()) return;\n";
}
if (definition_.src_tensors[0].HasAxis(Axis::DEPTH)) {
c += " int linear_id = GLOBAL_ID_1;\n";
c += " int Y = linear_id % args.src_tensor.Height();\n";
c += " int Z = linear_id / args.src_tensor.Height();\n";
c += " if (Z >= args.src_tensor.Depth()) return;\n";
coords += ", Z";
} else {
c += " int Y = GLOBAL_ID_1;\n";
c += " if (Y >= args.src_tensor.Height()) return;\n";
}
int src_channels = 0;
for (auto dst_ch : channels) {
src_channels += dst_ch;
}
const int src_slices = DivideRoundUp(src_channels, 4);
int dst_ch = 0;
int dst_slice = 0;
int dst_tensor = 0;
const std::string postfix[] = {".x", ".y", ".z", ".w"};
c += " args.src_tensor::type dst_val;\n";
for (int s = 0; s < src_slices; ++s) {
c += " if (" + std::to_string(s) + " < args.src_tensor.Slices()) {\n";
c += " args.src_tensor::type src_val = args.src_tensor.Read(" + coords +
", " + std::to_string(s) + batch_coord + ");\n";
for (int k = 0; k < 4 && s * 4 + k < src_channels; ++k) {
c += " dst_val" + postfix[dst_ch % 4] + " = src_val" + postfix[k] +
";\n";
dst_ch++;
if (dst_ch == channels[dst_tensor]) {
const std::string dst_name =
"args.dst_tensor_" + std::to_string(dst_tensor);
c += " " + dst_name + ".Write(dst_val, " + coords + ", " +
std::to_string(dst_slice) + batch_coord + ");\n";
dst_tensor += 1;
dst_ch = 0;
dst_slice = 0;
}
if (dst_ch != 0 && dst_ch % 4 == 0) {
const std::string dst_name =
"args.dst_tensor_" + std::to_string(dst_tensor);
c += " " + dst_name + ".Write(dst_val, " + coords + ", " +
std::to_string(dst_slice) + batch_coord + ");\n";
dst_slice += 1;
}
}
if (gpu_info.IsMali()) {
c += " } else { return; }\n";
} else {
c += " }\n";
}
}
c += "}\n";
return c;
}
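// The split axis is walked inside the kernel, so its grid extent is 1.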
int3 Split::GetGridSize() const {
const int width = attr_.axis == Axis::WIDTH ? 1 : src_[0]->Width();
const int height = attr_.axis == Axis::HEIGHT ? 1 : src_[0]->Height();
const int depth = attr_.axis == Axis::DEPTH ? 1 : src_[0]->Depth();
const int batch = attr_.axis == Axis::BATCH ? 1 : src_[0]->Batch();
const int slices = attr_.axis == Axis::CHANNELS ? 1 : src_[0]->Slices();
const int grid_x = width * batch;
const int grid_y = height * depth;
const int grid_z = slices;
return int3(grid_x, grid_y, grid_z);
}
Split CreateSplit(const GpuInfo& gpu_info, const OperationDef& definition,
const SplitAttributes& attr,
const std::vector<int>& channels) {
return Split(gpu_info, definition, attr, channels);
}
}
} | #include <algorithm>
#include <cstdint>
#include <functional>
#include <memory>
#include <random>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/xnnpack/split_tester.h"
#include "tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h"
namespace tflite {
namespace xnnpack {
TEST(Split, 1D_to_2_outputs) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(1, 5), std::ref(rng));
const std::vector<int32_t> shape({shape_rng() * 2});
for (int i = -1; i < 1; i++) {
SplitTester()
.InputShape(shape)
.SplitDimension(i)
.NumSplits(2)
.Test(TensorType_FLOAT32, xnnpack_delegate.get());
}
}
TEST(Split, 2D_to_2_outputs) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 10), std::ref(rng));
auto split_dim_rng =
std::bind(std::uniform_int_distribution<int32_t>(1, 5), std::ref(rng));
for (int i = -2; i < 2; i++) {
std::vector<int32_t> shape({shape_rng(), shape_rng()});
shape[i < 0 ? i + shape.size() : i] = split_dim_rng() * 2;
SplitTester()
.InputShape(shape)
.SplitDimension(i)
.NumSplits(2)
.Test(TensorType_FLOAT32, xnnpack_delegate.get());
}
}
TEST(Split, 3D_to_2_outputs) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 10), std::ref(rng));
auto split_dim_rng =
std::bind(std::uniform_int_distribution<int32_t>(1, 5), std::ref(rng));
for (int i = -3; i < 3; i++) {
std::vector<int32_t> shape({shape_rng(), shape_rng(), shape_rng()});
shape[i < 0 ? i + shape.size() : i] = split_dim_rng() * 2;
SplitTester()
.InputShape(shape)
.SplitDimension(i)
.NumSplits(2)
.Test(TensorType_FLOAT32, xnnpack_delegate.get());
}
}
TEST(Split, 4D_to_2_outputs) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 10), std::ref(rng));
auto split_dim_rng =
std::bind(std::uniform_int_distribution<int32_t>(1, 5), std::ref(rng));
for (int i = -4; i < 4; i++) {
std::vector<int32_t> shape(
{shape_rng(), shape_rng(), shape_rng(), shape_rng()});
shape[i < 0 ? i + shape.size() : i] = split_dim_rng() * 2;
SplitTester()
.InputShape(shape)
.SplitDimension(i)
.NumSplits(2)
.Test(TensorType_FLOAT32, xnnpack_delegate.get());
}
}
TEST(Split, 1D_to_3_outputs) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(1, 5), std::ref(rng));
const std::vector<int32_t> shape({shape_rng() * 3});
for (int i = -1; i < 1; i++) {
SplitTester()
.InputShape(shape)
.SplitDimension(i)
.NumSplits(3)
.Test(TensorType_FLOAT32, xnnpack_delegate.get());
}
}
TEST(Split, 2D_to_3_outputs) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 10), std::ref(rng));
auto split_dim_rng =
std::bind(std::uniform_int_distribution<int32_t>(1, 5), std::ref(rng));
for (int i = -2; i < 2; i++) {
std::vector<int32_t> shape({shape_rng(), shape_rng()});
shape[i < 0 ? i + shape.size() : i] = split_dim_rng() * 3;
SplitTester()
.InputShape(shape)
.SplitDimension(i)
.NumSplits(3)
.Test(TensorType_FLOAT32, xnnpack_delegate.get());
}
}
TEST(Split, 3D_to_3_outputs) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 10), std::ref(rng));
auto split_dim_rng =
std::bind(std::uniform_int_distribution<int32_t>(1, 5), std::ref(rng));
for (int i = -3; i < 3; i++) {
std::vector<int32_t> shape({shape_rng(), shape_rng(), shape_rng()});
shape[i < 0 ? i + shape.size() : i] = split_dim_rng() * 3;
SplitTester()
.InputShape(shape)
.SplitDimension(i)
.NumSplits(3)
.Test(TensorType_FLOAT32, xnnpack_delegate.get());
}
}
TEST(Split, 4D_to_3_outputs) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 10), std::ref(rng));
auto split_dim_rng =
std::bind(std::uniform_int_distribution<int32_t>(1, 5), std::ref(rng));
for (int i = -4; i < 4; i++) {
std::vector<int32_t> shape(
{shape_rng(), shape_rng(), shape_rng(), shape_rng()});
shape[i < 0 ? i + shape.size() : i] = split_dim_rng() * 3;
SplitTester()
.InputShape(shape)
.SplitDimension(i)
.NumSplits(3)
.Test(TensorType_FLOAT32, xnnpack_delegate.get());
}
}
TEST(Split, 1D_to_4_outputs) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(1, 5), std::ref(rng));
const std::vector<int32_t> shape({shape_rng() * 4});
for (int i = -1; i < 1; i++) {
SplitTester()
.InputShape(shape)
.SplitDimension(i)
.NumSplits(4)
.Test(TensorType_FLOAT32, xnnpack_delegate.get());
}
}
TEST(Split, 2D_to_4_outputs) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 10), std::ref(rng));
auto split_dim_rng =
std::bind(std::uniform_int_distribution<int32_t>(1, 5), std::ref(rng));
for (int i = -2; i < 2; i++) {
std::vector<int32_t> shape({shape_rng(), shape_rng()});
shape[i < 0 ? i + shape.size() : i] = split_dim_rng() * 4;
SplitTester()
.InputShape(shape)
.SplitDimension(i)
.NumSplits(4)
.Test(TensorType_FLOAT32, xnnpack_delegate.get());
}
}
TEST(Split, 3D_to_4_outputs) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 10), std::ref(rng));
auto split_dim_rng =
std::bind(std::uniform_int_distribution<int32_t>(1, 5), std::ref(rng));
for (int i = -3; i < 3; i++) {
std::vector<int32_t> shape({shape_rng(), shape_rng(), shape_rng()});
shape[i < 0 ? i + shape.size() : i] = split_dim_rng() * 4;
SplitTester()
.InputShape(shape)
.SplitDimension(i)
.NumSplits(4)
.Test(TensorType_FLOAT32, xnnpack_delegate.get());
}
}
TEST(Split, 4D_to_4_outputs) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 10), std::ref(rng));
auto split_dim_rng =
std::bind(std::uniform_int_distribution<int32_t>(1, 5), std::ref(rng));
for (int i = -4; i < 4; i++) {
std::vector<int32_t> shape(
{shape_rng(), shape_rng(), shape_rng(), shape_rng()});
shape[i < 0 ? i + shape.size() : i] = split_dim_rng() * 4;
SplitTester()
.InputShape(shape)
.SplitDimension(i)
.NumSplits(4)
.Test(TensorType_FLOAT32, xnnpack_delegate.get());
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/common/tasks/split.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/xnnpack/split_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
18d69bf5-16fa-4ed1-b717-955a47cf33ac | cpp | google/libaddressinput | retriever | cpp/src/retriever.cc | cpp/test/retriever_test.cc | #include "retriever.h"
#include <libaddressinput/callback.h>
#include <libaddressinput/source.h>
#include <libaddressinput/storage.h>
#include <cassert>
#include <cstddef>
#include <memory>
#include <string>
#include "validating_storage.h"
namespace i18n {
namespace addressinput {
namespace {
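// Self-deleting, one-shot helper: consults validated storage first, then
// falls back to the source, serving stale data only if the source fails.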
class Helper {
public:
Helper(const Helper&) = delete;
Helper& operator=(const Helper&) = delete;
Helper(const std::string& key,
const Retriever::Callback& retrieved,
const Source& source,
ValidatingStorage* storage)
: retrieved_(retrieved),
source_(source),
storage_(storage),
fresh_data_ready_(BuildCallback(this, &Helper::OnFreshDataReady)),
validated_data_ready_(
BuildCallback(this, &Helper::OnValidatedDataReady)),
stale_data_() {
assert(storage_ != nullptr);
storage_->Get(key, *validated_data_ready_);
}
private:
~Helper() = default;
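  // Storage callback: valid cached data is returned immediately; otherwise any
  // stale payload is remembered as a fallback and a source fetch begins.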
void OnValidatedDataReady(bool success,
const std::string& key,
std::string* data) {
if (success) {
assert(data != nullptr);
retrieved_(success, key, *data);
delete this;
} else {
if (data != nullptr && !data->empty()) {
stale_data_ = *data;
}
source_.Get(key, *fresh_data_ready_);
}
delete data;
}
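  // Source callback: fresh data is delivered to the caller and handed to
  // storage (Put takes ownership); on failure the stale copy, if any, is
  // served instead.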
void OnFreshDataReady(bool success,
const std::string& key,
std::string* data) {
if (success) {
assert(data != nullptr);
retrieved_(true, key, *data);
storage_->Put(key, data);
data = nullptr;
} else if (!stale_data_.empty()) {
retrieved_(true, key, stale_data_);
} else {
retrieved_(false, key, std::string());
}
delete data;
delete this;
}
const Retriever::Callback& retrieved_;
const Source& source_;
ValidatingStorage* storage_;
const std::unique_ptr<const Source::Callback> fresh_data_ready_;
const std::unique_ptr<const Storage::Callback> validated_data_ready_;
std::string stale_data_;
};
}
Retriever::Retriever(const Source* source, Storage* storage)
: source_(source), storage_(new ValidatingStorage(storage)) {
assert(source_ != nullptr);
assert(storage_ != nullptr);
}
Retriever::~Retriever() = default;
void Retriever::Retrieve(const std::string& key,
const Callback& retrieved) const {
new Helper(key, retrieved, *source_, storage_.get());
}
}
} | #include "retriever.h"
#include <libaddressinput/callback.h>
#include <libaddressinput/null_storage.h>
#include <libaddressinput/storage.h>
#include <cstddef>
#include <memory>
#include <string>
#include <gtest/gtest.h>
#include "mock_source.h"
#include "testdata_source.h"
#define CHECKSUM "dd63dafcbd4d5b28badfcaf86fb6fcdb"
#define DATA "{'foo': 'bar'}"
#define OLD_TIMESTAMP "0"
namespace {
using i18n::addressinput::BuildCallback;
using i18n::addressinput::MockSource;
using i18n::addressinput::NullStorage;
using i18n::addressinput::Retriever;
using i18n::addressinput::Storage;
using i18n::addressinput::TestdataSource;
const char kKey[] = "data/CA/AB--fr";
const char kEmptyData[] = "{}";
const char kStaleData[] = DATA;
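// Payload wrapped the way ValidatingStorage persists it: timestamp and
// checksum headers followed by the data.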
const char kStaleWrappedData[] = "timestamp=" OLD_TIMESTAMP "\n"
"checksum=" CHECKSUM "\n"
DATA;
class RetrieverTest : public testing::Test {
public:
RetrieverTest(const RetrieverTest&) = delete;
RetrieverTest& operator=(const RetrieverTest&) = delete;
protected:
RetrieverTest()
: retriever_(new TestdataSource(false), new NullStorage),
success_(false),
key_(),
data_(),
data_ready_(BuildCallback(this, &RetrieverTest::OnDataReady)) {}
Retriever retriever_;
bool success_;
std::string key_;
std::string data_;
const std::unique_ptr<const Retriever::Callback> data_ready_;
private:
void OnDataReady(bool success,
const std::string& key,
const std::string& data) {
success_ = success;
key_ = key;
data_ = data;
}
};
TEST_F(RetrieverTest, RetrieveData) {
retriever_.Retrieve(kKey, *data_ready_);
EXPECT_TRUE(success_);
EXPECT_EQ(kKey, key_);
EXPECT_FALSE(data_.empty());
EXPECT_NE(kEmptyData, data_);
}
TEST_F(RetrieverTest, ReadDataFromStorage) {
retriever_.Retrieve(kKey, *data_ready_);
retriever_.Retrieve(kKey, *data_ready_);
EXPECT_TRUE(success_);
EXPECT_EQ(kKey, key_);
EXPECT_FALSE(data_.empty());
EXPECT_NE(kEmptyData, data_);
}
TEST_F(RetrieverTest, MissingKeyReturnsEmptyData) {
static const char kMissingKey[] = "junk";
retriever_.Retrieve(kMissingKey, *data_ready_);
EXPECT_TRUE(success_);
EXPECT_EQ(kMissingKey, key_);
EXPECT_EQ(kEmptyData, data_);
}
TEST_F(RetrieverTest, FaultySource) {
Retriever bad_retriever(new MockSource, new NullStorage);
bad_retriever.Retrieve(kKey, *data_ready_);
EXPECT_FALSE(success_);
EXPECT_EQ(kKey, key_);
EXPECT_TRUE(data_.empty());
}
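// A Storage whose cached data always passes checksum validation but carries
// an expired timestamp, i.e. it is stale.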
class StaleStorage : public Storage {
public:
StaleStorage(const StaleStorage&) = delete;
StaleStorage& operator=(const StaleStorage&) = delete;
StaleStorage() : data_updated_(false) {}
~StaleStorage() override = default;
void Get(const std::string& key, const Callback& data_ready) const override {
data_ready(true, key, new std::string(kStaleWrappedData));
}
void Put(const std::string& key, std::string* value) override {
ASSERT_TRUE(value != nullptr);
data_updated_ = true;
delete value;
}
bool data_updated_;
};
TEST_F(RetrieverTest, UseStaleDataWhenSourceFails) {
auto* stale_storage = new StaleStorage;
Retriever resilient_retriever(new MockSource, stale_storage);
resilient_retriever.Retrieve(kKey, *data_ready_);
EXPECT_TRUE(success_);
EXPECT_EQ(kKey, key_);
EXPECT_EQ(kStaleData, data_);
EXPECT_FALSE(stale_storage->data_updated_);
}
TEST_F(RetrieverTest, DoNotUseStaleDataWhenSourceSucceeds) {
auto* stale_storage = new StaleStorage;
Retriever resilient_retriever(new TestdataSource(false), stale_storage);
resilient_retriever.Retrieve(kKey, *data_ready_);
EXPECT_TRUE(success_);
EXPECT_EQ(kKey, key_);
EXPECT_FALSE(data_.empty());
EXPECT_NE(kEmptyData, data_);
EXPECT_NE(kStaleData, data_);
EXPECT_TRUE(stale_storage->data_updated_);
}
} | https://github.com/google/libaddressinput/blob/2610f7b1043d6784ada41392fc9392d1ea09ea07/cpp/src/retriever.cc | https://github.com/google/libaddressinput/blob/2610f7b1043d6784ada41392fc9392d1ea09ea07/cpp/test/retriever_test.cc | 2610f7b1043d6784ada41392fc9392d1ea09ea07 |
2a66fc09-36a5-4a49-b77b-1dfda971ec7d | cpp | tensorflow/tensorflow | gauge | tensorflow/core/lib/monitoring/gauge.h | tensorflow/core/lib/monitoring/gauge_test.cc | #ifndef TENSORFLOW_CORE_LIB_MONITORING_GAUGE_H_
#define TENSORFLOW_CORE_LIB_MONITORING_GAUGE_H_
#include "xla/tsl/lib/monitoring/gauge.h"
#include "tensorflow/core/lib/monitoring/collection_registry.h"
#include "tensorflow/core/lib/monitoring/metric_def.h"
namespace tensorflow {
namespace monitoring {
using tsl::monitoring::Gauge;
using tsl::monitoring::GaugeCell;
}
}
#endif | #include "tensorflow/core/lib/monitoring/gauge.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace monitoring {
namespace {
auto* gauge_with_labels = Gauge<int64_t, 1>::New(
"/tensorflow/test/gauge_with_labels", "Gauge with one label.", "MyLabel");
TEST(LabeledGaugeTest, InitializedWithZero) {
EXPECT_EQ(0, gauge_with_labels->GetCell("Empty")->value());
}
TEST(LabeledGaugeTest, GetCell) {
auto* cell = gauge_with_labels->GetCell("GetCellOp");
EXPECT_EQ(0, cell->value());
cell->Set(1);
EXPECT_EQ(1, cell->value());
auto* same_cell = gauge_with_labels->GetCell("GetCellOp");
EXPECT_EQ(1, same_cell->value());
same_cell->Set(10);
EXPECT_EQ(10, cell->value());
EXPECT_EQ(10, same_cell->value());
}
auto* gauge_without_labels = Gauge<int64_t, 0>::New(
"/tensorflow/test/gauge_without_labels", "Gauge without any labels.");
TEST(UnlabeledGaugeTest, InitializedWithZero) {
EXPECT_EQ(0, gauge_without_labels->GetCell()->value());
}
TEST(UnlabeledGaugeTest, GetCell) {
auto* cell = gauge_without_labels->GetCell();
EXPECT_EQ(0, cell->value());
cell->Set(1);
EXPECT_EQ(1, cell->value());
auto* same_cell = gauge_without_labels->GetCell();
EXPECT_EQ(1, same_cell->value());
same_cell->Set(10);
EXPECT_EQ(10, cell->value());
EXPECT_EQ(10, same_cell->value());
}
auto* string_gauge = Gauge<string, 0>::New("/tensorflow/test/string_gauge",
"Gauge of string value.");
TEST(GaugeOfStringValue, InitializedWithEmptyString) {
EXPECT_EQ("", string_gauge->GetCell()->value());
}
TEST(GaugeOfStringValue, GetCell) {
auto* cell = string_gauge->GetCell();
EXPECT_EQ("", cell->value());
cell->Set("foo");
EXPECT_EQ("foo", cell->value());
auto* same_cell = string_gauge->GetCell();
EXPECT_EQ("foo", cell->value());
same_cell->Set("bar");
EXPECT_EQ("bar", cell->value());
EXPECT_EQ("bar", same_cell->value());
}
auto* bool_gauge =
Gauge<bool, 0>::New("/tensorflow/test/bool_gauge", "Gauge of bool value.");
TEST(GaugeOfBoolValue, InitializedWithFalseValue) {
EXPECT_EQ(false, bool_gauge->GetCell()->value());
}
TEST(GaugeOfBoolValue, GetCell) {
auto* cell = bool_gauge->GetCell();
EXPECT_EQ(false, cell->value());
cell->Set(true);
EXPECT_EQ(true, cell->value());
auto* same_cell = bool_gauge->GetCell();
  EXPECT_EQ(true, same_cell->value());
same_cell->Set(false);
EXPECT_EQ(false, cell->value());
EXPECT_EQ(false, same_cell->value());
}
TEST(LabeledGaugeTest, SameName) {
auto* same_gauge = Gauge<int64_t, 1>::New(
"/tensorflow/test/gauge_with_labels", "Gauge with one label.", "MyLabel");
EXPECT_TRUE(gauge_with_labels->GetStatus().ok());
EXPECT_TRUE(same_gauge->GetStatus().ok());
delete same_gauge;
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/lib/monitoring/gauge.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/lib/monitoring/gauge_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
19496dcf-cbb6-4057-b940-876a7c3c6632 | cpp | tensorflow/tensorflow | conv_ops | tensorflow/compiler/tf2xla/kernels/conv_ops.cc | tensorflow/core/kernels/conv_ops_test.cc | #include <cstdint>
#include <vector>
#include "tensorflow/compiler/tf2xla/kernels/conv_op_helpers.h"
#include "tensorflow/compiler/tf2xla/shape_util.h"
#include "tensorflow/compiler/tf2xla/type_util.h"
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "xla/hlo/builder/lib/constants.h"
#include "xla/hlo/builder/lib/matrix.h"
#include "xla/hlo/builder/xla_builder.h"
#include "xla/literal_util.h"
#include "tensorflow/core/framework/bounds_check.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/numeric_op.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/op_requires.h"
#include "tensorflow/core/framework/ops_util.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_slice.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/util/padding.h"
#include "tensorflow/core/util/tensor_format.h"
namespace tensorflow {
namespace {
class ConvOp : public XlaOpKernel {
public:
explicit ConvOp(OpKernelConstruction* ctx, int num_spatial_dims,
bool depthwise)
: XlaOpKernel(ctx) {
absl::StatusOr<ConvOpAttrs> attrs =
ConvOpAttrs::Create(num_spatial_dims, depthwise, ctx);
OP_REQUIRES_OK(ctx, attrs.status());
attrs_ = attrs.value();
}
void Compile(XlaOpKernelContext* ctx) override {
absl::StatusOr<xla::XlaOp> conv = MakeXlaForwardConvOp(
ctx->op_kernel().type_string(), ctx->Input(0), ctx->Input(1), attrs_);
OP_REQUIRES_OK(ctx, conv.status());
ctx->SetOutput(0, conv.value());
}
protected:
ConvOpAttrs attrs_;
private:
ConvOp(const ConvOp&) = delete;
void operator=(const ConvOp&) = delete;
};
class ConvNDOp : public XlaOpKernel {
public:
explicit ConvNDOp(OpKernelConstruction* ctx) : XlaOpKernel(ctx) {
absl::StatusOr<ConvNDOpAttrs> attrs = ConvNDOpAttrs::Create(ctx);
OP_REQUIRES_OK(ctx, attrs.status());
attrs_ = attrs.value();
}
void Compile(XlaOpKernelContext* ctx) override {
OP_REQUIRES_VALUE(xla::Shape input_shape, ctx, ctx->InputXlaShape(0));
int num_spatial_dims = input_shape.rank() - 1 - attrs_.batch_dims;
OP_REQUIRES_OK(ctx,
CheckValidPadding(attrs_.padding, attrs_.explicit_paddings,
num_spatial_dims + 2,
attrs_.data_format));
ConvOpAttrs forward_attrs;
forward_attrs.depthwise = false;
forward_attrs.num_spatial_dims = num_spatial_dims;
forward_attrs.dilations = attrs_.dilations.empty()
? std::vector<int32>(num_spatial_dims + 2, 1)
: attrs_.dilations;
forward_attrs.strides = attrs_.strides;
forward_attrs.padding = attrs_.padding;
forward_attrs.explicit_paddings = attrs_.explicit_paddings;
forward_attrs.data_format = attrs_.data_format;
xla::XlaOp input = ctx->Input(0);
xla::XlaOp filter = ctx->Input(1);
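    // Normalize the input to exactly one batch dimension: insert a unit batch
    // when batch_dims == 0, or collapse multiple leading batch dims into one.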
if (attrs_.batch_dims == 0) {
xla::Shape expanded_input_shape(input_shape);
for (int i = 0; i < expanded_input_shape.rank() - 1; ++i) {
expanded_input_shape.set_dimensions(i + 1, input_shape.dimensions(i));
}
expanded_input_shape.set_dimensions(0, 1);
input = xla::Reshape(input, expanded_input_shape.dimensions());
} else if (attrs_.batch_dims > 1) {
std::vector<int64_t> to_collapse(attrs_.batch_dims);
for (int i = 0; i < attrs_.batch_dims; ++i) {
to_collapse[i] = i;
}
input = xla::Collapse(input, to_collapse);
}
absl::StatusOr<xla::XlaOp> forward = MakeXlaForwardConvOp(
ctx->op_kernel().type_string(), input, filter, forward_attrs);
OP_REQUIRES_OK(ctx, forward.status());
xla::XlaOp out = forward.value();
auto* builder = out.builder();
OP_REQUIRES_VALUE(xla::Shape out_shape, ctx, builder->GetShape(out));
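    // Restore the caller's original batch dimensions on the output.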
if (attrs_.batch_dims == 0) {
xla::Shape no_batch_shape(out_shape);
no_batch_shape.DeleteDimension(0);
out = xla::Reshape(out, no_batch_shape.dimensions());
} else if (attrs_.batch_dims > 1) {
xla::Shape expanded_out_shape(input_shape);
for (int i = attrs_.batch_dims; i < input_shape.rank(); ++i) {
expanded_out_shape.set_dimensions(
i, out_shape.dimensions(i - (attrs_.batch_dims - 1)));
}
out = xla::Reshape(out, expanded_out_shape.dimensions());
}
ctx->SetOutput(0, out);
}
protected:
ConvNDOpAttrs attrs_;
};
REGISTER_XLA_CONV_OP(Name("Conv"), ConvNDOp);
class Conv2DOp : public ConvOp {
public:
explicit Conv2DOp(OpKernelConstruction* ctx)
: ConvOp(ctx, 2, false) {}
};
REGISTER_XLA_CONV_OP(Name("Conv2D"), Conv2DOp);
class Conv3DOp : public ConvOp {
public:
explicit Conv3DOp(OpKernelConstruction* ctx)
: ConvOp(ctx, 3, false) {}
};
REGISTER_XLA_CONV_OP(Name("Conv3D"), Conv3DOp);
class DepthwiseConv2DOp : public ConvOp {
public:
explicit DepthwiseConv2DOp(OpKernelConstruction* ctx)
: ConvOp(ctx, 2, true) {}
};
REGISTER_XLA_CONV_OP(Name("DepthwiseConv2dNative"), DepthwiseConv2DOp);
class ConvBackpropInputOp : public XlaOpKernel {
public:
explicit ConvBackpropInputOp(OpKernelConstruction* ctx, int num_spatial_dims,
bool depthwise)
: XlaOpKernel(ctx) {
absl::StatusOr<ConvOpAttrs> attrs =
ConvOpAttrs::Create(num_spatial_dims, depthwise, ctx);
OP_REQUIRES_OK(ctx, attrs.status());
attrs_ = attrs.value();
}
void Compile(XlaOpKernelContext* ctx) override {
TensorShape input_tensor_shape;
OP_REQUIRES_OK(
ctx, ctx->ConstantInputAsShape(0, &input_tensor_shape,
xla::ValueInferenceMode::kUpperBound));
xla::Shape input_shape =
TensorShapeToXLAShape(ctx->input_xla_type(1), input_tensor_shape);
OP_REQUIRES(ctx, input_shape.rank() == attrs_.num_spatial_dims + 2,
errors::InvalidArgument(
"The rank of the specified input shape must be "
"num_spatial_dims + 2. Expected ",
attrs_.num_spatial_dims + 2, " got ", input_shape.rank()));
xla::XlaOp input_sizes = ctx->Input(0);
absl::StatusOr<xla::XlaOp> in_backprop = MakeXlaBackpropInputConvOp(
ctx->op_kernel().type_string(), input_shape, ctx->Input(1),
ctx->Input(2), attrs_, &input_sizes);
OP_REQUIRES_OK(ctx, in_backprop.status());
ctx->SetOutput(0, in_backprop.value());
}
protected:
ConvOpAttrs attrs_;
private:
ConvBackpropInputOp(const ConvBackpropInputOp&) = delete;
void operator=(const ConvBackpropInputOp&) = delete;
};
class Conv2DBackpropInputOp : public ConvBackpropInputOp {
public:
explicit Conv2DBackpropInputOp(OpKernelConstruction* ctx)
: ConvBackpropInputOp(ctx, 2, false) {}
};
REGISTER_XLA_CONV_OP(
Name("Conv2DBackpropInput").CompileTimeConstantInput("input_sizes"),
Conv2DBackpropInputOp);
class Conv3DBackpropInputOp : public ConvBackpropInputOp {
public:
explicit Conv3DBackpropInputOp(OpKernelConstruction* ctx)
: ConvBackpropInputOp(ctx, 3, false) {}
};
REGISTER_XLA_CONV_OP(
Name("Conv3DBackpropInputV2").CompileTimeConstantInput("input_sizes"),
Conv3DBackpropInputOp);
class DepthwiseConv2DBackpropInputOp : public ConvBackpropInputOp {
public:
explicit DepthwiseConv2DBackpropInputOp(OpKernelConstruction* ctx)
: ConvBackpropInputOp(ctx, 2, true) {}
};
REGISTER_XLA_CONV_OP(Name("DepthwiseConv2dNativeBackpropInput")
.CompileTimeConstantInput("input_sizes"),
DepthwiseConv2DBackpropInputOp);
class ConvBackpropFilterOp : public XlaOpKernel {
public:
explicit ConvBackpropFilterOp(OpKernelConstruction* ctx, int num_spatial_dims,
bool depthwise)
: XlaOpKernel(ctx) {
absl::StatusOr<ConvOpAttrs> attrs =
ConvOpAttrs::Create(num_spatial_dims, depthwise, ctx);
OP_REQUIRES_OK(ctx, attrs.status());
attrs_ = attrs.value();
}
void Compile(XlaOpKernelContext* ctx) override {
TensorShape filter_tensor_shape;
OP_REQUIRES_OK(
ctx, ctx->ConstantInputAsShape(1, &filter_tensor_shape,
xla::ValueInferenceMode::kUpperBound));
xla::Shape filter_shape =
TensorShapeToXLAShape(ctx->input_xla_type(0), filter_tensor_shape);
absl::StatusOr<xla::XlaOp> filter_backprop = MakeXlaBackpropFilterConvOp(
ctx->op_kernel().type_string(), ctx->Input(0), filter_shape,
ctx->Input(2), attrs_);
OP_REQUIRES_OK(ctx, filter_backprop.status());
ctx->SetOutput(0, filter_backprop.value());
}
protected:
ConvOpAttrs attrs_;
private:
ConvBackpropFilterOp(const ConvBackpropFilterOp&) = delete;
void operator=(const ConvBackpropFilterOp&) = delete;
};
class Conv2DBackpropFilterOp : public ConvBackpropFilterOp {
public:
explicit Conv2DBackpropFilterOp(OpKernelConstruction* ctx)
: ConvBackpropFilterOp(ctx, 2, false) {
}
};
REGISTER_XLA_CONV_OP(
Name("Conv2DBackpropFilter").CompileTimeConstantInput("filter_sizes"),
Conv2DBackpropFilterOp);
class Conv3DBackpropFilterOp : public ConvBackpropFilterOp {
public:
explicit Conv3DBackpropFilterOp(OpKernelConstruction* ctx)
: ConvBackpropFilterOp(ctx, 3, false) {
}
};
REGISTER_XLA_CONV_OP(
Name("Conv3DBackpropFilterV2").CompileTimeConstantInput("filter_sizes"),
Conv3DBackpropFilterOp);
class DepthwiseConv2DBackpropFilterOp : public ConvBackpropFilterOp {
public:
explicit DepthwiseConv2DBackpropFilterOp(OpKernelConstruction* ctx)
: ConvBackpropFilterOp(ctx, 2, true) {}
};
REGISTER_XLA_CONV_OP(Name("DepthwiseConv2dNativeBackpropFilter")
.CompileTimeConstantInput("filter_sizes"),
DepthwiseConv2DBackpropFilterOp);
}
} | #include <cmath>
#include <optional>
#include <string>
#include <type_traits>
#include <vector>
#include "absl/algorithm/container.h"
#include "tensorflow/cc/ops/const_op.h"
#include "tensorflow/cc/ops/nn_ops_internal.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/kernel_shape_util.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/kernels/conv_ops_gpu.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/tensor_float_32_utils.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
#include "tensorflow/core/protobuf/rewriter_config.pb.h"
#include "tensorflow/core/public/session.h"
#include "tensorflow/core/util/tensor_format.h"
namespace tensorflow {
class FusedResizePadConvOpTest : public OpsTestBase {
protected:
template <typename T>
void HandwrittenConv(DataType dtype) {
const int stride = 1;
TF_EXPECT_OK(NodeDefBuilder("fused_resize_op", "FusedResizeAndPadConv2D")
.Input(FakeInput(dtype))
.Input(FakeInput(DT_INT32))
.Input(FakeInput(DT_INT32))
.Input(FakeInput(dtype))
.Attr("T", dtype)
.Attr("resize_align_corners", false)
.Attr("mode", "REFLECT")
.Attr("strides", {1, stride, stride, 1})
.Attr("padding", "SAME")
.Finalize(node_def()));
TF_EXPECT_OK(InitOp());
const int depth = 1;
const int image_width = 4;
const int image_height = 3;
const int image_batch_count = 1;
Tensor image(dtype, {image_batch_count, image_height, image_width, depth});
test::FillValues<T>(&image, {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12});
const int filter_size = 3;
const int filter_count = 1;
Tensor filter(dtype, {filter_size, filter_size, depth, filter_count});
test::FillValues<T>(&filter, {1, 4, 7, 2, 5, 8, 3, 6, 9});
const int resized_width = image_width;
const int resized_height = image_height;
const int top_padding = 0;
const int bottom_padding = 0;
const int left_padding = 0;
const int right_padding = 0;
AddInputFromArray<T>(image.shape(), image.flat<T>());
AddInputFromArray<int32>(TensorShape({2}), {resized_height, resized_width});
AddInputFromArray<int32>(
TensorShape({4, 2}),
{0, 0, top_padding, bottom_padding, left_padding, right_padding, 0, 0});
AddInputFromArray<T>(filter.shape(), filter.flat<T>());
TF_ASSERT_OK(RunOpKernel());
const int expected_width = image_width;
const int expected_height = image_height * filter_count;
Tensor expected(dtype, TensorShape({image_batch_count, expected_height,
expected_width, filter_count}));
test::FillValues<T>(
&expected, {105, 150, 183, 95, 235, 312, 357, 178, 187, 234, 261, 121});
const Tensor& output = *GetOutput(0);
test::ExpectTensorNear<T>(expected, output, 1e-5);
}
template <typename T>
void CompareFusedAndSeparate(int input_width, int input_height,
int input_depth, int resize_width,
int resize_height, int y_padding, int x_padding,
int filter_size, int filter_count,
bool resize_align_corners,
const string& pad_mode, int stride,
const string& padding, DataType dtype) {
Scope root = tensorflow::Scope::NewRootScope();
using namespace ::tensorflow::ops;
Tensor input_data(DT_FLOAT,
TensorShape({1, input_height, input_width, input_depth}));
test::FillIota<float>(&input_data, 1.0f);
Output input =
Const(root.WithOpName("input"), Input::Initializer(input_data));
Output casted_input = Cast(root.WithOpName("casted_input"), input, dtype);
Tensor filter_data(DT_FLOAT, TensorShape({filter_size, filter_size,
input_depth, filter_count}));
test::FillIota<float>(&filter_data, 1.0f);
Output filter =
Const(root.WithOpName("filter"), Input::Initializer(filter_data));
Output casted_filter =
Cast(root.WithOpName("casted_filter"), filter, dtype);
Output resize_size =
Const(root.WithOpName("resize_size"), {resize_height, resize_width});
Output resize =
ResizeBilinear(root.WithOpName("resize"), input, resize_size,
ResizeBilinear::AlignCorners(resize_align_corners));
Output casted_resize = Cast(root.WithOpName("cast"), resize, dtype);
Output paddings =
Const(root.WithOpName("paddings"),
{{0, 0}, {y_padding, y_padding}, {x_padding, x_padding}, {0, 0}});
Output mirror_pad = MirrorPad(root.WithOpName("mirror_pad"), casted_resize,
paddings, pad_mode);
Output conv = Conv2D(root.WithOpName("conv"), mirror_pad, casted_filter,
{1, stride, stride, 1}, padding);
Output fused_conv = FusedResizeAndPadConv2D(
root.WithOpName("fused_conv"), casted_input, resize_size, paddings,
casted_filter, pad_mode, {1, stride, stride, 1}, padding,
FusedResizeAndPadConv2D::ResizeAlignCorners(resize_align_corners));
tensorflow::GraphDef graph;
TF_ASSERT_OK(root.ToGraphDef(&graph));
std::unique_ptr<tensorflow::Session> session(
tensorflow::NewSession(tensorflow::SessionOptions()));
TF_ASSERT_OK(session->Create(graph));
std::vector<Tensor> unfused_tensors;
TF_ASSERT_OK(session->Run({}, {"conv"}, {}, &unfused_tensors));
std::vector<Tensor> fused_tensors;
TF_ASSERT_OK(session->Run({}, {"fused_conv"}, {}, &fused_tensors));
test::ExpectClose(unfused_tensors[0], fused_tensors[0]);
}
template <typename T>
void CompareFusedPadOnlyAndSeparate(int input_width, int input_height,
int input_depth, int y_padding,
int x_padding, int filter_size,
int filter_count, const string& pad_mode,
int stride, const string& padding,
DataType dtype) {
Scope root = tensorflow::Scope::NewRootScope();
using namespace ::tensorflow::ops;
Tensor input_data(DT_FLOAT,
TensorShape({1, input_height, input_width, input_depth}));
test::FillIota<float>(&input_data, 1.0f);
Output input =
Const(root.WithOpName("input"), Input::Initializer(input_data));
Output casted_input = Cast(root.WithOpName("casted_input"), input, dtype);
Tensor filter_data(DT_FLOAT, TensorShape({filter_size, filter_size,
input_depth, filter_count}));
test::FillIota<float>(&filter_data, 1.0f);
Output filter =
Const(root.WithOpName("filter"), Input::Initializer(filter_data));
Output casted_filter =
Cast(root.WithOpName("casted_filter"), filter, dtype);
Output paddings =
Const(root.WithOpName("paddings"),
{{0, 0}, {y_padding, y_padding}, {x_padding, x_padding}, {0, 0}});
Output mirror_pad = MirrorPad(root.WithOpName("mirror_pad"), casted_input,
paddings, pad_mode);
Output conv = Conv2D(root.WithOpName("conv"), mirror_pad, casted_filter,
{1, stride, stride, 1}, padding);
Output fused_conv = FusedPadConv2D(
root.WithOpName("fused_conv"), casted_input, paddings, casted_filter,
pad_mode, {1, stride, stride, 1}, padding);
tensorflow::GraphDef graph;
TF_ASSERT_OK(root.ToGraphDef(&graph));
std::unique_ptr<tensorflow::Session> session(
tensorflow::NewSession(tensorflow::SessionOptions()));
TF_ASSERT_OK(session->Create(graph));
std::vector<Tensor> unfused_tensors;
TF_ASSERT_OK(session->Run({}, {"conv"}, {}, &unfused_tensors));
std::vector<Tensor> fused_tensors;
TF_ASSERT_OK(session->Run({}, {"fused_conv"}, {}, &fused_tensors));
test::ExpectClose(unfused_tensors[0], fused_tensors[0]);
}
};
TEST_F(FusedResizePadConvOpTest, HandwrittenConvHalf) {
HandwrittenConv<Eigen::half>(DT_HALF);
}
TEST_F(FusedResizePadConvOpTest, HandwrittenConvFloat) {
HandwrittenConv<float>(DT_FLOAT);
}
TEST_F(FusedResizePadConvOpTest, HandwrittenConvDouble) {
HandwrittenConv<double>(DT_DOUBLE);
}
TEST_F(FusedResizePadConvOpTest, IdentityComparativeHalf) {
CompareFusedAndSeparate<Eigen::half>(10, 10, 1, 10, 10, 0, 0, 1, 1, false,
"REFLECT", 1, "SAME", DT_HALF);
}
TEST_F(FusedResizePadConvOpTest, IdentityComparativeFloat) {
CompareFusedAndSeparate<float>(10, 10, 1, 10, 10, 0, 0, 1, 1, false,
"REFLECT", 1, "SAME", DT_FLOAT);
}
TEST_F(FusedResizePadConvOpTest, IdentityComparativeDouble) {
CompareFusedAndSeparate<double>(10, 10, 1, 10, 10, 0, 0, 1, 1, false,
"REFLECT", 1, "SAME", DT_DOUBLE);
}
TEST_F(FusedResizePadConvOpTest, ConvOnlyComparative) {
CompareFusedAndSeparate<float>(10, 10, 3, 10, 10, 0, 0, 4, 4, false,
"REFLECT", 1, "SAME", DT_FLOAT);
}
TEST_F(FusedResizePadConvOpTest, ResizeOnlyComparative) {
CompareFusedAndSeparate<float>(10, 10, 1, 20, 20, 0, 0, 1, 1, false,
"REFLECT", 1, "SAME", DT_FLOAT);
}
TEST_F(FusedResizePadConvOpTest, ResizeAndConvComparative) {
CompareFusedAndSeparate<float>(2, 2, 4, 4, 2, 0, 0, 2, 2, false, "REFLECT", 1,
"SAME", DT_FLOAT);
}
TEST_F(FusedResizePadConvOpTest, ResizeAlignAndConvComparative) {
CompareFusedAndSeparate<float>(2, 2, 4, 4, 2, 0, 0, 2, 2, true, "REFLECT", 1,
"SAME", DT_FLOAT);
}
TEST_F(FusedResizePadConvOpTest, ResizeAndConvStridedComparative) {
CompareFusedAndSeparate<float>(2, 2, 4, 4, 2, 0, 0, 2, 2, false, "REFLECT", 2,
"SAME", DT_FLOAT);
}
TEST_F(FusedResizePadConvOpTest, ResizeAlignAndConvValidComparative) {
CompareFusedAndSeparate<float>(2, 2, 4, 4, 2, 0, 0, 2, 2, true, "REFLECT", 1,
"VALID", DT_FLOAT);
}
TEST_F(FusedResizePadConvOpTest, PadOnlyComparative) {
CompareFusedAndSeparate<float>(4, 4, 1, 4, 4, 2, 2, 1, 1, false, "REFLECT", 1,
"SAME", DT_FLOAT);
}
TEST_F(FusedResizePadConvOpTest, PadOnlyWithChannelsComparative) {
CompareFusedAndSeparate<float>(4, 4, 3, 4, 4, 2, 2, 1, 1, false, "REFLECT", 1,
"SAME", DT_FLOAT);
}
TEST_F(FusedResizePadConvOpTest, ResizeAndPadComparative) {
CompareFusedAndSeparate<float>(4, 4, 1, 6, 6, 2, 2, 1, 1, false, "REFLECT", 1,
"SAME", DT_FLOAT);
}
TEST_F(FusedResizePadConvOpTest, PadOnlySymmetricComparative) {
CompareFusedAndSeparate<float>(4, 4, 1, 4, 4, 2, 2, 1, 1, false, "SYMMETRIC",
1, "SAME", DT_FLOAT);
}
TEST_F(FusedResizePadConvOpTest, ResizeAndPadSymmetricComparative) {
CompareFusedAndSeparate<float>(4, 4, 3, 6, 6, 2, 2, 1, 1, false, "SYMMETRIC",
1, "SAME", DT_FLOAT);
}
TEST_F(FusedResizePadConvOpTest, ResizeAndPadSymmetricComparativeLarge) {
CompareFusedAndSeparate<float>(1000, 1000, 3, 1006, 1006, 2, 2, 1, 1, false,
"SYMMETRIC", 1, "SAME", DT_FLOAT);
}
TEST_F(FusedResizePadConvOpTest, NoResizeIdentityComparativeHalf) {
CompareFusedPadOnlyAndSeparate<Eigen::half>(10, 10, 1, 0, 0, 1, 1, "REFLECT",
1, "SAME", DT_HALF);
}
TEST_F(FusedResizePadConvOpTest, NoResizeIdentityComparativeBFloat16) {
CompareFusedPadOnlyAndSeparate<bfloat16>(10, 10, 1, 0, 0, 1, 1, "REFLECT", 1,
"SAME", DT_BFLOAT16);
}
TEST_F(FusedResizePadConvOpTest, NoResizeIdentityComparativeFloat) {
CompareFusedPadOnlyAndSeparate<float>(10, 10, 1, 0, 0, 1, 1, "REFLECT", 1,
"SAME", DT_FLOAT);
}
TEST_F(FusedResizePadConvOpTest, NoResizeIdentityComparativeDouble) {
CompareFusedPadOnlyAndSeparate<double>(10, 10, 1, 0, 0, 1, 1, "REFLECT", 1,
"SAME", DT_DOUBLE);
}
TEST_F(FusedResizePadConvOpTest, NoResizeConvOnlyComparative) {
CompareFusedPadOnlyAndSeparate<float>(10, 10, 3, 0, 0, 4, 4, "REFLECT", 1,
"SAME", DT_FLOAT);
}
TEST_F(FusedResizePadConvOpTest, NoResizePadOnlyComparative) {
CompareFusedPadOnlyAndSeparate<float>(4, 4, 1, 2, 2, 1, 1, "REFLECT", 1,
"SAME", DT_FLOAT);
}
TEST_F(FusedResizePadConvOpTest, NoResizePadOnlyWithChannelsComparative) {
CompareFusedPadOnlyAndSeparate<float>(4, 4, 3, 2, 2, 1, 1, "REFLECT", 1,
"SAME", DT_FLOAT);
}
TEST_F(FusedResizePadConvOpTest, NoResizePadOnlySymmetricComparative) {
CompareFusedPadOnlyAndSeparate<float>(4, 4, 1, 2, 2, 1, 1, "SYMMETRIC", 1,
"SAME", DT_FLOAT);
}
class ConvOpTest : public OpsTestBase {
protected:
void HandwrittenConv() {
const int stride = 1;
TF_EXPECT_OK(NodeDefBuilder("conv_op", "Conv2D")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Attr("T", DT_FLOAT)
.Attr("strides", {1, stride, stride, 1})
.Attr("padding", "SAME")
.Finalize(node_def()));
TF_EXPECT_OK(InitOp());
const int depth = 1;
const int image_width = 4;
const int image_height = 3;
const int image_batch_count = 1;
Tensor image(DT_FLOAT,
{image_batch_count, image_height, image_width, depth});
test::FillValues<float>(&image, {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12});
const int filter_size = 3;
const int filter_count = 1;
Tensor filter(DT_FLOAT, {filter_size, filter_size, depth, filter_count});
test::FillValues<float>(&filter, {1, 4, 7, 2, 5, 8, 3, 6, 9});
AddInputFromArray<float>(image.shape(), image.flat<float>());
AddInputFromArray<float>(filter.shape(), filter.flat<float>());
TF_ASSERT_OK(RunOpKernel());
const int expected_width = image_width;
const int expected_height = image_height * filter_count;
Tensor expected(DT_FLOAT, TensorShape({image_batch_count, expected_height,
expected_width, filter_count}));
test::FillValues<float>(
&expected, {105, 150, 183, 95, 235, 312, 357, 178, 187, 234, 261, 121});
const Tensor& output = *GetOutput(0);
test::ExpectTensorNear<float>(expected, output, 1e-5);
}
void AnisotropicStrides() {
const int stride_width = 3;
const int stride_height = 1;
TF_EXPECT_OK(NodeDefBuilder("conv_op", "Conv2D")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Attr("T", DT_FLOAT)
.Attr("strides", {1, stride_height, stride_width, 1})
.Attr("padding", "VALID")
.Finalize(node_def()));
TF_EXPECT_OK(InitOp());
const int depth = 1;
const int image_width = 6;
const int image_height = 3;
const int image_batch_count = 1;
Tensor image(DT_FLOAT,
{image_batch_count, image_height, image_width, depth});
test::FillValues<float>(&image, {
3, 2, 1, -1, -2, -3,
4, 3, 2, -2, -3, -4,
5, 4, 3, -3, -4, -5,
});
const int filter_size = 2;
const int filter_count = 1;
Tensor filter(DT_FLOAT, {filter_size, filter_size, depth, filter_count});
test::FillValues<float>(&filter, {
1, 2,
3, 4,
});
AddInputFromArray<float>(image.shape(), image.flat<float>());
AddInputFromArray<float>(filter.shape(), filter.flat<float>());
TF_ASSERT_OK(RunOpKernel());
const int expected_width = 2;
const int expected_height = 2;
Tensor expected(DT_FLOAT, TensorShape({image_batch_count, expected_height,
expected_width, filter_count}));
test::FillValues<float>(&expected, {31, -23, 41, -33});
const Tensor& output = *GetOutput(0);
test::ExpectTensorNear<float>(expected, output, 1e-5);
}
};
TEST_F(ConvOpTest, HandwrittenConv) { HandwrittenConv(); }
TEST_F(ConvOpTest, AnisotropicStride) { AnisotropicStrides(); }
template <typename T>
class FusedConv2DOpTest : public OpsTestBase {
protected:
static constexpr int kDepth = 4;
static constexpr int kImageWidth = 32;
static constexpr int kImageHeight = 32;
static constexpr int kImageBatchCount = 8;
static constexpr bool kIsInt8 =
std::is_same<T, int8>::value || std::is_same<T, qint8>::value;
using BiasAddGraphRunner =
std::function<void(const Tensor& input_data, const Tensor& filter_data,
const Tensor& bias_data, Tensor* out)>;
using BatchNormGraphRunner = std::function<void(
const Tensor& input_data, const Tensor& filter_data,
const Tensor& scale_data, const Tensor& offset_data,
const Tensor& mean_data, const Tensor& variance_data, Tensor* out)>;
static bool HasGpuDevice() {
tensorflow::SessionOptions session_options;
std::unique_ptr<tensorflow::Session> session(
tensorflow::NewSession(session_options));
std::vector<DeviceAttributes> available_devices;
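    // TF_ASSERT_OK must expand in a void-returning context, hence the
    // immediately invoked lambda.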
[&]() { TF_ASSERT_OK(session->ListDevices(&available_devices)); }();
const bool has_gpu_device =
absl::c_any_of(available_devices, [](const DeviceAttributes& device) {
return device.device_type() == DEVICE_GPU;
});
return has_gpu_device;
}
void RunAndFetch(const tensorflow::Scope& root, const std::string& fetch,
Tensor* output, bool allow_gpu_device,
const NodeDef* fetch_node = nullptr) {
tensorflow::GraphDef graph;
TF_ASSERT_OK(root.ToGraphDef(&graph));
if (fetch_node) {
*graph.add_node() = *fetch_node;
}
tensorflow::SessionOptions session_options;
session_options.config.mutable_graph_options()
->mutable_optimizer_options()
->set_opt_level(OptimizerOptions::L0);
tensorflow::RewriterConfig* cfg =
session_options.config.mutable_graph_options()
->mutable_rewrite_options();
cfg->set_constant_folding(tensorflow::RewriterConfig::OFF);
cfg->set_layout_optimizer(tensorflow::RewriterConfig::OFF);
cfg->set_remapping(tensorflow::RewriterConfig::OFF);
std::unique_ptr<tensorflow::Session> session(
tensorflow::NewSession(session_options));
const bool has_gpu_device = HasGpuDevice();
const bool place_all_on_gpu = allow_gpu_device && has_gpu_device;
const std::string device =
place_all_on_gpu ? "/device:GPU:0" : "/device:CPU:0";
for (NodeDef& mutable_node : *graph.mutable_node()) {
mutable_node.set_device(device);
}
TF_ASSERT_OK(session->Create(graph));
std::vector<Tensor> unfused_tensors;
TF_ASSERT_OK(session->Run({}, {fetch}, {}, &unfused_tensors));
*output = unfused_tensors[0];
}
void RunConv2DWithBias(const Tensor& input_data, const Tensor& filter_data,
const Tensor& bias_data, const std::string& padding,
const std::vector<int>& explicit_paddings,
Tensor* output, bool allow_gpu_device = false,
int stride = 1) {
RunConv2DWithBiasAndActivation(input_data, filter_data, bias_data,
std::nullopt, padding, explicit_paddings,
output, allow_gpu_device, stride);
}
template <typename From, typename To>
static Tensor Cast(
const Tensor& from, const std::function<To(From)>& cast = [](From v) {
return static_cast<To>(v);
}) {
Tensor to(DataTypeToEnum<To>::v(), from.shape());
for (int i = 0; i < from.NumElements(); ++i) {
to.flat<To>()(i) = cast(from.flat<From>()(i));
}
return to;
}
void RunConv2DWithBiasAndActivation(
Tensor input_data, Tensor filter_data, Tensor bias_data,
std::optional<std::string> activation_type, const std::string& padding,
const std::vector<int>& explicit_paddings, Tensor* output,
bool allow_gpu_device = false, int stride = 1) {
Scope root = tensorflow::Scope::NewRootScope();
if (kIsInt8) {
input_data = Cast<T, float>(input_data);
filter_data = Cast<T, float>(filter_data);
bias_data = Cast<T, float>(bias_data);
}
ops::Conv2D conv = ops::Conv2D(
root.WithOpName("conv"),
ops::Const(root.WithOpName("input"), Input::Initializer(input_data)),
ops::Const(root.WithOpName("filter"), Input::Initializer(filter_data)),
{1, stride, stride, 1}, padding,
ops::Conv2D::Attrs().ExplicitPaddings(explicit_paddings));
ops::BiasAdd with_bias = ops::BiasAdd(
root.WithOpName("with_bias"), conv,
ops::Const(root.WithOpName("bias"), Input::Initializer(bias_data)));
if (activation_type.has_value()) {
if (*activation_type == "Relu") {
ops::Relu(root.WithOpName("with_activation"), with_bias);
} else if (*activation_type == "Relu6") {
ops::Relu6(root.WithOpName("with_activation"), with_bias);
} else if (*activation_type == "Elu") {
ops::Elu(root.WithOpName("with_activation"), with_bias);
} else if (*activation_type == "LeakyRelu") {
ops::internal::LeakyRelu(root.WithOpName("with_activation"), with_bias);
} else {
ops::Identity(root.WithOpName("with_activation"), with_bias);
}
}
RunAndFetch(root,
activation_type.has_value() ? "with_activation" : "with_bias",
output, allow_gpu_device);
if (kIsInt8) {
*output = Cast<float, T>(
*output, [](float v) { return static_cast<T>(std::lround(v)); });
}
}
void RunConv2DWithBatchNorm(
const Tensor& input_data, const Tensor& filter_data,
const Tensor& scale_data, const Tensor& offset_data,
const Tensor& mean_data, const Tensor& variance_data,
const std::string& padding, const std::vector<int>& explicit_paddings,
Tensor* output, bool allow_gpu_device = false, int stride = 1) {
Scope root = tensorflow::Scope::NewRootScope();
ops::Conv2D conv = ops::Conv2D(
root.WithOpName("conv"),
ops::Const(root.WithOpName("input"), Input::Initializer(input_data)),
ops::Const(root.WithOpName("filter"), Input::Initializer(filter_data)),
{1, stride, stride, 1}, padding,
ops::Conv2D::Attrs().ExplicitPaddings(explicit_paddings));
ops::FusedBatchNorm::Attrs attr;
attr = attr.IsTraining(false);
ops::FusedBatchNorm with_fused_batch_norm = ops::FusedBatchNorm(
root.WithOpName("with_fused_batch_norm"), conv,
ops::Const(root.WithOpName("scale"), Input::Initializer(scale_data)),
ops::Const(root.WithOpName("offset"), Input::Initializer(offset_data)),
ops::Const(root.WithOpName("mean"), Input::Initializer(mean_data)),
ops::Const(root.WithOpName("var"), Input::Initializer(variance_data)),
attr);
RunAndFetch(root, "with_fused_batch_norm", output, allow_gpu_device);
}
void RunConv2DWithBatchNormAndActivation(
const Tensor& input_data, const Tensor& filter_data,
const Tensor& scale_data, const Tensor& offset_data,
const Tensor& mean_data, const Tensor& variance_data,
const string& activation_type, const std::string& padding,
const std::vector<int>& explicit_paddings, Tensor* output,
bool allow_gpu_device = false, int stride = 1) {
Scope root = tensorflow::Scope::NewRootScope();
ops::Conv2D conv = ops::Conv2D(
root.WithOpName("conv"),
ops::Const(root.WithOpName("input"), Input::Initializer(input_data)),
ops::Const(root.WithOpName("filter"), Input::Initializer(filter_data)),
{1, stride, stride, 1}, padding,
ops::Conv2D::Attrs().ExplicitPaddings(explicit_paddings));
ops::FusedBatchNorm::Attrs attr;
attr = attr.IsTraining(false);
ops::FusedBatchNorm with_fused_batch_norm = ops::FusedBatchNorm(
root.WithOpName("with_fused_batch_norm"), conv,
ops::Const(root.WithOpName("scale"), Input::Initializer(scale_data)),
ops::Const(root.WithOpName("offset"), Input::Initializer(offset_data)),
ops::Const(root.WithOpName("mean"), Input::Initializer(mean_data)),
ops::Const(root.WithOpName("var"), Input::Initializer(variance_data)),
attr);
if (activation_type == "Relu") {
ops::Relu(root.WithOpName("with_activation"), with_fused_batch_norm.y);
} else if (activation_type == "Relu6") {
ops::Relu6(root.WithOpName("with_activation"), with_fused_batch_norm.y);
} else if (activation_type == "Elu") {
ops::Elu(root.WithOpName("with_activation"), with_fused_batch_norm.y);
} else if (activation_type == "LeakyRelu") {
ops::internal::LeakyRelu(root.WithOpName("with_activation"),
with_fused_batch_norm.y);
} else {
ops::Identity(root.WithOpName("with_activation"),
with_fused_batch_norm.y);
}
RunAndFetch(root, "with_activation", output, allow_gpu_device);
}
void RunFusedConv2DOp(Tensor input_data, Tensor filter_data,
std::vector<Tensor> args_data,
const std::vector<std::string>& fused_ops,
const std::string& padding,
const std::vector<int>& explicit_paddings,
Tensor* output, bool allow_gpu_device = false,
int stride = 1) {
Scope root = tensorflow::Scope::NewRootScope();
DataType dtype = DataTypeToEnum<T>::v();
const bool has_gpu_device = HasGpuDevice();
const bool has_extra_parameters = kIsInt8;
const bool has_float_bias = kIsInt8;
DataType dtype_args =
has_float_bias ? DataTypeToEnum<float>::v() : DataTypeToEnum<T>::v();
const int n = GetTensorDim(input_data, FORMAT_NHWC, 'N');
const int h = GetTensorDim(input_data, FORMAT_NHWC, 'H');
const int w = GetTensorDim(input_data, FORMAT_NHWC, 'W');
const int kh = GetFilterDim(filter_data, FORMAT_HWIO, 'H');
const int kw = GetFilterDim(filter_data, FORMAT_HWIO, 'W');
const int ic = GetFilterDim(filter_data, FORMAT_HWIO, 'I');
const int oc = GetFilterDim(filter_data, FORMAT_HWIO, 'O');
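    // int8 on GPU uses the vectorized NCHW_VECT_C / OIHW_VECT_I layouts
    // (4 channels per vector); all other configurations keep NHWC / HWIO.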
const int v = (kIsInt8 && allow_gpu_device && has_gpu_device) ? 4 : 1;
if (v > 1) {
{
TensorShape shape;
TF_EXPECT_OK(
ShapeFromFormatWithStatus(FORMAT_NCHW_VECT_C, n, h, w, ic, &shape));
Tensor input_data_nchwv(dtype, shape);
input_data_nchwv.tensor<T, 5>() =
input_data.shaped<T, 5>({n, h, w, ic / v, v})
.shuffle(Eigen::array<int, 5>{0, 3, 1, 2, 4});
input_data = input_data_nchwv;
}
{
Tensor filter_data_oihwv(
dtype,
ShapeFromFilterTensorFormat(FORMAT_OIHW_VECT_I, kh, kw, ic, oc));
filter_data_oihwv.tensor<T, 5>() =
filter_data.shaped<T, 4>({kh, kw, ic, oc})
.reshape(Eigen::array<int, 5>{kh, kw, ic / v, v, oc})
.shuffle(Eigen::array<int, 5>{4, 2, 0, 1, 3});
filter_data = filter_data_oihwv;
}
}
if (has_float_bias) {
for (Tensor& arg_data : args_data) {
TensorShape shape = arg_data.shape();
Tensor arg_data_float = Tensor(dtype_args, shape);
for (int index = 0; index < arg_data.NumElements(); index++) {
        int8 arg_value = *(reinterpret_cast<int8*>(arg_data.data()) + index);
        *(reinterpret_cast<float*>(arg_data_float.data()) + index) =
            static_cast<float>(arg_value);
}
arg_data = arg_data_float;
}
}
int num_args = static_cast<int>(args_data.size());
Output input =
ops::Const(root.WithOpName("input"), Input::Initializer(input_data));
Output filter =
ops::Const(root.WithOpName("filter"), Input::Initializer(filter_data));
std::vector<NodeDefBuilder::NodeOut> args;
std::vector<DataType> args_dtypes;
for (int i = 0; i < num_args; ++i) {
Output arg = ops::Const(root.WithOpName(absl::StrCat("arg", i)),
Input::Initializer(args_data[i]));
args.emplace_back(arg.name(), 0, dtype_args);
args_dtypes.emplace_back(dtype_args);
}
Tensor side_input(dtype);
if (has_extra_parameters) {
Padding padding_type;
ASSERT_TRUE(GetPaddingFromString(padding, &padding_type).ok());
int64_t oh, oh_padding;
ASSERT_TRUE(GetWindowedOutputSize(h, kh, 1, stride,
padding_type, &oh, &oh_padding)
.ok());
int64_t ow, ow_padding;
ASSERT_TRUE(GetWindowedOutputSize(w, kw, 1, stride,
padding_type, &ow, &ow_padding)
.ok());
TensorShape shape;
TF_EXPECT_OK(
ShapeFromFormatWithStatus(FORMAT_NCHW_VECT_C, n, oh, ow, oc, &shape));
side_input = Tensor(dtype, shape);
side_input.flat<T>() = side_input.flat<T>().setConstant(0);
}
Tensor conv_input_scale(DT_FLOAT, {1});
Tensor side_input_scale(DT_FLOAT, {1});
std::vector<NodeDefBuilder::NodeOut> host_args;
int num_host_args = 0;
if (has_extra_parameters) {
++num_args;
Output arg2 = ops::Const(root.WithOpName("side_input"),
Input::Initializer(side_input));
args.emplace_back(arg2.name(), 0, dtype);
args_dtypes.emplace_back(dtype);
++num_host_args;
conv_input_scale.scalar<float>()() = 1;
Output arg3 = ops::Const(root.WithOpName("conv_input_scale"),
Input::Initializer(conv_input_scale));
host_args.emplace_back(arg3.name(), 0, DT_FLOAT);
++num_host_args;
side_input_scale.scalar<float>()() = 1;
Output arg4 = ops::Const(root.WithOpName("side_input_scale"),
Input::Initializer(side_input_scale));
host_args.emplace_back(arg4.name(), 0, DT_FLOAT);
}
NodeDef fused_conv2d;
TF_EXPECT_OK(NodeDefBuilder("fused_conv", "_FusedConv2D")
.Input({input.name(), 0, dtype})
.Input({filter.name(), 0, dtype})
.Input(args)
.Input(host_args)
.Attr("num_args", num_args)
.Attr("num_host_args", num_host_args)
.Attr("T", dtype)
.Attr("TArgs", args_dtypes)
.Attr("data_format", v > 1 ? "NCHW_VECT_C" : "NHWC")
.Attr("strides", {1, stride, stride, 1})
.Attr("padding", padding)
.Attr("explicit_paddings", explicit_paddings)
.Attr("fused_ops", fused_ops)
.Finalize(&fused_conv2d));
RunAndFetch(root, fused_conv2d.name(), output, allow_gpu_device,
&fused_conv2d);
if (v > 1) {
const int oh = GetTensorDim(*output, FORMAT_NCHW_VECT_C, 'H');
const int ow = GetTensorDim(*output, FORMAT_NCHW_VECT_C, 'W');
TensorShape shape;
TF_EXPECT_OK(
ShapeFromFormatWithStatus(FORMAT_NHWC, n, oh, ow, oc, &shape));
Tensor output_nhwc(dtype, shape);
output_nhwc.tensor<T, 4>() =
output->tensor<T, 5>()
.shuffle(Eigen::array<int, 5>{0, 2, 3, 1, 4})
.reshape(Eigen::array<int, 4>{n, oh, ow, oc});
*output = output_nhwc;
}
}
void ExpectMatch(const Tensor& x, const Tensor& y, double atol) {
constexpr bool exact_match =
std::is_same<T, int8>::value || std::is_same<T, qint8>::value;
if (exact_match) {
test::ExpectEqual(x, y);
} else {
test::ExpectClose(x, y, atol);
}
}
void VerifyBiasAddTensorsNear(int depth, int image_width, int image_height,
int image_batch_count, int filter_size,
int filter_count,
const BiasAddGraphRunner& run_default,
const BiasAddGraphRunner& run_fused) {
DataType dtype = DataTypeToEnum<T>::v();
constexpr int int8_scale = 80;
using ConvT = typename std::conditional<kIsInt8, int8, T>::type;
DataType dtype_conv = DataTypeToEnum<ConvT>::v();
TensorShape image_shape{image_batch_count, image_height, image_width,
depth};
Tensor image_tmp(dtype_conv, image_shape);
image_tmp.flat<ConvT>() = image_tmp.flat<ConvT>().setRandom();
if (kIsInt8) {
image_tmp.flat<ConvT>() /= image_tmp.flat<ConvT>().constant(int8_scale);
}
Tensor image(dtype, image_shape);
ASSERT_TRUE(image.BitcastFrom(image_tmp, dtype, image_shape).ok());
TensorShape filter_shape{filter_size, filter_size, depth, filter_count};
Tensor filter_tmp(dtype_conv, filter_shape);
filter_tmp.flat<ConvT>() = filter_tmp.flat<ConvT>().setRandom();
if (kIsInt8) {
filter_tmp.flat<ConvT>() /= filter_tmp.flat<ConvT>().constant(int8_scale);
} else {
filter_tmp.flat<ConvT>() -=
filter_tmp.flat<ConvT>().constant(static_cast<ConvT>(0.5f));
}
Tensor filter(dtype, filter_shape);
ASSERT_TRUE(filter.BitcastFrom(filter_tmp, dtype, filter_shape).ok());
const int bias_size = filter_count;
TensorShape bias_shape{bias_size};
Tensor bias_tmp(dtype_conv, bias_shape);
bias_tmp.flat<ConvT>() = bias_tmp.flat<ConvT>().setRandom();
if (kIsInt8) {
bias_tmp.flat<ConvT>() /= bias_tmp.flat<ConvT>().constant(int8_scale);
} else {
bias_tmp.flat<ConvT>() +=
bias_tmp.flat<ConvT>().constant(static_cast<ConvT>(0.5f));
}
Tensor bias(dtype, bias_shape);
ASSERT_TRUE(bias.BitcastFrom(bias_tmp, dtype, bias_shape).ok());
Tensor conv_2d;
Tensor fused_conv_2d;
run_default(image, filter, bias, &conv_2d);
run_fused(image, filter, bias, &fused_conv_2d);
ASSERT_EQ(conv_2d.dtype(), fused_conv_2d.dtype());
ASSERT_EQ(conv_2d.shape(), fused_conv_2d.shape());
if (image_width == filter_size && image_height == filter_size) {
ExpectMatch(conv_2d, fused_conv_2d, 1e-4);
} else {
ExpectMatch(conv_2d, fused_conv_2d, 1e-5);
}
}
void VerifyFusedBatchNormTensorsNear(int depth, int image_width,
int image_height, int image_batch_count,
int filter_size, int filter_count,
const BatchNormGraphRunner& run_default,
const BatchNormGraphRunner& run_fused) {
DataType dtype = DataTypeToEnum<T>::v();
Tensor image(dtype, {image_batch_count, image_height, image_width, depth});
image.flat<T>() = image.flat<T>().setRandom();
Tensor filter(dtype, {filter_size, filter_size, depth, filter_count});
filter.flat<T>() = filter.flat<T>().setRandom();
filter.flat<T>() -= filter.flat<T>().constant(static_cast<T>(0.5f));
const int scale_size = filter_count;
Tensor scale(dtype, {scale_size});
scale.flat<T>() = scale.flat<T>().setRandom();
Tensor offset(dtype, {scale_size});
offset.flat<T>() = offset.flat<T>().setRandom();
Tensor mean(dtype, {scale_size});
mean.flat<T>() = mean.flat<T>().setRandom();
Tensor variance(dtype, {scale_size});
variance.flat<T>() = variance.flat<T>().setRandom();
variance.flat<T>() += variance.flat<T>().constant(static_cast<T>(0.5f));
Tensor conv_2d;
Tensor fused_conv_2d;
run_default(image, filter, scale, offset, mean, variance, &conv_2d);
run_fused(image, filter, scale, offset, mean, variance, &fused_conv_2d);
ASSERT_EQ(conv_2d.dtype(), fused_conv_2d.dtype());
ASSERT_EQ(conv_2d.shape(), fused_conv_2d.shape());
if (image_width == filter_size && image_height == filter_size) {
test::ExpectClose(conv_2d, fused_conv_2d, 1e-4);
} else {
test::ExpectClose(conv_2d, fused_conv_2d, 1e-5);
}
}
void VerifyConv2DWithBias(int filter_size, int filter_count,
const std::vector<int>& explicit_paddings = {},
int depth = kDepth, int image_width = kImageWidth,
int image_height = kImageHeight,
int image_batch_count = kImageBatchCount) {
if (kIsInt8 && !explicit_paddings.empty()) {
return;
}
std::string padding = explicit_paddings.empty() ? "SAME" : "EXPLICIT";
const BiasAddGraphRunner run_default =
[&](const Tensor& input_data, const Tensor& filter_data,
const Tensor& bias_data, Tensor* out) {
RunConv2DWithBias(input_data, filter_data, bias_data, padding,
explicit_paddings, out);
};
const BiasAddGraphRunner run_fused =
[&](const Tensor& input_data, const Tensor& filter_data,
const Tensor& bias_data, Tensor* out) {
RunFusedConv2DOp(input_data, filter_data, {bias_data}, {"BiasAdd"},
padding, explicit_paddings, out,
kIsInt8);
};
VerifyBiasAddTensorsNear(depth, image_width, image_height,
image_batch_count, filter_size, filter_count,
run_default, run_fused);
}
void VerifyConv2DWithBiasAndActivation(
const std::string& activation, int filter_size, int filter_count,
const std::vector<int>& explicit_paddings = {}, int depth = kDepth,
int image_width = kImageWidth, int image_height = kImageHeight,
int image_batch_count = kImageBatchCount) {
if (kIsInt8 && (activation != "Relu" || !explicit_paddings.empty())) {
return;
}
std::string padding = explicit_paddings.empty() ? "SAME" : "EXPLICIT";
const BiasAddGraphRunner run_default =
[this, &activation, &explicit_paddings, &padding](
const Tensor& input_data, const Tensor& filter_data,
const Tensor& bias_data, Tensor* out) {
RunConv2DWithBiasAndActivation(
input_data, filter_data, bias_data, activation, padding,
explicit_paddings, out,
activation == "Relu");
};
const BiasAddGraphRunner run_fused = [this, &activation, &explicit_paddings,
padding](const Tensor& input_data,
const Tensor& filter_data,
const Tensor& bias_data,
Tensor* out) {
RunFusedConv2DOp(input_data, filter_data, {bias_data},
{"BiasAdd", activation}, padding, explicit_paddings, out,
activation == "Relu");
};
VerifyBiasAddTensorsNear(depth, image_width, image_height,
image_batch_count, filter_size, filter_count,
run_default, run_fused);
}
void VerifyConv2DWithBatchNorm(int filter_size, int filter_count,
const std::vector<int>& explicit_paddings = {},
int depth = kDepth,
int image_width = kImageWidth,
int image_height = kImageHeight,
int image_batch_count = kImageBatchCount) {
std::string padding = explicit_paddings.empty() ? "SAME" : "EXPLICIT";
const BatchNormGraphRunner run_default =
[this, explicit_paddings, padding](
const Tensor& input_data, const Tensor& filter_data,
const Tensor& scale_data, const Tensor& offset_data,
const Tensor& mean_data, const Tensor& variance_data, Tensor* out) {
RunConv2DWithBatchNorm(input_data, filter_data, scale_data,
offset_data, mean_data, variance_data, padding,
explicit_paddings, out);
};
const BatchNormGraphRunner run_fused =
[this, explicit_paddings, padding](
const Tensor& input_data, const Tensor& filter_data,
const Tensor& scale_data, const Tensor& offset_data,
const Tensor& mean_data, const Tensor& variance_data, Tensor* out) {
RunFusedConv2DOp(input_data, filter_data,
{scale_data, offset_data, mean_data, variance_data},
{"FusedBatchNorm"}, padding, explicit_paddings, out);
};
VerifyFusedBatchNormTensorsNear(depth, image_width, image_height,
image_batch_count, filter_size,
filter_count, run_default, run_fused);
}
void VerifyConv2DWithBatchNormAndActivation(
const string& activation, int filter_size, int filter_count,
const std::vector<int>& explicit_paddings = {}, int depth = kDepth,
int image_width = kImageWidth, int image_height = kImageHeight,
int image_batch_count = kImageBatchCount) {
std::string padding = explicit_paddings.empty() ? "SAME" : "EXPLICIT";
const BatchNormGraphRunner run_default =
[this, &activation, explicit_paddings, padding](
const Tensor& input_data, const Tensor& filter_data,
const Tensor& scale_data, const Tensor& offset_data,
const Tensor& mean_data, const Tensor& variance_data, Tensor* out) {
RunConv2DWithBatchNormAndActivation(
input_data, filter_data, scale_data, offset_data, mean_data,
variance_data, activation, padding, explicit_paddings, out);
};
const BatchNormGraphRunner run_fused =
[this, &activation, explicit_paddings, padding](
const Tensor& input_data, const Tensor& filter_data,
const Tensor& scale_data, const Tensor& offset_data,
const Tensor& mean_data, const Tensor& variance_data, Tensor* out) {
RunFusedConv2DOp(input_data, filter_data,
{scale_data, offset_data, mean_data, variance_data},
{"FusedBatchNorm", activation}, padding,
explicit_paddings, out);
};
VerifyFusedBatchNormTensorsNear(depth, image_width, image_height,
image_batch_count, filter_size,
filter_count, run_default, run_fused);
}
};
template <typename T>
class FusedConv2DWithBiasOpTest : public FusedConv2DOpTest<T> {};
template <typename T>
class FusedConv2DWithBatchNormOpTest : public FusedConv2DOpTest<T> {};
TYPED_TEST_SUITE_P(FusedConv2DWithBiasOpTest);
TYPED_TEST_SUITE_P(FusedConv2DWithBatchNormOpTest);
#ifndef TENSORFLOW_USE_ROCM
TYPED_TEST_P(FusedConv2DWithBiasOpTest, OneByOneConvolution) {
const int filter_size = 1;
const int filter_count = 12;
this->VerifyConv2DWithBias(filter_size, filter_count);
}
TYPED_TEST_P(FusedConv2DWithBiasOpTest, ImageSizeConvolution) {
const int filter_size = TestFixture::kImageWidth;
const int filter_count = 12;
this->VerifyConv2DWithBias(filter_size, filter_count);
}
TYPED_TEST_P(FusedConv2DWithBiasOpTest, SpatialConvolution) {
const int filter_size = 3;
const int filter_count = 12;
this->VerifyConv2DWithBias(filter_size, filter_count);
}
#ifndef INTEL_MKL
TYPED_TEST_P(FusedConv2DWithBiasOpTest, ExplicitPaddingConvolution) {
const int filter_size = 3;
const int filter_count = 12;
this->VerifyConv2DWithBias(filter_size, filter_count,
{0, 0, 1, 2, 3, 4, 0, 0});
}
#endif
static auto activations = {"Relu", "Relu6", "Elu", "LeakyRelu"};
TYPED_TEST_P(FusedConv2DWithBiasOpTest, OneByOneConvolutionAndActivation) {
tensorflow::enable_tensor_float_32_execution(false);
const int filter_size = 1;
const int filter_count = 12;
for (const std::string& activation : activations) {
this->VerifyConv2DWithBiasAndActivation(activation, filter_size,
filter_count);
}
}
TYPED_TEST_P(FusedConv2DWithBiasOpTest, ImageSizeConvolutionAndActivation) {
const int filter_size = TestFixture::kImageWidth;
const int filter_count = 12;
for (const std::string& activation : activations) {
this->VerifyConv2DWithBiasAndActivation(activation, filter_size,
filter_count);
}
}
TYPED_TEST_P(FusedConv2DWithBiasOpTest, SpatialConvolutionAndActivation) {
const int filter_size = 3;
const int filter_count = 12;
for (const std::string& activation : activations) {
this->VerifyConv2DWithBiasAndActivation(activation, filter_size,
filter_count);
}
}
#ifndef INTEL_MKL
TYPED_TEST_P(FusedConv2DWithBiasOpTest,
ExplicitPaddingConvolutionAndActivation) {
const int filter_size = 3;
const int filter_count = 12;
for (const std::string& activation : activations) {
this->VerifyConv2DWithBiasAndActivation(
activation, filter_size, filter_count,
{0, 0, 1, 2, 3, 4, 0, 0});
}
}
#endif
TYPED_TEST_P(FusedConv2DWithBatchNormOpTest, OneByOneConvolution) {
const int filter_size = 1;
const int filter_count = 12;
this->VerifyConv2DWithBatchNorm(filter_size, filter_count);
}
TYPED_TEST_P(FusedConv2DWithBatchNormOpTest, ImageSizeConvolution) {
const int filter_size = TestFixture::kImageWidth;
const int filter_count = 12;
this->VerifyConv2DWithBatchNorm(filter_size, filter_count);
}
TYPED_TEST_P(FusedConv2DWithBatchNormOpTest, SpatialConvolution) {
const int filter_size = 3;
const int filter_count = 12;
this->VerifyConv2DWithBatchNorm(filter_size, filter_count);
}
#ifndef INTEL_MKL
TYPED_TEST_P(FusedConv2DWithBatchNormOpTest, ExplicitPaddingConvolution) {
const int filter_size = 3;
const int filter_count = 12;
this->VerifyConv2DWithBatchNorm(
filter_size, filter_count,
{0, 0, 1, 2, 3, 4, 0, 0});
}
#endif
TYPED_TEST_P(FusedConv2DWithBatchNormOpTest, OneByOneConvolutionAndActivation) {
const int filter_size = 1;
const int filter_count = 12;
for (const std::string& activation : activations) {
this->VerifyConv2DWithBatchNormAndActivation(activation, filter_size,
filter_count);
}
}
TYPED_TEST_P(FusedConv2DWithBatchNormOpTest,
ImageSizeConvolutionAndActivation) {
const int filter_size = TestFixture::kImageWidth;
const int filter_count = 12;
for (const std::string& activation : activations) {
this->VerifyConv2DWithBatchNormAndActivation(activation, filter_size,
filter_count);
}
}
TYPED_TEST_P(FusedConv2DWithBatchNormOpTest, SpatialConvolutionAndActivation) {
const int filter_size = 3;
const int filter_count = 12;
for (const std::string& activation : activations) {
this->VerifyConv2DWithBatchNormAndActivation(activation, filter_size,
filter_count);
}
}
#ifndef INTEL_MKL
TYPED_TEST_P(FusedConv2DWithBatchNormOpTest,
ExplicitPaddingConvolutionAndActivation) {
const int filter_size = 3;
const int filter_count = 12;
for (const std::string& activation : activations) {
this->VerifyConv2DWithBatchNormAndActivation(
activation, filter_size, filter_count,
{0, 0, 1, 2, 3, 4, 0, 0});
}
}
#endif
#ifndef INTEL_MKL
REGISTER_TYPED_TEST_SUITE_P(FusedConv2DWithBiasOpTest,
OneByOneConvolution,
ImageSizeConvolution,
SpatialConvolution,
ExplicitPaddingConvolution,
OneByOneConvolutionAndActivation,
ImageSizeConvolutionAndActivation,
SpatialConvolutionAndActivation,
ExplicitPaddingConvolutionAndActivation);
REGISTER_TYPED_TEST_SUITE_P(FusedConv2DWithBatchNormOpTest,
OneByOneConvolution,
ImageSizeConvolution,
SpatialConvolution,
ExplicitPaddingConvolution,
OneByOneConvolutionAndActivation,
ImageSizeConvolutionAndActivation,
SpatialConvolutionAndActivation,
ExplicitPaddingConvolutionAndActivation);
#else
REGISTER_TYPED_TEST_SUITE_P(FusedConv2DWithBiasOpTest,
OneByOneConvolution,
ImageSizeConvolution,
SpatialConvolution,
OneByOneConvolutionAndActivation,
ImageSizeConvolutionAndActivation,
SpatialConvolutionAndActivation);
REGISTER_TYPED_TEST_SUITE_P(FusedConv2DWithBatchNormOpTest,
OneByOneConvolution,
ImageSizeConvolution,
SpatialConvolution,
OneByOneConvolutionAndActivation,
ImageSizeConvolutionAndActivation,
SpatialConvolutionAndActivation);
#endif
using FusedBiasAddDataTypes = ::testing::Types<float, double, int8, qint8>;
INSTANTIATE_TYPED_TEST_SUITE_P(Test, FusedConv2DWithBiasOpTest,
FusedBiasAddDataTypes);
using FusedBatchNormDataTypes = ::testing::Types<float>;
INSTANTIATE_TYPED_TEST_SUITE_P(Test, FusedConv2DWithBatchNormOpTest,
FusedBatchNormDataTypes);
#endif
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/kernels/conv_ops.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/conv_ops_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
6be32d49-4f0d-4035-bcc7-0e47c6499953 | cpp | google/quiche | qpack_instruction_decoder | quiche/quic/core/qpack/qpack_instruction_decoder.cc | quiche/quic/core/qpack/qpack_instruction_decoder_test.cc | #include "quiche/quic/core/qpack/qpack_instruction_decoder.h"
#include <algorithm>
#include <string>
#include <utility>
#include "absl/strings/string_view.h"
#include "quiche/quic/platform/api/quic_bug_tracker.h"
#include "quiche/quic/platform/api/quic_logging.h"
namespace quic {
namespace {
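// Caps the decoded string literal length so that a peer cannot force the
// decoder to allocate unbounded memory.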
const size_t kStringLiteralLengthLimit = 1024 * 1024;
}
QpackInstructionDecoder::QpackInstructionDecoder(const QpackLanguage* language,
Delegate* delegate)
: language_(language),
delegate_(delegate),
s_bit_(false),
varint_(0),
varint2_(0),
is_huffman_encoded_(false),
string_length_(0),
error_detected_(false),
state_(State::kStartInstruction) {}
bool QpackInstructionDecoder::Decode(absl::string_view data) {
QUICHE_DCHECK(!data.empty());
QUICHE_DCHECK(!error_detected_);
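  // Walk the state machine; states that consume no input (kStartField,
  // kVarintDone, kReadStringDone) must still run when |data| is empty.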
while (true) {
bool success = true;
size_t bytes_consumed = 0;
switch (state_) {
case State::kStartInstruction:
success = DoStartInstruction(data);
break;
case State::kStartField:
success = DoStartField();
break;
case State::kReadBit:
success = DoReadBit(data);
break;
case State::kVarintStart:
success = DoVarintStart(data, &bytes_consumed);
break;
case State::kVarintResume:
success = DoVarintResume(data, &bytes_consumed);
break;
case State::kVarintDone:
success = DoVarintDone();
break;
case State::kReadString:
success = DoReadString(data, &bytes_consumed);
break;
case State::kReadStringDone:
success = DoReadStringDone();
break;
}
if (!success) {
return false;
}
QUICHE_DCHECK(!error_detected_);
QUICHE_DCHECK_LE(bytes_consumed, data.size());
data = absl::string_view(data.data() + bytes_consumed,
data.size() - bytes_consumed);
if (data.empty() && (state_ != State::kStartField) &&
(state_ != State::kVarintDone) && (state_ != State::kReadStringDone)) {
return true;
}
}
}
bool QpackInstructionDecoder::AtInstructionBoundary() const {
return state_ == State::kStartInstruction;
}
bool QpackInstructionDecoder::DoStartInstruction(absl::string_view data) {
QUICHE_DCHECK(!data.empty());
instruction_ = LookupOpcode(data[0]);
field_ = instruction_->fields.begin();
state_ = State::kStartField;
return true;
}
bool QpackInstructionDecoder::DoStartField() {
if (field_ == instruction_->fields.end()) {
if (!delegate_->OnInstructionDecoded(instruction_)) {
return false;
}
state_ = State::kStartInstruction;
return true;
}
switch (field_->type) {
case QpackInstructionFieldType::kSbit:
case QpackInstructionFieldType::kName:
case QpackInstructionFieldType::kValue:
state_ = State::kReadBit;
return true;
case QpackInstructionFieldType::kVarint:
case QpackInstructionFieldType::kVarint2:
state_ = State::kVarintStart;
return true;
default:
QUIC_BUG(quic_bug_10767_1) << "Invalid field type.";
return false;
}
}
bool QpackInstructionDecoder::DoReadBit(absl::string_view data) {
QUICHE_DCHECK(!data.empty());
switch (field_->type) {
case QpackInstructionFieldType::kSbit: {
const uint8_t bitmask = field_->param;
s_bit_ = (data[0] & bitmask) == bitmask;
++field_;
state_ = State::kStartField;
return true;
}
case QpackInstructionFieldType::kName:
case QpackInstructionFieldType::kValue: {
const uint8_t prefix_length = field_->param;
QUICHE_DCHECK_GE(7, prefix_length);
const uint8_t bitmask = 1 << prefix_length;
is_huffman_encoded_ = (data[0] & bitmask) == bitmask;
state_ = State::kVarintStart;
return true;
}
default:
QUIC_BUG(quic_bug_10767_2) << "Invalid field type.";
return false;
}
}
bool QpackInstructionDecoder::DoVarintStart(absl::string_view data,
size_t* bytes_consumed) {
QUICHE_DCHECK(!data.empty());
QUICHE_DCHECK(field_->type == QpackInstructionFieldType::kVarint ||
field_->type == QpackInstructionFieldType::kVarint2 ||
field_->type == QpackInstructionFieldType::kName ||
field_->type == QpackInstructionFieldType::kValue);
http2::DecodeBuffer buffer(data.data() + 1, data.size() - 1);
http2::DecodeStatus status =
varint_decoder_.Start(data[0], field_->param, &buffer);
*bytes_consumed = 1 + buffer.Offset();
switch (status) {
case http2::DecodeStatus::kDecodeDone:
state_ = State::kVarintDone;
return true;
case http2::DecodeStatus::kDecodeInProgress:
state_ = State::kVarintResume;
return true;
case http2::DecodeStatus::kDecodeError:
OnError(ErrorCode::INTEGER_TOO_LARGE, "Encoded integer too large.");
return false;
default:
QUIC_BUG(quic_bug_10767_3) << "Unknown decode status " << status;
return false;
}
}
bool QpackInstructionDecoder::DoVarintResume(absl::string_view data,
size_t* bytes_consumed) {
QUICHE_DCHECK(!data.empty());
QUICHE_DCHECK(field_->type == QpackInstructionFieldType::kVarint ||
field_->type == QpackInstructionFieldType::kVarint2 ||
field_->type == QpackInstructionFieldType::kName ||
field_->type == QpackInstructionFieldType::kValue);
http2::DecodeBuffer buffer(data);
http2::DecodeStatus status = varint_decoder_.Resume(&buffer);
*bytes_consumed = buffer.Offset();
switch (status) {
case http2::DecodeStatus::kDecodeDone:
state_ = State::kVarintDone;
return true;
case http2::DecodeStatus::kDecodeInProgress:
QUICHE_DCHECK_EQ(*bytes_consumed, data.size());
QUICHE_DCHECK(buffer.Empty());
return true;
case http2::DecodeStatus::kDecodeError:
OnError(ErrorCode::INTEGER_TOO_LARGE, "Encoded integer too large.");
return false;
default:
QUIC_BUG(quic_bug_10767_4) << "Unknown decode status " << status;
return false;
}
}
bool QpackInstructionDecoder::DoVarintDone() {
QUICHE_DCHECK(field_->type == QpackInstructionFieldType::kVarint ||
field_->type == QpackInstructionFieldType::kVarint2 ||
field_->type == QpackInstructionFieldType::kName ||
field_->type == QpackInstructionFieldType::kValue);
if (field_->type == QpackInstructionFieldType::kVarint) {
varint_ = varint_decoder_.value();
++field_;
state_ = State::kStartField;
return true;
}
if (field_->type == QpackInstructionFieldType::kVarint2) {
varint2_ = varint_decoder_.value();
++field_;
state_ = State::kStartField;
return true;
}
string_length_ = varint_decoder_.value();
if (string_length_ > kStringLiteralLengthLimit) {
OnError(ErrorCode::STRING_LITERAL_TOO_LONG, "String literal too long.");
return false;
}
std::string* const string =
(field_->type == QpackInstructionFieldType::kName) ? &name_ : &value_;
string->clear();
if (string_length_ == 0) {
++field_;
state_ = State::kStartField;
return true;
}
string->reserve(string_length_);
state_ = State::kReadString;
return true;
}
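// DoReadString() accumulates raw (possibly Huffman-encoded) bytes; Huffman
// decoding is deferred to DoReadStringDone() once string_length_ bytes have
// arrived.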
bool QpackInstructionDecoder::DoReadString(absl::string_view data,
size_t* bytes_consumed) {
QUICHE_DCHECK(!data.empty());
QUICHE_DCHECK(field_->type == QpackInstructionFieldType::kName ||
field_->type == QpackInstructionFieldType::kValue);
std::string* const string =
(field_->type == QpackInstructionFieldType::kName) ? &name_ : &value_;
QUICHE_DCHECK_LT(string->size(), string_length_);
*bytes_consumed = std::min(string_length_ - string->size(), data.size());
string->append(data.data(), *bytes_consumed);
QUICHE_DCHECK_LE(string->size(), string_length_);
if (string->size() == string_length_) {
state_ = State::kReadStringDone;
}
return true;
}
bool QpackInstructionDecoder::DoReadStringDone() {
QUICHE_DCHECK(field_->type == QpackInstructionFieldType::kName ||
field_->type == QpackInstructionFieldType::kValue);
std::string* const string =
(field_->type == QpackInstructionFieldType::kName) ? &name_ : &value_;
QUICHE_DCHECK_EQ(string->size(), string_length_);
if (is_huffman_encoded_) {
huffman_decoder_.Reset();
std::string decoded_value;
huffman_decoder_.Decode(*string, &decoded_value);
if (!huffman_decoder_.InputProperlyTerminated()) {
OnError(ErrorCode::HUFFMAN_ENCODING_ERROR,
"Error in Huffman-encoded string.");
return false;
}
*string = std::move(decoded_value);
}
++field_;
state_ = State::kStartField;
return true;
}
const QpackInstruction* QpackInstructionDecoder::LookupOpcode(
uint8_t byte) const {
for (const auto* instruction : *language_) {
if ((byte & instruction->opcode.mask) == instruction->opcode.value) {
return instruction;
}
}
QUICHE_DCHECK(false);
return nullptr;
}
void QpackInstructionDecoder::OnError(ErrorCode error_code,
absl::string_view error_message) {
QUICHE_DCHECK(!error_detected_);
error_detected_ = true;
delegate_->OnInstructionDecodingError(error_code, error_message);
}
} | #include "quiche/quic/core/qpack/qpack_instruction_decoder.h"
#include <algorithm>
#include <memory>
#include <string>
#include "absl/strings/escaping.h"
#include "absl/strings/string_view.h"
#include "quiche/quic/core/qpack/qpack_instructions.h"
#include "quiche/quic/platform/api/quic_logging.h"
#include "quiche/quic/platform/api/quic_test.h"
#include "quiche/quic/test_tools/qpack/qpack_test_utils.h"
using ::testing::_;
using ::testing::Eq;
using ::testing::Expectation;
using ::testing::InvokeWithoutArgs;
using ::testing::Return;
using ::testing::StrictMock;
using ::testing::Values;
namespace quic {
namespace test {
namespace {
const QpackInstruction* TestInstruction1() {
static const QpackInstruction* const instruction =
new QpackInstruction{QpackInstructionOpcode{0x00, 0x80},
{{QpackInstructionFieldType::kSbit, 0x40},
{QpackInstructionFieldType::kVarint, 6},
{QpackInstructionFieldType::kVarint2, 8}}};
return instruction;
}
const QpackInstruction* TestInstruction2() {
static const QpackInstruction* const instruction =
new QpackInstruction{QpackInstructionOpcode{0x80, 0x80},
{{QpackInstructionFieldType::kName, 6},
{QpackInstructionFieldType::kValue, 7}}};
return instruction;
}
const QpackLanguage* TestLanguage() {
static const QpackLanguage* const language =
new QpackLanguage{TestInstruction1(), TestInstruction2()};
return language;
}
class MockDelegate : public QpackInstructionDecoder::Delegate {
public:
MockDelegate() {
ON_CALL(*this, OnInstructionDecoded(_)).WillByDefault(Return(true));
}
MockDelegate(const MockDelegate&) = delete;
MockDelegate& operator=(const MockDelegate&) = delete;
~MockDelegate() override = default;
MOCK_METHOD(bool, OnInstructionDecoded, (const QpackInstruction*),
(override));
MOCK_METHOD(void, OnInstructionDecodingError,
(QpackInstructionDecoder::ErrorCode error_code,
absl::string_view error_message),
(override));
};
class QpackInstructionDecoderTest : public QuicTestWithParam<FragmentMode> {
protected:
QpackInstructionDecoderTest()
: decoder_(std::make_unique<QpackInstructionDecoder>(TestLanguage(),
&delegate_)),
fragment_mode_(GetParam()) {}
~QpackInstructionDecoderTest() override = default;
void SetUp() override {
ON_CALL(delegate_, OnInstructionDecodingError(_, _))
.WillByDefault(InvokeWithoutArgs([this]() { decoder_.reset(); }));
}
void DecodeInstruction(absl::string_view data) {
EXPECT_TRUE(decoder_->AtInstructionBoundary());
FragmentSizeGenerator fragment_size_generator =
FragmentModeToFragmentSizeGenerator(fragment_mode_);
while (!data.empty()) {
size_t fragment_size = std::min(fragment_size_generator(), data.size());
bool success = decoder_->Decode(data.substr(0, fragment_size));
if (!decoder_) {
EXPECT_FALSE(success);
return;
}
EXPECT_TRUE(success);
data = data.substr(fragment_size);
if (!data.empty()) {
EXPECT_FALSE(decoder_->AtInstructionBoundary());
}
}
EXPECT_TRUE(decoder_->AtInstructionBoundary());
}
StrictMock<MockDelegate> delegate_;
std::unique_ptr<QpackInstructionDecoder> decoder_;
private:
const FragmentMode fragment_mode_;
};
INSTANTIATE_TEST_SUITE_P(All, QpackInstructionDecoderTest,
Values(FragmentMode::kSingleChunk,
FragmentMode::kOctetByOctet));
TEST_P(QpackInstructionDecoderTest, SBitAndVarint2) {
std::string encoded_data;
EXPECT_CALL(delegate_, OnInstructionDecoded(TestInstruction1()));
ASSERT_TRUE(absl::HexStringToBytes("7f01ff65", &encoded_data));
DecodeInstruction(encoded_data);
EXPECT_TRUE(decoder_->s_bit());
EXPECT_EQ(64u, decoder_->varint());
EXPECT_EQ(356u, decoder_->varint2());
EXPECT_CALL(delegate_, OnInstructionDecoded(TestInstruction1()));
ASSERT_TRUE(absl::HexStringToBytes("05c8", &encoded_data));
DecodeInstruction(encoded_data);
EXPECT_FALSE(decoder_->s_bit());
EXPECT_EQ(5u, decoder_->varint());
EXPECT_EQ(200u, decoder_->varint2());
}
TEST_P(QpackInstructionDecoderTest, NameAndValue) {
std::string encoded_data;
EXPECT_CALL(delegate_, OnInstructionDecoded(TestInstruction2()));
ASSERT_TRUE(absl::HexStringToBytes("83666f6f03626172", &encoded_data));
DecodeInstruction(encoded_data);
EXPECT_EQ("foo", decoder_->name());
EXPECT_EQ("bar", decoder_->value());
EXPECT_CALL(delegate_, OnInstructionDecoded(TestInstruction2()));
ASSERT_TRUE(absl::HexStringToBytes("8000", &encoded_data));
DecodeInstruction(encoded_data);
EXPECT_EQ("", decoder_->name());
EXPECT_EQ("", decoder_->value());
EXPECT_CALL(delegate_, OnInstructionDecoded(TestInstruction2()));
ASSERT_TRUE(absl::HexStringToBytes("c294e7838c767f", &encoded_data));
DecodeInstruction(encoded_data);
EXPECT_EQ("foo", decoder_->name());
EXPECT_EQ("bar", decoder_->value());
}
TEST_P(QpackInstructionDecoderTest, InvalidHuffmanEncoding) {
std::string encoded_data;
EXPECT_CALL(delegate_,
OnInstructionDecodingError(
QpackInstructionDecoder::ErrorCode::HUFFMAN_ENCODING_ERROR,
Eq("Error in Huffman-encoded string.")));
ASSERT_TRUE(absl::HexStringToBytes("c1ff", &encoded_data));
DecodeInstruction(encoded_data);
}
TEST_P(QpackInstructionDecoderTest, InvalidVarintEncoding) {
std::string encoded_data;
EXPECT_CALL(delegate_,
OnInstructionDecodingError(
QpackInstructionDecoder::ErrorCode::INTEGER_TOO_LARGE,
Eq("Encoded integer too large.")));
ASSERT_TRUE(absl::HexStringToBytes("ffffffffffffffffffffff", &encoded_data));
DecodeInstruction(encoded_data);
}
TEST_P(QpackInstructionDecoderTest, StringLiteralTooLong) {
std::string encoded_data;
EXPECT_CALL(delegate_,
OnInstructionDecodingError(
QpackInstructionDecoder::ErrorCode::STRING_LITERAL_TOO_LONG,
Eq("String literal too long.")));
ASSERT_TRUE(absl::HexStringToBytes("bfffff7f", &encoded_data));
DecodeInstruction(encoded_data);
}
TEST_P(QpackInstructionDecoderTest, DelegateSignalsError) {
Expectation first_call =
EXPECT_CALL(delegate_, OnInstructionDecoded(TestInstruction1()))
.WillOnce(InvokeWithoutArgs([this]() -> bool {
EXPECT_EQ(1u, decoder_->varint());
return true;
}));
EXPECT_CALL(delegate_, OnInstructionDecoded(TestInstruction1()))
.After(first_call)
.WillOnce(InvokeWithoutArgs([this]() -> bool {
EXPECT_EQ(2u, decoder_->varint());
return false;
}));
std::string encoded_data;
ASSERT_TRUE(absl::HexStringToBytes("01000200030004000500", &encoded_data));
EXPECT_FALSE(decoder_->Decode(encoded_data));
}
TEST_P(QpackInstructionDecoderTest, DelegateSignalsErrorAndDestroysDecoder) {
EXPECT_CALL(delegate_, OnInstructionDecoded(TestInstruction1()))
.WillOnce(InvokeWithoutArgs([this]() -> bool {
EXPECT_EQ(1u, decoder_->varint());
decoder_.reset();
return false;
}));
std::string encoded_data;
ASSERT_TRUE(absl::HexStringToBytes("0100", &encoded_data));
DecodeInstruction(encoded_data);
}
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/qpack/qpack_instruction_decoder.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/qpack/qpack_instruction_decoder_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
afcc0e7d-5d93-494e-8de0-9fb0bc1f48ef | cpp | google/cel-cpp | comprehension_vulnerability_check | eval/compiler/comprehension_vulnerability_check.cc | runtime/comprehension_vulnerability_check_test.cc | #include "eval/compiler/comprehension_vulnerability_check.h"
#include <algorithm>
#include <memory>
#include <vector>
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "absl/types/variant.h"
#include "base/ast_internal/ast_impl.h"
#include "base/ast_internal/expr.h"
#include "base/builtins.h"
#include "eval/compiler/flat_expr_builder_extensions.h"
namespace google::api::expr::runtime {
namespace {
using ::cel::ast_internal::Comprehension;
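// Approximates the worst-case number of times |var_name| may be referenced by
// a single evaluation of |expr|; the two branches of a ternary contribute the
// larger of their counts rather than the sum.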
int ComprehensionAccumulationReferences(const cel::ast_internal::Expr& expr,
absl::string_view var_name) {
struct Handler {
const cel::ast_internal::Expr& expr;
absl::string_view var_name;
int operator()(const cel::ast_internal::Call& call) {
int references = 0;
absl::string_view function = call.function();
if (function == cel::builtin::kTernary && call.args().size() == 3) {
return std::max(
ComprehensionAccumulationReferences(call.args()[1], var_name),
ComprehensionAccumulationReferences(call.args()[2], var_name));
}
if (function == cel::builtin::kAdd) {
for (int i = 0; i < call.args().size(); i++) {
references +=
ComprehensionAccumulationReferences(call.args()[i], var_name);
}
return references;
}
if ((function == cel::builtin::kIndex && call.args().size() == 2) ||
(function == cel::builtin::kDyn && call.args().size() == 1)) {
return ComprehensionAccumulationReferences(call.args()[0], var_name);
}
return 0;
}
int operator()(const cel::ast_internal::Comprehension& comprehension) {
absl::string_view accu_var = comprehension.accu_var();
absl::string_view iter_var = comprehension.iter_var();
int result_references = 0;
int loop_step_references = 0;
int sum_of_accumulator_references = 0;
if (accu_var != var_name && iter_var != var_name) {
loop_step_references = ComprehensionAccumulationReferences(
comprehension.loop_step(), var_name);
}
if (accu_var != var_name) {
result_references = ComprehensionAccumulationReferences(
comprehension.result(), var_name);
}
sum_of_accumulator_references = ComprehensionAccumulationReferences(
comprehension.accu_init(), var_name);
sum_of_accumulator_references += ComprehensionAccumulationReferences(
comprehension.iter_range(), var_name);
return std::max({loop_step_references, result_references,
sum_of_accumulator_references});
}
int operator()(const cel::ast_internal::CreateList& list) {
int references = 0;
for (int i = 0; i < list.elements().size(); i++) {
references += ComprehensionAccumulationReferences(
list.elements()[i].expr(), var_name);
}
return references;
}
int operator()(const cel::ast_internal::CreateStruct& map) {
int references = 0;
for (int i = 0; i < map.fields().size(); i++) {
const auto& entry = map.fields()[i];
if (entry.has_value()) {
references +=
ComprehensionAccumulationReferences(entry.value(), var_name);
}
}
return references;
}
int operator()(const cel::MapExpr& map) {
int references = 0;
for (int i = 0; i < map.entries().size(); i++) {
const auto& entry = map.entries()[i];
if (entry.has_value()) {
references +=
ComprehensionAccumulationReferences(entry.value(), var_name);
}
}
return references;
}
int operator()(const cel::ast_internal::Select& select) {
if (select.test_only()) {
return 0;
}
return ComprehensionAccumulationReferences(select.operand(), var_name);
}
int operator()(const cel::ast_internal::Ident& ident) {
return ident.name() == var_name ? 1 : 0;
}
int operator()(const cel::ast_internal::Constant& constant) { return 0; }
int operator()(const cel::UnspecifiedExpr&) { return 0; }
} handler{expr, var_name};
return absl::visit(handler, expr.kind());
}
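// A loop step that references the accumulator two or more times can roughly
// double the accumulated value on every iteration, allowing memory use that
// grows exponentially with the number of iterations.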
bool ComprehensionHasMemoryExhaustionVulnerability(
const Comprehension& comprehension) {
absl::string_view accu_var = comprehension.accu_var();
const auto& loop_step = comprehension.loop_step();
return ComprehensionAccumulationReferences(loop_step, accu_var) >= 2;
}
class ComprehensionVulnerabilityCheck : public ProgramOptimizer {
public:
absl::Status OnPreVisit(PlannerContext& context,
const cel::ast_internal::Expr& node) override {
if (node.has_comprehension_expr() &&
ComprehensionHasMemoryExhaustionVulnerability(
node.comprehension_expr())) {
return absl::InvalidArgumentError(
"Comprehension contains memory exhaustion vulnerability");
}
return absl::OkStatus();
}
absl::Status OnPostVisit(PlannerContext& context,
const cel::ast_internal::Expr& node) override {
return absl::OkStatus();
}
};
}
ProgramOptimizerFactory CreateComprehensionVulnerabilityCheck() {
return [](PlannerContext&, const cel::ast_internal::AstImpl&) {
return std::make_unique<ComprehensionVulnerabilityCheck>();
};
}
} | #include "runtime/comprehension_vulnerability_check.h"
#include <utility>
#include "google/api/expr/v1alpha1/syntax.pb.h"
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "extensions/protobuf/runtime_adapter.h"
#include "internal/testing.h"
#include "parser/parser.h"
#include "runtime/runtime_builder.h"
#include "runtime/runtime_options.h"
#include "runtime/standard_runtime_builder_factory.h"
#include "google/protobuf/text_format.h"
namespace cel {
namespace {
using ::absl_testing::IsOk;
using ::absl_testing::StatusIs;
using ::cel::extensions::ProtobufRuntimeAdapter;
using ::google::api::expr::v1alpha1::ParsedExpr;
using ::google::api::expr::parser::Parse;
using ::google::protobuf::TextFormat;
using ::testing::HasSubstr;
constexpr absl::string_view kVulnerableExpr = R"pb(
expr {
id: 1
comprehension_expr {
iter_var: "unused"
accu_var: "accu"
result {
id: 2
ident_expr { name: "accu" }
}
accu_init {
id: 11
list_expr {
elements {
id: 12
const_expr { int64_value: 0 }
}
}
}
loop_condition {
id: 13
const_expr { bool_value: true }
}
loop_step {
id: 3
call_expr {
function: "_+_"
args {
id: 4
ident_expr { name: "accu" }
}
args {
id: 5
ident_expr { name: "accu" }
}
}
}
iter_range {
id: 6
list_expr {
elements {
id: 7
const_expr { int64_value: 0 }
}
elements {
id: 8
const_expr { int64_value: 0 }
}
elements {
id: 9
const_expr { int64_value: 0 }
}
elements {
id: 10
const_expr { int64_value: 0 }
}
}
}
}
}
)pb";
TEST(ComprehensionVulnerabilityCheck, EnabledVulnerable) {
RuntimeOptions runtime_options;
ASSERT_OK_AND_ASSIGN(RuntimeBuilder builder,
CreateStandardRuntimeBuilder(runtime_options));
ASSERT_OK(EnableComprehensionVulnerabiltyCheck(builder));
ASSERT_OK_AND_ASSIGN(auto runtime, std::move(builder).Build());
ParsedExpr expr;
ASSERT_TRUE(TextFormat::ParseFromString(kVulnerableExpr, &expr));
EXPECT_THAT(
ProtobufRuntimeAdapter::CreateProgram(*runtime, expr),
StatusIs(
absl::StatusCode::kInvalidArgument,
HasSubstr("Comprehension contains memory exhaustion vulnerability")));
}
TEST(ComprehensionVulnerabilityCheck, EnabledNotVulnerable) {
RuntimeOptions runtime_options;
ASSERT_OK_AND_ASSIGN(RuntimeBuilder builder,
CreateStandardRuntimeBuilder(runtime_options));
ASSERT_OK(EnableComprehensionVulnerabiltyCheck(builder));
ASSERT_OK_AND_ASSIGN(auto runtime, std::move(builder).Build());
ASSERT_OK_AND_ASSIGN(ParsedExpr expr, Parse("[0, 0, 0, 0].map(x, x + 1)"));
EXPECT_THAT(ProtobufRuntimeAdapter::CreateProgram(*runtime, expr), IsOk());
}
TEST(ComprehensionVulnerabilityCheck, DisabledVulnerable) {
RuntimeOptions runtime_options;
ASSERT_OK_AND_ASSIGN(RuntimeBuilder builder,
CreateStandardRuntimeBuilder(runtime_options));
ASSERT_OK_AND_ASSIGN(auto runtime, std::move(builder).Build());
ParsedExpr expr;
ASSERT_TRUE(TextFormat::ParseFromString(kVulnerableExpr, &expr));
EXPECT_THAT(ProtobufRuntimeAdapter::CreateProgram(*runtime, expr), IsOk());
}
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/eval/compiler/comprehension_vulnerability_check.cc | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/runtime/comprehension_vulnerability_check_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
dc8e519b-c16d-49d3-97a5-72a90d4fd5f7 | cpp | tensorflow/tensorflow | subgraph | tensorflow/lite/delegates/gpu/common/selectors/subgraph.cc | tensorflow/lite/core/subgraph_test.cc | #include "tensorflow/lite/delegates/gpu/common/selectors/subgraph.h"
#include <memory>
#include "tensorflow/lite/delegates/gpu/common/model.h"
#include "tensorflow/lite/delegates/gpu/common/shape.h"
#include "tensorflow/lite/delegates/gpu/common/task/gpu_operation.h"
#include "tensorflow/lite/delegates/gpu/common/task/tensor_desc.h"
namespace tflite {
namespace gpu {
int GPUOperationsSubgraph::AddTensor(const TensorDescriptor& desc) {
new_tensors.push_back(desc);
  // New tensors are referenced by negative ids: -1 for the first added
  // tensor, -2 for the next, and so on.
  return -static_cast<int>(new_tensors.size());
}
int GPUOperationsSubgraph::AddTensor(const BHWC& shape,
const TensorDescriptor& desc) {
TensorDescriptor desc_with_shape = desc;
desc_with_shape.SetBHWCShape(shape);
return AddTensor(desc_with_shape);
}
int GPUOperationsSubgraph::AddTensor(const OHWI& shape,
const TensorDescriptor& desc) {
const BHWC shape_as_bhwc(shape.o, shape.h, shape.w, shape.i);
return AddTensor(shape_as_bhwc, desc);
}
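// Resets |gpu_subgraph| to a single operation wired to the ids of |inputs|
// and |outputs|, and returns a pointer to the slot for that operation.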
std::unique_ptr<GPUOperation>* InitSingleOpSubgraph(
const std::vector<Value*>& inputs, const std::vector<Value*>& outputs,
GPUOperationsSubgraph* gpu_subgraph) {
gpu_subgraph->operations.clear();
gpu_subgraph->new_tensors.clear();
gpu_subgraph->operations.push_back({});
for (int i = 0; i < inputs.size(); ++i) {
gpu_subgraph->operations[0].input_ids.push_back(inputs[i]->id);
}
for (int i = 0; i < outputs.size(); ++i) {
gpu_subgraph->operations[0].output_ids.push_back(outputs[i]->id);
}
return &gpu_subgraph->operations[0].operation;
}
}
} | #include "tensorflow/lite/core/subgraph.h"
#include <algorithm>
#include <cstddef>
#include <functional>
#include <memory>
#include <numeric>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/log/check.h"
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/core/interpreter.h"
#include "tensorflow/lite/stderr_reporter.h"
#include "tensorflow/lite/util.h"
namespace tflite {
namespace ops {
namespace builtin {
TfLiteRegistration* Register_PADV2();
TfLiteRegistration* Register_NEG();
}
}
namespace {
using testing::ElementsAreArray;
using testing::Not;
TEST(RemoveUnusedInputs, NothingToRemove) {
Interpreter interpreter;
auto& subgraph = interpreter.primary_subgraph();
subgraph.AddTensors(4);
subgraph.SetInputs({0, 1});
subgraph.SetOutputs({3});
TfLiteRegistration* pad_op = tflite::ops::builtin::Register_PADV2();
TfLiteRegistration* neg_op = tflite::ops::builtin::Register_NEG();
subgraph.AddNodeWithParameters({0, 1}, {2}, {}, nullptr, 0, nullptr, pad_op);
subgraph.AddNodeWithParameters({2}, {3}, {}, nullptr, 0, nullptr, neg_op);
ASSERT_EQ(subgraph.RemoveUnusedInputs(), kTfLiteOk);
ASSERT_EQ(subgraph.inputs(), std::vector<int>({0, 1}));
}
TEST(RemoveUnusedInputs, HasUnusedInputs) {
Interpreter interpreter;
auto& subgraph = interpreter.primary_subgraph();
subgraph.AddTensors(4);
subgraph.SetInputs({0, 1, 2});
subgraph.SetOutputs({3});
TfLiteRegistration* neg_op = tflite::ops::builtin::Register_NEG();
subgraph.AddNodeWithParameters({2}, {3}, {}, nullptr, 0, nullptr, neg_op);
ASSERT_EQ(subgraph.RemoveUnusedInputs(), kTfLiteOk);
ASSERT_EQ(subgraph.inputs(), std::vector<int>({-1, -1, 2}));
}
TEST(RemoveUnusedInputs, BypassInputsWithoutOp) {
Interpreter interpreter;
auto& subgraph = interpreter.primary_subgraph();
subgraph.AddTensors(3);
subgraph.SetInputs({0, 1, 2});
subgraph.SetOutputs({0, 2});
ASSERT_EQ(subgraph.RemoveUnusedInputs(), kTfLiteOk);
ASSERT_EQ(subgraph.inputs(), std::vector<int>({0, -1, 2}));
}
TEST(GetSubgraphContext, NonConstGetSubgraphContext) {
Interpreter interpreter;
auto& subgraph = interpreter.primary_subgraph();
TfLiteContext* context = nullptr;
EXPECT_EQ(kTfLiteError, subgraph.AcquireSubgraphContext(-1, &context));
ASSERT_EQ(context, nullptr);
EXPECT_EQ(kTfLiteError, subgraph.AcquireSubgraphContext(1, &context));
ASSERT_EQ(context, nullptr);
EXPECT_EQ(kTfLiteOk, subgraph.AcquireSubgraphContext(0, &context));
ASSERT_NE(context, nullptr);
EXPECT_EQ(kTfLiteOk, subgraph.ReleaseSubgraphContext(0));
}
TEST(MarkSubgraphAsDelegationSkippable, MarkSubgraphAsDelegationSkippable) {
static StderrReporter* error_reporter = new StderrReporter;
std::vector<std::unique_ptr<Subgraph>> subgraphs;
for (int i = 0; i < 2; ++i) {
subgraphs.emplace_back(new Subgraph(error_reporter,
nullptr,
&subgraphs,
nullptr,
nullptr,
nullptr,
i));
}
ASSERT_EQ(subgraphs[0]->MarkSubgraphAsDelegationSkippable(0), kTfLiteError);
ASSERT_FALSE(subgraphs[0]->IsDelegationSkippable());
ASSERT_EQ(subgraphs[0]->MarkSubgraphAsDelegationSkippable(2), kTfLiteError);
ASSERT_EQ(subgraphs[0]->MarkSubgraphAsDelegationSkippable(1), kTfLiteOk);
ASSERT_TRUE(subgraphs[1]->IsDelegationSkippable());
}
size_t BytesFor(const TfLiteType type, const int* const data,
const size_t size) {
size_t type_size;
CHECK_EQ(GetSizeOfType(nullptr, type, &type_size), kTfLiteOk)
<< "Type is not supported by GetSizeOfType";
return std::accumulate(data, data + size, type_size, std::multiplies<int>());
}
size_t BytesFor(const TfLiteType type, const TfLiteIntArray& dims) {
return BytesFor(type, dims.data, dims.size);
}
size_t BytesFor(const TfLiteType type, const std::vector<int>& dims) {
return BytesFor(type, dims.data(), dims.size());
}
class SubgraphResizeTensorTest : public testing::Test {
public:
SubgraphResizeTensorTest() {
tensor_.type = type_;
tensor_.allocation_type = kTfLiteDynamic;
}
~SubgraphResizeTensorTest() override { TfLiteTensorFree(&tensor_); }
protected:
const TfLiteType type_ = kTfLiteInt32;
Interpreter interpreter_;
TfLiteContext& context_ = *interpreter_.primary_subgraph().context();
const std::vector<int> reference_shape_ = {5, 4, 3};
const size_t reference_dims_bytes_ = BytesFor(type_, reference_shape_);
TfLiteTensor tensor_ = {};
TfLiteIntArray* dims_ = ConvertVectorToTfLiteIntArray(reference_shape_);
};
TEST_F(SubgraphResizeTensorTest, ResizeEmptyDynamicTensorAllocateData) {
ASSERT_EQ(context_.ResizeTensor(&context_, &tensor_, dims_), kTfLiteOk);
EXPECT_EQ(tensor_.dims, dims_);
EXPECT_GE(tensor_.bytes, reference_dims_bytes_);
std::fill_n(tensor_.data.raw, reference_dims_bytes_, 0);
std::fill_n(tensor_.dims->data, tensor_.dims->size, 1);
}
TEST_F(SubgraphResizeTensorTest,
ResizeEmptyDynamicTensorWithStoredShapeAllocatesData) {
tensor_.dims = dims_;
ASSERT_EQ(context_.ResizeTensor(&context_, &tensor_, tensor_.dims),
kTfLiteOk);
EXPECT_GE(tensor_.bytes, reference_dims_bytes_);
std::fill_n(tensor_.data.raw, reference_dims_bytes_, 0);
std::fill_n(tensor_.dims->data, tensor_.dims->size, 1);
}
TEST_F(SubgraphResizeTensorTest, ResizeDynamicTensorWithTheEqualShapeIsANoop) {
ASSERT_EQ(context_.ResizeTensor(&context_, &tensor_, dims_), kTfLiteOk);
const void* const initial_data = tensor_.data.data;
TfLiteIntArray* dims2 = ConvertVectorToTfLiteIntArray(reference_shape_);
ASSERT_EQ(context_.ResizeTensor(&context_, &tensor_, dims2), kTfLiteOk);
EXPECT_EQ(tensor_.dims, dims2);
EXPECT_GE(tensor_.bytes, reference_dims_bytes_);
EXPECT_GE(tensor_.data.data, initial_data);
std::fill_n(tensor_.data.raw, reference_dims_bytes_, 0);
std::fill_n(tensor_.dims->data, tensor_.dims->size, 1);
}
TEST_F(SubgraphResizeTensorTest, ResizeDynamicTensorWithStoredShapeIsANoop) {
tensor_.dims = dims_;
ASSERT_EQ(context_.ResizeTensor(&context_, &tensor_, tensor_.dims),
kTfLiteOk);
const void* const initial_data = tensor_.data.data;
ASSERT_EQ(context_.ResizeTensor(&context_, &tensor_, tensor_.dims),
kTfLiteOk);
EXPECT_GE(tensor_.bytes, reference_dims_bytes_);
EXPECT_GE(tensor_.data.data, initial_data);
std::fill_n(tensor_.data.raw, reference_dims_bytes_, 0);
std::fill_n(tensor_.dims->data, tensor_.dims->size, 1);
}
TEST_F(SubgraphResizeTensorTest,
ResizeDynamicTensorWithEquivalentBufferSizeIsANoop) {
ASSERT_EQ(context_.ResizeTensor(&context_, &tensor_, dims_), kTfLiteOk);
const void* const initial_data = tensor_.data.data;
const std::vector<int> new_shape = {3, 4, 5};
ASSERT_THAT(new_shape, Not(ElementsAreArray(reference_shape_)));
TfLiteIntArray* dims2 = ConvertVectorToTfLiteIntArray(new_shape);
ASSERT_EQ(BytesFor(type_, *dims2), reference_dims_bytes_);
ASSERT_EQ(context_.ResizeTensor(&context_, &tensor_, dims2), kTfLiteOk);
EXPECT_GE(tensor_.bytes, reference_dims_bytes_);
EXPECT_EQ(tensor_.data.data, initial_data);
EXPECT_EQ(tensor_.dims, dims2);
std::fill_n(tensor_.data.raw, reference_dims_bytes_, 0);
std::fill_n(tensor_.dims->data, tensor_.dims->size, 1);
}
TEST_F(SubgraphResizeTensorTest,
ResizeDynamicTensorWithDifferentShapeReallocatesData) {
ASSERT_EQ(context_.ResizeTensor(&context_, &tensor_, dims_), kTfLiteOk);
TfLiteIntArray* dims2 = ConvertVectorToTfLiteIntArray({5, 4, 6});
const int dims2_bytes = BytesFor(type_, *dims2);
ASSERT_NE(dims2_bytes, reference_dims_bytes_);
ASSERT_EQ(context_.ResizeTensor(&context_, &tensor_, dims2), kTfLiteOk);
EXPECT_GE(tensor_.bytes, dims2_bytes);
EXPECT_EQ(tensor_.dims, dims2);
std::fill_n(tensor_.data.raw, dims2_bytes, 0);
std::fill_n(tensor_.dims->data, tensor_.dims->size, 1);
}
TEST_F(SubgraphResizeTensorTest,
ResizeDynamicTensorWithSameShapeButDifferentBytesReallocatesData) {
ASSERT_EQ(context_.ResizeTensor(&context_, &tensor_, dims_), kTfLiteOk);
TfLiteTensorResizeMaybeCopy(tensor_.bytes + 15, &tensor_,
true);
ASSERT_GT(tensor_.bytes, reference_dims_bytes_);
ASSERT_EQ(context_.ResizeTensor(&context_, &tensor_, tensor_.dims),
kTfLiteOk);
EXPECT_GE(tensor_.bytes, reference_dims_bytes_);
EXPECT_EQ(tensor_.dims, dims_);
std::fill_n(tensor_.data.raw, tensor_.bytes, 0);
std::fill_n(tensor_.dims->data, tensor_.dims->size, 1);
}
TEST_F(SubgraphResizeTensorTest,
ResizeDynamicTensorWithSameShapeButStringTypeSizeReallocatesData) {
constexpr size_t manual_bytes = 10;
TfLiteTensorResizeMaybeCopy(manual_bytes, &tensor_, true);
tensor_.dims = dims_;
tensor_.type = kTfLiteString;
ASSERT_EQ(context_.ResizeTensor(&context_, &tensor_, tensor_.dims),
kTfLiteOk);
EXPECT_EQ(tensor_.dims, dims_);
std::fill_n(tensor_.data.raw, tensor_.bytes, 0);
std::fill_n(tensor_.dims->data, tensor_.dims->size, 1);
}
TEST_F(SubgraphResizeTensorTest,
ResizeDynamicTensorWithSameShapeButRessourceTypeSizeReallocatesData) {
constexpr size_t manual_bytes = 10;
TfLiteTensorResizeMaybeCopy(manual_bytes, &tensor_, true);
tensor_.dims = dims_;
tensor_.type = kTfLiteResource;
ASSERT_EQ(context_.ResizeTensor(&context_, &tensor_, tensor_.dims),
kTfLiteOk);
EXPECT_EQ(tensor_.dims, dims_);
std::fill_n(tensor_.data.raw, tensor_.bytes, 0);
std::fill_n(tensor_.dims->data, tensor_.dims->size, 1);
}
TEST_F(SubgraphResizeTensorTest,
ResizeDynamicTensorWithSameShapeButVariantTypeSizeReallocatesData) {
constexpr size_t manual_bytes = 10;
TfLiteTensorResizeMaybeCopy(manual_bytes, &tensor_, true);
tensor_.dims = dims_;
tensor_.type = kTfLiteVariant;
ASSERT_EQ(context_.ResizeTensor(&context_, &tensor_, tensor_.dims),
kTfLiteOk);
EXPECT_EQ(tensor_.dims, dims_);
std::fill_n(tensor_.data.raw, tensor_.bytes, 0);
std::fill_n(tensor_.dims->data, tensor_.dims->size, 1);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/common/selectors/subgraph.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/core/subgraph_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b698b8f6-d114-4655-8a28-439344c2c25d | cpp | google/leveldb | dbformat | db/dbformat.cc | db/dbformat_test.cc | #include "db/dbformat.h"
#include <cstdio>
#include <sstream>
#include "port/port.h"
#include "util/coding.h"
namespace leveldb {
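// An internal key is the user key followed by an 8-byte trailer that packs
// the sequence number into the high 56 bits and the value type into the low
// 8 bits.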
static uint64_t PackSequenceAndType(uint64_t seq, ValueType t) {
assert(seq <= kMaxSequenceNumber);
assert(t <= kValueTypeForSeek);
return (seq << 8) | t;
}
void AppendInternalKey(std::string* result, const ParsedInternalKey& key) {
result->append(key.user_key.data(), key.user_key.size());
PutFixed64(result, PackSequenceAndType(key.sequence, key.type));
}
std::string ParsedInternalKey::DebugString() const {
std::ostringstream ss;
ss << '\'' << EscapeString(user_key.ToString()) << "' @ " << sequence << " : "
<< static_cast<int>(type);
return ss.str();
}
std::string InternalKey::DebugString() const {
ParsedInternalKey parsed;
if (ParseInternalKey(rep_, &parsed)) {
return parsed.DebugString();
}
std::ostringstream ss;
ss << "(bad)" << EscapeString(rep_);
return ss.str();
}
const char* InternalKeyComparator::Name() const {
return "leveldb.InternalKeyComparator";
}
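// Order by increasing user key, then by decreasing sequence number, then by
// decreasing type, so the newest entry for a given user key sorts first.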
int InternalKeyComparator::Compare(const Slice& akey, const Slice& bkey) const {
int r = user_comparator_->Compare(ExtractUserKey(akey), ExtractUserKey(bkey));
if (r == 0) {
const uint64_t anum = DecodeFixed64(akey.data() + akey.size() - 8);
const uint64_t bnum = DecodeFixed64(bkey.data() + bkey.size() - 8);
if (anum > bnum) {
r = -1;
} else if (anum < bnum) {
r = +1;
}
}
return r;
}
void InternalKeyComparator::FindShortestSeparator(std::string* start,
const Slice& limit) const {
Slice user_start = ExtractUserKey(*start);
Slice user_limit = ExtractUserKey(limit);
std::string tmp(user_start.data(), user_start.size());
user_comparator_->FindShortestSeparator(&tmp, user_limit);
if (tmp.size() < user_start.size() &&
user_comparator_->Compare(user_start, tmp) < 0) {
PutFixed64(&tmp,
PackSequenceAndType(kMaxSequenceNumber, kValueTypeForSeek));
assert(this->Compare(*start, tmp) < 0);
assert(this->Compare(tmp, limit) < 0);
start->swap(tmp);
}
}
void InternalKeyComparator::FindShortSuccessor(std::string* key) const {
Slice user_key = ExtractUserKey(*key);
std::string tmp(user_key.data(), user_key.size());
user_comparator_->FindShortSuccessor(&tmp);
if (tmp.size() < user_key.size() &&
user_comparator_->Compare(user_key, tmp) < 0) {
PutFixed64(&tmp,
PackSequenceAndType(kMaxSequenceNumber, kValueTypeForSeek));
assert(this->Compare(*key, tmp) < 0);
key->swap(tmp);
}
}
const char* InternalFilterPolicy::Name() const { return user_policy_->Name(); }
void InternalFilterPolicy::CreateFilter(const Slice* keys, int n,
std::string* dst) const {
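  // Strip the internal-key trailers in place; the table-building code
  // tolerates the keys being modified out from under it.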
Slice* mkey = const_cast<Slice*>(keys);
for (int i = 0; i < n; i++) {
mkey[i] = ExtractUserKey(keys[i]);
}
user_policy_->CreateFilter(keys, n, dst);
}
bool InternalFilterPolicy::KeyMayMatch(const Slice& key, const Slice& f) const {
return user_policy_->KeyMayMatch(ExtractUserKey(key), f);
}
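// A LookupKey packs a varint32 of (user key length + 8), the user key bytes,
// and the fixed64 (sequence << 8 | type) trailer; usize + 13 covers the
// worst-case 5-byte varint32 plus the 8-byte trailer.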
LookupKey::LookupKey(const Slice& user_key, SequenceNumber s) {
size_t usize = user_key.size();
size_t needed = usize + 13;
char* dst;
if (needed <= sizeof(space_)) {
dst = space_;
} else {
dst = new char[needed];
}
start_ = dst;
dst = EncodeVarint32(dst, usize + 8);
kstart_ = dst;
std::memcpy(dst, user_key.data(), usize);
dst += usize;
EncodeFixed64(dst, PackSequenceAndType(s, kValueTypeForSeek));
dst += 8;
end_ = dst;
}
} | #include "db/dbformat.h"
#include "gtest/gtest.h"
#include "util/logging.h"
namespace leveldb {
static std::string IKey(const std::string& user_key, uint64_t seq,
ValueType vt) {
std::string encoded;
AppendInternalKey(&encoded, ParsedInternalKey(user_key, seq, vt));
return encoded;
}
static std::string Shorten(const std::string& s, const std::string& l) {
std::string result = s;
InternalKeyComparator(BytewiseComparator()).FindShortestSeparator(&result, l);
return result;
}
static std::string ShortSuccessor(const std::string& s) {
std::string result = s;
InternalKeyComparator(BytewiseComparator()).FindShortSuccessor(&result);
return result;
}
static void TestKey(const std::string& key, uint64_t seq, ValueType vt) {
std::string encoded = IKey(key, seq, vt);
Slice in(encoded);
ParsedInternalKey decoded("", 0, kTypeValue);
ASSERT_TRUE(ParseInternalKey(in, &decoded));
ASSERT_EQ(key, decoded.user_key.ToString());
ASSERT_EQ(seq, decoded.sequence);
ASSERT_EQ(vt, decoded.type);
ASSERT_TRUE(!ParseInternalKey(Slice("bar"), &decoded));
}
TEST(FormatTest, InternalKey_EncodeDecode) {
const char* keys[] = {"", "k", "hello", "longggggggggggggggggggggg"};
const uint64_t seq[] = {1,
2,
3,
(1ull << 8) - 1,
1ull << 8,
(1ull << 8) + 1,
(1ull << 16) - 1,
1ull << 16,
(1ull << 16) + 1,
(1ull << 32) - 1,
1ull << 32,
(1ull << 32) + 1};
for (int k = 0; k < sizeof(keys) / sizeof(keys[0]); k++) {
for (int s = 0; s < sizeof(seq) / sizeof(seq[0]); s++) {
TestKey(keys[k], seq[s], kTypeValue);
TestKey("hello", 1, kTypeDeletion);
}
}
}
TEST(FormatTest, InternalKey_DecodeFromEmpty) {
InternalKey internal_key;
ASSERT_TRUE(!internal_key.DecodeFrom(""));
}
TEST(FormatTest, InternalKeyShortSeparator) {
ASSERT_EQ(IKey("foo", 100, kTypeValue),
Shorten(IKey("foo", 100, kTypeValue), IKey("foo", 99, kTypeValue)));
ASSERT_EQ(
IKey("foo", 100, kTypeValue),
Shorten(IKey("foo", 100, kTypeValue), IKey("foo", 101, kTypeValue)));
ASSERT_EQ(
IKey("foo", 100, kTypeValue),
Shorten(IKey("foo", 100, kTypeValue), IKey("foo", 100, kTypeValue)));
ASSERT_EQ(
IKey("foo", 100, kTypeValue),
Shorten(IKey("foo", 100, kTypeValue), IKey("foo", 100, kTypeDeletion)));
ASSERT_EQ(IKey("foo", 100, kTypeValue),
Shorten(IKey("foo", 100, kTypeValue), IKey("bar", 99, kTypeValue)));
ASSERT_EQ(
IKey("g", kMaxSequenceNumber, kValueTypeForSeek),
Shorten(IKey("foo", 100, kTypeValue), IKey("hello", 200, kTypeValue)));
ASSERT_EQ(
IKey("foo", 100, kTypeValue),
Shorten(IKey("foo", 100, kTypeValue), IKey("foobar", 200, kTypeValue)));
ASSERT_EQ(
IKey("foobar", 100, kTypeValue),
Shorten(IKey("foobar", 100, kTypeValue), IKey("foo", 200, kTypeValue)));
}
TEST(FormatTest, InternalKeyShortestSuccessor) {
ASSERT_EQ(IKey("g", kMaxSequenceNumber, kValueTypeForSeek),
ShortSuccessor(IKey("foo", 100, kTypeValue)));
ASSERT_EQ(IKey("\xff\xff", 100, kTypeValue),
ShortSuccessor(IKey("\xff\xff", 100, kTypeValue)));
}
TEST(FormatTest, ParsedInternalKeyDebugString) {
ParsedInternalKey key("The \"key\" in 'single quotes'", 42, kTypeValue);
ASSERT_EQ("'The \"key\" in 'single quotes'' @ 42 : 1", key.DebugString());
}
TEST(FormatTest, InternalKeyDebugString) {
InternalKey key("The \"key\" in 'single quotes'", 42, kTypeValue);
ASSERT_EQ("'The \"key\" in 'single quotes'' @ 42 : 1", key.DebugString());
InternalKey invalid_key;
ASSERT_EQ("(bad)", invalid_key.DebugString());
}
} | https://github.com/google/leveldb/blob/23e35d792b9154f922b8b575b12596a4d8664c65/db/dbformat.cc | https://github.com/google/leveldb/blob/23e35d792b9154f922b8b575b12596a4d8664c65/db/dbformat_test.cc | 23e35d792b9154f922b8b575b12596a4d8664c65 |
2af69398-20b4-4a9a-9f51-bca2455e1db4 | cpp | tensorflow/tensorflow | sample_stable_delegate_external | tensorflow/lite/delegates/utils/experimental/sample_stable_delegate/sample_stable_delegate_external.cc | tensorflow/lite/delegates/utils/experimental/sample_stable_delegate/sample_stable_delegate_external_test.cc | #include <memory>
#include <utility>
#include "tensorflow/lite/acceleration/configuration/c/delegate_plugin.h"
#include "tensorflow/lite/acceleration/configuration/c/stable_delegate.h"
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/delegates/utils/experimental/sample_stable_delegate/sample_stable_delegate.h"
#include "tensorflow/lite/delegates/utils/experimental/stable_delegate/stable_delegate_interface.h"
#include "tensorflow/lite/delegates/utils/simple_opaque_delegate.h"
namespace {
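// Creates the sample delegate wrapped as a TfLiteOpaqueDelegate; this sample
// ignores the settings passed in tflite_settings.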
TfLiteOpaqueDelegate* SampleStableDelegateCreateFunc(
const void* tflite_settings) {
auto delegate = std::make_unique<tflite::example::SampleStableDelegate>();
return tflite::TfLiteOpaqueDelegateFactory::CreateSimpleDelegate(
std::move(delegate));
}
void SampleStableDelegateDestroyFunc(
TfLiteOpaqueDelegate* sample_stable_delegate) {
tflite::TfLiteOpaqueDelegateFactory::DeleteSimpleDelegate(
sample_stable_delegate);
}
int SampleStableDelegateErrnoFunc(
TfLiteOpaqueDelegate* sample_stable_delegate) {
return 0;
}
const TfLiteOpaqueDelegatePlugin sample_stable_delegate_plugin = {
SampleStableDelegateCreateFunc, SampleStableDelegateDestroyFunc,
SampleStableDelegateErrnoFunc};
const TfLiteStableDelegate sample_stable_delegate = {
TFL_STABLE_DELEGATE_ABI_VERSION, tflite::example::kSampleStableDelegateName,
tflite::example::kSampleStableDelegateVersion,
&sample_stable_delegate_plugin};
}
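// The exported symbol that the stable-delegate shared-library loader
// resolves by name.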
extern "C" const TfLiteStableDelegate TFL_TheStableDelegate =
sample_stable_delegate; | #include <cstdint>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/lite/acceleration/configuration/c/stable_delegate.h"
#include "tensorflow/lite/acceleration/configuration/configuration_generated.h"
#include "tensorflow/lite/c/c_api.h"
#include "tensorflow/lite/c/c_api_opaque.h"
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/delegates/utils/experimental/sample_stable_delegate/sample_stable_delegate.h"
#include "tensorflow/lite/delegates/utils/experimental/stable_delegate/delegate_loader.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace {
using tflite::TFLiteSettings;
using tflite::TFLiteSettingsBuilder;
using tflite::delegates::utils::LoadDelegateFromSharedLibrary;
TEST(SampleStableDelegate, LoadFromSharedLibraryFile) {
const TfLiteStableDelegate* stable_delegate_handle =
LoadDelegateFromSharedLibrary(
"tensorflow/lite/delegates/utils/experimental/"
"sample_stable_delegate/libtensorflowlite_sample_stable_delegate.so");
ASSERT_NE(stable_delegate_handle, nullptr);
EXPECT_STREQ(stable_delegate_handle->delegate_abi_version,
TFL_STABLE_DELEGATE_ABI_VERSION);
EXPECT_STREQ(stable_delegate_handle->delegate_name,
tflite::example::kSampleStableDelegateName);
EXPECT_STREQ(stable_delegate_handle->delegate_version,
tflite::example::kSampleStableDelegateVersion);
ASSERT_NE(stable_delegate_handle->delegate_plugin, nullptr);
}
TEST(SampleStableDelegate, LoadFromSharedLibraryTestFile) {
const TfLiteStableDelegate* stable_delegate_handle =
LoadDelegateFromSharedLibrary(
"tensorflow/lite/delegates/utils/experimental/"
"sample_stable_delegate/"
"libtensorflowlite_sample_stable_delegate.so");
ASSERT_NE(stable_delegate_handle, nullptr);
EXPECT_STREQ(stable_delegate_handle->delegate_abi_version,
TFL_STABLE_DELEGATE_ABI_VERSION);
EXPECT_STREQ(stable_delegate_handle->delegate_name,
tflite::example::kSampleStableDelegateName);
EXPECT_STREQ(stable_delegate_handle->delegate_version,
tflite::example::kSampleStableDelegateVersion);
ASSERT_NE(stable_delegate_handle->delegate_plugin, nullptr);
flatbuffers::FlatBufferBuilder flatbuffer_builder;
TFLiteSettingsBuilder tflite_settings_builder(flatbuffer_builder);
flatbuffers::Offset<TFLiteSettings> tflite_settings =
tflite_settings_builder.Finish();
flatbuffer_builder.Finish(tflite_settings);
const TFLiteSettings* settings = flatbuffers::GetRoot<TFLiteSettings>(
flatbuffer_builder.GetBufferPointer());
TfLiteOpaqueDelegate* opaque_delegate =
stable_delegate_handle->delegate_plugin->create(settings);
ASSERT_NE(opaque_delegate, nullptr);
TfLiteModel* model =
TfLiteModelCreateFromFile("tensorflow/lite/testdata/add.bin");
ASSERT_NE(model, nullptr);
TfLiteInterpreterOptions* options = TfLiteInterpreterOptionsCreate();
ASSERT_NE(options, nullptr);
TfLiteInterpreterOptionsAddDelegate(options, opaque_delegate);
TfLiteInterpreter* interpreter = TfLiteInterpreterCreate(model, options);
ASSERT_NE(interpreter, nullptr);
TfLiteInterpreterOptionsDelete(options);
ASSERT_EQ(TfLiteInterpreterAllocateTensors(interpreter), kTfLiteOk);
TfLiteTensor* input_tensor =
TfLiteInterpreterGetInputTensor(interpreter, 0);
ASSERT_NE(input_tensor, nullptr);
const float kTensorCellValue = 3.f;
std::int64_t n = tflite::NumElements(input_tensor);
std::vector<float> input(n, kTensorCellValue);
ASSERT_EQ(TfLiteTensorCopyFromBuffer(input_tensor, input.data(),
input.size() * sizeof(float)),
kTfLiteOk);
ASSERT_EQ(TfLiteInterpreterInvoke(interpreter), kTfLiteOk);
const TfLiteTensor* output_tensor =
TfLiteInterpreterGetOutputTensor(interpreter, 0);
ASSERT_NE(output_tensor, nullptr);
std::vector<float> output(n, 0);
ASSERT_EQ(TfLiteTensorCopyToBuffer(output_tensor, output.data(),
output.size() * sizeof(float)),
kTfLiteOk);
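// The add.bin test model effectively triples its input; the assertion below
// encodes out = 3 * in.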
for (int i = 0; i < output.size(); ++i) {
EXPECT_EQ(output[i], kTensorCellValue * 3);
}
TfLiteInterpreterDelete(interpreter);
TfLiteModelDelete(model);
stable_delegate_handle->delegate_plugin->destroy(opaque_delegate);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/utils/experimental/sample_stable_delegate/sample_stable_delegate_external.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/utils/experimental/sample_stable_delegate/sample_stable_delegate_external_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
a088a86b-daf2-4447-b1ba-c4c90bd22950 | cpp | tensorflow/tensorflow | mlir_graph_optimization_pass | tensorflow/compiler/mlir/mlir_graph_optimization_pass.cc | tensorflow/compiler/mlir/mlir_graph_optimization_pass_test.cc | #include "tensorflow/compiler/mlir/mlir_graph_optimization_pass.h"
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/FormatVariadic.h"
#include "llvm/Support/raw_ostream.h"
#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/Dialect/Func/Extensions/AllExtensions.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/Shape/IR/Shape.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OperationSupport.h"
#include "mlir/IR/OwningOpRef.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_device.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_dialect.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_executor.h"
#include "tensorflow/compiler/mlir/tensorflow/translate/import_model.h"
#include "tensorflow/compiler/mlir/tensorflow/translate/mlir_roundtrip_flags.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/device_util.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/dump_mlir_util.h"
#include "tensorflow/compiler/mlir/tf2xla/api/v2/tf_executor_to_graph.h"
#include "tensorflow/core/common_runtime/device_set.h"
#include "tensorflow/core/common_runtime/function_optimization_registry.h"
#include "tensorflow/core/common_runtime/optimization_registry.h"
#include "tensorflow/core/framework/graph_debug_info.pb.h"
#include "tensorflow/core/framework/metrics.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/lib/monitoring/counter.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/file_system.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/protobuf/config.pb.h"
#include "tensorflow/core/public/session_options.h"
#include "tensorflow/core/util/debug_data_dumper.h"
#include "tsl/platform/errors.h"
namespace tensorflow {
auto* mlir_function_pass_fallback_count = monitoring::Counter<1>::New(
"/tensorflow/core/mlir_function_pass_fallback_count",
"Track success/failure of MLIR pass runs when fallback used",
"status");
auto* mlir_graph_optimization_pass_fallback_count = monitoring::Counter<1>::New(
"/tensorflow/core/mlir_graph_optimization_pass_fallback_count",
"Track success/failure of MLIR graph optimization pass runs when fallback "
"used",
"status");
auto* mlir_function_pass_graph_conversion_count = monitoring::Counter<1>::New(
"/tensorflow/core/mlir_function_pass_graph_conversion_count",
"Track success/failure of Graph to MLIR conversions in function "
"optimization pass",
"status");
constexpr char kSuccess[] = "kSuccess";
constexpr char kFailure[] = "kFailure";
static inline absl::string_view StringRefToView(llvm::StringRef ref) {
return {ref.data(), ref.size()};
}
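// Writes `module` as text to a uniquely named .mlir file under the dump
// directory taken from the environment; silently does nothing if no dump
// directory is configured.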
static void DumpModule(mlir::ModuleOp module, std::string file_prefix) {
std::string prefix = GetDumpDirFromEnvVar();
if (prefix.empty()) return;
auto* env = tensorflow::Env::Default();
auto status = env->RecursivelyCreateDir(prefix);
if (!status.ok()) {
LOG(WARNING) << "cannot create directory '" << prefix
<< "': " << status.message();
return;
}
prefix += "/" + file_prefix;
if (!tensorflow::Env::Default()->CreateUniqueFileName(&prefix, ".mlir")) {
LOG(WARNING) << "cannot create unique filename, won't dump MLIR module.";
return;
}
std::unique_ptr<WritableFile> file_writer;
status = env->NewWritableFile(prefix, &file_writer);
if (!status.ok()) {
LOG(WARNING) << "cannot open file '" << prefix << "': " << status.message();
return;
}
std::string txt_module;
{
llvm::raw_string_ostream os(txt_module);
module.print(os);
}
status = file_writer->Append(txt_module);
if (!status.ok()) {
LOG(WARNING) << "error writing to file '" << prefix
<< "': " << status.message();
return;
}
(void)file_writer->Close();
VLOG(1) << "Dumped MLIR module to " << prefix;
}
MlirOptimizationPassRegistry& MlirOptimizationPassRegistry::Global() {
static auto* global = new MlirOptimizationPassRegistry();
return *global;
}
static void RegisterDialects(mlir::DialectRegistry& registry) {
registry.insert<mlir::arith::ArithDialect,
mlir::func::FuncDialect,
mlir::TF::TensorFlowDialect,
mlir::shape::ShapeDialect,
mlir::tf_device::TensorFlowDeviceDialect,
mlir::tf_executor::TensorFlowExecutorDialect>();
mlir::func::registerAllExtensions(registry);
}
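// Converts the graph to MLIR, runs each registered pass that is Enabled or
// FallbackEnabled (fallback passes run on a clone), and converts back to a
// graph only when some pass actually modified the module.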
Status MlirFunctionOptimizationPass::Run(
const std::string& function_name, const DeviceSet& device_set,
const ConfigProto& config_proto,
const FunctionOptimizationPass::FunctionOptions& function_options,
std::unique_ptr<Graph>* graph, FunctionLibraryDefinition* flib_def,
std::vector<std::string>* control_ret_node_names,
bool* control_rets_updated) {
MlirOptimizationPassState overall_state = MlirOptimizationPassState::Disabled;
std::vector<MlirOptimizationPassState> per_pass_state;
per_pass_state.reserve(registry_->passes().size());
int num_passes_enabled = 0;
int num_passes_disabled = 0;
int num_passes_fallback_enabled = 0;
for (const auto& pass_registration : registry_->passes()) {
MlirOptimizationPassState pass_state = pass_registration.pass->GetPassState(
&device_set, config_proto, **graph, *flib_def);
per_pass_state.push_back(pass_state);
switch (pass_state) {
case MlirOptimizationPassState::FallbackEnabled: {
if (overall_state != MlirOptimizationPassState::Enabled)
overall_state = MlirOptimizationPassState::FallbackEnabled;
++num_passes_fallback_enabled;
break;
}
case MlirOptimizationPassState::Enabled: {
overall_state = MlirOptimizationPassState::Enabled;
++num_passes_enabled;
break;
}
case MlirOptimizationPassState::Disabled: {
++num_passes_disabled;
break;
}
}
}
if (overall_state == MlirOptimizationPassState::Disabled) {
if (VLOG_IS_ON(1)) {
LOG_FIRST_N(INFO, 1)
<< "None of the MLIR Optimization Passes are enabled "
<< "(registered " << registry_->passes().size() << ")";
}
return absl::OkStatus();
}
if (VLOG_IS_ON(1)) {
LOG_FIRST_N(INFO, 1) << "MLIR Graph Optimization Passes."
<< " Enabled: " << num_passes_enabled
<< ", Disabled: " << num_passes_disabled
<< ", FallbackEnabled: " << num_passes_fallback_enabled
<< ", Total: " << registry_->passes().size();
}
GraphDebugInfo debug_info;
mlir::DialectRegistry registry;
RegisterDialects(registry);
mlir::MLIRContext context(registry);
GraphImportConfig import_config;
import_config.graph_as_function = true;
import_config.control_outputs = *control_ret_node_names;
import_config.upgrade_legacy = true;
import_config.enable_shape_inference = false;
import_config.xla_compile_device_type =
function_options.xla_compile_device_type;
import_config.enable_soft_placement = function_options.allow_soft_placement;
static const char* kTfMlirCategory = "TfMlir";
tensorflow::metrics::ScopedCounter<2> timings(
tensorflow::metrics::GetGraphOptimizationCounter(),
{kTfMlirCategory, "convert_graph_to_mlir"});
auto module_ref_status = ConvertGraphToMlir(**graph, debug_info, *flib_def,
import_config, &context);
mlir_function_pass_graph_conversion_count
->GetCell(absl::StatusCodeToString(module_ref_status.status().code()))
->IncrementBy(1);
timings.ReportAndStop();
if (!module_ref_status.ok()) {
if (overall_state == MlirOptimizationPassState::Enabled) {
return module_ref_status.status();
}
LOG(WARNING) << "Failed to convert graph to MLIR: "
<< module_ref_status.status()
<< " , continuing without MlirOptimizationPass because "
"fallback enabled.";
return absl::OkStatus();
}
mlir::OwningOpRef<mlir::ModuleOp> module_ref =
std::move(module_ref_status.value());
AddDevicesToOp(*module_ref, &device_set);
int per_pass_state_index = 0;
bool is_module_updated = false;
for (auto& pass_registration : registry_->passes()) {
llvm::StringRef name = pass_registration.pass->name();
if (DEBUG_DATA_DUMPER()->ShouldDump(function_name, kDebugGroupMain) ||
VLOG_IS_ON(1)) {
::tensorflow::DumpMlirOpToFile(
DEBUG_DATA_DUMPER()->GetDumpFilename(
function_name, kDebugGroupMain,
llvm::formatv("mlir_{0}_before", name)),
*module_ref, llvm::StringRef(), nullptr);
}
Status pass_status = absl::OkStatus();
auto pass_state = per_pass_state[per_pass_state_index++];
if (pass_state == MlirOptimizationPassState::Enabled) {
VLOG(2) << "Run MLIR graph optimization pass: " << StringRefToView(name);
VLOG(2) << "Graph #nodes " << (*graph)->num_nodes() << " #edges "
<< (*graph)->num_edges();
timings.Reset({kTfMlirCategory, name.str()});
pass_status = pass_registration.pass->Run(
function_name, config_proto, *module_ref, **graph, *flib_def);
timings.ReportAndStop();
if (pass_status.ok()) {
VLOG(2) << "Finished MLIR graph optimization pass: "
<< StringRefToView(name);
VLOG(2) << "Graph #nodes " << (*graph)->num_nodes() << " #edges "
<< (*graph)->num_edges();
is_module_updated = true;
}
} else if (pass_state == MlirOptimizationPassState::FallbackEnabled) {
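// Fallback-enabled passes run on a clone so that a failing pass leaves the
// original module (and hence the graph) untouched.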
VLOG(2) << "Run MLIR graph optimization pass with fallback: "
<< StringRefToView(name);
VLOG(2) << "Graph #nodes " << (*graph)->num_nodes() << " #edges "
<< (*graph)->num_edges();
auto module_ref_clone = module_ref->clone();
timings.Reset({kTfMlirCategory, name.str() + "_fallback"});
pass_status = pass_registration.pass->Run(
function_name, config_proto, module_ref_clone, **graph, *flib_def);
timings.ReportAndStop();
if (pass_status.ok()) {
VLOG(2) << "Finished MLIR graph optimization pass with fallback: "
<< StringRefToView(name);
VLOG(2) << "Graph #nodes " << (*graph)->num_nodes() << " #edges "
<< (*graph)->num_edges();
module_ref = module_ref_clone;
is_module_updated = true;
} else {
module_ref_clone->destroy();
}
} else {
VLOG(2) << "MLIR graph optimization pass: " << StringRefToView(name)
<< " is disabled and will not be run.";
}
if (!pass_status.ok()) {
if (pass_state == MlirOptimizationPassState::FallbackEnabled) {
LOG(WARNING) << StringRefToView(name)
<< " pass failed, continuing without the pass because the "
"pass has fallback enabled";
mlir_function_pass_fallback_count->GetCell(kFailure)->IncrementBy(1);
} else if (pass_state == MlirOptimizationPassState::Enabled) {
return pass_status;
}
} else {
if (pass_state == MlirOptimizationPassState::FallbackEnabled) {
mlir_function_pass_fallback_count->GetCell(kSuccess)->IncrementBy(1);
}
}
if (DEBUG_DATA_DUMPER()->ShouldDump(function_name, kDebugGroupMain) ||
VLOG_IS_ON(1)) {
::tensorflow::DumpMlirOpToFile(DEBUG_DATA_DUMPER()->GetDumpFilename(
function_name, kDebugGroupMain,
llvm::formatv("mlir_{0}_after", name)),
*module_ref, llvm::StringRef(), nullptr);
}
}
if (!is_module_updated) {
VLOG(2) << "MLIR module is not updated. Using the original graph. "
<< "Do not convert mlir module back to graph";
return absl::OkStatus();
}
GraphExportConfig export_config;
absl::flat_hash_set<Node*> control_ret_nodes;
timings.Reset({kTfMlirCategory, "convert_mlir_to_graph"});
Status status = tensorflow::tf2xla::v2::ConvertTfExecutorToGraph(
*module_ref, export_config, graph, flib_def, &control_ret_nodes);
if (!status.ok()) {
errors::AppendToMessage(&status,
"Error converting MLIR module back to graph");
return status;
}
timings.ReportAndStop();
control_ret_node_names->clear();
control_ret_node_names->reserve(control_ret_nodes.size());
for (const auto* node : control_ret_nodes)
control_ret_node_names->push_back(node->name());
*control_rets_updated = true;
return absl::OkStatus();
}
MlirV1CompatOptimizationPassRegistry&
MlirV1CompatOptimizationPassRegistry::Global() {
static auto* global = new MlirV1CompatOptimizationPassRegistry();
return *global;
}
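// V1 compat path: a single optional registered pass, applied only to
// non-function (session) graphs.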
Status MlirV1CompatGraphOptimizationPass::Run(
const GraphOptimizationPassOptions& options) {
if (options.is_function_graph || !registry_->pass()) return absl::OkStatus();
auto pass = registry_->pass();
auto pass_state =
pass->GetPassState(options.device_set, options.session_options->config,
**options.graph, *options.flib_def);
if (pass_state == MlirOptimizationPassState::Disabled) {
LOG_FIRST_N(INFO, 1) << "MLIR V1 optimization pass is not enabled";
return absl::OkStatus();
}
LOG_FIRST_N(INFO, 1) << "Running MLIR Graph Optimization V1 Compat Pass";
GraphDebugInfo debug_info;
mlir::DialectRegistry registry;
RegisterDialects(registry);
mlir::MLIRContext context(registry);
GraphImportConfig import_config;
import_config.upgrade_legacy = true;
import_config.restrict_functionalization_to_compiled_nodes = true;
auto module_ref_status = ConvertGraphToMlir(
**options.graph, debug_info, *options.flib_def, import_config, &context);
if (!module_ref_status.ok()) {
if (pass_state == MlirOptimizationPassState::Enabled) {
return module_ref_status.status();
}
LOG(WARNING) << "Failed to convert graph to MLIR: "
<< module_ref_status.status()
<< " , continuing without MlirOptimizationPass because "
"fallback enabled.";
return absl::OkStatus();
}
mlir::OwningOpRef<mlir::ModuleOp> module_ref =
std::move(module_ref_status.value());
AddDevicesToOp(*module_ref, options.device_set);
auto module_ref_clone = module_ref->clone();
llvm::StringRef name = pass->name();
VLOG(2) << "Run MLIR V1 graph optimization pass: " << StringRefToView(name);
if (VLOG_IS_ON(1)) {
DumpModule(*module_ref, llvm::formatv("mlir_{0}_before_", name));
}
Status pass_status = pass->Run(options, *module_ref);
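// A clone was taken before the run; structural comparison (ignoring
// locations) tells us whether the pass changed anything.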
bool is_module_updated = !mlir::OperationEquivalence::isEquivalentTo(
module_ref_clone, *module_ref,
mlir::OperationEquivalence::Flags::IgnoreLocations);
module_ref_clone->destroy();
if (!pass_status.ok()) {
if (pass_state == MlirOptimizationPassState::Enabled) return pass_status;
if (pass_state == MlirOptimizationPassState::FallbackEnabled) {
LOG(WARNING) << StringRefToView(name)
<< " pass failed, continuing without the pass because the "
"pass has fallback enabled";
mlir_graph_optimization_pass_fallback_count->GetCell(kFailure)
->IncrementBy(1);
return absl::OkStatus();
}
} else {
if (pass_state == MlirOptimizationPassState::FallbackEnabled) {
mlir_graph_optimization_pass_fallback_count->GetCell(kSuccess)
->IncrementBy(1);
}
}
if (VLOG_IS_ON(1)) {
DumpModule(*module_ref, llvm::formatv("mlir_{0}_after_", name));
}
if (!is_module_updated) {
VLOG(2) << "MLIR module is not updated. Using the original graph. "
<< "Do not convert mlir module back to graph";
return absl::OkStatus();
}
GraphExportConfig export_config;
absl::flat_hash_set<Node*> control_ret_nodes;
TF_RETURN_WITH_CONTEXT_IF_ERROR(
tensorflow::tf2xla::v2::ConvertTfExecutorToGraph(
*module_ref, export_config, options.graph, options.flib_def,
&control_ret_nodes),
"Error converting MLIR module back to graph");
return absl::OkStatus();
}
} | #include "tensorflow/compiler/mlir/mlir_graph_optimization_pass.h"
#include <map>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include "absl/status/status.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringRef.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinOps.h"
#include "tensorflow/core/common_runtime/device_set.h"
#include "tensorflow/core/common_runtime/function_optimization_registry.h"
#include "tensorflow/core/common_runtime/optimization_registry.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/lib/monitoring/cell_reader.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/protobuf/config.pb.h"
#include "tensorflow/core/public/session_options.h"
namespace tensorflow {
using ::testing::_;
using ::testing::NiceMock;
using ::testing::Return;
using ::testing::Test;
constexpr char kOk[] = "OK";
constexpr char kInvalidArgument[] = "INVALID_ARGUMENT";
constexpr char kSuccess[] = "kSuccess";
constexpr char kFailure[] = "kFailure";
class MockMlirOptimizationPass : public MlirOptimizationPass {
public:
MOCK_METHOD(llvm::StringRef, name, (), (const, override));
MOCK_METHOD(MlirOptimizationPassState, GetPassState,
(const DeviceSet* device_set, const ConfigProto& config_proto,
const Graph& graph,
const FunctionLibraryDefinition& function_library),
(const, override));
MOCK_METHOD(Status, Run,
(const std::string& function_name,
const ConfigProto& config_proto, mlir::ModuleOp module,
const Graph& graph,
const FunctionLibraryDefinition& function_library),
(override));
};
class MockMlirV1CompatOptimizationPass : public MlirV1CompatOptimizationPass {
public:
MOCK_METHOD(llvm::StringRef, name, (), (const, override));
MOCK_METHOD(MlirOptimizationPassState, GetPassState,
(const DeviceSet* device_set, const ConfigProto& config_proto,
const Graph& graph,
const FunctionLibraryDefinition& function_library),
(const, override));
MOCK_METHOD(Status, Run,
(const GraphOptimizationPassOptions& options,
mlir::ModuleOp module),
(override));
};
class ModifyMlirModulePass : public MlirOptimizationPass {
public:
explicit ModifyMlirModulePass(Status run_status) : run_status_(run_status) {}
MOCK_METHOD(llvm::StringRef, name, (), (const, override));
MOCK_METHOD(MlirOptimizationPassState, GetPassState,
(const DeviceSet* device_set, const ConfigProto& config_proto,
const Graph& graph,
const FunctionLibraryDefinition& function_library),
(const, override));
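// Overwrites the module's tf.versions attribute, giving tests a visible side
// effect for detecting that the MLIR round-trip altered the graph.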
Status Run(const std::string& function_name, const ConfigProto& config_proto,
mlir::ModuleOp module, const Graph& graph,
const FunctionLibraryDefinition& function_library) override {
mlir::Builder b(module.getContext());
auto producer = b.getNamedAttr("producer", b.getI32IntegerAttr(0));
auto min_consumer = b.getNamedAttr("min_consumer", b.getI32IntegerAttr(0));
auto bad_consumers =
b.getNamedAttr("bad_consumers", b.getI32ArrayAttr({1, 2, 3, 4}));
module->setAttr("tf.versions",
b.getDictionaryAttr(llvm::ArrayRef<mlir::NamedAttribute>(
{producer, min_consumer, bad_consumers})));
return run_status_;
}
Status run_status_;
};
FunctionDef XTimesTwo() {
const Tensor kTwo = test::AsScalar<int64>(2);
return FunctionDefHelper::Define(
"XTimesTwo",
{"x: T"},
{"y: T"},
{"T: {float, double, int32, int64}"},
{
{{"two"}, "Const", {}, {{"value", kTwo}, {"dtype", DT_INT64}}},
{{"scale"}, "Cast", {"two"}, {{"SrcT", DT_INT64}, {"DstT", "$T"}}},
{{"y"}, "Mul", {"x", "scale"}, {{"T", "$T"}}},
});
}
class MlirGraphOptimizationPassTest : public Test {
public:
void Init(Status pass_run_result,
const std::vector<MlirOptimizationPassState>& pass_states) {
graph_ = std::make_unique<Graph>(OpRegistry::Global());
int pass_priority = 0;
for (const MlirOptimizationPassState& pass_state : pass_states) {
auto optimization_pass =
std::make_unique<NiceMock<MockMlirOptimizationPass>>();
ON_CALL(*optimization_pass, GetPassState(_, _, _, _))
.WillByDefault(Return(pass_state));
ON_CALL(*optimization_pass, Run(_, _, _, _, _))
.WillByDefault(Return(pass_run_result));
MlirOptimizationPassRegistry::Global().Add(pass_priority++,
std::move(optimization_pass));
pass_result_expected_[pass_state][pass_run_result.ok()]++;
}
flib_ = std::make_unique<FunctionLibraryDefinition>(graph_->flib_def());
}
void AddModuleModificationPass(MlirOptimizationPassState pass_state,
Status run_status) {
auto optimization_pass =
std::make_unique<NiceMock<ModifyMlirModulePass>>(run_status);
ON_CALL(*optimization_pass, GetPassState(_, _, _, _))
.WillByDefault(Return(pass_state));
MlirOptimizationPassRegistry::Global().Add(10,
std::move(optimization_pass));
pass_result_expected_[pass_state][run_status.ok()]++;
}
void TearDown() override {
MlirOptimizationPassRegistry::Global().ClearPasses();
}
void verifyGraph(const GraphDef& original_graph_def, bool changed = false) {
#if defined(PLATFORM_GOOGLE)
GraphDef resulted_graph_def;
graph_->ToGraphDef(&resulted_graph_def);
if (changed)
EXPECT_THAT(resulted_graph_def,
Not(::testing::proto::IgnoringRepeatedFieldOrdering(
::testing::EquivToProto(original_graph_def))));
else
EXPECT_THAT(resulted_graph_def,
::testing::proto::IgnoringRepeatedFieldOrdering(
::testing::EquivToProto(original_graph_def)));
#endif
}
void verifyCounters() {
EXPECT_EQ(mlir_function_pass_fallback_count_.Read(kSuccess),
pass_result_expected_[MlirOptimizationPassState::FallbackEnabled]
[true]);
EXPECT_EQ(mlir_function_pass_fallback_count_.Read(kFailure),
pass_result_expected_[MlirOptimizationPassState::FallbackEnabled]
[false]);
EXPECT_EQ(mlir_function_pass_graph_conversion_count_.Read(kOk), 1);
}
ConfigProto config_proto_;
FunctionOptimizationPass::FunctionOptions function_options_;
MlirFunctionOptimizationPass function_optimization_pass_;
DeviceSet device_set_;
std::unique_ptr<Graph> graph_;
std::unique_ptr<FunctionLibraryDefinition> flib_;
std::vector<std::string> control_ret_node_names_;
bool control_rets_updated_{false};
monitoring::testing::CellReader<int64_t> mlir_function_pass_fallback_count_ =
monitoring::testing::CellReader<int64_t>(
"/tensorflow/core/mlir_function_pass_fallback_count");
monitoring::testing::CellReader<int64_t>
mlir_graph_optimization_pass_fallback_count_ =
monitoring::testing::CellReader<int64_t>(
"/tensorflow/core/mlir_graph_optimization_pass_fallback_count");
monitoring::testing::CellReader<int64_t>
mlir_function_pass_graph_conversion_count_ =
monitoring::testing::CellReader<int64_t>(
"/tensorflow/core/mlir_function_pass_graph_conversion_count");
std::map<MlirOptimizationPassState, std::map<bool, int64_t>>
pass_result_expected_;
};
TEST_F(MlirGraphOptimizationPassTest, OptimizationPassFailsNoFallback) {
Init(Status(absl::StatusCode::kAborted, "aborted"),
{MlirOptimizationPassState::Enabled});
GraphDef original_graph_def;
graph_->ToGraphDef(&original_graph_def);
EXPECT_EQ(
function_optimization_pass_.Run(
"test_func", device_set_, config_proto_, function_options_, &graph_,
flib_.get(), &control_ret_node_names_, &control_rets_updated_),
Status(absl::StatusCode::kAborted, "aborted"));
verifyGraph(original_graph_def);
verifyCounters();
}
TEST_F(MlirGraphOptimizationPassTest, OptimizationPassFailsDisabledFallback) {
Init(Status(absl::StatusCode::kAborted, "aborted"),
{MlirOptimizationPassState::Disabled,
MlirOptimizationPassState::FallbackEnabled});
FunctionDefLibrary flib;
*flib.add_function() = XTimesTwo();
FunctionLibraryDefinition flib_def(OpRegistry::Global(), flib);
graph_ = std::make_unique<Graph>(flib_def);
GraphDef original_graph_def;
graph_->ToGraphDef(&original_graph_def);
AddModuleModificationPass(MlirOptimizationPassState::FallbackEnabled,
Status(absl::StatusCode::kAborted, "aborted"));
EXPECT_EQ(
function_optimization_pass_.Run(
"test_func", device_set_, config_proto_, function_options_, &graph_,
flib_.get(), &control_ret_node_names_, &control_rets_updated_),
absl::OkStatus());
verifyGraph(original_graph_def);
verifyCounters();
}
TEST_F(MlirGraphOptimizationPassTest, OptimizationPassDoesNotFailFallback) {
Init(absl::OkStatus(), {MlirOptimizationPassState::FallbackEnabled});
GraphDef original_graph_def;
graph_->ToGraphDef(&original_graph_def);
AddModuleModificationPass(MlirOptimizationPassState::FallbackEnabled,
absl::OkStatus());
EXPECT_EQ(
function_optimization_pass_.Run(
"test_func", device_set_, config_proto_, function_options_, &graph_,
flib_.get(), &control_ret_node_names_, &control_rets_updated_),
absl::OkStatus());
verifyGraph(original_graph_def, true);
verifyCounters();
}
TEST_F(MlirGraphOptimizationPassTest, GraphDoesntConvertUpdatesCounter) {
Init(absl::OkStatus(), {MlirOptimizationPassState::FallbackEnabled});
graph_ = std::make_unique<Graph>(OpRegistry::Global());
control_ret_node_names_.push_back("foo");
AddModuleModificationPass(MlirOptimizationPassState::FallbackEnabled,
absl::OkStatus());
EXPECT_EQ(
function_optimization_pass_.Run(
"test_func", device_set_, config_proto_, function_options_, &graph_,
flib_.get(), &control_ret_node_names_, &control_rets_updated_),
absl::OkStatus());
EXPECT_EQ(mlir_function_pass_graph_conversion_count_.Read(kOk), 0);
EXPECT_EQ(mlir_function_pass_graph_conversion_count_.Read(kInvalidArgument),
1);
}
TEST(MlirOptimizationPassRegistry, RegisterPassesWithTheSamePriorityFails) {
MlirOptimizationPassRegistry::Global().Add(
0, std::make_unique<NiceMock<MockMlirOptimizationPass>>());
EXPECT_DEATH(MlirOptimizationPassRegistry::Global().Add(
0, std::make_unique<NiceMock<MockMlirOptimizationPass>>()),
"Pass priority must be unique.");
}
TEST(MlirV1CompatOptimizationPassRegistry, RegisterMultiplePassesFails) {
MlirV1CompatOptimizationPassRegistry::Global().Add(
std::make_unique<NiceMock<MockMlirV1CompatOptimizationPass>>());
EXPECT_DEATH(
MlirV1CompatOptimizationPassRegistry::Global().Add(
std::make_unique<NiceMock<MockMlirV1CompatOptimizationPass>>()),
"Only a single pass can be registered");
}
class MlirGraphOptimizationV1PassTest : public Test {
public:
void Init(Status pass_run_result,
const std::vector<MlirOptimizationPassState>& pass_states) {
graph_ = std::make_unique<Graph>(OpRegistry::Global());
MlirV1CompatOptimizationPassRegistry::Global().ClearPass();
for (const MlirOptimizationPassState& pass_state : pass_states) {
auto optimization_pass =
std::make_unique<NiceMock<MockMlirV1CompatOptimizationPass>>();
ON_CALL(*optimization_pass, GetPassState(_, _, _, _))
.WillByDefault(Return(pass_state));
ON_CALL(*optimization_pass, Run(_, _))
.WillByDefault(Return(pass_run_result));
MlirV1CompatOptimizationPassRegistry::Global().Add(
std::move(optimization_pass));
pass_result_expected_[pass_state][pass_run_result.ok()]++;
}
flib_ = std::make_unique<FunctionLibraryDefinition>(graph_->flib_def());
InitGraphOptions();
}
void verifyGraph(const GraphDef& original_graph_def, bool changed = false) {
#if defined(PLATFORM_GOOGLE)
GraphDef resulted_graph_def;
graph_->ToGraphDef(&resulted_graph_def);
if (changed)
EXPECT_THAT(resulted_graph_def,
Not(::testing::proto::IgnoringRepeatedFieldOrdering(
::testing::EquivToProto(original_graph_def))));
else
EXPECT_THAT(resulted_graph_def,
::testing::proto::IgnoringRepeatedFieldOrdering(
::testing::EquivToProto(original_graph_def)));
#endif
}
void InitGraphOptions() {
session_options_.config = config_proto_;
graph_optimization_pass_options_.device_set = &device_set_;
graph_optimization_pass_options_.session_options = &session_options_;
graph_optimization_pass_options_.graph = &graph_;
graph_optimization_pass_options_.flib_def = flib_.get();
}
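// The V1 compat pass never touches the function-pass counters, so with the
// single successful test below every expectation here reduces to zero.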
void verifyCounters() {
EXPECT_EQ(mlir_function_pass_fallback_count_.Read(kSuccess),
pass_result_expected_[MlirOptimizationPassState::FallbackEnabled]
[false]);
EXPECT_EQ(mlir_function_pass_fallback_count_.Read(kFailure),
pass_result_expected_[MlirOptimizationPassState::FallbackEnabled]
[false]);
EXPECT_EQ(mlir_function_pass_graph_conversion_count_.Read(kOk), 0);
}
void TearDown() override {
MlirV1CompatOptimizationPassRegistry::Global().ClearPass();
}
ConfigProto config_proto_;
FunctionOptimizationPass::FunctionOptions function_options_;
MlirV1CompatGraphOptimizationPass function_optimization_pass_;
DeviceSet device_set_;
std::unique_ptr<Graph> graph_;
std::unique_ptr<FunctionLibraryDefinition> flib_;
std::vector<std::string> control_ret_node_names_;
bool control_rets_updated_{false};
SessionOptions session_options_;
tensorflow::GraphOptimizationPassOptions graph_optimization_pass_options_;
std::map<MlirOptimizationPassState, std::map<bool, int64_t>>
pass_result_expected_;
monitoring::testing::CellReader<int64_t> mlir_function_pass_fallback_count_ =
monitoring::testing::CellReader<int64_t>(
"/tensorflow/core/mlir_function_pass_fallback_count");
monitoring::testing::CellReader<int64_t>
mlir_graph_optimization_pass_fallback_count_ =
monitoring::testing::CellReader<int64_t>(
"/tensorflow/core/mlir_graph_optimization_pass_fallback_count");
monitoring::testing::CellReader<int64_t>
mlir_function_pass_graph_conversion_count_ =
monitoring::testing::CellReader<int64_t>(
"/tensorflow/core/mlir_function_pass_graph_conversion_count");
};
TEST_F(MlirGraphOptimizationV1PassTest, OptimizationPassDoesNotFailFallback) {
Init(absl::OkStatus(), {MlirOptimizationPassState::FallbackEnabled});
GraphDef original_graph_def;
graph_->ToGraphDef(&original_graph_def);
EXPECT_EQ(function_optimization_pass_.Run(graph_optimization_pass_options_),
absl::OkStatus());
verifyGraph(original_graph_def, false);
verifyCounters();
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/mlir_graph_optimization_pass.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/mlir_graph_optimization_pass_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
015dca0e-f9cb-4031-88b3-3bdd69de74ab | cpp | google/arolla | edge_ops | arolla/qexpr/operators/array_like/edge_ops.h | arolla/qexpr/operators/dense_array/edge_ops_test.cc | #ifndef AROLLA_QEXPR_OPERATORS_ARRAY_LIKE_EDGE_OPS_H_
#define AROLLA_QEXPR_OPERATORS_ARRAY_LIKE_EDGE_OPS_H_
#include <cstddef>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/status/statusor.h"
#include "absl/types/span.h"
#include "arolla/memory/frame.h"
#include "arolla/qexpr/bound_operators.h"
#include "arolla/qexpr/eval_context.h"
#include "arolla/qexpr/operators.h"
#include "arolla/qexpr/qexpr_operator_signature.h"
#include "arolla/qtype/qtype.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla {
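// QExpr operator that composes a chain of `size` edges of the same edge type
// into one edge via EdgeT::ComposeEdges.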
template <typename EdgeT>
class EdgeComposeOperator : public InlineOperator {
public:
explicit EdgeComposeOperator(size_t size)
: InlineOperator(QExprOperatorSignature::Get(
std::vector<QTypePtr>(size, ::arolla::GetQType<EdgeT>()),
::arolla::GetQType<EdgeT>())) {}
private:
absl::StatusOr<std::unique_ptr<BoundOperator>> DoBind(
absl::Span<const TypedSlot> input_slots,
TypedSlot output_slot) const override {
std::vector<Slot<EdgeT>> input_edge_slots;
input_edge_slots.reserve(input_slots.size());
for (const auto& input_slot : input_slots) {
ASSIGN_OR_RETURN(Slot<EdgeT> edge_slot, input_slot.ToSlot<EdgeT>());
input_edge_slots.push_back(std::move(edge_slot));
}
ASSIGN_OR_RETURN(Slot<EdgeT> output_edge_slot, output_slot.ToSlot<EdgeT>());
return MakeBoundOperator([input_edge_slots = std::move(input_edge_slots),
output_edge_slot = std::move(output_edge_slot)](
EvaluationContext* ctx, FramePtr frame) {
std::vector<EdgeT> edges;
edges.reserve(input_edge_slots.size());
for (const auto& edge_slot : input_edge_slots) {
edges.push_back(frame.Get(edge_slot));
}
ASSIGN_OR_RETURN(auto composed_edge,
EdgeT::ComposeEdges(edges, ctx->buffer_factory()),
ctx->set_status(std::move(_)));
frame.Set(output_edge_slot, std::move(composed_edge));
});
}
};
}
#endif | #include <cstdint>
#include <optional>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/status/status.h"
#include "absl/status/status_matchers.h"
#include "arolla/dense_array/dense_array.h"
#include "arolla/dense_array/edge.h"
#include "arolla/dense_array/qtype/types.h"
#include "arolla/memory/optional_value.h"
#include "arolla/qexpr/operators.h"
#include "arolla/qtype/shape_qtype.h"
#include "arolla/util/bytes.h"
#include "arolla/util/text.h"
#include "arolla/util/unit.h"
namespace arolla {
namespace {
using ::absl_testing::IsOkAndHolds;
using ::absl_testing::StatusIs;
using ::testing::ElementsAre;
using ::testing::HasSubstr;
TEST(EdgeOpsTest, EdgeFromSplitPointsOp) {
auto sizes = CreateDenseArray<int64_t>({0, 2, 5, 6, 6, 8});
ASSERT_OK_AND_ASSIGN(auto edge, InvokeOperator<DenseArrayEdge>(
"edge.from_split_points", sizes));
EXPECT_THAT(edge.edge_values().values, ElementsAre(0, 2, 5, 6, 6, 8));
}
TEST(EdgeOpsTest, EdgeFromMappingOp) {
auto mapping = CreateDenseArray<int64_t>({0, 2, 5, 6, 6, 8});
ASSERT_OK_AND_ASSIGN(
auto edge, InvokeOperator<DenseArrayEdge>("edge.from_mapping", mapping,
int64_t{10}));
EXPECT_THAT(edge.edge_values().values, ElementsAre(0, 2, 5, 6, 6, 8));
EXPECT_THAT(
InvokeOperator<DenseArrayEdge>("edge.from_mapping", mapping, int64_t{5}),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("parent_size=5, but parent id 8 is used")));
}
TEST(EdgeOpsTest, EdgeFromSizesOp) {
auto sizes = CreateDenseArray<int64_t>({2, 3, 1, 0, 2});
ASSERT_OK_AND_ASSIGN(
auto edge, InvokeOperator<DenseArrayEdge>("edge.from_sizes", sizes));
EXPECT_THAT(edge.edge_values().values, ElementsAre(0, 2, 5, 6, 6, 8));
}
TEST(EdgeOpsTest, EdgeFromShapeOp) {
ASSERT_OK_AND_ASSIGN(auto edge, InvokeOperator<DenseArrayGroupScalarEdge>(
"edge.from_shape", DenseArrayShape{5}));
EXPECT_THAT(edge.child_size(), 5);
}
TEST(EdgeOpsTest, MappingOp) {
{
const auto mapping = CreateDenseArray<int64_t>({1, 2, 3});
ASSERT_OK_AND_ASSIGN(auto edge, DenseArrayEdge::FromMapping(mapping, 4));
EXPECT_THAT(InvokeOperator<DenseArray<int64_t>>("edge.mapping", edge),
IsOkAndHolds(ElementsAre(1, 2, 3)));
}
{
const auto splits = CreateDenseArray<int64_t>({0, 2, 5});
ASSERT_OK_AND_ASSIGN(auto edge, DenseArrayEdge::FromSplitPoints(splits));
EXPECT_THAT(InvokeOperator<DenseArray<int64_t>>("edge.mapping", edge),
IsOkAndHolds(ElementsAre(0, 0, 1, 1, 1)));
}
}
TEST(EdgeOpsTest, FromKindAndShapeOp) {
auto split_points = CreateDenseArray<int64_t>({0, 2, 5, 6, 6, 8});
ASSERT_OK_AND_ASSIGN(auto edge,
DenseArrayEdge::FromSplitPoints(split_points));
EXPECT_THAT(InvokeOperator<DenseArrayShape>("edge.child_shape", edge),
IsOkAndHolds(DenseArrayShape{8}));
EXPECT_THAT(InvokeOperator<DenseArrayShape>("edge.child_shape",
DenseArrayGroupScalarEdge{5}),
IsOkAndHolds(DenseArrayShape{5}));
}
TEST(EdgeOpsTest, IntoKindAndShapeOp) {
auto split_points = CreateDenseArray<int64_t>({0, 2, 5, 6, 6, 8});
ASSERT_OK_AND_ASSIGN(auto edge,
DenseArrayEdge::FromSplitPoints(split_points));
EXPECT_THAT(InvokeOperator<DenseArrayShape>("edge.parent_shape", edge),
IsOkAndHolds(DenseArrayShape{5}));
EXPECT_THAT(InvokeOperator<OptionalScalarShape>("edge.parent_shape",
DenseArrayGroupScalarEdge{5}),
IsOkAndHolds(OptionalScalarShape{}));
}
TEST(EdgeOpsTest, ExpandOverMapping) {
auto mapping =
CreateDenseArray<int64_t>({0, 1, std::nullopt, 0, 1, 2, 2, 1, 0});
ASSERT_OK_AND_ASSIGN(auto edge, DenseArrayEdge::FromMapping(mapping, 3));
ASSERT_OK_AND_ASSIGN(auto bad_edge, DenseArrayEdge::FromMapping(mapping, 4));
{
auto values = CreateDenseArray<float>({0, std::nullopt, 1});
ASSERT_OK_AND_ASSIGN(
DenseArray<float> res,
InvokeOperator<DenseArray<float>>("array._expand", values, edge));
EXPECT_THAT(res, ElementsAre(0, std::nullopt, std::nullopt, 0, std::nullopt,
1, 1, std::nullopt, 0));
EXPECT_THAT(
InvokeOperator<DenseArray<float>>("array._expand", values, bad_edge),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("argument sizes mismatch")));
}
{
auto values = CreateDenseArray<Text>({Text("0"), std::nullopt, Text("1")});
ASSERT_OK_AND_ASSIGN(
DenseArray<Text> res,
InvokeOperator<DenseArray<Text>>("array._expand", values, edge));
EXPECT_THAT(res, ElementsAre(Text("0"), std::nullopt, std::nullopt,
Text("0"), std::nullopt, Text("1"), Text("1"),
std::nullopt, Text("0")));
EXPECT_EQ(values[0].value.begin(), res[0].value.begin());
EXPECT_EQ(values[0].value.begin(), res[3].value.begin());
EXPECT_EQ(values[0].value.begin(), res[8].value.begin());
EXPECT_EQ(values[2].value.begin(), res[5].value.begin());
EXPECT_EQ(values[2].value.begin(), res[6].value.begin());
}
}
TEST(EdgeOpsTest, ExpandOverSplitPoints) {
auto values =
CreateDenseArray<Bytes>({Bytes("first"), std::nullopt, Bytes("second")});
auto split_points = CreateDenseArray<int64_t>({0, 3, 6, 10});
ASSERT_OK_AND_ASSIGN(auto edge,
DenseArrayEdge::FromSplitPoints(split_points));
ASSERT_OK_AND_ASSIGN(auto res, InvokeOperator<DenseArray<Bytes>>(
"array._expand", values, edge));
EXPECT_THAT(
res, ElementsAre("first", "first", "first", std::nullopt, std::nullopt,
std::nullopt, "second", "second", "second", "second"));
EXPECT_EQ(values[0].value.begin(), res[0].value.begin());
EXPECT_EQ(values[0].value.begin(), res[2].value.begin());
EXPECT_EQ(values[2].value.begin(), res[6].value.begin());
EXPECT_EQ(values[2].value.begin(), res[9].value.begin());
}
TEST(EdgeOpsTest, ExpandOverSplitPointsNoBitmap) {
auto values = CreateFullDenseArray<Bytes>({Bytes("first"), Bytes("second")});
auto split_points = CreateDenseArray<int64_t>({0, 3, 7});
ASSERT_OK_AND_ASSIGN(auto edge,
DenseArrayEdge::FromSplitPoints(split_points));
ASSERT_OK_AND_ASSIGN(auto res, InvokeOperator<DenseArray<Bytes>>(
"array._expand", values, edge));
EXPECT_THAT(res, ElementsAre("first", "first", "first", "second", "second",
"second", "second"));
EXPECT_EQ(values[0].value.begin(), res[0].value.begin());
EXPECT_EQ(values[0].value.begin(), res[2].value.begin());
EXPECT_EQ(values[1].value.begin(), res[3].value.begin());
EXPECT_EQ(values[1].value.begin(), res[6].value.begin());
}
TEST(EdgeOpsTest, ExpandGroupScalarEdge) {
auto edge = DenseArrayGroupScalarEdge(3);
ASSERT_OK_AND_ASSIGN(
auto res1, InvokeOperator<DenseArray<Bytes>>(
"array._expand", MakeOptionalValue(Bytes("first")), edge));
EXPECT_THAT(res1, ElementsAre("first", "first", "first"));
ASSERT_OK_AND_ASSIGN(auto res2,
InvokeOperator<DenseArray<Bytes>>(
"array._expand", OptionalValue<Bytes>(), edge));
EXPECT_THAT(res2, ElementsAre(std::nullopt, std::nullopt, std::nullopt));
}
TEST(EdgeOpsTest, GroupByOp_Integral) {
const auto series = CreateDenseArray<int64_t>({101, 102, 103, 104});
ASSERT_OK_AND_ASSIGN(auto over, DenseArrayEdge::FromSplitPoints(
CreateDenseArray<int64_t>({0, 4})));
ASSERT_OK_AND_ASSIGN(auto edge, InvokeOperator<DenseArrayEdge>(
"edge._group_by", series, over));
EXPECT_EQ(edge.parent_size(), 4);
EXPECT_THAT(edge.edge_values(), ElementsAre(0, 1, 2, 3));
}
TEST(EdgeOpsTest, GroupByOp_Float) {
const auto series = CreateDenseArray<float>({5., 7., 1., 2., 4.});
ASSERT_OK_AND_ASSIGN(auto over, DenseArrayEdge::FromSplitPoints(
CreateDenseArray<int64_t>({0, 5})));
ASSERT_OK_AND_ASSIGN(auto edge, InvokeOperator<DenseArrayEdge>(
"edge._group_by", series, over));
EXPECT_EQ(edge.parent_size(), 5);
EXPECT_THAT(edge.edge_values(), ElementsAre(0, 1, 2, 3, 4));
}
TEST(EdgeOpsTest, GroupByOp_Bytes) {
const auto series = CreateDenseArray<Bytes>(
{Bytes("a"), Bytes("b"), Bytes("c"), Bytes("d"), Bytes("e")});
ASSERT_OK_AND_ASSIGN(auto over, DenseArrayEdge::FromSplitPoints(
CreateDenseArray<int64_t>({0, 5})));
ASSERT_OK_AND_ASSIGN(auto edge, InvokeOperator<DenseArrayEdge>(
"edge._group_by", series, over));
EXPECT_EQ(edge.parent_size(), 5);
EXPECT_THAT(edge.edge_values(), ElementsAre(0, 1, 2, 3, 4));
}
TEST(EdgeOpsTest, GroupByOp_DuplicatesInInputSeries) {
const auto series = CreateDenseArray<float>({5., 7., 5., 7., 4., 8.});
ASSERT_OK_AND_ASSIGN(auto over, DenseArrayEdge::FromSplitPoints(
CreateDenseArray<int64_t>({0, 6})));
ASSERT_OK_AND_ASSIGN(auto edge, InvokeOperator<DenseArrayEdge>(
"edge._group_by", series, over));
EXPECT_EQ(edge.parent_size(), 4);
EXPECT_THAT(edge.edge_values(), ElementsAre(0, 1, 0, 1, 2, 3));
}
TEST(EdgeOpsTest, GroupByOp_DuplicatesInInputSeries_WithSplits) {
const auto series = CreateDenseArray<float>({5., 7., 5., 7., 7., 8.});
ASSERT_OK_AND_ASSIGN(auto over, DenseArrayEdge::FromSplitPoints(
CreateDenseArray<int64_t>({0, 3, 6})));
ASSERT_OK_AND_ASSIGN(auto edge, InvokeOperator<DenseArrayEdge>(
"edge._group_by", series, over));
EXPECT_EQ(edge.parent_size(), 4);
EXPECT_THAT(edge.edge_values(), ElementsAre(0, 1, 0, 2, 2, 3));
}
TEST(EdgeOpsTest, GroupByOp_DuplicatesInInputSeries_WithMapping) {
const auto series = CreateDenseArray<float>({5., 7., 5., 7., 7., 8.});
ASSERT_OK_AND_ASSIGN(auto over,
DenseArrayEdge::FromMapping(
CreateDenseArray<int64_t>({1, 1, 0, 2, 1, 0}), 3));
ASSERT_OK_AND_ASSIGN(auto edge, InvokeOperator<DenseArrayEdge>(
"edge._group_by", series, over));
EXPECT_EQ(edge.parent_size(), 5);
EXPECT_THAT(edge.edge_values(), ElementsAre(0, 1, 2, 3, 1, 4));
}
TEST(EdgeOpsTest, GroupByOp_MissingValuesAndDuplicates) {
const auto series = CreateDenseArray<int64_t>({7, 8, std::nullopt, 7, 10, 8});
ASSERT_OK_AND_ASSIGN(auto over, DenseArrayEdge::FromSplitPoints(
CreateDenseArray<int64_t>({0, 6})));
ASSERT_OK_AND_ASSIGN(auto edge, InvokeOperator<DenseArrayEdge>(
"edge._group_by", series, over));
EXPECT_EQ(edge.parent_size(), 3);
EXPECT_THAT(edge.edge_values(), ElementsAre(0, 1, std::nullopt, 0, 2, 1));
}
TEST(EdgeOpsTest, GroupByOp_MissingValuesAndDuplicates_WithSplits) {
const auto series =
CreateDenseArray<int64_t>({7, 6, 7, 5, 5, std::nullopt, std::nullopt, 5,
5, std::nullopt, 7, 10, 7});
ASSERT_OK_AND_ASSIGN(auto over,
DenseArrayEdge::FromSplitPoints(
CreateDenseArray<int64_t>({0, 3, 4, 5, 7, 9, 13})));
ASSERT_OK_AND_ASSIGN(auto edge, InvokeOperator<DenseArrayEdge>(
"edge._group_by", series, over));
EXPECT_EQ(edge.parent_size(), 7);
EXPECT_THAT(edge.edge_values(),
ElementsAre(0, 1, 0, 2, 3, std::nullopt, std::nullopt, 4, 4,
std::nullopt, 5, 6, 5));
}
TEST(EdgeOpsTest, GroupByOp_EmptyDenseArray) {
const auto series = CreateDenseArray<int64_t>({});
ASSERT_OK_AND_ASSIGN(auto over, DenseArrayEdge::FromSplitPoints(
CreateDenseArray<int64_t>({0})));
ASSERT_OK_AND_ASSIGN(auto edge, InvokeOperator<DenseArrayEdge>(
"edge._group_by", series, over));
EXPECT_EQ(edge.parent_size(), 0);
EXPECT_THAT(edge.edge_values(), ElementsAre());
}
TEST(EdgeOpsTest, GroupByOp_MissingValuesAndDuplicates_WithMapping) {
const auto series =
CreateDenseArray<int64_t>({7, 6, 6, 7, 5, 5, std::nullopt, std::nullopt,
5, 5, std::nullopt, 7, 10, 7, 5});
ASSERT_OK_AND_ASSIGN(
auto over, DenseArrayEdge::FromMapping(
CreateDenseArray<int64_t>({2, std::nullopt, 2, 3, 1, 2, 2,
std::nullopt, 1, 2, 4, 2, 3, 3,
std::nullopt}),
5));
ASSERT_OK_AND_ASSIGN(auto edge, InvokeOperator<DenseArrayEdge>(
"edge._group_by", series, over));
EXPECT_EQ(edge.parent_size(), 6);
EXPECT_THAT(
edge.edge_values(),
ElementsAre(0, std::nullopt, 1, 2, 3, 4, std::nullopt, std::nullopt, 3, 4,
std::nullopt, 0, 5, 2, std::nullopt));
}
TEST(EdgeOpsTest, GroupByOp_IncompatibleOverEdge) {
const auto series = CreateDenseArray<int64_t>({1, 2});
ASSERT_OK_AND_ASSIGN(auto over, DenseArrayEdge::FromSplitPoints(
CreateDenseArray<int64_t>({0, 3})));
EXPECT_THAT(InvokeOperator<DenseArrayEdge>("edge._group_by", series, over),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("argument sizes mismatch")));
}
TEST(EdgeOpsTest, AggSizeEdgeOp_Mapping) {
auto mapping =
CreateDenseArray<int64_t>({0, std::nullopt, 0, std::nullopt, 2});
ASSERT_OK_AND_ASSIGN(auto edge, DenseArrayEdge::FromMapping(mapping, 3));
ASSERT_OK_AND_ASSIGN(auto dense_array,
InvokeOperator<DenseArray<int64_t>>("edge.sizes", edge));
EXPECT_THAT(dense_array, ElementsAre(2, 0, 1));
}
TEST(EdgeOpsTest, AggSizeEdgeOp_SplitPoints) {
auto split_points = CreateDenseArray<int64_t>({0, 2, 4, 4, 8});
ASSERT_OK_AND_ASSIGN(auto edge,
DenseArrayEdge::FromSplitPoints(split_points));
ASSERT_OK_AND_ASSIGN(auto dense_array,
InvokeOperator<DenseArray<int64_t>>("edge.sizes", edge));
EXPECT_THAT(dense_array, ElementsAre(2, 2, 0, 4));
}
TEST(EdgeOpsTest, TestAggCountScalarEdge) {
auto mask =
CreateDenseArray<Unit>({kUnit, std::nullopt, kUnit, std::nullopt});
auto edge = DenseArrayGroupScalarEdge(4);
EXPECT_THAT(InvokeOperator<int64_t>("array._count", mask, edge),
IsOkAndHolds(2));
}
TEST(EdgeOpsTest, EdgeComposeOp) {
{
ASSERT_OK_AND_ASSIGN(auto edge1, DenseArrayEdge::FromSplitPoints(
CreateDenseArray<int64_t>({0, 2, 3})));
ASSERT_OK_AND_ASSIGN(auto edge2,
DenseArrayEdge::FromSplitPoints(
CreateDenseArray<int64_t>({0, 1, 2, 4})));
ASSERT_OK_AND_ASSIGN(auto composed_edge,
InvokeOperator<DenseArrayEdge>(
"edge.compose._dense_array", edge1, edge2));
EXPECT_THAT(composed_edge.edge_values(), ElementsAre(0, 2, 4));
EXPECT_THAT(composed_edge.edge_type(), DenseArrayEdge::SPLIT_POINTS);
}
{
ASSERT_OK_AND_ASSIGN(auto edge1, DenseArrayEdge::FromSplitPoints(
CreateDenseArray<int64_t>({0, 2, 3})));
ASSERT_OK_AND_ASSIGN(auto edge2,
DenseArrayEdge::FromMapping(
CreateDenseArray<int64_t>({0, 1, 2, 2}), 3));
ASSERT_OK_AND_ASSIGN(auto composed_edge,
InvokeOperator<DenseArrayEdge>(
"edge.compose._dense_array", edge1, edge2));
EXPECT_THAT(composed_edge.edge_values(), ElementsAre(0, 0, 1, 1));
EXPECT_THAT(composed_edge.edge_type(), DenseArrayEdge::MAPPING);
}
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/qexpr/operators/array_like/edge_ops.h | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/qexpr/operators/dense_array/edge_ops_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
a51f6477-59f1-4381-9f29-e283489b4514 | cpp | abseil/abseil-cpp | vlog_is_on | absl/log/vlog_is_on.h | absl/log/vlog_is_on_test.cc | #ifndef ABSL_LOG_VLOG_IS_ON_H_
#define ABSL_LOG_VLOG_IS_ON_H_
#include "absl/log/absl_vlog_is_on.h"
#define VLOG_IS_ON(verbose_level) ABSL_VLOG_IS_ON(verbose_level)
#endif | #include "absl/log/vlog_is_on.h"
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/base/log_severity.h"
#include "absl/flags/flag.h"
#include "absl/log/flags.h"
#include "absl/log/globals.h"
#include "absl/log/log.h"
#include "absl/log/scoped_mock_log.h"
#include "absl/types/optional.h"
namespace {
using ::testing::_;
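// Compile-time verbosity/severity limits, when defined, make some runtime
// settings unobservable; the tests below skip themselves when the limits
// would interfere.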
absl::optional<int> MaxLogVerbosity() {
#ifdef ABSL_MAX_VLOG_VERBOSITY
return ABSL_MAX_VLOG_VERBOSITY;
#else
return absl::nullopt;
#endif
}
absl::optional<int> MinLogLevel() {
#ifdef ABSL_MIN_LOG_LEVEL
return static_cast<int>(ABSL_MIN_LOG_LEVEL);
#else
return absl::nullopt;
#endif
}
class VLogIsOnTest : public ::testing::Test {
protected:
void SetUp() override { ResetVLogLevels(); }
private:
static void ResetVLogLevels() {
absl::log_internal::UpdateVModule("");
absl::SetGlobalVLogLevel(0);
}
};
TEST_F(VLogIsOnTest, GlobalWorksWithoutMaxVerbosityAndMinLogLevel) {
if (MaxLogVerbosity().has_value() || MinLogLevel().has_value()) {
GTEST_SKIP();
}
absl::SetGlobalVLogLevel(3);
absl::ScopedMockLog log(absl::MockLogDefault::kDisallowUnexpected);
EXPECT_CALL(log, Log(absl::LogSeverity::kInfo, _, "important"));
log.StartCapturingLogs();
VLOG(3) << "important";
VLOG(4) << "spam";
}
TEST_F(VLogIsOnTest, FileWorksWithoutMaxVerbosityAndMinLogLevel) {
if (MaxLogVerbosity().has_value() || MinLogLevel().has_value()) {
GTEST_SKIP();
}
absl::SetVLogLevel("vlog_is_on_test", 3);
absl::ScopedMockLog log(absl::MockLogDefault::kDisallowUnexpected);
EXPECT_CALL(log, Log(absl::LogSeverity::kInfo, _, "important"));
log.StartCapturingLogs();
VLOG(3) << "important";
VLOG(4) << "spam";
}
TEST_F(VLogIsOnTest, PatternWorksWithoutMaxVerbosityAndMinLogLevel) {
if (MaxLogVerbosity().has_value() || MinLogLevel().has_value()) {
GTEST_SKIP();
}
absl::SetVLogLevel("vlog_is_on*", 3);
absl::ScopedMockLog log(absl::MockLogDefault::kDisallowUnexpected);
EXPECT_CALL(log, Log(absl::LogSeverity::kInfo, _, "important"));
log.StartCapturingLogs();
VLOG(3) << "important";
VLOG(4) << "spam";
}
TEST_F(VLogIsOnTest,
PatternOverridesLessGenericOneWithoutMaxVerbosityAndMinLogLevel) {
if (MaxLogVerbosity().has_value() || MinLogLevel().has_value()) {
GTEST_SKIP();
}
absl::SetVLogLevel("vlog_is_on*", -1);
absl::SetVLogLevel("vlog*", 3);
absl::ScopedMockLog log(absl::MockLogDefault::kDisallowUnexpected);
EXPECT_CALL(log, Log(absl::LogSeverity::kInfo, _, "important"));
log.StartCapturingLogs();
VLOG(3) << "important";
VLOG(4) << "spam";
}
TEST_F(VLogIsOnTest,
PatternDoesNotOverrideMoreGenericOneWithoutMaxVerbosityAndMinLogLevel) {
if (MaxLogVerbosity().has_value() || MinLogLevel().has_value()) {
GTEST_SKIP();
}
absl::SetVLogLevel("vlog*", 3);
absl::SetVLogLevel("vlog_is_on_some_other_test*", -1);
absl::ScopedMockLog log(absl::MockLogDefault::kDisallowUnexpected);
EXPECT_CALL(log, Log(absl::LogSeverity::kInfo, _, "important"));
log.StartCapturingLogs();
VLOG(3) << "important";
VLOG(5) << "spam";
}
TEST_F(VLogIsOnTest, GlobalDoesNotFilterBelowMaxVerbosity) {
if (!MaxLogVerbosity().has_value() || *MaxLogVerbosity() < 2) {
GTEST_SKIP();
}
absl::SetGlobalVLogLevel(1000);
absl::ScopedMockLog log(absl::MockLogDefault::kDisallowUnexpected);
EXPECT_CALL(log, Log(absl::LogSeverity::kInfo, _, "asdf"));
log.StartCapturingLogs();
VLOG(2) << "asdf";
}
TEST_F(VLogIsOnTest, FileDoesNotFilterBelowMaxVerbosity) {
if (!MaxLogVerbosity().has_value() || *MaxLogVerbosity() < 2) {
GTEST_SKIP();
}
absl::SetVLogLevel("vlog_is_on_test", 1000);
absl::ScopedMockLog log(absl::MockLogDefault::kDisallowUnexpected);
EXPECT_CALL(log, Log(absl::LogSeverity::kInfo, _, "asdf"));
log.StartCapturingLogs();
VLOG(2) << "asdf";
}
TEST_F(VLogIsOnTest, PatternDoesNotFilterBelowMaxVerbosity) {
if (!MaxLogVerbosity().has_value() || *MaxLogVerbosity() < 2) {
GTEST_SKIP();
}
absl::SetVLogLevel("vlog_is_on*", 1000);
absl::ScopedMockLog log(absl::MockLogDefault::kDisallowUnexpected);
EXPECT_CALL(log, Log(absl::LogSeverity::kInfo, _, "asdf"));
log.StartCapturingLogs();
VLOG(2) << "asdf";
}
TEST_F(VLogIsOnTest, GlobalFiltersAboveMaxVerbosity) {
if (!MaxLogVerbosity().has_value() || *MaxLogVerbosity() >= 4) {
GTEST_SKIP();
}
absl::SetGlobalVLogLevel(1000);
absl::ScopedMockLog log(absl::MockLogDefault::kDisallowUnexpected);
log.StartCapturingLogs();
VLOG(4) << "dfgh";
}
TEST_F(VLogIsOnTest, FileFiltersAboveMaxVerbosity) {
if (!MaxLogVerbosity().has_value() || *MaxLogVerbosity() >= 4) {
GTEST_SKIP();
}
absl::SetVLogLevel("vlog_is_on_test", 1000);
absl::ScopedMockLog log(absl::MockLogDefault::kDisallowUnexpected);
log.StartCapturingLogs();
VLOG(4) << "dfgh";
}
TEST_F(VLogIsOnTest, PatternFiltersAboveMaxVerbosity) {
if (!MaxLogVerbosity().has_value() || *MaxLogVerbosity() >= 4) {
GTEST_SKIP();
}
absl::SetVLogLevel("vlog_is_on*", 1000);
absl::ScopedMockLog log(absl::MockLogDefault::kDisallowUnexpected);
log.StartCapturingLogs();
VLOG(4) << "dfgh";
}
} | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/log/vlog_is_on.h | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/log/vlog_is_on_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4 |
36995878-42b9-45db-8e37-0f90cd01d40f | cpp | tensorflow/tensorflow | tf2xla_opset | tensorflow/compiler/tf2xla/tf2xla_opset.cc | tensorflow/compiler/tf2xla/tf2xla_opset_test.cc | #include "tensorflow/compiler/tf2xla/tf2xla_opset.h"
#include <algorithm>
#include <string>
#include <vector>
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "tensorflow/compiler/tf2xla/tf2xla_util.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "tensorflow/core/framework/kernel_def.pb.h"
namespace tensorflow {
const int SUPPORTED_DEVICES_NUM = 2;
static const char* const SUPPORTED_DEVICES[SUPPORTED_DEVICES_NUM] = {
DEVICE_GPU_XLA_JIT, DEVICE_CPU_XLA_JIT};
bool IsSupportedBackend(absl::string_view device_name) {
for (int i = 0; i < SUPPORTED_DEVICES_NUM; i++) {
if (SUPPORTED_DEVICES[i] == device_name) return true;
}
return false;
}
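// Lazily registers the XLA CPU and GPU JIT backends, relaxing the Const and
// Assert kernels to also accept DT_STRING.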
absl::Status RegisterBackends(absl::string_view device_name) {
if (!IsSupportedBackend(device_name)) {
return absl::InvalidArgumentError(
absl::StrCat(device_name, " is not supported. Supported devices are ",
absl::StrJoin(SUPPORTED_DEVICES, ", ")));
}
auto op_filter = [](KernelDef* kdef) {
if (kdef->op() == "Const") {
AddDtypeToKernelDefConstraint("dtype", DT_STRING, kdef);
}
if (kdef->op() == "Assert") {
AddDtypeToKernelDefConstraint("T", DT_STRING, kdef);
}
return true;
};
if (!XlaOpRegistry::IsBackendRegistered(DEVICE_GPU_XLA_JIT)) {
static auto gpu_backend =
XlaBackendRegistrar(DEVICE_GPU_XLA_JIT, kGpuAllTypes, op_filter);
}
if (!XlaOpRegistry::IsBackendRegistered(DEVICE_CPU_XLA_JIT)) {
static auto cpu_backend =
XlaBackendRegistrar(DEVICE_CPU_XLA_JIT, kCpuAllTypes, op_filter);
}
if (!XlaOpRegistry::IsBackendRegistered(std::string(device_name))) {
return absl::InternalError(
absl::StrCat(device_name, " is not registered."));
}
return absl::OkStatus();
}
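// Returns the sorted names of all ops with XLA kernels registered for
// `device_name`, registering the backend first if necessary.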
absl::StatusOr<std::vector<std::string>> GetRegisteredXlaOpsForDevice(
absl::string_view device_name) {
auto status = RegisterBackends(device_name);
if (!status.ok()) return status;
std::vector<const KernelDef*> kernel_defs =
XlaOpRegistry::DeviceKernels(std::string(device_name), true);
std::vector<std::string> op_names;
op_names.reserve(kernel_defs.size());
for (const auto& kernel_def : kernel_defs) {
op_names.push_back(kernel_def->op());
}
std::sort(op_names.begin(), op_names.end());
return op_names;
}
} | #include "tensorflow/compiler/tf2xla/tf2xla_opset.h"
#include <algorithm>
#include <string>
#include <vector>
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
TEST(GetXlaOpsForDeviceTest, InvalidDeviceToRegister) {
absl::StatusOr<std::vector<std::string>> result =
GetRegisteredXlaOpsForDevice("Invalid_Device");
EXPECT_FALSE(result.ok());
}
TEST(GetXlaOpsForDeviceTest, GetGpuNames) {
absl::StatusOr<std::vector<std::string>> result =
GetRegisteredXlaOpsForDevice("XLA_GPU_JIT");
EXPECT_GT(result.value().size(), 0);
auto matmul =
std::find(result.value().begin(), result.value().end(), "MatMul");
auto max = std::find(result.value().begin(), result.value().end(), "Max");
auto min = std::find(result.value().begin(), result.value().end(), "Min");
EXPECT_TRUE((matmul != result.value().end()));
EXPECT_TRUE((max != result.value().end()));
EXPECT_TRUE((min != result.value().end()));
EXPECT_LT(matmul, max);
EXPECT_LT(max, min);
}
TEST(GetXlaOpsForDeviceTest, GetCpuNames) {
absl::StatusOr<std::vector<std::string>> result =
GetRegisteredXlaOpsForDevice("XLA_CPU_JIT");
EXPECT_GT(result.value().size(), 0);
auto matmul =
std::find(result.value().begin(), result.value().end(), "MatMul");
auto max = std::find(result.value().begin(), result.value().end(), "Max");
auto min = std::find(result.value().begin(), result.value().end(), "Min");
EXPECT_TRUE((matmul != result.value().end()));
EXPECT_TRUE((max != result.value().end()));
EXPECT_TRUE((min != result.value().end()));
EXPECT_LT(matmul, max);
EXPECT_LT(max, min);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/tf2xla_opset.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/tf2xla_opset_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
6709acfa-25b3-45ca-86e4-21b8908657e7 | cpp | google/tensorstore | batch | tensorstore/serialization/batch.cc | tensorstore/batch_test.cc | #include "tensorstore/serialization/batch.h"
#include "absl/status/status.h"
#include "tensorstore/serialization/serialization.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace serialization {
BatchEncodeSink::BatchEncodeSink(riegeli::Writer& writer)
: EncodeSink(writer) {}
BatchEncodeSink::~BatchEncodeSink() = default;
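// Encodes an indirect object reference: writes the table index assigned to
// `object` and serializes the object body only on its first occurrence, so
// repeated references to the same shared_ptr are deduplicated.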
bool BatchEncodeSink::DoIndirect(const std::type_info& type,
ErasedEncodeWrapperFunction encode,
std::shared_ptr<void> object) {
auto [it, inserted] = indirect_map_.emplace(object, indirect_map_.size());
return serialization::WriteSize(writer(), it->second) &&
(!inserted || encode(*this, object));
}
BatchDecodeSource::BatchDecodeSource(riegeli::Reader& reader)
: DecodeSource(reader) {}
BatchDecodeSource::~BatchDecodeSource() = default;
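// Decodes an indirect object reference: an index below the current table
// size reuses a previously decoded object (after a type check), while an
// index equal to the table size means a new object is encoded inline.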
bool BatchDecodeSource::DoIndirect(const std::type_info& type,
ErasedDecodeWrapperFunction decode,
std::shared_ptr<void>& value) {
size_t id;
if (!serialization::ReadSize(reader(), id)) return false;
if (id > indirect_objects_.size()) {
Fail(DecodeError(tensorstore::StrCat("Indirect object index ", id,
" out of range [0, ",
indirect_objects_.size(), ")")));
return false;
}
if (id < indirect_objects_.size()) {
auto& entry = indirect_objects_[id];
if (*entry.type != type) {
Fail(absl::InvalidArgumentError(tensorstore::StrCat(
"Type mismatch for indirect object, received ", entry.type->name(),
" but expected ", type.name())));
return false;
}
value = entry.value;
return true;
}
indirect_objects_.emplace_back();
if (!decode(*this, value)) return false;
auto& entry = indirect_objects_[id];
entry.type = &type;
entry.value = value;
return true;
}
}
} | #include "tensorstore/batch.h"
#include <stddef.h>
#include <functional>
#include <string>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "tensorstore/batch_impl.h"
namespace {
using ::tensorstore::Batch;
using ::testing::ElementsAre;
using Log = std::vector<std::string>;
template <typename T>
struct Entry : public Batch::Impl::Entry {
using KeyParam = T;
Entry(Log& log, size_t nesting_depth, T key)
: Batch::Impl::Entry(nesting_depth), key_(key), log(log) {}
T key_;
T key() const { return key_; }
virtual void Submit(Batch::View batch) {
log.push_back(absl::StrCat("begin_submit ", key()));
for (auto& submit_func : submit_funcs) {
submit_func(batch);
}
log.push_back(absl::StrCat("end_submit ", key()));
delete this;
}
std::vector<std::function<void(Batch::View batch)>> submit_funcs;
Log& log;
};
template <typename T>
void AddFunc(Log& log, Batch::View batch, size_t nesting_depth, T key,
std::function<void(Batch::View)> func) {
auto& entry = Batch::Impl::From(batch)->GetEntry<Entry<T>>(
key, [&] { return std::make_unique<Entry<T>>(log, nesting_depth, key); });
entry.submit_funcs.emplace_back(std::move(func));
}
TEST(BatchTest, SingleNestingDepth) {
Log log;
auto batch = Batch::New();
for (int i = 0; i < 3; ++i) {
for (int j = 0; j < 2; ++j) {
      AddFunc<int>(log, batch, /*nesting_depth=*/0, /*key=*/i,
[&log, i, j](Batch::View batch) {
log.emplace_back(absl::StrFormat("i=%d, j=%d", i, j));
});
}
}
EXPECT_THAT(log, ElementsAre());
batch.Release();
EXPECT_THAT(log,
::testing::UnorderedElementsAre(
"begin_submit 0", "i=0, j=0", "i=0, j=1", "end_submit 0",
"begin_submit 1", "i=1, j=0", "i=1, j=1", "end_submit 1",
"begin_submit 2", "i=2, j=0", "i=2, j=1", "end_submit 2"));
}
TEST(BatchTest, MultipleNestingDepths) {
Log log;
auto batch = Batch::New();
for (int nesting_depth : {2, 3, 0}) {
AddFunc<int>(log, batch, nesting_depth, nesting_depth,
[](Batch::View batch) {});
}
EXPECT_THAT(log, ElementsAre());
batch.Release();
EXPECT_THAT(log, ::testing::ElementsAre("begin_submit 3", "end_submit 3",
"begin_submit 2", "end_submit 2",
"begin_submit 0", "end_submit 0"));
}
TEST(BatchTest, MultipleTypes) {
Log log;
auto batch = Batch::New();
  AddFunc<int>(log, batch, /*nesting_depth=*/0, /*key=*/42,
               [](Batch::View batch) {});
  AddFunc<float>(log, batch, /*nesting_depth=*/0, /*key=*/1.5,
                 [](Batch::View batch) {});
EXPECT_THAT(log, ElementsAre());
batch.Release();
EXPECT_THAT(log,
::testing::ElementsAre("begin_submit 42", "end_submit 42",
"begin_submit 1.5", "end_submit 1.5"));
}
TEST(BatchTest, Async) {
Log log;
auto batch = Batch::New();
Batch saved_batch{Batch::no_batch};
  AddFunc<int>(log, batch, /*nesting_depth=*/2, /*key=*/2,
               [&](Batch::View batch) { saved_batch = batch; });
  AddFunc<int>(log, batch, /*nesting_depth=*/1, /*key=*/3,
               [](Batch::View batch) {});
batch.Release();
EXPECT_THAT(log, ElementsAre("begin_submit 2", "end_submit 2"));
log.clear();
  AddFunc<int>(log, saved_batch, /*nesting_depth=*/1, /*key=*/1,
               [](Batch::View batch) {});
saved_batch.Release();
EXPECT_THAT(
log, ::testing::UnorderedElementsAre("begin_submit 1", "end_submit 1",
"begin_submit 3", "end_submit 3"));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/serialization/batch.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/batch_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
849cb0cf-5da9-4a0b-9798-8ff341a34518 | cpp | tensorflow/tensorflow | transpose_folding | third_party/xla/xla/service/transpose_folding.cc | third_party/xla/xla/service/transpose_folding_test.cc | #include "xla/service/transpose_folding.h"
#include <algorithm>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/status.h"
namespace xla {
namespace {
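// Returns the indices of `convolution` operands that are transposes,
// filtered through the `transposable_conv_operands` callback.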
TransposeFolding::OperandIndices CanFoldOperandsIntoConvolution(
const HloInstruction& convolution,
const TransposeFolding::TransposableConvOperandsFn&
transposable_conv_operands) {
if (HloOpcode::kConvolution != convolution.opcode()) {
return {};
}
TransposeFolding::OperandIndices operand_set;
for (int64_t i = 0; i < convolution.operand_count(); ++i) {
auto& operand = *convolution.operand(i);
if (operand.opcode() == HloOpcode::kTranspose) {
operand_set.push_back(i);
}
}
return transposable_conv_operands(convolution, operand_set);
}
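// Returns true if `instruction` is a transpose whose permutation moves at
// least one dimension.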
bool IsNonIdentityTranspose(const HloInstruction* instruction) {
if (instruction->opcode() == HloOpcode::kTranspose) {
for (int dim = 0; dim < instruction->dimensions().size(); ++dim) {
if (dim != instruction->dimensions(dim)) {
return true;
}
}
}
return false;
}
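// Remaps each dimension number in `dims` through the permutation
// `transpose_dims`.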
void TransposeDims(tsl::protobuf::RepeatedField<int64_t>& dims,
absl::Span<const int64_t> transpose_dims) {
for (auto& dim : dims) {
dim = transpose_dims[dim];
}
}
using InstructionOperandsPair =
std::pair<HloInstruction*, TransposeFolding::OperandIndices>;
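// Replaces the dot in `pair` with an equivalent dot that consumes the
// transpose operands directly, remapping the contracting and batch
// dimension numbers through each folded transpose.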
absl::Status FoldTransposeIntoDot(InstructionOperandsPair& pair) {
HloInstruction* dot = pair.first;
DotDimensionNumbers new_dot_dims = dot->dot_dimension_numbers();
HloInstruction* lhs = dot->mutable_operand(0);
HloInstruction* rhs = dot->mutable_operand(1);
for (int64_t operand_index : pair.second) {
if (operand_index == 0) {
TransposeDims(*new_dot_dims.mutable_lhs_contracting_dimensions(),
lhs->dimensions());
TransposeDims(*new_dot_dims.mutable_lhs_batch_dimensions(),
lhs->dimensions());
lhs = lhs->mutable_operand(0);
} else {
CHECK_EQ(operand_index, 1);
TransposeDims(*new_dot_dims.mutable_rhs_contracting_dimensions(),
rhs->dimensions());
TransposeDims(*new_dot_dims.mutable_rhs_batch_dimensions(),
rhs->dimensions());
rhs = rhs->mutable_operand(0);
}
}
return dot->parent()->ReplaceWithNewInstruction(
dot, HloInstruction::CreateDot(dot->shape(), lhs, rhs, new_dot_dims,
dot->precision_config()));
}
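// Folds transposed activation and/or kernel operands into the convolution
// in `pair` by permuting its dimension numbers; returns true if the
// convolution was replaced.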
bool FoldTransposeIntoConvolution(InstructionOperandsPair& pair) {
auto& convolution = *pair.first;
auto& operand_indices = pair.second;
if (operand_indices.empty()) {
return false;
}
const ConvolutionDimensionNumbers& dnums =
convolution.convolution_dimension_numbers();
ConvolutionDimensionNumbers new_dnums = dnums;
HloInstruction* new_lhs;
const int64_t kLhsIdx = 0;
if (absl::c_linear_search(operand_indices, kLhsIdx)) {
HloInstruction& transpose = *convolution.mutable_operand(kLhsIdx);
const auto& transpose_dimensions = transpose.dimensions();
HloInstruction& transpose_operand = *transpose.mutable_operand(0);
new_dnums.set_input_batch_dimension(
transpose_dimensions[dnums.input_batch_dimension()]);
new_dnums.set_input_feature_dimension(
transpose_dimensions[dnums.input_feature_dimension()]);
for (auto& input_spatial_dimension :
*new_dnums.mutable_input_spatial_dimensions()) {
input_spatial_dimension = transpose_dimensions[input_spatial_dimension];
}
new_lhs = &transpose_operand;
} else {
new_lhs = convolution.mutable_operand(kLhsIdx);
}
HloInstruction* new_rhs;
const int64_t kRhsIdx = 1;
if (absl::c_linear_search(operand_indices, kRhsIdx)) {
HloInstruction& transpose = *convolution.mutable_operand(kRhsIdx);
const auto& transpose_dimensions = transpose.dimensions();
HloInstruction& transpose_operand = *transpose.mutable_operand(0);
new_dnums.set_kernel_input_feature_dimension(
transpose_dimensions[dnums.kernel_input_feature_dimension()]);
new_dnums.set_kernel_output_feature_dimension(
transpose_dimensions[dnums.kernel_output_feature_dimension()]);
for (auto& kernel_spatial_dimension :
*new_dnums.mutable_kernel_spatial_dimensions()) {
kernel_spatial_dimension = transpose_dimensions[kernel_spatial_dimension];
}
new_rhs = &transpose_operand;
} else {
new_rhs = convolution.mutable_operand(kRhsIdx);
}
auto new_conv = HloInstruction::CreateConvolve(
convolution.shape(), new_lhs, new_rhs, convolution.feature_group_count(),
convolution.batch_group_count(), convolution.window(), new_dnums,
convolution.precision_config());
TF_CHECK_OK(convolution.parent()->ReplaceWithNewInstruction(
&convolution, std::move(new_conv)));
return true;
}
}
TransposeFolding::TransposeFolding(
CanFoldTransposeOperand dot_can_fold_transpose_operand,
TransposableConvOperandsFn transposable_conv_operands)
: dot_can_fold_transpose_operand_(
std::move(dot_can_fold_transpose_operand)),
transposable_conv_operands_(std::move(transposable_conv_operands)) {}
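// Collects all foldable dot and convolution instructions across non-fusion
// computations first, then rewrites them, so folding does not mutate the
// graph while it is still being traversed.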
absl::StatusOr<bool> TransposeFolding::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
std::vector<InstructionOperandsPair> foldable_dots;
std::vector<InstructionOperandsPair> foldable_convolutions;
FunctionVisitor visit_fn([this, &foldable_dots, &foldable_convolutions](
HloInstruction* instruction) {
if (instruction->opcode() == HloOpcode::kDot) {
if ((instruction->operand(0)->shape().rank() < 2) ||
(instruction->operand(1)->shape().rank() < 2)) {
return absl::OkStatus();
}
OperandIndices operand_indices;
for (int64_t i = 0; i < 2; ++i) {
if (!IsNonIdentityTranspose(instruction->operand(i))) {
continue;
}
TF_ASSIGN_OR_RETURN(bool can_fold_operand,
dot_can_fold_transpose_operand_(*instruction, i));
if (can_fold_operand) {
operand_indices.push_back(i);
}
}
if (!operand_indices.empty()) {
foldable_dots.emplace_back(instruction, operand_indices);
}
}
{
OperandIndices operand_indices = CanFoldOperandsIntoConvolution(
*instruction, transposable_conv_operands_);
if (!operand_indices.empty()) {
foldable_convolutions.emplace_back(instruction, operand_indices);
}
}
return absl::OkStatus();
});
for (auto* comp : module->MakeNonfusionComputations(execution_threads)) {
TF_RETURN_IF_ERROR(comp->Accept(&visit_fn));
}
bool changed = false;
for (InstructionOperandsPair& pair : foldable_dots) {
TF_RETURN_IF_ERROR(FoldTransposeIntoDot(pair));
changed = true;
}
for (InstructionOperandsPair& pair : foldable_convolutions) {
changed |= FoldTransposeIntoConvolution(pair);
}
return changed;
}
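// Returns true if the transpose feeding operand `operand_idx` of `dot` only
// swaps the two non-batch (row/column) dimensions: every batch dimension
// must map to itself and there must be exactly one contracting dimension.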
absl::StatusOr<bool>
TransposeFolding::IsRowColumnTransposeDotOperand(const HloInstruction& dot,
int64_t operand_idx) {
TF_RET_CHECK(dot.opcode() == HloOpcode::kDot);
TF_RET_CHECK(dot.operand_count() > operand_idx);
const HloInstruction& transpose = *dot.operand(operand_idx);
TF_RET_CHECK(transpose.opcode() == HloOpcode::kTranspose);
const DotDimensionNumbers& dot_dims = dot.dot_dimension_numbers();
auto batch_dims = (operand_idx == 0) ? dot_dims.lhs_batch_dimensions()
: dot_dims.rhs_batch_dimensions();
auto contracting_dims = (operand_idx == 0)
? dot_dims.lhs_contracting_dimensions()
: dot_dims.rhs_contracting_dimensions();
return (batch_dims.size() == transpose.shape().rank() - 2) &&
(contracting_dims.size() == 1) &&
absl::c_all_of(batch_dims, [&](int64_t dim) {
return transpose.dimensions(dim) == dim;
});
}
} | #include "xla/service/transpose_folding.h"
#include <memory>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/builder/xla_builder.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/literal.h"
#include "xla/service/gpu/ir_emission_utils.h"
#include "xla/service/shape_inference.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/test_helpers.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/status_matchers.h"
namespace xla {
namespace {
namespace op = xla::testing::opcode_matchers;
using ::tsl::testing::IsOkAndHolds;
using TransposeFoldingTest = HloTestBase;
TEST_F(TransposeFoldingTest, FoldDotTranspose) {
constexpr absl::string_view kHloString = R"(
HloModule FoldDotTranspose
ENTRY entry_computation {
x = f32[2,3]{1,0} parameter(0)
y = f32[2,3]{1,0} parameter(1)
transpose = f32[3,2]{1,0} transpose(y), dimensions={1,0}
ROOT dot = f32[2,2]{1,0} dot(x, transpose), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloString));
EXPECT_THAT(TransposeFolding().Run(module.get()), IsOkAndHolds(true));
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Dot(op::Parameter(0), op::Parameter(1),
                      /*lhs_contracting_dim=*/1, /*rhs_contracting_dim=*/1));
}
TEST_F(TransposeFoldingTest, DontFoldTransposeOfBatchDimByDefault) {
constexpr absl::string_view kHloString = R"(
HloModule FoldDotTranspose
ENTRY entry_computation {
x = f32[2,3] parameter(0)
y = f32[3,2] parameter(1)
transpose = f32[2,3] transpose(y), dimensions={1,0}
ROOT dot = f32[2] dot(x, transpose), lhs_batch_dims={0}, rhs_batch_dims={0}, lhs_contracting_dims={1}, rhs_contracting_dims={1}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloString));
EXPECT_THAT(TransposeFolding().Run(module.get()), IsOkAndHolds(false));
}
TEST_F(TransposeFoldingTest, FoldTransposeOfBatchWhenPermitted) {
constexpr absl::string_view kHloString = R"(
HloModule FoldDotTranspose
ENTRY entry_computation {
x = f32[5,2,3] parameter(0)
y = f32[3,5,4] parameter(1)
transpose = f32[5,3,4] transpose(y), dimensions={1,0,2}
ROOT dot = f32[5,2,4] dot(x, transpose), lhs_batch_dims={0}, rhs_batch_dims={0}, lhs_contracting_dims={2}, rhs_contracting_dims={1}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloString));
TransposeFolding transpose_folding(
      /*dot_can_fold_transpose_operand=*/[](const HloInstruction&, int64_t) {
return true;
});
EXPECT_THAT(transpose_folding.Run(module.get()), IsOkAndHolds(true));
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Dot(op::Parameter(0), op::Parameter(1),
                      /*lhs_contracting_dim=*/2, /*rhs_contracting_dim=*/0));
}
TEST_F(TransposeFoldingTest, DontFoldTransposeOfRank1Dot) {
constexpr absl::string_view kHloString = R"(
HloModule FoldDotTranspose
ENTRY entry_computation {
x = f32[3] parameter(0)
y = f32[3,2] parameter(1)
transpose = f32[2,3] transpose(y), dimensions={1,0}
ROOT dot = f32[2] dot(x, transpose), lhs_batch_dims={}, rhs_batch_dims={}, lhs_contracting_dims={0}, rhs_contracting_dims={1}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloString));
EXPECT_THAT(TransposeFolding().Run(module.get()), IsOkAndHolds(false));
}
TEST_F(TransposeFoldingTest, DontFoldTransposeOfDotWithoutContractingDims) {
constexpr absl::string_view kHloString = R"(
HloModule FoldDotTranspose
ENTRY entry_computation {
x = f32[3,4] parameter(0)
y = f32[3,4,6,7] parameter(1)
transpose = f32[3,4,7,6] transpose(y), dimensions={0,1,3,2}
ROOT dot = f32[3,4,7,6] dot(x, transpose), lhs_batch_dims={0,1}, rhs_batch_dims={0,1}, lhs_contracting_dims={}, rhs_contracting_dims={}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloString));
EXPECT_THAT(TransposeFolding().Run(module.get()), IsOkAndHolds(false));
}
TEST_F(TransposeFoldingTest, FoldDotTransposeConstant) {
constexpr absl::string_view kHloString = R"(
HloModule FoldDotTransposeConstant
ENTRY entry_computation {
constant = f32[2,1]{1,0} constant({ { 1 }, { 2 } })
transpose = f32[1,2]{1,0} transpose(constant), dimensions={1,0}
constant.1 = f32[3,2]{1,0} constant({ { 1, 2 }, { 3, 4 }, { 5, 6 } })
transpose.1 = f32[2,3]{1,0} transpose(constant.1), dimensions={1,0}
ROOT dot = f32[1,3]{1,0} dot(transpose, transpose.1), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloString));
EXPECT_THAT(TransposeFolding().Run(module.get()), IsOkAndHolds(true));
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Dot(op::Constant(), op::Constant(),
                      /*lhs_contracting_dim=*/0, /*rhs_contracting_dim=*/1));
}
TEST_F(TransposeFoldingTest, FuseDotWithConstantOperands) {
auto builder = HloComputation::Builder("entry");
HloInstruction* const1 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
HloInstruction* const2 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0)));
HloInstruction* const3 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(3.0)));
HloInstruction* add = builder.AddInstruction(HloInstruction::CreateBinary(
const1->shape(), HloOpcode::kAdd, const1, const2));
HloInstruction* sub = builder.AddInstruction(HloInstruction::CreateBinary(
const2->shape(), HloOpcode::kSubtract, const2, const3));
HloInstruction* mul = builder.AddInstruction(HloInstruction::CreateBinary(
add->shape(), HloOpcode::kMultiply, add, sub));
auto module = CreateNewVerifiedModule("fuse_with_constant_operands");
HloComputation* entry_computation =
module->AddEntryComputation(builder.Build(mul));
HloInstruction* call = module->OutlineExpressionFromComputation(
{add, sub, mul}, "entry", entry_computation);
EXPECT_EQ(call, entry_computation->root_instruction());
HloComputation* callee_computation = call->to_apply();
EXPECT_THAT(call->operands(),
::testing::UnorderedElementsAre(const1, const2, const3));
EXPECT_EQ(6, callee_computation->instruction_count());
}
TEST_F(TransposeFoldingTest, FoldDotTransposeInCall) {
constexpr absl::string_view kHloString = R"(
HloModule FoldDotTransposeInCall
callee {
name.0 = f32[2,3]{1,0} parameter(0)
name.1 = f32[2,3]{1,0} parameter(1)
transpose.clone = f32[3,2]{1,0} transpose(name.0), dimensions={1,0}
ROOT dot.clone = f32[2,2]{1,0} dot(name.1, transpose.clone), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
ENTRY entry_computation {
y = f32[2,3]{1,0} parameter(1)
x = f32[2,3]{1,0} parameter(0)
ROOT call = f32[2,2]{1,0} call(y, x), to_apply=callee
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloString));
EXPECT_THAT(TransposeFolding().Run(module.get()), IsOkAndHolds(true));
const HloComputation* callee = module->GetComputationWithName("callee");
ASSERT_NE(callee, nullptr);
EXPECT_THAT(callee->root_instruction(),
op::Dot(op::Parameter(1), op::Parameter(0),
                      /*lhs_contracting_dim=*/1, /*rhs_contracting_dim=*/1));
}
TEST_F(TransposeFoldingTest, FoldConvDimSwapTransposeRhs) {
auto builder = HloComputation::Builder("entry_computation");
  HloInstruction* x = builder.AddInstruction(HloInstruction::CreateParameter(
      0, ShapeUtil::MakeShape(F32, {2, 3, 1, 1}), /*name=*/"x"));
  HloInstruction* y = builder.AddInstruction(HloInstruction::CreateParameter(
      1, ShapeUtil::MakeShape(F32, {3, 2, 1, 1}), /*name=*/"y"));
HloInstruction* transpose_y =
builder.AddInstruction(HloInstruction::CreateTranspose(
ShapeUtil::MakeShape(F32, {2, 3, 1, 1}), y, {1, 0, 2, 3}));
auto dnums = XlaBuilder::CreateDefaultConvDimensionNumbers();
Window window;
for (int i = 0; i < 2; ++i) {
WindowDimension* dim = window.add_dimensions();
dim->set_padding_low(0);
dim->set_padding_high(0);
dim->set_base_dilation(1);
dim->set_window_dilation(1);
dim->set_stride(1);
dim->set_size(
transpose_y->shape().dimensions(dnums.kernel_spatial_dimensions(i)));
}
  absl::StatusOr<Shape> conv_shape = ShapeInference::InferConvolveShape(
      x->shape(), transpose_y->shape(), /*feature_group_count=*/1,
      /*batch_group_count=*/1, window, dnums,
      /*preferred_element_type=*/std::nullopt);
  EXPECT_IS_OK(conv_shape);
  HloInstruction* conv = builder.AddInstruction(HloInstruction::CreateConvolve(
      conv_shape.value(), x, transpose_y, /*feature_group_count=*/1,
      /*batch_group_count=*/1, window, dnums, DefaultPrecisionConfig(2)));
auto module = CreateNewVerifiedModule("test_module");
HloComputation* entry_computation =
module->AddEntryComputation(builder.Build(conv));
EXPECT_THAT(TransposeFolding().Run(module.get()), IsOkAndHolds(true));
absl::flat_hash_set<HloInstruction*> instruction_set(
entry_computation->instructions().begin(),
entry_computation->instructions().end());
CHECK_EQ(1, instruction_set.erase(x)) << "x is not in entry_computation.";
CHECK_EQ(1, instruction_set.erase(y)) << "y is not in entry_computation.";
CHECK_EQ(1, instruction_set.size())
<< "entry_computation should contain exactly 3 instructions.";
HloInstruction* new_conv = *instruction_set.begin();
EXPECT_EQ(HloOpcode::kConvolution, new_conv->opcode());
EXPECT_EQ(dnums.kernel_input_feature_dimension(),
new_conv->convolution_dimension_numbers()
.kernel_output_feature_dimension());
EXPECT_EQ(dnums.kernel_output_feature_dimension(),
new_conv->convolution_dimension_numbers()
.kernel_input_feature_dimension());
}
TEST_F(TransposeFoldingTest, FoldConvComplexTransposeRhs) {
auto builder = HloComputation::Builder("entry_computation");
  HloInstruction* x = builder.AddInstruction(HloInstruction::CreateParameter(
      0, ShapeUtil::MakeShape(F32, {2, 3, 1, 1}), /*name=*/"x"));
  HloInstruction* y = builder.AddInstruction(HloInstruction::CreateParameter(
      1, ShapeUtil::MakeShape(F32, {1, 2, 1, 3}), /*name=*/"y"));
HloInstruction* transpose_y =
builder.AddInstruction(HloInstruction::CreateTranspose(
ShapeUtil::MakeShape(F32, {2, 3, 1, 1}), y, {1, 3, 0, 2}));
auto dnums = XlaBuilder::CreateDefaultConvDimensionNumbers();
Window window;
for (int i = 0; i < 2; ++i) {
WindowDimension* dim = window.add_dimensions();
dim->set_padding_low(0);
dim->set_padding_high(0);
dim->set_base_dilation(1);
dim->set_window_dilation(1);
dim->set_stride(1);
dim->set_size(
transpose_y->shape().dimensions(dnums.kernel_spatial_dimensions(i)));
}
  absl::StatusOr<Shape> conv_shape = ShapeInference::InferConvolveShape(
      x->shape(), transpose_y->shape(), /*feature_group_count=*/1,
      /*batch_group_count=*/1, window, dnums,
      /*preferred_element_type=*/std::nullopt);
  EXPECT_IS_OK(conv_shape);
  HloInstruction* conv = builder.AddInstruction(HloInstruction::CreateConvolve(
      conv_shape.value(), x, transpose_y, /*feature_group_count=*/1,
      /*batch_group_count=*/1, window, dnums, DefaultPrecisionConfig(2)));
auto module = CreateNewVerifiedModule("test_module");
HloComputation* entry_computation =
module->AddEntryComputation(builder.Build(conv));
EXPECT_THAT(TransposeFolding().Run(module.get()), IsOkAndHolds(true));
absl::flat_hash_set<HloInstruction*> instruction_set(
entry_computation->instructions().begin(),
entry_computation->instructions().end());
CHECK_EQ(1, instruction_set.erase(x)) << "x is not in entry_computation.";
CHECK_EQ(1, instruction_set.erase(y)) << "y is not in entry_computation.";
CHECK_EQ(1, instruction_set.size())
<< "entry_computation should contain exactly 3 instructions.";
HloInstruction* new_conv = *instruction_set.begin();
EXPECT_EQ(HloOpcode::kConvolution, new_conv->opcode());
EXPECT_EQ(dnums.kernel_input_feature_dimension(),
new_conv->convolution_dimension_numbers()
.kernel_output_feature_dimension());
EXPECT_EQ(dnums.kernel_spatial_dimensions(1),
new_conv->convolution_dimension_numbers()
.kernel_input_feature_dimension());
EXPECT_EQ(
dnums.kernel_output_feature_dimension(),
new_conv->convolution_dimension_numbers().kernel_spatial_dimensions(0));
EXPECT_EQ(
dnums.kernel_spatial_dimensions(0),
new_conv->convolution_dimension_numbers().kernel_spatial_dimensions(1));
}
TEST_F(TransposeFoldingTest, FoldConvTransposeLhs) {
auto builder = HloComputation::Builder("entry_computation");
  HloInstruction* x = builder.AddInstruction(HloInstruction::CreateParameter(
      0, ShapeUtil::MakeShape(F32, {3, 2, 1, 1}), /*name=*/"x"));
  HloInstruction* y = builder.AddInstruction(HloInstruction::CreateParameter(
      1, ShapeUtil::MakeShape(F32, {2, 3, 1, 1}), /*name=*/"y"));
HloInstruction* transpose_x =
builder.AddInstruction(HloInstruction::CreateTranspose(
ShapeUtil::MakeShape(F32, {2, 3, 1, 1}), x, {1, 0, 2, 3}));
auto dnums = XlaBuilder::CreateDefaultConvDimensionNumbers();
Window window;
for (int i = 0; i < 2; ++i) {
WindowDimension* dim = window.add_dimensions();
dim->set_padding_low(0);
dim->set_padding_high(0);
dim->set_base_dilation(1);
dim->set_window_dilation(1);
dim->set_stride(1);
dim->set_size(y->shape().dimensions(dnums.kernel_spatial_dimensions(i)));
}
  absl::StatusOr<Shape> conv_shape = ShapeInference::InferConvolveShape(
      transpose_x->shape(), y->shape(), /*feature_group_count=*/1,
      /*batch_group_count=*/1, window, dnums,
      /*preferred_element_type=*/std::nullopt);
  EXPECT_IS_OK(conv_shape);
  HloInstruction* conv = builder.AddInstruction(HloInstruction::CreateConvolve(
      conv_shape.value(), transpose_x, y, /*feature_group_count=*/1,
      /*batch_group_count=*/1, window, dnums, DefaultPrecisionConfig(2)));
auto module = CreateNewVerifiedModule("test_module");
HloComputation* entry_computation =
module->AddEntryComputation(builder.Build(conv));
EXPECT_THAT(TransposeFolding().Run(module.get()), IsOkAndHolds(true));
absl::flat_hash_set<HloInstruction*> instruction_set(
entry_computation->instructions().begin(),
entry_computation->instructions().end());
EXPECT_EQ(1, instruction_set.erase(x)) << "x is not in entry_computation.";
EXPECT_EQ(1, instruction_set.erase(y)) << "y is not in entry_computation.";
EXPECT_EQ(1, instruction_set.size())
<< "entry_computation should contain exactly 3 instructions.";
HloInstruction* new_conv = *instruction_set.begin();
EXPECT_EQ(HloOpcode::kConvolution, new_conv->opcode());
EXPECT_EQ(dnums.input_feature_dimension(),
new_conv->convolution_dimension_numbers().input_batch_dimension());
EXPECT_EQ(
dnums.input_batch_dimension(),
new_conv->convolution_dimension_numbers().input_feature_dimension());
EXPECT_EQ(
dnums.input_spatial_dimensions(0),
new_conv->convolution_dimension_numbers().input_spatial_dimensions(0));
EXPECT_EQ(
dnums.input_spatial_dimensions(1),
new_conv->convolution_dimension_numbers().input_spatial_dimensions(1));
EXPECT_EQ(
dnums.output_spatial_dimensions(0),
new_conv->convolution_dimension_numbers().output_spatial_dimensions(0));
EXPECT_EQ(
dnums.output_spatial_dimensions(1),
new_conv->convolution_dimension_numbers().output_spatial_dimensions(1));
}
TEST_F(TransposeFoldingTest, FoldConvComplexTransposeLhs) {
auto builder = HloComputation::Builder("entry_computation");
  HloInstruction* x = builder.AddInstruction(HloInstruction::CreateParameter(
      0, ShapeUtil::MakeShape(F32, {3, 2, 1, 1}), /*name=*/"x"));
  HloInstruction* y = builder.AddInstruction(HloInstruction::CreateParameter(
      1, ShapeUtil::MakeShape(F32, {2, 3, 1, 1}), /*name=*/"y"));
HloInstruction* transpose_x =
builder.AddInstruction(HloInstruction::CreateTranspose(
ShapeUtil::MakeShape(F32, {2, 3, 1, 1}), x, {1, 0, 3, 2}));
auto dnums = XlaBuilder::CreateDefaultConvDimensionNumbers();
Window window;
for (int i = 0; i < 2; ++i) {
WindowDimension* dim = window.add_dimensions();
dim->set_padding_low(0);
dim->set_padding_high(0);
dim->set_base_dilation(1);
dim->set_window_dilation(1);
dim->set_stride(1);
dim->set_size(y->shape().dimensions(dnums.kernel_spatial_dimensions(i)));
}
  absl::StatusOr<Shape> conv_shape = ShapeInference::InferConvolveShape(
      transpose_x->shape(), y->shape(), /*feature_group_count=*/1,
      /*batch_group_count=*/1, window, dnums,
      /*preferred_element_type=*/std::nullopt);
  EXPECT_IS_OK(conv_shape);
  HloInstruction* conv = builder.AddInstruction(HloInstruction::CreateConvolve(
      conv_shape.value(), transpose_x, y, /*feature_group_count=*/1,
      /*batch_group_count=*/1, window, dnums, DefaultPrecisionConfig(2)));
auto module = CreateNewVerifiedModule("test_module");
HloComputation* entry_computation =
module->AddEntryComputation(builder.Build(conv));
EXPECT_THAT(TransposeFolding().Run(module.get()), IsOkAndHolds(true));
absl::flat_hash_set<HloInstruction*> instruction_set(
entry_computation->instructions().begin(),
entry_computation->instructions().end());
EXPECT_EQ(1, instruction_set.erase(x)) << "x is not in entry_computation.";
EXPECT_EQ(1, instruction_set.erase(y)) << "y is not in entry_computation.";
EXPECT_EQ(1, instruction_set.size())
<< "entry_computation should contain exactly 3 instructions.";
HloInstruction* new_conv = *instruction_set.begin();
EXPECT_EQ(HloOpcode::kConvolution, new_conv->opcode());
EXPECT_EQ(dnums.input_feature_dimension(),
new_conv->convolution_dimension_numbers().input_batch_dimension());
EXPECT_EQ(
dnums.input_batch_dimension(),
new_conv->convolution_dimension_numbers().input_feature_dimension());
EXPECT_EQ(
dnums.input_spatial_dimensions(0),
new_conv->convolution_dimension_numbers().input_spatial_dimensions(1));
EXPECT_EQ(
dnums.input_spatial_dimensions(1),
new_conv->convolution_dimension_numbers().input_spatial_dimensions(0));
EXPECT_EQ(
dnums.output_spatial_dimensions(0),
new_conv->convolution_dimension_numbers().output_spatial_dimensions(0));
EXPECT_EQ(
dnums.output_spatial_dimensions(1),
new_conv->convolution_dimension_numbers().output_spatial_dimensions(1));
}
TEST_F(TransposeFoldingTest, FoldBatchDotTranspose) {
constexpr absl::string_view kHloString = R"(
HloModule FoldBatchDotTranspose
ENTRY entry_computation {
x = f32[7,7,2,3]{3,2,1,0} parameter(0)
y = f32[7,7,2,3]{3,2,1,0} parameter(1)
transpose = f32[7,7,3,2]{3,2,1,0} transpose(y), dimensions={0,1,3,2}
ROOT dot = f32[7,7,2,2]{3,2,1,0} dot(x, transpose), lhs_contracting_dims={3},
rhs_contracting_dims={2}, lhs_batch_dims={0,1}, rhs_batch_dims={0,1}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloString));
EXPECT_THAT(TransposeFolding().Run(module.get()), IsOkAndHolds(true));
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Dot(op::Parameter(0), op::Parameter(1),
                      /*lhs_contracting_dim=*/3, /*rhs_contracting_dim=*/3));
}
TEST_F(TransposeFoldingTest, NoFoldBatchDotTransposeBatch) {
constexpr absl::string_view kHloString = R"(
HloModule NoFoldBatchDotTransposeBatch
ENTRY entry_computation {
x = f32[7,7,2,3]{3,2,1,0} parameter(0)
y = f32[7,7,2,3]{3,2,1,0} parameter(1)
transpose = f32[7,7,3,2]{3,2,1,0} transpose(y), dimensions={1,0,3,2}
ROOT dot = f32[7,7,2,2]{3,2,1,0} dot(x, transpose), lhs_contracting_dims={3},
rhs_contracting_dims={2}, lhs_batch_dims={0,1}, rhs_batch_dims={0,1}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloString));
EXPECT_THAT(TransposeFolding().Run(module.get()), IsOkAndHolds(false));
}
TEST_F(TransposeFoldingTest, FoldBatchDotTransposeNonContiguousBatch) {
constexpr absl::string_view kHloString = R"(
HloModule FoldBatchDotTransposeNonContiguousBatch
ENTRY entry_computation {
x = f32[7,2,7,3]{3,2,1,0} parameter(0)
y = f32[7,2,7,3]{3,2,1,0} parameter(1)
transpose = f32[7,3,7,2]{3,2,1,0} transpose(y), dimensions={0,3,2,1}
ROOT dot = f32[7,7,2,2]{3,2,1,0} dot(x, transpose), lhs_contracting_dims={3},
rhs_contracting_dims={1}, lhs_batch_dims={0,2}, rhs_batch_dims={0,2}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloString));
EXPECT_THAT(TransposeFolding().Run(module.get()), IsOkAndHolds(true));
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Dot(op::Parameter(0), op::Parameter(1),
                      /*lhs_contracting_dim=*/3, /*rhs_contracting_dim=*/3));
}
TEST_F(TransposeFoldingTest, NoFoldBatchDotTransposeIdentity) {
constexpr absl::string_view kHloString = R"(
HloModule NoFoldBatchDotTransposeIdentity
ENTRY entry_computation {
x = f32[7,7,2,3]{3,2,1,0} parameter(0)
y = f32[7,7,3,2]{3,2,1,0} parameter(1)
transpose = f32[7,7,3,2]{3,2,1,0} transpose(y), dimensions={0,1,2,3}
ROOT dot = f32[7,7,2,2]{3,2,1,0} dot(x, transpose), lhs_contracting_dims={3},
rhs_contracting_dims={2}, lhs_batch_dims={0,1}, rhs_batch_dims={0,1}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloString));
EXPECT_THAT(TransposeFolding().Run(module.get()), IsOkAndHolds(false));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/transpose_folding.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/transpose_folding_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
9d9cd715-605d-40e6-ac89-395c9fa1afa1 | cpp | google/tensorstore | downsample_util | tensorstore/driver/downsample/downsample_util.cc | tensorstore/driver/downsample/downsample_util_test.cc | #include "tensorstore/driver/downsample/downsample_util.h"
#include <algorithm>
#include <cassert>
#include <cstdlib>
#include <limits>
#include <ostream>
#include <utility>
#include "absl/base/optimization.h"
#include "absl/container/inlined_vector.h"
#include "absl/status/status.h"
#include "absl/strings/str_join.h"
#include "tensorstore/array.h"
#include "tensorstore/box.h"
#include "tensorstore/downsample_method.h"
#include "tensorstore/index.h"
#include "tensorstore/index_interval.h"
#include "tensorstore/index_space/index_domain.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/index_space/internal/identity_transform.h"
#include "tensorstore/index_space/internal/transform_rep.h"
#include "tensorstore/index_space/output_index_method.h"
#include "tensorstore/internal/integer_overflow.h"
#include "tensorstore/rank.h"
#include "tensorstore/util/byte_strided_pointer.h"
#include "tensorstore/util/division.h"
#include "tensorstore/util/iterate.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal_downsample {
std::ostream& operator<<(std::ostream& os,
const PropagatedIndexTransformDownsampling& x) {
return os << "transform=" << x.transform << "\ninput_downsample_factors="
<< absl::StrJoin(x.input_downsample_factors, ",");
}
namespace {
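// Counts how many new input dimensions must be added to the downsampled
// transform in order to propagate each non-unit downsample factor through
// its output index map.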
DimensionIndex ComputeAdditionalInputDimensionsNeeded(
IndexTransformView<> downsampled_transform,
span<const Index> output_downsample_factors,
span<DimensionIndex> input_dimension_ref_counts, bool is_domain_empty) {
using internal_index_space::TransformAccess;
assert(downsampled_transform.valid());
const DimensionIndex output_rank = downsampled_transform.output_rank();
assert(input_dimension_ref_counts.size() ==
downsampled_transform.input_rank());
assert(output_downsample_factors.size() == output_rank);
DimensionIndex additional_input_dims = 0;
auto old_transform_rep = TransformAccess::rep(downsampled_transform);
for (DimensionIndex output_dim = 0; output_dim < output_rank; ++output_dim) {
assert(output_downsample_factors[output_dim] > 0);
if (output_downsample_factors[output_dim] == 1) {
continue;
}
const auto& output_map = old_transform_rep->output_index_maps()[output_dim];
switch (output_map.method()) {
case OutputIndexMethod::constant:
if (!is_domain_empty) {
++additional_input_dims;
}
break;
case OutputIndexMethod::single_input_dimension:
if ((std::abs(output_map.stride()) != 1 ||
input_dimension_ref_counts[output_map.input_dimension()] != 1) &&
!downsampled_transform.input_domain()
.box()[output_map.input_dimension()]
.empty()) {
++additional_input_dims;
}
break;
case OutputIndexMethod::array: {
++additional_input_dims;
break;
}
}
}
return additional_input_dims;
}
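// Copies `output_map` into `new_output_map`, zero-padding the index array
// byte strides for the added input dimensions.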
absl::Status ExtendOutputIndexMap(
const internal_index_space::OutputIndexMap& output_map,
internal_index_space::OutputIndexMap& new_output_map,
DimensionIndex input_rank, DimensionIndex new_input_rank) {
new_output_map.offset() = output_map.offset();
new_output_map.stride() = output_map.stride();
switch (output_map.method()) {
case OutputIndexMethod::constant:
new_output_map.SetConstant();
break;
case OutputIndexMethod::single_input_dimension:
new_output_map.SetSingleInputDimension(output_map.input_dimension());
break;
case OutputIndexMethod::array: {
const auto& index_array_data = output_map.index_array_data();
auto& new_index_array_data =
new_output_map.SetArrayIndexing(new_input_rank);
new_index_array_data.element_pointer = index_array_data.element_pointer;
new_index_array_data.index_range = index_array_data.index_range;
std::copy_n(index_array_data.byte_strides, input_rank,
new_index_array_data.byte_strides);
std::fill_n(new_index_array_data.byte_strides + input_rank,
new_input_rank - input_rank, Index(0));
break;
}
}
return absl::OkStatus();
}
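// Propagates a downsample factor through a stride +/-1 single-input-dimension
// map by scaling the offset by the factor and widening the input interval to
// base resolution, reusing the existing input dimension.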
absl::Status PropagateUnitStrideSingleInputDimensionMapDownsampling(
Index original_offset, Index original_stride, IndexInterval input_interval,
Index output_downsample_factor,
internal_index_space::OutputIndexMap& new_output_map,
IndexInterval output_base_bounds, MutableBoxView<> new_input_domain,
DimensionIndex new_input_dim,
PropagatedIndexTransformDownsampling& propagated) {
assert(original_stride == 1 || original_stride == -1);
if (internal::MulOverflow(original_offset, output_downsample_factor,
&new_output_map.offset())) {
return absl::OutOfRangeError(
tensorstore::StrCat("Integer overflow computing output offset ",
original_offset, " * ", output_downsample_factor));
}
TENSORSTORE_ASSIGN_OR_RETURN(
auto bounds_interval,
GetAffineTransformDomain(output_base_bounds, new_output_map.offset(),
original_stride));
auto input_bounds = DownsampleInterval(
bounds_interval, output_downsample_factor, DownsampleMethod::kMean);
if (!Contains(input_bounds, input_interval)) {
return absl::OutOfRangeError(
tensorstore::StrCat("Propagated bounds interval ", input_bounds,
" does not contain ", input_interval));
}
propagated.input_downsample_factors[new_input_dim] = output_downsample_factor;
new_output_map.SetSingleInputDimension(new_input_dim);
TENSORSTORE_ASSIGN_OR_RETURN(
auto new_interval,
GetAffineTransformInverseDomain(
input_interval, 0, original_stride * output_downsample_factor));
new_interval = Intersect(new_interval, bounds_interval);
new_output_map.stride() = original_stride;
new_input_domain[new_input_dim] = new_interval;
return absl::OkStatus();
}
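// Propagates a downsample factor through a strided single-input-dimension
// map by adding a new input dimension of extent `output_downsample_factor`
// and materializing an index array of base indices clamped to
// `output_base_bounds`.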
absl::Status PropagateSingleInputDimensionMapDownsamplingAsNewDimension(
const internal_index_space::OutputIndexMap& output_map,
IndexInterval input_interval, Index output_downsample_factor,
internal_index_space::OutputIndexMap& new_output_map,
IndexInterval output_base_bounds, MutableBoxView<> new_input_domain,
DimensionIndex new_input_dim,
PropagatedIndexTransformDownsampling& propagated) {
if (input_interval.size() == 1 || output_map.stride() == 0) {
Index adjusted_offset;
if (internal::MulOverflow(input_interval.inclusive_min(),
output_map.stride(), &adjusted_offset) ||
internal::AddOverflow(adjusted_offset, output_map.offset(),
&adjusted_offset)) {
return absl::OutOfRangeError(tensorstore::StrCat(
"Integer overflow computing offset ", output_map.offset(), " + ",
input_interval.inclusive_min(), " * ", output_map.stride()));
}
    return PropagateUnitStrideSingleInputDimensionMapDownsampling(
        /*original_offset=*/adjusted_offset, /*original_stride=*/1,
        /*input_interval=*/IndexInterval::UncheckedSized(0, 1),
        output_downsample_factor, new_output_map, output_base_bounds,
        new_input_domain, new_input_dim, propagated);
}
propagated.input_downsample_factors[new_input_dim] = output_downsample_factor;
if (output_downsample_factor > kInfIndex) {
return absl::OutOfRangeError("Downsample factor is out of range");
}
new_input_domain[new_input_dim] =
IndexInterval::UncheckedSized(0, output_downsample_factor);
new_output_map.offset() = 0;
new_output_map.stride() = 1;
auto& new_index_array_data =
new_output_map.SetArrayIndexing(new_input_domain.rank());
new_index_array_data.index_range = output_base_bounds;
Index adjusted_stride;
Index adjusted_offset;
if (internal::MulOverflow(output_map.stride(), output_downsample_factor,
&adjusted_stride)) {
return absl::OutOfRangeError(tensorstore::StrCat(
"Integer overflow computing stride ", output_map.stride(), " * ",
output_downsample_factor));
}
if (internal::MulOverflow(output_map.offset(), output_downsample_factor,
&adjusted_offset)) {
return absl::OutOfRangeError(tensorstore::StrCat(
"Integer overflow computing offset ", output_map.offset(), " * ",
output_downsample_factor));
}
if (!input_interval.empty()) {
TENSORSTORE_ASSIGN_OR_RETURN(
auto output_range,
GetAffineTransformRange(input_interval, adjusted_offset,
adjusted_stride));
TENSORSTORE_ASSIGN_OR_RETURN(
output_range,
ShiftInterval(output_range, output_downsample_factor - 1, 0));
if (!Contains(output_base_bounds, output_range)) {
return absl::OutOfRangeError(tensorstore::StrCat(
"Output bounds interval ", output_base_bounds,
" does not contain output range interval ", output_range));
}
}
std::fill_n(new_index_array_data.byte_strides, new_input_domain.rank(),
Index(0));
new_index_array_data.byte_strides[output_map.input_dimension()] = 1;
new_index_array_data.byte_strides[new_input_dim] = 2;
new_index_array_data.element_pointer = AllocateArrayElementsLike<Index>(
new_index_array_data.layout(new_input_domain),
new_index_array_data.byte_strides, skip_repeated_elements);
Index* array_origin =
const_cast<Index*>(new_index_array_data.array_view(new_input_domain)
.byte_strided_origin_pointer()
.get());
for (Index j = 0; j < input_interval.size(); ++j) {
const Index base_index =
adjusted_offset +
adjusted_stride * (input_interval.inclusive_min() + j);
for (Index i = 0; i < output_downsample_factor; ++i) {
Index x;
if (internal::AddOverflow(base_index, i, &x) ||
x > output_base_bounds.inclusive_max()) {
x = output_base_bounds.inclusive_max();
} else if (x < output_base_bounds.inclusive_min()) {
x = output_base_bounds.inclusive_min();
}
array_origin[input_interval.size() * i + j] = x;
}
}
return absl::OkStatus();
}
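// For an empty input domain, adds a placeholder input dimension of extent
// `output_downsample_factor` with a constant output map, since no base
// indices ever need to be produced.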
absl::Status PropagateIndexMapThatRequiresNewInputDimensionForEmptyDomain(
Index output_downsample_factor,
internal_index_space::OutputIndexMap& new_output_map,
MutableBoxView<> new_input_domain, DimensionIndex new_input_dim,
PropagatedIndexTransformDownsampling& propagated) {
propagated.input_downsample_factors[new_input_dim] = output_downsample_factor;
if (output_downsample_factor > kInfIndex) {
return absl::OutOfRangeError("Downsample factor is out of range");
}
new_input_domain[new_input_dim] =
IndexInterval::UncheckedSized(0, output_downsample_factor);
new_output_map.SetConstant();
new_output_map.offset() = 0;
new_output_map.stride() = 0;
return absl::OkStatus();
}
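// Propagates a downsample factor through an index array map: adds a new
// input dimension of extent `output_downsample_factor` and rewrites every
// index to the corresponding base index, clamped to `output_base_bounds`.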
absl::Status PropagateIndexArrayMapDownsampling(
const internal_index_space::OutputIndexMap& output_map,
BoxView<> downsampled_input_domain, Index output_downsample_factor,
internal_index_space::OutputIndexMap& new_output_map,
IndexInterval output_base_bounds, MutableBoxView<> new_input_domain,
DimensionIndex new_input_dim,
PropagatedIndexTransformDownsampling& propagated) {
new_output_map.offset() = 0;
propagated.input_downsample_factors[new_input_dim] = output_downsample_factor;
if (output_downsample_factor > kInfIndex) {
return absl::OutOfRangeError("Downsample factor is out of range");
}
new_input_domain[new_input_dim] =
IndexInterval::UncheckedSized(0, output_downsample_factor);
const DimensionIndex input_rank = downsampled_input_domain.rank();
const auto& index_array_data = output_map.index_array_data();
new_output_map.stride() = 1;
auto& new_index_array_data =
new_output_map.SetArrayIndexing(new_input_domain.rank());
Index adjusted_stride;
Index adjusted_offset;
if (internal::MulOverflow(output_map.stride(), output_downsample_factor,
&adjusted_stride)) {
return absl::OutOfRangeError(tensorstore::StrCat(
"Integer overflow computing stride ", output_map.stride(), " * ",
output_downsample_factor));
}
if (internal::MulOverflow(output_map.offset(), output_downsample_factor,
&adjusted_offset)) {
return absl::OutOfRangeError(tensorstore::StrCat(
"Integer overflow computing offset ", output_map.offset(), " * ",
output_downsample_factor));
}
TENSORSTORE_ASSIGN_OR_RETURN(
auto padded_output_interval,
ShiftInterval(output_base_bounds, -(output_downsample_factor - 1), 0));
TENSORSTORE_ASSIGN_OR_RETURN(
auto effective_index_range,
GetAffineTransformDomain(padded_output_interval, adjusted_offset,
adjusted_stride));
effective_index_range =
Intersect(effective_index_range, index_array_data.index_range);
new_index_array_data.index_range = output_base_bounds;
std::copy_n(index_array_data.byte_strides, input_rank,
new_index_array_data.byte_strides);
std::fill_n(new_index_array_data.byte_strides + input_rank,
new_input_domain.rank() - input_rank, Index(0));
new_index_array_data.byte_strides[new_input_dim] =
std::numeric_limits<Index>::max();
new_index_array_data.element_pointer = AllocateArrayElementsLike<Index>(
new_index_array_data.layout(new_input_domain),
new_index_array_data.byte_strides, skip_repeated_elements);
absl::Status status;
IterateOverArrays(
[&](const Index* existing_index,
ByteStridedPointer<const Index> new_index) {
const Index existing_index_value = *existing_index;
if (!Contains(effective_index_range, existing_index_value)) {
status = CheckContains(effective_index_range, existing_index_value);
return false;
}
Index base_index =
existing_index_value * adjusted_stride + adjusted_offset;
const Index byte_stride =
new_index_array_data.byte_strides[new_input_dim];
Index cur_index =
std::max(base_index, output_base_bounds.inclusive_min());
for (Index i = 0; i < output_downsample_factor; ++i) {
Index x;
if (!internal::AddOverflow(base_index, i, &x) &&
output_base_bounds.exclusive_max() > x) {
cur_index = std::max(cur_index, x);
}
assert(Contains(output_base_bounds, cur_index));
*const_cast<Index*>((new_index + i * byte_stride).get()) = cur_index;
}
return true;
},
skip_repeated_elements,
index_array_data.array_view(downsampled_input_domain),
new_index_array_data.array_view(downsampled_input_domain));
return status;
}
}
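// Computes a transform on the base (non-downsampled) domain whose output,
// once downsampled by `output_downsample_factors`, matches
// `downsampled_transform`, adding synthetic input dimensions where needed.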
absl::Status PropagateIndexTransformDownsampling(
IndexTransformView<> downsampled_transform, BoxView<> output_base_bounds,
span<const Index> output_downsample_factors,
PropagatedIndexTransformDownsampling& propagated) {
using internal_index_space::TransformAccess;
using internal_index_space::TransformRep;
assert(downsampled_transform.valid());
const DimensionIndex output_rank = downsampled_transform.output_rank();
const DimensionIndex input_rank = downsampled_transform.input_rank();
assert(output_base_bounds.rank() == output_rank);
assert(output_downsample_factors.size() == output_rank);
DimensionIndex input_dimension_ref_counts[kMaxRank];
internal::ComputeInputDimensionReferenceCounts(
downsampled_transform, span(&input_dimension_ref_counts[0], input_rank));
const bool is_domain_empty = downsampled_transform.domain().box().is_empty();
DimensionIndex additional_input_dims = ComputeAdditionalInputDimensionsNeeded(
downsampled_transform, output_downsample_factors,
{input_dimension_ref_counts, input_rank}, is_domain_empty);
const DimensionIndex new_input_rank = input_rank + additional_input_dims;
TENSORSTORE_RETURN_IF_ERROR(ValidateRank(new_input_rank));
auto new_transform = TransformRep::Allocate(new_input_rank, output_rank);
new_transform->output_rank = output_rank;
internal_index_space::CopyTransformRepDomain(
TransformAccess::rep(downsampled_transform), new_transform.get());
new_transform->input_rank = new_input_rank;
new_transform->implicit_lower_bounds = false;
new_transform->implicit_upper_bounds = false;
MutableBoxView<> input_domain = new_transform->input_domain(new_input_rank);
std::fill(input_domain.origin().begin() + input_rank,
input_domain.origin().begin() + new_input_rank, Index(0));
std::fill(input_domain.shape().begin() + input_rank,
input_domain.shape().begin() + new_input_rank, Index(1));
propagated.input_downsample_factors.clear();
propagated.input_downsample_factors.resize(new_input_rank, 1);
DimensionIndex next_input_dim = input_rank;
for (DimensionIndex output_dim = 0; output_dim < output_rank; ++output_dim) {
const auto& output_map = TransformAccess::rep(downsampled_transform)
->output_index_maps()[output_dim];
auto& new_output_map = new_transform->output_index_maps()[output_dim];
const Index output_downsample_factor =
output_downsample_factors[output_dim];
if (output_downsample_factor == 1) {
TENSORSTORE_RETURN_IF_ERROR(ExtendOutputIndexMap(
output_map, new_output_map, input_rank, new_input_rank));
continue;
}
absl::Status status;
switch (output_map.method()) {
case OutputIndexMethod::constant: {
if (is_domain_empty) {
new_output_map.SetConstant();
new_output_map.offset() = 0;
new_output_map.stride() = 0;
break;
}
        status = PropagateUnitStrideSingleInputDimensionMapDownsampling(
            /*original_offset=*/output_map.offset(), /*original_stride=*/1,
            /*input_interval=*/IndexInterval::UncheckedSized(0, 1),
            output_downsample_factor, new_output_map,
            output_base_bounds[output_dim], input_domain,
            /*new_input_dim=*/next_input_dim++, propagated);
break;
}
case OutputIndexMethod::single_input_dimension: {
const DimensionIndex input_dim = output_map.input_dimension();
const IndexInterval input_interval =
downsampled_transform.input_domain().box()[input_dim];
if (std::abs(output_map.stride()) == 1 &&
input_dimension_ref_counts[input_dim] == 1) {
          status = PropagateUnitStrideSingleInputDimensionMapDownsampling(
              /*original_offset=*/output_map.offset(),
              /*original_stride=*/output_map.stride(),
              input_interval, output_downsample_factor, new_output_map,
              output_base_bounds[output_dim], input_domain,
              /*new_input_dim=*/input_dim, propagated);
break;
}
if (!IsFinite(input_interval)) {
status = absl::InvalidArgumentError(tensorstore::StrCat(
"Input domain ", input_interval, " is not finite"));
break;
}
if (input_interval.empty()) {
new_output_map.SetSingleInputDimension(input_dim);
new_output_map.offset() = 0;
new_output_map.stride() = 1;
break;
}
status =
is_domain_empty
? PropagateIndexMapThatRequiresNewInputDimensionForEmptyDomain(
output_downsample_factor, new_output_map, input_domain,
next_input_dim++, propagated)
: PropagateSingleInputDimensionMapDownsamplingAsNewDimension(
output_map, input_interval, output_downsample_factor,
new_output_map, output_base_bounds[output_dim],
input_domain, next_input_dim++, propagated);
break;
}
case OutputIndexMethod::array: {
status =
is_domain_empty
? PropagateIndexMapThatRequiresNewInputDimensionForEmptyDomain(
output_downsample_factor, new_output_map, input_domain,
next_input_dim++, propagated)
: PropagateIndexArrayMapDownsampling(
output_map, downsampled_transform.domain().box(),
output_downsample_factor, new_output_map,
output_base_bounds[output_dim], input_domain,
next_input_dim++, propagated);
break;
}
}
if (!status.ok()) {
return tensorstore::MaybeAnnotateStatus(
status,
tensorstore::StrCat("Propagating downsampling factor ",
output_downsample_factor,
" through output dimension ", output_dim));
}
}
internal_index_space::DebugCheckInvariants(new_transform.get());
propagated.transform =
internal_index_space::TransformAccess::Make<IndexTransform<>>(
std::move(new_transform));
return absl::OkStatus();
}
absl::Status PropagateAndComposeIndexTransformDownsampling(
IndexTransformView<> downsampled_transform,
IndexTransformView<> base_transform,
span<const Index> base_downsample_factors,
PropagatedIndexTransformDownsampling& propagated) {
TENSORSTORE_RETURN_IF_ERROR(PropagateIndexTransformDownsampling(
downsampled_transform, base_transform.domain().box(),
base_downsample_factors, propagated));
TENSORSTORE_ASSIGN_OR_RETURN(
propagated.transform,
ComposeTransforms(base_transform, propagated.transform));
return absl::OkStatus();
}
Result<PropagatedIndexTransformDownsampling>
PropagateIndexTransformDownsampling(
IndexTransformView<> downsampled_transform, BoxView<> output_base_bounds,
span<const Index> output_downsample_factors) {
PropagatedIndexTransformDownsampling propagated;
TENSORSTORE_RETURN_IF_ERROR(PropagateIndexTransformDownsampling(
downsampled_transform, output_base_bounds, output_downsample_factors,
propagated));
return propagated;
}
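// Computes the downsampled view of `base_interval`.  For kStride the lower
// bound rounds up, since only exact multiples of the factor are sampled;
// for the accumulating methods it rounds down, since a partial block still
// produces an output element.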
IndexInterval DownsampleInterval(IndexInterval base_interval,
Index downsample_factor,
DownsampleMethod method) {
assert(downsample_factor > 0);
Index inclusive_min;
if (base_interval.inclusive_min() == -kInfIndex) {
inclusive_min = -kInfIndex;
} else {
switch (method) {
case DownsampleMethod::kStride:
inclusive_min =
CeilOfRatio(base_interval.inclusive_min(), downsample_factor);
break;
case DownsampleMethod::kMean:
case DownsampleMethod::kMin:
case DownsampleMethod::kMax:
case DownsampleMethod::kMedian:
case DownsampleMethod::kMode:
inclusive_min =
FloorOfRatio(base_interval.inclusive_min(), downsample_factor);
break;
default:
ABSL_UNREACHABLE();
}
}
Index inclusive_max;
if (base_interval.inclusive_max() == kInfIndex) {
inclusive_max = kInfIndex;
} else if (base_interval.empty()) {
inclusive_max = inclusive_min - 1;
} else {
inclusive_max =
FloorOfRatio(base_interval.inclusive_max(), downsample_factor);
}
return IndexInterval::UncheckedClosed(inclusive_min, inclusive_max);
}
void DownsampleBounds(BoxView<> base_bounds,
MutableBoxView<> downsampled_bounds,
span<const Index> downsample_factors,
DownsampleMethod method) {
const DimensionIndex rank = base_bounds.rank();
assert(rank == downsampled_bounds.rank());
assert(rank == downsample_factors.size());
for (DimensionIndex i = 0; i < rank; ++i) {
downsampled_bounds[i] =
DownsampleInterval(base_bounds[i], downsample_factors[i], method);
}
}
namespace {
class DownsampleDomainBuilder {
public:
explicit DownsampleDomainBuilder(IndexDomainView<> base_domain,
bool domain_only) {
const DimensionIndex input_rank = base_domain.rank();
const DimensionIndex output_rank = domain_only ? 0 : input_rank;
rep = internal_index_space::TransformRep::Allocate(input_rank, output_rank);
rep->input_rank = input_rank;
rep->output_rank = output_rank;
rep->implicit_lower_bounds = base_domain.implicit_lower_bounds();
rep->implicit_upper_bounds = base_domain.implicit_upper_bounds();
const auto& labels = base_domain.labels();
std::copy(labels.begin(), labels.end(), rep->input_labels().begin());
if (!domain_only) {
internal_index_space::SetToIdentityTransform(rep->output_index_maps());
}
}
MutableBoxView<> InputBounds() { return rep->input_domain(rep->input_rank); }
IndexTransform<> MakeTransform() {
internal_index_space::DebugCheckInvariants(rep.get());
return internal_index_space::TransformAccess::Make<IndexTransform<>>(
std::move(rep));
}
private:
internal_index_space::TransformRep::Ptr<> rep;
};
}
IndexDomain<> DownsampleDomain(IndexDomainView<> base_domain,
span<const Index> downsample_factors,
DownsampleMethod method) {
DownsampleDomainBuilder builder(base_domain, true);
DownsampleBounds(base_domain.box(), builder.InputBounds(), downsample_factors,
method);
return builder.MakeTransform().domain();
}
IndexTransform<> GetDownsampledDomainIdentityTransform(
IndexDomainView<> base_domain, span<const Index> downsample_factors,
DownsampleMethod method) {
DownsampleDomainBuilder builder(base_domain, false);
DownsampleBounds(base_domain.box(), builder.InputBounds(), downsample_factors,
method);
return builder.MakeTransform();
}
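// A transform can be downsampled in place only if, for every output
// dimension:
//  - constant maps land on a downsample-block boundary (or coincide with an
//    endpoint of the base interval);
//  - single_input_dimension maps have stride +/-1 and the affine image of the
//    input interval starts and ends on block boundaries (or at the base
//    endpoints);
//  - no array output index maps are present (those always return false).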
bool CanDownsampleIndexTransform(IndexTransformView<> base_transform,
BoxView<> base_bounds,
span<const Index> downsample_factors) {
const Index output_rank = base_transform.output_rank();
assert(base_bounds.rank() == output_rank);
assert(downsample_factors.size() == output_rank);
for (DimensionIndex output_dim = 0; output_dim < output_rank; ++output_dim) {
const Index downsample_factor = downsample_factors[output_dim];
const auto base_interval = base_bounds[output_dim];
const auto map = base_transform.output_index_maps()[output_dim];
switch (map.method()) {
case OutputIndexMethod::constant:
if (downsample_factor != 1 &&
((base_interval.inclusive_min() != map.offset() &&
((map.offset() % downsample_factor) != 0)) ||
(base_interval.inclusive_max() != map.offset() &&
((map.offset() + 1) % downsample_factor) != 0))) {
return false;
}
break;
case OutputIndexMethod::single_input_dimension: {
if (downsample_factor == 1) break;
if (map.stride() != 1 && map.stride() != -1) {
return false;
}
const auto input_interval =
base_transform.input_domain().box()[map.input_dimension()];
TENSORSTORE_ASSIGN_OR_RETURN(
auto shifted_interval,
GetAffineTransformRange(input_interval, map.offset(), map.stride()),
false);
if ((base_interval.inclusive_min() !=
shifted_interval.inclusive_min() &&
(shifted_interval.inclusive_min() % downsample_factor) != 0) ||
(base_interval.exclusive_max() !=
shifted_interval.exclusive_max() &&
(shifted_interval.exclusive_max() % downsample_factor) != 0)) {
return false;
}
break;
}
case OutputIndexMethod::array:
return false;
}
}
return true;
}
}
} | #include "tensorstore/driver/downsample/downsample_util.h"
#include <stddef.h>
#include <stdint.h>
#include <limits>
#include <random>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/random/bit_gen_ref.h"
#include "absl/random/random.h"
#include "absl/status/status.h"
#include "tensorstore/array.h"
#include "tensorstore/data_type.h"
#include "tensorstore/downsample_method.h"
#include "tensorstore/driver/downsample/downsample_array.h"
#include "tensorstore/index.h"
#include "tensorstore/index_interval.h"
#include "tensorstore/index_space/dim_expression.h"
#include "tensorstore/index_space/dimension_identifier.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/index_space/index_transform_builder.h"
#include "tensorstore/index_space/index_transform_testutil.h"
#include "tensorstore/index_space/transformed_array.h"
#include "tensorstore/internal/data_type_random_generator.h"
#include "tensorstore/internal/testing/random_seed.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status_testutil.h"
#include "tensorstore/util/str_cat.h"
namespace {
using ::tensorstore::Box;
using ::tensorstore::BoxView;
using ::tensorstore::DimensionIndex;
using ::tensorstore::Dims;
using ::tensorstore::DownsampleMethod;
using ::tensorstore::Index;
using ::tensorstore::IndexInterval;
using ::tensorstore::IndexTransformBuilder;
using ::tensorstore::kInfIndex;
using ::tensorstore::MakeArray;
using ::tensorstore::MatchesStatus;
using ::tensorstore::span;
using ::tensorstore::internal_downsample::CanDownsampleIndexTransform;
using ::tensorstore::internal_downsample::DownsampleArray;
using ::tensorstore::internal_downsample::DownsampleBounds;
using ::tensorstore::internal_downsample::DownsampleInterval;
using ::tensorstore::internal_downsample::DownsampleTransformedArray;
using ::tensorstore::internal_downsample::PropagatedIndexTransformDownsampling;
using ::tensorstore::internal_downsample::PropagateIndexTransformDownsampling;
using ::testing::Optional;
TEST(PropagateIndexTransformDownsamplingTest, Rank0) {
EXPECT_THAT(PropagateIndexTransformDownsampling(
tensorstore::IdentityTransform(0), {}, {}),
Optional(PropagatedIndexTransformDownsampling{
tensorstore::IdentityTransform(0), {}}));
}
TEST(PropagateIndexTransformDownsamplingTest, Rank1SingleInputDimension) {
EXPECT_THAT(PropagateIndexTransformDownsampling(
tensorstore::IdentityTransform(BoxView({1}, {3})),
BoxView<1>({7}), span<const Index>({2})),
Optional(PropagatedIndexTransformDownsampling{
tensorstore::IdentityTransform(BoxView({2}, {5})), {2}}));
}
TEST(PropagateIndexTransformDownsamplingTest, InvalidRank) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto downsampled_transform,
tensorstore::IdentityTransform(32) | Dims(0).Stride(2));
EXPECT_THAT(PropagateIndexTransformDownsampling(
downsampled_transform, Box(32), std::vector<Index>(32, 2)),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Rank 33 is outside valid range \\[0, 32\\]"));
}
TEST(PropagateIndexTransformDownsamplingTest, Rank1Constant) {
EXPECT_THAT(
PropagateIndexTransformDownsampling(
IndexTransformBuilder(0, 1).output_constant(0, 2).Finalize().value(),
BoxView({7}, {2}), span<const Index>({3})),
Optional(PropagatedIndexTransformDownsampling{
IndexTransformBuilder(1, 1)
.input_origin({1})
.input_exclusive_max({3})
.output_single_input_dimension(0, 6, 1, 0)
.Finalize()
.value(),
{3}}));
}
TEST(PropagateIndexTransformDownsamplingTest,
Rank1SingleInputDimensionPartialStartBlock) {
EXPECT_THAT(PropagateIndexTransformDownsampling(
tensorstore::IdentityTransform(BoxView({0}, {4})),
BoxView({1}, {6}), span<const Index>({2})),
Optional(PropagatedIndexTransformDownsampling{
tensorstore::IdentityTransform(BoxView({1}, {6})), {2}}));
}
TEST(PropagateIndexTransformDownsamplingTest, Rank2WithIgnoredDimension) {
EXPECT_THAT(
PropagateIndexTransformDownsampling(
tensorstore::IdentityTransform(BoxView({1, 2}, {3, 5})),
BoxView({7, 10}), span<const Index>({2, 1})),
Optional(PropagatedIndexTransformDownsampling{
tensorstore::IdentityTransform(BoxView({2, 2}, {5, 5})), {2, 1}}));
}
TEST(PropagateIndexTransformDownsamplingTest, Rank1IndexArray) {
EXPECT_THAT(PropagateIndexTransformDownsampling(
IndexTransformBuilder(1, 1)
.input_shape({3})
.output_index_array(0, 0, 1, MakeArray<Index>({4, 7, 3}))
.Finalize()
.value(),
BoxView<1>({50}), span<const Index>({4})),
Optional(PropagatedIndexTransformDownsampling{
IndexTransformBuilder(2, 1)
.input_shape({3, 4})
.output_index_array(0, 0, 1,
MakeArray<Index>({{16, 17, 18, 19},
{28, 29, 30, 31},
{12, 13, 14, 15}}),
IndexInterval::Sized(0, 50))
.Finalize()
.value(),
{1, 4}}));
}
TEST(PropagateIndexTransformDownsamplingTest,
Rank3IndexArrayConstantNoDownsampling) {
EXPECT_THAT(
PropagateIndexTransformDownsampling(
IndexTransformBuilder(2, 3)
.input_shape({3, 4})
.output_index_array(0, 0, 1, MakeArray<Index>({{4}, {7}, {3}}))
.output_single_input_dimension(1, 1)
.output_constant(2, 42)
.Finalize()
.value(),
BoxView({30, 50, 55}), span<const Index>({1, 2, 1})),
Optional(PropagatedIndexTransformDownsampling{
IndexTransformBuilder(2, 3)
.input_shape({3, 8})
.output_index_array(0, 0, 1, MakeArray<Index>({{4}, {7}, {3}}))
.output_single_input_dimension(1, 1)
.output_constant(2, 42)
.Finalize()
.value(),
{1, 2}}));
}
TEST(PropagateIndexTransformDownsamplingTest, Rank2IndexArray) {
EXPECT_THAT(
PropagateIndexTransformDownsampling(
IndexTransformBuilder(2, 1)
.input_shape({2, 3})
.output_index_array(0, 0, 1,
MakeArray<Index>({{1, 2, 3}, {4, 5, 6}}))
.Finalize()
.value(),
BoxView<1>({50}), span<const Index>({4})),
Optional(PropagatedIndexTransformDownsampling{
IndexTransformBuilder(3, 1)
.input_shape({2, 3, 4})
.output_index_array(
0, 0, 1,
MakeArray<Index>(
{{{4, 5, 6, 7}, {8, 9, 10, 11}, {12, 13, 14, 15}},
{{16, 17, 18, 19}, {20, 21, 22, 23}, {24, 25, 26, 27}}}),
IndexInterval::Sized(0, 50))
.Finalize()
.value(),
{1, 1, 4}}));
}
TEST(PropagateIndexTransformDownsamplingTest,
Rank1SingleInputDimensionStrided) {
EXPECT_THAT(PropagateIndexTransformDownsampling(
IndexTransformBuilder(1, 1)
.input_shape({3})
.output_single_input_dimension(0, 1, 5, 0)
.Finalize()
.value(),
BoxView<1>({50}), span<const Index>({4})),
Optional(PropagatedIndexTransformDownsampling{
IndexTransformBuilder(2, 1)
.input_shape({3, 4})
.output_index_array(0, 0, 1,
MakeArray<Index>({{4, 5, 6, 7},
{24, 25, 26, 27},
{44, 45, 46, 47}}),
IndexInterval::Sized(0, 50))
.Finalize()
.value(),
{1, 4}}));
}
TEST(PropagateIndexTransformDownsamplingTest, ErrorRank1ConstantOverflow) {
EXPECT_THAT(
PropagateIndexTransformDownsampling(
IndexTransformBuilder(0, 1)
.output_constant(0, tensorstore::kMaxFiniteIndex)
.Finalize()
.value(),
BoxView<1>({0}, {kInfIndex}), span<const Index>({1000})),
MatchesStatus(absl::StatusCode::kOutOfRange, ".*Integer overflow.*"));
}
TEST(PropagateIndexTransformDownsamplingTest, ErrorRank1ConstantOutOfBounds) {
TENSORSTORE_EXPECT_OK(PropagateIndexTransformDownsampling(
IndexTransformBuilder(0, 1).output_constant(0, 4).Finalize().value(),
BoxView<1>({0}, {15}), span<const Index>({3})));
TENSORSTORE_EXPECT_OK(PropagateIndexTransformDownsampling(
IndexTransformBuilder(0, 1).output_constant(0, 4).Finalize().value(),
BoxView<1>({0}, {14}), span<const Index>({3})));
TENSORSTORE_EXPECT_OK(PropagateIndexTransformDownsampling(
IndexTransformBuilder(0, 1).output_constant(0, 4).Finalize().value(),
BoxView<1>({0}, {13}), span<const Index>({3})));
TENSORSTORE_EXPECT_OK(PropagateIndexTransformDownsampling(
IndexTransformBuilder(0, 1).output_constant(0, 0).Finalize().value(),
BoxView<1>({1}, {13}), span<const Index>({3})));
TENSORSTORE_EXPECT_OK(PropagateIndexTransformDownsampling(
IndexTransformBuilder(0, 1).output_constant(0, 0).Finalize().value(),
BoxView<1>({2}, {13}), span<const Index>({3})));
EXPECT_THAT(
PropagateIndexTransformDownsampling(
IndexTransformBuilder(0, 1).output_constant(0, 5).Finalize().value(),
BoxView<1>({0}, {15}), span<const Index>({3})),
MatchesStatus(absl::StatusCode::kOutOfRange,
".*Propagated bounds interval .* does not contain .*"));
EXPECT_THAT(
PropagateIndexTransformDownsampling(
IndexTransformBuilder(0, 1).output_constant(0, 0).Finalize().value(),
BoxView<1>({3}, {15}), span<const Index>({3})),
MatchesStatus(absl::StatusCode::kOutOfRange,
".*Propagated bounds interval .* does not contain .*"));
}
TEST(PropagateIndexTransformDownsamplingTest,
ErrorSingleInputDimensionStridedNonFiniteDomain) {
EXPECT_THAT(PropagateIndexTransformDownsampling(
IndexTransformBuilder(1, 1)
.input_origin({0})
.output_single_input_dimension(0, 0, 2, 0)
.Finalize()
.value(),
BoxView<1>({0}, {kInfIndex}), span<const Index>({1000})),
MatchesStatus(absl::StatusCode::kInvalidArgument,
".*Input domain .* is not finite"));
}
TEST(PropagateIndexTransformDownsamplingTest,
ErrorSingleInputDimensionSize1StridedOverflow) {
EXPECT_THAT(
PropagateIndexTransformDownsampling(
IndexTransformBuilder(1, 1)
.input_origin({100})
.input_shape({1})
.output_single_input_dimension(
0, std::numeric_limits<Index>::max(), 2, 0)
.Finalize()
.value(),
BoxView<1>({0}, {kInfIndex}), span<const Index>({1000})),
MatchesStatus(absl::StatusCode::kOutOfRange, ".*Integer overflow.*"));
EXPECT_THAT(
PropagateIndexTransformDownsampling(
IndexTransformBuilder(1, 1)
.input_origin({100})
.input_shape({1})
.output_single_input_dimension(
0, 0, std::numeric_limits<Index>::max(), 0)
.Finalize()
.value(),
BoxView<1>({0}, {kInfIndex}), span<const Index>({1000})),
MatchesStatus(absl::StatusCode::kOutOfRange, ".*Integer overflow.*"));
}
TEST(PropagateIndexTransformDownsamplingTest,
ErrorSingleInputDimStridedInvalidDownsampleFactor) {
EXPECT_THAT(PropagateIndexTransformDownsampling(
IndexTransformBuilder(1, 1)
.input_shape({100})
.output_single_input_dimension(0, 0, 2, 0)
.Finalize()
.value(),
BoxView<1>({0}, {1000}),
span<const Index>({std::numeric_limits<Index>::max()})),
MatchesStatus(absl::StatusCode::kOutOfRange,
".*Downsample factor is out of range"));
}
TEST(PropagateIndexTransformDownsamplingTest,
ErrorSingleInputDimStridedOverflowMultiplyingStrideAndDownsampleFactor) {
EXPECT_THAT(
PropagateIndexTransformDownsampling(
IndexTransformBuilder(1, 1)
.input_shape({100})
.output_single_input_dimension(0, 0, 100, 0)
.Finalize()
.value(),
BoxView<1>({0}, {1000}), span<const Index>({kInfIndex})),
MatchesStatus(absl::StatusCode::kOutOfRange, ".*Integer overflow.*"));
}
TEST(PropagateIndexTransformDownsamplingTest,
ErrorSingleInputDimStridedOverflowMultiplyingOffsetAndDownsampleFactor) {
EXPECT_THAT(
PropagateIndexTransformDownsampling(
IndexTransformBuilder(1, 1)
.input_shape({100})
.output_single_input_dimension(
0, std::numeric_limits<Index>::max(), 2, 0)
.Finalize()
.value(),
BoxView<1>({0}, {1000}), span<const Index>({0xfffffffffffff})),
MatchesStatus(absl::StatusCode::kOutOfRange, ".*Integer overflow.*"));
}
TEST(PropagateIndexTransformDownsamplingTest,
ErrorSingleInputDimStridedOutOfRange) {
EXPECT_THAT(PropagateIndexTransformDownsampling(
IndexTransformBuilder(1, 1)
.input_shape({100})
.output_single_input_dimension(0, 0, 2, 0)
.Finalize()
.value(),
BoxView<1>({0}, {199}), span<const Index>({2})),
MatchesStatus(absl::StatusCode::kOutOfRange,
".*Output bounds interval .* does not contain "
"output range interval .*"));
}
TEST(PropagateIndexTransformDownsamplingTest,
ErrorIndexArrayInvalidDownsampleFactor) {
EXPECT_THAT(PropagateIndexTransformDownsampling(
IndexTransformBuilder(1, 1)
.input_shape({3})
.output_index_array(0, 0, 1, MakeArray<Index>({3, 4, 5}))
.Finalize()
.value(),
BoxView<1>({0}, {100}),
span<const Index>({std::numeric_limits<Index>::max()})),
MatchesStatus(absl::StatusCode::kOutOfRange,
".*Downsample factor is out of range"));
}
TEST(PropagateIndexTransformDownsamplingTest,
ErrorIndexArrayOverflowMultiplyingStrideAndDownsampleFactor) {
EXPECT_THAT(
PropagateIndexTransformDownsampling(
IndexTransformBuilder(1, 1)
.input_shape({3})
.output_index_array(0, 0, 100, MakeArray<Index>({3, 4, 5}))
.Finalize()
.value(),
BoxView<1>({0}, {100}), span<const Index>({kInfIndex})),
MatchesStatus(absl::StatusCode::kOutOfRange, ".*Integer overflow.*"));
}
TEST(PropagateIndexTransformDownsamplingTest,
ErrorIndexArrayOverflowMultiplyingOffsetAndDownsampleFactor) {
EXPECT_THAT(
PropagateIndexTransformDownsampling(
IndexTransformBuilder(1, 1)
.input_shape({3})
.output_index_array(0, 100, 1, MakeArray<Index>({3, 4, 5}))
.Finalize()
.value(),
BoxView<1>({0}, {100}), span<const Index>({kInfIndex})),
MatchesStatus(absl::StatusCode::kOutOfRange, ".*Integer overflow.*"));
}
TEST(PropagateIndexTransformDownsamplingTest, ErrorIndexArrayOutOfRange) {
EXPECT_THAT(
PropagateIndexTransformDownsampling(
IndexTransformBuilder(1, 1)
.input_shape({3})
.output_index_array(0, 0, 1, MakeArray<Index>({3, 4, 5}))
.Finalize()
.value(),
BoxView<1>({0}, {9}), span<const Index>({2})),
MatchesStatus(
absl::StatusCode::kOutOfRange,
"Propagating downsampling factor 2 through output dimension 0: "
"Index 5 is outside valid range \\[0, 5\\)"));
}
TEST(CanDownsampleIndexTransformTest, Rank0) {
EXPECT_TRUE(
CanDownsampleIndexTransform(tensorstore::IdentityTransform(0), {}, {}));
}
TEST(CanDownsampleIndexTransformTest, Constant) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto transform,
tensorstore::IdentityTransform(1) | Dims(0).IndexSlice(42));
EXPECT_TRUE(CanDownsampleIndexTransform(transform, BoxView<1>({42}, {1}),
span<const Index>({3})));
EXPECT_FALSE(CanDownsampleIndexTransform(transform, BoxView<1>({42}, {2}),
span<const Index>({3})));
EXPECT_FALSE(CanDownsampleIndexTransform(transform, BoxView<1>({41}, {3}),
span<const Index>({3})));
EXPECT_TRUE(CanDownsampleIndexTransform(transform, BoxView<1>({41}, {2}),
span<const Index>({3})));
EXPECT_FALSE(CanDownsampleIndexTransform(transform, BoxView<1>({100}),
span<const Index>({3})));
EXPECT_TRUE(CanDownsampleIndexTransform(transform, BoxView<1>({100}),
span<const Index>({1})));
}
TEST(CanDownsampleIndexTransformTest, SingleInputDimension) {
EXPECT_TRUE(CanDownsampleIndexTransform(
(tensorstore::IdentityTransform(1) | Dims(0).SizedInterval(9, 3)).value(),
BoxView<1>({9}, {10}), span<const Index>({3})));
EXPECT_TRUE(CanDownsampleIndexTransform(
(tensorstore::IdentityTransform(1) | Dims(0).SizedInterval(18, 1))
.value(),
BoxView<1>({9}, {10}), span<const Index>({3})));
EXPECT_FALSE(CanDownsampleIndexTransform(
(tensorstore::IdentityTransform(1) | Dims(0).SizedInterval(9, 2)).value(),
BoxView<1>({9}, {10}), span<const Index>({3})));
EXPECT_FALSE(CanDownsampleIndexTransform(
(tensorstore::IdentityTransform(1) | Dims(0).SizedInterval(9, 3, -1))
.value(),
BoxView<1>({9}, {10}), span<const Index>({3})));
EXPECT_FALSE(CanDownsampleIndexTransform(
(tensorstore::IdentityTransform(1) | Dims(0).SizedInterval(10, 2))
.value(),
BoxView<1>({9}, {10}), span<const Index>({3})));
EXPECT_FALSE(CanDownsampleIndexTransform(
(tensorstore::IdentityTransform(1) | Dims(0).SizedInterval(9, 3, 2))
.value(),
BoxView<1>({9}, {10}), span<const Index>({3})));
}
TEST(CanDownsampleIndexTransformTest, IndexArray) {
EXPECT_FALSE(CanDownsampleIndexTransform(
(tensorstore::IdentityTransform(1) |
Dims(0).IndexArraySlice(MakeArray<Index>({2, 5, 3})))
.value(),
BoxView<1>({0}, {100}), span<const Index>({2})));
}
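// Checks the defining invariant of PropagateIndexTransformDownsampling:
// applying `downsampled_transform` to already-downsampled data must produce
// the same result as applying the propagated transform to the base data and
// then downsampling.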
void TestPropagateIndexTransformDownsamplingInvariance(DimensionIndex rank) {
std::minstd_rand gen{tensorstore::internal_testing::GetRandomSeedForTest(
"TENSORSTORE_DOWNSAMPLE_PROPAGATE_INVARIANCE_SEED")};
tensorstore::internal::MakeRandomBoxParameters box_p;
box_p.min_rank = box_p.max_rank = rank;
auto base_bounds = tensorstore::internal::MakeRandomBox(gen, box_p);
SCOPED_TRACE(tensorstore::StrCat("base_bounds=", base_bounds));
auto base_data = tensorstore::internal::MakeRandomArray(
gen, base_bounds, tensorstore::dtype_v<uint8_t>);
SCOPED_TRACE(tensorstore::StrCat("base_data=", base_data));
std::vector<Index> downsample_factors(rank);
for (DimensionIndex i = 0; i < rank; ++i) {
downsample_factors[i] =
absl::Uniform<Index>(absl::IntervalClosedClosed, gen, 1, 2);
}
SCOPED_TRACE(tensorstore::StrCat("downsample_factors=",
tensorstore::span(downsample_factors)));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto downsampled_data,
DownsampleArray(base_data, downsample_factors, DownsampleMethod::kMean));
Box<> downsampled_bounds(rank);
DownsampleBounds(base_bounds, downsampled_bounds, downsample_factors,
DownsampleMethod::kMean);
SCOPED_TRACE(tensorstore::StrCat("downsampled_bounds=", downsampled_bounds));
auto downsampled_transform = tensorstore::internal::MakeRandomIndexTransform(
gen, downsampled_bounds, rank * 2);
SCOPED_TRACE(
tensorstore::StrCat("downsampled_transform=", downsampled_transform));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto propagated,
PropagateIndexTransformDownsampling(downsampled_transform, base_bounds,
downsample_factors));
SCOPED_TRACE(tensorstore::StrCat("propagated=", propagated));
SCOPED_TRACE(tensorstore::StrCat("downsampled_data=", downsampled_data));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto downsample_then_transform,
downsampled_data | downsampled_transform | tensorstore::Materialize());
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto transformed_base,
base_data | propagated.transform);
tensorstore::SharedOffsetArray<const void> transform_then_downsample;
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
transform_then_downsample,
DownsampleTransformedArray(transformed_base,
propagated.input_downsample_factors,
DownsampleMethod::kMean));
if (downsampled_transform.input_rank() < propagated.transform.input_rank()) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
transform_then_downsample,
transform_then_downsample |
tensorstore::DynamicDims(
{tensorstore::DimRangeSpec{downsampled_transform.input_rank()}})
.IndexSlice(0) |
tensorstore::Materialize());
}
EXPECT_EQ(transform_then_downsample, downsample_then_transform);
}
constexpr size_t kNumRandomTests = 50;
TEST(PropagateIndexTransformDownsamplingTest, InvarianceRank0) {
for (size_t i = 0; i < kNumRandomTests; ++i) {
TestPropagateIndexTransformDownsamplingInvariance(0);
}
}
TEST(PropagateIndexTransformDownsamplingTest, InvarianceRank1) {
for (size_t i = 0; i < kNumRandomTests; ++i) {
TestPropagateIndexTransformDownsamplingInvariance(1);
}
}
TEST(PropagateIndexTransformDownsamplingTest, InvarianceRank2) {
for (size_t i = 0; i < kNumRandomTests; ++i) {
TestPropagateIndexTransformDownsamplingInvariance(2);
}
}
TEST(PropagateIndexTransformDownsamplingTest, InvarianceRank3) {
for (size_t i = 0; i < kNumRandomTests; ++i) {
TestPropagateIndexTransformDownsamplingInvariance(3);
}
}
TEST(DownsampleIntervalTest, UnboundedLower) {
EXPECT_EQ(IndexInterval::Closed(-kInfIndex, 10),
DownsampleInterval(IndexInterval::UncheckedClosed(-kInfIndex, 30),
3, DownsampleMethod::kMean));
}
TEST(DownsampleIntervalTest, UnboundedUpper) {
EXPECT_EQ(IndexInterval::Closed(-10, kInfIndex),
DownsampleInterval(IndexInterval::UncheckedClosed(-30, kInfIndex),
3, DownsampleMethod::kMean));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/driver/downsample/downsample_util.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/driver/downsample/downsample_util_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
26ad6729-c38a-4ff8-bc05-3336cfc5b0f0 | cpp | tensorflow/tensorflow | hlo_op_profiler | third_party/xla/xla/service/gpu/model/hlo_op_profiler.cc | third_party/xla/xla/service/gpu/model/hlo_op_profiler_test.cc | #include "xla/service/gpu/model/hlo_op_profiler.h"
#include <algorithm>
#include <cstdint>
#include <memory>
#include <random>
#include <string>
#include <utility>
#include <vector>
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/statusor.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "xla/debug_options_flags.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/literal.h"
#include "xla/primitive_util.h"
#include "xla/service/executable.h"
#include "xla/service/gpu/model/hlo_op_profile.pb.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/hlo_runner.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_description.h"
#include "xla/tests/test_utils.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
#ifdef GOOGLE_CUDA
#include "xla/backends/profiler/gpu/cupti_collector.h"
#include "xla/backends/profiler/gpu/cupti_tracer.h"
#endif
namespace xla {
namespace gpu {
#ifdef GOOGLE_CUDA
class CuptiKernelTracer : public profiler::CuptiTraceCollector {
public:
CuptiKernelTracer()
: profiler::CuptiTraceCollector({}),
cupti_tracer_(profiler::CuptiTracer::GetCuptiTracerSingleton()) {
CHECK(cupti_tracer_->IsAvailable());
profiler::CuptiTracerOptions options;
options.cbids_selected.push_back(
CUPTI_DRIVER_TRACE_CBID_cu64GLMapBufferObject);
options.activities_selected.push_back(CUPTI_ACTIVITY_KIND_KERNEL);
cupti_tracer_->Enable(options, this);
}
uint64_t getMedianKernelTimeNs() && {
cupti_tracer_->Disable();
if (kernel_times_ns_.empty()) {
LOG(ERROR) << "No kernel events";
return 0;
}
    std::sort(kernel_times_ns_.begin(), kernel_times_ns_.end());
    size_t i = kernel_times_ns_.size() / 2;
    if (kernel_times_ns_.size() % 2 != 0) {
      // Odd count: the middle element is the median.
      return kernel_times_ns_[i];
    }
    // Even count: average the two middle elements, rounding up.
    return (kernel_times_ns_[i - 1] + kernel_times_ns_[i] + 1) / 2;
}
private:
void AddEvent(profiler::CuptiTracerEvent&& event) override {
if (event.type == profiler::CuptiTracerEventType::Kernel) {
kernel_times_ns_.push_back(event.end_time_ns - event.start_time_ns);
}
VLOG(5) << "CuptiTracerEvent: " << event.name << ", "
<< event.end_time_ns - event.start_time_ns << "ns";
}
void OnEventsDropped(const std::string& reason,
uint32_t num_events) override {
LOG(WARNING) << "Dropped " << num_events << " events: " << reason;
}
void Flush() override {}
profiler::CuptiTracer* cupti_tracer_;
std::vector<uint64_t> kernel_times_ns_;
};
#else
class CuptiKernelTracer {
public:
uint64_t getMedianKernelTimeNs() && {
LOG(FATAL) << "Not built with --config=cuda";
}
};
#endif
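// Builds a module whose entry computation is a single loop fusion containing
// a chain of `chain_length` dependent ops over a one-element input, so kernel
// time scales with the chain length while memory traffic stays constant.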
std::unique_ptr<HloModule> HloOpProfiler::MakeModuleForMeasurements(
HloOpcode op, PrimitiveType data_type, int chain_length) {
constexpr int64_t kInputSize = 1;
const Shape shape = ShapeUtil::MakeShape(data_type, {kInputSize});
HloModuleConfig config;
config.set_debug_options(GetDebugOptionsFromFlags());
auto module = std::make_unique<HloModule>("module", config);
HloComputation::Builder entry_builder("entry");
HloComputation::Builder fusion_builder("fusion");
HloInstruction* pf = fusion_builder.AddInstruction(
HloInstruction::CreateParameter(0, shape, "pf"));
HloInstruction* last = pf;
for (int i = 0; i < chain_length; ++i) {
switch (HloOpcodeArity(op).value_or(0)) {
case 1:
last = fusion_builder.AddInstruction(
HloInstruction::CreateUnary(shape, op, last));
break;
case 2:
last = fusion_builder.AddInstruction(
HloInstruction::CreateBinary(shape, op, last, pf));
break;
default:
LOG(FATAL) << "Unsupported opcode: " << HloOpcodeString(op);
}
}
HloComputation* subcomp =
module->AddEmbeddedComputation(fusion_builder.Build());
HloInstruction* p0 = entry_builder.AddInstruction(
HloInstruction::CreateParameter(0, shape, "p0"));
entry_builder.AddInstruction(HloInstruction::CreateFusion(
shape, HloInstruction::FusionKind::kLoop, {p0}, subcomp));
module->AddEntryComputation(entry_builder.Build());
VLOG(9) << module->ToString();
return module;
}
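// Runs the chain module once to warm up, then ten times each with small- and
// large-range arguments while CUPTI records per-kernel durations; the median
// kernel time is returned to reduce sensitivity to outlier launches.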
absl::StatusOr<absl::Duration> HloOpProfiler::MeasureOpChainDuration(
HloOpcode op, PrimitiveType data_type, int chain_length) {
#ifndef GOOGLE_CUDA
return FailedPrecondition("Not built with --config=cuda");
#endif
std::unique_ptr<HloModule> module =
MakeModuleForMeasurements(op, data_type, chain_length);
std::minstd_rand0 engine;
  std::vector<Literal> args_small =
      MakeFakeArguments(module.get(), &engine, false).value();
  std::vector<Literal> args_large =
      MakeFakeArguments(module.get(), &engine, true).value();
const absl::Time t_compile_start = absl::Now();
  TF_ASSIGN_OR_RETURN(std::unique_ptr<Executable> ex,
                      runner_.CreateExecutable(std::move(module), false));
if (absl::Now() - t_compile_start > absl::Seconds(10)) {
return ResourceExhausted("Too slow compilation");
}
TF_RETURN_IF_ERROR(
runner_.ExecuteWithExecutable(ex.get(), args_small).status());
CuptiKernelTracer cupti_tracer;
for (int i = 0; i < 10; ++i) {
TF_RETURN_IF_ERROR(
runner_.ExecuteWithExecutable(ex.get(), args_small).status());
TF_RETURN_IF_ERROR(
runner_.ExecuteWithExecutable(ex.get(), args_large).status());
}
return absl::Nanoseconds(std::move(cupti_tracer).getMedianKernelTimeNs());
}
HloOpProfiler::HloOpProfiler(HloRunner& runner)
: runner_(runner),
dev_info_(runner.backend().stream_executors()[0]->GetDeviceDescription()),
min_duration_(2 * MeasureOpChainDuration(HloOpcode::kNegate, F32, 0)
.value_or(absl::ZeroDuration())) {
VLOG(3) << "Minimum kernel duration: " << min_duration_;
CHECK_GT(min_duration_, absl::ZeroDuration())
<< "Failed to measure kernel runtime";
}
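// Doubles the chain length starting from 16 until one kernel runs longer than
// `min_duration_` (twice the empty-chain baseline measured in the
// constructor), so that per-op cost dominates fixed launch overhead.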
absl::StatusOr<HloInstructionProfile> HloOpProfiler::MeasureClockCyclesPerOp(
HloOpcode op, PrimitiveType data_type) {
VLOG(2) << "Measuring " << HloOpcodeString(op) << " "
<< primitive_util::LowercasePrimitiveTypeName(data_type);
constexpr int kMinOpChainLength = 16;
constexpr int kMaxOpChainLength = 8192;
absl::Duration duration = absl::ZeroDuration();
int chain_length = kMinOpChainLength;
do {
if (chain_length * 2 > kMaxOpChainLength) {
return FailedPrecondition("%s is too fast to measure",
HloOpcodeString(op));
}
TF_ASSIGN_OR_RETURN(duration,
MeasureOpChainDuration(op, data_type, chain_length));
VLOG(3) << chain_length << "\t" << duration;
chain_length *= 2;
} while (duration < min_duration_);
TF_ASSIGN_OR_RETURN(absl::Duration double_duration,
MeasureOpChainDuration(op, data_type, chain_length));
VLOG(3) << chain_length << "\t" << double_duration;
  // `double_duration` was measured at twice the chain length used for
  // `duration`, so their difference covers chain_length / 2 ops and the
  // constant kernel launch overhead cancels out.
  const absl::Duration time_per_op =
      (double_duration - duration) * 2.0 / chain_length;
  const float clocks_per_nanosecond = dev_info_.clock_rate_ghz() * 2;
const int64_t n_clocks =
absl::ToInt64Nanoseconds(time_per_op) * clocks_per_nanosecond;
VLOG(3) << time_per_op << " = " << n_clocks << " clock cycles";
HloInstructionProfile profile;
profile.mutable_instruction()->mutable_opcode()->assign(HloOpcodeString(op));
profile.mutable_instruction()->mutable_shape()->set_element_type(data_type);
profile.set_clock_cycles(n_clocks);
return profile;
}
}
} | #include "xla/service/gpu/model/hlo_op_profiler.h"
#include <gtest/gtest.h>
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/xla_data.pb.h"
namespace xla {
namespace gpu {
namespace {
using HloOpProfilerTest = HloTestBase;
TEST_F(HloOpProfilerTest, BasicMeasurementsAreCorrect) {
#ifndef GOOGLE_CUDA
GTEST_SKIP() << "Not built with --config=cuda";
#endif
HloOpProfiler profiler(test_runner_);
EXPECT_GT(profiler.MeasureClockCyclesPerOp(HloOpcode::kAdd, F32)
.value()
.clock_cycles(),
0);
EXPECT_GT(profiler.MeasureClockCyclesPerOp(HloOpcode::kDivide, F64)
.value()
.clock_cycles(),
300);
EXPECT_GT(profiler.MeasureClockCyclesPerOp(HloOpcode::kSqrt, C128)
.value()
.clock_cycles(),
1000);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/model/hlo_op_profiler.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/model/hlo_op_profiler_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
e9e402c9-9be5-442f-9be4-e0ed14971680 | cpp | google/cel-cpp | memory | common/memory.cc | common/memory_test.cc | #include "common/memory.h"
#include <cstddef>
#include <cstring>
#include <new>
#include <ostream>
#include "absl/base/no_destructor.h"
#include "absl/log/absl_check.h"
#include "absl/numeric/bits.h"
#include "google/protobuf/arena.h"
namespace cel {
std::ostream& operator<<(std::ostream& out,
MemoryManagement memory_management) {
switch (memory_management) {
case MemoryManagement::kPooling:
return out << "POOLING";
case MemoryManagement::kReferenceCounting:
return out << "REFERENCE_COUNTING";
}
}
void* ReferenceCountingMemoryManager::Allocate(size_t size, size_t alignment) {
ABSL_DCHECK(absl::has_single_bit(alignment))
<< "alignment must be a power of 2: " << alignment;
  if (size == 0) {
    return nullptr;
  }
  if (alignment <= __STDCPP_DEFAULT_NEW_ALIGNMENT__) {
    // Default-aligned requests can use plain operator new.
    return ::operator new(size);
  }
  // Over-aligned requests need the aligned overload (C++17).
  return ::operator new(size, static_cast<std::align_val_t>(alignment));
}
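// Mirrors Allocate: uses the sized-deallocation overloads when the compiler
// advertises __cpp_sized_deallocation, letting the allocator skip a size
// lookup, and the aligned overloads for over-aligned pointers.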
bool ReferenceCountingMemoryManager::Deallocate(void* ptr, size_t size,
size_t alignment) noexcept {
ABSL_DCHECK(absl::has_single_bit(alignment))
<< "alignment must be a power of 2: " << alignment;
if (ptr == nullptr) {
ABSL_DCHECK_EQ(size, 0);
return false;
}
ABSL_DCHECK_GT(size, 0);
if (alignment <= __STDCPP_DEFAULT_NEW_ALIGNMENT__) {
#if defined(__cpp_sized_deallocation) && __cpp_sized_deallocation >= 201309L
::operator delete(ptr, size);
#else
::operator delete(ptr);
#endif
} else {
#if defined(__cpp_sized_deallocation) && __cpp_sized_deallocation >= 201309L
::operator delete(ptr, size, static_cast<std::align_val_t>(alignment));
#else
::operator delete(ptr, static_cast<std::align_val_t>(alignment));
#endif
}
return true;
}
MemoryManager MemoryManager::Unmanaged() {
  // absl::NoDestructor is never destroyed, so this arena -- and everything
  // allocated through the "unmanaged" manager -- lives until process exit.
  static absl::NoDestructor<google::protobuf::Arena> arena;
  return MemoryManager::Pooling(&*arena);
}
} | #include "common/memory.h"
#include <cstddef>
#include <memory>
#include <sstream>
#include <string>
#include <utility>
#include "google/protobuf/struct.pb.h"
#include "absl/base/nullability.h"
#include "absl/debugging/leak_check.h"
#include "absl/log/absl_check.h"
#include "absl/types/optional.h"
#include "common/allocator.h"
#include "common/data.h"
#include "common/internal/reference_count.h"
#include "common/native_type.h"
#include "internal/testing.h"
#include "google/protobuf/arena.h"
#ifdef ABSL_HAVE_EXCEPTIONS
#include <stdexcept>
#endif
namespace cel {
namespace {
using ::testing::_;
using ::testing::IsFalse;
using ::testing::IsNull;
using ::testing::IsTrue;
using ::testing::NotNull;
using ::testing::TestParamInfo;
using ::testing::TestWithParam;
TEST(MemoryManagement, ostream) {
{
std::ostringstream out;
out << MemoryManagement::kPooling;
EXPECT_EQ(out.str(), "POOLING");
}
{
std::ostringstream out;
out << MemoryManagement::kReferenceCounting;
EXPECT_EQ(out.str(), "REFERENCE_COUNTING");
}
}
struct TrivialSmallObject {
uintptr_t ptr;
char padding[32 - sizeof(uintptr_t)];
};
TEST(RegionalMemoryManager, TrivialSmallSizes) {
google::protobuf::Arena arena;
MemoryManager memory_manager = MemoryManager::Pooling(&arena);
for (size_t i = 0; i < 1024; ++i) {
static_cast<void>(memory_manager.MakeUnique<TrivialSmallObject>());
}
}
struct TrivialMediumObject {
uintptr_t ptr;
char padding[256 - sizeof(uintptr_t)];
};
TEST(RegionalMemoryManager, TrivialMediumSizes) {
google::protobuf::Arena arena;
MemoryManager memory_manager = MemoryManager::Pooling(&arena);
for (size_t i = 0; i < 1024; ++i) {
static_cast<void>(memory_manager.MakeUnique<TrivialMediumObject>());
}
}
struct TrivialLargeObject {
uintptr_t ptr;
char padding[4096 - sizeof(uintptr_t)];
};
TEST(RegionalMemoryManager, TrivialLargeSizes) {
google::protobuf::Arena arena;
MemoryManager memory_manager = MemoryManager::Pooling(&arena);
for (size_t i = 0; i < 1024; ++i) {
static_cast<void>(memory_manager.MakeUnique<TrivialLargeObject>());
}
}
TEST(RegionalMemoryManager, TrivialMixedSizes) {
google::protobuf::Arena arena;
MemoryManager memory_manager = MemoryManager::Pooling(&arena);
for (size_t i = 0; i < 1024; ++i) {
switch (i % 3) {
case 0:
static_cast<void>(memory_manager.MakeUnique<TrivialSmallObject>());
break;
case 1:
static_cast<void>(memory_manager.MakeUnique<TrivialMediumObject>());
break;
case 2:
static_cast<void>(memory_manager.MakeUnique<TrivialLargeObject>());
break;
}
}
}
struct TrivialHugeObject {
uintptr_t ptr;
char padding[32768 - sizeof(uintptr_t)];
};
TEST(RegionalMemoryManager, TrivialHugeSizes) {
google::protobuf::Arena arena;
MemoryManager memory_manager = MemoryManager::Pooling(&arena);
for (size_t i = 0; i < 1024; ++i) {
static_cast<void>(memory_manager.MakeUnique<TrivialHugeObject>());
}
}
class SkippableDestructor {
public:
explicit SkippableDestructor(bool& deleted) : deleted_(deleted) {}
~SkippableDestructor() { deleted_ = true; }
private:
bool& deleted_;
};
}
template <>
struct NativeTypeTraits<SkippableDestructor> final {
static bool SkipDestructor(const SkippableDestructor&) { return true; }
};
namespace {
TEST(RegionalMemoryManager, SkippableDestructor) {
bool deleted = false;
{
google::protobuf::Arena arena;
MemoryManager memory_manager = MemoryManager::Pooling(&arena);
auto shared = memory_manager.MakeShared<SkippableDestructor>(deleted);
static_cast<void>(shared);
}
EXPECT_FALSE(deleted);
}
class MemoryManagerTest : public TestWithParam<MemoryManagement> {
public:
void SetUp() override {}
void TearDown() override { Finish(); }
void Finish() { arena_.reset(); }
MemoryManagerRef memory_manager() {
switch (memory_management()) {
case MemoryManagement::kReferenceCounting:
return MemoryManager::ReferenceCounting();
case MemoryManagement::kPooling:
if (!arena_) {
arena_.emplace();
}
return MemoryManager::Pooling(&*arena_);
}
}
MemoryManagement memory_management() const { return GetParam(); }
static std::string ToString(TestParamInfo<MemoryManagement> param) {
std::ostringstream out;
out << param.param;
return out.str();
}
private:
absl::optional<google::protobuf::Arena> arena_;
};
TEST_P(MemoryManagerTest, AllocateAndDeallocateZeroSize) {
EXPECT_THAT(memory_manager().Allocate(0, 1), IsNull());
EXPECT_THAT(memory_manager().Deallocate(nullptr, 0, 1), IsFalse());
}
TEST_P(MemoryManagerTest, AllocateAndDeallocateBadAlignment) {
EXPECT_DEBUG_DEATH(absl::IgnoreLeak(memory_manager().Allocate(1, 0)), _);
EXPECT_DEBUG_DEATH(memory_manager().Deallocate(nullptr, 0, 0), _);
}
TEST_P(MemoryManagerTest, AllocateAndDeallocate) {
constexpr size_t kSize = 1024;
constexpr size_t kAlignment = __STDCPP_DEFAULT_NEW_ALIGNMENT__;
void* ptr = memory_manager().Allocate(kSize, kAlignment);
ASSERT_THAT(ptr, NotNull());
if (memory_management() == MemoryManagement::kReferenceCounting) {
EXPECT_THAT(memory_manager().Deallocate(ptr, kSize, kAlignment), IsTrue());
}
}
TEST_P(MemoryManagerTest, AllocateAndDeallocateOveraligned) {
constexpr size_t kSize = 1024;
constexpr size_t kAlignment = __STDCPP_DEFAULT_NEW_ALIGNMENT__ * 4;
void* ptr = memory_manager().Allocate(kSize, kAlignment);
ASSERT_THAT(ptr, NotNull());
if (memory_management() == MemoryManagement::kReferenceCounting) {
EXPECT_THAT(memory_manager().Deallocate(ptr, kSize, kAlignment), IsTrue());
}
}
class Object {
public:
Object() : deleted_(nullptr) {}
explicit Object(bool& deleted) : deleted_(&deleted) {}
~Object() {
if (deleted_ != nullptr) {
ABSL_CHECK(!*deleted_);
*deleted_ = true;
}
}
int member = 0;
private:
bool* deleted_;
};
class Subobject : public Object {
public:
using Object::Object;
};
TEST_P(MemoryManagerTest, Shared) {
bool deleted = false;
{
auto object = memory_manager().MakeShared<Object>(deleted);
EXPECT_TRUE(object);
EXPECT_FALSE(deleted);
}
switch (memory_management()) {
case MemoryManagement::kPooling:
EXPECT_FALSE(deleted);
break;
case MemoryManagement::kReferenceCounting:
EXPECT_TRUE(deleted);
break;
}
Finish();
}
TEST_P(MemoryManagerTest, SharedAliasCopy) {
bool deleted = false;
{
auto object = memory_manager().MakeShared<Object>(deleted);
EXPECT_TRUE(object);
EXPECT_FALSE(deleted);
{
auto member = Shared<int>(object, &object->member);
EXPECT_TRUE(object);
EXPECT_FALSE(deleted);
EXPECT_TRUE(member);
}
EXPECT_TRUE(object);
EXPECT_FALSE(deleted);
}
switch (memory_management()) {
case MemoryManagement::kPooling:
EXPECT_FALSE(deleted);
break;
case MemoryManagement::kReferenceCounting:
EXPECT_TRUE(deleted);
break;
}
Finish();
}
TEST_P(MemoryManagerTest, SharedAliasMove) {
bool deleted = false;
{
auto object = memory_manager().MakeShared<Object>(deleted);
EXPECT_TRUE(object);
EXPECT_FALSE(deleted);
{
auto member = Shared<int>(std::move(object), &object->member);
EXPECT_FALSE(object);
EXPECT_FALSE(deleted);
EXPECT_TRUE(member);
}
switch (memory_management()) {
case MemoryManagement::kPooling:
EXPECT_FALSE(deleted);
break;
case MemoryManagement::kReferenceCounting:
EXPECT_TRUE(deleted);
break;
}
}
Finish();
}
TEST_P(MemoryManagerTest, SharedStaticCastCopy) {
bool deleted = false;
{
auto object = memory_manager().MakeShared<Object>(deleted);
EXPECT_TRUE(object);
EXPECT_FALSE(deleted);
{
auto member = StaticCast<void>(object);
EXPECT_TRUE(object);
EXPECT_FALSE(deleted);
EXPECT_TRUE(member);
}
EXPECT_TRUE(object);
EXPECT_FALSE(deleted);
}
switch (memory_management()) {
case MemoryManagement::kPooling:
EXPECT_FALSE(deleted);
break;
case MemoryManagement::kReferenceCounting:
EXPECT_TRUE(deleted);
break;
}
Finish();
}
TEST_P(MemoryManagerTest, SharedStaticCastMove) {
bool deleted = false;
{
auto object = memory_manager().MakeShared<Object>(deleted);
EXPECT_TRUE(object);
EXPECT_FALSE(deleted);
{
auto member = StaticCast<void>(std::move(object));
EXPECT_FALSE(object);
EXPECT_FALSE(deleted);
EXPECT_TRUE(member);
}
switch (memory_management()) {
case MemoryManagement::kPooling:
EXPECT_FALSE(deleted);
break;
case MemoryManagement::kReferenceCounting:
EXPECT_TRUE(deleted);
break;
}
}
Finish();
}
TEST_P(MemoryManagerTest, SharedCopyConstruct) {
bool deleted = false;
{
auto object = memory_manager().MakeShared<Object>(deleted);
EXPECT_TRUE(object);
Shared<Object> copied_object(object);
EXPECT_TRUE(copied_object);
EXPECT_FALSE(deleted);
}
switch (memory_management()) {
case MemoryManagement::kPooling:
EXPECT_FALSE(deleted);
break;
case MemoryManagement::kReferenceCounting:
EXPECT_TRUE(deleted);
break;
}
Finish();
}
TEST_P(MemoryManagerTest, SharedMoveConstruct) {
bool deleted = false;
{
auto object = memory_manager().MakeShared<Object>(deleted);
EXPECT_TRUE(object);
Shared<Object> moved_object(std::move(object));
EXPECT_FALSE(object);
EXPECT_TRUE(moved_object);
EXPECT_FALSE(deleted);
}
switch (memory_management()) {
case MemoryManagement::kPooling:
EXPECT_FALSE(deleted);
break;
case MemoryManagement::kReferenceCounting:
EXPECT_TRUE(deleted);
break;
}
Finish();
}
TEST_P(MemoryManagerTest, SharedCopyAssign) {
bool deleted = false;
{
auto object = memory_manager().MakeShared<Object>(deleted);
EXPECT_TRUE(object);
Shared<Object> moved_object(std::move(object));
EXPECT_FALSE(object);
EXPECT_TRUE(moved_object);
object = moved_object;
EXPECT_TRUE(object);
EXPECT_FALSE(deleted);
}
switch (memory_management()) {
case MemoryManagement::kPooling:
EXPECT_FALSE(deleted);
break;
case MemoryManagement::kReferenceCounting:
EXPECT_TRUE(deleted);
break;
}
Finish();
}
TEST_P(MemoryManagerTest, SharedMoveAssign) {
bool deleted = false;
{
auto object = memory_manager().MakeShared<Object>(deleted);
EXPECT_TRUE(object);
Shared<Object> moved_object(std::move(object));
EXPECT_FALSE(object);
EXPECT_TRUE(moved_object);
object = std::move(moved_object);
EXPECT_FALSE(moved_object);
EXPECT_TRUE(object);
EXPECT_FALSE(deleted);
}
switch (memory_management()) {
case MemoryManagement::kPooling:
EXPECT_FALSE(deleted);
break;
case MemoryManagement::kReferenceCounting:
EXPECT_TRUE(deleted);
break;
}
Finish();
}
TEST_P(MemoryManagerTest, SharedCopyConstructConvertible) {
bool deleted = false;
{
auto object = memory_manager().MakeShared<Subobject>(deleted);
EXPECT_TRUE(object);
Shared<Object> copied_object(object);
EXPECT_TRUE(copied_object);
EXPECT_FALSE(deleted);
}
switch (memory_management()) {
case MemoryManagement::kPooling:
EXPECT_FALSE(deleted);
break;
case MemoryManagement::kReferenceCounting:
EXPECT_TRUE(deleted);
break;
}
Finish();
}
TEST_P(MemoryManagerTest, SharedMoveConstructConvertible) {
bool deleted = false;
{
auto object = memory_manager().MakeShared<Subobject>(deleted);
EXPECT_TRUE(object);
Shared<Object> moved_object(std::move(object));
EXPECT_FALSE(object);
EXPECT_TRUE(moved_object);
EXPECT_FALSE(deleted);
}
switch (memory_management()) {
case MemoryManagement::kPooling:
EXPECT_FALSE(deleted);
break;
case MemoryManagement::kReferenceCounting:
EXPECT_TRUE(deleted);
break;
}
Finish();
}
TEST_P(MemoryManagerTest, SharedCopyAssignConvertible) {
bool deleted = false;
{
auto subobject = memory_manager().MakeShared<Subobject>(deleted);
EXPECT_TRUE(subobject);
auto object = memory_manager().MakeShared<Object>();
EXPECT_TRUE(object);
object = subobject;
EXPECT_TRUE(object);
EXPECT_TRUE(subobject);
EXPECT_FALSE(deleted);
}
switch (memory_management()) {
case MemoryManagement::kPooling:
EXPECT_FALSE(deleted);
break;
case MemoryManagement::kReferenceCounting:
EXPECT_TRUE(deleted);
break;
}
Finish();
}
TEST_P(MemoryManagerTest, SharedMoveAssignConvertible) {
bool deleted = false;
{
auto subobject = memory_manager().MakeShared<Subobject>(deleted);
EXPECT_TRUE(subobject);
auto object = memory_manager().MakeShared<Object>();
EXPECT_TRUE(object);
object = std::move(subobject);
EXPECT_TRUE(object);
EXPECT_FALSE(subobject);
EXPECT_FALSE(deleted);
}
switch (memory_management()) {
case MemoryManagement::kPooling:
EXPECT_FALSE(deleted);
break;
case MemoryManagement::kReferenceCounting:
EXPECT_TRUE(deleted);
break;
}
Finish();
}
TEST_P(MemoryManagerTest, SharedSwap) {
using std::swap;
auto object1 = memory_manager().MakeShared<Object>();
auto object2 = memory_manager().MakeShared<Object>();
auto* const object1_ptr = object1.operator->();
auto* const object2_ptr = object2.operator->();
swap(object1, object2);
EXPECT_EQ(object1.operator->(), object2_ptr);
EXPECT_EQ(object2.operator->(), object1_ptr);
}
TEST_P(MemoryManagerTest, SharedPointee) {
using std::swap;
auto object = memory_manager().MakeShared<Object>();
EXPECT_EQ(std::addressof(*object), object.operator->());
}
TEST_P(MemoryManagerTest, SharedViewConstruct) {
bool deleted = false;
absl::optional<SharedView<Object>> dangling_object_view;
{
auto object = memory_manager().MakeShared<Object>(deleted);
dangling_object_view.emplace(object);
EXPECT_TRUE(*dangling_object_view);
{
auto copied_object = Shared<Object>(*dangling_object_view);
EXPECT_FALSE(deleted);
}
EXPECT_FALSE(deleted);
}
switch (memory_management()) {
case MemoryManagement::kPooling:
EXPECT_FALSE(deleted);
break;
case MemoryManagement::kReferenceCounting:
EXPECT_TRUE(deleted);
break;
}
Finish();
}
TEST_P(MemoryManagerTest, SharedViewCopyConstruct) {
bool deleted = false;
absl::optional<SharedView<Object>> dangling_object_view;
{
auto object = memory_manager().MakeShared<Object>(deleted);
auto object_view = SharedView<Object>(object);
SharedView<Object> copied_object_view(object_view);
dangling_object_view.emplace(copied_object_view);
EXPECT_FALSE(deleted);
}
switch (memory_management()) {
case MemoryManagement::kPooling:
EXPECT_FALSE(deleted);
break;
case MemoryManagement::kReferenceCounting:
EXPECT_TRUE(deleted);
break;
}
Finish();
}
TEST_P(MemoryManagerTest, SharedViewMoveConstruct) {
bool deleted = false;
absl::optional<SharedView<Object>> dangling_object_view;
{
auto object = memory_manager().MakeShared<Object>(deleted);
auto object_view = SharedView<Object>(object);
SharedView<Object> moved_object_view(std::move(object_view));
dangling_object_view.emplace(moved_object_view);
EXPECT_FALSE(deleted);
}
switch (memory_management()) {
case MemoryManagement::kPooling:
EXPECT_FALSE(deleted);
break;
case MemoryManagement::kReferenceCounting:
EXPECT_TRUE(deleted);
break;
}
Finish();
}
TEST_P(MemoryManagerTest, SharedViewCopyAssign) {
bool deleted = false;
absl::optional<SharedView<Object>> dangling_object_view;
{
auto object = memory_manager().MakeShared<Object>(deleted);
auto object_view1 = SharedView<Object>(object);
SharedView<Object> object_view2(object);
object_view1 = object_view2;
dangling_object_view.emplace(object_view1);
EXPECT_FALSE(deleted);
}
switch (memory_management()) {
case MemoryManagement::kPooling:
EXPECT_FALSE(deleted);
break;
case MemoryManagement::kReferenceCounting:
EXPECT_TRUE(deleted);
break;
}
Finish();
}
TEST_P(MemoryManagerTest, SharedViewMoveAssign) {
bool deleted = false;
absl::optional<SharedView<Object>> dangling_object_view;
{
auto object = memory_manager().MakeShared<Object>(deleted);
auto object_view1 = SharedView<Object>(object);
SharedView<Object> object_view2(object);
object_view1 = std::move(object_view2);
dangling_object_view.emplace(object_view1);
EXPECT_FALSE(deleted);
}
switch (memory_management()) {
case MemoryManagement::kPooling:
EXPECT_FALSE(deleted);
break;
case MemoryManagement::kReferenceCounting:
EXPECT_TRUE(deleted);
break;
}
Finish();
}
TEST_P(MemoryManagerTest, SharedViewCopyConstructConvertible) {
bool deleted = false;
absl::optional<SharedView<Object>> dangling_object_view;
{
auto subobject = memory_manager().MakeShared<Subobject>(deleted);
auto subobject_view = SharedView<Subobject>(subobject);
SharedView<Object> object_view(subobject_view);
dangling_object_view.emplace(object_view);
EXPECT_FALSE(deleted);
}
switch (memory_management()) {
case MemoryManagement::kPooling:
EXPECT_FALSE(deleted);
break;
case MemoryManagement::kReferenceCounting:
EXPECT_TRUE(deleted);
break;
}
Finish();
}
TEST_P(MemoryManagerTest, SharedViewMoveConstructConvertible) {
bool deleted = false;
absl::optional<SharedView<Object>> dangling_object_view;
{
auto subobject = memory_manager().MakeShared<Subobject>(deleted);
auto subobject_view = SharedView<Subobject>(subobject);
SharedView<Object> object_view(std::move(subobject_view));
dangling_object_view.emplace(object_view);
EXPECT_FALSE(deleted);
}
switch (memory_management()) {
case MemoryManagement::kPooling:
EXPECT_FALSE(deleted);
break;
case MemoryManagement::kReferenceCounting:
EXPECT_TRUE(deleted);
break;
}
Finish();
}
TEST_P(MemoryManagerTest, SharedViewCopyAssignConvertible) {
bool deleted = false;
absl::optional<SharedView<Object>> dangling_object_view;
{
auto subobject = memory_manager().MakeShared<Subobject>(deleted);
auto object_view1 = SharedView<Object>(subobject);
SharedView<Subobject> subobject_view2(subobject);
object_view1 = subobject_view2;
dangling_object_view.emplace(object_view1);
EXPECT_FALSE(deleted);
}
switch (memory_management()) {
case MemoryManagement::kPooling:
EXPECT_FALSE(deleted);
break;
case MemoryManagement::kReferenceCounting:
EXPECT_TRUE(deleted);
break;
}
Finish();
}
TEST_P(MemoryManagerTest, SharedViewMoveAssignConvertible) {
bool deleted = false;
absl::optional<SharedView<Object>> dangling_object_view;
{
auto subobject = memory_manager().MakeShared<Subobject>(deleted);
auto object_view1 = SharedView<Object>(subobject);
SharedView<Subobject> subobject_view2(subobject);
object_view1 = std::move(subobject_view2);
dangling_object_view.emplace(object_view1);
EXPECT_FALSE(deleted);
}
switch (memory_management()) {
case MemoryManagement::kPooling:
EXPECT_FALSE(deleted);
break;
case MemoryManagement::kReferenceCounting:
EXPECT_TRUE(deleted);
break;
}
Finish();
}
TEST_P(MemoryManagerTest, SharedViewSwap) {
using std::swap;
auto object1 = memory_manager().MakeShared<Object>();
auto object2 = memory_manager().MakeShared<Object>();
auto object1_view = SharedView<Object>(object1);
auto object2_view = SharedView<Object>(object2);
swap(object1_view, object2_view);
EXPECT_EQ(object1_view.operator->(), object2.operator->());
EXPECT_EQ(object2_view.operator->(), object1.operator->());
}
TEST_P(MemoryManagerTest, SharedViewPointee) {
using std::swap;
auto object = memory_manager().MakeShared<Object>();
auto object_view = SharedView<Object>(object);
EXPECT_EQ(std::addressof(*object_view), object_view.operator->());
}
TEST_P(MemoryManagerTest, Unique) {
bool deleted = false;
{
auto object = memory_manager().MakeUnique<Object>(deleted);
EXPECT_TRUE(object);
EXPECT_FALSE(deleted);
}
EXPECT_TRUE(deleted);
Finish();
}
TEST_P(MemoryManagerTest, UniquePointee) {
using std::swap;
auto object = memory_manager().MakeUnique<Object>();
EXPECT_EQ(std::addressof(*object), object.operator->());
}
TEST_P(MemoryManagerTest, UniqueSwap) {
using std::swap;
auto object1 = memory_manager().MakeUnique<Object>();
auto object2 = memory_manager().MakeUnique<Object>();
auto* const object1_ptr = object1.operator->();
auto* const object2_ptr = object2.operator->();
swap(object1, object2);
EXPECT_EQ(object1.operator->(), object2_ptr);
EXPECT_EQ(object2.operator->(), object1_ptr);
}
struct EnabledObject : EnableSharedFromThis<EnabledObject> {
Shared<EnabledObject> This() { return shared_from_this(); }
Shared<const EnabledObject> This() const { return shared_from_this(); }
};
TEST_P(MemoryManagerTest, EnableSharedFromThis) {
{
auto object = memory_manager().MakeShared<EnabledObject>();
auto this_object = object->This();
EXPECT_EQ(this_object.operator->(), object.operator->());
}
{
auto object = memory_manager().MakeShared<const EnabledObject>();
auto this_object = object->This();
EXPECT_EQ(this_object.operator->(), object.operator->());
}
Finish();
}
struct ThrowingConstructorObject {
ThrowingConstructorObject() {
#ifdef ABSL_HAVE_EXCEPTIONS
throw std::invalid_argument("ThrowingConstructorObject");
#endif
}
char padding[64];
};
TEST_P(MemoryManagerTest, SharedThrowingConstructor) {
#ifdef ABSL_HAVE_EXCEPTIONS
EXPECT_THROW(static_cast<void>(
memory_manager().MakeShared<ThrowingConstructorObject>()),
std::invalid_argument);
#else
GTEST_SKIP();
#endif
}
TEST_P(MemoryManagerTest, UniqueThrowingConstructor) {
#ifdef ABSL_HAVE_EXCEPTIONS
EXPECT_THROW(static_cast<void>(
memory_manager().MakeUnique<ThrowingConstructorObject>()),
std::invalid_argument);
#else
GTEST_SKIP();
#endif
}
INSTANTIATE_TEST_SUITE_P(
MemoryManagerTest, MemoryManagerTest,
::testing::Values(MemoryManagement::kPooling,
MemoryManagement::kReferenceCounting),
MemoryManagerTest::ToString);
TEST(Owner, None) {
EXPECT_THAT(Owner::None(), IsFalse());
EXPECT_THAT(Owner::None().arena(), IsNull());
}
TEST(Owner, Allocator) {
google::protobuf::Arena arena;
EXPECT_THAT(Owner::Allocator(NewDeleteAllocator()), IsFalse());
EXPECT_THAT(Owner::Allocator(ArenaAllocator(&arena)), IsTrue());
}
TEST(Owner, Arena) {
google::protobuf::Arena arena;
EXPECT_THAT(Owner::Arena(&arena), IsTrue());
EXPECT_EQ(Owner::Arena(&arena).arena(), &arena);
}
TEST(Owner, ReferenceCount) {
auto* refcount = new common_internal::ReferenceCounted();
EXPECT_THAT(Owner::ReferenceCount(refcount), IsTrue());
EXPECT_THAT(Owner::ReferenceCount(refcount).arena(), IsNull());
common_internal::StrongUnref(refcount);
}
TEST(Owner, Equality) {
google::protobuf::Arena arena1;
google::protobuf::Arena arena2;
EXPECT_EQ(Owner::None(), Owner::None());
EXPECT_EQ(Owner::Allocator(NewDeleteAllocator()), Owner::None());
EXPECT_EQ(Owner::Arena(&arena1), Owner::Arena(&arena1));
EXPECT_NE(Owner::Arena(&arena1), Owner::None());
EXPECT_NE(Owner::None(), Owner::Arena(&arena1));
EXPECT_NE(Owner::Arena(&arena1), Owner::Arena(&arena2));
EXPECT_EQ(Owner::Allocator(ArenaAllocator(&arena1)), Owner::Arena(&arena1));
}
TEST(Borrower, None) {
EXPECT_THAT(Borrower::None(), IsFalse());
EXPECT_THAT(Borrower::None().arena(), IsNull());
}
TEST(Borrower, Allocator) {
google::protobuf::Arena arena;
EXPECT_THAT(Borrower::Allocator(NewDeleteAllocator()), IsFalse());
EXPECT_THAT(Borrower::Allocator(ArenaAllocator(&arena)), IsTrue());
}
TEST(Borrower, Arena) {
google::protobuf::Arena arena;
EXPECT_THAT(Borrower::Arena(&arena), IsTrue());
EXPECT_EQ(Borrower::Arena(&arena).arena(), &arena);
}
TEST(Borrower, ReferenceCount) {
auto* refcount = new common_internal::ReferenceCounted();
EXPECT_THAT(Borrower::ReferenceCount(refcount), IsTrue());
EXPECT_THAT(Borrower::ReferenceCount(refcount).arena(), IsNull());
common_internal::StrongUnref(refcount);
}
TEST(Borrower, Equality) {
google::protobuf::Arena arena1;
google::protobuf::Arena arena2;
EXPECT_EQ(Borrower::None(), Borrower::None());
EXPECT_EQ(Borrower::Allocator(NewDeleteAllocator()), Borrower::None());
EXPECT_EQ(Borrower::Arena(&arena1), Borrower::Arena(&arena1));
EXPECT_NE(Borrower::Arena(&arena1), Borrower::None());
EXPECT_NE(Borrower::None(), Borrower::Arena(&arena1));
EXPECT_NE(Borrower::Arena(&arena1), Borrower::Arena(&arena2));
EXPECT_EQ(Borrower::Allocator(ArenaAllocator(&arena1)),
Borrower::Arena(&arena1));
}
TEST(OwnerBorrower, CopyConstruct) {
auto* refcount = new common_internal::ReferenceCounted();
Owner owner1 = Owner::ReferenceCount(refcount);
common_internal::StrongUnref(refcount);
Owner owner2(owner1);
Borrower borrower(owner1);
EXPECT_EQ(owner1, owner2);
EXPECT_EQ(owner1, borrower);
EXPECT_EQ(borrower, owner1);
}
TEST(OwnerBorrower, MoveConstruct) {
auto* refcount = new common_internal::ReferenceCounted();
Owner owner1 = Owner::ReferenceCount(refcount);
common_internal::StrongUnref(refcount);
Owner owner2(std::move(owner1));
Borrower borrower(owner2);
EXPECT_EQ(owner2, borrower);
EXPECT_EQ(borrower, owner2);
}
TEST(OwnerBorrower, CopyAssign) {
auto* refcount = new common_internal::ReferenceCounted();
Owner owner1 = Owner::ReferenceCount(refcount);
common_internal::StrongUnref(refcount);
Owner owner2;
owner2 = owner1;
Borrower borrower(owner1);
EXPECT_EQ(owner1, owner2);
EXPECT_EQ(owner1, borrower);
EXPECT_EQ(borrower, owner1);
}
TEST(OwnerBorrower, MoveAssign) {
auto* refcount = new common_internal::ReferenceCounted();
Owner owner1 = Owner::ReferenceCount(refcount);
common_internal::StrongUnref(refcount);
Owner owner2;
owner2 = std::move(owner1);
Borrower borrower(owner2);
EXPECT_EQ(owner2, borrower);
EXPECT_EQ(borrower, owner2);
}
TEST(Unique, ToAddress) {
Unique<bool> unique;
EXPECT_EQ(cel::to_address(unique), nullptr);
unique = AllocateUnique<bool>(NewDeleteAllocator());
EXPECT_EQ(cel::to_address(unique), unique.operator->());
}
class OwnedTest : public TestWithParam<MemoryManagement> {
public:
Allocator<> GetAllocator() {
switch (GetParam()) {
case MemoryManagement::kPooling:
return ArenaAllocator(&arena_);
case MemoryManagement::kReferenceCounting:
return NewDeleteAllocator();
}
}
private:
google::protobuf::Arena arena_;
};
TEST_P(OwnedTest, Default) {
Owned<Data> owned;
EXPECT_FALSE(owned);
EXPECT_EQ(cel::to_address(owned), nullptr);
EXPECT_FALSE(owned != nullptr);
EXPECT_FALSE(nullptr != owned);
}
class TestData final : public Data {
public:
using InternalArenaConstructable_ = void;
using DestructorSkippable_ = void;
TestData() noexcept : Data() {}
explicit TestData(absl::Nullable<google::protobuf::Arena*> arena) noexcept
: Data(arena) {}
};
TEST_P(OwnedTest, AllocateSharedData) {
auto owned = AllocateShared<TestData>(GetAllocator());
EXPECT_EQ(owned->GetArena(), GetAllocator().arena());
EXPECT_EQ(Owner(owned).arena(), GetAllocator().arena());
EXPECT_EQ(Borrower(owned).arena(), GetAllocator().arena());
}
TEST_P(OwnedTest, AllocateSharedMessageLite) {
auto owned = AllocateShared<google::protobuf::Value>(GetAllocator());
EXPECT_EQ(owned->GetArena(), GetAllocator().arena());
EXPECT_EQ(Owner(owned).arena(), GetAllocator().arena());
EXPECT_EQ(Borrower(owned).arena(), GetAllocator().arena());
}
TEST_P(OwnedTest, WrapSharedData) {
auto owned =
WrapShared(google::protobuf::Arena::Create<TestData>(GetAllocator().arena()));
EXPECT_EQ(owned->GetArena(), GetAllocator().arena());
EXPECT_EQ(Owner(owned).arena(), GetAllocator().arena());
EXPECT_EQ(Borrower(owned).arena(), GetAllocator().arena());
}
TEST_P(OwnedTest, WrapSharedMessageLite) {
auto owned = WrapShared(
google::protobuf::Arena::Create<google::protobuf::Value>(GetAllocator().arena()));
EXPECT_EQ(owned->GetArena(), GetAllocator().arena());
EXPECT_EQ(Owner(owned).arena(), GetAllocator().arena());
EXPECT_EQ(Borrower(owned).arena(), GetAllocator().arena());
}
TEST_P(OwnedTest, SharedFromUniqueData) {
auto owned = Owned(AllocateUnique<TestData>(GetAllocator()));
EXPECT_EQ(owned->GetArena(), GetAllocator().arena());
EXPECT_EQ(Owner(owned).arena(), GetAllocator().arena());
EXPECT_EQ(Borrower(owned).arena(), GetAllocator().arena());
}
TEST_P(OwnedTest, SharedFromUniqueMessageLite) {
auto owned = Owned(AllocateUnique<google::protobuf::Value>(GetAllocator()));
EXPECT_EQ(owned->GetArena(), GetAllocator().arena());
EXPECT_EQ(Owner(owned).arena(), GetAllocator().arena());
EXPECT_EQ(Borrower(owned).arena(), GetAllocator().arena());
}
TEST_P(OwnedTest, CopyConstruct) {
auto owned = Owned(AllocateUnique<TestData>(GetAllocator()));
EXPECT_EQ(owned->GetArena(), GetAllocator().arena());
Owned<TestData> copied_owned(owned);
EXPECT_EQ(copied_owned->GetArena(), GetAllocator().arena());
}
TEST_P(OwnedTest, MoveConstruct) {
auto owned = Owned(AllocateUnique<TestData>(GetAllocator()));
EXPECT_EQ(owned->GetArena(), GetAllocator().arena());
Owned<TestData> moved_owned(std::move(owned));
EXPECT_EQ(moved_owned->GetArena(), GetAllocator().arena());
}
TEST_P(OwnedTest, CopyConstructOther) {
auto owned = Owned(AllocateUnique<TestData>(GetAllocator()));
EXPECT_EQ(owned->GetArena(), GetAllocator().arena());
Owned<Data> copied_owned(owned);
EXPECT_EQ(copied_owned->GetArena(), GetAllocator().arena());
}
TEST_P(OwnedTest, MoveConstructOther) {
auto owned = Owned(AllocateUnique<TestData>(GetAllocator()));
EXPECT_EQ(owned->GetArena(), GetAllocator().arena());
Owned<Data> moved_owned(std::move(owned));
EXPECT_EQ(moved_owned->GetArena(), GetAllocator().arena());
}
TEST_P(OwnedTest, ConstructBorrowed) {
auto owned = Owned(AllocateUnique<TestData>(GetAllocator()));
EXPECT_EQ(owned->GetArena(), GetAllocator().arena());
Owned<TestData> borrowed_owned(Borrowed<TestData>{owned});
EXPECT_EQ(borrowed_owned->GetArena(), GetAllocator().arena());
}
TEST_P(OwnedTest, ConstructOwner) {
auto owned = Owned(AllocateUnique<TestData>(GetAllocator()));
EXPECT_EQ(owned->GetArena(), GetAllocator().arena());
Owned<TestData> owner_owned(Owner(owned), cel::to_address(owned));
EXPECT_EQ(owner_owned->GetArena(), GetAllocator().arena());
}
TEST_P(OwnedTest, ConstructNullPtr) {
Owned<Data> owned(nullptr);
EXPECT_EQ(owned, nullptr);
}
TEST_P(OwnedTest, CopyAssign) {
auto owned = Owned(AllocateUnique<TestData>(GetAllocator()));
EXPECT_EQ(owned->GetArena(), GetAllocator().arena());
Owned<TestData> copied_owned;
copied_owned = owned;
EXPECT_EQ(copied_owned->GetArena(), GetAllocator().arena());
}
TEST_P(OwnedTest, MoveAssign) {
auto owned = Owned(AllocateUnique<TestData>(GetAllocator()));
EXPECT_EQ(owned->GetArena(), GetAllocator().arena());
Owned<TestData> moved_owned;
moved_owned = std::move(owned);
EXPECT_EQ(moved_owned->GetArena(), GetAllocator().arena());
}
TEST_P(OwnedTest, CopyAssignOther) {
auto owned = Owned(AllocateUnique<TestData>(GetAllocator()));
EXPECT_EQ(owned->GetArena(), GetAllocator().arena());
Owned<Data> copied_owned;
copied_owned = owned;
EXPECT_EQ(copied_owned->GetArena(), GetAllocator().arena());
}
TEST_P(OwnedTest, MoveAssignOther) {
auto owned = Owned(AllocateUnique<TestData>(GetAllocator()));
EXPECT_EQ(owned->GetArena(), GetAllocator().arena());
Owned<Data> moved_owned;
moved_owned = std::move(owned);
EXPECT_EQ(moved_owned->GetArena(), GetAllocator().arena());
}
TEST_P(OwnedTest, AssignBorrowed) {
auto owned = Owned(AllocateUnique<TestData>(GetAllocator()));
EXPECT_EQ(owned->GetArena(), GetAllocator().arena());
Owned<TestData> borrowed_owned;
borrowed_owned = Borrowed<TestData>{owned};
EXPECT_EQ(borrowed_owned->GetArena(), GetAllocator().arena());
}
TEST_P(OwnedTest, AssignUnique) {
Owned<TestData> owned;
owned = AllocateUnique<TestData>(GetAllocator());
EXPECT_EQ(owned->GetArena(), GetAllocator().arena());
}
TEST_P(OwnedTest, AssignNullPtr) {
auto owned = Owned(AllocateUnique<TestData>(GetAllocator()));
EXPECT_EQ(owned->GetArena(), GetAllocator().arena());
EXPECT_TRUE(owned);
owned = nullptr;
EXPECT_FALSE(owned);
}
INSTANTIATE_TEST_SUITE_P(
OwnedTest, OwnedTest,
::testing::Values(MemoryManagement::kPooling,
MemoryManagement::kReferenceCounting));
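// Mirrors OwnedTest: the same memory-management matrix, exercised through
// Borrowed<T>.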
class BorrowedTest : public TestWithParam<MemoryManagement> {
public:
Allocator<> GetAllocator() {
switch (GetParam()) {
case MemoryManagement::kPooling:
return ArenaAllocator(&arena_);
case MemoryManagement::kReferenceCounting:
return NewDeleteAllocator();
}
}
private:
google::protobuf::Arena arena_;
};
TEST_P(BorrowedTest, Default) {
Borrowed<Data> borrowed;
EXPECT_FALSE(borrowed);
EXPECT_EQ(cel::to_address(borrowed), nullptr);
EXPECT_FALSE(borrowed != nullptr);
EXPECT_FALSE(nullptr != borrowed);
}
TEST_P(BorrowedTest, CopyConstruct) {
auto owned = Owned(AllocateUnique<TestData>(GetAllocator()));
auto borrowed = Borrowed(owned);
EXPECT_EQ(borrowed->GetArena(), GetAllocator().arena());
Borrowed<TestData> copied_borrowed(borrowed);
EXPECT_EQ(copied_borrowed->GetArena(), GetAllocator().arena());
}
TEST_P(BorrowedTest, MoveConstruct) {
auto owned = Owned(AllocateUnique<TestData>(GetAllocator()));
auto borrowed = Borrowed(owned);
EXPECT_EQ(borrowed->GetArena(), GetAllocator().arena());
Borrowed<TestData> moved_borrowed(std::move(borrowed));
EXPECT_EQ(moved_borrowed->GetArena(), GetAllocator().arena());
}
TEST_P(BorrowedTest, CopyConstructOther) {
auto owned = Owned(AllocateUnique<TestData>(GetAllocator()));
auto borrowed = Borrowed(owned);
EXPECT_EQ(borrowed->GetArena(), GetAllocator().arena());
Borrowed<Data> copied_borrowed(borrowed);
EXPECT_EQ(copied_borrowed->GetArena(), GetAllocator().arena());
}
TEST_P(BorrowedTest, MoveConstructOther) {
auto owned = Owned(AllocateUnique<TestData>(GetAllocator()));
auto borrowed = Borrowed(owned);
EXPECT_EQ(borrowed->GetArena(), GetAllocator().arena());
Borrowed<Data> moved_borrowed(std::move(borrowed));
EXPECT_EQ(moved_borrowed->GetArena(), GetAllocator().arena());
}
TEST_P(BorrowedTest, ConstructNullPtr) {
Borrowed<TestData> borrowed(nullptr);
EXPECT_FALSE(borrowed);
}
TEST_P(BorrowedTest, CopyAssign) {
auto owned = Owned(AllocateUnique<TestData>(GetAllocator()));
auto borrowed = Borrowed(owned);
EXPECT_EQ(borrowed->GetArena(), GetAllocator().arena());
Borrowed<TestData> copied_borrowed;
copied_borrowed = borrowed;
EXPECT_EQ(copied_borrowed->GetArena(), GetAllocator().arena());
}
TEST_P(BorrowedTest, MoveAssign) {
auto owned = Owned(AllocateUnique<TestData>(GetAllocator()));
auto borrowed = Borrowed(owned);
EXPECT_EQ(borrowed->GetArena(), GetAllocator().arena());
Borrowed<TestData> moved_borrowed;
moved_borrowed = std::move(borrowed);
EXPECT_EQ(moved_borrowed->GetArena(), GetAllocator().arena());
}
TEST_P(BorrowedTest, CopyAssignOther) {
auto owned = Owned(AllocateUnique<TestData>(GetAllocator()));
auto borrowed = Borrowed(owned);
EXPECT_EQ(borrowed->GetArena(), GetAllocator().arena());
Borrowed<Data> copied_borrowed;
copied_borrowed = borrowed;
EXPECT_EQ(copied_borrowed->GetArena(), GetAllocator().arena());
}
TEST_P(BorrowedTest, MoveAssignOther) {
auto owned = Owned(AllocateUnique<TestData>(GetAllocator()));
auto borrowed = Borrowed(owned);
EXPECT_EQ(borrowed->GetArena(), GetAllocator().arena());
Borrowed<Data> moved_borrowed;
moved_borrowed = std::move(borrowed);
EXPECT_EQ(moved_borrowed->GetArena(), GetAllocator().arena());
}
TEST_P(BorrowedTest, AssignOwned) {
auto owned = Owned(AllocateUnique<TestData>(GetAllocator()));
EXPECT_EQ(owned->GetArena(), GetAllocator().arena());
Borrowed<Data> borrowed = owned;
EXPECT_EQ(borrowed->GetArena(), GetAllocator().arena());
}
TEST_P(BorrowedTest, AssignNullPtr) {
Borrowed<TestData> borrowed;
borrowed = nullptr;
EXPECT_FALSE(borrowed);
}
INSTANTIATE_TEST_SUITE_P(
BorrowedTest, BorrowedTest,
::testing::Values(MemoryManagement::kPooling,
MemoryManagement::kReferenceCounting));
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/common/memory.cc | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/common/memory_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
7801b87e-4f22-4f70-8c3c-dc16e2b87f00 | cpp | tensorflow/tensorflow | eager_operation | tensorflow/core/common_runtime/eager/eager_operation.cc | tensorflow/core/common_runtime/eager/eager_operation_test.cc | #include "tensorflow/core/common_runtime/eager/eager_operation.h"
#include <memory>
#include <string>
#include <variant>
#include <vector>
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/types/span.h"
#include "tensorflow/c/eager/abstract_operation.h"
#include "tensorflow/c/eager/abstract_tensor_handle.h"
#include "tensorflow/c/eager/immediate_execution_tensor_handle.h"
#include "tensorflow/c/tf_tensor_internal.h"
#include "tensorflow/core/common_runtime/eager/attr_builder.h"
#include "tensorflow/core/common_runtime/eager/custom_device.h"
#include "tensorflow/core/common_runtime/input_colocation_exemption_registry.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/platform/casts.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/host_info.h"
namespace tensorflow {
void EagerOperation::Clear() {
for (ImmediateExecutionTensorHandle* h : inputs_) {
h->Unref();
}
inputs_.clear();
custom_device_tensor_handles_count_ = 0;
ClearInferenceState();
}
Status EagerOperation::SetAttrValue(const char* attr_name,
const AttrValue& value) {
MutableAttrs()->Set(attr_name, value);
return absl::OkStatus();
}
Status EagerOperation::SetAttrString(const char* attr_name, const char* data,
size_t length) {
MutableAttrs()->Set(attr_name, StringPiece(data, length));
return absl::OkStatus();
}
Status EagerOperation::SetAttrInt(const char* attr_name, int64_t value) {
MutableAttrs()->Set(attr_name, static_cast<int64_t>(value));
return absl::OkStatus();
}
Status EagerOperation::SetAttrFloat(const char* attr_name, float value) {
MutableAttrs()->Set(attr_name, value);
return absl::OkStatus();
}
Status EagerOperation::SetAttrBool(const char* attr_name, bool value) {
MutableAttrs()->Set(attr_name, value);
return absl::OkStatus();
}
Status EagerOperation::SetAttrType(const char* attr_name, DataType value) {
MutableAttrs()->Set(attr_name, value);
return absl::OkStatus();
}
Status EagerOperation::SetAttrShape(const char* attr_name, const int64_t* dims,
const int num_dims) {
if (num_dims > TensorShape::MaxDimensions()) {
return errors::InvalidArgument("Value specified for `", attr_name, "` has ",
num_dims,
" dimensions which is over the limit of ",
TensorShape::MaxDimensions(), ".");
}
TensorShapeProto proto;
if (num_dims < 0) {
proto.set_unknown_rank(true);
} else {
for (int d = 0; d < num_dims; ++d) {
proto.add_dim()->set_size(dims[d]);
}
}
MutableAttrs()->Set(attr_name, proto);
return absl::OkStatus();
}
Status EagerOperation::SetAttrFunction(const char* attr_name,
const AbstractOperation* value) {
AttrValue attr_value;
NameAttrList* func = attr_value.mutable_func();
func->set_name(value->Name());
auto* value_operation = down_cast<const EagerOperation*>(value);
value_operation->Attrs().FillAttrValueMap(func->mutable_attr());
MutableAttrs()->Set(attr_name, attr_value);
return absl::OkStatus();
}
Status EagerOperation::SetAttrFunctionName(const char* attr_name,
const char* data, size_t length) {
AttrValue attr_value;
NameAttrList* func = attr_value.mutable_func();
func->set_name(data, length);
MutableAttrs()->Set(attr_name, attr_value);
return absl::OkStatus();
}
Status EagerOperation::SetAttrTensor(const char* attr_name,
AbstractTensorInterface* tensor) {
Tensor t = TensorFromInterface(tensor);
MutableAttrs()->Set(attr_name, t);
return absl::OkStatus();
}
Status EagerOperation::SetAttrStringList(const char* attr_name,
const void* const* values,
const size_t* lengths,
int num_values) {
std::vector<StringPiece> v(num_values);
for (int i = 0; i < num_values; ++i) {
v[i] = StringPiece(static_cast<const char*>(values[i]), lengths[i]);
}
MutableAttrs()->Set(attr_name, v);
return absl::OkStatus();
}
Status EagerOperation::SetAttrFloatList(const char* attr_name,
const float* values, int num_values) {
MutableAttrs()->Set(attr_name,
gtl::ArraySlice<const float>(values, num_values));
return absl::OkStatus();
}
Status EagerOperation::SetAttrIntList(const char* attr_name,
const int64_t* values, int num_values) {
  MutableAttrs()->Set(attr_name,
                      gtl::ArraySlice<const int64_t>(values, num_values));
return absl::OkStatus();
}
Status EagerOperation::SetAttrTypeList(const char* attr_name,
const DataType* values, int num_values) {
MutableAttrs()->Set(attr_name,
gtl::ArraySlice<const DataType>(values, num_values));
return absl::OkStatus();
}
Status EagerOperation::SetAttrBoolList(const char* attr_name,
const unsigned char* values,
int num_values) {
std::unique_ptr<bool[]> b(new bool[num_values]);
for (int i = 0; i < num_values; ++i) {
b[i] = values[i];
}
MutableAttrs()->Set(attr_name,
gtl::ArraySlice<const bool>(b.get(), num_values));
return absl::OkStatus();
}
Status EagerOperation::SetAttrShapeList(const char* attr_name,
const int64_t** dims,
const int* num_dims, int num_values) {
std::unique_ptr<TensorShapeProto[]> proto(new TensorShapeProto[num_values]);
for (int i = 0; i < num_values; ++i) {
const auto num_dims_i = num_dims[i];
if (num_dims_i > TensorShape::MaxDimensions()) {
return errors::InvalidArgument(
strings::StrCat("Value specified for `", attr_name, "` has ",
num_dims_i, " dimensions which is over the limit of ",
TensorShape::MaxDimensions(), "."));
}
if (num_dims_i < 0) {
proto[i].set_unknown_rank(true);
} else {
const int64_t* dims_i = dims[i];
auto proto_i = &proto[i];
for (int d = 0; d < num_dims_i; ++d) {
proto_i->add_dim()->set_size(dims_i[d]);
}
}
}
MutableAttrs()->Set(
attr_name, gtl::ArraySlice<TensorShapeProto>(proto.get(), num_values));
return absl::OkStatus();
}
Status EagerOperation::SetAttrFunctionList(
const char* attr_name, absl::Span<const AbstractOperation*> values) {
size_t num_values = values.size();
std::unique_ptr<NameAttrList[]> funcs(new NameAttrList[num_values]);
  for (size_t i = 0; i < num_values; ++i) {
auto* value_operation = down_cast<const EagerOperation*>(values[i]);
funcs[i].set_name(value_operation->Name());
value_operation->Attrs().FillAttrValueMap(funcs[i].mutable_attr());
}
MutableAttrs()->Set(
attr_name, gtl::ArraySlice<const NameAttrList>(funcs.get(), num_values));
return absl::OkStatus();
}
const OpDef* EagerOperation::GetOpDef(Status* status) {
const tensorflow::OpDef* op_def = OpDef();
if (op_def) return op_def;
*status = OpDefForOp(Name(), &op_def);
return op_def;
}
Status EagerOperation::InputLength(const char* input_name, int* length) {
Status status;
const tensorflow::OpDef* op_def = GetOpDef(&status);
if (!status.ok()) {
return status;
}
AttrValueMap attrs;
Attrs().FillAttrValueMap(&attrs);
NameRangeMap name_ranges;
TF_RETURN_IF_ERROR(
NameRangesForNode(AttrSlice(&attrs), *op_def, &name_ranges, nullptr));
auto iter = name_ranges.find(input_name);
if (iter == name_ranges.end()) {
return errors::InvalidArgument("Input '", input_name, "' not found");
}
*length = iter->second.second - iter->second.first;
return absl::OkStatus();
}
absl::Span<ImmediateExecutionTensorHandle* const> EagerOperation::GetInputs()
const {
return absl::MakeSpan(
reinterpret_cast<ImmediateExecutionTensorHandle* const*>(inputs_.data()),
inputs_.size());
}
Status EagerOperation::OutputLength(const char* output_name, int* length) {
Status status;
const tensorflow::OpDef* op_def = GetOpDef(&status);
if (!status.ok()) {
return status;
}
AttrValueMap attrs;
Attrs().FillAttrValueMap(&attrs);
NameRangeMap name_ranges;
TF_RETURN_IF_ERROR(
NameRangesForNode(AttrSlice(&attrs), *op_def, nullptr, &name_ranges));
auto iter = name_ranges.find(output_name);
if (iter == name_ranges.end()) {
return errors::InvalidArgument("Output '", output_name, "' not found");
}
*length = iter->second.second - iter->second.first;
return absl::OkStatus();
}
Status EagerOperation::AddInput(AbstractTensorHandle* input) {
ImmediateExecutionTensorHandle* h =
down_cast<ImmediateExecutionTensorHandle*>(input);
if (CustomDeviceTensorHandle::classof(h)) {
custom_device_tensor_handles_count_++;
}
AddTensorHandle(h);
return MaybeInferSingleInputAttrs(h);
}
Status EagerOperation::AddInputList(
absl::Span<AbstractTensorHandle* const> inputs) {
for (auto& input : inputs) {
if (CustomDeviceTensorHandle::classof(input)) {
custom_device_tensor_handles_count_++;
}
ImmediateExecutionTensorHandle* h =
down_cast<ImmediateExecutionTensorHandle*>(input);
AddTensorHandle(h);
}
return InferInputListAttrs(inputs.size());
}
Status EagerOperation::SetInput(size_t index,
ImmediateExecutionTensorHandle* input) {
if (index >= inputs_.size()) {
return errors::InvalidArgument("Index >= inputs.size: %d >= %d", index,
inputs_.size());
}
auto* previous = inputs_[index];
if (CustomDeviceTensorHandle::classof(previous)) {
custom_device_tensor_handles_count_--;
}
if (CustomDeviceTensorHandle::classof(input)) {
custom_device_tensor_handles_count_++;
}
input->Ref();
inputs_[index] = input;
previous->Unref();
return absl::OkStatus();
}
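// Re-initializes this operation for op/function `op` on `device_name`,
// verifying that `op` names a registered primitive op or, for non-remote
// execution, a function known to the context's function library.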
Status EagerOperation::Reset(
const char* op, const char* device_name, bool remote,
EagerExecutor* executor,
const absl::optional<EagerFunctionParams> eager_func_params) {
DCHECK(inputs_.empty());
ClearInferenceState();
bool is_function = false;
TF_RETURN_IF_ERROR(AttrTypeMapForOp(op, &attr_types_, &is_function));
colocation_exempt_ = is_function;
if (!is_function) {
const auto& exempt_ops = InputColocationExemptionRegistry::Global()->Get();
colocation_exempt_ = exempt_ops.find(op) != exempt_ops.end();
TF_RETURN_IF_ERROR(OpDefForOp(op, &op_def_));
} else if (!remote) {
const FunctionLibraryDefinition* func_lib_def;
if (eager_func_params.has_value() &&
eager_func_params.value().func_lib_def_override != nullptr) {
func_lib_def = eager_func_params.value().func_lib_def_override;
} else {
func_lib_def = ctx_.FuncLibDef();
}
if (func_lib_def->Find(op) == nullptr) {
return absl::NotFoundError(absl::StrCat(
"'", op,
"' is neither a type of a primitive operation nor a name "
"of a function registered in binary running on ",
port::Hostname(),
". Make sure the operation or function is "
"registered in the binary running in this process."));
}
}
attrs_.Reset(op);
stack_trace_.reset();
is_function_ = is_function;
cancellation_manager_ = nullptr;
executor_ = executor ? executor : &ctx_.Executor();
if (eager_func_params.has_value()) {
eager_func_params_ = eager_func_params;
}
op_name_ = op;
return SetDeviceName(device_name);
}
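// Infers the dtype attribute named by this input's type_attr from `handle`,
// skipping attributes already inferred. Inference is abandoned (state
// cleared) if the op definition expects a list argument at this position,
// since list attributes cannot be inferred one handle at a time.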
Status EagerOperation::MaybeInferSingleInputAttrs(
ImmediateExecutionTensorHandle* handle) {
if (!op_def_) return absl::OkStatus();
const auto& input_def = op_def_->input_arg(inference_arg_idx_++);
if (!input_def.number_attr().empty() || !input_def.type_list_attr().empty()) {
ClearInferenceState();
return absl::OkStatus();
}
const std::string& type_attr = input_def.type_attr();
if (!type_attr.empty() &&
inference_attrs_.find(type_attr) == inference_attrs_.end()) {
MutableAttrs()->Set(type_attr, handle->DataType());
inference_attrs_.insert(type_attr);
}
return absl::OkStatus();
}
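// Records the list length (number_attr) and element dtype (type_attr) for a
// homogeneous input list, skipping attributes already inferred.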
void EagerOperation::InferSingleTypeInputListAttrs(
const OpDef::ArgDef& input_def, const DataType dtype, int num_inputs) {
if (inference_attrs_.find(input_def.number_attr()) ==
inference_attrs_.end()) {
MutableAttrs()->Set(input_def.number_attr(), num_inputs);
inference_attrs_.insert(input_def.number_attr());
}
if (inference_attrs_.find(input_def.type_attr()) == inference_attrs_.end()) {
MutableAttrs()->Set(input_def.type_attr(), dtype);
inference_attrs_.insert(input_def.type_attr());
}
}
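// Records the per-element dtypes (type_list_attr) for a heterogeneous input
// list, skipping the attribute if already inferred.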
void EagerOperation::InferMixedTypeInputListAttrs(
const OpDef::ArgDef& input_def, const std::vector<DataType>& dtypes) {
if (inference_attrs_.find(input_def.type_list_attr()) ==
inference_attrs_.end()) {
MutableAttrs()->Set(
input_def.type_list_attr(),
gtl::ArraySlice<const DataType>(dtypes.data(), dtypes.size()));
inference_attrs_.insert(input_def.type_list_attr());
}
}
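// Infers list-related attributes for the `num_inputs` handles just appended,
// dispatching on whether the op definition declares a mixed-type, typed and
// counted, or counted-only list argument.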
Status EagerOperation::InferInputListAttrs(int num_inputs) {
if (!op_def_) return absl::OkStatus();
int start = inference_arg_idx_;
const auto& input_def = op_def_->input_arg(inference_arg_idx_++);
if (!input_def.type_list_attr().empty()) {
std::vector<DataType> dtypes(num_inputs);
for (int i = 0; i < num_inputs; ++i) {
dtypes[i] = inputs_[start + i]->DataType();
}
InferMixedTypeInputListAttrs(input_def, dtypes);
} else if (!input_def.type_attr().empty() &&
!input_def.number_attr().empty()) {
InferSingleTypeInputListAttrs(input_def, inputs_[start]->DataType(),
num_inputs);
} else if (!input_def.number_attr().empty()) {
if (inference_attrs_.find(input_def.number_attr()) ==
inference_attrs_.end()) {
MutableAttrs()->Set(input_def.number_attr(), num_inputs);
inference_attrs_.insert(input_def.number_attr());
}
} else {
return errors::InvalidArgument("Invalid input list definition");
}
return absl::OkStatus();
}
Status EagerOperation::TensorHandleInputs(
const absl::InlinedVector<TensorHandle*, 4>** inputs) const {
if (TF_PREDICT_TRUE(!HasCustomDeviceInput())) {
*inputs = reinterpret_cast<const absl::InlinedVector<TensorHandle*, 4>*>(
&inputs_);
return absl::OkStatus();
} else {
return errors::Internal("The operation unexpectedly had custom devices.");
}
}
Status EagerOperation::MutableTensorHandleInputs(
absl::InlinedVector<TensorHandle*, 4>** inputs) {
if (TF_PREDICT_TRUE(!HasCustomDeviceInput())) {
*inputs =
reinterpret_cast<absl::InlinedVector<TensorHandle*, 4>*>(&inputs_);
return absl::OkStatus();
} else {
return errors::Internal("The operation unexpectedly had custom devices.");
}
}
Status EagerOperation::SetDeviceName(const char* c_name) {
string name(c_name != nullptr ? c_name : "");
if (name != last_set_device_name_) {
if (!DeviceNameUtils::ParseFullName(name, &device_parsed_name_)) {
return errors::InvalidArgument("Malformed device specification '", name,
"' in eager op: ", DebugString());
}
last_set_device_name_ = name;
device_name_ = DeviceNameUtils::ParsedNameToString(device_parsed_name_);
device_ = kVariantDeviceNull;
}
return absl::OkStatus();
}
bool EagerOperation::IsLocal() const {
if (ctx_.remote_device_mgr() == nullptr) return true;
if (!device_parsed_name_.has_job && !device_parsed_name_.has_replica &&
!device_parsed_name_.has_task)
return true;
auto& host_cpu_name = ctx_.HostCPU()->parsed_name();
return device_parsed_name_.job == host_cpu_name.job &&
device_parsed_name_.replica == host_cpu_name.replica &&
device_parsed_name_.task == host_cpu_name.task;
}
string VariantDeviceDebugString(VariantDevice device) {
if (device == kVariantDeviceNull) {
return "[]";
} else if (std::holds_alternative<CustomDevice*>(device)) {
return std::get<CustomDevice*>(device)->name();
} else {
return std::get<Device*>(device)->DebugString();
}
}
const AbstractOpAttrs* EagerOperation::GetOpAttrs() const { return &attrs_; }
void EagerOperation::AddAttrs(const AbstractOpAttrs* op_attrs) {
attrs_.CopyAttributes(*(down_cast<const AttrBuilder*>(op_attrs)));
}
string EagerOperation::DebugString() const {
string out;
VLOG(1) << "EagerOperation::DebugString() over " << this;
strings::StrAppend(&out, "Name: ", Name(), "\n");
strings::StrAppend(&out, "Device Name: [", device_name_, "]\n");
strings::StrAppend(&out, "Device: ", VariantDeviceDebugString(Device()),
"\n");
for (const auto& input : inputs_) {
VLOG(1) << "Input ptr: " << input;
strings::StrAppend(&out, "Input: ", input->DebugString(), "\n");
}
NodeDef ndef;
Attrs().FillAttrValueMap(ndef.mutable_attr());
strings::StrAppend(&out, "Attrs: ", ndef.DebugString(), "\n");
return out;
}
void EagerOperation::AddTensorHandle(ImmediateExecutionTensorHandle* h) {
h->Ref();
inputs_.push_back(h);
attrs_.NumInputs(static_cast<int>(inputs_.size()));
}
} | #include "tensorflow/core/common_runtime/eager/eager_operation.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
TEST(EagerOperationTest, DeviceName) {
StaticDeviceMgr device_mgr(DeviceFactory::NewDevice(
"CPU", {}, "/job:localhost/replica:0/task:0/device:CPU:0"));
auto ctx = new EagerContext(
SessionOptions(),
tensorflow::ContextDevicePlacementPolicy::DEVICE_PLACEMENT_SILENT, false,
&device_mgr, false, nullptr, nullptr, nullptr,
true);
auto op = new EagerOperation(ctx);
TF_ASSERT_OK(op->SetDeviceName("/device:DONTHAVE"));
EXPECT_EQ("/device:DONTHAVE:*", op->DeviceName());
TF_ASSERT_OK(op->SetDeviceName(""));
EXPECT_EQ("", op->DeviceName());
TF_ASSERT_OK(op->SetDeviceName("/job:localhost"));
EXPECT_EQ("/job:localhost", op->DeviceName());
EXPECT_NE(absl::OkStatus(), op->SetDeviceName("/not/a/valid/name"));
delete op;
ctx->Unref();
}
TEST(EagerOperationTest, EagerFunctionParamsAndStepId) {
StaticDeviceMgr device_mgr(DeviceFactory::NewDevice(
"CPU", {}, "/job:localhost/replica:0/task:0/device:CPU:0"));
auto ctx = new EagerContext(
SessionOptions(),
tensorflow::ContextDevicePlacementPolicy::DEVICE_PLACEMENT_SILENT, false,
&device_mgr, false, nullptr, nullptr, nullptr,
true);
tensorflow::FunctionDef function_def;
CHECK(tensorflow::protobuf::TextFormat::ParseFromString(
" signature {"
" name: 'DummyFunction'"
" }",
&function_def));
TF_ASSERT_OK(ctx->AddFunctionDef(function_def));
auto op = new EagerOperation(ctx);
EXPECT_FALSE(op->eager_func_params().has_value());
string device_name = "/job:localhost/replica:0/task:0/device:CPU:0";
TF_ASSERT_OK(op->SetDeviceName(device_name.c_str()));
TF_ASSERT_OK(op->Reset("DummyFunction", device_name.c_str()));
op->SetStepId(255);
EXPECT_EQ(op->eager_func_params()->step_id.value(), 255);
delete op;
ctx->Unref();
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/eager/eager_operation.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/eager/eager_operation_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
483b923a-2dce-43d2-aaf9-c1676862cd7b | cpp | google/arolla | dict_types | arolla/qtype/dict/dict_types.cc | arolla/qtype/dict/dict_types_test.cc | #include "arolla/qtype/dict/dict_types.h"
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include "absl/base/no_destructor.h"
#include "absl/base/thread_annotations.h"
#include "absl/container/flat_hash_map.h"
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "absl/synchronization/mutex.h"
#include "arolla/dense_array/qtype/types.h"
#include "arolla/qtype/derived_qtype.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/qtype/tuple_qtype.h"
#include "arolla/util/bytes.h"
#include "arolla/util/fast_dynamic_downcast_final.h"
#include "arolla/util/text.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla {
namespace {
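// Process-wide registry mapping a key QType to the QType of the
// corresponding key-to-row dict.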
class KeyToRowDictTypeRegistry {
public:
static KeyToRowDictTypeRegistry& instance() {
static absl::NoDestructor<KeyToRowDictTypeRegistry> result;
return *result;
}
absl::Status Register(QTypePtr key_qtype, QTypePtr dict_qtype) {
absl::MutexLock l(&lock_);
auto [iter, inserted] = dict_types_.emplace(key_qtype, dict_qtype);
if (!inserted) {
return absl::FailedPreconditionError(absl::StrFormat(
"attempt to register %s dict twice", dict_qtype->name()));
}
return absl::OkStatus();
}
absl::StatusOr<QTypePtr> Get(QTypePtr qtype) {
absl::ReaderMutexLock l(&lock_);
auto iter = dict_types_.find(qtype);
if (iter == dict_types_.end()) {
return absl::NotFoundError(
absl::StrFormat("no dict with %s keys found", qtype->name()));
}
return iter->second;
}
private:
absl::Mutex lock_;
absl::flat_hash_map<QTypePtr, QTypePtr> dict_types_ ABSL_GUARDED_BY(lock_);
};
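// Dict<key, value> is a derived QType whose base is the tuple
// (key-to-row dict over `key`, DenseArray of `value`).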
class DictQType final : public BasicDerivedQType {
public:
DictQType(std::string name, QTypePtr dict_type, QTypePtr values_array_type)
: BasicDerivedQType(ConstructorArgs{
.name = std::move(name),
.base_qtype = MakeTupleQType({dict_type, values_array_type}),
.qtype_specialization_key = "::arolla::DictQType",
}) {}
};
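// Lazily creates and caches one DictQType per (key, value) QType pair.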
class DictQTypeRegistry {
public:
static DictQTypeRegistry& instance() {
static absl::NoDestructor<DictQTypeRegistry> result;
return *result;
}
absl::StatusOr<QTypePtr> GetQType(QTypePtr key_type, QTypePtr value_type) {
{
absl::ReaderMutexLock guard(&lock_);
if (const auto it = registry_.find({key_type, value_type});
it != registry_.end()) {
return it->second.get();
}
}
ASSIGN_OR_RETURN(QTypePtr dict_type, GetKeyToRowDictQType(key_type));
ASSIGN_OR_RETURN(QTypePtr values_array_type,
GetDenseArrayQTypeByValueQType(value_type));
auto kv_dict_type = std::make_unique<DictQType>(
absl::StrFormat("Dict<%s,%s>", key_type->name(), value_type->name()),
dict_type, values_array_type);
absl::MutexLock guard(&lock_);
return registry_
.emplace(std::make_pair(key_type, value_type), std::move(kv_dict_type))
.first->second.get();
}
private:
absl::Mutex lock_;
absl::flat_hash_map<std::pair<QTypePtr, QTypePtr>, std::unique_ptr<QType>>
registry_ ABSL_GUARDED_BY(lock_);
};
}
namespace dict_impl {
void RegisterKeyToRowDictQType(QTypePtr key_type, QTypePtr dict_type) {
auto status =
KeyToRowDictTypeRegistry::instance().Register(key_type, dict_type);
DCHECK_OK(status);
}
}
absl::StatusOr<QTypePtr> GetKeyToRowDictQType(QTypePtr key_type) {
return KeyToRowDictTypeRegistry::instance().Get(key_type);
}
bool IsKeyToRowDictQType(QTypePtr type) {
if (type->value_qtype() == nullptr) {
return false;
}
ASSIGN_OR_RETURN(QTypePtr dict_type,
GetKeyToRowDictQType(type->value_qtype()), false);
return dict_type == type;
}
absl::StatusOr<QTypePtr> GetDictQType(QTypePtr key_type, QTypePtr value_type) {
return DictQTypeRegistry::instance().GetQType(key_type, value_type);
}
const QType* GetDictKeyQTypeOrNull(QTypePtr dict_type) {
auto d = fast_dynamic_downcast_final<const DictQType*>(dict_type);
return d != nullptr ? d->type_fields()[0].GetType()->value_qtype() : nullptr;
}
const QType* GetDictValueQTypeOrNull(QTypePtr dict_type) {
auto d = fast_dynamic_downcast_final<const DictQType*>(dict_type);
return d != nullptr ? d->type_fields()[1].GetType()->value_qtype() : nullptr;
}
bool IsDictQType(const QType* qtype) {
return fast_dynamic_downcast_final<const DictQType*>(qtype) != nullptr;
}
template struct QTypeTraits<KeyToRowDict<bool>>;
template struct QTypeTraits<KeyToRowDict<int32_t>>;
template struct QTypeTraits<KeyToRowDict<int64_t>>;
template struct QTypeTraits<KeyToRowDict<Bytes>>;
template struct QTypeTraits<KeyToRowDict<Text>>;
} | #include "arolla/qtype/dict/dict_types.h"
#include <cstdint>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/status/status.h"
#include "absl/status/status_matchers.h"
#include "arolla/dense_array/qtype/types.h"
#include "arolla/qtype/base_types.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/qtype/typed_slot.h"
#include "arolla/util/bytes.h"
#include "arolla/util/repr.h"
#include "arolla/util/unit.h"
namespace arolla {
namespace {
using ::absl_testing::IsOkAndHolds;
using ::absl_testing::StatusIs;
using ::testing::ElementsAre;
using ::testing::Eq;
using ::testing::HasSubstr;
using ::testing::Ne;
using ::testing::Property;
TEST(DictTypes, GetKeyToRowDictQType) {
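  // The bare call forces instantiation of the INT64 key-to-row dict QType so
  // the dynamic lookups below can find it.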
GetKeyToRowDictQType<int64_t>();
EXPECT_THAT(GetKeyToRowDictQType<int64_t>()->value_qtype(),
Eq(GetQType<int64_t>()));
EXPECT_THAT(GetKeyToRowDictQType(GetQType<int64_t>()),
IsOkAndHolds(GetQType<KeyToRowDict<int64_t>>()));
EXPECT_THAT(GetKeyToRowDictQType(GetQType<int64_t>()),
IsOkAndHolds(GetKeyToRowDictQType<int64_t>()));
EXPECT_THAT(GetKeyToRowDictQType(GetQType<KeyToRowDict<int64_t>>()),
StatusIs(absl::StatusCode::kNotFound,
HasSubstr("no dict with DICT_INT64 keys found")));
}
TEST(DictTypes, GetDictQType) {
GetKeyToRowDictQType<int64_t>();
GetDenseArrayQType<float>();
GetDenseArrayQType<double>();
ASSERT_OK_AND_ASSIGN(QTypePtr int_to_float_dict,
GetDictQType(GetQType<int64_t>(), GetQType<float>()));
EXPECT_THAT(int_to_float_dict->name(), Eq("Dict<INT64,FLOAT32>"));
EXPECT_THAT(GetDictKeyQTypeOrNull(int_to_float_dict),
Eq(GetQType<int64_t>()));
EXPECT_THAT(GetDictValueQTypeOrNull(int_to_float_dict),
Eq(GetQType<float>()));
EXPECT_THAT(
int_to_float_dict->type_fields(),
ElementsAre(
Property(&TypedSlot::GetType, Eq(GetKeyToRowDictQType<int64_t>())),
Property(&TypedSlot::GetType, Eq(GetDenseArrayQType<float>()))));
EXPECT_THAT(GetDictQType(GetQType<int64_t>(), GetQType<float>()),
IsOkAndHolds(Eq(int_to_float_dict)));
EXPECT_THAT(GetDictQType(GetQType<int64_t>(), GetQType<double>()),
IsOkAndHolds(Ne(int_to_float_dict)));
}
TEST(DictTypes, IsDictQType) {
GetKeyToRowDictQType<int64_t>();
GetDenseArrayQType<float>();
GetDenseArrayQType<Unit>();
{
ASSERT_OK_AND_ASSIGN(QTypePtr int_to_float_dict,
GetDictQType(GetQType<int64_t>(), GetQType<float>()));
ASSERT_TRUE(IsDictQType(int_to_float_dict));
}
{
ASSERT_OK_AND_ASSIGN(QTypePtr int_to_unit_dict,
GetDictQType(GetQType<int64_t>(), GetQType<Unit>()));
ASSERT_TRUE(IsDictQType(int_to_unit_dict));
}
{
EXPECT_THAT(GetDictQType(GetQType<Unit>(), GetQType<float>()),
StatusIs(absl::StatusCode::kNotFound,
HasSubstr("no dict with UNIT keys found")));
}
{
EXPECT_THAT(GetDictQType(GetQType<float>(), GetQType<float>()),
StatusIs(absl::StatusCode::kNotFound,
HasSubstr("no dict with FLOAT32 keys found")));
}
}
TEST(DictTypes, ReprTraits) {
EXPECT_EQ(Repr(KeyToRowDict<float>{}), "dict{}");
EXPECT_EQ(Repr(KeyToRowDict<float>{{{0.5, 1}}}), "dict{0.5:int64{1},}");
EXPECT_EQ(Repr(KeyToRowDict<float>{{{0.5, 1}, {2.5, 3}}}),
"dict{0.5:int64{1},2.5:int64{3},}");
EXPECT_EQ(Repr(KeyToRowDict<Bytes>{{{Bytes("key"), 2}}}),
"dict{b'key':int64{2},}");
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/qtype/dict/dict_types.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/qtype/dict/dict_types_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
b694c9ed-3864-4e50-b8e1-72fee39e53f7 | cpp | tensorflow/tensorflow | optimizer_cse | tensorflow/core/graph/optimizer_cse.cc | tensorflow/core/graph/optimizer_cse_test.cc | #include "tensorflow/core/graph/optimizer_cse.h"
#include <algorithm>
#include <cstring>
#include <iostream>
#include <unordered_map>
#include <utility>
#include <vector>
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/graph/algorithm.h"
#include "tensorflow/core/graph/graph_node_util.h"
#include "tensorflow/core/lib/gtl/map_util.h"
#include "tensorflow/core/lib/hash/hash.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/protobuf.h"
namespace tensorflow {
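// Common-subexpression elimination over a Graph: nodes that compute the same
// value (same stateless op, attrs, and inputs) are merged into one.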
class OptimizerCSE {
public:
explicit OptimizerCSE(Graph* g) : g_(g) {}
bool Optimize(const std::function<bool(const Node*)>& consider_fn);
private:
static size_t NodeHash(const Node* n);
static bool Equivalent(const Node* a, const Node* b,
AttrSlice::Scratch* scratch);
Graph* g_;
};
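// Gathers `n`'s control edges and data inputs. Control edges are sorted;
// data inputs are additionally sorted for commutative ops so operand order
// does not affect hashing or equivalence checks.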
static void FillInputs(
const Node* n, absl::InlinedVector<const Node*, 4UL>* control_edges,
absl::InlinedVector<std::pair<const Node*, int>, 4UL>* in) {
DCHECK_EQ(in->size(), n->num_inputs());
control_edges->clear();
for (const Edge* e : n->in_edges()) {
if (e->IsControlEdge()) {
control_edges->push_back(e->src());
} else {
(*in)[e->dst_input()] = std::make_pair(e->src(), e->src_output());
}
}
std::sort(control_edges->begin(), control_edges->end());
if (n->op_def().is_commutative()) {
std::sort(in->begin(), in->end());
}
}
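// Reserved hash value: Hasher::hash() remaps it so 0 can serve as a sentinel.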
static constexpr size_t kIllegalNodeHash = 0;
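// Accumulates strings, integers, and deterministically serialized protos into
// a single 64-bit hash.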
class Hasher {
public:
uint64 hash() { return h_ == kIllegalNodeHash ? kIllegalNodeHash + 1 : h_; }
void MixString(const string& s) { h_ = Hash64(s.data(), s.size(), h_); }
void MixInteger(size_t z) { h_ = Hash64Combine(h_, z); }
void MixProto(const protobuf::MessageLite& msg) {
msg.ByteSizeLong();
HashingOutputStream hasher;
{
protobuf::io::CodedOutputStream stream(&hasher);
stream.EnableAliasing(true);
stream.SetSerializationDeterministic(true);
msg.SerializeWithCachedSizes(&stream);
}
h_ = Hash64Combine(h_, hasher.hash());
}
private:
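  // A ZeroCopyOutputStream that hashes all bytes written through it, letting
  // protos be hashed via serialization without building an intermediate
  // string.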
class HashingOutputStream : public protobuf::io::ZeroCopyOutputStream {
public:
static constexpr size_t kBufSize = 228;
static constexpr uint64 kDefaultSeed = 2570847921467975139ULL;
bool Next(void** data, int* size) override {
if (i_ == kBufSize) {
Mix(buf_, kBufSize);
*data = buf_;
*size = kBufSize;
} else {
*data = buf_ + i_;
*size = kBufSize - i_;
}
i_ = kBufSize;
return true;
}
void BackUp(int count) override { i_ -= count; }
int64_t ByteCount() const override { return byte_count_; }
bool WriteAliasedRaw(const void* void_data, int size) override {
const char* data = static_cast<const char*>(void_data);
const auto remaining = kBufSize - i_;
if (remaining > 0) {
if (size < remaining) {
memcpy(buf_ + i_, data, size);
i_ += size;
return true;
}
memcpy(buf_ + i_, data, remaining);
i_ = kBufSize;
data += remaining;
size -= remaining;
}
if (i_ == kBufSize) {
Mix(buf_, kBufSize);
i_ = 0;
}
while (size >= kBufSize) {
Mix(data, kBufSize);
data += kBufSize;
size -= kBufSize;
}
memcpy(buf_, data, size);
i_ = size;
return true;
}
bool AllowsAliasing() const override { return true; }
uint64 hash() {
if (i_ != 0) {
Mix(buf_, i_);
i_ = 0;
}
return h_;
}
private:
void Mix(const char* p, size_t n) {
byte_count_ += n;
h_ = Hash64(p, n, h_);
}
char buf_[kBufSize];
int i_ = 0;
int64_t byte_count_ = 0;
uint64 h_ = kDefaultSeed;
};
uint64 h_ = HashingOutputStream::kDefaultSeed;
};
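// Hashes a node's op type, output dtypes, and input edges; on non-Android
// builds the attribute map is mixed in order-independently.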
size_t OptimizerCSE::NodeHash(const Node* n) {
Hasher hasher;
hasher.MixString(n->type_string());
hasher.MixInteger(n->output_types().size());
for (DataType dt : n->output_types()) {
hasher.MixInteger(dt);
}
hasher.MixInteger(n->num_inputs());
absl::InlinedVector<const Node*, 4UL> control_edges;
absl::InlinedVector<std::pair<const Node*, int>, 4UL> in(n->num_inputs());
FillInputs(n, &control_edges, &in);
for (const auto& edge : in) {
hasher.MixInteger(edge.first->id());
hasher.MixInteger(edge.second);
}
#if !defined(__ANDROID__)
size_t attr_hashes = 0;
for (const auto& attr : n->attrs()) {
Hasher h;
h.MixString(attr.first);
h.MixProto(attr.second);
attr_hashes = Hash64CombineUnordered(attr_hashes, h.hash());
}
hasher.MixInteger(attr_hashes);
#endif
return hasher.hash();
}
static bool HasRefInput(const Node* n) {
for (auto dt : n->input_types()) {
if (IsRefType(dt)) return true;
}
return false;
}
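// Nodes are equivalent iff they run the same stateless op with equal attrs
// and identical control/data inputs; ref-typed inputs disqualify merging.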
bool OptimizerCSE::Equivalent(const Node* a, const Node* b,
AttrSlice::Scratch* scratch) {
if (a->type_string() != b->type_string()) return false;
if (a->op_def().is_stateful()) return false;
if (HasRefInput(a) || HasRefInput(b)) return false;
if (!a->attrs().EqualAttrs(b->attrs(), scratch)) return false;
if (a->num_inputs() != b->num_inputs()) return false;
const int N_in = a->num_inputs();
absl::InlinedVector<const Node*, 4UL> a_control_edges;
absl::InlinedVector<const Node*, 4UL> b_control_edges;
absl::InlinedVector<std::pair<const Node*, int>, 4UL> a_in(N_in);
absl::InlinedVector<std::pair<const Node*, int>, 4UL> b_in(N_in);
FillInputs(a, &a_control_edges, &a_in);
FillInputs(b, &b_control_edges, &b_in);
if (a_in != b_in) return false;
if (a_control_edges != b_control_edges) return false;
return true;
}
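// Walks nodes in reverse post order, hashing each one; a node that collides
// with and is equivalent to an earlier candidate has its out-edges rewired to
// the candidate and is then removed. Placeholder variants are never merged.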
bool OptimizerCSE::Optimize(
const std::function<bool(const Node*)>& consider_fn) {
std::vector<Node*> order;
GetReversePostOrder(*g_, &order, NodeComparatorID());
std::unordered_map<size_t, Node*> available;
bool changed = false;
AttrSlice::Scratch scratch;
for (Node* n : order) {
if (!n->IsOp()) continue;
if (n->type_string() == "Placeholder" ||
n->type_string() == "PlaceholderV2" ||
n->type_string() == "PlaceholderWithDefault") {
continue;
}
if (consider_fn != nullptr && !consider_fn(n)) continue;
size_t h = NodeHash(n);
Node** candidate = &available[h];
if (*candidate == nullptr) {
*candidate = n;
} else if (Equivalent(*candidate, n, &scratch)) {
VLOG(1) << "CSE: equivalent: " << (*candidate)->name() << " and "
<< n->name();
for (const Edge* e : n->out_edges()) {
g_->AddEdge(*candidate, e->src_output(), e->dst(), e->dst_input());
}
MergeDebugInfo(NodeDebugInfo(*n), *candidate);
g_->RemoveNode(n);
changed = true;
}
}
return changed;
}
bool OptimizeCSE(Graph* g,
const std::function<bool(const Node*)>& consider_fn) {
OptimizerCSE opt(g);
return opt.Optimize(consider_fn);
}
} | #include "tensorflow/core/graph/optimizer_cse.h"
#include <utility>
#include <vector>
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/testlib.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/lib/random/simple_philox.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/lib/strings/stringprintf.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
#include "tsl/platform/protobuf.h"
namespace tensorflow {
namespace {
static void InitGraph(const string& s, Graph* graph) {
GraphDef graph_def;
auto parser = protobuf::TextFormat::Parser();
CHECK(parser.MergeFromString(s, &graph_def)) << s;
GraphConstructorOptions opts;
TF_CHECK_OK(ConvertGraphDefToGraph(opts, graph_def, graph));
}
class OptimizerCSETest : public ::testing::Test {
public:
OptimizerCSETest() : graph_(OpRegistry::Global()) {}
void InitGraph(const string& s) {
::tensorflow::InitGraph(s, &graph_);
original_ = CanonicalGraphString(&graph_);
}
static bool IncludeNode(const Node* n) { return n->IsOp(); }
static string EdgeId(const Node* n, int index) {
if (index == 0) {
return n->name();
} else if (index == Graph::kControlSlot) {
return strings::StrCat(n->name(), ":control");
} else {
return strings::StrCat(n->name(), ":", index);
}
}
string CanonicalGraphString(Graph* g) {
std::vector<string> nodes;
std::vector<string> edges;
for (const Node* n : g->nodes()) {
if (IncludeNode(n)) {
nodes.push_back(strings::StrCat(n->name(), "(", n->type_string(), ")"));
}
}
for (const Edge* e : g->edges()) {
if (IncludeNode(e->src()) && IncludeNode(e->dst())) {
edges.push_back(strings::StrCat(EdgeId(e->src(), e->src_output()), "->",
EdgeId(e->dst(), e->dst_input())));
}
}
std::sort(nodes.begin(), nodes.end());
std::sort(edges.begin(), edges.end());
return strings::StrCat(absl::StrJoin(nodes, ";"), "|",
absl::StrJoin(edges, ";"));
}
string DoCSE(const std::function<bool(const Node*)>& consider_fn = nullptr) {
string before = CanonicalGraphString(&graph_);
LOG(ERROR) << "Before rewrites: " << before;
OptimizeCSE(&graph_, consider_fn);
string result = CanonicalGraphString(&graph_);
LOG(ERROR) << "After rewrites: " << result;
return result;
}
const string& OriginalGraph() const { return original_; }
Graph graph_;
string original_;
};
REGISTER_OP("Input").Output("o: float").SetIsStateful();
TEST_F(OptimizerCSETest, Simple) {
InitGraph(
"node { name: 'A' op: 'Input'}"
"node { name: 'B' op: 'Input'}"
"node { name: 'C' op: 'Mul' attr { key: 'T' value { type: DT_FLOAT } }"
" input: ['A', 'B'] }"
"node { name: 'D' op: 'Mul' attr { key: 'T' value { type: DT_FLOAT } }"
" input: ['A', 'B'] }");
EXPECT_EQ(DoCSE(),
"A(Input);B(Input);C(Mul)|"
"A->C;B->C:1");
}
TEST_F(OptimizerCSETest, Simple_ThreeEquivalent) {
InitGraph(
"node { name: 'A' op: 'Input'}"
"node { name: 'B' op: 'Input'}"
"node { name: 'C' op: 'Mul' attr { key: 'T' value { type: DT_FLOAT } }"
" input: ['A', 'B'] }"
"node { name: 'D' op: 'Mul' attr { key: 'T' value { type: DT_FLOAT } }"
" input: ['A', 'B'] }"
"node { name: 'E' op: 'Mul' attr { key: 'T' value { type: DT_FLOAT } }"
" input: ['A', 'B'] }");
EXPECT_EQ(DoCSE(),
"A(Input);B(Input);C(Mul)|"
"A->C;B->C:1");
}
TEST_F(OptimizerCSETest, Simple_WithFixups) {
InitGraph(
"node { name: 'A' op: 'Input'}"
"node { name: 'B' op: 'Input'}"
"node { name: 'C' op: 'Mul' attr { key: 'T' value { type: DT_FLOAT } }"
" input: ['A', 'B'] }"
"node { name: 'D' op: 'Mul' attr { key: 'T' value { type: DT_FLOAT } }"
" input: ['A', 'B'] }"
"node { name: 'E' op: 'Mul' attr { key: 'T' value { type: DT_FLOAT } }"
" input: ['C', 'D'] }");
EXPECT_EQ(DoCSE(),
"A(Input);B(Input);C(Mul);E(Mul)|"
"A->C;B->C:1;C->E;C->E:1");
}
TEST_F(OptimizerCSETest, Simple_Commutative) {
InitGraph(
"node { name: 'A' op: 'Input'}"
"node { name: 'B' op: 'Input'}"
"node { name: 'C' op: 'Mul' attr { key: 'T' value { type: DT_FLOAT } }"
" input: ['A', 'B'] }"
"node { name: 'D' op: 'Mul' attr { key: 'T' value { type: DT_FLOAT } }"
" input: ['B', 'A'] }");
EXPECT_EQ(DoCSE(),
"A(Input);B(Input);C(Mul)|"
"A->C;B->C:1");
}
static bool IsNotMultiply(const Node* n) { return n->type_string() != "Mul"; }
TEST_F(OptimizerCSETest, Simple_Filtered) {
InitGraph(
"node { name: 'A' op: 'Input'}"
"node { name: 'B' op: 'Input'}"
"node { name: 'C' op: 'Mul' attr { key: 'T' value { type: DT_FLOAT } }"
" input: ['A', 'B'] }"
"node { name: 'D' op: 'Mul' attr { key: 'T' value { type: DT_FLOAT } }"
" input: ['B', 'A'] }");
EXPECT_EQ(DoCSE(IsNotMultiply), OriginalGraph());
}
TEST_F(OptimizerCSETest, Simple_NotCommutative) {
InitGraph(
"node { name: 'A' op: 'Input'}"
"node { name: 'B' op: 'Input'}"
"node { name: 'C' op: 'Sub' attr { key: 'T' value { type: DT_FLOAT } }"
" input: ['A', 'B'] }"
"node { name: 'D' op: 'Sub' attr { key: 'T' value { type: DT_FLOAT } }"
" input: ['B', 'A'] }");
EXPECT_EQ(DoCSE(), OriginalGraph());
}
TEST_F(OptimizerCSETest, NotEquivalent_Ops) {
InitGraph(
"node { name: 'A' op: 'Input'}"
"node { name: 'B' op: 'Input'}"
"node { name: 'C' op: 'Mul' attr { key: 'T' value { type: DT_FLOAT } }"
" input: ['A', 'B'] }"
"node { name: 'D' op: 'Sub' attr { key: 'T' value { type: DT_FLOAT } }"
" input: ['A', 'B'] }");
EXPECT_EQ(DoCSE(), OriginalGraph());
}
TEST_F(OptimizerCSETest, Simple_SameOps_SameAttrs1) {
InitGraph(
"node { name: 'A' op: 'Input'}"
"node { name: 'B' op: 'Input'}"
"node { name: 'C' op: 'Mul' attr { key: 'T' value { type: DT_FLOAT } }"
" input: ['A', 'B'] attr { key: 'shape'"
" value { shape: { dim: { size: 37 name: 'SAME_NAME' } } } } }"
"node { name: 'D' op: 'Mul' attr { key: 'T' value { type: DT_FLOAT } }"
" input: ['A', 'B'] attr { key: 'shape'"
" value { shape: { dim: { size: 37 name: 'SAME_NAME' } } } } }");
EXPECT_EQ(DoCSE(),
"A(Input);B(Input);C(Mul)|"
"A->C;B->C:1");
}
TEST_F(OptimizerCSETest, Simple_SameOps_SameAttrs2) {
InitGraph(
"node { name: 'A' op: 'Input'}"
"node { name: 'B' op: 'Input'}"
"node { name: 'C' op: 'Mul' attr { key: 'T' value { type: DT_FLOAT } }"
" input: ['A', 'B']"
" attr { key: 'a' value { i: 3 } }"
" attr { key: 't' value { type: DT_INT32 } } }"
"node { name: 'D' op: 'Mul' attr { key: 'T' value { type: DT_FLOAT } }"
" input: ['A', 'B']"
" attr { key: 't' value { type: DT_INT32 } }"
" attr { key: 'a' value { i: 3 } } }");
EXPECT_EQ(DoCSE(),
"A(Input);B(Input);C(Mul)|"
"A->C;B->C:1");
}
TEST_F(OptimizerCSETest, SameConstants) {
InitGraph(
"node { name: 'A' op: 'Const' "
" attr { key: 'dtype' value { type: DT_INT32 } }"
" attr { key: 'value' value {"
" tensor { dtype: DT_INT32 tensor_shape { dim { size: 1 } } "
" int_val: 0 } } } }"
"node { name: 'B' op: 'Const' "
" attr { key: 'dtype' value { type: DT_INT32 } }"
" attr { key: 'value' value {"
" tensor { dtype: DT_INT32 tensor_shape { dim { size: 1 } } "
" int_val: 0 } } } }"
"node { name: 'D' op: 'Mul' attr { key: 'T' value { type: DT_INT32 } }"
" input: ['A', 'B'] }");
EXPECT_EQ(DoCSE(),
"A(Const);D(Mul)|"
"A->D;A->D:1");
}
TEST_F(OptimizerCSETest, DifferentConstants) {
InitGraph(
"node { name: 'A' op: 'Const' "
" attr { key: 'dtype' value { type: DT_INT32 } }"
" attr { key: 'value' value {"
" tensor { dtype: DT_INT32 tensor_shape { dim { size: 1 } } "
" int_val: 0 } } } }"
"node { name: 'B' op: 'Const' "
" attr { key: 'dtype' value { type: DT_INT32 } }"
" attr { key: 'value' value {"
" tensor { dtype: DT_INT32 tensor_shape { dim { size: 1 } } "
" int_val: 100000 } } } }"
"node { name: 'D' op: 'Mul' attr { key: 'T' value { type: DT_INT32 } }"
" input: ['A', 'B'] }");
EXPECT_EQ(DoCSE(),
"A(Const);B(Const);D(Mul)|"
"A->D;B->D:1");
}
TEST_F(OptimizerCSETest, SameOps_DifferentAttrs1) {
InitGraph(
"node { name: 'A' op: 'Input'}"
"node { name: 'B' op: 'Input'}"
"node { name: 'C' op: 'Mul' attr { key: 'T' value { type: DT_FLOAT } }"
" input: ['A', 'B']"
" attr { key: 'a' value { i: 3 } }"
" attr { key: 't' value { type: DT_INT32 } } }"
"node { name: 'D' op: 'Mul' attr { key: 'T' value { type: DT_FLOAT } }"
" input: ['A', 'B']"
" attr { key: 't' value { type: DT_INT32 } }"
" attr { key: 'a' value { i: 4 } } }");
EXPECT_EQ(DoCSE(), OriginalGraph());
}
TEST_F(OptimizerCSETest, SameOps_DifferentAttrs2) {
InitGraph(
"node { name: 'A' op: 'Input'}"
"node { name: 'B' op: 'Input'}"
"node { name: 'C' op: 'Mul' attr { key: 'T' value { type: DT_FLOAT } }"
" input: ['A', 'B']"
" attr { key: 'a' value { i: 3 } }"
" attr { key: 't' value { type: DT_FLOAT } } }"
"node { name: 'D' op: 'Mul' attr { key: 'T' value { type: DT_FLOAT } }"
" input: ['A', 'B']"
" attr { key: 't' value { type: DT_INT32 } }"
" attr { key: 'a' value { i: 3 } } }");
EXPECT_EQ(DoCSE(), OriginalGraph());
}
TEST_F(OptimizerCSETest, NotEquivalent_Inputs) {
InitGraph(
"node { name: 'A' op: 'Input'}"
"node { name: 'B' op: 'Input'}"
"node { name: 'C' op: 'Input'}"
"node { name: 'D' op: 'Mul' attr { key: 'T' value { type: DT_FLOAT } }"
" input: ['A', 'B'] }"
"node { name: 'E' op: 'Mul' attr { key: 'T' value { type: DT_FLOAT } }"
" input: ['A', 'C'] }");
EXPECT_EQ(DoCSE(), OriginalGraph());
}
TEST_F(OptimizerCSETest, Constant_Dedup) {
Tensor a(DT_FLOAT, TensorShape({1}));
a.flat<float>()(0) = 1.0;
Tensor b(DT_DOUBLE, TensorShape({1}));
b.flat<double>()(0) = 1.0;
Tensor c(DT_FLOAT, TensorShape({1, 1}));
c.flat<float>()(0) = 1.0;
Tensor d(DT_FLOAT, TensorShape({1}));
d.flat<float>()(0) = 2.0;
Graph g(OpRegistry::Global());
for (const auto& val : {a, b, c, d, d, c, b, a}) {
test::graph::Constant(&g, val);
}
GraphDef gdef;
test::graph::ToGraphDef(&g, &gdef);
InitGraph(tsl::LegacyUnredactedDebugString(gdef));
EXPECT_EQ(OriginalGraph(),
"n/_0(Const);n/_1(Const);n/_2(Const);n/_3(Const);"
"n/_4(Const);n/_5(Const);n/_6(Const);n/_7(Const)|");
std::vector<string> nodes = str_util::Split(DoCSE(), ";|");
std::set<string> node_set(nodes.begin(), nodes.end());
EXPECT_EQ(node_set.count("n/_0(Const)") + node_set.count("n/_7(Const)"), 1);
EXPECT_EQ(node_set.count("n/_1(Const)") + node_set.count("n/_6(Const)"), 1);
EXPECT_EQ(node_set.count("n/_2(Const)") + node_set.count("n/_5(Const)"), 1);
EXPECT_EQ(node_set.count("n/_3(Const)") + node_set.count("n/_4(Const)"), 1);
}
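// Benchmarks CSE over a randomly wired graph of `op_nodes` Mul nodes fed by
// ten inputs.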
void BM_CSE(::testing::benchmark::State& state) {
const int op_nodes = state.range(0);
string s;
for (int in = 0; in < 10; in++) {
s += strings::Printf("node { name: 'in%04d' op: 'Input'}", in);
}
random::PhiloxRandom philox(301, 17);
random::SimplePhilox rnd(&philox);
for (int op = 0; op < op_nodes; op++) {
s += strings::Printf(
"node { name: 'op%04d' op: 'Mul' attr { key: 'T' value { "
"type: DT_FLOAT } } input: ['in%04d', 'in%04d' ] }",
op, rnd.Uniform(10), rnd.Uniform(10));
}
bool first = true;
for (auto i : state) {
state.PauseTiming();
Graph* graph = new Graph(OpRegistry::Global());
InitGraph(s, graph);
int N = graph->num_node_ids();
if (first) {
state.SetLabel(strings::StrCat("Per graph node. Nodes: ", N));
first = false;
}
{
state.ResumeTiming();
OptimizeCSE(graph, nullptr);
state.PauseTiming();
}
delete graph;
state.ResumeTiming();
}
}
BENCHMARK(BM_CSE)->Arg(1000)->Arg(10000);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/graph/optimizer_cse.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/graph/optimizer_cse_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ff9599d4-95bd-458f-a1e1-7cabad607e71 | cpp | tensorflow/tensorflow | take_dataset_op | tensorflow/core/kernels/data/take_dataset_op.cc | tensorflow/core/kernels/data/take_dataset_op_test.cc | #include "tensorflow/core/kernels/data/take_dataset_op.h"
#include <cstdint>
#include <memory>
#include <vector>
#include "absl/status/status.h"
#include "tensorflow/core/data/name_utils.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/partial_tensor_shape.h"
#include "tensorflow/core/framework/tensor.h"
namespace tensorflow {
namespace data {
constexpr const char* const TakeDatasetOp::kDatasetType;
constexpr const char* const TakeDatasetOp::kInputDataset;
constexpr const char* const TakeDatasetOp::kCount;
constexpr const char* const TakeDatasetOp::kOutputTypes;
constexpr const char* const TakeDatasetOp::kOutputShapes;
constexpr char kCurIndex[] = "i";
constexpr char kInputImplEmpty[] = "input_impl_empty";
constexpr char kEmptyTake[] = "EmptyTake";
constexpr char kFiniteTake[] = "FiniteTake";
TakeDataset::TakeDataset(OpKernelContext* ctx, int64_t count,
const DatasetBase* input)
: DatasetBase(DatasetContext(ctx)), count_(count), input_(input) {
input_->Ref();
}
TakeDataset::TakeDataset(DatasetContext::Params params, int64_t count,
const DatasetBase* input)
: DatasetBase(DatasetContext(std::move(params))),
count_(count),
input_(input) {
input_->Ref();
random_indexing_compatible_ = absl::OkStatus();
if (input_ != nullptr) {
random_indexing_compatible_ = input_->RandomIndexingCompatible();
}
}
TakeDataset::~TakeDataset() { input_->Unref(); }
const DataTypeVector& TakeDataset::output_dtypes() const {
return input_->output_dtypes();
}
const std::vector<PartialTensorShape>& TakeDataset::output_shapes() const {
return input_->output_shapes();
}
string TakeDataset::DebugString() const {
return name_utils::DatasetDebugString(TakeDatasetOp::kDatasetType);
}
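// Cardinality is the smaller of the input's cardinality and count_; unknown
// cardinality propagates, an infinite input yields count_, and an infinite
// count_ yields the input's cardinality.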
int64_t TakeDataset::CardinalityInternal(CardinalityOptions options) const {
int64_t n = input_->Cardinality(options);
if (n == kUnknownCardinality) {
return kUnknownCardinality;
}
if (n == kInfiniteCardinality) {
return count_;
} else if (count_ == kInfiniteCardinality) {
return n;
}
return std::min(n, count_);
}
Status TakeDataset::InputDatasets(
std::vector<const DatasetBase*>* inputs) const {
inputs->push_back(input_);
return absl::OkStatus();
}
Status TakeDataset::CheckExternalState() const {
return input_->CheckExternalState();
}
Status TakeDataset::Get(OpKernelContext* ctx, int64 index,
std::vector<Tensor>* out_tensors) const {
TF_RETURN_IF_ERROR(CheckRandomAccessCompatible(index));
return input_->Get(ctx, index, out_tensors);
}
absl::Status TakeDataset::RandomIndexingCompatible() const {
return random_indexing_compatible_;
}
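// Iterator for count_ == 0: reports end of sequence immediately.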
class TakeDataset::EmptyIterator : public DatasetIterator<TakeDataset> {
public:
explicit EmptyIterator(const Params& params)
: DatasetIterator<TakeDataset>(params) {}
bool SymbolicCheckpointCompatible() const override { return true; }
Status GetNextInternal(IteratorContext* ctx, std::vector<Tensor>* out_tensors,
bool* end_of_sequence) override {
*end_of_sequence = true;
return absl::OkStatus();
}
protected:
std::shared_ptr<model::Node> CreateNode(
IteratorContext* ctx, model::Node::Args args) const override {
return model::MakeKnownRatioNode(std::move(args),
1);
}
Status SaveInternal(SerializationContext* ctx,
IteratorStateWriter* writer) override {
return absl::OkStatus();
}
Status RestoreInternal(IteratorContext* ctx,
IteratorStateReader* reader) override {
return absl::OkStatus();
}
};
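// Iterator for nonzero count_: forwards up to count_ elements from the input
// (all of them when count_ is negative), then drops the input iterator.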
class TakeDataset::FiniteIterator : public DatasetIterator<TakeDataset> {
public:
explicit FiniteIterator(const Params& params)
: DatasetIterator<TakeDataset>(params), i_(0) {}
bool SymbolicCheckpointCompatible() const override { return true; }
Status Initialize(IteratorContext* ctx) override {
return dataset()->input_->MakeIterator(ctx, this, prefix(), &input_impl_);
}
Status GetNextInternal(IteratorContext* ctx, std::vector<Tensor>* out_tensors,
bool* end_of_sequence) override {
mutex_lock l(mu_);
if (!input_impl_) {
*end_of_sequence = true;
return absl::OkStatus();
}
while (dataset()->count_ < 0 || i_ < dataset()->count_) {
TF_RETURN_IF_ERROR(
input_impl_->GetNext(ctx, out_tensors, end_of_sequence));
if (!*end_of_sequence) {
++i_;
return absl::OkStatus();
}
break;
}
*end_of_sequence = true;
input_impl_.reset();
return absl::OkStatus();
}
protected:
std::shared_ptr<model::Node> CreateNode(
IteratorContext* ctx, model::Node::Args args) const override {
    return model::MakeKnownRatioNode(std::move(args), /*ratio=*/1);
}
Status SaveInternal(SerializationContext* ctx,
IteratorStateWriter* writer) override {
mutex_lock l(mu_);
TF_RETURN_IF_ERROR(writer->WriteScalar(prefix(), kCurIndex, i_));
TF_RETURN_IF_ERROR(writer->WriteScalar(prefix(), kInputImplEmpty,
static_cast<int64_t>(!input_impl_)));
if (input_impl_) {
TF_RETURN_IF_ERROR(SaveInput(ctx, writer, input_impl_));
}
return absl::OkStatus();
}
Status RestoreInternal(IteratorContext* ctx,
IteratorStateReader* reader) override {
mutex_lock l(mu_);
TF_RETURN_IF_ERROR(reader->ReadScalar(prefix(), kCurIndex, &i_));
int64_t input_empty;
TF_RETURN_IF_ERROR(
reader->ReadScalar(prefix(), kInputImplEmpty, &input_empty));
if (!static_cast<bool>(input_empty)) {
TF_RETURN_IF_ERROR(RestoreInput(ctx, reader, input_impl_));
} else {
input_impl_.reset();
}
return absl::OkStatus();
}
private:
mutex mu_;
int64_t i_ TF_GUARDED_BY(mu_);
std::unique_ptr<IteratorBase> input_impl_ TF_GUARDED_BY(mu_);
};
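// Choose the iterator implementation based on count_.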
std::unique_ptr<IteratorBase> TakeDataset::MakeIteratorInternal(
const string& prefix) const {
if (count_ == 0) {
return std::make_unique<EmptyIterator>(EmptyIterator::Params{
this, name_utils::IteratorPrefix(kEmptyTake, prefix)});
} else {
return std::make_unique<FiniteIterator>(FiniteIterator::Params{
this, name_utils::IteratorPrefix(kFiniteTake, prefix)});
}
}
Status TakeDataset::AsGraphDefInternal(SerializationContext* ctx,
DatasetGraphDefBuilder* b,
Node** output) const {
Node* input_graph_node = nullptr;
TF_RETURN_IF_ERROR(b->AddInputDataset(ctx, input_, &input_graph_node));
Node* count = nullptr;
TF_RETURN_IF_ERROR(b->AddScalar(count_, &count));
TF_RETURN_IF_ERROR(b->AddDataset(this, {input_graph_node, count}, output));
return absl::OkStatus();
}
TakeDatasetOp::TakeDatasetOp(OpKernelConstruction* ctx)
: UnaryDatasetOpKernel(ctx) {}
void TakeDatasetOp::MakeDataset(OpKernelContext* ctx, DatasetBase* input,
DatasetBase** output) {
int64_t count;
OP_REQUIRES_OK(ctx, ParseScalarArgument<int64_t>(ctx, kCount, &count));
*output = new TakeDataset(ctx, count, input);
}
namespace {
REGISTER_KERNEL_BUILDER(Name("TakeDataset").Device(DEVICE_CPU), TakeDatasetOp);
}
}
} | #include "tensorflow/core/kernels/data/take_dataset_op.h"
#include "tensorflow/core/data/dataset_test_base.h"
namespace tensorflow {
namespace data {
namespace {
constexpr char kNodeName[] = "take_dataset";
class TakeDatasetOpTest : public DatasetOpsTestBase {};
TakeDatasetParams TakeLessTakeDatasetParams() {
  return TakeDatasetParams(RangeDatasetParams(0, 10, 1),
                           /*count=*/4,
                           /*output_dtypes=*/{DT_INT64},
                           /*output_shapes=*/{PartialTensorShape({1})},
                           /*node_name=*/kNodeName);
}
TakeDatasetParams TakeMoreTakeDatasetParams() {
  return TakeDatasetParams(RangeDatasetParams(0, 10, 1),
                           /*count=*/25,
                           /*output_dtypes=*/{DT_INT64},
                           /*output_shapes=*/{PartialTensorShape({1})},
                           /*node_name=*/kNodeName);
}
TakeDatasetParams TakeAllTakeDatasetParams() {
  return TakeDatasetParams(RangeDatasetParams(0, 10, 1),
                           /*count=*/-1,
                           /*output_dtypes=*/{DT_INT64},
                           /*output_shapes=*/{PartialTensorShape({1})},
                           /*node_name=*/kNodeName);
}
TakeDatasetParams TakeNothingTakeDatasetParams() {
  return TakeDatasetParams(RangeDatasetParams(0, 10, 1),
                           /*count=*/0,
                           /*output_dtypes=*/{DT_INT64},
                           /*output_shapes=*/{PartialTensorShape({1})},
                           /*node_name=*/kNodeName);
}
std::vector<GetNextTestCase<TakeDatasetParams>> GetNextTestCases() {
return {{TakeLessTakeDatasetParams(),
CreateTensors<int64_t>(TensorShape({}), {{0}, {1}, {2}, {3}})},
{TakeMoreTakeDatasetParams(),
CreateTensors<int64_t>(
TensorShape({}),
{{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}, {9}})},
{TakeAllTakeDatasetParams(),
CreateTensors<int64_t>(
TensorShape({}),
{{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}, {9}})},
          {TakeNothingTakeDatasetParams(),
           /*expected_outputs=*/{}}};
}
ITERATOR_GET_NEXT_TEST_P(TakeDatasetOpTest, TakeDatasetParams,
GetNextTestCases())
TEST_F(TakeDatasetOpTest, DatasetNodeName) {
auto dataset_params = TakeLessTakeDatasetParams();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetNodeName(dataset_params.node_name()));
}
TEST_F(TakeDatasetOpTest, DatasetTypeString) {
auto dataset_params = TakeLessTakeDatasetParams();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(
CheckDatasetTypeString(name_utils::OpName(TakeDatasetOp::kDatasetType)));
}
TEST_F(TakeDatasetOpTest, DatasetOutputDtypes) {
auto dataset_params = TakeLessTakeDatasetParams();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetOutputDtypes({DT_INT64}));
}
std::vector<DatasetOutputShapesTestCase<TakeDatasetParams>>
DatasetOutputShapesTestCases() {
return {{TakeLessTakeDatasetParams(),
{PartialTensorShape({})}},
{TakeMoreTakeDatasetParams(),
{PartialTensorShape({})}},
{TakeAllTakeDatasetParams(),
{PartialTensorShape({})}},
{TakeNothingTakeDatasetParams(),
{PartialTensorShape({})}}};
}
DATASET_OUTPUT_SHAPES_TEST_P(TakeDatasetOpTest, TakeDatasetParams,
DatasetOutputShapesTestCases())
std::vector<CardinalityTestCase<TakeDatasetParams>> CardinalityTestCases() {
  return {{TakeLessTakeDatasetParams(), /*expected_cardinality=*/4},
          {TakeMoreTakeDatasetParams(), /*expected_cardinality=*/10},
          {TakeAllTakeDatasetParams(), /*expected_cardinality=*/10},
          {TakeNothingTakeDatasetParams(), /*expected_cardinality=*/0}};
}
DATASET_CARDINALITY_TEST_P(TakeDatasetOpTest, TakeDatasetParams,
CardinalityTestCases())
TEST_F(TakeDatasetOpTest, IteratorOutputDtypes) {
auto dataset_params = TakeLessTakeDatasetParams();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorOutputDtypes({DT_INT64}));
}
std::vector<IteratorOutputShapesTestCase<TakeDatasetParams>>
IteratorOutputShapesTestCases() {
return {{TakeLessTakeDatasetParams(),
{PartialTensorShape({})}},
{TakeMoreTakeDatasetParams(),
{PartialTensorShape({})}},
{TakeAllTakeDatasetParams(),
{PartialTensorShape({})}},
{TakeNothingTakeDatasetParams(),
{PartialTensorShape({})}}};
}
ITERATOR_OUTPUT_SHAPES_TEST_P(TakeDatasetOpTest, TakeDatasetParams,
IteratorOutputShapesTestCases())
std::vector<IteratorPrefixTestCase<TakeDatasetParams>>
IteratorPrefixTestCases() {
return {{TakeLessTakeDatasetParams(),
name_utils::IteratorPrefix(
"FiniteTake", TakeLessTakeDatasetParams().iterator_prefix())},
{TakeMoreTakeDatasetParams(),
name_utils::IteratorPrefix(
"FiniteTake", TakeMoreTakeDatasetParams().iterator_prefix())},
{TakeAllTakeDatasetParams(),
name_utils::IteratorPrefix(
"FiniteTake", TakeAllTakeDatasetParams().iterator_prefix())},
{TakeNothingTakeDatasetParams(),
name_utils::IteratorPrefix(
"EmptyTake", TakeNothingTakeDatasetParams().iterator_prefix())}};
}
ITERATOR_PREFIX_TEST_P(TakeDatasetOpTest, TakeDatasetParams,
IteratorPrefixTestCases())
std::vector<IteratorSaveAndRestoreTestCase<TakeDatasetParams>>
IteratorSaveAndRestoreTestCases() {
  return {{TakeLessTakeDatasetParams(),
           /*breakpoints=*/{0, 2, 5, 11},
           CreateTensors<int64_t>(TensorShape({}), {{0}, {1}, {2}, {3}})},
          {TakeMoreTakeDatasetParams(),
           /*breakpoints=*/{0, 2, 5, 11},
           CreateTensors<int64_t>(
               TensorShape({}),
               {{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}, {9}})},
          {TakeAllTakeDatasetParams(),
           /*breakpoints=*/{0, 2, 5, 11},
           CreateTensors<int64_t>(
               TensorShape({}),
               {{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}, {9}})},
          {TakeNothingTakeDatasetParams(),
           /*breakpoints=*/{0, 2, 5, 11},
           /*expected_outputs=*/{}}};
}
ITERATOR_SAVE_AND_RESTORE_TEST_P(TakeDatasetOpTest, TakeDatasetParams,
IteratorSaveAndRestoreTestCases())
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/take_dataset_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/take_dataset_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
bf90ee0a-f59e-4e9c-9cf8-d22906551122 | cpp | tensorflow/tensorflow | uniform_quantized_convolution_ops | tensorflow/core/kernels/uniform_quant_ops/uniform_quantized_convolution_ops.cc | tensorflow/core/kernels/uniform_quant_ops/uniform_quantized_convolution_ops_test.cc | #include <algorithm>
#include <limits>
#include <vector>
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/kernels/uniform_quant_ops/math_utils.h"
#include "tensorflow/core/kernels/uniform_quant_ops/tensor_utils.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/util/quantization/uniform_quant_ops_attr.pb.h"
#include "tensorflow/core/util/quantization/uniform_quant_ops_params.h"
namespace tensorflow {
namespace {
using tensorflow::errors::InvalidArgument;
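// The implementation below first transposes lhs, rhs, and output into a
// canonical layout -- [batch, feature, spatial...] for lhs/output and
// [output_feature, input_feature, spatial...] for rhs -- as described by the
// convolution dimension numbers, convolves in that layout, and transposes
// the result back. The helpers below build those permutations (and, for the
// output, the inverse permutation).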
std::vector<int32_t> LhsTransposePerm(
const UniformQuantizedConvolutionDimensionNumbersAttr& dimension_numbers,
const int dims) {
std::vector<int32_t> lhs_perm(dims);
lhs_perm[0] = dimension_numbers.input_batch_dimension();
lhs_perm[1] = dimension_numbers.input_feature_dimension();
std::copy(dimension_numbers.input_spatial_dimensions().begin(),
dimension_numbers.input_spatial_dimensions().end(),
lhs_perm.begin() + 2);
return lhs_perm;
}
std::vector<int32_t> RhsTransposePerm(
const UniformQuantizedConvolutionDimensionNumbersAttr& dimension_numbers,
const int dims) {
std::vector<int32_t> rhs_perm(dims);
rhs_perm[0] = dimension_numbers.kernel_output_feature_dimension();
rhs_perm[1] = dimension_numbers.kernel_input_feature_dimension();
std::copy(dimension_numbers.kernel_spatial_dimensions().begin(),
dimension_numbers.kernel_spatial_dimensions().end(),
rhs_perm.begin() + 2);
return rhs_perm;
}
std::vector<int32_t> OutTransposePerm(
const UniformQuantizedConvolutionDimensionNumbersAttr& dimension_numbers,
const int dims) {
std::vector<int32_t> out_perm(dims);
out_perm[0] = dimension_numbers.output_batch_dimension();
out_perm[1] = dimension_numbers.output_feature_dimension();
std::copy(dimension_numbers.output_spatial_dimensions().begin(),
dimension_numbers.output_spatial_dimensions().end(),
out_perm.begin() + 2);
return out_perm;
}
std::vector<int32_t> OutBackTransposePerm(absl::Span<const int32_t> out_perm) {
std::vector<int32_t> out_perm_back(out_perm.size());
for (int i = 0; i < out_perm.size(); ++i) {
out_perm_back[out_perm[i]] = i;
}
return out_perm_back;
}
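// Shape of the transposed lhs after lhs dilation and explicit padding are
// applied to each spatial dimension. DilatedSize inserts (dilation - 1)
// gaps between adjacent elements, i.e. (size - 1) * dilation + 1 for a
// nonzero size (e.g. size 4 with dilation 2 becomes 7), and the low/high
// padding for that dimension is added on top.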
TensorShape PaddedAndDilatedTransposedLhsShape(
const TensorShape& in_shape,
const UniformQuantizedConvolutionParams& convolution_params) {
TensorShape out_shape = in_shape;
for (int i = 2; i < in_shape.dims(); ++i) {
const int64_t lhs_size_dilated =
UniformQuantizedConvolutionParams::DilatedSize(
in_shape.dim_size(i), convolution_params.lhs_dilation()[i - 2]);
const int64_t out_lhs_size =
lhs_size_dilated + convolution_params.padding_list()[2 * (i - 2)] +
convolution_params.padding_list()[2 * (i - 2) + 1];
out_shape.set_dim(i, out_lhs_size);
}
return out_shape;
}
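// Maps a flattened spatial index of the transposed lhs to its position in
// the padded-and-dilated buffer. Per spatial dimension (innermost first):
// out_idx = pad_low + dilation * in_idx, re-flattened against the output
// spatial extents.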
int64_t PaddedAndDilatedTransposedLhsSpatialIdx(
const UniformQuantizedConvolutionParams& convolution_params,
const TensorShape& lhs_in_shape, const TensorShape& lhs_out_shape,
int64_t in_spatial_idx) {
int64_t out_spatial_idx = 0;
int64_t out_spatial_inner_dim_size = 1;
for (int dim = lhs_in_shape.dims() - 1; dim >= 2; --dim) {
const int64_t in_spatial_idx_of_dim =
in_spatial_idx % lhs_in_shape.dim_size(dim);
const int64_t out_spatial_idx_of_dim =
convolution_params.padding_list()[2 * (dim - 2)] +
convolution_params.lhs_dilation()[dim - 2] * in_spatial_idx_of_dim;
out_spatial_idx += out_spatial_idx_of_dim * out_spatial_inner_dim_size;
in_spatial_idx /= lhs_in_shape.dim_size(dim);
out_spatial_inner_dim_size *= lhs_out_shape.dim_size(dim);
}
return out_spatial_idx;
}
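// Given flattened spatial indices into the rhs window and the output,
// returns the flattened lhs spatial index that pair reads from. Per
// dimension: lhs_idx = out_idx * window_stride + rhs_idx * rhs_dilation.
// lhs dilation and padding are already materialized in the lhs buffer, so
// they do not appear here.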
int64_t ConvolutionTransposedLhsSpatialIdx(
const UniformQuantizedConvolutionParams& convolution_params,
const TensorShape& lhs_shape, const TensorShape& rhs_shape,
const TensorShape& out_shape, int64_t rhs_spatial_idx,
int64_t out_spatial_idx) {
int64_t lhs_spatial_idx = 0;
int64_t lhs_spatial_inner_dim_size = 1;
for (int dim = lhs_shape.dims() - 1; dim >= 2; --dim) {
const int64_t rhs_spatial_idx_of_dim =
rhs_spatial_idx % rhs_shape.dim_size(dim);
const int64_t out_spatial_idx_of_dim =
out_spatial_idx % out_shape.dim_size(dim);
const int64_t lhs_spatial_idx_of_dim =
out_spatial_idx_of_dim * convolution_params.window_strides()[dim - 2] +
rhs_spatial_idx_of_dim * convolution_params.rhs_dilation()[dim - 2];
lhs_spatial_idx += lhs_spatial_idx_of_dim * lhs_spatial_inner_dim_size;
rhs_spatial_idx /= rhs_shape.dim_size(dim);
out_spatial_idx /= out_shape.dim_size(dim);
lhs_spatial_inner_dim_size *= lhs_shape.dim_size(dim);
}
return lhs_spatial_idx;
}
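// Copies the transposed lhs into the padded-and-dilated buffer, filling the
// gaps with the lhs zero point (a scalar, or one value per batch when
// lhs_zero_points has rank 1) so padded positions contribute nothing once
// the zero point is subtracted during accumulation.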
template <typename Tlhs>
void PadAndDilateTransposedLhs(
const Tensor& lhs_in,
const UniformQuantizedConvolutionParams& convolution_params,
const Tensor& lhs_zero_points, Tensor& lhs_out) {
auto lhs_in_tensor = lhs_in.flat_outer_dims<Tlhs, 3>();
auto lhs_out_tensor = lhs_out.flat_outer_dims<Tlhs, 3>();
const int32_t* lhs_zero_points_data = lhs_zero_points.flat<int32_t>().data();
const bool is_lhs_zero_points_scalar = lhs_zero_points.dims() == 0;
for (int64_t batch_idx = 0; batch_idx < lhs_in.dim_size(0); ++batch_idx) {
lhs_out_tensor.template chip<0>(batch_idx).setConstant(
lhs_zero_points_data[is_lhs_zero_points_scalar ? 0 : batch_idx]);
for (int64_t feature_idx = 0; feature_idx < lhs_in.dim_size(1);
++feature_idx) {
for (int64_t in_spatial_idx = 0;
in_spatial_idx < lhs_in_tensor.dimension(2); ++in_spatial_idx) {
const int64_t out_spatial_idx = PaddedAndDilatedTransposedLhsSpatialIdx(
convolution_params, lhs_in.shape(), lhs_out.shape(),
in_spatial_idx);
lhs_out_tensor(batch_idx, feature_idx, out_spatial_idx) =
lhs_in_tensor(batch_idx, feature_idx, in_spatial_idx);
}
}
}
}
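// Generic convolution loop shared by the quantized and hybrid variants. The
// caller supplies acc_f, which turns one (lhs, rhs) element pair into an
// int32 accumulator contribution, and out_f, which turns the finished
// accumulator into an output element; all zero-point subtraction and
// (re)quantization differences live in those two functors.
// feature_group_count and batch_group_count are honored by remapping the
// lhs feature and batch indices from the output feature index.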
template <typename Tlhs, typename Trhs, typename Tout, typename AccF,
typename OutF>
void ConvWithAccFunctionAndOutFunction(
const Tensor& lhs, const Tensor& rhs,
const UniformQuantizedConvolutionParams& convolution_params, Tensor& out,
const AccF& acc_f, const OutF& out_f) {
const int64_t out_feature_group_size_by_feature_group_count =
out.dim_size(1) / convolution_params.feature_group_count();
const int64_t out_feature_group_size_by_batch_group_count =
out.dim_size(1) / convolution_params.batch_group_count();
auto lhs_tensor = lhs.flat_outer_dims<Tlhs, 3>();
auto rhs_tensor = rhs.flat_outer_dims<Trhs, 3>();
auto out_tensor = out.flat_outer_dims<Tout, 3>();
for (int64_t out_batch_idx = 0; out_batch_idx < out_tensor.dimension(0);
++out_batch_idx) {
for (int64_t out_feature_idx = 0; out_feature_idx < out_tensor.dimension(1);
++out_feature_idx) {
const int64_t lhs_batch_idx =
(out_feature_idx / out_feature_group_size_by_batch_group_count) *
out_tensor.dimension(0) +
out_batch_idx;
for (int out_spatial_idx = 0; out_spatial_idx < out_tensor.dimension(2);
++out_spatial_idx) {
int32_t acc = 0;
for (int64_t rhs_in_feature_idx = 0;
rhs_in_feature_idx < rhs_tensor.dimension(1);
++rhs_in_feature_idx) {
const int64_t lhs_feature_idx =
(out_feature_idx /
out_feature_group_size_by_feature_group_count) *
rhs_tensor.dimension(1) +
rhs_in_feature_idx;
for (int64_t rhs_spatial_idx = 0;
rhs_spatial_idx < rhs_tensor.dimension(2); ++rhs_spatial_idx) {
const int64_t lhs_spatial_idx = ConvolutionTransposedLhsSpatialIdx(
convolution_params, lhs.shape(), rhs.shape(), out.shape(),
rhs_spatial_idx, out_spatial_idx);
const Tlhs lhs_val =
lhs_tensor(lhs_batch_idx, lhs_feature_idx, lhs_spatial_idx);
const Trhs rhs_val = rhs_tensor(out_feature_idx, rhs_in_feature_idx,
rhs_spatial_idx);
acc += acc_f(lhs_val, rhs_val, lhs_batch_idx, out_feature_idx);
}
}
out_tensor(out_batch_idx, out_feature_idx, out_spatial_idx) =
out_f(acc, lhs_batch_idx, out_feature_idx);
}
}
}
}
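// Fully quantized path with per-tensor quantization on both sides. The
// float rescale factor lhs_scale * rhs_scale / output_scale is folded once
// into a fixed-point (quantized multiplier, shift) pair, which is then
// applied to every accumulator.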
template <typename Tin, typename Tout>
Status EvalLhsPerTensorAndRhsPerTensorQuantizedConv(
const Tensor& lhs, const Tensor& rhs,
const UniformQuantizedConvolutionParams& convolution_params,
const float lhs_scale, const int32_t lhs_zero_point, const float rhs_scale,
const int32_t rhs_zero_point, const float output_scale,
const int32_t output_zero_point, const int output_quantization_min_val,
const int output_quantization_max_val, Tensor& out) {
const double effective_multiplier =
static_cast<double>(lhs_scale) * rhs_scale / output_scale;
int32_t effective_quantized_multiplier;
int effective_shift;
TF_RETURN_IF_ERROR(QuantizeMultiplier(
effective_multiplier, effective_quantized_multiplier, effective_shift));
ConvWithAccFunctionAndOutFunction<Tin, Tin, Tout>(
lhs, rhs, convolution_params, out,
[lhs_zero_point, rhs_zero_point](Tin lhs_val, Tin rhs_val,
int64_t lhs_batch_idx,
int64_t out_feature_idx) {
return (static_cast<int32_t>(lhs_val) - lhs_zero_point) *
(static_cast<int32_t>(rhs_val) - rhs_zero_point);
},
[effective_quantized_multiplier, effective_shift, output_zero_point,
output_quantization_min_val, output_quantization_max_val](
int32_t acc, int64_t lhs_batch_idx, int64_t out_feature_idx) {
return AffineRequantizeWithQuantizedMultiplierAndShift<int32_t, Tout>(
acc, effective_quantized_multiplier, effective_shift,
            /*input_zero_point=*/0, output_zero_point,
output_quantization_min_val, output_quantization_max_val);
});
return absl::OkStatus();
}
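// Fully quantized path with per-channel rhs quantization (and per-channel
// or per-tensor output quantization): a (quantized multiplier, shift) pair
// is precomputed for every output feature.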
template <typename Tin, typename Tout>
Status EvalLhsPerTensorAndRhsPerChannelQuantizedConv(
OpKernelContext* context, const Tensor& lhs, const Tensor& rhs,
const UniformQuantizedConvolutionParams& convolution_params,
const float lhs_scale, const int32_t lhs_zero_point,
const Tensor& rhs_scales, const Tensor& rhs_zero_points,
const Tensor& output_scales, const Tensor& output_zero_points,
const int output_quantization_min_val,
const int output_quantization_max_val, Tensor& out) {
const int64_t out_feature_size = out.dim_size(1);
const float* rhs_scales_data = rhs_scales.flat<float>().data();
const int32_t* rhs_zero_points_data = rhs_zero_points.flat<int32_t>().data();
Tensor effective_quantized_multipliers;
TF_RETURN_IF_ERROR(context->allocate_temp(DT_INT32, rhs_scales.shape(),
&effective_quantized_multipliers));
Tensor effective_shifts;
TF_RETURN_IF_ERROR(
context->allocate_temp(DT_INT32, rhs_scales.shape(), &effective_shifts));
int32_t* effective_quantized_multipliers_data =
effective_quantized_multipliers.flat<int32_t>().data();
int32_t* effective_shifts_data = effective_shifts.flat<int32_t>().data();
const bool is_output_scales_scalar = output_scales.dims() == 0;
if (!is_output_scales_scalar) {
const float* output_scales_data = output_scales.flat<float>().data();
for (int64_t out_feature_idx = 0; out_feature_idx < out_feature_size;
++out_feature_idx) {
const double effective_multiplier = static_cast<double>(lhs_scale) *
rhs_scales_data[out_feature_idx] /
output_scales_data[out_feature_idx];
TF_RETURN_IF_ERROR(QuantizeMultiplier(
effective_multiplier,
effective_quantized_multipliers_data[out_feature_idx],
effective_shifts_data[out_feature_idx]));
}
} else {
const float output_scale = output_scales.scalar<float>()();
for (int64_t out_feature_idx = 0; out_feature_idx < out_feature_size;
++out_feature_idx) {
const double effective_multiplier = static_cast<double>(lhs_scale) *
rhs_scales_data[out_feature_idx] /
output_scale;
TF_RETURN_IF_ERROR(QuantizeMultiplier(
effective_multiplier,
effective_quantized_multipliers_data[out_feature_idx],
effective_shifts_data[out_feature_idx]));
}
}
const int32_t* output_zero_points_data =
output_zero_points.flat<int32_t>().data();
ConvWithAccFunctionAndOutFunction<Tin, Tin, Tout>(
lhs, rhs, convolution_params, out,
[lhs_zero_point, rhs_zero_points_data](Tin lhs_val, Tin rhs_val,
int64_t lhs_batch_idx,
int64_t out_feature_idx) {
return (static_cast<int32_t>(lhs_val) - lhs_zero_point) *
(static_cast<int32_t>(rhs_val) -
rhs_zero_points_data[out_feature_idx]);
},
[effective_quantized_multipliers_data, effective_shifts_data,
output_zero_points_data, output_quantization_min_val,
output_quantization_max_val, is_output_scales_scalar](
int32_t acc, int64_t lhs_batch_idx, int64_t out_feature_idx) {
return AffineRequantizeWithQuantizedMultiplierAndShift<int32_t, Tout>(
acc, effective_quantized_multipliers_data[out_feature_idx],
effective_shifts_data[out_feature_idx],
            /*input_zero_point=*/0,
output_zero_points_data[is_output_scales_scalar ? 0
: out_feature_idx],
output_quantization_min_val, output_quantization_max_val);
});
return absl::OkStatus();
}
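// Hybrid paths: the lhs has been dynamically quantized per batch, the rhs
// is statically quantized, and the accumulator is dequantized directly to
// float, so there are no output quantization parameters.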
template <typename Tlhs, typename Trhs>
void EvalLhsPerBatchAndRhsPerTensorQuantizedConv(
OpKernelContext* context, const Tensor& lhs, const Tensor& rhs,
const UniformQuantizedConvolutionParams& convolution_params,
const Tensor& lhs_scales, const Tensor& lhs_zero_points,
const float rhs_scale, const int32_t rhs_zero_point, Tensor& out) {
const float* lhs_scales_data = lhs_scales.flat<float>().data();
const int32_t* lhs_zero_points_data = lhs_zero_points.flat<int32_t>().data();
ConvWithAccFunctionAndOutFunction<Tlhs, Trhs, float>(
lhs, rhs, convolution_params, out,
[lhs_zero_points_data, rhs_zero_point](Tlhs lhs_val, Trhs rhs_val,
int64_t lhs_batch_idx,
int64_t out_feature_idx) {
return (static_cast<int32_t>(lhs_val) -
lhs_zero_points_data[lhs_batch_idx]) *
(static_cast<int32_t>(rhs_val) - rhs_zero_point);
},
[lhs_scales_data, rhs_scale](int32_t acc, int64_t lhs_batch_idx,
int64_t out_feature_idx) {
return acc * lhs_scales_data[lhs_batch_idx] * rhs_scale;
});
}
template <typename Tlhs, typename Trhs>
void EvalLhsPerBatchAndRhsPerChannelQuantizedConv(
const Tensor& lhs, const Tensor& rhs,
const UniformQuantizedConvolutionParams& convolution_params,
const Tensor& lhs_scales, const Tensor& lhs_zero_points,
const Tensor& rhs_scales, const Tensor& rhs_zero_points, Tensor& out) {
const float* lhs_scales_data = lhs_scales.flat<float>().data();
const int32_t* lhs_zero_points_data = lhs_zero_points.flat<int32_t>().data();
const float* rhs_scales_data = rhs_scales.flat<float>().data();
const int32_t* rhs_zero_points_data = rhs_zero_points.flat<int32_t>().data();
ConvWithAccFunctionAndOutFunction<Tlhs, Trhs, float>(
lhs, rhs, convolution_params, out,
[lhs_zero_points_data, rhs_zero_points_data](Tlhs lhs_val, Trhs rhs_val,
int64_t lhs_batch_idx,
int64_t out_feature_idx) {
return (static_cast<int32_t>(lhs_val) -
lhs_zero_points_data[lhs_batch_idx]) *
(static_cast<int32_t>(rhs_val) -
rhs_zero_points_data[out_feature_idx]);
},
[lhs_scales_data, rhs_scales_data](int32_t acc, int64_t lhs_batch_idx,
int64_t out_feature_idx) {
return acc * lhs_scales_data[lhs_batch_idx] *
rhs_scales_data[out_feature_idx];
});
}
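// End-to-end quantized convolution: transpose into the canonical layout,
// pad and dilate the lhs with its zero point, dispatch to the per-tensor or
// per-channel kernel depending on the rank of rhs_scales, and transpose the
// result back to the caller's layout.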
template <typename Tin, typename Tout>
Status EvalQuantizedConv(
OpKernelContext* context, const Tensor& lhs, const Tensor& rhs,
const UniformQuantizedConvolutionParams& convolution_params,
const Tensor& lhs_scales, const Tensor& lhs_zero_points,
const Tensor& rhs_scales, const Tensor& rhs_zero_points,
const Tensor& output_scales, const Tensor& output_zero_points,
int output_quantization_min_val, int output_quantization_max_val,
Tensor& out) {
const auto& dimension_numbers = convolution_params.dimension_numbers();
const auto& lhs_perm = LhsTransposePerm(dimension_numbers, lhs.dims());
Tensor lhs_transposed;
TF_RETURN_IF_ERROR(context->allocate_temp(
lhs.dtype(), TransposedShape(lhs.shape(), lhs_perm), &lhs_transposed));
Transpose<Tin>(lhs, lhs_perm, lhs_transposed);
const auto& rhs_perm = RhsTransposePerm(dimension_numbers, rhs.dims());
Tensor rhs_transposed;
TF_RETURN_IF_ERROR(context->allocate_temp(
rhs.dtype(), TransposedShape(rhs.shape(), rhs_perm), &rhs_transposed));
Transpose<Tin>(rhs, rhs_perm, rhs_transposed);
const auto& out_perm = OutTransposePerm(dimension_numbers, out.dims());
Tensor out_transposed;
TF_RETURN_IF_ERROR(context->allocate_temp(
out.dtype(), TransposedShape(out.shape(), out_perm), &out_transposed));
Tensor lhs_padded_and_dilated;
TF_RETURN_IF_ERROR(
context->allocate_temp(lhs_transposed.dtype(),
PaddedAndDilatedTransposedLhsShape(
lhs_transposed.shape(), convolution_params),
&lhs_padded_and_dilated));
PadAndDilateTransposedLhs<Tin>(lhs_transposed, convolution_params,
lhs_zero_points, lhs_padded_and_dilated);
const float lhs_scale = lhs_scales.scalar<float>()();
const int32_t lhs_zero_point = lhs_zero_points.scalar<int32_t>()();
if (rhs_scales.dims() != 0) {
TF_RETURN_IF_ERROR(EvalLhsPerTensorAndRhsPerChannelQuantizedConv<Tin, Tout>(
context, lhs_padded_and_dilated, rhs_transposed, convolution_params,
lhs_scale, lhs_zero_point, rhs_scales, rhs_zero_points, output_scales,
output_zero_points, output_quantization_min_val,
output_quantization_max_val, out_transposed));
} else {
DCHECK_EQ(output_scales.dims(), 0);
const float rhs_scale = rhs_scales.scalar<float>()();
const int32_t rhs_zero_point = rhs_zero_points.scalar<int32_t>()();
const float output_scale = output_scales.scalar<float>()();
const int32_t output_zero_point = output_zero_points.scalar<int32_t>()();
TF_RETURN_IF_ERROR(EvalLhsPerTensorAndRhsPerTensorQuantizedConv<Tin, Tout>(
lhs_padded_and_dilated, rhs_transposed, convolution_params, lhs_scale,
lhs_zero_point, rhs_scale, rhs_zero_point, output_scale,
output_zero_point, output_quantization_min_val,
output_quantization_max_val, out_transposed));
}
const auto& out_perm_back = OutBackTransposePerm(out_perm);
Transpose<Tout>(out_transposed, out_perm_back, out);
return absl::OkStatus();
}
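// Hybrid (dynamic-range) convolution: each batch of the float lhs is
// asymmetrically quantized to the rhs storage type, the convolution runs in
// the integer domain, and the result is produced directly as float.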
template <typename Trhs>
Status EvalHybridConv(
OpKernelContext* context, const Tensor& lhs, const Tensor& rhs,
const UniformQuantizedConvolutionParams& convolution_params,
const Tensor& rhs_scales, const Tensor& rhs_zero_points, Tensor& out) {
using TlhsQuant = Trhs;
DataType lhs_quant_dtype = DataTypeToEnum<TlhsQuant>::v();
const auto& dimension_numbers = convolution_params.dimension_numbers();
const auto& lhs_perm = LhsTransposePerm(dimension_numbers, lhs.dims());
Tensor lhs_transposed;
TF_RETURN_IF_ERROR(context->allocate_temp(
DT_FLOAT, TransposedShape(lhs.shape(), lhs_perm), &lhs_transposed));
Transpose<float>(lhs, lhs_perm, lhs_transposed);
const auto& rhs_perm = RhsTransposePerm(dimension_numbers, rhs.dims());
Tensor rhs_transposed;
TF_RETURN_IF_ERROR(context->allocate_temp(
rhs.dtype(), TransposedShape(rhs.shape(), rhs_perm), &rhs_transposed));
Transpose<Trhs>(rhs, rhs_perm, rhs_transposed);
const auto& out_perm = OutTransposePerm(dimension_numbers, out.dims());
Tensor out_transposed;
TF_RETURN_IF_ERROR(context->allocate_temp(
DT_FLOAT, TransposedShape(out.shape(), out_perm), &out_transposed));
const int64_t lhs_batch_size = lhs_transposed.dim_size(0);
Tensor lhs_quantized;
TF_RETURN_IF_ERROR(context->allocate_temp(
lhs_quant_dtype, lhs_transposed.shape(), &lhs_quantized));
Tensor lhs_scales;
TF_RETURN_IF_ERROR(
context->allocate_temp(DT_FLOAT, {lhs_batch_size}, &lhs_scales));
Tensor lhs_zero_points;
TF_RETURN_IF_ERROR(
context->allocate_temp(DT_INT32, {lhs_batch_size}, &lhs_zero_points));
float* lhs_scales_data = lhs_scales.flat<float>().data();
int32_t* lhs_zero_points_data = lhs_zero_points.flat<int32_t>().data();
auto lhs_tensor = lhs_transposed.template flat_outer_dims<float, 2>();
auto lhs_quantized_tensor =
lhs_quantized.template flat_outer_dims<TlhsQuant, 2>();
for (int64_t b = 0; b < lhs_batch_size; ++b) {
TF_RETURN_IF_ERROR(AsymmetricQuantize(
lhs_tensor.template chip<0>(b),
        /*quantization_min_val=*/std::numeric_limits<TlhsQuant>::lowest(),
        /*quantization_max_val=*/std::numeric_limits<TlhsQuant>::max(),
lhs_scales_data[b], lhs_zero_points_data[b],
lhs_quantized_tensor.template chip<0>(b)));
}
Tensor lhs_padded_and_dilated;
TF_RETURN_IF_ERROR(
context->allocate_temp(lhs_quant_dtype,
PaddedAndDilatedTransposedLhsShape(
lhs_quantized.shape(), convolution_params),
&lhs_padded_and_dilated));
PadAndDilateTransposedLhs<TlhsQuant>(lhs_quantized, convolution_params,
lhs_zero_points, lhs_padded_and_dilated);
if (rhs_scales.dims() != 0) {
EvalLhsPerBatchAndRhsPerChannelQuantizedConv<TlhsQuant, Trhs>(
lhs_padded_and_dilated, rhs_transposed, convolution_params, lhs_scales,
lhs_zero_points, rhs_scales, rhs_zero_points, out_transposed);
} else {
EvalLhsPerBatchAndRhsPerTensorQuantizedConv<TlhsQuant, Trhs>(
context, lhs_padded_and_dilated, rhs_transposed, convolution_params,
lhs_scales, lhs_zero_points, rhs_scales.scalar<float>()(),
rhs_zero_points.scalar<int32_t>()(), out_transposed);
}
const auto& out_perm_back = OutBackTransposePerm(out_perm);
Transpose<float>(out_transposed, out_perm_back, out);
return absl::OkStatus();
}
}
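// Kernel for UniformQuantizedConvolution. The lhs must be per-tensor
// quantized; the rhs (and, matching it, the output) may be per-tensor or
// per-channel along the kernel/output feature dimension.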
template <typename Tin, typename Tout>
class UniformQuantizedConvolutionOp : public OpKernel {
public:
explicit UniformQuantizedConvolutionOp(OpKernelConstruction* context)
: OpKernel(context) {
OP_REQUIRES_OK(context, convolution_params_.LoadFromAttrs(*context));
OP_REQUIRES_OK(context, context->GetAttr("output_quantization_min_val",
&output_quantization_min_val_));
OP_REQUIRES_OK(context, context->GetAttr("output_quantization_max_val",
&output_quantization_max_val_));
int lhs_quantization_axis;
OP_REQUIRES_OK(context, context->GetAttr("lhs_quantization_axis",
&lhs_quantization_axis));
OP_REQUIRES(
context, (lhs_quantization_axis == -1),
InvalidArgument("lhs_quantization_axis Attr must be -1 (per-tensor)."));
OP_REQUIRES_OK(context, context->GetAttr("rhs_quantization_axis",
&rhs_quantization_axis_));
OP_REQUIRES_OK(context, context->GetAttr("output_quantization_axis",
&output_quantization_axis_));
}
void Compute(OpKernelContext* context) override {
const Tensor& lhs = context->input(0);
const Tensor& rhs = context->input(1);
const Tensor& lhs_scales = context->input(2);
const Tensor& lhs_zero_points = context->input(3);
const Tensor& rhs_scales = context->input(4);
const Tensor& rhs_zero_points = context->input(5);
const Tensor& output_scales = context->input(6);
const Tensor& output_zero_points = context->input(7);
OP_REQUIRES(context, (AllElementsPositive<float>(lhs_scales)),
InvalidArgument("lhs scales elements must be all positive."));
OP_REQUIRES(context, (AllElementsPositive<float>(rhs_scales)),
InvalidArgument("rhs scales elements must be all positive."));
OP_REQUIRES(
context, (AllElementsPositive<float>(output_scales)),
InvalidArgument("output scales elements must be all positive."));
OP_REQUIRES_OK(context,
convolution_params_.ValidateOrFillParamsAndValidateShape(
lhs.shape(), rhs.shape()));
OP_REQUIRES(
context,
(lhs_scales.IsSameSize(lhs_zero_points) && lhs_scales.dims() == 0),
InvalidArgument(
"lhs scales/zero_points must be all scalar tensors. Given: ",
lhs_scales.shape().DebugString(),
lhs_zero_points.shape().DebugString()));
OP_REQUIRES(
context,
(rhs_quantization_axis_ == -1 ||
rhs_quantization_axis_ == convolution_params_.dimension_numbers()
.kernel_output_feature_dimension()),
InvalidArgument("rhs_quantization_axis Attr must be -1 (per-tensor) or "
"dimension_numbers.kernel_output_feature_dimension "
"(per-channel)."));
OP_REQUIRES_OK(
context, QuantizationAxisAndShapeValid(rhs.shape(), rhs_scales.shape(),
rhs_zero_points.shape(),
rhs_quantization_axis_));
OP_REQUIRES(
context,
(output_quantization_axis_ == -1 ||
output_quantization_axis_ == convolution_params_.dimension_numbers()
.output_feature_dimension()),
InvalidArgument(
"output_quantization_axis Attr must be -1 (per-tensor) or "
"dimension_numbers.output_feature_dimension (per-channel)."));
auto output_shape =
convolution_params_.CalculateOutputShape(lhs.shape(), rhs.shape());
OP_REQUIRES_OK(context, output_shape.status());
OP_REQUIRES_OK(context,
QuantizationAxisAndShapeValid(
output_shape.value(), output_scales.shape(),
output_zero_points.shape(), output_quantization_axis_));
OP_REQUIRES(
context, (rhs_scales.dims() > 0 || output_scales.dims() == 0),
InvalidArgument(
"If rhs is per-tensor quantized, output must be also per-tensor "
"quantized. Given output scales/zero_points of rank ",
output_scales.dims()));
Tensor* output;
OP_REQUIRES_OK(context,
context->allocate_output(0, output_shape.value(), &output));
OP_REQUIRES_OK(
context,
EvalQuantizedConv<Tin, Tout>(
context, lhs, rhs, convolution_params_, lhs_scales, lhs_zero_points,
rhs_scales, rhs_zero_points, output_scales, output_zero_points,
output_quantization_min_val_, output_quantization_max_val_,
*output));
}
private:
UniformQuantizedConvolutionParams convolution_params_;
int rhs_quantization_axis_;
int output_quantization_axis_;
int output_quantization_min_val_;
int output_quantization_max_val_;
};
template <typename Tlhs, typename Trhs, typename Tout>
class UniformQuantizedConvolutionHybridOp : public OpKernel {
public:
explicit UniformQuantizedConvolutionHybridOp(OpKernelConstruction* context)
: OpKernel(context) {
OP_REQUIRES_OK(context, context->GetAttr("rhs_quantization_axis",
&rhs_quantization_axis_));
OP_REQUIRES_OK(context, convolution_params_.LoadFromAttrs(*context));
}
void Compute(OpKernelContext* context) override {
const Tensor& lhs = context->input(0);
const Tensor& rhs = context->input(1);
const Tensor& rhs_scales = context->input(2);
const Tensor& rhs_zero_points = context->input(3);
OP_REQUIRES(context, (AllElementsPositive<float>(rhs_scales)),
InvalidArgument("rhs scales elements must be all positive."));
OP_REQUIRES_OK(context,
convolution_params_.ValidateOrFillParamsAndValidateShape(
lhs.shape(), rhs.shape()));
OP_REQUIRES(
context,
(rhs_quantization_axis_ == -1 ||
rhs_quantization_axis_ == convolution_params_.dimension_numbers()
.kernel_output_feature_dimension()),
InvalidArgument("rhs_quantization_axis Attr must be -1 (per-tensor) or "
"dimension_numbers.kernel_output_feature_dimension "
"(per-channel)."));
OP_REQUIRES_OK(
context, QuantizationAxisAndShapeValid(rhs.shape(), rhs_scales.shape(),
rhs_zero_points.shape(),
rhs_quantization_axis_));
Tensor* output;
auto output_shape =
convolution_params_.CalculateOutputShape(lhs.shape(), rhs.shape());
OP_REQUIRES_OK(context, output_shape.status());
OP_REQUIRES_OK(context,
context->allocate_output(0, output_shape.value(), &output));
OP_REQUIRES_OK(context,
EvalHybridConv<Trhs>(context, lhs, rhs, convolution_params_,
rhs_scales, rhs_zero_points, *output));
}
private:
UniformQuantizedConvolutionParams convolution_params_;
int rhs_quantization_axis_;
};
REGISTER_KERNEL_BUILDER(Name("UniformQuantizedConvolution")
.Device(DEVICE_CPU)
.TypeConstraint<qint8>("Tin")
.TypeConstraint<qint32>("Tout"),
UniformQuantizedConvolutionOp<qint8, qint32>);
REGISTER_KERNEL_BUILDER(
Name("UniformQuantizedConvolutionHybrid")
.Device(DEVICE_CPU)
.TypeConstraint<float>("Tlhs")
.TypeConstraint<qint8>("Trhs")
.TypeConstraint<float>("Tout"),
UniformQuantizedConvolutionHybridOp<float, qint8, float>);
} | #include <cstdint>
#include <limits>
#include <vector>
#include <gtest/gtest.h>
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/numeric_types.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/util/quantization/uniform_quant_ops_attr.pb.h"
namespace tensorflow {
namespace {
using protobuf::TextFormat;
constexpr int32_t kInt8Min = std::numeric_limits<int8_t>::min();
constexpr int32_t kInt8Max = std::numeric_limits<int8_t>::max();
constexpr int32_t kInt32Min = std::numeric_limits<int32_t>::min();
constexpr int32_t kInt32Max = std::numeric_limits<int32_t>::max();
template <typename T>
std::vector<T> Arange(int start, int stop, int step = 1) {
std::vector<T> array;
int val = start;
while (val < stop) {
array.push_back(val);
val += step;
}
return array;
}
}
class UniformQuantizedConvolutionTest : public OpsTestBase {
protected:
};
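// The tests below feed small Arange-filled qint8 tensors through the op
// under various attribute combinations and compare against precomputed
// qint32 expectations.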
TEST_F(UniformQuantizedConvolutionTest, PerTensorQuantizedDefaultAttrs) {
TF_ASSERT_OK(NodeDefBuilder("test", "UniformQuantizedConvolution")
.Input(FakeInput(DT_QINT8))
.Input(FakeInput(DT_QINT8))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Attr("Tin", DT_QINT8)
.Attr("Tout", DT_QINT32)
.Attr("lhs_quantization_min_val", kInt8Min)
.Attr("lhs_quantization_max_val", kInt8Max)
.Attr("rhs_quantization_min_val", kInt8Min)
.Attr("rhs_quantization_max_val", kInt8Max)
.Attr("output_quantization_min_val", kInt32Min)
.Attr("output_quantization_max_val", kInt32Max)
.Attr("padding", "VALID")
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddInputFromArray<qint8>(TensorShape({2, 2, 3, 4}), Arange<qint8>(-24, 24));
AddInputFromArray<qint8>(TensorShape({3, 2, 2, 3}), Arange<qint8>(-18, 18));
AddInputFromArray<float>(TensorShape({}), {2.0});
AddInputFromArray<int32>(TensorShape({}), {1});
AddInputFromArray<float>(TensorShape({}), {2.0});
AddInputFromArray<int32>(TensorShape({}), {2});
AddInputFromArray<float>(TensorShape({}), {3.0});
AddInputFromArray<int32>(TensorShape({}), {3});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_QINT32, TensorShape({2, 3, 2, 2}));
test::FillValues<qint32>(
&expected, {4062, 3830, 3134, 2902, 990, 950, 830, 790,
-2082, -1930, -1474, -1322, -1506, -1738, -2434, -2666,
30, -10, -130, -170, 1566, 1718, 2174, 2326});
test::ExpectTensorEqual<qint32>(expected, *GetOutput(0));
}
TEST_F(UniformQuantizedConvolutionTest, PerTensorQuantizedSetStrides) {
TF_ASSERT_OK(NodeDefBuilder("test", "UniformQuantizedConvolution")
.Input(FakeInput(DT_QINT8))
.Input(FakeInput(DT_QINT8))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Attr("Tin", DT_QINT8)
.Attr("Tout", DT_QINT32)
.Attr("lhs_quantization_min_val", kInt8Min)
.Attr("lhs_quantization_max_val", kInt8Max)
.Attr("rhs_quantization_min_val", kInt8Min)
.Attr("rhs_quantization_max_val", kInt8Max)
.Attr("output_quantization_min_val", kInt32Min)
.Attr("output_quantization_max_val", kInt32Max)
.Attr("padding", "VALID")
.Attr("window_strides", {2, 3})
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddInputFromArray<qint8>(TensorShape({2, 2, 3, 4}), Arange<qint8>(-24, 24));
AddInputFromArray<qint8>(TensorShape({3, 2, 2, 3}), Arange<qint8>(-18, 18));
AddInputFromArray<float>(TensorShape({}), {2.0});
AddInputFromArray<int32>(TensorShape({}), {1});
AddInputFromArray<float>(TensorShape({}), {2.0});
AddInputFromArray<int32>(TensorShape({}), {2});
AddInputFromArray<float>(TensorShape({}), {3.0});
AddInputFromArray<int32>(TensorShape({}), {3});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_QINT32, TensorShape({2, 3, 1, 1}));
test::FillValues<qint32>(&expected, {4062, 990, -2082, -1506, 30, 1566});
test::ExpectTensorEqual<qint32>(expected, *GetOutput(0));
}
TEST_F(UniformQuantizedConvolutionTest, PerTensorQuantizedSetExplicitPadding) {
TF_ASSERT_OK(NodeDefBuilder("test", "UniformQuantizedConvolution")
.Input(FakeInput(DT_QINT8))
.Input(FakeInput(DT_QINT8))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Attr("Tin", DT_QINT8)
.Attr("Tout", DT_QINT32)
.Attr("lhs_quantization_min_val", kInt8Min)
.Attr("lhs_quantization_max_val", kInt8Max)
.Attr("rhs_quantization_min_val", kInt8Min)
.Attr("rhs_quantization_max_val", kInt8Max)
.Attr("output_quantization_min_val", kInt32Min)
.Attr("output_quantization_max_val", kInt32Max)
.Attr("padding", "EXPLICIT")
.Attr("explicit_padding", {0, 1, 1, 2})
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddInputFromArray<qint8>(TensorShape({2, 2, 3, 4}), Arange<qint8>(-24, 24));
AddInputFromArray<qint8>(TensorShape({3, 2, 2, 3}), Arange<qint8>(-18, 18));
AddInputFromArray<float>(TensorShape({}), {2.0});
AddInputFromArray<int32>(TensorShape({}), {1});
AddInputFromArray<float>(TensorShape({}), {2.0});
AddInputFromArray<int32>(TensorShape({}), {2});
AddInputFromArray<float>(TensorShape({}), {3.0});
AddInputFromArray<int32>(TensorShape({}), {3});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_QINT32, TensorShape({2, 3, 3, 5}));
test::FillValues<qint32>(
&expected,
{2694, 4062, 3830, 2550, 1272, 2096, 3134, 2902, 1910, 942,
968, 1432, 1304, 848, 414, 582, 990, 950, 694, 376,
496, 830, 790, 566, 302, 296, 472, 440, 304, 158,
-1530, -2082, -1930, -1162, -520, -1104, -1474, -1322, -778, -338,
-376, -488, -424, -240, -98, -890, -1506, -1738, -1290, -712,
-1488, -2434, -2666, -1930, -1042, -1016, -1640, -1768, -1264, -674,
70, 30, -10, -74, -72, -16, -130, -170, -202, -146,
-152, -296, -328, -272, -162, 1030, 1566, 1718, 1142, 568,
1456, 2174, 2326, 1526, 750, 712, 1048, 1112, 720, 350});
test::ExpectTensorEqual<qint32>(expected, *GetOutput(0));
}
TEST_F(UniformQuantizedConvolutionTest, PerTensorQuantizedSetSamePadding) {
TF_ASSERT_OK(NodeDefBuilder("test", "UniformQuantizedConvolution")
.Input(FakeInput(DT_QINT8))
.Input(FakeInput(DT_QINT8))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Attr("Tin", DT_QINT8)
.Attr("Tout", DT_QINT32)
.Attr("lhs_quantization_min_val", kInt8Min)
.Attr("lhs_quantization_max_val", kInt8Max)
.Attr("rhs_quantization_min_val", kInt8Min)
.Attr("rhs_quantization_max_val", kInt8Max)
.Attr("output_quantization_min_val", kInt32Min)
.Attr("output_quantization_max_val", kInt32Max)
.Attr("padding", "SAME")
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddInputFromArray<qint8>(TensorShape({1, 1, 2, 2}), Arange<qint8>(-2, 2));
AddInputFromArray<qint8>(TensorShape({1, 1, 2, 1}), Arange<qint8>(1, 3));
AddInputFromArray<float>(TensorShape({}), {2.0});
AddInputFromArray<int32>(TensorShape({}), {1});
AddInputFromArray<float>(TensorShape({}), {2.0});
AddInputFromArray<int32>(TensorShape({}), {2});
AddInputFromArray<float>(TensorShape({}), {4.0});
AddInputFromArray<int32>(TensorShape({}), {3});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_QINT32, TensorShape({1, 1, 2, 2}));
test::FillValues<qint32>(&expected, {6, 5, 4, 3});
test::ExpectTensorEqual<qint32>(expected, *GetOutput(0));
}
TEST_F(UniformQuantizedConvolutionTest, PerTensorQuantizedSetDimensionNumbers) {
UniformQuantizedConvolutionDimensionNumbersAttr dimension_numbers;
ASSERT_TRUE(TextFormat::ParseFromString(R"pb(
input_batch_dimension: 1
input_feature_dimension: 3
input_spatial_dimensions: 2
input_spatial_dimensions: 0
kernel_output_feature_dimension: 2
kernel_input_feature_dimension: 1
kernel_spatial_dimensions: 0
kernel_spatial_dimensions: 3
output_batch_dimension: 2
output_feature_dimension: 1
output_spatial_dimensions: 3
output_spatial_dimensions: 0
)pb",
&dimension_numbers));
TF_ASSERT_OK(
NodeDefBuilder("test", "UniformQuantizedConvolution")
.Input(FakeInput(DT_QINT8))
.Input(FakeInput(DT_QINT8))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Attr("Tin", DT_QINT8)
.Attr("Tout", DT_QINT32)
.Attr("lhs_quantization_min_val", kInt8Min)
.Attr("lhs_quantization_max_val", kInt8Max)
.Attr("rhs_quantization_min_val", kInt8Min)
.Attr("rhs_quantization_max_val", kInt8Max)
.Attr("output_quantization_min_val", kInt32Min)
.Attr("output_quantization_max_val", kInt32Max)
.Attr("padding", "VALID")
.Attr("dimension_numbers", dimension_numbers.SerializeAsString())
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddInputFromArray<qint8>(TensorShape({4, 2, 3, 2}), Arange<qint8>(-24, 24));
AddInputFromArray<qint8>(TensorShape({2, 2, 3, 3}), Arange<qint8>(-18, 18));
AddInputFromArray<float>(TensorShape({}), {2.0});
AddInputFromArray<int32>(TensorShape({}), {1});
AddInputFromArray<float>(TensorShape({}), {2.0});
AddInputFromArray<int32>(TensorShape({}), {2});
AddInputFromArray<float>(TensorShape({}), {3.0});
AddInputFromArray<int32>(TensorShape({}), {3});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_QINT32, TensorShape({2, 3, 2, 2}));
test::FillValues<qint32>(
&expected,
{1323, 1147, 795, 619, 771, 691, 531, 451, 219, 235, 267, 283,
267, 91, -261, -437, 291, 211, 51, -29, 315, 331, 363, 379});
test::ExpectTensorEqual<qint32>(expected, *GetOutput(0));
}
TEST_F(UniformQuantizedConvolutionTest,
PerTensorQuantizedSetFeatureGroupCount) {
TF_ASSERT_OK(NodeDefBuilder("test", "UniformQuantizedConvolution")
.Input(FakeInput(DT_QINT8))
.Input(FakeInput(DT_QINT8))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Attr("Tin", DT_QINT8)
.Attr("Tout", DT_QINT32)
.Attr("lhs_quantization_min_val", kInt8Min)
.Attr("lhs_quantization_max_val", kInt8Max)
.Attr("rhs_quantization_min_val", kInt8Min)
.Attr("rhs_quantization_max_val", kInt8Max)
.Attr("output_quantization_min_val", kInt32Min)
.Attr("output_quantization_max_val", kInt32Max)
.Attr("padding", "VALID")
.Attr("feature_group_count", 2)
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddInputFromArray<qint8>(TensorShape({2, 4, 3, 4}), Arange<qint8>(-48, 48));
AddInputFromArray<qint8>(TensorShape({4, 2, 2, 3}), Arange<qint8>(-24, 24));
AddInputFromArray<float>(TensorShape({}), {2.0});
AddInputFromArray<int32>(TensorShape({}), {1});
AddInputFromArray<float>(TensorShape({}), {2.0});
AddInputFromArray<int32>(TensorShape({}), {2});
AddInputFromArray<float>(TensorShape({}), {3.0});
AddInputFromArray<int32>(TensorShape({}), {3});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_QINT32, TensorShape({2, 4, 2, 2}));
test::FillValues<qint32>(
&expected, {13470, 13142, 12158, 11830, 5790, 5654, 5246, 5110,
-546, -490, -322, -266, -3618, -3370, -2626, -2378,
-2274, -2602, -3586, -3914, -738, -874, -1282, -1418,
2142, 2198, 2366, 2422, 8286, 8534, 9278, 9526});
test::ExpectTensorEqual<qint32>(expected, *GetOutput(0));
}
TEST_F(UniformQuantizedConvolutionTest, PerTensorQuantizedSetBatchGroupCount) {
TF_ASSERT_OK(NodeDefBuilder("test", "UniformQuantizedConvolution")
.Input(FakeInput(DT_QINT8))
.Input(FakeInput(DT_QINT8))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Attr("Tin", DT_QINT8)
.Attr("Tout", DT_QINT32)
.Attr("lhs_quantization_min_val", kInt8Min)
.Attr("lhs_quantization_max_val", kInt8Max)
.Attr("rhs_quantization_min_val", kInt8Min)
.Attr("rhs_quantization_max_val", kInt8Max)
.Attr("output_quantization_min_val", kInt32Min)
.Attr("output_quantization_max_val", kInt32Max)
.Attr("padding", "VALID")
.Attr("batch_group_count", 2)
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddInputFromArray<qint8>(TensorShape({4, 2, 3, 4}), Arange<qint8>(-48, 48));
AddInputFromArray<qint8>(TensorShape({4, 2, 2, 3}), Arange<qint8>(-24, 24));
AddInputFromArray<float>(TensorShape({}), {2.0});
AddInputFromArray<int32>(TensorShape({}), {1});
AddInputFromArray<float>(TensorShape({}), {2.0});
AddInputFromArray<int32>(TensorShape({}), {2});
AddInputFromArray<float>(TensorShape({}), {3.0});
AddInputFromArray<int32>(TensorShape({}), {3});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_QINT32, TensorShape({2, 4, 2, 2}));
test::FillValues<qint32>(
&expected,
{13470, 13142, 12158, 11830, 5790, 5654, 5246, 5110, 798, 854, 1022,
1078, 2334, 2582, 3326, 3574, 5598, 5270, 4286, 3958, 2526, 2390,
1982, 1846, 2142, 2198, 2366, 2422, 8286, 8534, 9278, 9526});
test::ExpectTensorEqual<qint32>(expected, *GetOutput(0));
}
TEST_F(UniformQuantizedConvolutionTest, PerTensorQuantizedSetLhsDilation) {
TF_ASSERT_OK(NodeDefBuilder("test", "UniformQuantizedConvolution")
.Input(FakeInput(DT_QINT8))
.Input(FakeInput(DT_QINT8))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Attr("Tin", DT_QINT8)
.Attr("Tout", DT_QINT32)
.Attr("lhs_quantization_min_val", kInt8Min)
.Attr("lhs_quantization_max_val", kInt8Max)
.Attr("rhs_quantization_min_val", kInt8Min)
.Attr("rhs_quantization_max_val", kInt8Max)
.Attr("output_quantization_min_val", kInt32Min)
.Attr("output_quantization_max_val", kInt32Max)
.Attr("padding", "VALID")
.Attr("lhs_dilation", {2, 2})
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddInputFromArray<qint8>(TensorShape({2, 2, 3, 4}), Arange<qint8>(-24, 24));
AddInputFromArray<qint8>(TensorShape({3, 2, 2, 3}), Arange<qint8>(-18, 18));
AddInputFromArray<float>(TensorShape({}), {2.0});
AddInputFromArray<int32>(TensorShape({}), {1});
AddInputFromArray<float>(TensorShape({}), {2.0});
AddInputFromArray<int32>(TensorShape({}), {2});
AddInputFromArray<float>(TensorShape({}), {3.0});
AddInputFromArray<int32>(TensorShape({}), {3});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_QINT32, TensorShape({2, 3, 4, 5}));
test::FillValues<qint32>(
&expected,
{1680, 819, 1595, 776, 1510, 1107, 536, 1038, 502, 968, 1339, 648,
1254, 606, 1168, 830, 398, 760, 363, 691, 496, 243, 475, 232,
454, 179, 88, 174, 86, 168, 411, 200, 390, 190, 368, 158,
78, 152, 75, 147, -688, -333, -645, -312, -602, -749, -360, -690,
-330, -632, -517, -248, -474, -226, -432, -514, -242, -456, -213, -397,
-368, -205, -453, -248, -538, -557, -296, -626, -330, -696, -709, -376,
-794, -418, -880, -834, -434, -904, -469, -973, -16, -13, -37, -24,
-58, 51, 24, 46, 22, 40, -101, -56, -122, -66, -144, 30,
14, 24, 11, 19, 336, 179, 379, 200, 422, 659, 344, 718,
374, 776, 507, 264, 550, 286, 592, 894, 462, 952, 491, 1011});
test::ExpectTensorEqual<qint32>(expected, *GetOutput(0));
}
TEST_F(UniformQuantizedConvolutionTest, PerTensorQuantizedSetRhsDilation) {
TF_ASSERT_OK(NodeDefBuilder("test", "UniformQuantizedConvolution")
.Input(FakeInput(DT_QINT8))
.Input(FakeInput(DT_QINT8))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Attr("Tin", DT_QINT8)
.Attr("Tout", DT_QINT32)
.Attr("lhs_quantization_min_val", kInt8Min)
.Attr("lhs_quantization_max_val", kInt8Max)
.Attr("rhs_quantization_min_val", kInt8Min)
.Attr("rhs_quantization_max_val", kInt8Max)
.Attr("output_quantization_min_val", kInt32Min)
.Attr("output_quantization_max_val", kInt32Max)
.Attr("padding", "VALID")
.Attr("rhs_dilation", {2, 2})
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddInputFromArray<qint8>(TensorShape({2, 2, 4, 5}), Arange<qint8>(-40, 40));
AddInputFromArray<qint8>(TensorShape({3, 2, 2, 3}), Arange<qint8>(-18, 18));
AddInputFromArray<float>(TensorShape({}), {2.0});
AddInputFromArray<int32>(TensorShape({}), {1});
AddInputFromArray<float>(TensorShape({}), {2.0});
AddInputFromArray<int32>(TensorShape({}), {2});
AddInputFromArray<float>(TensorShape({}), {3.0});
AddInputFromArray<int32>(TensorShape({}), {3});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_QINT32, TensorShape({2, 3, 2, 1}));
test::FillValues<qint32>(&expected, {6192, 5032, 1584, 1384, -3024, -2264,
-3088, -4248, -16, -216, 3056, 3816});
test::ExpectTensorEqual<qint32>(expected, *GetOutput(0));
}
TEST_F(UniformQuantizedConvolutionTest, PerChannelQuantizedDefaultAttrs) {
TF_ASSERT_OK(NodeDefBuilder("test", "UniformQuantizedConvolution")
.Input(FakeInput(DT_QINT8))
.Input(FakeInput(DT_QINT8))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Attr("Tin", DT_QINT8)
.Attr("Tout", DT_QINT32)
.Attr("rhs_quantization_axis", 0)
.Attr("lhs_quantization_min_val", kInt8Min)
.Attr("lhs_quantization_max_val", kInt8Max)
.Attr("rhs_quantization_min_val", kInt8Min)
.Attr("rhs_quantization_max_val", kInt8Max)
.Attr("output_quantization_min_val", kInt32Min)
.Attr("output_quantization_max_val", kInt32Max)
.Attr("padding", "VALID")
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddInputFromArray<qint8>(TensorShape({2, 2, 3, 4}), Arange<qint8>(-24, 24));
AddInputFromArray<qint8>(TensorShape({3, 2, 2, 3}), Arange<qint8>(-18, 18));
AddInputFromArray<float>(TensorShape({}), {2.0});
AddInputFromArray<int32>(TensorShape({}), {1});
AddInputFromArray<float>(TensorShape({3}), {2.0, 4.0, 2.0});
AddInputFromArray<int32>(TensorShape({3}), {2, 4, 2});
AddInputFromArray<float>(TensorShape({}), {3.0});
AddInputFromArray<int32>(TensorShape({}), {3});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_QINT32, TensorShape({2, 3, 2, 2}));
test::FillValues<qint32>(
&expected, {4062, 3830, 3134, 2902, 3000, 2856, 2424, 2280,
-2082, -1930, -1474, -1322, -1506, -1738, -2434, -2666,
-456, -600, -1032, -1176, 1566, 1718, 2174, 2326});
test::ExpectTensorEqual<qint32>(expected, *GetOutput(0));
}
TEST_F(UniformQuantizedConvolutionTest,
PerChannelQuantizedRhsAndOutputDefaultAttrs) {
TF_ASSERT_OK(NodeDefBuilder("test", "UniformQuantizedConvolution")
.Input(FakeInput(DT_QINT8))
.Input(FakeInput(DT_QINT8))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Attr("Tin", DT_QINT8)
.Attr("Tout", DT_QINT32)
.Attr("rhs_quantization_axis", 0)
.Attr("output_quantization_axis", 1)
.Attr("lhs_quantization_min_val", kInt8Min)
.Attr("lhs_quantization_max_val", kInt8Max)
.Attr("rhs_quantization_min_val", kInt8Min)
.Attr("rhs_quantization_max_val", kInt8Max)
.Attr("output_quantization_min_val", kInt32Min)
.Attr("output_quantization_max_val", kInt32Max)
.Attr("padding", "VALID")
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddInputFromArray<qint8>(TensorShape({2, 2, 3, 4}), Arange<qint8>(-24, 24));
AddInputFromArray<qint8>(TensorShape({3, 2, 2, 3}), Arange<qint8>(-18, 18));
AddInputFromArray<float>(TensorShape({}), {2.0});
AddInputFromArray<int32>(TensorShape({}), {1});
AddInputFromArray<float>(TensorShape({3}), {2.0, 4.0, 2.0});
AddInputFromArray<int32>(TensorShape({3}), {2, 4, 2});
AddInputFromArray<float>(TensorShape({3}), {3.0, 2.0, 1.0});
AddInputFromArray<int32>(TensorShape({3}), {3, 2, 1});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_QINT32, TensorShape({2, 3, 2, 2}));
test::FillValues<qint32>(
&expected, {4062, 3830, 3134, 2902, 4498, 4282, 3634, 3418,
-6255, -5799, -4431, -3975, -1506, -1738, -2434, -2666,
-686, -902, -1550, -1766, 4689, 5145, 6513, 6969});
test::ExpectTensorEqual<qint32>(expected, *GetOutput(0));
}
TEST_F(UniformQuantizedConvolutionTest, PerChannelQuantizedTFConv2DLikeConfig) {
UniformQuantizedConvolutionDimensionNumbersAttr dimension_numbers;
ASSERT_TRUE(TextFormat::ParseFromString(R"pb(
input_batch_dimension: 0
input_feature_dimension: 3
input_spatial_dimensions: 1
input_spatial_dimensions: 2
kernel_output_feature_dimension: 3
kernel_input_feature_dimension: 2
kernel_spatial_dimensions: 0
kernel_spatial_dimensions: 1
output_batch_dimension: 0
output_feature_dimension: 3
output_spatial_dimensions: 1
output_spatial_dimensions: 2
)pb",
&dimension_numbers));
TF_ASSERT_OK(
NodeDefBuilder("test", "UniformQuantizedConvolution")
.Input(FakeInput(DT_QINT8))
.Input(FakeInput(DT_QINT8))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Attr("Tin", DT_QINT8)
.Attr("Tout", DT_QINT32)
.Attr("rhs_quantization_axis", 3)
.Attr("lhs_quantization_min_val", kInt8Min)
.Attr("lhs_quantization_max_val", kInt8Max)
.Attr("rhs_quantization_min_val", kInt8Min)
.Attr("rhs_quantization_max_val", kInt8Max)
.Attr("output_quantization_min_val", kInt32Min)
.Attr("output_quantization_max_val", kInt32Max)
.Attr("padding", "VALID")
.Attr("dimension_numbers", dimension_numbers.SerializeAsString())
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddInputFromArray<qint8>(TensorShape({2, 3, 4, 2}), Arange<qint8>(-24, 24));
AddInputFromArray<qint8>(TensorShape({2, 3, 2, 3}), Arange<qint8>(-18, 18));
AddInputFromArray<float>(TensorShape({}), {2.0});
AddInputFromArray<int32>(TensorShape({}), {1});
AddInputFromArray<float>(TensorShape({3}), {2.0, 4.0, 2.0});
AddInputFromArray<int32>(TensorShape({3}), {2, 4, 2});
AddInputFromArray<float>(TensorShape({}), {3.0});
AddInputFromArray<int32>(TensorShape({}), {3});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_QINT32, TensorShape({2, 2, 2, 3}));
test::FillValues<qint32>(
&expected,
{1755, 4099, 1163, 1643, 3811, 1115, 1307, 2947, 971, 1195, 2659, 923,
411, 643, 587, 299, 355, 539, -37, -509, 395, -149, -797, 347});
test::ExpectTensorEqual<qint32>(expected, *GetOutput(0));
}
TEST_F(UniformQuantizedConvolutionTest,
PerChannelQuantizedTFDepthwiseConv2DLikeConfig) {
UniformQuantizedConvolutionDimensionNumbersAttr dimension_numbers;
ASSERT_TRUE(TextFormat::ParseFromString(R"pb(
input_batch_dimension: 0
input_feature_dimension: 3
input_spatial_dimensions: 1
input_spatial_dimensions: 2
kernel_output_feature_dimension: 3
kernel_input_feature_dimension: 2
kernel_spatial_dimensions: 0
kernel_spatial_dimensions: 1
output_batch_dimension: 0
output_feature_dimension: 3
output_spatial_dimensions: 1
output_spatial_dimensions: 2
)pb",
&dimension_numbers));
TF_ASSERT_OK(
NodeDefBuilder("test", "UniformQuantizedConvolution")
.Input(FakeInput(DT_QINT8))
.Input(FakeInput(DT_QINT8))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Attr("Tin", DT_QINT8)
.Attr("Tout", DT_QINT32)
.Attr("rhs_quantization_axis", 3)
.Attr("lhs_quantization_min_val", kInt8Min)
.Attr("lhs_quantization_max_val", kInt8Max)
.Attr("rhs_quantization_min_val", kInt8Min)
.Attr("rhs_quantization_max_val", kInt8Max)
.Attr("output_quantization_min_val", kInt32Min)
.Attr("output_quantization_max_val", kInt32Max)
.Attr("padding", "VALID")
.Attr("feature_group_count", 2)
.Attr("dimension_numbers", dimension_numbers.SerializeAsString())
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddInputFromArray<qint8>(TensorShape({2, 3, 4, 2}), Arange<qint8>(-24, 24));
AddInputFromArray<qint8>(TensorShape({2, 3, 1, 2}), Arange<qint8>(-6, 6));
AddInputFromArray<float>(TensorShape({}), {2.0});
AddInputFromArray<int32>(TensorShape({}), {1});
AddInputFromArray<float>(TensorShape({2}), {2.0, 4.0});
AddInputFromArray<int32>(TensorShape({2}), {2, 4});
AddInputFromArray<float>(TensorShape({}), {3.0});
AddInputFromArray<int32>(TensorShape({}), {3});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_QINT32, TensorShape({2, 2, 2, 2}));
test::FillValues<qint32>(
&expected, {576, 1390, 528, 1262, 384, 878, 336, 750, 0, -146, -48, -274,
-192, -658, -240, -786});
test::ExpectTensorEqual<qint32>(expected, *GetOutput(0));
}
TEST_F(UniformQuantizedConvolutionTest, PerChannelQuantizedTFConv3DLikeConfig) {
UniformQuantizedConvolutionDimensionNumbersAttr dimension_numbers;
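// Layout matching tf.nn.conv3d: NDHWC input/output, DHWIO kernel.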
ASSERT_TRUE(TextFormat::ParseFromString(R"pb(
input_batch_dimension: 0
input_feature_dimension: 4
input_spatial_dimensions: 1
input_spatial_dimensions: 2
input_spatial_dimensions: 3
kernel_output_feature_dimension: 4
kernel_input_feature_dimension: 3
kernel_spatial_dimensions: 0
kernel_spatial_dimensions: 1
kernel_spatial_dimensions: 2
output_batch_dimension: 0
output_feature_dimension: 4
output_spatial_dimensions: 1
output_spatial_dimensions: 2
output_spatial_dimensions: 3
)pb",
&dimension_numbers));
TF_ASSERT_OK(
NodeDefBuilder("test", "UniformQuantizedConvolution")
.Input(FakeInput(DT_QINT8))
.Input(FakeInput(DT_QINT8))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Attr("Tin", DT_QINT8)
.Attr("Tout", DT_QINT32)
.Attr("rhs_quantization_axis", 4)
.Attr("lhs_quantization_min_val", kInt8Min)
.Attr("lhs_quantization_max_val", kInt8Max)
.Attr("rhs_quantization_min_val", kInt8Min)
.Attr("rhs_quantization_max_val", kInt8Max)
.Attr("output_quantization_min_val", kInt32Min)
.Attr("output_quantization_max_val", kInt32Max)
.Attr("padding", "VALID")
.Attr("dimension_numbers", dimension_numbers.SerializeAsString())
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddInputFromArray<qint8>(TensorShape({2, 3, 4, 2, 2}),
Arange<qint8>(-50, 46));
AddInputFromArray<qint8>(TensorShape({2, 3, 2, 2, 2}),
Arange<qint8>(-24, 24));
AddInputFromArray<float>(TensorShape({}), {2.0});
AddInputFromArray<int32>(TensorShape({}), {1});
AddInputFromArray<float>(TensorShape({2}), {2.0, 4.0});
AddInputFromArray<int32>(TensorShape({2}), {2, 4});
AddInputFromArray<float>(TensorShape({}), {3.0});
AddInputFromArray<int32>(TensorShape({}), {3});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_QINT32, TensorShape({2, 2, 2, 1, 2}));
test::FillValues<qint32>(
&expected, {7438, 17272, 7054, 16248, 5902, 13176, 5518, 12152, 2830,
4984, 2446, 3960, 1294, 888, 910, -136});
test::ExpectTensorEqual<qint32>(expected, *GetOutput(0));
}
TEST_F(UniformQuantizedConvolutionTest, HybridPerTensorQuantizedDefaultAttrs) {
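// "Hybrid" variant: float lhs, quantized qint8 rhs, float output.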
TF_ASSERT_OK(NodeDefBuilder("test", "UniformQuantizedConvolutionHybrid")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_QINT8))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Attr("Tlhs", DT_FLOAT)
.Attr("Trhs", DT_QINT8)
.Attr("Tout", DT_FLOAT)
.Attr("rhs_quantization_min_val", kInt8Min)
.Attr("rhs_quantization_max_val", kInt8Max)
.Attr("padding", "VALID")
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddInputFromArray<float>(TensorShape({2, 2, 3, 4}),
Arange<float>(-50, 46, 2));
AddInputFromArray<qint8>(TensorShape({3, 2, 2, 3}), Arange<qint8>(-18, 18));
AddInputFromArray<float>(TensorShape({}), {2.0});
AddInputFromArray<int32>(TensorShape({}), {2});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({2, 3, 2, 2}));
test::FillValues<float>(
&expected,
{12176., 11480., 9392., 8696., 2960., 2840., 2480., 2360.,
-6256., -5800., -4432., -3976., -4528., -5224., -7312., -8008.,
80., -40., -400., -520., 4688., 5144., 6512., 6968.});
test::ExpectClose(expected, *GetOutput(0), 1, 0.01);
}
TEST_F(UniformQuantizedConvolutionTest, HybridPerTensorQuantizedSetStrides) {
TF_ASSERT_OK(NodeDefBuilder("test", "UniformQuantizedConvolutionHybrid")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_QINT8))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Attr("Tlhs", DT_FLOAT)
.Attr("Trhs", DT_QINT8)
.Attr("Tout", DT_FLOAT)
.Attr("rhs_quantization_min_val", kInt8Min)
.Attr("rhs_quantization_max_val", kInt8Max)
.Attr("padding", "VALID")
.Attr("window_strides", {2, 3})
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddInputFromArray<float>(TensorShape({2, 2, 3, 4}),
Arange<float>(-50, 46, 2));
AddInputFromArray<qint8>(TensorShape({3, 2, 2, 3}), Arange<qint8>(-18, 18));
AddInputFromArray<float>(TensorShape({}), {2.0});
AddInputFromArray<int32>(TensorShape({}), {2});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({2, 3, 1, 1}));
test::FillValues<float>(&expected,
{12176., 2960., -6256., -4528., 80., 4688.});
test::ExpectClose(expected, *GetOutput(0), 1, 0.01);
}
TEST_F(UniformQuantizedConvolutionTest, HybridPerTensorQuantizedSetPadding) {
TF_ASSERT_OK(NodeDefBuilder("test", "UniformQuantizedConvolutionHybrid")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_QINT8))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Attr("Tlhs", DT_FLOAT)
.Attr("Trhs", DT_QINT8)
.Attr("Tout", DT_FLOAT)
.Attr("rhs_quantization_min_val", kInt8Min)
.Attr("rhs_quantization_max_val", kInt8Max)
.Attr("padding", "EXPLICIT")
.Attr("explicit_padding", {0, 1, 1, 2})
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddInputFromArray<float>(TensorShape({2, 2, 3, 4}),
Arange<float>(-50, 46, 2));
AddInputFromArray<qint8>(TensorShape({3, 2, 2, 3}), Arange<qint8>(-18, 18));
AddInputFromArray<float>(TensorShape({}), {2.0});
AddInputFromArray<int32>(TensorShape({}), {2});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({2, 3, 3, 5}));
test::FillValues<float>(
&expected,
{8072., 12176., 11480., 7640., 3808., 6280., 9392., 8696., 5720.,
2816., 2896., 4288., 3904., 2536., 1232., 1736., 2960., 2840.,
2072., 1120., 1480., 2480., 2360., 1688., 896., 880., 1408.,
1312., 904., 464., -4600., -6256., -5800., -3496., -1568., -3320.,
-4432., -3976., -2344., -1024., -1136., -1472., -1280., -728., -304.,
-2680., -4528., -5224., -3880., -2144., -4472., -7312., -8008., -5800.,
-3136., -3056., -4928., -5312., -3800., -2032., 200., 80., -40.,
-232., -224., -56., -400., -520., -616., -448., -464., -896.,
-992., -824., -496., 3080., 4688., 5144., 3416., 1696., 4360.,
6512., 6968., 4568., 2240., 2128., 3136., 3328., 2152., 1040.});
test::ExpectClose(expected, *GetOutput(0), 1.5, 0.04);
}
TEST_F(UniformQuantizedConvolutionTest,
HybridPerTensorQuantizedSetExplicitPadding) {
TF_ASSERT_OK(NodeDefBuilder("test", "UniformQuantizedConvolutionHybrid")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_QINT8))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Attr("Tlhs", DT_FLOAT)
.Attr("Trhs", DT_QINT8)
.Attr("Tout", DT_FLOAT)
.Attr("rhs_quantization_min_val", kInt8Min)
.Attr("rhs_quantization_max_val", kInt8Max)
.Attr("padding", "EXPLICIT")
.Attr("explicit_padding", {0, 1, 1, 2})
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddInputFromArray<float>(TensorShape({2, 2, 3, 4}),
Arange<float>(-50, 46, 2));
AddInputFromArray<qint8>(TensorShape({3, 2, 2, 3}), Arange<qint8>(-18, 18));
AddInputFromArray<float>(TensorShape({}), {2.0});
AddInputFromArray<int32>(TensorShape({}), {2});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({2, 3, 3, 5}));
test::FillValues<float>(
&expected,
{8072., 12176., 11480., 7640., 3808., 6280., 9392., 8696., 5720.,
2816., 2896., 4288., 3904., 2536., 1232., 1736., 2960., 2840.,
2072., 1120., 1480., 2480., 2360., 1688., 896., 880., 1408.,
1312., 904., 464., -4600., -6256., -5800., -3496., -1568., -3320.,
-4432., -3976., -2344., -1024., -1136., -1472., -1280., -728., -304.,
-2680., -4528., -5224., -3880., -2144., -4472., -7312., -8008., -5800.,
-3136., -3056., -4928., -5312., -3800., -2032., 200., 80., -40.,
-232., -224., -56., -400., -520., -616., -448., -464., -896.,
-992., -824., -496., 3080., 4688., 5144., 3416., 1696., 4360.,
6512., 6968., 4568., 2240., 2128., 3136., 3328., 2152., 1040.});
test::ExpectClose(expected, *GetOutput(0), 1.5, 0.04);
}
TEST_F(UniformQuantizedConvolutionTest,
HybridPerTensorQuantizedSetDimensionNumbers) {
UniformQuantizedConvolutionDimensionNumbersAttr dimension_numbers;
ASSERT_TRUE(TextFormat::ParseFromString(R"pb(
input_batch_dimension: 1
input_feature_dimension: 3
input_spatial_dimensions: 2
input_spatial_dimensions: 0
kernel_output_feature_dimension: 2
kernel_input_feature_dimension: 1
kernel_spatial_dimensions: 0
kernel_spatial_dimensions: 3
output_batch_dimension: 2
output_feature_dimension: 1
output_spatial_dimensions: 3
output_spatial_dimensions: 0
)pb",
&dimension_numbers));
TF_ASSERT_OK(
NodeDefBuilder("test", "UniformQuantizedConvolutionHybrid")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_QINT8))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Attr("Tlhs", DT_FLOAT)
.Attr("Trhs", DT_QINT8)
.Attr("Tout", DT_FLOAT)
.Attr("rhs_quantization_axis", -1)
.Attr("rhs_quantization_min_val", kInt8Min)
.Attr("rhs_quantization_max_val", kInt8Max)
.Attr("padding", "VALID")
.Attr("dimension_numbers", dimension_numbers.SerializeAsString())
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddInputFromArray<float>(TensorShape({4, 2, 3, 2}),
Arange<float>(-50, 46, 2));
AddInputFromArray<qint8>(TensorShape({2, 2, 3, 3}), Arange<qint8>(-18, 18));
AddInputFromArray<float>(TensorShape({}), {2.0});
AddInputFromArray<int32>(TensorShape({}), {2});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({2, 3, 2, 2}));
test::FillValues<float>(
&expected, {3960., 3432., 2376., 1848., 2304., 2064., 1584., 1344.,
648., 696., 792., 840., 792., 264., -792., -1320.,
864., 624., 144., -96., 936., 984., 1080., 1128.});
test::ExpectClose(expected, *GetOutput(0), 10, 0.02);
}
TEST_F(UniformQuantizedConvolutionTest,
HybridPerTensorQuantizedSetFeatureGroupCount) {
TF_ASSERT_OK(NodeDefBuilder("test", "UniformQuantizedConvolutionHybrid")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_QINT8))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Attr("Tlhs", DT_FLOAT)
.Attr("Trhs", DT_QINT8)
.Attr("Tout", DT_FLOAT)
.Attr("rhs_quantization_min_val", kInt8Min)
.Attr("rhs_quantization_max_val", kInt8Max)
.Attr("padding", "VALID")
.Attr("feature_group_count", 2)
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddInputFromArray<float>(TensorShape({2, 4, 3, 4}),
Arange<float>(-98, 94, 2));
AddInputFromArray<qint8>(TensorShape({4, 2, 2, 3}), Arange<qint8>(-24, 24));
AddInputFromArray<float>(TensorShape({}), {2.0});
AddInputFromArray<int32>(TensorShape({}), {2});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({2, 4, 2, 2}));
test::FillValues<float>(
&expected,
{40400., 39416., 36464., 35480., 17360., 16952., 15728., 15320.,
-1648., -1480., -976., -808., -10864., -10120., -7888., -7144.,
-6832., -7816., -10768., -11752., -2224., -2632., -3856., -4264.,
6416., 6584., 7088., 7256., 24848., 25592., 27824., 28568.});
test::ExpectClose(expected, *GetOutput(0), 1, 0.01);
}
TEST_F(UniformQuantizedConvolutionTest,
HybridPerTensorQuantizedSetBatchGroupCount) {
TF_ASSERT_OK(NodeDefBuilder("test", "UniformQuantizedConvolutionHybrid")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_QINT8))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Attr("Tlhs", DT_FLOAT)
.Attr("Trhs", DT_QINT8)
.Attr("Tout", DT_FLOAT)
.Attr("rhs_quantization_min_val", kInt8Min)
.Attr("rhs_quantization_max_val", kInt8Max)
.Attr("padding", "VALID")
.Attr("batch_group_count", 2)
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddInputFromArray<float>(TensorShape({4, 2, 3, 4}),
Arange<float>(-98, 94, 2));
AddInputFromArray<qint8>(TensorShape({4, 2, 2, 3}), Arange<qint8>(-24, 24));
AddInputFromArray<float>(TensorShape({}), {2.0});
AddInputFromArray<int32>(TensorShape({}), {2});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({2, 4, 2, 2}));
test::FillValues<float>(
&expected,
{40400., 39416., 36464., 35480., 17360., 16952., 15728., 15320.,
2384., 2552., 3056., 3224., 6992., 7736., 9968., 10712.,
16784., 15800., 12848., 11864., 7568., 7160., 5936., 5528.,
6416., 6584., 7088., 7256., 24848., 25592., 27824., 28568.});
test::ExpectClose(expected, *GetOutput(0), 1, 0.01);
}
TEST_F(UniformQuantizedConvolutionTest,
HybridPerTensorQuantizedSetLhsDilation) {
TF_ASSERT_OK(NodeDefBuilder("test", "UniformQuantizedConvolutionHybrid")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_QINT8))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Attr("Tlhs", DT_FLOAT)
.Attr("Trhs", DT_QINT8)
.Attr("Tout", DT_FLOAT)
.Attr("rhs_quantization_min_val", kInt8Min)
.Attr("rhs_quantization_max_val", kInt8Max)
.Attr("padding", "VALID")
.Attr("lhs_dilation", {2, 2})
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddInputFromArray<float>(TensorShape({2, 2, 3, 4}),
Arange<float>(-50, 46, 2));
AddInputFromArray<qint8>(TensorShape({3, 2, 2, 3}), Arange<qint8>(-18, 18));
AddInputFromArray<float>(TensorShape({}), {2.0});
AddInputFromArray<int32>(TensorShape({}), {2});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({2, 3, 4, 5}));
test::FillValues<float>(
&expected,
{5032., 2448., 4776., 2320., 4520., 3312., 1600., 3104., 1496.,
2896., 4008., 1936., 3752., 1808., 3496., 2480., 1184., 2272.,
1080., 2064., 1480., 720., 1416., 688., 1352., 528., 256.,
512., 248., 496., 1224., 592., 1160., 560., 1096., 464.,
224., 448., 216., 432., -2072., -1008., -1944., -944., -1816.,
-2256., -1088., -2080., -1000., -1904., -1560., -752., -1432., -688.,
-1304., -1552., -736., -1376., -648., -1200., -1112., -624., -1368.,
-752., -1624., -1680., -896., -1888., -1000., -2096., -2136., -1136.,
-2392., -1264., -2648., -2512., -1312., -2720., -1416., -2928., -56.,
-48., -120., -80., -184., 144., 64., 128., 56., 112.,
-312., -176., -376., -208., -440., 80., 32., 64., 24.,
48., 1000., 528., 1128., 592., 1256., 1968., 1024., 2144.,
1112., 2320., 1512., 784., 1640., 848., 1768., 2672., 1376.,
2848., 1464., 3024.});
test::ExpectClose(expected, *GetOutput(0), 1, 0.01);
}
TEST_F(UniformQuantizedConvolutionTest,
HybridPerTensorQuantizedSetRhsDilation) {
TF_ASSERT_OK(NodeDefBuilder("test", "UniformQuantizedConvolutionHybrid")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_QINT8))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Attr("Tlhs", DT_FLOAT)
.Attr("Trhs", DT_QINT8)
.Attr("Tout", DT_FLOAT)
.Attr("rhs_quantization_min_val", kInt8Min)
.Attr("rhs_quantization_max_val", kInt8Max)
.Attr("padding", "VALID")
.Attr("rhs_dilation", {2, 2})
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddInputFromArray<float>(TensorShape({2, 2, 4, 5}),
Arange<float>(-82, 78, 2));
AddInputFromArray<qint8>(TensorShape({3, 2, 2, 3}), Arange<qint8>(-18, 18));
AddInputFromArray<float>(TensorShape({}), {2.0});
AddInputFromArray<int32>(TensorShape({}), {2});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({2, 3, 2, 1}));
test::FillValues<float>(
&expected, {18568., 15088., 4744., 4144., -9080., -6800., -9272., -12752.,
-56., -656., 9160., 11440.});
test::ExpectClose(expected, *GetOutput(0), 1, 0.01);
}
TEST_F(UniformQuantizedConvolutionTest, HybridPerChannelQuantizedDefaultAttrs) {
TF_ASSERT_OK(NodeDefBuilder("test", "UniformQuantizedConvolutionHybrid")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_QINT8))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Attr("Tlhs", DT_FLOAT)
.Attr("Trhs", DT_QINT8)
.Attr("Tout", DT_FLOAT)
.Attr("rhs_quantization_axis", 0)
.Attr("rhs_quantization_min_val", kInt8Min)
.Attr("rhs_quantization_max_val", kInt8Max)
.Attr("padding", "VALID")
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddInputFromArray<float>(TensorShape({2, 2, 3, 4}),
Arange<float>(-50, 46, 2));
AddInputFromArray<qint8>(TensorShape({3, 2, 2, 3}), Arange<qint8>(-18, 18));
AddInputFromArray<float>(TensorShape({3}), {2.0, 4.0, 2.0});
AddInputFromArray<int32>(TensorShape({3}), {2, 4, 2});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({2, 3, 2, 2}));
test::FillValues<float>(
&expected,
{12176., 11480., 9392., 8696., 8992., 8560., 7264., 6832.,
-6256., -5800., -4432., -3976., -4528., -5224., -7312., -8008.,
-1376., -1808., -3104., -3536., 4688., 5144., 6512., 6968.});
test::ExpectClose(expected, *GetOutput(0), 1, 0.01);
}
TEST_F(UniformQuantizedConvolutionTest,
HybridPerChannelQuantizedTFConv2DLikeConfig) {
UniformQuantizedConvolutionDimensionNumbersAttr dimension_numbers;
ASSERT_TRUE(TextFormat::ParseFromString(R"pb(
input_batch_dimension: 0
input_feature_dimension: 3
input_spatial_dimensions: 1
input_spatial_dimensions: 2
kernel_output_feature_dimension: 3
kernel_input_feature_dimension: 2
kernel_spatial_dimensions: 0
kernel_spatial_dimensions: 1
output_batch_dimension: 0
output_feature_dimension: 3
output_spatial_dimensions: 1
output_spatial_dimensions: 2
)pb",
&dimension_numbers));
TF_ASSERT_OK(
NodeDefBuilder("test", "UniformQuantizedConvolutionHybrid")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_QINT8))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Attr("Tlhs", DT_FLOAT)
.Attr("Trhs", DT_QINT8)
.Attr("Tout", DT_FLOAT)
.Attr("rhs_quantization_axis", 3)
.Attr("rhs_quantization_min_val", kInt8Min)
.Attr("rhs_quantization_max_val", kInt8Max)
.Attr("padding", "VALID")
.Attr("dimension_numbers", dimension_numbers.SerializeAsString())
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddInputFromArray<float>(TensorShape({2, 3, 4, 2}),
Arange<float>(-50, 46, 2));
AddInputFromArray<qint8>(TensorShape({2, 3, 2, 3}), Arange<qint8>(-18, 18));
AddInputFromArray<float>(TensorShape({3}), {2.0, 4.0, 2.0});
AddInputFromArray<int32>(TensorShape({3}), {2, 4, 2});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({2, 2, 2, 3}));
test::FillValues<float>(
&expected, {5256., 12288., 3480., 4920., 11424., 3336., 3912., 8832.,
2904., 3576., 7968., 2760., 1224., 1920., 1752., 888.,
1056., 1608., -120., -1536., 1176., -456., -2400., 1032.});
test::ExpectClose(expected, *GetOutput(0), 4, 0.04);
}
TEST_F(UniformQuantizedConvolutionTest,
HybridPerChannelQuantizedTFDepthwiseConv2DLikeConfig) {
UniformQuantizedConvolutionDimensionNumbersAttr dimension_numbers;
ASSERT_TRUE(TextFormat::ParseFromString(R"pb(
input_batch_dimension: 0
input_feature_dimension: 3
input_spatial_dimensions: 1
input_spatial_dimensions: 2
kernel_output_feature_dimension: 3
kernel_input_feature_dimension: 2
kernel_spatial_dimensions: 0
kernel_spatial_dimensions: 1
output_batch_dimension: 0
output_feature_dimension: 3
output_spatial_dimensions: 1
output_spatial_dimensions: 2
)pb",
&dimension_numbers));
TF_ASSERT_OK(
NodeDefBuilder("test", "UniformQuantizedConvolutionHybrid")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_QINT8))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Attr("Tlhs", DT_FLOAT)
.Attr("Trhs", DT_QINT8)
.Attr("Tout", DT_FLOAT)
.Attr("rhs_quantization_axis", 3)
.Attr("rhs_quantization_min_val", kInt8Min)
.Attr("rhs_quantization_max_val", kInt8Max)
.Attr("padding", "VALID")
.Attr("feature_group_count", 2)
.Attr("dimension_numbers", dimension_numbers.SerializeAsString())
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddInputFromArray<float>(TensorShape({2, 3, 4, 2}),
Arange<float>(-50, 46, 2));
AddInputFromArray<qint8>(TensorShape({2, 3, 1, 2}), Arange<qint8>(-6, 6));
AddInputFromArray<float>(TensorShape({2}), {2.0, 4.0});
AddInputFromArray<int32>(TensorShape({2}), {2, 4});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({2, 2, 2, 2}));
test::FillValues<float>(
&expected, {1720., 4160., 1576., 3776., 1144., 2624., 1000., 2240., -8.,
-448., -152., -832., -584., -1984., -728., -2368.});
test::ExpectClose(expected, *GetOutput(0), 1, 0.01);
}
TEST_F(UniformQuantizedConvolutionTest,
HybridPerChannelQuantizedTFConv3DLikeConfig) {
UniformQuantizedConvolutionDimensionNumbersAttr dimension_numbers;
ASSERT_TRUE(TextFormat::ParseFromString(R"pb(
input_batch_dimension: 0
input_feature_dimension: 4
input_spatial_dimensions: 1
input_spatial_dimensions: 2
input_spatial_dimensions: 3
kernel_output_feature_dimension: 4
kernel_input_feature_dimension: 3
kernel_spatial_dimensions: 0
kernel_spatial_dimensions: 1
kernel_spatial_dimensions: 2
output_batch_dimension: 0
output_feature_dimension: 4
output_spatial_dimensions: 1
output_spatial_dimensions: 2
output_spatial_dimensions: 3
)pb",
&dimension_numbers));
TF_ASSERT_OK(
NodeDefBuilder("test", "UniformQuantizedConvolutionHybrid")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_QINT8))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Attr("Tlhs", DT_FLOAT)
.Attr("Trhs", DT_QINT8)
.Attr("Tout", DT_FLOAT)
.Attr("rhs_quantization_axis", 4)
.Attr("rhs_quantization_min_val", kInt8Min)
.Attr("rhs_quantization_max_val", kInt8Max)
.Attr("padding", "VALID")
.Attr("dimension_numbers", dimension_numbers.SerializeAsString())
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddInputFromArray<float>(TensorShape({2, 3, 4, 2, 2}),
Arange<float>(-50, 46));
AddInputFromArray<qint8>(TensorShape({2, 3, 2, 2, 2}),
Arange<qint8>(-24, 24));
AddInputFromArray<float>(TensorShape({2}), {2.0, 4.0});
AddInputFromArray<int32>(TensorShape({2}), {2, 4});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({2, 2, 2, 1, 2}));
test::FillValues<float>(
&expected, {11008., 25520., 10432., 23984., 8704., 19376., 8128., 17840.,
4096., 7088., 3520., 5552., 1792., 944., 1216., -592.});
test::ExpectClose(expected, *GetOutput(0), 11, 0.02);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/uniform_quant_ops/uniform_quantized_convolution_ops.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/uniform_quant_ops/uniform_quantized_convolution_ops_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
c3b5f314-0c26-448c-8d86-c1abcf0736a6 | cpp | google/tensorstore | driver | tensorstore/kvstore/ocdbt/driver.cc | tensorstore/kvstore/ocdbt/distributed/driver_test.cc | #include "tensorstore/kvstore/ocdbt/driver.h"
#include <stddef.h>
#include <stdint.h>
#include <cstring>
#include <optional>
#include <string>
#include <string_view>
#include <utility>
#include "absl/status/status.h"
#include "absl/strings/match.h"
#include "absl/time/time.h"
#include "tensorstore/context.h"
#include "tensorstore/context_resource_provider.h"
#include "tensorstore/internal/cache/cache_pool_resource.h"
#include "tensorstore/internal/data_copy_concurrency_resource.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/internal/json_binding/bindable.h"
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/internal/metrics/counter.h"
#include "tensorstore/internal/path.h"
#include "tensorstore/internal/ref_counted_string.h"
#include "tensorstore/kvstore/common_metrics.h"
#include "tensorstore/kvstore/driver.h"
#include "tensorstore/kvstore/generation.h"
#include "tensorstore/kvstore/key_range.h"
#include "tensorstore/kvstore/kvstore.h"
#include "tensorstore/kvstore/ocdbt/btree_writer.h"
#include "tensorstore/kvstore/ocdbt/config.h"
#include "tensorstore/kvstore/ocdbt/distributed/btree_writer.h"
#include "tensorstore/kvstore/ocdbt/distributed/rpc_security.h"
#include "tensorstore/kvstore/ocdbt/distributed/rpc_security_registry.h"
#include "tensorstore/kvstore/ocdbt/format/manifest.h"
#include "tensorstore/kvstore/ocdbt/io/io_handle_impl.h"
#include "tensorstore/kvstore/ocdbt/io_handle.h"
#include "tensorstore/kvstore/ocdbt/non_distributed/btree_writer.h"
#include "tensorstore/kvstore/ocdbt/non_distributed/list.h"
#include "tensorstore/kvstore/ocdbt/non_distributed/read.h"
#include "tensorstore/kvstore/ocdbt/non_distributed/transactional_btree_writer.h"
#include "tensorstore/kvstore/operations.h"
#include "tensorstore/kvstore/read_result.h"
#include "tensorstore/kvstore/registry.h"
#include "tensorstore/kvstore/spec.h"
#include "tensorstore/kvstore/supported_features.h"
#include "tensorstore/open_mode.h"
#include "tensorstore/transaction.h"
#include "tensorstore/util/executor.h"
#include "tensorstore/util/future.h"
#include "tensorstore/util/quote_string.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/str_cat.h"
#include "tensorstore/internal/cache_key/absl_time.h"
#include "tensorstore/internal/cache_key/std_optional.h"
#include "tensorstore/internal/json_binding/absl_time.h"
#include "tensorstore/internal/json_binding/std_optional.h"
using ::tensorstore::kvstore::ListReceiver;
namespace tensorstore {
namespace internal_ocdbt {
namespace {
namespace jb = ::tensorstore::internal_json_binding;
struct OcdbtMetrics : public internal_kvstore::CommonReadMetrics,
public internal_kvstore::CommonWriteMetrics {};
auto ocdbt_metrics = []() -> OcdbtMetrics {
return {TENSORSTORE_KVSTORE_COMMON_READ_METRICS(ocdbt),
TENSORSTORE_KVSTORE_COMMON_WRITE_METRICS(ocdbt)};
}();
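// Defaults applied when the spec leaves them unset: a 10-second coordinator lease and a
// 2 GiB target data file size.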
constexpr absl::Duration kDefaultLeaseDuration = absl::Seconds(10);
constexpr size_t kDefaultTargetBufferSize = 2u << 30;
struct OcdbtCoordinatorResourceTraits
: public internal::ContextResourceTraits<OcdbtCoordinatorResource> {
using Spec = OcdbtCoordinatorResource::Spec;
using Resource = OcdbtCoordinatorResource::Resource;
static Spec Default() { return {}; }
static constexpr auto JsonBinder() {
namespace jb = tensorstore::internal_json_binding;
return jb::Object(
jb::Member("address", jb::Projection<&Spec::address>()),
jb::Member("lease_duration", jb::Projection<&Spec::lease_duration>()),
jb::Member("security", jb::Projection<&Spec::security>(
RpcSecurityMethodJsonBinder)));
}
static Result<Resource> Create(
const Spec& spec, internal::ContextResourceCreationContext context) {
return spec;
}
static Spec GetSpec(const Resource& resource,
const internal::ContextSpecBuilder& builder) {
return resource;
}
};
const internal::ContextResourceRegistration<OcdbtCoordinatorResourceTraits>
registration;
}
TENSORSTORE_DEFINE_JSON_DEFAULT_BINDER(
OcdbtDriverSpecData,
jb::Object(
jb::Member("base", jb::Projection<&OcdbtDriverSpecData::base>()),
jb::Member("manifest",
jb::Projection<&OcdbtDriverSpecData::manifest>()),
jb::Initialize([](auto* obj) {
internal::EnsureDirectoryPath(obj->base.path);
if (obj->manifest) {
internal::EnsureDirectoryPath(obj->manifest->path);
}
return absl::OkStatus();
}),
jb::Member("config", jb::Projection<&OcdbtDriverSpecData::config>(
jb::DefaultInitializedValue())),
jb::Projection<&OcdbtDriverSpecData::data_file_prefixes>(jb::Sequence(
jb::Member("value_data_prefix",
jb::Projection<&DataFilePrefixes::value>(
jb::DefaultValue([](auto* v) { *v = "d/"; }))),
jb::Member("btree_node_data_prefix",
jb::Projection<&DataFilePrefixes::btree_node>(
jb::DefaultValue([](auto* v) { *v = "d/"; }))),
jb::Member("version_tree_node_data_prefix",
jb::Projection<&DataFilePrefixes::version_tree_node>(
jb::DefaultValue([](auto* v) { *v = "d/"; }))))),
jb::Member("assume_config",
jb::Projection<&OcdbtDriverSpecData::assume_config>(
jb::DefaultInitializedValue())),
jb::Member(
"experimental_read_coalescing_threshold_bytes",
jb::Projection<&OcdbtDriverSpecData::
experimental_read_coalescing_threshold_bytes>()),
jb::Member(
"experimental_read_coalescing_merged_bytes",
jb::Projection<&OcdbtDriverSpecData::
experimental_read_coalescing_merged_bytes>()),
jb::Member(
"experimental_read_coalescing_interval",
jb::Projection<
&OcdbtDriverSpecData::experimental_read_coalescing_interval>()),
jb::Member(
"target_data_file_size",
jb::Projection<&OcdbtDriverSpecData::target_data_file_size>()),
jb::Member("coordinator",
jb::Projection<&OcdbtDriverSpecData::coordinator>()),
jb::Member(internal::CachePoolResource::id,
jb::Projection<&OcdbtDriverSpecData::cache_pool>()),
jb::Member(
internal::DataCopyConcurrencyResource::id,
jb::Projection<&OcdbtDriverSpecData::data_copy_concurrency>())));
Result<kvstore::Spec> OcdbtDriverSpec::GetBase(std::string_view path) const {
return data_.base;
}
Future<kvstore::DriverPtr> OcdbtDriverSpec::DoOpen() const {
auto base_kvstore_future = kvstore::Open(data_.base);
Future<kvstore::KvStore> manifest_kvstore_future =
data_.manifest ? kvstore::Open(*data_.manifest)
: Future<kvstore::KvStore>(kvstore::KvStore{});
return MapFutureValue(
InlineExecutor{},
[spec = internal::IntrusivePtr<const OcdbtDriverSpec>(this)](
kvstore::KvStore& base_kvstore,
kvstore::KvStore& manifest_kvstore) -> Result<kvstore::DriverPtr> {
auto driver = internal::MakeIntrusivePtr<OcdbtDriver>();
driver->base_ = std::move(base_kvstore);
driver->manifest_kvstore_ = std::move(manifest_kvstore);
auto supported_manifest_features =
driver->base_.driver->GetSupportedFeatures(KeyRange::Prefix(
tensorstore::StrCat(driver->base_.path, "manifest.")));
driver->cache_pool_ = spec->data_.cache_pool;
driver->data_copy_concurrency_ = spec->data_.data_copy_concurrency;
driver->data_file_prefixes_ = spec->data_.data_file_prefixes;
driver->experimental_read_coalescing_threshold_bytes_ =
spec->data_.experimental_read_coalescing_threshold_bytes;
driver->experimental_read_coalescing_merged_bytes_ =
spec->data_.experimental_read_coalescing_merged_bytes;
driver->experimental_read_coalescing_interval_ =
spec->data_.experimental_read_coalescing_interval;
driver->target_data_file_size_ = spec->data_.target_data_file_size;
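// Fold the experimental read-coalescing knobs, if any are set, into a single
// ReadCoalesceOptions for the I/O handle.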
std::optional<ReadCoalesceOptions> read_coalesce_options;
if (driver->experimental_read_coalescing_threshold_bytes_ ||
driver->experimental_read_coalescing_merged_bytes_ ||
driver->experimental_read_coalescing_interval_) {
read_coalesce_options.emplace();
read_coalesce_options->max_overhead_bytes_per_request =
static_cast<int64_t>(
driver->experimental_read_coalescing_threshold_bytes_
.value_or(0));
read_coalesce_options->max_merged_bytes_per_request =
static_cast<int64_t>(
driver->experimental_read_coalescing_merged_bytes_.value_or(
0));
read_coalesce_options->max_interval =
driver->experimental_read_coalescing_interval_.value_or(
absl::ZeroDuration());
}
TENSORSTORE_ASSIGN_OR_RETURN(
auto config_state,
ConfigState::Make(spec->data_.config, supported_manifest_features,
spec->data_.assume_config));
driver->io_handle_ = internal_ocdbt::MakeIoHandle(
driver->data_copy_concurrency_, driver->cache_pool_->get(),
driver->base_,
driver->manifest_kvstore_.driver ? driver->manifest_kvstore_
: driver->base_,
std::move(config_state), driver->data_file_prefixes_,
driver->target_data_file_size_.value_or(kDefaultTargetBufferSize),
std::move(read_coalesce_options));
driver->btree_writer_ =
MakeNonDistributedBtreeWriter(driver->io_handle_);
driver->coordinator_ = spec->data_.coordinator;
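// Without a coordinator address, keep the non-distributed writer chosen above.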
if (!driver->coordinator_->address) {
driver->btree_writer_ =
MakeNonDistributedBtreeWriter(driver->io_handle_);
return driver;
}
DistributedBtreeWriterOptions options;
options.io_handle = driver->io_handle_;
options.coordinator_address = *driver->coordinator_->address;
options.security = driver->coordinator_->security;
if (!options.security) {
options.security = GetInsecureRpcSecurityMethod();
}
options.lease_duration = driver->coordinator_->lease_duration.value_or(
kDefaultLeaseDuration);
TENSORSTORE_ASSIGN_OR_RETURN(auto base_spec,
driver->base_.spec(MinimalSpec{}));
TENSORSTORE_ASSIGN_OR_RETURN(auto base_spec_json, base_spec.ToJson());
options.storage_identifier = base_spec_json.dump();
driver->btree_writer_ = MakeDistributedBtreeWriter(std::move(options));
return driver;
},
std::move(base_kvstore_future), std::move(manifest_kvstore_future));
}
absl::Status OcdbtDriverSpec::ApplyOptions(
kvstore::DriverSpecOptions&& options) {
if (options.minimal_spec) {
data_.config = {};
data_.assume_config = false;
}
return data_.base.driver.Set(std::move(options));
}
absl::Status OcdbtDriver::GetBoundSpecData(OcdbtDriverSpecData& spec) const {
TENSORSTORE_ASSIGN_OR_RETURN(spec.base.driver, base_.driver->GetBoundSpec());
spec.base.path = base_.path;
if (manifest_kvstore_.driver) {
auto& manifest_spec = spec.manifest.emplace();
TENSORSTORE_ASSIGN_OR_RETURN(manifest_spec.driver,
base_.driver->GetBoundSpec());
manifest_spec.path = manifest_kvstore_.path;
}
spec.data_copy_concurrency = data_copy_concurrency_;
spec.cache_pool = cache_pool_;
spec.config = io_handle_->config_state->GetConstraints();
spec.assume_config = io_handle_->config_state->assume_config();
spec.data_file_prefixes = data_file_prefixes_;
spec.experimental_read_coalescing_threshold_bytes =
experimental_read_coalescing_threshold_bytes_;
spec.experimental_read_coalescing_merged_bytes =
experimental_read_coalescing_merged_bytes_;
spec.experimental_read_coalescing_interval =
experimental_read_coalescing_interval_;
spec.target_data_file_size = target_data_file_size_;
spec.coordinator = coordinator_;
return absl::Status();
}
kvstore::SupportedFeatures OcdbtDriver::GetSupportedFeatures(
const KeyRange& key_range) const {
return kvstore::SupportedFeatures::kSingleKeyAtomicReadModifyWrite |
kvstore::SupportedFeatures::kAtomicWriteWithoutOverwrite;
}
Future<kvstore::ReadResult> OcdbtDriver::Read(kvstore::Key key,
kvstore::ReadOptions options) {
ocdbt_metrics.read.Increment();
return internal_ocdbt::NonDistributedRead(io_handle_, std::move(key),
std::move(options));
}
void OcdbtDriver::ListImpl(kvstore::ListOptions options,
ListReceiver receiver) {
ocdbt_metrics.list.Increment();
return internal_ocdbt::NonDistributedList(io_handle_, std::move(options),
std::move(receiver));
}
Future<TimestampedStorageGeneration> OcdbtDriver::Write(
Key key, std::optional<Value> value, WriteOptions options) {
ocdbt_metrics.write.Increment();
return btree_writer_->Write(std::move(key), std::move(value),
std::move(options));
}
Future<const void> OcdbtDriver::DeleteRange(KeyRange range) {
ocdbt_metrics.delete_range.Increment();
return btree_writer_->DeleteRange(std::move(range));
}
Future<const void> OcdbtDriver::ExperimentalCopyRangeFrom(
const internal::OpenTransactionPtr& transaction, const KvStore& source,
std::string target_prefix, kvstore::CopyRangeOptions options) {
if (typeid(*source.driver) == typeid(OcdbtDriver)) {
auto& source_driver = static_cast<OcdbtDriver&>(*source.driver);
if (source.transaction != no_transaction) {
return absl::UnimplementedError("Source transactions not supported");
}
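// Fast path: when the source shares this driver's base kvstore, splice the source
// subtree into the target B+tree instead of copying values key by key.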
if (source_driver.base_.driver == base_.driver &&
absl::StartsWith(source_driver.base_.path, base_.path)) {
auto [promise, future] = PromiseFuturePair<void>::Make();
auto manifest_future =
source_driver.io_handle_->GetManifest(options.source_staleness_bound);
LinkValue(
[self = internal::IntrusivePtr<OcdbtDriver>(this),
target_prefix = std::move(target_prefix),
data_path_prefix =
source_driver.base_.path.substr(base_.path.size()),
source_range =
KeyRange::AddPrefix(source.path, options.source_range),
source_prefix_length = source.path.size(),
transaction = std::move(transaction)](
Promise<void> promise,
ReadyFuture<const ManifestWithTime> future) mutable {
auto& manifest_with_time = future.value();
if (!manifest_with_time.manifest) {
promise.SetResult(absl::OkStatus());
return;
}
auto& manifest = *manifest_with_time.manifest;
auto& latest_version = manifest.latest_version();
if (latest_version.root.location.IsMissing()) {
promise.SetResult(absl::OkStatus());
return;
}
BtreeWriter::CopySubtreeOptions copy_node_options;
copy_node_options.node = latest_version.root;
if (!data_path_prefix.empty()) {
auto& base_path =
copy_node_options.node.location.file_id.base_path;
internal::RefCountedStringWriter base_path_writer(
data_path_prefix.size() + base_path.size());
std::memcpy(base_path_writer.data(), data_path_prefix.data(),
data_path_prefix.size());
std::memcpy(base_path_writer.data() + data_path_prefix.size(),
base_path.data(), base_path.size());
base_path = std::move(base_path_writer);
}
copy_node_options.node_height = latest_version.root_height;
copy_node_options.range = std::move(source_range);
copy_node_options.strip_prefix_length = source_prefix_length;
copy_node_options.add_prefix = std::move(target_prefix);
LinkResult(std::move(promise),
transaction ? internal_ocdbt::AddCopySubtree(
&*self, *self->io_handle_, transaction,
std::move(copy_node_options))
: self->btree_writer_->CopySubtree(
std::move(copy_node_options)));
},
std::move(promise), std::move(manifest_future));
return std::move(future);
}
}
return kvstore::Driver::ExperimentalCopyRangeFrom(
transaction, source, std::move(target_prefix), std::move(options));
}
std::string OcdbtDriver::DescribeKey(std::string_view key) {
return tensorstore::StrCat(tensorstore::QuoteString(key),
" in OCDBT database at ",
io_handle_->DescribeLocation());
}
Result<KvStore> OcdbtDriver::GetBase(std::string_view path,
const Transaction& transaction) const {
return base_;
}
absl::Status OcdbtDriver::ReadModifyWrite(
internal::OpenTransactionPtr& transaction, size_t& phase, Key key,
ReadModifyWriteSource& source) {
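// Only atomic, non-distributed transactions take the specialized OCDBT path;
// otherwise fall back to the generic kvstore implementation.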
if (!transaction || !transaction->atomic() || coordinator_->address) {
return kvstore::Driver::ReadModifyWrite(transaction, phase, std::move(key),
source);
}
return internal_ocdbt::AddReadModifyWrite(this, *io_handle_, transaction,
phase, std::move(key), source);
}
absl::Status OcdbtDriver::TransactionalDeleteRange(
const internal::OpenTransactionPtr& transaction, KeyRange range) {
if (!transaction->atomic() || coordinator_->address) {
return kvstore::Driver::TransactionalDeleteRange(transaction,
std::move(range));
}
return internal_ocdbt::AddDeleteRange(this, *io_handle_, transaction,
std::move(range));
}
}
}
namespace {
const tensorstore::internal_kvstore::DriverRegistration<
tensorstore::internal_ocdbt::OcdbtDriverSpec>
registration;
} | #include "tensorstore/kvstore/ocdbt/driver.h"
#include <stddef.h>
#include <stdint.h>
#include <cassert>
#include <memory>
#include <random>
#include <string>
#include <type_traits>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/random/random.h"
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "absl/strings/str_format.h"
#include <nlohmann/json.hpp>
#include "tensorstore/context.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/internal/testing/random_seed.h"
#include "tensorstore/internal/testing/scoped_directory.h"
#include "tensorstore/kvstore/kvstore.h"
#include "tensorstore/kvstore/ocdbt/distributed/coordinator_server.h"
#include "tensorstore/kvstore/ocdbt/format/btree.h"
#include "tensorstore/kvstore/ocdbt/format/indirect_data_reference.h"
#include "tensorstore/kvstore/ocdbt/format/version_tree.h"
#include "tensorstore/kvstore/ocdbt/test_util.h"
#include "tensorstore/kvstore/operations.h"
#include "tensorstore/kvstore/test_util.h"
#include "tensorstore/util/future.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status_testutil.h"
#include "tensorstore/util/str_cat.h"
namespace {
namespace kvstore = ::tensorstore::kvstore;
using ::tensorstore::Context;
using ::tensorstore::MatchesStatus;
using ::tensorstore::internal::GetMap;
using ::tensorstore::internal_ocdbt::OcdbtDriver;
using ::tensorstore::internal_ocdbt::ReadManifest;
using ::tensorstore::ocdbt::CoordinatorServer;
class DistributedTest : public ::testing::Test {
protected:
CoordinatorServer coordinator_server_;
std::string coordinator_address_;
Context::Spec context_spec;
DistributedTest() {
::nlohmann::json security_json = ::nlohmann::json::value_t::discarded;
{
CoordinatorServer::Options options;
options.spec = CoordinatorServer::Spec::FromJson(
{{"bind_addresses", {"localhost:0"}},
{"security", security_json}})
.value();
TENSORSTORE_CHECK_OK_AND_ASSIGN(
coordinator_server_, CoordinatorServer::Start(std::move(options)));
}
assert(coordinator_server_.port() != 0);
coordinator_address_ =
tensorstore::StrCat("localhost:", coordinator_server_.port());
TENSORSTORE_CHECK_OK_AND_ASSIGN(
context_spec,
Context::Spec::FromJson({{"ocdbt_coordinator",
{{"address", coordinator_address_},
{"security", security_json}}}}));
}
};
TEST_F(DistributedTest, WriteSingleKey) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto base_store,
kvstore::Open("memory:
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto store, kvstore::Open({{"driver", "ocdbt"}, {"base", "memory://"}},
Context(context_spec))
.result());
auto& driver = static_cast<OcdbtDriver&>(*store.driver);
TENSORSTORE_ASSERT_OK(kvstore::Write(store, "a", absl::Cord("value")));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto manifest, ReadManifest(driver));
ASSERT_TRUE(manifest);
auto& version = manifest->latest_version();
EXPECT_EQ(2, version.generation_number);
EXPECT_FALSE(version.root.location.IsMissing());
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto map, GetMap(store));
EXPECT_THAT(
map, ::testing::ElementsAre(::testing::Pair("a", absl::Cord("value"))));
}
TEST_F(DistributedTest, WriteTwoKeys) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto store, kvstore::Open({{"driver", "ocdbt"}, {"base", "memory://"}},
Context(context_spec))
.result());
TENSORSTORE_ASSERT_OK(kvstore::Write(store, "testa", absl::Cord("a")));
TENSORSTORE_ASSERT_OK(kvstore::Write(store, "testb", absl::Cord("b")));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto map, GetMap(store));
EXPECT_THAT(
map, ::testing::ElementsAre(::testing::Pair("testa", absl::Cord("a")),
::testing::Pair("testb", absl::Cord("b"))));
}
TEST_F(DistributedTest, BasicFunctionality) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto store, kvstore::Open({{"driver", "ocdbt"}, {"base", "memory://"}},
Context(context_spec))
.result());
tensorstore::internal::TestKeyValueReadWriteOps(store);
}
TEST_F(DistributedTest, BasicFunctionalityMinArity) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto store,
tensorstore::kvstore::Open({{"driver", "ocdbt"},
{"base", "memory:
{"config", {{"max_decoded_node_bytes", 1}}}},
Context(context_spec))
.result());
tensorstore::internal::TestKeyValueReadWriteOps(store);
}
TEST_F(DistributedTest, BasicFunctionalityMinArityNoInline) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto store,
tensorstore::kvstore::Open({{"driver", "ocdbt"},
{"base", "memory:
{"config",
{
{"max_decoded_node_bytes", 1},
{"max_inline_value_bytes", 0},
}}},
Context(context_spec))
.result());
tensorstore::internal::TestKeyValueReadWriteOps(store);
}
TEST_F(DistributedTest, TwoCooperators) {
tensorstore::internal_testing::ScopedTemporaryDirectory tempdir;
::nlohmann::json base_kvs_store_spec{{"driver", "file"},
{"path", tempdir.path() + "/"}};
::nlohmann::json kvs_spec{
{"driver", "ocdbt"},
{"base", base_kvs_store_spec},
};
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto store1, kvstore::Open(kvs_spec, Context(context_spec)).result());
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto store2, kvstore::Open(kvs_spec, Context(context_spec)).result());
TENSORSTORE_ASSERT_OK(kvstore::Write(store1, "testa", absl::Cord("a")));
TENSORSTORE_ASSERT_OK(kvstore::Write(store2, "testb", absl::Cord("b")));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto map, GetMap(store1));
EXPECT_THAT(
map, ::testing::ElementsAre(::testing::Pair("testa", absl::Cord("a")),
::testing::Pair("testb", absl::Cord("b"))));
}
TEST_F(DistributedTest, MultipleCooperatorsManyWrites) {
tensorstore::internal_testing::ScopedTemporaryDirectory tempdir;
::nlohmann::json base_kvs_store_spec{{"driver", "file"},
{"path", tempdir.path() + "/"}};
::nlohmann::json kvs_spec{
{"driver", "ocdbt"},
{"base", base_kvs_store_spec},
{"config", {{"max_decoded_node_bytes", 500}}},
};
constexpr size_t kNumCooperators = 3;
constexpr size_t kNumWrites = 30;
constexpr size_t kIterations = 5;
std::vector<kvstore::KvStore> stores;
for (size_t i = 0; i < kNumCooperators; ++i) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto store, kvstore::Open(kvs_spec, Context(context_spec)).result());
stores.push_back(store);
}
std::minstd_rand gen{tensorstore::internal_testing::GetRandomSeedForTest(
"TENSORSTORE_OCDBT_DRIVER_TEST_SEED")};
for (size_t iter = 0; iter < kIterations; ++iter) {
std::vector<tensorstore::AnyFuture> write_futures;
for (size_t i = 0; i < kNumWrites; ++i) {
auto k = absl::Uniform<uint16_t>(gen);
write_futures.push_back(kvstore::Write(stores[i % kNumCooperators],
absl::StrFormat("%04x", k),
absl::Cord("a")));
}
for (auto& future : write_futures) {
TENSORSTORE_ASSERT_OK(future.status());
}
}
}
TEST_F(DistributedTest, TwoCooperatorsManifestDeleted) {
::nlohmann::json base_kvs_store_spec = "memory://";
::nlohmann::json kvs_spec{
{"driver", "ocdbt"},
{"base", base_kvs_store_spec},
};
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto store1, kvstore::Open(kvs_spec, Context(context_spec)).result());
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto store2, kvstore::Open(kvs_spec, Context(context_spec)).result());
TENSORSTORE_ASSERT_OK(kvstore::Write(store1, "testa", absl::Cord("a")));
EXPECT_THAT(kvstore::Write(store2, "testb", absl::Cord("b")).result(),
MatchesStatus(absl::StatusCode::kFailedPrecondition));
}
TEST_F(DistributedTest, UnmodifiedNode) {
tensorstore::internal_ocdbt::TestUnmodifiedNode(Context(context_spec));
}
TEST_F(DistributedTest, ManifestDeleted) {
auto context = Context(context_spec);
::nlohmann::json base_kvs_store_spec = "memory://";
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto store,
kvstore::Open({{"driver", "ocdbt"}, {"base", base_kvs_store_spec}},
context)
.result());
TENSORSTORE_ASSERT_OK(kvstore::Write(store, "testa", absl::Cord("a")));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto base_store, kvstore::Open(base_kvs_store_spec, context).result());
TENSORSTORE_ASSERT_OK(kvstore::Delete(base_store, "manifest.ocdbt"));
EXPECT_THAT(kvstore::Write(store, "testb", absl::Cord("b")).result(),
MatchesStatus(absl::StatusCode::kFailedPrecondition));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/ocdbt/driver.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/ocdbt/distributed/driver_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
28604415-bb4e-4d01-bd3b-610ccff48128 | cpp | tensorflow/tensorflow | test_util | tensorflow/compiler/jit/test_util.cc | tensorflow/lite/kernels/shim/test_util_test.cc | #include "tensorflow/compiler/jit/test_util.h"
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include "tensorflow/compiler/jit/shape_inference.h"
#include "xla/status_macros.h"
#include "tensorflow/core/framework/device_factory.h"
#include "tensorflow/core/public/version.h"
namespace tensorflow {
Status ShapeAnnotationsMatch(
const Graph& graph, const GraphShapeInfo& shape_info,
std::map<string, std::vector<PartialTensorShape>> expected_shapes) {
for (Node* node : graph.op_nodes()) {
auto sit = shape_info.find(node->name());
TF_RET_CHECK(sit != shape_info.end())
<< "Missing shape information for node " << node->name();
std::vector<PartialTensorShape> shapes;
for (const auto& output : sit->second) shapes.push_back(output.shape);
auto it = expected_shapes.find(node->name());
if (it != expected_shapes.end()) {
if (!PartialTensorShapeUtils::AreIdentical(shapes, it->second)) {
return errors::InvalidArgument(
"Shape mismatch for ", node->name(), ". Expected: ",
PartialTensorShapeUtils::PartialShapeListString(it->second),
", actual: ",
PartialTensorShapeUtils::PartialShapeListString(shapes));
}
expected_shapes.erase(it);
}
}
if (!expected_shapes.empty()) {
std::vector<string> missing;
missing.reserve(expected_shapes.size());
for (const auto& entry : expected_shapes) {
missing.push_back(entry.first);
}
return errors::InvalidArgument("Missing shapes for nodes: ",
absl::StrJoin(missing, ","));
}
return absl::OkStatus();
}
void DeviceSetup::AddDevicesAndSetUp(
const std::vector<std::string>& device_names,
const std::optional<FunctionDef>& fdef) {
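// Registers one device per requested name and builds a function library runtime for tests.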
SessionOptions options;
auto* device_count = options.config.mutable_device_count();
for (const auto& device_name : device_names) {
device_count->insert({device_name, 1});
}
std::vector<std::unique_ptr<Device>> devices;
TF_CHECK_OK(DeviceFactory::AddDevices(
options, "/job:localhost/replica:0/task:0", &devices));
device_mgr_ = std::make_unique<StaticDeviceMgr>(std::move(devices));
OptimizerOptions opts;
lib_def_ = std::make_unique<FunctionLibraryDefinition>(OpRegistry::Global(),
FunctionDefLibrary());
if (fdef.has_value()) {
TF_CHECK_OK(lib_def_->AddFunctionDef(*fdef));
}
pflr_ = std::make_unique<ProcessFunctionLibraryRuntime>(
device_mgr_.get(), Env::Default(), nullptr,
TF_GRAPH_DEF_VERSION, lib_def_.get(), opts,
nullptr, nullptr);
flr_ = pflr_->GetFLR("/job:localhost/replica:0/task:0/cpu:0");
}
Device* DeviceSetup::GetDevice(const string& device_name) {
if (device_mgr_ == nullptr) {
return nullptr;
}
string full_device_name = absl::StrCat(
"/job:localhost/replica:0/task:0/device:", device_name, ":0");
Device* device;
TF_CHECK_OK(device_mgr_->LookupDevice(full_device_name, &device));
return device;
}
} | #include "tensorflow/lite/kernels/shim/test_util.h"
#include <string>
#include <gtest/gtest.h>
#include "tensorflow/lite/core/interpreter.h"
namespace tflite {
namespace {
TEST(TfliteTensorDebugString, Basic) {
::tflite::Interpreter interpreter;
interpreter.AddTensors(3);
interpreter.AllocateTensors();
auto t_int32 = UniqueTfLiteTensor(interpreter.tensor(0));
PopulateTfLiteTensor<int32_t>({1, 2, 3, 4, 5}, {5}, t_int32.get());
EXPECT_EQ("[1, 2, 3, 4, 5]", TfliteTensorDebugString(t_int32.get()));
auto t_int64 = UniqueTfLiteTensor(interpreter.tensor(1));
PopulateTfLiteTensor<int32_t>({1, 2, 3, 4}, {2, 2}, t_int64.get());
EXPECT_EQ("[[1, 2], [3, 4]]", TfliteTensorDebugString(t_int64.get()));
auto t_str = UniqueTfLiteTensor(interpreter.tensor(2));
PopulateTfLiteTensor<std::string>({"ab", "cde", "f"}, {1, 3}, t_str.get());
EXPECT_EQ("[[ab, cde, f]]", TfliteTensorDebugString(t_str.get()));
}
TEST(TfliteTensorDebugString, MaxVal) {
::tflite::Interpreter interpreter;
interpreter.AddTensors(2);
interpreter.AllocateTensors();
auto t_int32 = UniqueTfLiteTensor(interpreter.tensor(0));
PopulateTfLiteTensor<int32_t>({1, 2, 3, 4}, {4}, t_int32.get());
EXPECT_EQ("[1, 2, 3, 4]",
TfliteTensorDebugString(t_int32.get(), 4));
t_int32 = UniqueTfLiteTensor(interpreter.tensor(0));
PopulateTfLiteTensor<int32_t>({1, 2, 3, 4, 5}, {5}, t_int32.get());
EXPECT_EQ("[1, 2, 3, 4, ...]",
TfliteTensorDebugString(t_int32.get(), 4));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/jit/test_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/shim/test_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
772caff1-e862-49bd-948f-2822a6bfb7e6 | cpp | google/cel-cpp | field_access_impl | eval/public/structs/field_access_impl.cc | eval/public/structs/field_access_impl_test.cc | #include "eval/public/structs/field_access_impl.h"
#include <cstdint>
#include <string>
#include <type_traits>
#include <utility>
#include "google/protobuf/any.pb.h"
#include "google/protobuf/struct.pb.h"
#include "google/protobuf/wrappers.pb.h"
#include "google/protobuf/arena.h"
#include "google/protobuf/map_field.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/strings/substitute.h"
#include "eval/public/structs/cel_proto_wrap_util.h"
#include "internal/casts.h"
#include "internal/overflow.h"
namespace google::api::expr::runtime::internal {
namespace {
using ::google::protobuf::Arena;
using ::google::protobuf::FieldDescriptor;
using ::google::protobuf::MapValueConstRef;
using ::google::protobuf::Message;
using ::google::protobuf::Reflection;
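// CRTP helper that converts a protobuf field into a CelValue; Derived supplies the
// typed getters for the scalar, repeated-element, or map-value case.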
template <class Derived>
class FieldAccessor {
public:
bool GetBool() const { return static_cast<const Derived*>(this)->GetBool(); }
int64_t GetInt32() const {
return static_cast<const Derived*>(this)->GetInt32();
}
uint64_t GetUInt32() const {
return static_cast<const Derived*>(this)->GetUInt32();
}
int64_t GetInt64() const {
return static_cast<const Derived*>(this)->GetInt64();
}
uint64_t GetUInt64() const {
return static_cast<const Derived*>(this)->GetUInt64();
}
double GetFloat() const {
return static_cast<const Derived*>(this)->GetFloat();
}
double GetDouble() const {
return static_cast<const Derived*>(this)->GetDouble();
}
absl::string_view GetString(std::string* buffer) const {
return static_cast<const Derived*>(this)->GetString(buffer);
}
const Message* GetMessage() const {
return static_cast<const Derived*>(this)->GetMessage();
}
int64_t GetEnumValue() const {
return static_cast<const Derived*>(this)->GetEnumValue();
}
absl::StatusOr<CelValue> CreateValueFromFieldAccessor(Arena* arena) {
switch (field_desc_->cpp_type()) {
case FieldDescriptor::CPPTYPE_BOOL: {
bool value = GetBool();
return CelValue::CreateBool(value);
}
case FieldDescriptor::CPPTYPE_INT32: {
int64_t value = GetInt32();
return CelValue::CreateInt64(value);
}
case FieldDescriptor::CPPTYPE_INT64: {
int64_t value = GetInt64();
return CelValue::CreateInt64(value);
}
case FieldDescriptor::CPPTYPE_UINT32: {
uint64_t value = GetUInt32();
return CelValue::CreateUint64(value);
}
case FieldDescriptor::CPPTYPE_UINT64: {
uint64_t value = GetUInt64();
return CelValue::CreateUint64(value);
}
case FieldDescriptor::CPPTYPE_FLOAT: {
double value = GetFloat();
return CelValue::CreateDouble(value);
}
case FieldDescriptor::CPPTYPE_DOUBLE: {
double value = GetDouble();
return CelValue::CreateDouble(value);
}
case FieldDescriptor::CPPTYPE_STRING: {
std::string buffer;
absl::string_view value = GetString(&buffer);
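// If reflection materialized the string into the local buffer, move the buffer to
// the arena so the returned view outlives this call.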
if (value.data() == buffer.data() && value.size() == buffer.size()) {
value = absl::string_view(
*google::protobuf::Arena::Create<std::string>(arena, std::move(buffer)));
}
switch (field_desc_->type()) {
case FieldDescriptor::TYPE_STRING:
return CelValue::CreateStringView(value);
case FieldDescriptor::TYPE_BYTES:
return CelValue::CreateBytesView(value);
default:
return absl::Status(absl::StatusCode::kInvalidArgument,
"Error handling C++ string conversion");
}
break;
}
case FieldDescriptor::CPPTYPE_MESSAGE: {
const google::protobuf::Message* msg_value = GetMessage();
return UnwrapMessageToValue(msg_value, protobuf_value_factory_, arena);
}
case FieldDescriptor::CPPTYPE_ENUM: {
int enum_value = GetEnumValue();
return CelValue::CreateInt64(enum_value);
}
default:
return absl::Status(absl::StatusCode::kInvalidArgument,
"Unhandled C++ type conversion");
}
return absl::Status(absl::StatusCode::kInvalidArgument,
"Unhandled C++ type conversion");
}
protected:
FieldAccessor(const Message* msg, const FieldDescriptor* field_desc,
const ProtobufValueFactory& protobuf_value_factory)
: msg_(msg),
field_desc_(field_desc),
protobuf_value_factory_(protobuf_value_factory) {}
const Message* msg_;
const FieldDescriptor* field_desc_;
const ProtobufValueFactory& protobuf_value_factory_;
};
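// Well-known protobuf wrapper types whose unset fields may be surfaced as null.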
const absl::flat_hash_set<std::string>& WellKnownWrapperTypes() {
static auto* wrapper_types = new absl::flat_hash_set<std::string>{
"google.protobuf.BoolValue", "google.protobuf.DoubleValue",
"google.protobuf.FloatValue", "google.protobuf.Int64Value",
"google.protobuf.Int32Value", "google.protobuf.UInt64Value",
"google.protobuf.UInt32Value", "google.protobuf.StringValue",
"google.protobuf.BytesValue",
};
return *wrapper_types;
}
bool IsWrapperType(const FieldDescriptor* field_descriptor) {
return WellKnownWrapperTypes().find(
field_descriptor->message_type()->full_name()) !=
WellKnownWrapperTypes().end();
}
class ScalarFieldAccessor : public FieldAccessor<ScalarFieldAccessor> {
public:
ScalarFieldAccessor(const Message* msg, const FieldDescriptor* field_desc,
bool unset_wrapper_as_null,
const ProtobufValueFactory& factory)
: FieldAccessor(msg, field_desc, factory),
unset_wrapper_as_null_(unset_wrapper_as_null) {}
bool GetBool() const { return GetReflection()->GetBool(*msg_, field_desc_); }
int64_t GetInt32() const {
return GetReflection()->GetInt32(*msg_, field_desc_);
}
uint64_t GetUInt32() const {
return GetReflection()->GetUInt32(*msg_, field_desc_);
}
int64_t GetInt64() const {
return GetReflection()->GetInt64(*msg_, field_desc_);
}
uint64_t GetUInt64() const {
return GetReflection()->GetUInt64(*msg_, field_desc_);
}
double GetFloat() const {
return GetReflection()->GetFloat(*msg_, field_desc_);
}
double GetDouble() const {
return GetReflection()->GetDouble(*msg_, field_desc_);
}
absl::string_view GetString(std::string* buffer) const {
return GetReflection()->GetStringReference(*msg_, field_desc_, buffer);
}
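  // With kUnsetNull semantics, an unset well-known wrapper field reads as
  // null rather than as the wrapper's default instance.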
const Message* GetMessage() const {
if (unset_wrapper_as_null_ &&
!GetReflection()->HasField(*msg_, field_desc_) &&
IsWrapperType(field_desc_)) {
return nullptr;
}
return &GetReflection()->GetMessage(*msg_, field_desc_);
}
int64_t GetEnumValue() const {
return GetReflection()->GetEnumValue(*msg_, field_desc_);
}
const Reflection* GetReflection() const { return msg_->GetReflection(); }
private:
bool unset_wrapper_as_null_;
};
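// Accessor for a single element of a repeated field, addressed by index.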
class RepeatedFieldAccessor : public FieldAccessor<RepeatedFieldAccessor> {
public:
RepeatedFieldAccessor(const Message* msg, const FieldDescriptor* field_desc,
int index, const ProtobufValueFactory& factory)
: FieldAccessor(msg, field_desc, factory), index_(index) {}
bool GetBool() const {
return GetReflection()->GetRepeatedBool(*msg_, field_desc_, index_);
}
int64_t GetInt32() const {
return GetReflection()->GetRepeatedInt32(*msg_, field_desc_, index_);
}
uint64_t GetUInt32() const {
return GetReflection()->GetRepeatedUInt32(*msg_, field_desc_, index_);
}
int64_t GetInt64() const {
return GetReflection()->GetRepeatedInt64(*msg_, field_desc_, index_);
}
uint64_t GetUInt64() const {
return GetReflection()->GetRepeatedUInt64(*msg_, field_desc_, index_);
}
double GetFloat() const {
return GetReflection()->GetRepeatedFloat(*msg_, field_desc_, index_);
}
double GetDouble() const {
return GetReflection()->GetRepeatedDouble(*msg_, field_desc_, index_);
}
absl::string_view GetString(std::string* buffer) const {
return GetReflection()->GetRepeatedStringReference(*msg_, field_desc_,
index_, buffer);
}
const Message* GetMessage() const {
return &GetReflection()->GetRepeatedMessage(*msg_, field_desc_, index_);
}
int64_t GetEnumValue() const {
return GetReflection()->GetRepeatedEnumValue(*msg_, field_desc_, index_);
}
const Reflection* GetReflection() const { return msg_->GetReflection(); }
private:
int index_;
};
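// Accessor for a map entry's value, read through a MapValueConstRef.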
class MapValueAccessor : public FieldAccessor<MapValueAccessor> {
public:
MapValueAccessor(const Message* msg, const FieldDescriptor* field_desc,
const MapValueConstRef* value_ref,
const ProtobufValueFactory& factory)
: FieldAccessor(msg, field_desc, factory), value_ref_(value_ref) {}
bool GetBool() const { return value_ref_->GetBoolValue(); }
int64_t GetInt32() const { return value_ref_->GetInt32Value(); }
uint64_t GetUInt32() const { return value_ref_->GetUInt32Value(); }
int64_t GetInt64() const { return value_ref_->GetInt64Value(); }
uint64_t GetUInt64() const { return value_ref_->GetUInt64Value(); }
double GetFloat() const { return value_ref_->GetFloatValue(); }
double GetDouble() const { return value_ref_->GetDoubleValue(); }
  absl::string_view GetString(std::string* /*buffer*/) const {
return value_ref_->GetStringValue();
}
const Message* GetMessage() const { return &value_ref_->GetMessageValue(); }
int64_t GetEnumValue() const { return value_ref_->GetEnumValue(); }
const Reflection* GetReflection() const { return msg_->GetReflection(); }
private:
const MapValueConstRef* value_ref_;
};
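// CRTP mirror of FieldAccessor for writes: converts a CelValue and hands it
// to the Derived class's Set* primitive (scalar set vs. repeated add).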
template <class Derived>
class FieldSetter {
public:
bool AssignBool(const CelValue& cel_value) const {
bool value;
if (!cel_value.GetValue(&value)) {
return false;
}
static_cast<const Derived*>(this)->SetBool(value);
return true;
}
bool AssignInt32(const CelValue& cel_value) const {
int64_t value;
if (!cel_value.GetValue(&value)) {
return false;
}
absl::StatusOr<int32_t> checked_cast =
cel::internal::CheckedInt64ToInt32(value);
if (!checked_cast.ok()) {
return false;
}
static_cast<const Derived*>(this)->SetInt32(*checked_cast);
return true;
}
bool AssignUInt32(const CelValue& cel_value) const {
uint64_t value;
if (!cel_value.GetValue(&value)) {
return false;
}
if (!cel::internal::CheckedUint64ToUint32(value).ok()) {
return false;
}
static_cast<const Derived*>(this)->SetUInt32(value);
return true;
}
bool AssignInt64(const CelValue& cel_value) const {
int64_t value;
if (!cel_value.GetValue(&value)) {
return false;
}
static_cast<const Derived*>(this)->SetInt64(value);
return true;
}
bool AssignUInt64(const CelValue& cel_value) const {
uint64_t value;
if (!cel_value.GetValue(&value)) {
return false;
}
static_cast<const Derived*>(this)->SetUInt64(value);
return true;
}
bool AssignFloat(const CelValue& cel_value) const {
double value;
if (!cel_value.GetValue(&value)) {
return false;
}
static_cast<const Derived*>(this)->SetFloat(value);
return true;
}
bool AssignDouble(const CelValue& cel_value) const {
double value;
if (!cel_value.GetValue(&value)) {
return false;
}
static_cast<const Derived*>(this)->SetDouble(value);
return true;
}
bool AssignString(const CelValue& cel_value) const {
CelValue::StringHolder value;
if (!cel_value.GetValue(&value)) {
return false;
}
static_cast<const Derived*>(this)->SetString(value);
return true;
}
bool AssignBytes(const CelValue& cel_value) const {
CelValue::BytesHolder value;
if (!cel_value.GetValue(&value)) {
return false;
}
static_cast<const Derived*>(this)->SetBytes(value);
return true;
}
bool AssignEnum(const CelValue& cel_value) const {
int64_t value;
if (!cel_value.GetValue(&value)) {
return false;
}
if (!cel::internal::CheckedInt64ToInt32(value).ok()) {
return false;
}
static_cast<const Derived*>(this)->SetEnum(value);
return true;
}
bool AssignMessage(const google::protobuf::Message* message) const {
return static_cast<const Derived*>(this)->SetMessage(message);
}
bool SetFieldFromCelValue(const CelValue& value) {
switch (field_desc_->cpp_type()) {
case FieldDescriptor::CPPTYPE_BOOL: {
return AssignBool(value);
}
case FieldDescriptor::CPPTYPE_INT32: {
return AssignInt32(value);
}
case FieldDescriptor::CPPTYPE_INT64: {
return AssignInt64(value);
}
case FieldDescriptor::CPPTYPE_UINT32: {
return AssignUInt32(value);
}
case FieldDescriptor::CPPTYPE_UINT64: {
return AssignUInt64(value);
}
case FieldDescriptor::CPPTYPE_FLOAT: {
return AssignFloat(value);
}
case FieldDescriptor::CPPTYPE_DOUBLE: {
return AssignDouble(value);
}
case FieldDescriptor::CPPTYPE_STRING: {
switch (field_desc_->type()) {
case FieldDescriptor::TYPE_STRING:
return AssignString(value);
case FieldDescriptor::TYPE_BYTES:
return AssignBytes(value);
default:
return false;
}
break;
}
case FieldDescriptor::CPPTYPE_MESSAGE: {
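        // MaybeWrapValueToMessage adapts primitive CEL values to the field's
        // well-known wrapper/Value message type where applicable. Assigning
        // null to a message field is a no-op; anything else must carry a
        // full proto message.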
const google::protobuf::Message* wrapped_value = MaybeWrapValueToMessage(
field_desc_->message_type(),
msg_->GetReflection()->GetMessageFactory(), value, arena_);
if (wrapped_value == nullptr) {
if (value.IsNull()) {
return true;
}
if (CelValue::MessageWrapper wrapper;
value.GetValue(&wrapper) && wrapper.HasFullProto()) {
wrapped_value =
static_cast<const google::protobuf::Message*>(wrapper.message_ptr());
} else {
return false;
}
}
return AssignMessage(wrapped_value);
}
case FieldDescriptor::CPPTYPE_ENUM: {
return AssignEnum(value);
}
default:
return false;
}
return true;
}
protected:
FieldSetter(Message* msg, const FieldDescriptor* field_desc, Arena* arena)
: msg_(msg), field_desc_(field_desc), arena_(arena) {}
Message* msg_;
const FieldDescriptor* field_desc_;
Arena* arena_;
};
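// Merges `value` into `field` directly when the descriptors match; otherwise
// round-trips through serialized bytes so equivalent messages from different
// descriptor pools (e.g. dynamic messages) can still be merged.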
bool MergeFromWithSerializeFallback(const google::protobuf::Message& value,
google::protobuf::Message& field) {
if (field.GetDescriptor() == value.GetDescriptor()) {
field.MergeFrom(value);
return true;
}
return field.MergeFromString(value.SerializeAsString());
}
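// Writes a CelValue into a singular field via the reflection Set* calls.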
class ScalarFieldSetter : public FieldSetter<ScalarFieldSetter> {
public:
ScalarFieldSetter(Message* msg, const FieldDescriptor* field_desc,
Arena* arena)
: FieldSetter(msg, field_desc, arena) {}
bool SetBool(bool value) const {
GetReflection()->SetBool(msg_, field_desc_, value);
return true;
}
bool SetInt32(int32_t value) const {
GetReflection()->SetInt32(msg_, field_desc_, value);
return true;
}
bool SetUInt32(uint32_t value) const {
GetReflection()->SetUInt32(msg_, field_desc_, value);
return true;
}
bool SetInt64(int64_t value) const {
GetReflection()->SetInt64(msg_, field_desc_, value);
return true;
}
bool SetUInt64(uint64_t value) const {
GetReflection()->SetUInt64(msg_, field_desc_, value);
return true;
}
bool SetFloat(float value) const {
GetReflection()->SetFloat(msg_, field_desc_, value);
return true;
}
bool SetDouble(double value) const {
GetReflection()->SetDouble(msg_, field_desc_, value);
return true;
}
bool SetString(CelValue::StringHolder value) const {
GetReflection()->SetString(msg_, field_desc_, std::string(value.value()));
return true;
}
bool SetBytes(CelValue::BytesHolder value) const {
GetReflection()->SetString(msg_, field_desc_, std::string(value.value()));
return true;
}
bool SetMessage(const Message* value) const {
if (!value) {
ABSL_LOG(ERROR) << "Message is NULL";
return true;
}
if (value->GetDescriptor()->full_name() ==
field_desc_->message_type()->full_name()) {
auto* assignable_field_msg =
GetReflection()->MutableMessage(msg_, field_desc_);
return MergeFromWithSerializeFallback(*value, *assignable_field_msg);
}
return false;
}
bool SetEnum(const int64_t value) const {
GetReflection()->SetEnumValue(msg_, field_desc_, value);
return true;
}
const Reflection* GetReflection() const { return msg_->GetReflection(); }
};
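// Appends a CelValue to a repeated field via the reflection Add* calls.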
class RepeatedFieldSetter : public FieldSetter<RepeatedFieldSetter> {
public:
RepeatedFieldSetter(Message* msg, const FieldDescriptor* field_desc,
Arena* arena)
: FieldSetter(msg, field_desc, arena) {}
bool SetBool(bool value) const {
GetReflection()->AddBool(msg_, field_desc_, value);
return true;
}
bool SetInt32(int32_t value) const {
GetReflection()->AddInt32(msg_, field_desc_, value);
return true;
}
bool SetUInt32(uint32_t value) const {
GetReflection()->AddUInt32(msg_, field_desc_, value);
return true;
}
bool SetInt64(int64_t value) const {
GetReflection()->AddInt64(msg_, field_desc_, value);
return true;
}
bool SetUInt64(uint64_t value) const {
GetReflection()->AddUInt64(msg_, field_desc_, value);
return true;
}
bool SetFloat(float value) const {
GetReflection()->AddFloat(msg_, field_desc_, value);
return true;
}
bool SetDouble(double value) const {
GetReflection()->AddDouble(msg_, field_desc_, value);
return true;
}
bool SetString(CelValue::StringHolder value) const {
GetReflection()->AddString(msg_, field_desc_, std::string(value.value()));
return true;
}
bool SetBytes(CelValue::BytesHolder value) const {
GetReflection()->AddString(msg_, field_desc_, std::string(value.value()));
return true;
}
bool SetMessage(const Message* value) const {
if (!value) return true;
if (value->GetDescriptor()->full_name() !=
field_desc_->message_type()->full_name()) {
return false;
}
auto* assignable_message = GetReflection()->AddMessage(msg_, field_desc_);
return MergeFromWithSerializeFallback(*value, *assignable_message);
}
bool SetEnum(const int64_t value) const {
GetReflection()->AddEnumValue(msg_, field_desc_, value);
return true;
}
private:
const Reflection* GetReflection() const { return msg_->GetReflection(); }
};
}
absl::StatusOr<CelValue> CreateValueFromSingleField(
const google::protobuf::Message* msg, const FieldDescriptor* desc,
ProtoWrapperTypeOptions options, const ProtobufValueFactory& factory,
google::protobuf::Arena* arena) {
ScalarFieldAccessor accessor(
msg, desc, (options == ProtoWrapperTypeOptions::kUnsetNull), factory);
return accessor.CreateValueFromFieldAccessor(arena);
}
absl::StatusOr<CelValue> CreateValueFromRepeatedField(
const google::protobuf::Message* msg, const FieldDescriptor* desc, int index,
const ProtobufValueFactory& factory, google::protobuf::Arena* arena) {
RepeatedFieldAccessor accessor(msg, desc, index, factory);
return accessor.CreateValueFromFieldAccessor(arena);
}
absl::StatusOr<CelValue> CreateValueFromMapValue(
const google::protobuf::Message* msg, const FieldDescriptor* desc,
const MapValueConstRef* value_ref, const ProtobufValueFactory& factory,
google::protobuf::Arena* arena) {
MapValueAccessor accessor(msg, desc, value_ref, factory);
return accessor.CreateValueFromFieldAccessor(arena);
}
absl::Status SetValueToSingleField(const CelValue& value,
const FieldDescriptor* desc, Message* msg,
Arena* arena) {
ScalarFieldSetter setter(msg, desc, arena);
return (setter.SetFieldFromCelValue(value))
? absl::OkStatus()
: absl::InvalidArgumentError(absl::Substitute(
"Could not assign supplied argument to message \"$0\" field "
"\"$1\" of type $2: value type \"$3\"",
msg->GetDescriptor()->name(), desc->name(),
desc->type_name(), CelValue::TypeName(value.type())));
}
absl::Status AddValueToRepeatedField(const CelValue& value,
const FieldDescriptor* desc, Message* msg,
Arena* arena) {
RepeatedFieldSetter setter(msg, desc, arena);
return (setter.SetFieldFromCelValue(value))
? absl::OkStatus()
: absl::InvalidArgumentError(absl::Substitute(
"Could not add supplied argument to message \"$0\" field "
"\"$1\" of type $2: value type \"$3\"",
msg->GetDescriptor()->name(), desc->name(),
desc->type_name(), CelValue::TypeName(value.type())));
}
} | #include "eval/public/structs/field_access_impl.h"
#include <array>
#include <limits>
#include <string>
#include "google/protobuf/arena.h"
#include "google/protobuf/descriptor.h"
#include "google/protobuf/message.h"
#include "google/protobuf/text_format.h"
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "absl/time/time.h"
#include "eval/public/cel_value.h"
#include "eval/public/structs/cel_proto_wrapper.h"
#include "eval/public/testing/matchers.h"
#include "eval/testutil/test_message.pb.h"
#include "internal/testing.h"
#include "internal/time.h"
#include "testutil/util.h"
#include "proto/test/v1/proto3/test_all_types.pb.h"
namespace google::api::expr::runtime::internal {
namespace {
using ::absl_testing::StatusIs;
using ::cel::internal::MaxDuration;
using ::cel::internal::MaxTimestamp;
using ::google::api::expr::test::v1::proto3::TestAllTypes;
using ::google::protobuf::Arena;
using ::google::protobuf::FieldDescriptor;
using ::testing::HasSubstr;
using testutil::EqualsProto;
TEST(FieldAccessTest, SetDuration) {
Arena arena;
TestAllTypes msg;
const FieldDescriptor* field =
TestAllTypes::descriptor()->FindFieldByName("single_duration");
auto status = SetValueToSingleField(CelValue::CreateDuration(MaxDuration()),
field, &msg, &arena);
EXPECT_TRUE(status.ok());
}
TEST(FieldAccessTest, SetDurationBadDuration) {
Arena arena;
TestAllTypes msg;
const FieldDescriptor* field =
TestAllTypes::descriptor()->FindFieldByName("single_duration");
auto status = SetValueToSingleField(
CelValue::CreateDuration(MaxDuration() + absl::Seconds(1)), field, &msg,
&arena);
EXPECT_FALSE(status.ok());
EXPECT_EQ(status.code(), absl::StatusCode::kInvalidArgument);
}
TEST(FieldAccessTest, SetDurationBadInputType) {
Arena arena;
TestAllTypes msg;
const FieldDescriptor* field =
TestAllTypes::descriptor()->FindFieldByName("single_duration");
auto status =
SetValueToSingleField(CelValue::CreateInt64(1), field, &msg, &arena);
EXPECT_FALSE(status.ok());
EXPECT_EQ(status.code(), absl::StatusCode::kInvalidArgument);
}
TEST(FieldAccessTest, SetTimestamp) {
Arena arena;
TestAllTypes msg;
const FieldDescriptor* field =
TestAllTypes::descriptor()->FindFieldByName("single_timestamp");
auto status = SetValueToSingleField(CelValue::CreateTimestamp(MaxTimestamp()),
field, &msg, &arena);
EXPECT_TRUE(status.ok());
}
TEST(FieldAccessTest, SetTimestampBadTime) {
Arena arena;
TestAllTypes msg;
const FieldDescriptor* field =
TestAllTypes::descriptor()->FindFieldByName("single_timestamp");
auto status = SetValueToSingleField(
CelValue::CreateTimestamp(MaxTimestamp() + absl::Seconds(1)), field, &msg,
&arena);
EXPECT_FALSE(status.ok());
EXPECT_EQ(status.code(), absl::StatusCode::kInvalidArgument);
}
TEST(FieldAccessTest, SetTimestampBadInputType) {
Arena arena;
TestAllTypes msg;
const FieldDescriptor* field =
TestAllTypes::descriptor()->FindFieldByName("single_timestamp");
auto status =
SetValueToSingleField(CelValue::CreateInt64(1), field, &msg, &arena);
EXPECT_FALSE(status.ok());
EXPECT_EQ(status.code(), absl::StatusCode::kInvalidArgument);
}
TEST(FieldAccessTest, SetInt32Overflow) {
Arena arena;
TestAllTypes msg;
const FieldDescriptor* field =
TestAllTypes::descriptor()->FindFieldByName("single_int32");
EXPECT_THAT(
SetValueToSingleField(
CelValue::CreateInt64(std::numeric_limits<int32_t>::max() + 1L),
field, &msg, &arena),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("Could not assign")));
}
TEST(FieldAccessTest, SetUint32Overflow) {
Arena arena;
TestAllTypes msg;
const FieldDescriptor* field =
TestAllTypes::descriptor()->FindFieldByName("single_uint32");
EXPECT_THAT(
SetValueToSingleField(
CelValue::CreateUint64(std::numeric_limits<uint32_t>::max() + 1L),
field, &msg, &arena),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("Could not assign")));
}
TEST(FieldAccessTest, SetMessage) {
Arena arena;
TestAllTypes msg;
const FieldDescriptor* field =
TestAllTypes::descriptor()->FindFieldByName("standalone_message");
TestAllTypes::NestedMessage* nested_msg =
google::protobuf::Arena::Create<TestAllTypes::NestedMessage>(&arena);
nested_msg->set_bb(1);
auto status = SetValueToSingleField(
CelProtoWrapper::CreateMessage(nested_msg, &arena), field, &msg, &arena);
EXPECT_TRUE(status.ok());
}
TEST(FieldAccessTest, SetMessageWithNull) {
Arena arena;
TestAllTypes msg;
const FieldDescriptor* field =
TestAllTypes::descriptor()->FindFieldByName("standalone_message");
auto status =
SetValueToSingleField(CelValue::CreateNull(), field, &msg, &arena);
EXPECT_TRUE(status.ok());
}
struct AccessFieldTestParam {
absl::string_view field_name;
absl::string_view message_textproto;
CelValue cel_value;
};
std::string GetTestName(
const testing::TestParamInfo<AccessFieldTestParam>& info) {
return std::string(info.param.field_name);
}
class SingleFieldTest : public testing::TestWithParam<AccessFieldTestParam> {
public:
absl::string_view field_name() const { return GetParam().field_name; }
absl::string_view message_textproto() const {
return GetParam().message_textproto;
}
CelValue cel_value() const { return GetParam().cel_value; }
};
TEST_P(SingleFieldTest, Getter) {
TestAllTypes test_message;
ASSERT_TRUE(
google::protobuf::TextFormat::ParseFromString(message_textproto(), &test_message));
google::protobuf::Arena arena;
ASSERT_OK_AND_ASSIGN(
CelValue accessed_value,
CreateValueFromSingleField(
&test_message,
test_message.GetDescriptor()->FindFieldByName(field_name()),
ProtoWrapperTypeOptions::kUnsetProtoDefault,
&CelProtoWrapper::InternalWrapMessage, &arena));
EXPECT_THAT(accessed_value, test::EqualsCelValue(cel_value()));
}
TEST_P(SingleFieldTest, Setter) {
TestAllTypes test_message;
CelValue to_set = cel_value();
google::protobuf::Arena arena;
ASSERT_OK(SetValueToSingleField(
to_set, test_message.GetDescriptor()->FindFieldByName(field_name()),
&test_message, &arena));
EXPECT_THAT(test_message, EqualsProto(message_textproto()));
}
INSTANTIATE_TEST_SUITE_P(
AllTypes, SingleFieldTest,
testing::ValuesIn<AccessFieldTestParam>({
{"single_int32", "single_int32: 1", CelValue::CreateInt64(1)},
{"single_int64", "single_int64: 1", CelValue::CreateInt64(1)},
{"single_uint32", "single_uint32: 1", CelValue::CreateUint64(1)},
{"single_uint64", "single_uint64: 1", CelValue::CreateUint64(1)},
{"single_sint32", "single_sint32: 1", CelValue::CreateInt64(1)},
{"single_sint64", "single_sint64: 1", CelValue::CreateInt64(1)},
{"single_fixed32", "single_fixed32: 1", CelValue::CreateUint64(1)},
{"single_fixed64", "single_fixed64: 1", CelValue::CreateUint64(1)},
{"single_sfixed32", "single_sfixed32: 1", CelValue::CreateInt64(1)},
{"single_sfixed64", "single_sfixed64: 1", CelValue::CreateInt64(1)},
{"single_float", "single_float: 1.0", CelValue::CreateDouble(1.0)},
{"single_double", "single_double: 1.0", CelValue::CreateDouble(1.0)},
{"single_bool", "single_bool: true", CelValue::CreateBool(true)},
{"single_string", "single_string: 'abcd'",
CelValue::CreateStringView("abcd")},
{"single_bytes", "single_bytes: 'asdf'",
CelValue::CreateBytesView("asdf")},
{"standalone_enum", "standalone_enum: BAZ", CelValue::CreateInt64(2)},
{"single_int64_wrapper", "single_int64_wrapper { value: 20 }",
CelValue::CreateInt64(20)},
{"single_value", "single_value { null_value: NULL_VALUE }",
CelValue::CreateNull()},
}),
&GetTestName);
TEST(CreateValueFromSingleFieldTest, GetMessage) {
TestAllTypes test_message;
google::protobuf::Arena arena;
ASSERT_TRUE(google::protobuf::TextFormat::ParseFromString(
"standalone_message { bb: 10 }", &test_message));
ASSERT_OK_AND_ASSIGN(
CelValue accessed_value,
CreateValueFromSingleField(
&test_message,
test_message.GetDescriptor()->FindFieldByName("standalone_message"),
ProtoWrapperTypeOptions::kUnsetProtoDefault,
&CelProtoWrapper::InternalWrapMessage, &arena));
EXPECT_THAT(accessed_value, test::IsCelMessage(EqualsProto("bb: 10")));
}
TEST(SetValueToSingleFieldTest, WrongType) {
TestAllTypes test_message;
google::protobuf::Arena arena;
EXPECT_THAT(SetValueToSingleField(
CelValue::CreateDouble(1.0),
test_message.GetDescriptor()->FindFieldByName("single_int32"),
&test_message, &arena),
StatusIs(absl::StatusCode::kInvalidArgument));
}
TEST(SetValueToSingleFieldTest, IntOutOfRange) {
CelValue out_of_range = CelValue::CreateInt64(1LL << 31);
TestAllTypes test_message;
const google::protobuf::Descriptor* descriptor = test_message.GetDescriptor();
google::protobuf::Arena arena;
EXPECT_THAT(SetValueToSingleField(out_of_range,
descriptor->FindFieldByName("single_int32"),
&test_message, &arena),
StatusIs(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(SetValueToSingleField(
out_of_range, descriptor->FindFieldByName("standalone_enum"),
&test_message, &arena),
StatusIs(absl::StatusCode::kInvalidArgument));
}
TEST(SetValueToSingleFieldTest, UintOutOfRange) {
CelValue out_of_range = CelValue::CreateUint64(1LL << 32);
TestAllTypes test_message;
const google::protobuf::Descriptor* descriptor = test_message.GetDescriptor();
google::protobuf::Arena arena;
EXPECT_THAT(SetValueToSingleField(
out_of_range, descriptor->FindFieldByName("single_uint32"),
&test_message, &arena),
StatusIs(absl::StatusCode::kInvalidArgument));
}
TEST(SetValueToSingleFieldTest, SetMessage) {
TestAllTypes::NestedMessage nested_message;
ASSERT_TRUE(google::protobuf::TextFormat::ParseFromString(R"(
bb: 42
)",
&nested_message));
google::protobuf::Arena arena;
CelValue nested_value =
CelProtoWrapper::CreateMessage(&nested_message, &arena);
TestAllTypes test_message;
const google::protobuf::Descriptor* descriptor = test_message.GetDescriptor();
ASSERT_OK(SetValueToSingleField(
nested_value, descriptor->FindFieldByName("standalone_message"),
&test_message, &arena));
EXPECT_THAT(test_message, EqualsProto("standalone_message { bb: 42 }"));
}
TEST(SetValueToSingleFieldTest, SetAnyMessage) {
TestAllTypes::NestedMessage nested_message;
ASSERT_TRUE(google::protobuf::TextFormat::ParseFromString(R"(
bb: 42
)",
&nested_message));
google::protobuf::Arena arena;
CelValue nested_value =
CelProtoWrapper::CreateMessage(&nested_message, &arena);
TestAllTypes test_message;
const google::protobuf::Descriptor* descriptor = test_message.GetDescriptor();
ASSERT_OK(SetValueToSingleField(nested_value,
descriptor->FindFieldByName("single_any"),
&test_message, &arena));
TestAllTypes::NestedMessage unpacked;
test_message.single_any().UnpackTo(&unpacked);
EXPECT_THAT(unpacked, EqualsProto("bb: 42"));
}
TEST(SetValueToSingleFieldTest, SetMessageToNullNoop) {
google::protobuf::Arena arena;
TestAllTypes test_message;
const google::protobuf::Descriptor* descriptor = test_message.GetDescriptor();
ASSERT_OK(SetValueToSingleField(
CelValue::CreateNull(), descriptor->FindFieldByName("standalone_message"),
&test_message, &arena));
EXPECT_THAT(test_message, EqualsProto(test_message.default_instance()));
}
class RepeatedFieldTest : public testing::TestWithParam<AccessFieldTestParam> {
public:
absl::string_view field_name() const { return GetParam().field_name; }
absl::string_view message_textproto() const {
return GetParam().message_textproto;
}
CelValue cel_value() const { return GetParam().cel_value; }
};
TEST_P(RepeatedFieldTest, GetFirstElem) {
TestAllTypes test_message;
ASSERT_TRUE(
google::protobuf::TextFormat::ParseFromString(message_textproto(), &test_message));
google::protobuf::Arena arena;
ASSERT_OK_AND_ASSIGN(
CelValue accessed_value,
CreateValueFromRepeatedField(
&test_message,
test_message.GetDescriptor()->FindFieldByName(field_name()), 0,
&CelProtoWrapper::InternalWrapMessage, &arena));
EXPECT_THAT(accessed_value, test::EqualsCelValue(cel_value()));
}
TEST_P(RepeatedFieldTest, AppendElem) {
TestAllTypes test_message;
CelValue to_add = cel_value();
google::protobuf::Arena arena;
ASSERT_OK(AddValueToRepeatedField(
to_add, test_message.GetDescriptor()->FindFieldByName(field_name()),
&test_message, &arena));
EXPECT_THAT(test_message, EqualsProto(message_textproto()));
}
INSTANTIATE_TEST_SUITE_P(
AllTypes, RepeatedFieldTest,
testing::ValuesIn<AccessFieldTestParam>(
{{"repeated_int32", "repeated_int32: 1", CelValue::CreateInt64(1)},
{"repeated_int64", "repeated_int64: 1", CelValue::CreateInt64(1)},
{"repeated_uint32", "repeated_uint32: 1", CelValue::CreateUint64(1)},
{"repeated_uint64", "repeated_uint64: 1", CelValue::CreateUint64(1)},
{"repeated_sint32", "repeated_sint32: 1", CelValue::CreateInt64(1)},
{"repeated_sint64", "repeated_sint64: 1", CelValue::CreateInt64(1)},
{"repeated_fixed32", "repeated_fixed32: 1", CelValue::CreateUint64(1)},
{"repeated_fixed64", "repeated_fixed64: 1", CelValue::CreateUint64(1)},
{"repeated_sfixed32", "repeated_sfixed32: 1",
CelValue::CreateInt64(1)},
{"repeated_sfixed64", "repeated_sfixed64: 1",
CelValue::CreateInt64(1)},
{"repeated_float", "repeated_float: 1.0", CelValue::CreateDouble(1.0)},
{"repeated_double", "repeated_double: 1.0",
CelValue::CreateDouble(1.0)},
{"repeated_bool", "repeated_bool: true", CelValue::CreateBool(true)},
{"repeated_string", "repeated_string: 'abcd'",
CelValue::CreateStringView("abcd")},
{"repeated_bytes", "repeated_bytes: 'asdf'",
CelValue::CreateBytesView("asdf")},
{"repeated_nested_enum", "repeated_nested_enum: BAZ",
CelValue::CreateInt64(2)}}),
&GetTestName);
TEST(RepeatedFieldTest, GetMessage) {
TestAllTypes test_message;
ASSERT_TRUE(google::protobuf::TextFormat::ParseFromString(
"repeated_nested_message { bb: 30 }", &test_message));
google::protobuf::Arena arena;
ASSERT_OK_AND_ASSIGN(CelValue accessed_value,
CreateValueFromRepeatedField(
&test_message,
test_message.GetDescriptor()->FindFieldByName(
"repeated_nested_message"),
0, &CelProtoWrapper::InternalWrapMessage, &arena));
EXPECT_THAT(accessed_value, test::IsCelMessage(EqualsProto("bb: 30")));
}
TEST(AddValueToRepeatedFieldTest, WrongType) {
TestAllTypes test_message;
google::protobuf::Arena arena;
EXPECT_THAT(
AddValueToRepeatedField(
CelValue::CreateDouble(1.0),
test_message.GetDescriptor()->FindFieldByName("repeated_int32"),
&test_message, &arena),
StatusIs(absl::StatusCode::kInvalidArgument));
}
TEST(AddValueToRepeatedFieldTest, IntOutOfRange) {
CelValue out_of_range = CelValue::CreateInt64(1LL << 31);
TestAllTypes test_message;
const google::protobuf::Descriptor* descriptor = test_message.GetDescriptor();
google::protobuf::Arena arena;
EXPECT_THAT(AddValueToRepeatedField(
out_of_range, descriptor->FindFieldByName("repeated_int32"),
&test_message, &arena),
StatusIs(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(
AddValueToRepeatedField(
out_of_range, descriptor->FindFieldByName("repeated_nested_enum"),
&test_message, &arena),
StatusIs(absl::StatusCode::kInvalidArgument));
}
TEST(AddValueToRepeatedFieldTest, UintOutOfRange) {
CelValue out_of_range = CelValue::CreateUint64(1LL << 32);
TestAllTypes test_message;
const google::protobuf::Descriptor* descriptor = test_message.GetDescriptor();
google::protobuf::Arena arena;
EXPECT_THAT(AddValueToRepeatedField(
out_of_range, descriptor->FindFieldByName("repeated_uint32"),
&test_message, &arena),
StatusIs(absl::StatusCode::kInvalidArgument));
}
TEST(AddValueToRepeatedFieldTest, AddMessage) {
TestAllTypes::NestedMessage nested_message;
ASSERT_TRUE(google::protobuf::TextFormat::ParseFromString(R"(
bb: 42
)",
&nested_message));
google::protobuf::Arena arena;
CelValue nested_value =
CelProtoWrapper::CreateMessage(&nested_message, &arena);
TestAllTypes test_message;
const google::protobuf::Descriptor* descriptor = test_message.GetDescriptor();
ASSERT_OK(AddValueToRepeatedField(
nested_value, descriptor->FindFieldByName("repeated_nested_message"),
&test_message, &arena));
EXPECT_THAT(test_message, EqualsProto("repeated_nested_message { bb: 42 }"));
}
constexpr std::array<const char*, 9> kWrapperFieldNames = {
"single_bool_wrapper", "single_int64_wrapper", "single_int32_wrapper",
"single_uint64_wrapper", "single_uint32_wrapper", "single_double_wrapper",
"single_float_wrapper", "single_string_wrapper", "single_bytes_wrapper"};
TEST(CreateValueFromFieldTest, UnsetWrapperTypesNullIfEnabled) {
CelValue result;
TestAllTypes test_message;
google::protobuf::Arena arena;
for (const auto& field : kWrapperFieldNames) {
ASSERT_OK_AND_ASSIGN(
result, CreateValueFromSingleField(
&test_message,
TestAllTypes::GetDescriptor()->FindFieldByName(field),
ProtoWrapperTypeOptions::kUnsetNull,
&CelProtoWrapper::InternalWrapMessage, &arena));
ASSERT_TRUE(result.IsNull()) << field << ": " << result.DebugString();
}
}
TEST(CreateValueFromFieldTest, UnsetWrapperTypesDefaultValueIfDisabled) {
CelValue result;
TestAllTypes test_message;
google::protobuf::Arena arena;
for (const auto& field : kWrapperFieldNames) {
ASSERT_OK_AND_ASSIGN(
result, CreateValueFromSingleField(
&test_message,
TestAllTypes::GetDescriptor()->FindFieldByName(field),
ProtoWrapperTypeOptions::kUnsetProtoDefault,
&CelProtoWrapper::InternalWrapMessage, &arena));
ASSERT_FALSE(result.IsNull()) << field << ": " << result.DebugString();
}
}
TEST(CreateValueFromFieldTest, SetWrapperTypesDefaultValue) {
CelValue result;
TestAllTypes test_message;
google::protobuf::Arena arena;
ASSERT_TRUE(google::protobuf::TextFormat::ParseFromString(
R"pb(
single_bool_wrapper {}
single_int64_wrapper {}
single_int32_wrapper {}
single_uint64_wrapper {}
single_uint32_wrapper {}
single_double_wrapper {}
single_float_wrapper {}
single_string_wrapper {}
single_bytes_wrapper {}
)pb",
&test_message));
ASSERT_OK_AND_ASSIGN(
result,
CreateValueFromSingleField(
&test_message,
TestAllTypes::GetDescriptor()->FindFieldByName("single_bool_wrapper"),
ProtoWrapperTypeOptions::kUnsetNull,
&CelProtoWrapper::InternalWrapMessage, &arena));
EXPECT_THAT(result, test::IsCelBool(false));
ASSERT_OK_AND_ASSIGN(result,
CreateValueFromSingleField(
&test_message,
TestAllTypes::GetDescriptor()->FindFieldByName(
"single_int64_wrapper"),
ProtoWrapperTypeOptions::kUnsetNull,
&CelProtoWrapper::InternalWrapMessage, &arena));
EXPECT_THAT(result, test::IsCelInt64(0));
ASSERT_OK_AND_ASSIGN(result,
CreateValueFromSingleField(
&test_message,
TestAllTypes::GetDescriptor()->FindFieldByName(
"single_int32_wrapper"),
ProtoWrapperTypeOptions::kUnsetNull,
&CelProtoWrapper::InternalWrapMessage, &arena));
EXPECT_THAT(result, test::IsCelInt64(0));
ASSERT_OK_AND_ASSIGN(
result,
CreateValueFromSingleField(&test_message,
TestAllTypes::GetDescriptor()->FindFieldByName(
"single_uint64_wrapper"),
ProtoWrapperTypeOptions::kUnsetNull,
&CelProtoWrapper::InternalWrapMessage,
&arena));
EXPECT_THAT(result, test::IsCelUint64(0));
ASSERT_OK_AND_ASSIGN(
result,
CreateValueFromSingleField(&test_message,
TestAllTypes::GetDescriptor()->FindFieldByName(
"single_uint32_wrapper"),
ProtoWrapperTypeOptions::kUnsetNull,
&CelProtoWrapper::InternalWrapMessage,
&arena));
EXPECT_THAT(result, test::IsCelUint64(0));
ASSERT_OK_AND_ASSIGN(result,
CreateValueFromSingleField(
&test_message,
TestAllTypes::GetDescriptor()->FindFieldByName(
"single_double_wrapper"),
ProtoWrapperTypeOptions::kUnsetNull,
&CelProtoWrapper::InternalWrapMessage, &arena));
EXPECT_THAT(result, test::IsCelDouble(0.0f));
ASSERT_OK_AND_ASSIGN(result,
CreateValueFromSingleField(
&test_message,
TestAllTypes::GetDescriptor()->FindFieldByName(
"single_float_wrapper"),
ProtoWrapperTypeOptions::kUnsetNull,
&CelProtoWrapper::InternalWrapMessage, &arena));
EXPECT_THAT(result, test::IsCelDouble(0.0f));
ASSERT_OK_AND_ASSIGN(result,
CreateValueFromSingleField(
&test_message,
TestAllTypes::GetDescriptor()->FindFieldByName(
"single_string_wrapper"),
ProtoWrapperTypeOptions::kUnsetNull,
&CelProtoWrapper::InternalWrapMessage, &arena));
EXPECT_THAT(result, test::IsCelString(""));
ASSERT_OK_AND_ASSIGN(result,
CreateValueFromSingleField(
&test_message,
TestAllTypes::GetDescriptor()->FindFieldByName(
"single_bytes_wrapper"),
ProtoWrapperTypeOptions::kUnsetNull,
&CelProtoWrapper::InternalWrapMessage, &arena));
EXPECT_THAT(result, test::IsCelBytes(""));
}
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/eval/public/structs/field_access_impl.cc | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/eval/public/structs/field_access_impl_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
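For orientation, a minimal usage sketch of the two public helpers in the record above, mirroring the call shapes in its unit test. The function name RoundTripExample and the standalone wiring are illustrative assumptions, not code from the repository; the namespaces follow the unit test's using-declarations.
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "eval/public/cel_value.h"
#include "eval/public/structs/cel_proto_wrapper.h"
#include "eval/public/structs/field_access_impl.h"
#include "google/protobuf/arena.h"
#include "proto/test/v1/proto3/test_all_types.pb.h"
absl::Status RoundTripExample() {
  using ::google::api::expr::runtime::CelProtoWrapper;
  using ::google::api::expr::runtime::CelValue;
  using ::google::api::expr::runtime::ProtoWrapperTypeOptions;
  using ::google::api::expr::test::v1::proto3::TestAllTypes;
  namespace fa = ::google::api::expr::runtime::internal;
  google::protobuf::Arena arena;
  TestAllTypes msg;
  const google::protobuf::FieldDescriptor* field =
      TestAllTypes::descriptor()->FindFieldByName("single_int64");
  // Write a CEL int64 into the proto field.
  absl::Status set_status = fa::SetValueToSingleField(
      CelValue::CreateInt64(42), field, &msg, &arena);
  if (!set_status.ok()) return set_status;
  // Read it back out as a CelValue.
  absl::StatusOr<CelValue> read = fa::CreateValueFromSingleField(
      &msg, field, ProtoWrapperTypeOptions::kUnsetProtoDefault,
      &CelProtoWrapper::InternalWrapMessage, &arena);
  if (!read.ok()) return read.status();
  // Here read->Int64OrDie() == 42.
  return absl::OkStatus();
}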
05402f47-5603-4c59-b33b-796f157484b4 | cpp | tensorflow/tensorflow | dump_graph | tensorflow/compiler/mlir/tensorflow/utils/dump_graph.cc | tensorflow/compiler/mlir/tensorflow/utils/dump_graph_test.cc | #include "tensorflow/compiler/mlir/tensorflow/utils/dump_graph.h"
#include <cstdint>
#include <cstring>
#include <string>
#include <utility>
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Support/FormatVariadic.h"
#include "llvm/Support/raw_ostream.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/Verifier.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/error_util.h"
#include "tensorflow/core/ir/importexport/graphdef_import.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/path.h"
#include "tensorflow/core/util/dump_graph.h"
namespace tensorflow {
namespace {
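// Adapts a tensorflow::WritableFile to llvm::raw_ostream so MLIR can print
// straight into it. Unbuffered, so each write reaches the file immediately.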
struct WritableFileRawStream : public llvm::raw_ostream {
explicit WritableFileRawStream(WritableFile* file) : file(file) {
SetUnbuffered();
}
~WritableFileRawStream() override = default;
uint64_t current_pos() const override { return 0; }
void write_impl(const char* ptr, size_t size) override {
if (file) {
Status s = file->Append(StringPiece(ptr, size));
if (!s.ok()) {
LOG(WARNING) << "Write failed: " << s;
file = nullptr;
}
}
}
WritableFile* file;
};
}
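// Imports `graph` into the TFG MLIR dialect, verifies the module, and prints
// its textual IR into `file` using the configured printing flags.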
Status DumpTextualIRToFile(const MlirDumpConfig& config, const Graph& graph,
const FunctionLibraryDefinition* flib_def,
WritableFile* file) {
WritableFileRawStream os(std::move(file));
mlir::MLIRContext context;
mlir::OwningOpRef<mlir::ModuleOp> module;
if (flib_def) {
flib_def = &graph.flib_def();
}
auto convert = [&]() -> Status {
mlir::StatusScopedDiagnosticHandler status_handler(&context);
GraphDebugInfo debug_info;
switch (config.dialect) {
case MlirDumpConfig::Dialect::kTFG: {
TF_ASSIGN_OR_RETURN(module,
mlir::tfg::ImportGraphAndFunctionsToMlir(
&context, debug_info, graph,
flib_def ? *flib_def : graph.flib_def()));
break;
}
}
if (failed(mlir::verify(*module))) {
return status_handler.ConsumeStatus();
}
return status_handler.ConsumeStatus();
};
TF_RETURN_IF_ERROR(convert());
module->print(os, config.op_printing_flags);
return absl::OkStatus();
}
void UseMlirForGraphDump(const MlirDumpConfig& config) {
SetGraphDumper(
[config](const Graph& graph, const FunctionLibraryDefinition* flib_def,
WritableFile* file) -> Status {
return DumpTextualIRToFile(config, graph, flib_def, file);
},
".mlir");
}
} | #include "tensorflow/compiler/mlir/tensorflow/utils/dump_graph.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/util/dump_graph.h"
namespace tensorflow {
namespace {
void ExpectHasSubstr(const string& s, const string& expected) {
EXPECT_TRUE(absl::StrContains(s, expected))
<< "'" << s << "' does not contain '" << expected << "'";
}
void ExpectHasNoSubstr(const string& s, const string& expected) {
EXPECT_FALSE(absl::StrContains(s, expected))
<< "'" << s << "' should not contain '" << expected << "'";
}
class StringWritableFile : public WritableFile {
public:
explicit StringWritableFile(string* str) : str_(*str) {}
Status Append(StringPiece data) override {
absl::StrAppend(&str_, data);
return absl::OkStatus();
}
Status Close() override { return absl::OkStatus(); }
Status Flush() override { return absl::OkStatus(); }
Status Name(StringPiece* result) const override {
*result = "(string)";
return absl::OkStatus();
}
Status Sync() override { return absl::OkStatus(); }
Status Tell(int64_t* position) override {
return errors::Unimplemented("Stream not seekable");
}
private:
string& str_;
};
TEST(Dump, TextualIrToFileSuccess) {
Graph graph(OpRegistry::Global());
Node* node;
TF_CHECK_OK(NodeBuilder("A", "NoOp").Finalize(&graph, &node));
setenv("TF_DUMP_GRAPH_PREFIX", testing::TmpDir().c_str(), 1);
UseMlirForGraphDump(MlirDumpConfig());
string ret = DumpGraphToFile("tir", graph);
ASSERT_EQ(ret, io::JoinPath(testing::TmpDir(), "tir.mlir"));
string actual;
TF_ASSERT_OK(ReadFileToString(Env::Default(), ret, &actual));
}
TEST(Dump, TextualIrWithOptions) {
Graph graph(OpRegistry::Global());
Node* node;
TF_ASSERT_OK(NodeBuilder("A", "Placeholder")
.Attr("dtype", DT_FLOAT)
.Finalize(&graph, &node));
string actual;
StringWritableFile file(&actual);
TF_ASSERT_OK(DumpTextualIRToFile(MlirDumpConfig().emit_location_information(),
graph, nullptr, &file));
string expected_substr = R"(loc(#loc))";
ExpectHasSubstr(actual, expected_substr);
}
TEST(Dump, DumpToTFG) {
Graph graph(OpRegistry::Global());
Node* node;
TF_CHECK_OK(NodeBuilder("A", "NoOp").Finalize(&graph, &node));
string actual;
StringWritableFile file(&actual);
TF_ASSERT_OK(DumpTextualIRToFile(
MlirDumpConfig().emit_dialect(MlirDumpConfig::Dialect::kTFG), graph,
nullptr, &file));
string expected_substr("tfg.graph");
ExpectHasSubstr(actual, expected_substr);
string not_expected_substr("tf_executor.island");
ExpectHasNoSubstr(actual, not_expected_substr);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tensorflow/utils/dump_graph.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tensorflow/utils/dump_graph_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
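For orientation, a minimal sketch of the dump path in the record above, modeled on its unit test. DumpExample is an assumed name, and TF_DUMP_GRAPH_PREFIX must point at a writable directory for DumpGraphToFile to succeed.
#include <string>
#include "tensorflow/compiler/mlir/tensorflow/utils/dump_graph.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/util/dump_graph.h"
std::string DumpExample() {
  tensorflow::Graph graph(tensorflow::OpRegistry::Global());
  tensorflow::Node* node;
  TF_CHECK_OK(tensorflow::NodeBuilder("A", "NoOp").Finalize(&graph, &node));
  // Route subsequent DumpGraphToFile calls through the textual-MLIR path.
  tensorflow::UseMlirForGraphDump(tensorflow::MlirDumpConfig());
  // Writes under $TF_DUMP_GRAPH_PREFIX and, with the MLIR dumper registered,
  // returns a path ending in ".mlir".
  return tensorflow::DumpGraphToFile("example", graph);
}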
f4324f89-eb93-434c-987d-d0f5ebdbb8c2 | cpp | tensorflow/tensorflow | data_flow_ops | tensorflow/core/ops/data_flow_ops.cc | tensorflow/core/ops/data_flow_ops_test.cc | #include "tensorflow/core/framework/common_shape_fns.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_def_builder.h"
#include "tensorflow/core/framework/shape_inference.h"
namespace tensorflow {
using shape_inference::DimensionHandle;
using shape_inference::InferenceContext;
using shape_inference::ShapeHandle;
namespace {
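// Shared shape function for the dequeue-many/up-to v2 ops: prepends n_shape
// to each component shape recorded on the queue's resource handle, falling
// back to unknown shapes when that metadata is missing.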
Status DequeueManyV2Shape(InferenceContext* c, ShapeHandle n_shape) {
auto* t = c->input_handle_shapes_and_types(0);
if (t != nullptr && t->size() == c->num_outputs()) {
for (int i = 0; i < c->num_outputs(); ++i) {
ShapeHandle combined_shape;
TF_RETURN_IF_ERROR(
c->Concatenate(n_shape, (*t)[i].shape, &combined_shape));
c->set_output(i, combined_shape);
}
return absl::OkStatus();
} else {
return shape_inference::UnknownShape(c);
}
}
}
REGISTER_OP("DynamicPartition")
.Input("data: T")
.Input("partitions: int32")
.Output("outputs: num_partitions * T")
.Attr("num_partitions: int")
.Attr("T: type")
.SetShapeFn([](InferenceContext* c) {
int64_t num_partitions;
TF_RETURN_IF_ERROR(c->GetAttr("num_partitions", &num_partitions));
ShapeHandle data_shape = c->input(0);
ShapeHandle partitions_shape = c->input(1);
if (!c->RankKnown(partitions_shape)) {
return shape_inference::UnknownShape(c);
}
const int64_t rank = c->Rank(partitions_shape);
ShapeHandle unused;
TF_RETURN_IF_ERROR(
c->MergePrefix(data_shape, partitions_shape, &unused, &unused));
ShapeHandle unknown_dim0 = c->MakeShape({c->UnknownDim()});
ShapeHandle data_suffix_shape;
TF_RETURN_IF_ERROR(c->Subshape(data_shape, rank, &data_suffix_shape));
ShapeHandle result_shape;
TF_RETURN_IF_ERROR(
c->Concatenate(unknown_dim0, data_suffix_shape, &result_shape));
for (int i = 0; i < c->num_outputs(); ++i) {
c->set_output(i, result_shape);
}
return absl::OkStatus();
});
namespace {
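// Shape function for (Parallel)DynamicStitch: merges the per-input suffix
// shapes and, when every indices tensor is a compile-time constant, infers
// the leading output dimension as max(index) + 1.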
Status DynamicStitchShapeFunction(InferenceContext* c) {
int32_t num_partitions;
TF_RETURN_IF_ERROR(c->GetAttr("N", &num_partitions));
bool all_indices_constant = true;
int32_t max_index = -1;
ShapeHandle extra_shape = c->UnknownShape();
for (int i = 0; i < num_partitions; ++i) {
const Tensor* indices_t = c->input_tensor(i);
if (indices_t == nullptr) {
all_indices_constant = false;
}
ShapeHandle indices_shape = c->input(i);
ShapeHandle data_shape = c->input(i + num_partitions);
if (!c->RankKnown(indices_shape)) {
continue;
}
const int64_t indices_rank = c->Rank(indices_shape);
ShapeHandle unused;
TF_RETURN_IF_ERROR(
c->MergePrefix(data_shape, indices_shape, &unused, &unused));
ShapeHandle rest;
TF_RETURN_IF_ERROR(c->Subshape(data_shape, indices_rank, &rest));
TF_RETURN_IF_ERROR(c->Merge(extra_shape, rest, &extra_shape));
if (indices_t != nullptr) {
const int32* indices = indices_t->flat<int32>().data();
int64_t count = indices_t->NumElements();
for (int64_t i = 0; i < count; ++i) {
if (indices[i] > max_index) {
max_index = indices[i];
}
}
}
}
ShapeHandle output_shape = c->Vector(
all_indices_constant ? c->MakeDim(max_index + 1) : c->UnknownDim());
TF_RETURN_IF_ERROR(c->Concatenate(output_shape, extra_shape, &output_shape));
c->set_output(0, output_shape);
return absl::OkStatus();
}
}
REGISTER_OP("DynamicStitch")
.Input("indices: N * int32")
.Input("data: N * T")
.Output("merged: T")
.Attr("N : int >= 1")
.Attr("T : type")
.SetShapeFn(DynamicStitchShapeFunction);
REGISTER_OP("ParallelDynamicStitch")
.Input("indices: N * int32")
.Input("data: N * T")
.Output("merged: T")
.Attr("N : int >= 1")
.Attr("T : type")
.SetShapeFn(DynamicStitchShapeFunction);
namespace {
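// Checks that every input is a 2-element vector (the shape of a Ref(string)
// handle) and marks every output as scalar.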
Status TwoElementVectorInputsAndScalarOutputs(InferenceContext* c) {
ShapeHandle handle;
DimensionHandle unused_handle;
for (int i = 0; i < c->num_inputs(); ++i) {
TF_RETURN_IF_ERROR(c->WithRank(c->input(i), 1, &handle));
TF_RETURN_IF_ERROR(c->WithValue(c->Dim(handle, 0), 2, &unused_handle));
}
for (int i = 0; i < c->num_outputs(); ++i) {
c->set_output(i, c->Scalar());
}
return absl::OkStatus();
}
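// Sets the single output to a 2-element vector, the shape of a Ref(string)
// queue/stack handle.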
Status TwoElementOutput(InferenceContext* c) {
c->set_output(0, c->Vector(2));
return absl::OkStatus();
}
}
REGISTER_OP("RandomShuffleQueue")
.Output("handle: Ref(string)")
.Attr("component_types: list(type) >= 1")
.Attr("shapes: list(shape) >= 0 = []")
.Attr("capacity: int = -1")
.Attr("min_after_dequeue: int = 0")
.Attr("seed: int = 0")
.Attr("seed2: int = 0")
.Attr("container: string = ''")
.Attr("shared_name: string = ''")
.SetIsStateful()
.SetShapeFn(TwoElementOutput);
REGISTER_OP("RandomShuffleQueueV2")
.Output("handle: resource")
.Attr("component_types: list(type) >= 1")
.Attr("shapes: list(shape) >= 0 = []")
.Attr("capacity: int = -1")
.Attr("min_after_dequeue: int = 0")
.Attr("seed: int = 0")
.Attr("seed2: int = 0")
.Attr("container: string = ''")
.Attr("shared_name: string = ''")
.SetIsStateful()
.SetShapeFn(shape_inference::ScalarShape);
REGISTER_OP("FIFOQueue")
.Output("handle: Ref(string)")
.Attr("component_types: list(type) >= 1")
.Attr("shapes: list(shape) >= 0 = []")
.Attr("capacity: int = -1")
.Attr("container: string = ''")
.Attr("shared_name: string = ''")
.SetIsStateful()
.SetShapeFn(TwoElementOutput);
REGISTER_OP("FIFOQueueV2")
.Output("handle: resource")
.Attr("component_types: list(type) >= 1")
.Attr("shapes: list(shape) >= 0 = []")
.Attr("capacity: int = -1")
.Attr("container: string = ''")
.Attr("shared_name: string = ''")
.SetIsStateful()
.SetShapeFn(shape_inference::ScalarShape);
REGISTER_OP("PaddingFIFOQueue")
.Output("handle: Ref(string)")
.Attr("component_types: list(type) >= 1")
.Attr("shapes: list(shape) >= 0 = []")
.Attr("capacity: int = -1")
.Attr("container: string = ''")
.Attr("shared_name: string = ''")
.SetIsStateful()
.SetShapeFn(TwoElementOutput);
REGISTER_OP("PaddingFIFOQueueV2")
.Output("handle: resource")
.Attr("component_types: list(type) >= 1")
.Attr("shapes: list(shape) >= 0 = []")
.Attr("capacity: int = -1")
.Attr("container: string = ''")
.Attr("shared_name: string = ''")
.SetIsStateful()
.SetShapeFn(shape_inference::ScalarShape);
REGISTER_OP("PriorityQueue")
.Output("handle: Ref(string)")
.Attr("component_types: list(type) >= 0 = []")
.Attr("shapes: list(shape) >= 0")
.Attr("capacity: int = -1")
.Attr("container: string = ''")
.Attr("shared_name: string = ''")
.SetIsStateful()
.SetShapeFn(TwoElementOutput);
REGISTER_OP("PriorityQueueV2")
.Output("handle: resource")
.Attr("component_types: list(type) >= 0 = []")
.Attr("shapes: list(shape) >= 0")
.Attr("capacity: int = -1")
.Attr("container: string = ''")
.Attr("shared_name: string = ''")
.SetIsStateful()
.SetShapeFn(shape_inference::ScalarShape);
REGISTER_OP("FakeQueue")
.Input("resource: resource")
.Output("handle: Ref(string)")
.SetIsStateful()
.SetShapeFn(TwoElementOutput);
REGISTER_OP("QueueEnqueue")
.Input("handle: Ref(string)")
.Input("components: Tcomponents")
.Attr("Tcomponents: list(type) >= 1")
.Attr("timeout_ms: int = -1")
.SetShapeFn(shape_inference::UnknownShape);
REGISTER_OP("QueueEnqueueV2")
.Input("handle: resource")
.Input("components: Tcomponents")
.Attr("Tcomponents: list(type) >= 1")
.Attr("timeout_ms: int = -1")
.SetShapeFn(shape_inference::UnknownShape);
REGISTER_OP("QueueEnqueueMany")
.Input("handle: Ref(string)")
.Input("components: Tcomponents")
.Attr("Tcomponents: list(type) >= 1")
.Attr("timeout_ms: int = -1")
.SetShapeFn(shape_inference::UnknownShape);
REGISTER_OP("QueueEnqueueManyV2")
.Input("handle: resource")
.Input("components: Tcomponents")
.Attr("Tcomponents: list(type) >= 1")
.Attr("timeout_ms: int = -1")
.SetShapeFn(shape_inference::UnknownShape);
REGISTER_OP("QueueDequeue")
.Input("handle: Ref(string)")
.Output("components: component_types")
.Attr("component_types: list(type) >= 1")
.Attr("timeout_ms: int = -1")
.SetShapeFn(shape_inference::UnknownShape);
REGISTER_OP("QueueDequeueV2")
.Input("handle: resource")
.Output("components: component_types")
.Attr("component_types: list(type) >= 1")
.Attr("timeout_ms: int = -1")
.SetShapeFn([](InferenceContext* c) {
auto* t = c->input_handle_shapes_and_types(0);
if (t != nullptr && t->size() == c->num_outputs()) {
for (int i = 0; i < c->num_outputs(); ++i) {
c->set_output(i, (*t)[i].shape);
}
return absl::OkStatus();
} else {
return shape_inference::UnknownShape(c);
}
});
REGISTER_OP("QueueDequeueMany")
.Input("handle: Ref(string)")
.Input("n: int32")
.Output("components: component_types")
.Attr("component_types: list(type) >= 1")
.Attr("timeout_ms: int = -1")
.SetShapeFn(shape_inference::UnknownShape);
REGISTER_OP("QueueDequeueManyV2")
.Input("handle: resource")
.Input("n: int32")
.Output("components: component_types")
.Attr("component_types: list(type) >= 1")
.Attr("timeout_ms: int = -1")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle n_shape;
if (c->input_tensor(1) == nullptr) {
n_shape = c->Vector(InferenceContext::kUnknownDim);
} else {
const int32_t n = c->input_tensor(1)->scalar<int32>()();
if (n < 0) {
return errors::InvalidArgument("Input 'n' must be >= 0, but is ", n);
}
n_shape = c->Vector(n);
}
return DequeueManyV2Shape(c, n_shape);
});
REGISTER_OP("QueueDequeueUpTo")
.Input("handle: Ref(string)")
.Input("n: int32")
.Output("components: component_types")
.Attr("component_types: list(type) >= 1")
.Attr("timeout_ms: int = -1")
.SetShapeFn(shape_inference::UnknownShape);
REGISTER_OP("QueueDequeueUpToV2")
.Input("handle: resource")
.Input("n: int32")
.Output("components: component_types")
.Attr("component_types: list(type) >= 1")
.Attr("timeout_ms: int = -1")
.SetShapeFn([](InferenceContext* c) {
return DequeueManyV2Shape(c, c->Vector(InferenceContext::kUnknownDim));
});
REGISTER_OP("QueueClose")
.Input("handle: Ref(string)")
.SetShapeFn(TwoElementVectorInputsAndScalarOutputs)
.Attr("cancel_pending_enqueues: bool = false");
REGISTER_OP("QueueCloseV2")
.Input("handle: resource")
.SetShapeFn(shape_inference::NoOutputs)
.Attr("cancel_pending_enqueues: bool = false");
REGISTER_OP("QueueIsClosed")
.Input("handle: Ref(string)")
.Output("is_closed: bool")
.SetShapeFn(shape_inference::ScalarShape);
REGISTER_OP("QueueIsClosedV2")
.Input("handle: resource")
.Output("is_closed: bool")
.SetShapeFn(shape_inference::ScalarShape);
REGISTER_OP("QueueSize")
.Input("handle: Ref(string)")
.Output("size: int32")
.SetShapeFn(TwoElementVectorInputsAndScalarOutputs);
REGISTER_OP("QueueSizeV2")
.Input("handle: resource")
.Output("size: int32")
.SetShapeFn(shape_inference::UnchangedShape);
REGISTER_OP("AccumulatorNumAccumulated")
.Input("handle: Ref(string)")
.Output("num_accumulated: int32")
.SetShapeFn(shape_inference::ScalarShape);
REGISTER_OP("AccumulatorSetGlobalStep")
.Input("handle: Ref(string)")
.Input("new_global_step: int64")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle unused;
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 0, &unused));
return absl::OkStatus();
});
REGISTER_OP("ConditionalAccumulator")
.Output("handle: Ref(string)")
.Attr("dtype: numbertype")
.Attr("shape: shape")
.Attr("container: string = ''")
.Attr("shared_name: string = ''")
.Attr("reduction_type: { 'MEAN', 'SUM' } = 'MEAN' ")
.SetIsStateful()
.SetShapeFn([](InferenceContext* c) {
c->set_output(0, c->Vector(2));
return absl::OkStatus();
});
REGISTER_OP("AccumulatorApplyGradient")
.Input("handle: Ref(string)")
.Input("local_step: int64")
.Input("gradient: dtype")
.Attr("dtype: numbertype")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle unused;
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 0, &unused));
return absl::OkStatus();
});
REGISTER_OP("AccumulatorTakeGradient")
.Input("handle: Ref(string)")
.Input("num_required: int32")
.Output("average: dtype")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle unused;
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 0, &unused));
return shape_inference::UnknownShape(c);
})
.Attr("dtype: numbertype");
REGISTER_OP("ResourceAccumulatorNumAccumulated")
.Input("handle: resource")
.Output("num_accumulated: int32")
.SetShapeFn(shape_inference::ScalarShape);
REGISTER_OP("ResourceAccumulatorSetGlobalStep")
.Input("handle: resource")
.Input("new_global_step: int64")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle unused;
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 0, &unused));
return absl::OkStatus();
});
REGISTER_OP("ResourceConditionalAccumulator")
.Output("handle: resource")
.Attr("dtype: numbertype")
.Attr("shape: shape")
.Attr("container: string = ''")
.Attr("shared_name: string = ''")
.Attr("reduction_type: { 'MEAN', 'SUM' } = 'MEAN' ")
.SetIsStateful()
.SetShapeFn([](InferenceContext* c) {
c->set_output(0, c->Vector(2));
return absl::OkStatus();
});
REGISTER_OP("ResourceAccumulatorApplyGradient")
.Input("handle: resource")
.Input("local_step: int64")
.Input("gradient: dtype")
.Attr("dtype: numbertype")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle unused;
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 0, &unused));
return absl::OkStatus();
});
REGISTER_OP("ResourceAccumulatorTakeGradient")
.Input("handle: resource")
.Input("num_required: int32")
.Output("average: dtype")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle unused;
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 0, &unused));
return shape_inference::UnknownShape(c);
})
.Attr("dtype: numbertype");
REGISTER_OP("SparseConditionalAccumulator")
.Output("handle: Ref(string)")
.Attr("dtype: numbertype")
.Attr("shape: shape")
.Attr("container: string = ''")
.Attr("shared_name: string = ''")
.Attr("reduction_type: { 'MEAN', 'SUM' } = 'MEAN' ")
.SetIsStateful()
.SetShapeFn([](InferenceContext* c) {
c->set_output(0, c->Vector(2));
return absl::OkStatus();
});
REGISTER_OP("SparseAccumulatorApplyGradient")
.Input("handle: Ref(string)")
.Input("local_step: int64")
.Input("gradient_indices: int64")
.Input("gradient_values: dtype")
.Input("gradient_shape: int64")
.Attr("dtype: numbertype")
.Attr("has_known_shape: bool")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle unused;
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 0, &unused));
return absl::OkStatus();
});
REGISTER_OP("SparseAccumulatorTakeGradient")
.Input("handle: Ref(string)")
.Input("num_required: int32")
.Output("indices: int64")
.Output("values: dtype")
.Output("shape: int64")
.Attr("dtype: numbertype")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle unused;
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 0, &unused));
return shape_inference::UnknownShape(c);
});
REGISTER_OP("StackV2")
.Input("max_size: int32")
.Output("handle: resource")
.Attr("elem_type: type")
.Attr("stack_name: string = ''")
.SetIsStateful()
.SetShapeFn(TwoElementOutput);
REGISTER_OP("StackPushV2")
.Input("handle: resource")
.Input("elem: T")
.Output("output: T")
.Attr("T: type")
.Attr("swap_memory: bool = false")
.SetShapeFn([](shape_inference::InferenceContext* c) {
c->set_output(0, c->input(1));
return absl::OkStatus();
});
REGISTER_OP("StackPopV2")
.Input("handle: resource")
.Output("elem: elem_type")
.Attr("elem_type: type")
.SetShapeFn(shape_inference::UnknownShape);
REGISTER_OP("StackCloseV2")
.Input("handle: resource")
.SetShapeFn(TwoElementVectorInputsAndScalarOutputs);
REGISTER_OP("Stack")
.Output("handle: Ref(string)")
.Attr("elem_type: type")
.Attr("stack_name: string = ''")
.SetIsStateful()
.SetShapeFn(TwoElementOutput);
REGISTER_OP("StackPush")
.Input("handle: Ref(string)")
.Input("elem: T")
.Output("output: T")
.Attr("T: type")
.Attr("swap_memory: bool = false")
.SetShapeFn([](shape_inference::InferenceContext* c) {
c->set_output(0, c->input(1));
return absl::OkStatus();
});
REGISTER_OP("StackPop")
.Input("handle: Ref(string)")
.Output("elem: elem_type")
.Attr("elem_type: type")
.SetShapeFn(shape_inference::UnknownShape);
REGISTER_OP("StackClose")
.Input("handle: Ref(string)")
.SetShapeFn(TwoElementVectorInputsAndScalarOutputs);
REGISTER_OP("TensorArrayV3")
.Input("size: int32")
.Attr("dtype: type")
.Attr("element_shape: shape = { unknown_rank: true }")
.Attr("dynamic_size: bool = false")
.Attr("clear_after_read: bool = true")
.Attr("identical_element_shapes: bool = false")
.Attr("tensor_array_name: string = ''")
.Output("handle: resource")
.Output("flow: float")
.SetIsStateful()
.SetShapeFn([](InferenceContext* c) {
ShapeHandle unused;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 0, &unused));
c->set_output(0, c->Vector(2));
c->set_output(1, c->Scalar());
bool identical_shapes;
TF_RETURN_IF_ERROR(
c->GetAttr("identical_element_shapes", &identical_shapes));
DataType t;
TF_RETURN_IF_ERROR(c->GetAttr("dtype", &t));
PartialTensorShape p;
TF_RETURN_IF_ERROR(c->GetAttr("element_shape", &p));
ShapeHandle s;
TF_RETURN_IF_ERROR(c->MakeShapeFromPartialTensorShape(p, &s));
if (c->FullyDefined(s) || identical_shapes) {
c->set_output_handle_shapes_and_types(
0, std::vector<shape_inference::ShapeAndType>{{s, t}});
}
return absl::OkStatus();
});
REGISTER_OP("TensorArrayGradV3")
.Input("handle: resource")
.Input("flow_in: float")
.Output("grad_handle: resource")
.Output("flow_out: float")
.Attr("source: string")
.SetIsStateful()
.SetShapeFn([](InferenceContext* c) {
ShapeHandle handle;
DimensionHandle unused_dim;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 1, &handle));
TF_RETURN_IF_ERROR(c->WithValue(c->Dim(handle, 0), 2, &unused_dim));
c->set_output(0, c->Vector(2));
c->set_output(1, c->Scalar());
if (c->input_handle_shapes_and_types(0)) {
c->set_output_handle_shapes_and_types(
0, *c->input_handle_shapes_and_types(0));
}
return absl::OkStatus();
});
REGISTER_OP("TensorArrayGradWithShape")
.Input("handle: resource")
.Input("flow_in: float")
.Input("shape_to_prepend: int32")
.Output("grad_handle: resource")
.Output("flow_out: float")
.Attr("source: string")
.SetIsStateful()
.SetShapeFn([](InferenceContext* c) {
ShapeHandle handle;
DimensionHandle unused_dim;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 1, &handle));
TF_RETURN_IF_ERROR(c->WithValue(c->Dim(handle, 0), 2, &unused_dim));
c->set_output(0, c->Vector(2));
c->set_output(1, c->Scalar());
auto* shape_and_type = c->input_handle_shapes_and_types(0);
if (shape_and_type) {
auto input_shape = (*shape_and_type)[0].shape;
auto dtype = (*shape_and_type)[0].dtype;
int64_t prepend_rank = c->Value(c->Dim(c->input(2), 0));
if (c->RankKnown(input_shape) &&
prepend_rank != InferenceContext::kUnknownDim) {
int32_t input_rank = c->Rank(input_shape);
std::vector<DimensionHandle> dims;
dims.reserve(prepend_rank + input_rank);
for (int i = 0; i < prepend_rank; ++i) {
dims.push_back(c->UnknownDim());
}
for (int i = 0; i < input_rank; ++i) {
dims.push_back(c->Dim(input_shape, i));
}
c->set_output_handle_shapes_and_types(0,
{{c->MakeShape(dims), dtype}});
} else {
c->set_output_handle_shapes_and_types(0,
{{c->UnknownShape(), dtype}});
}
}
return absl::OkStatus();
});
REGISTER_OP("TensorArrayWriteV3")
.Input("handle: resource")
.Input("index: int32")
.Input("value: T")
.Input("flow_in: float")
.Output("flow_out: float")
.Attr("T: type")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle handle;
DimensionHandle unused_dim;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 1, &handle));
TF_RETURN_IF_ERROR(c->WithValue(c->Dim(handle, 0), 2, &unused_dim));
ShapeHandle unused;
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 0, &unused));
TF_RETURN_IF_ERROR(c->WithRank(c->input(3), 0, &unused));
auto* handle_data = c->input_handle_shapes_and_types(0);
if (handle_data != nullptr && !handle_data->empty()) {
shape_inference::ShapeAndType shape_and_type = (*handle_data)[0];
ShapeHandle value_shape = c->input(2);
TF_RETURN_IF_ERROR(
c->Merge(shape_and_type.shape, value_shape, &unused));
}
return shape_inference::ScalarShape(c);
});
REGISTER_OP("TensorArrayReadV3")
.Input("handle: resource")
.Input("index: int32")
.Input("flow_in: float")
.Output("value: dtype")
.Attr("dtype: type")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle handle;
DimensionHandle unused_dim;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 1, &handle));
TF_RETURN_IF_ERROR(c->WithValue(c->Dim(handle, 0), 2, &unused_dim));
ShapeHandle unused;
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 0, &unused));
TF_RETURN_IF_ERROR(c->WithRank(c->input(2), 0, &unused));
auto shapes = c->input_handle_shapes_and_types(0);
if (shapes != nullptr && !shapes->empty()) {
ShapeHandle tensor_shape = shapes->at(0).shape;
c->set_output(0, tensor_shape);
return absl::OkStatus();
} else {
return shape_inference::UnknownShape(c);
}
});
REGISTER_OP("TensorArrayGatherV3")
.Input("handle: resource")
.Input("indices: int32")
.Input("flow_in: float")
.Output("value: dtype")
.Attr("dtype: type")
.Attr("element_shape: shape = { unknown_rank: true }")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle indices;
ShapeHandle unused;
DimensionHandle unused_dim;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 1, &unused));
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 1, &indices));
TF_RETURN_IF_ERROR(c->WithValue(c->Dim(c->input(0), 0), 2, &unused_dim));
TF_RETURN_IF_ERROR(c->WithRank(c->input(2), 0, &unused));
auto shapes = c->input_handle_shapes_and_types(0);
if (shapes != nullptr && !shapes->empty()) {
ShapeHandle tensor_shape = shapes->at(0).shape;
ShapeHandle output_shape;
TF_RETURN_IF_ERROR(
c->Concatenate(indices, tensor_shape, &output_shape));
c->set_output(0, output_shape);
return absl::OkStatus();
} else {
PartialTensorShape p;
TF_RETURN_IF_ERROR(c->GetAttr("element_shape", &p));
ShapeHandle s;
TF_RETURN_IF_ERROR(c->MakeShapeFromPartialTensorShape(p, &s));
ShapeHandle output_shape;
TF_RETURN_IF_ERROR(c->Concatenate(indices, s, &output_shape));
c->set_output(0, output_shape);
return absl::OkStatus();
}
});
REGISTER_OP("TensorArrayScatterV3")
.Input("handle: resource")
.Input("indices: int32")
.Input("value: T")
.Input("flow_in: float")
.Output("flow_out: float")
.Attr("T: type")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle indices;
ShapeHandle unused;
DimensionHandle unused_dim;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 1, &unused));
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 1, &indices));
TF_RETURN_IF_ERROR(c->WithValue(c->Dim(c->input(0), 0), 2, &unused_dim));
TF_RETURN_IF_ERROR(c->WithRank(c->input(3), 0, &unused));
ShapeHandle value_shape;
TF_RETURN_IF_ERROR(
c->MergePrefix(c->input(2), indices, &value_shape, &indices));
auto shapes = c->input_handle_shapes_and_types(0);
if (shapes != nullptr && !shapes->empty()) {
ShapeHandle tensor_shape = shapes->at(0).shape;
ShapeHandle fed_shape;
TF_RETURN_IF_ERROR(c->Subshape(value_shape, 1, &fed_shape));
TF_RETURN_IF_ERROR(c->Merge(tensor_shape, fed_shape, &fed_shape));
}
return shape_inference::ScalarShape(c);
});
REGISTER_OP("TensorArrayConcatV3")
.Input("handle: resource")
.Input("flow_in: float")
.Output("value: dtype")
.Output("lengths: int64")
.Attr("dtype: type")
.Attr("element_shape_except0: shape = { unknown_rank: true }")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle handle;
DimensionHandle unused_dim;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 1, &handle));
TF_RETURN_IF_ERROR(c->WithValue(c->Dim(handle, 0), 2, &unused_dim));
ShapeHandle unused;
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 0, &unused));
c->set_output(0, c->UnknownShape());
c->set_output(1, c->Vector(c->UnknownDim()));
return absl::OkStatus();
});
REGISTER_OP("TensorArraySplitV3")
.Input("handle: resource")
.Input("value: T")
.Input("lengths: int64")
.Input("flow_in: float")
.Output("flow_out: float")
.Attr("T: type")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle handle;
DimensionHandle unused_dim;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 1, &handle));
TF_RETURN_IF_ERROR(c->WithValue(c->Dim(handle, 0), 2, &unused_dim));
ShapeHandle unused;
TF_RETURN_IF_ERROR(c->WithRank(c->input(2), 1, &unused));
TF_RETURN_IF_ERROR(c->WithRank(c->input(3), 0, &unused));
return shape_inference::ScalarShape(c);
});
REGISTER_OP("TensorArraySizeV3")
.Input("handle: resource")
.Input("flow_in: float")
.Output("size: int32")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle handle;
DimensionHandle unused_dim;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 1, &handle));
TF_RETURN_IF_ERROR(c->WithValue(c->Dim(handle, 0), 2, &unused_dim));
return shape_inference::ScalarShape(c);
});
REGISTER_OP("TensorArrayCloseV3")
.Input("handle: resource")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle handle;
DimensionHandle unused_dim;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 1, &handle));
TF_RETURN_IF_ERROR(c->WithValue(c->Dim(handle, 0), 2, &unused_dim));
return absl::OkStatus();
});
REGISTER_OP("TensorArray")
.Input("size: int32")
.Attr("dtype: type")
.Attr("dynamic_size: bool = false")
.Attr("clear_after_read: bool = true")
.Attr("tensor_array_name: string = ''")
.Attr("element_shape: shape = { unknown_rank: true }")
.Output("handle: Ref(string)")
.SetIsStateful()
.SetShapeFn(shape_inference::UnknownShape)
.Deprecated(16, "Use TensorArrayV3");
REGISTER_OP("TensorArrayV2")
.Input("size: int32")
.Attr("dtype: type")
.Attr("element_shape: shape = { unknown_rank: true }")
.Attr("dynamic_size: bool = false")
.Attr("clear_after_read: bool = true")
.Attr("tensor_array_name: string = ''")
.Output("handle: string")
.SetIsStateful()
.SetShapeFn([](InferenceContext* c) {
ShapeHandle unused;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 0, &unused));
c->set_output(0, c->Vector(2));
return absl::OkStatus();
})
.Deprecated(26, "Use TensorArrayV3");
REGISTER_OP("TensorArrayGrad")
.Input("handle: string")
.Input("flow_in: float")
.Output("grad_handle: Ref(string)")
.Attr("source: string")
.SetIsStateful()
.SetShapeFn(shape_inference::UnknownShape)
.Deprecated(16, "Use TensorArrayGradV3");
REGISTER_OP("TensorArrayGradV2")
.Input("handle: string")
.Input("flow_in: float")
.Output("grad_handle: string")
.Attr("source: string")
.SetIsStateful()
.SetShapeFn([](InferenceContext* c) {
ShapeHandle handle;
DimensionHandle unused_dim;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 1, &handle));
TF_RETURN_IF_ERROR(c->WithValue(c->Dim(handle, 0), 2, &unused_dim));
c->set_output(0, c->Vector(2));
return absl::OkStatus();
})
.Deprecated(26, "Use TensorArrayGradV3");
REGISTER_OP("TensorArrayWrite")
.Input("handle: Ref(string)")
.Input("index: int32")
.Input("value: T")
.Input("flow_in: float")
.Output("flow_out: float")
.Attr("T: type")
.SetShapeFn(shape_inference::UnknownShape)
.Deprecated(16, "Use TensorArrayWriteV3");
REGISTER_OP("TensorArrayWriteV2")
.Input("handle: string")
.Input("index: int32")
.Input("value: T")
.Input("flow_in: float")
.Output("flow_out: float")
.Attr("T: type")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle handle;
DimensionHandle unused_dim;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 1, &handle));
TF_RETURN_IF_ERROR(c->WithValue(c->Dim(handle, 0), 2, &unused_dim));
ShapeHandle unused;
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 0, &unused));
TF_RETURN_IF_ERROR(c->WithRank(c->input(3), 0, &unused));
return shape_inference::ScalarShape(c);
})
.Deprecated(26, "Use TensorArrayWriteV3");
REGISTER_OP("TensorArrayRead")
.Input("handle: Ref(string)")
.Input("index: int32")
.Input("flow_in: float")
.Output("value: dtype")
.Attr("dtype: type")
.SetShapeFn(shape_inference::UnknownShape)
.Deprecated(16, "Use TensorArrayReadV3");
REGISTER_OP("TensorArrayReadV2")
.Input("handle: string")
.Input("index: int32")
.Input("flow_in: float")
.Output("value: dtype")
.Attr("dtype: type")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle handle;
DimensionHandle unused_dim;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 1, &handle));
TF_RETURN_IF_ERROR(c->WithValue(c->Dim(handle, 0), 2, &unused_dim));
ShapeHandle unused;
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 0, &unused));
TF_RETURN_IF_ERROR(c->WithRank(c->input(2), 0, &unused));
return shape_inference::UnknownShape(c);
})
.Deprecated(26, "Use TensorArrayReadV3");
REGISTER_OP("TensorArrayPack")
.Input("handle: Ref(string)")
.Input("flow_in: float")
.Output("value: dtype")
.Attr("dtype: type")
.Attr("element_shape: shape = { unknown_rank: true }")
.SetShapeFn(shape_inference::UnknownShape)
.Deprecated(16, "Use TensorArrayGatherV3 with RangeOp");
REGISTER_OP("TensorArrayUnpack")
.Input("handle: Ref(string)")
.Input("value: T")
.Input("flow_in: float")
.Output("flow_out: float")
.Attr("T: type")
.SetShapeFn(shape_inference::UnknownShape)
.Deprecated(20, "Use TensorArrayScatterV3 with RangeOp");
REGISTER_OP("TensorArrayGather")
.Input("handle: Ref(string)")
.Input("indices: int32")
.Input("flow_in: float")
.Output("value: dtype")
.Attr("dtype: type")
.Attr("element_shape: shape = { unknown_rank: true }")
.SetShapeFn(shape_inference::UnknownShape)
.Deprecated(16, "Use TensorArrayGatherV3");
REGISTER_OP("TensorArrayGatherV2")
.Input("handle: string")
.Input("indices: int32")
.Input("flow_in: float")
.Output("value: dtype")
.Attr("dtype: type")
.Attr("element_shape: shape = { unknown_rank: true }")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle unused;
DimensionHandle unused_dim;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 1, &unused));
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 1, &unused));
TF_RETURN_IF_ERROR(c->WithValue(c->Dim(c->input(0), 0), 2, &unused_dim));
TF_RETURN_IF_ERROR(c->WithRank(c->input(2), 0, &unused));
return shape_inference::UnknownShape(c);
})
.Deprecated(26, "Use TensorArrayGatherV3");
REGISTER_OP("TensorArrayScatter")
.Input("handle: Ref(string)")
.Input("indices: int32")
.Input("value: T")
.Input("flow_in: float")
.Output("flow_out: float")
.Attr("T: type")
.SetShapeFn(shape_inference::UnknownShape)
    .Deprecated(19, "Use TensorArrayScatterV3");
REGISTER_OP("TensorArrayScatterV2")
.Input("handle: string")
.Input("indices: int32")
.Input("value: T")
.Input("flow_in: float")
.Output("flow_out: float")
.Attr("T: type")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle unused;
DimensionHandle unused_dim;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 1, &unused));
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 1, &unused));
TF_RETURN_IF_ERROR(c->WithValue(c->Dim(c->input(0), 0), 2, &unused_dim));
TF_RETURN_IF_ERROR(c->WithRank(c->input(3), 0, &unused));
return shape_inference::ScalarShape(c);
})
.Deprecated(26, "Use TensorArrayScatterV3");
REGISTER_OP("TensorArrayConcat")
.Input("handle: Ref(string)")
.Input("flow_in: float")
.Output("value: dtype")
.Output("lengths: int64")
.Attr("dtype: type")
.Attr("element_shape_except0: shape = { unknown_rank: true }")
.SetShapeFn(shape_inference::UnknownShape)
    .Deprecated(16, "Use TensorArrayConcatV3");
REGISTER_OP("TensorArrayConcatV2")
.Input("handle: string")
.Input("flow_in: float")
.Output("value: dtype")
.Output("lengths: int64")
.Attr("dtype: type")
.Attr("element_shape_except0: shape = { unknown_rank: true }")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle handle;
DimensionHandle unused_dim;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 1, &handle));
TF_RETURN_IF_ERROR(c->WithValue(c->Dim(handle, 0), 2, &unused_dim));
ShapeHandle unused;
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 0, &unused));
c->set_output(0, c->UnknownShape());
c->set_output(1, c->Vector(c->UnknownDim()));
return absl::OkStatus();
});
REGISTER_OP("TensorArraySplit")
.Input("handle: Ref(string)")
.Input("value: T")
.Input("lengths: int64")
.Input("flow_in: float")
.Output("flow_out: float")
.Attr("T: type")
.SetShapeFn(shape_inference::UnknownShape)
.Deprecated(16, "Use TensorArraySplitV3");
REGISTER_OP("TensorArraySplitV2")
.Input("handle: string")
.Input("value: T")
.Input("lengths: int64")
.Input("flow_in: float")
.Output("flow_out: float")
.Attr("T: type")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle handle;
DimensionHandle unused_dim;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 1, &handle));
TF_RETURN_IF_ERROR(c->WithValue(c->Dim(handle, 0), 2, &unused_dim));
ShapeHandle unused;
TF_RETURN_IF_ERROR(c->WithRank(c->input(2), 1, &unused));
TF_RETURN_IF_ERROR(c->WithRank(c->input(3), 0, &unused));
return shape_inference::ScalarShape(c);
})
.Deprecated(26, "Use TensorArraySplitV3");
REGISTER_OP("TensorArraySize")
.Input("handle: Ref(string)")
.Input("flow_in: float")
.Output("size: int32")
.SetShapeFn(shape_inference::UnknownShape)
.Deprecated(16, "Use TensorArraySizeV3");
REGISTER_OP("TensorArraySizeV2")
.Input("handle: string")
.Input("flow_in: float")
.Output("size: int32")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle handle;
DimensionHandle unused_dim;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 1, &handle));
TF_RETURN_IF_ERROR(c->WithValue(c->Dim(handle, 0), 2, &unused_dim));
return shape_inference::ScalarShape(c);
})
.Deprecated(26, "Use TensorArraySizeV3");
REGISTER_OP("TensorArrayClose")
.Input("handle: Ref(string)")
.SetShapeFn([](InferenceContext* c) { return absl::OkStatus(); })
.Deprecated(16, "Use TensorArrayCloseV3");
REGISTER_OP("TensorArrayCloseV2")
.Input("handle: string")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle handle;
DimensionHandle unused_dim;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 1, &handle));
TF_RETURN_IF_ERROR(c->WithValue(c->Dim(handle, 0), 2, &unused_dim));
return absl::OkStatus();
})
.Deprecated(26, "Use TensorArrayCloseV3");
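// --------------------------------------------------------------------------
// Barrier ops: a barrier accumulates values for string keys until every
// component of an element has been inserted, then makes the complete element
// available to BarrierTakeMany.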
REGISTER_OP("Barrier")
.SetIsStateful()
.Output("handle: Ref(string)")
.Attr("component_types: list(type) >= 1")
.Attr("shapes: list(shape) >= 0 = []")
.Attr("capacity: int = -1")
.Attr("container: string = ''")
.Attr("shared_name: string = ''")
.SetShapeFn(TwoElementOutput);
REGISTER_OP("BarrierInsertMany")
.Input("handle: Ref(string)")
.Input("keys: string")
.Input("values: T")
.Attr("T: type")
.Attr("component_index: int")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle keys = c->input(1);
ShapeHandle values = c->input(2);
ShapeHandle handle;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 1, &handle));
DimensionHandle unused_dim;
TF_RETURN_IF_ERROR(c->WithValue(c->Dim(handle, 0), 2, &unused_dim));
TF_RETURN_IF_ERROR(c->WithRank(keys, 1, &keys));
TF_RETURN_IF_ERROR(c->WithRankAtLeast(values, 1, &values));
TF_RETURN_IF_ERROR(c->Merge(keys, c->Vector(c->Dim(values, 0)), &handle));
return absl::OkStatus();
});
REGISTER_OP("BarrierTakeMany")
.Input("handle: Ref(string)")
.Input("num_elements: int32")
.Output("indices: int64")
.Output("keys: string")
.Output("values: component_types")
.Attr("component_types: list(type) >= 1")
.Attr("allow_small_batch: bool = false")
.Attr("wait_for_incomplete: bool = false")
.Attr("timeout_ms: int = -1")
.SetShapeFn(shape_inference::UnknownShape);
REGISTER_OP("BarrierClose")
.Input("handle: Ref(string)")
.SetShapeFn(TwoElementVectorInputsAndScalarOutputs)
.Attr("cancel_pending_enqueues: bool = false");
REGISTER_OP("BarrierReadySize")
.Input("handle: Ref(string)")
.Output("size: int32")
.SetShapeFn(TwoElementVectorInputsAndScalarOutputs);
REGISTER_OP("BarrierIncompleteSize")
.Input("handle: Ref(string)")
.Output("size: int32")
.SetShapeFn(TwoElementVectorInputsAndScalarOutputs);
REGISTER_OP("GetSessionHandle")
.Input("value: T")
.Output("handle: string")
.Attr("T: type")
.SetIsStateful()
.SetShapeFn(shape_inference::ScalarShape);
REGISTER_OP("GetSessionHandleV2")
.Input("value: T")
.Output("handle: resource")
.Attr("T: type")
.SetIsStateful()
.SetShapeFn(shape_inference::ScalarShape);
REGISTER_OP("GetSessionTensor")
.Input("handle: string")
.Output("value: dtype")
.Attr("dtype: type")
.SetIsStateful()
.SetShapeFn([](InferenceContext* c) {
ShapeHandle unused;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 0, &unused));
return shape_inference::UnknownShape(c);
});
REGISTER_OP("DeleteSessionTensor")
.Input("handle: string")
.SetIsStateful()
.SetShapeFn([](InferenceContext* c) {
ShapeHandle unused;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 0, &unused));
return absl::OkStatus();
});
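// Stage/Unstage implement a FIFO staging buffer for tuples of tensors;
// StagePeek reads an element by index without removing it, and StageSize /
// StageClear report or reset the buffer.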
REGISTER_OP("Stage")
.Input("values: dtypes")
.Attr("capacity: int >= 0 = 0")
.Attr("memory_limit: int >= 0 = 0")
.Attr("dtypes: list(type)")
.Attr("container: string = ''")
.Attr("shared_name: string = ''")
.SetShapeFn(shape_inference::UnknownShape)
.SetIsStateful();
REGISTER_OP("Unstage")
.Output("values: dtypes")
.Attr("capacity: int >= 0 = 0")
.Attr("memory_limit: int >= 0 = 0")
.Attr("dtypes: list(type)")
.Attr("container: string = ''")
.Attr("shared_name: string = ''")
.SetShapeFn(shape_inference::UnknownShape)
.SetIsStateful();
REGISTER_OP("StagePeek")
.Input("index: int32")
.Output("values: dtypes")
.Attr("capacity: int >= 0 = 0")
.Attr("memory_limit: int >= 0 = 0")
.Attr("dtypes: list(type)")
.Attr("container: string = ''")
.Attr("shared_name: string = ''")
.SetShapeFn(shape_inference::UnknownShape)
.SetIsStateful();
REGISTER_OP("StageSize")
.Output("size: int32")
.Attr("capacity: int >= 0 = 0")
.Attr("memory_limit: int >= 0 = 0")
.Attr("dtypes: list(type)")
.Attr("container: string = ''")
.Attr("shared_name: string = ''")
.SetShapeFn(shape_inference::ScalarShape)
.SetIsStateful();
REGISTER_OP("StageClear")
.Attr("capacity: int >= 0 = 0")
.Attr("memory_limit: int >= 0 = 0")
.Attr("dtypes: list(type)")
.Attr("container: string = ''")
.Attr("shared_name: string = ''")
.SetShapeFn(shape_inference::UnknownShape)
.SetIsStateful();
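// MapStage and the other Map* ops implement an unordered staging map keyed by
// int64: values are inserted per (key, component index) and removed by key.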
REGISTER_OP("MapStage")
.Input("key: int64")
.Input("indices: int32")
.Input("values: fake_dtypes")
.Attr("capacity: int >= 0 = 0")
.Attr("memory_limit: int >= 0 = 0")
.Attr("dtypes: list(type)")
.Attr("fake_dtypes: list(type)")
.Attr("container: string = ''")
.Attr("shared_name: string = ''")
.SetShapeFn(tensorflow::shape_inference::NoOutputs)
.SetIsStateful();
REGISTER_OP("MapPeek")
.Input("key: int64")
.Input("indices: int32")
.Output("values: dtypes")
.Attr("capacity: int >= 0 = 0")
.Attr("memory_limit: int >= 0 = 0")
.Attr("dtypes: list(type)")
.Attr("container: string = ''")
.Attr("shared_name: string = ''")
.SetShapeFn(tensorflow::shape_inference::UnknownShape)
.SetIsStateful();
REGISTER_OP("MapUnstage")
.Input("key: int64")
.Input("indices: int32")
.Output("values: dtypes")
.Attr("capacity: int >= 0 = 0")
.Attr("memory_limit: int >= 0 = 0")
.Attr("dtypes: list(type)")
.Attr("container: string = ''")
.Attr("shared_name: string = ''")
.SetShapeFn(tensorflow::shape_inference::UnknownShape)
.SetIsStateful();
REGISTER_OP("MapUnstageNoKey")
.Input("indices: int32")
.Output("key: int64")
.Output("values: dtypes")
.Attr("capacity: int >= 0 = 0")
.Attr("memory_limit: int >= 0 = 0")
.Attr("dtypes: list(type)")
.Attr("container: string = ''")
.Attr("shared_name: string = ''")
.SetShapeFn(tensorflow::shape_inference::UnknownShape)
.SetIsStateful();
REGISTER_OP("MapSize")
.Output("size: int32")
.Attr("capacity: int >= 0 = 0")
.Attr("memory_limit: int >= 0 = 0")
.Attr("dtypes: list(type)")
.Attr("container: string = ''")
.Attr("shared_name: string = ''")
.SetShapeFn(tensorflow::shape_inference::ScalarShape)
.SetIsStateful();
REGISTER_OP("MapIncompleteSize")
.Output("size: int32")
.Attr("capacity: int >= 0 = 0")
.Attr("memory_limit: int >= 0 = 0")
.Attr("dtypes: list(type)")
.Attr("container: string = ''")
.Attr("shared_name: string = ''")
.SetShapeFn(tensorflow::shape_inference::ScalarShape)
.SetIsStateful();
REGISTER_OP("MapClear")
.Attr("capacity: int >= 0 = 0")
.Attr("memory_limit: int >= 0 = 0")
.Attr("dtypes: list(type)")
.Attr("container: string = ''")
.Attr("shared_name: string = ''")
.SetShapeFn(tensorflow::shape_inference::NoOutputs)
.SetIsStateful();
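// OrderedMap* ops mirror the Map* ops above, but elements are ordered by key;
// OrderedMapUnstageNoKey removes the element with the smallest key.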
REGISTER_OP("OrderedMapStage")
.Input("key: int64")
.Input("indices: int32")
.Input("values: fake_dtypes")
.Attr("capacity: int >= 0 = 0")
.Attr("memory_limit: int >= 0 = 0")
.Attr("dtypes: list(type)")
.Attr("fake_dtypes: list(type)")
.Attr("container: string = ''")
.Attr("shared_name: string = ''")
.SetShapeFn(tensorflow::shape_inference::NoOutputs)
.SetIsStateful();
REGISTER_OP("OrderedMapPeek")
.Input("key: int64")
.Input("indices: int32")
.Output("values: dtypes")
.Attr("capacity: int >= 0 = 0")
.Attr("memory_limit: int >= 0 = 0")
.Attr("dtypes: list(type)")
.Attr("container: string = ''")
.Attr("shared_name: string = ''")
.SetShapeFn(tensorflow::shape_inference::UnknownShape)
.SetIsStateful();
REGISTER_OP("OrderedMapUnstage")
.Input("key: int64")
.Input("indices: int32")
.Output("values: dtypes")
.Attr("capacity: int >= 0 = 0")
.Attr("memory_limit: int >= 0 = 0")
.Attr("dtypes: list(type)")
.Attr("container: string = ''")
.Attr("shared_name: string = ''")
.SetShapeFn(tensorflow::shape_inference::UnknownShape)
.SetIsStateful();
REGISTER_OP("OrderedMapUnstageNoKey")
.Input("indices: int32")
.Output("key: int64")
.Output("values: dtypes")
.Attr("capacity: int >= 0 = 0")
.Attr("memory_limit: int >= 0 = 0")
.Attr("dtypes: list(type)")
.Attr("container: string = ''")
.Attr("shared_name: string = ''")
.SetShapeFn(tensorflow::shape_inference::UnknownShape)
.SetIsStateful();
REGISTER_OP("OrderedMapSize")
.Output("size: int32")
.Attr("capacity: int >= 0 = 0")
.Attr("memory_limit: int >= 0 = 0")
.Attr("dtypes: list(type)")
.Attr("container: string = ''")
.Attr("shared_name: string = ''")
.SetShapeFn(tensorflow::shape_inference::ScalarShape)
.SetIsStateful();
REGISTER_OP("OrderedMapIncompleteSize")
.Output("size: int32")
.Attr("capacity: int >= 0 = 0")
.Attr("memory_limit: int >= 0 = 0")
.Attr("dtypes: list(type)")
.Attr("container: string = ''")
.Attr("shared_name: string = ''")
.SetShapeFn(tensorflow::shape_inference::ScalarShape)
.SetIsStateful();
REGISTER_OP("OrderedMapClear")
.Attr("capacity: int >= 0 = 0")
.Attr("memory_limit: int >= 0 = 0")
.Attr("dtypes: list(type)")
.Attr("container: string = ''")
.Attr("shared_name: string = ''")
.SetShapeFn(tensorflow::shape_inference::NoOutputs)
.SetIsStateful();
REGISTER_OP("RecordInput")
.Output("records: string")
.Attr("file_pattern: string")
.Attr("file_random_seed: int = 301")
.Attr("file_shuffle_shift_ratio: float = 0")
.Attr("file_buffer_size: int = 10000")
.Attr("file_parallelism: int = 16")
.Attr("batch_size: int = 32")
.Attr("compression_type: string = ''")
.SetIsStateful()
.SetShapeFn(shape_inference::UnknownShape);
} | #include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/shape_inference_testutil.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
TEST(DataFlowOpsTest, LookupTableFind) {
ShapeInferenceTestOp op("LookupTableFind");
INFER_OK(op, "?;?;?", "?");
INFER_OK(op, "[2];[];[]", "?");
INFER_OK(op, "[?];[1,2,3];[]", "?");
INFER_ERROR("Shape must be at most rank 1 but is rank 2", op,
"[?];[1,2,3];[1,2]");
}
TEST(DataFlowOpsTest, LookupTableInsert) {
ShapeInferenceTestOp op("LookupTableInsert");
INFER_OK(op, "?;?;?", "");
INFER_OK(op, "[2];[];[]", "");
INFER_ERROR("Shape must be rank 1 but is rank 0", op, "[];[1,2,3];[]");
INFER_OK(op, "[2];[1,?,3];[?,2,?]", "");
}
TEST(DataFlowOpsTest, LookupTableSize) {
ShapeInferenceTestOp op("LookupTableSize");
INFER_OK(op, "?", "[]");
INFER_OK(op, "[2]", "[]");
INFER_ERROR("Shape must be rank 1 but is rank 0", op, "[]");
INFER_ERROR("Dimension must be 2 but is 3", op, "[3]");
}
TEST(DataFlowOpsTest, LookupTableExport) {
ShapeInferenceTestOp op("LookupTableExport");
INFER_OK(op, "?", "[?];?");
INFER_OK(op, "[2]", "[?];?");
INFER_ERROR("Shape must be rank 1 but is rank 0", op, "[]");
}
TEST(DataFlowOpsTest, InitializeTable) {
ShapeInferenceTestOp op("InitializeTable");
INFER_OK(op, "?;?;?", "");
INFER_ERROR("Shape must be rank 1 but is rank 0", op, "[];[];[]");
INFER_ERROR("Dimension 0 in both shapes must be equal, but are 1 and 2", op,
"?;[1];[2]");
INFER_ERROR("Shape must be rank 1 but is rank 2", op, "[2];[1,2];[1,2]");
}
TEST(DataFlowOpsTest, InitializeTableFromTextFile) {
ShapeInferenceTestOp op("InitializeTableFromTextFile");
INFER_OK(op, "?;?", "");
INFER_ERROR("Shape must be rank 1 but is rank 0", op, "[];[]");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "[2];[1]");
}
TEST(DataFlowOpsTest, DynamicPartition) {
ShapeInferenceTestOp op("DynamicPartition");
TF_ASSERT_OK(NodeDefBuilder("test", "DynamicPartition")
.Input("data", 0, DT_FLOAT_REF)
.Input("indices", 0, DT_INT32)
.Attr("num_partitions", 4)
.Finalize(&op.node_def));
INFER_OK(op, "?;?", "?;?;?;?");
INFER_OK(op, "[3,4,5];[3,4]", "[?,d0_2];[?,d0_2];[?,d0_2];[?,d0_2]");
TF_ASSERT_OK(NodeDefBuilder("test", "DynamicPartition")
.Input("data", 0, DT_FLOAT)
.Input("indices", 0, DT_INT32)
.Attr("num_partitions", 2)
.Finalize(&op.node_def));
INFER_OK(op, "[3,4,5,6];[3,4]", "[?,d0_2,d0_3];[?,d0_2,d0_3]");
INFER_ERROR("Dimensions must be equal, but are 4 and 100", op,
"[3,4,5];[3,100]");
}
TEST(DataFlowOpsTest, DynamicStitch) {
ShapeInferenceTestOp op("DynamicStitch");
TF_ASSERT_OK(
NodeDefBuilder("test", "DynamicStitch")
.Input({{"indices", 0, DT_INT32}, {"indices_2", 1, DT_INT32}})
.Input({{"data", 0, DT_FLOAT}, {"data_2", 1, DT_FLOAT}})
.Attr("N", 2)
.Finalize(&op.node_def));
INFER_ERROR("Dimensions must be equal, but are 10 and 5", op,
"[2,3];[5,6];[2,3,4,5];[10,11,4,5]");
INFER_ERROR("Dimension 0 in both shapes must be equal, but are 4 and 13", op,
"[2,3];[5,6];[2,3,4,5];[5,6,13,14]");
INFER_OK(op, "[2,3];[5,6];[2,3,4,5];[5,6,4,5]", "[?,d2_2,d2_3]");
Tensor tensor_2 = test::AsTensor<int32>(
std::vector<int32>{2, 4, 6, 0, 10, 11}, TensorShape({2, 3}));
Tensor tensor_5 = test::AsTensor<int32>(
std::vector<int32>{0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
1000, 21, 22, 23, 24, 25, 26, 27, 28, 29},
TensorShape({5, 6}));
op.input_tensors.push_back(nullptr);
op.input_tensors.push_back(&tensor_5);
INFER_OK(op, "[2,3];[5,6];[2,3,4,5];[5,6,4,5]", "[?,d2_2,d2_3]");
op.input_tensors[0] = &tensor_2;
op.input_tensors[1] = nullptr;
INFER_OK(op, "[2,3];[5,6];[2,3,4,5];[5,6,4,5]", "[?,d2_2,d2_3]");
INFER_OK(op, "[2,3];?;[2,3,4,5];[5,6,4,5]", "[?,d2_2,d2_3]");
op.input_tensors[1] = &tensor_5;
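  // With both index tensors constant, dim 0 of the output is max(index) + 1.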
INFER_OK(op, "[2,3];[5,6];[2,3,4,5];[5,6,4,5]", "[1001,d2_2,d2_3]");
tensor_2.flat<int32>()(3) = 10000;
INFER_OK(op, "[2,3];[5,6];[2,3,4,5];[5,6,4,5]", "[10001,d2_2,d2_3]");
}
TEST(DataFlowOpsTest, ParallelDynamicStitch) {
ShapeInferenceTestOp op("ParallelDynamicStitch");
TF_ASSERT_OK(
NodeDefBuilder("test", "ParallelDynamicStitch")
.Input({{"indices", 0, DT_INT32}, {"indices_2", 1, DT_INT32}})
.Input({{"data", 0, DT_FLOAT}, {"data_2", 1, DT_FLOAT}})
.Attr("N", 2)
.Finalize(&op.node_def));
INFER_OK(op, "[2,3];[5,6];[2,3,4,5];[5,6,4,5]", "[?,d2_2,d2_3]");
INFER_ERROR("Dimensions must be equal, but are 10 and 5", op,
"[2,3];[5,6];[2,3,4,5];[10,11,4,5]");
INFER_ERROR("Dimension 0 in both shapes must be equal, but are 4 and 13", op,
"[2,3];[5,6];[2,3,4,5];[5,6,13,14]");
}
TEST(DataFlowOpsTest, TensorArrayV3) {
ShapeInferenceTestOp op("TensorArrayV3");
TF_ASSERT_OK(NodeDefBuilder("test", "TensorArrayV3")
.Input({"size", 0, DT_INT32})
.Attr("dtype", DT_FLOAT)
.Finalize(&op.node_def));
INFER_OK(op, "[]", "[2];[]");
INFER_OK(op, "?", "[2];[]");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "[2]");
}
TEST(DataFlowOpsTest, QueueDequeueV2ShapeFn) {
ShapeInferenceTestOp op("QueueDequeueV2");
TF_ASSERT_OK(NodeDefBuilder("test", op.name)
.Input("handle", 0, DT_RESOURCE)
.Attr("component_types", {DT_FLOAT, DT_INT32})
.Finalize(&op.node_def));
INFER_OK(op, "?", "?;?");
std::vector<ShapeInferenceTestOp::ShapeAndType> shapes_and_types;
op.input_resource_handle_shapes_and_types.push_back(&shapes_and_types);
INFER_OK(op, "?", "?;?");
shapes_and_types.emplace_back("[1,?,3]", DT_FLOAT);
INFER_OK(op, "?", "?;?");
shapes_and_types.emplace_back("[?,2]", DT_FLOAT);
INFER_OK(op, "?", "[1,?,3];[?,2]");
}
TEST(DataFlowOpsTest, QueueDequeueManyV2ShapeFn) {
ShapeInferenceTestOp op("QueueDequeueManyV2");
TF_ASSERT_OK(NodeDefBuilder("test", op.name)
.Input("handle", 0, DT_RESOURCE)
.Input("n", 0, DT_INT32)
.Attr("component_types", {DT_FLOAT, DT_INT32})
.Finalize(&op.node_def));
INFER_OK(op, "?;?", "?;?");
std::vector<ShapeInferenceTestOp::ShapeAndType> shapes_and_types;
op.input_resource_handle_shapes_and_types.push_back(&shapes_and_types);
op.input_resource_handle_shapes_and_types.push_back(nullptr);
shapes_and_types.emplace_back("[1,?,3]", DT_FLOAT);
INFER_OK(op, "?;?", "?;?");
shapes_and_types.emplace_back("[?,2]", DT_FLOAT);
INFER_OK(op, "?;?", "[?,1,?,3];[?,?,2]");
Tensor n_tensor = test::AsScalar(12);
op.input_tensors.push_back(nullptr);
op.input_tensors.push_back(&n_tensor);
op.input_resource_handle_shapes_and_types.clear();
shapes_and_types.clear();
INFER_OK(op, "?;?", "?;?");
op.input_resource_handle_shapes_and_types.push_back(&shapes_and_types);
op.input_resource_handle_shapes_and_types.push_back(nullptr);
shapes_and_types.emplace_back("[1,?,3]", DT_FLOAT);
INFER_OK(op, "?;?", "?;?");
shapes_and_types.emplace_back("[?,2]", DT_FLOAT);
INFER_OK(op, "?;?", "[12,1,?,3];[12,?,2]");
n_tensor = test::AsScalar<int32>(-1);
INFER_ERROR("must be >= 0", op, "?;?");
}
TEST(DataFlowOpsTest, QueueDequeueUpToV2ShapeFn) {
for (int pass = 0; pass < 2; ++pass) {
ShapeInferenceTestOp op("QueueDequeueUpToV2");
TF_ASSERT_OK(NodeDefBuilder("test", op.name)
.Input("handle", 0, DT_RESOURCE)
.Input("n", 0, DT_INT32)
.Attr("component_types", {DT_FLOAT, DT_INT32})
.Finalize(&op.node_def));
Tensor n_tensor = test::AsScalar(12);
if (pass == 1) {
op.input_tensors.push_back(nullptr);
op.input_tensors.push_back(&n_tensor);
}
INFER_OK(op, "?;?", "?;?");
std::vector<ShapeInferenceTestOp::ShapeAndType> shapes_and_types;
op.input_resource_handle_shapes_and_types.push_back(&shapes_and_types);
op.input_resource_handle_shapes_and_types.push_back(nullptr);
shapes_and_types.emplace_back("[1,?,3]", DT_FLOAT);
INFER_OK(op, "?;?", "?;?");
shapes_and_types.emplace_back("[?,2]", DT_FLOAT);
INFER_OK(op, "?;?", "[?,1,?,3];[?,?,2]");
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/ops/data_flow_ops.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/ops/data_flow_ops_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ee656456-6508-4e0d-a5f3-d0b511981a26 | cpp | abseil/abseil-cpp | cord | absl/strings/cord.cc | absl/strings/cord_test.cc | #include "absl/strings/cord.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <iomanip>
#include <ios>
#include <iostream>
#include <limits>
#include <memory>
#include <ostream>
#include <sstream>
#include <string>
#include <utility>
#include "absl/base/attributes.h"
#include "absl/base/config.h"
#include "absl/base/internal/endian.h"
#include "absl/base/internal/raw_logging.h"
#include "absl/base/macros.h"
#include "absl/base/optimization.h"
#include "absl/base/nullability.h"
#include "absl/container/inlined_vector.h"
#include "absl/crc/crc32c.h"
#include "absl/crc/internal/crc_cord_state.h"
#include "absl/functional/function_ref.h"
#include "absl/strings/cord_buffer.h"
#include "absl/strings/escaping.h"
#include "absl/strings/internal/cord_data_edge.h"
#include "absl/strings/internal/cord_internal.h"
#include "absl/strings/internal/cord_rep_btree.h"
#include "absl/strings/internal/cord_rep_crc.h"
#include "absl/strings/internal/cord_rep_flat.h"
#include "absl/strings/internal/cordz_update_tracker.h"
#include "absl/strings/internal/resize_uninitialized.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/strings/strip.h"
#include "absl/types/optional.h"
#include "absl/types/span.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
using ::absl::cord_internal::CordRep;
using ::absl::cord_internal::CordRepBtree;
using ::absl::cord_internal::CordRepCrc;
using ::absl::cord_internal::CordRepExternal;
using ::absl::cord_internal::CordRepFlat;
using ::absl::cord_internal::CordRepSubstring;
using ::absl::cord_internal::CordzUpdateTracker;
using ::absl::cord_internal::InlineData;
using ::absl::cord_internal::kMaxFlatLength;
using ::absl::cord_internal::kMinFlatLength;
using ::absl::cord_internal::kInlinedVectorSize;
using ::absl::cord_internal::kMaxBytesToCopy;
static void DumpNode(absl::Nonnull<CordRep*> nonnull_rep, bool include_data,
absl::Nonnull<std::ostream*> os, int indent = 0);
static bool VerifyNode(absl::Nonnull<CordRep*> root,
absl::Nonnull<CordRep*> start_node);
static inline absl::Nullable<CordRep*> VerifyTree(
absl::Nullable<CordRep*> node) {
assert(node == nullptr || VerifyNode(node, node));
static_cast<void>(&VerifyNode);
return node;
}
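// Creates a flat rep with capacity for `length + alloc_hint` bytes and copies
// `data[0, length)` into it.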
static absl::Nonnull<CordRepFlat*> CreateFlat(absl::Nonnull<const char*> data,
size_t length,
size_t alloc_hint) {
CordRepFlat* flat = CordRepFlat::New(length + alloc_hint);
flat->length = length;
memcpy(flat->Data(), data, length);
return flat;
}
static absl::Nonnull<CordRep*> NewBtree(absl::Nonnull<const char*> data,
size_t length, size_t alloc_hint) {
if (length <= kMaxFlatLength) {
return CreateFlat(data, length, alloc_hint);
}
CordRepFlat* flat = CreateFlat(data, kMaxFlatLength, 0);
data += kMaxFlatLength;
length -= kMaxFlatLength;
auto* root = CordRepBtree::Create(flat);
return CordRepBtree::Append(root, {data, length}, alloc_hint);
}
static absl::Nullable<CordRep*> NewTree(absl::Nullable<const char*> data,
size_t length, size_t alloc_hint) {
if (length == 0) return nullptr;
return NewBtree(data, length, alloc_hint);
}
namespace cord_internal {
void InitializeCordRepExternal(absl::string_view data,
absl::Nonnull<CordRepExternal*> rep) {
assert(!data.empty());
rep->length = data.size();
rep->tag = EXTERNAL;
rep->base = data.data();
VerifyTree(rep);
}
}
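// Converts a std::string into a CordRep. Short strings, and strings wasting
// more than half of their capacity, are copied into tree nodes; otherwise the
// string is moved into an external rep so the payload is not copied.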
static absl::Nonnull<CordRep*> CordRepFromString(std::string&& src) {
assert(src.length() > cord_internal::kMaxInline);
  if (
      // Short strings are cheaper to copy than to wrap in an external block.
      src.size() <= kMaxBytesToCopy ||
      // Strings wasting more than half their capacity are copied so the
      // unused memory is not pinned for the lifetime of the Cord.
      src.size() < src.capacity() / 2) {
return NewTree(src.data(), src.size(), 0);
}
struct StringReleaser {
    void operator()(absl::string_view) {}
std::string data;
};
const absl::string_view original_data = src;
auto* rep =
static_cast<::absl::cord_internal::CordRepExternalImpl<StringReleaser>*>(
absl::cord_internal::NewExternalRep(original_data,
StringReleaser{std::move(src)}));
rep->base = rep->template get<0>().data.data();
return rep;
}
#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
constexpr unsigned char Cord::InlineRep::kMaxInline;
#endif
inline void Cord::InlineRep::set_data(absl::Nonnull<const char*> data,
size_t n) {
static_assert(kMaxInline == 15, "set_data is hard-coded for a length of 15");
data_.set_inline_data(data, n);
}
inline absl::Nonnull<char*> Cord::InlineRep::set_data(size_t n) {
assert(n <= kMaxInline);
ResetToEmpty();
set_inline_size(n);
return data_.as_chars();
}
inline void Cord::InlineRep::reduce_size(size_t n) {
size_t tag = inline_size();
assert(tag <= kMaxInline);
assert(tag >= n);
tag -= n;
memset(data_.as_chars() + tag, 0, n);
set_inline_size(tag);
}
inline void Cord::InlineRep::remove_prefix(size_t n) {
cord_internal::SmallMemmove(data_.as_chars(), data_.as_chars() + n,
inline_size() - n);
reduce_size(n);
}
static absl::Nonnull<CordRepBtree*> ForceBtree(CordRep* rep) {
return rep->IsBtree()
? rep->btree()
: CordRepBtree::Create(cord_internal::RemoveCrcNode(rep));
}
void Cord::InlineRep::AppendTreeToInlined(absl::Nonnull<CordRep*> tree,
MethodIdentifier method) {
assert(!is_tree());
if (!data_.is_empty()) {
CordRepFlat* flat = MakeFlatWithExtraCapacity(0);
tree = CordRepBtree::Append(CordRepBtree::Create(flat), tree);
}
EmplaceTree(tree, method);
}
void Cord::InlineRep::AppendTreeToTree(absl::Nonnull<CordRep*> tree,
MethodIdentifier method) {
assert(is_tree());
const CordzUpdateScope scope(data_.cordz_info(), method);
tree = CordRepBtree::Append(ForceBtree(data_.as_tree()), tree);
SetTree(tree, scope);
}
void Cord::InlineRep::AppendTree(absl::Nonnull<CordRep*> tree,
MethodIdentifier method) {
assert(tree != nullptr);
assert(tree->length != 0);
assert(!tree->IsCrc());
if (data_.is_tree()) {
AppendTreeToTree(tree, method);
} else {
AppendTreeToInlined(tree, method);
}
}
void Cord::InlineRep::PrependTreeToInlined(absl::Nonnull<CordRep*> tree,
MethodIdentifier method) {
assert(!is_tree());
if (!data_.is_empty()) {
CordRepFlat* flat = MakeFlatWithExtraCapacity(0);
tree = CordRepBtree::Prepend(CordRepBtree::Create(flat), tree);
}
EmplaceTree(tree, method);
}
void Cord::InlineRep::PrependTreeToTree(absl::Nonnull<CordRep*> tree,
MethodIdentifier method) {
assert(is_tree());
const CordzUpdateScope scope(data_.cordz_info(), method);
tree = CordRepBtree::Prepend(ForceBtree(data_.as_tree()), tree);
SetTree(tree, scope);
}
void Cord::InlineRep::PrependTree(absl::Nonnull<CordRep*> tree,
MethodIdentifier method) {
assert(tree != nullptr);
assert(tree->length != 0);
assert(!tree->IsCrc());
if (data_.is_tree()) {
PrependTreeToTree(tree, method);
} else {
PrependTreeToInlined(tree, method);
}
}
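// If the tail of `root` is a privately owned flat node (or a btree exposing a
// writable append buffer), returns true and points `region`/`size` at the
// spare capacity so callers can append in place without allocating.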
static inline bool PrepareAppendRegion(
absl::Nonnull<CordRep*> root, absl::Nonnull<absl::Nullable<char*>*> region,
absl::Nonnull<size_t*> size, size_t max_length) {
if (root->IsBtree() && root->refcount.IsOne()) {
Span<char> span = root->btree()->GetAppendBuffer(max_length);
if (!span.empty()) {
*region = span.data();
*size = span.size();
return true;
}
}
CordRep* dst = root;
if (!dst->IsFlat() || !dst->refcount.IsOne()) {
*region = nullptr;
*size = 0;
return false;
}
const size_t in_use = dst->length;
const size_t capacity = dst->flat()->Capacity();
if (in_use == capacity) {
*region = nullptr;
*size = 0;
return false;
}
const size_t size_increase = std::min(capacity - in_use, max_length);
dst->length += size_increase;
*region = dst->flat()->Data() + in_use;
*size = size_increase;
return true;
}
void Cord::InlineRep::AssignSlow(const Cord::InlineRep& src) {
assert(&src != this);
assert(is_tree() || src.is_tree());
auto constexpr method = CordzUpdateTracker::kAssignCord;
if (ABSL_PREDICT_TRUE(!is_tree())) {
EmplaceTree(CordRep::Ref(src.as_tree()), src.data_, method);
return;
}
CordRep* tree = as_tree();
if (CordRep* src_tree = src.tree()) {
data_.set_tree(CordRep::Ref(src_tree));
CordzInfo::MaybeTrackCord(data_, src.data_, method);
} else {
CordzInfo::MaybeUntrackCord(data_.cordz_info());
data_ = src.data_;
}
CordRep::Unref(tree);
}
void Cord::InlineRep::UnrefTree() {
if (is_tree()) {
CordzInfo::MaybeUntrackCord(data_.cordz_info());
CordRep::Unref(tree());
}
}
Cord::Cord(absl::string_view src, MethodIdentifier method)
: contents_(InlineData::kDefaultInit) {
const size_t n = src.size();
if (n <= InlineRep::kMaxInline) {
contents_.set_data(src.data(), n);
} else {
CordRep* rep = NewTree(src.data(), n, 0);
contents_.EmplaceTree(rep, method);
}
}
template <typename T, Cord::EnableIfString<T>>
Cord::Cord(T&& src) : contents_(InlineData::kDefaultInit) {
if (src.size() <= InlineRep::kMaxInline) {
contents_.set_data(src.data(), src.size());
} else {
CordRep* rep = CordRepFromString(std::forward<T>(src));
contents_.EmplaceTree(rep, CordzUpdateTracker::kConstructorString);
}
}
template Cord::Cord(std::string&& src);
void Cord::DestroyCordSlow() {
assert(contents_.is_tree());
CordzInfo::MaybeUntrackCord(contents_.cordz_info());
CordRep::Unref(VerifyTree(contents_.as_tree()));
}
void Cord::Clear() {
if (CordRep* tree = contents_.clear()) {
CordRep::Unref(tree);
}
}
Cord& Cord::AssignLargeString(std::string&& src) {
auto constexpr method = CordzUpdateTracker::kAssignString;
assert(src.size() > kMaxBytesToCopy);
CordRep* rep = CordRepFromString(std::move(src));
if (CordRep* tree = contents_.tree()) {
CordzUpdateScope scope(contents_.cordz_info(), method);
contents_.SetTree(rep, scope);
CordRep::Unref(tree);
} else {
contents_.EmplaceTree(rep, method);
}
return *this;
}
Cord& Cord::operator=(absl::string_view src) {
auto constexpr method = CordzUpdateTracker::kAssignString;
const char* data = src.data();
size_t length = src.size();
CordRep* tree = contents_.tree();
if (length <= InlineRep::kMaxInline) {
if (tree != nullptr) CordzInfo::MaybeUntrackCord(contents_.cordz_info());
contents_.set_data(data, length);
if (tree != nullptr) CordRep::Unref(tree);
return *this;
}
if (tree != nullptr) {
CordzUpdateScope scope(contents_.cordz_info(), method);
if (tree->IsFlat() && tree->flat()->Capacity() >= length &&
tree->refcount.IsOne()) {
memmove(tree->flat()->Data(), data, length);
tree->length = length;
VerifyTree(tree);
return *this;
}
contents_.SetTree(NewTree(data, length, 0), scope);
CordRep::Unref(tree);
} else {
contents_.EmplaceTree(NewTree(data, length, 0), method);
}
return *this;
}
void Cord::InlineRep::AppendArray(absl::string_view src,
MethodIdentifier method) {
if (src.empty()) return;
MaybeRemoveEmptyCrcNode();
size_t appended = 0;
CordRep* rep = tree();
const CordRep* const root = rep;
CordzUpdateScope scope(root ? cordz_info() : nullptr, method);
if (root != nullptr) {
rep = cord_internal::RemoveCrcNode(rep);
char* region;
if (PrepareAppendRegion(rep, ®ion, &appended, src.size())) {
memcpy(region, src.data(), appended);
}
} else {
size_t inline_length = inline_size();
if (src.size() <= kMaxInline - inline_length) {
set_inline_size(inline_length + src.size());
memcpy(data_.as_chars() + inline_length, src.data(), src.size());
return;
}
rep = CordRepFlat::New(inline_length + src.size());
appended = std::min(src.size(), rep->flat()->Capacity() - inline_length);
memcpy(rep->flat()->Data(), data_.as_chars(), inline_length);
memcpy(rep->flat()->Data() + inline_length, src.data(), appended);
rep->length = inline_length + appended;
}
src.remove_prefix(appended);
if (src.empty()) {
CommitTree(root, rep, scope, method);
return;
}
rep = ForceBtree(rep);
const size_t min_growth = std::max<size_t>(rep->length / 10, src.size());
rep = CordRepBtree::Append(rep->btree(), src, min_growth - src.size());
CommitTree(root, rep, scope, method);
}
inline absl::Nonnull<CordRep*> Cord::TakeRep() const& {
return CordRep::Ref(contents_.tree());
}
inline absl::Nonnull<CordRep*> Cord::TakeRep() && {
CordRep* rep = contents_.tree();
contents_.clear();
return rep;
}
template <typename C>
inline void Cord::AppendImpl(C&& src) {
auto constexpr method = CordzUpdateTracker::kAppendCord;
contents_.MaybeRemoveEmptyCrcNode();
if (src.empty()) return;
if (empty()) {
if (src.contents_.is_tree()) {
CordRep* rep =
cord_internal::RemoveCrcNode(std::forward<C>(src).TakeRep());
contents_.EmplaceTree(rep, method);
} else {
contents_.data_ = src.contents_.data_;
}
return;
}
const size_t src_size = src.contents_.size();
if (src_size <= kMaxBytesToCopy) {
CordRep* src_tree = src.contents_.tree();
if (src_tree == nullptr) {
contents_.AppendArray({src.contents_.data(), src_size}, method);
return;
}
if (src_tree->IsFlat()) {
contents_.AppendArray({src_tree->flat()->Data(), src_size}, method);
return;
}
if (&src == this) {
Append(Cord(src));
return;
}
for (absl::string_view chunk : src.Chunks()) {
Append(chunk);
}
return;
}
CordRep* rep = cord_internal::RemoveCrcNode(std::forward<C>(src).TakeRep());
contents_.AppendTree(rep, CordzUpdateTracker::kAppendCord);
}
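// Tries to claim a flat rep with at least `min_capacity` spare bytes from
// `rep` without copying; on failure `result.extracted` is null and the tree
// is returned unchanged.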
static CordRep::ExtractResult ExtractAppendBuffer(absl::Nonnull<CordRep*> rep,
size_t min_capacity) {
switch (rep->tag) {
case cord_internal::BTREE:
return CordRepBtree::ExtractAppendBuffer(rep->btree(), min_capacity);
default:
if (rep->IsFlat() && rep->refcount.IsOne() &&
rep->flat()->Capacity() - rep->length >= min_capacity) {
return {nullptr, rep};
}
return {rep, nullptr};
}
}
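// Builds a CordBuffer seeded with the cord's inline contents; the inline data
// is copied into the buffer and the source cord is left empty.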
static CordBuffer CreateAppendBuffer(InlineData& data, size_t block_size,
size_t capacity) {
const size_t size = data.inline_size();
const size_t max_capacity = std::numeric_limits<size_t>::max() - size;
capacity = (std::min)(max_capacity, capacity) + size;
CordBuffer buffer =
block_size ? CordBuffer::CreateWithCustomLimit(block_size, capacity)
: CordBuffer::CreateWithDefaultLimit(capacity);
cord_internal::SmallMemmove(buffer.data(), data.as_chars(), size);
buffer.SetLength(size);
data = {};
return buffer;
}
CordBuffer Cord::GetAppendBufferSlowPath(size_t block_size, size_t capacity,
size_t min_capacity) {
auto constexpr method = CordzUpdateTracker::kGetAppendBuffer;
CordRep* tree = contents_.tree();
if (tree != nullptr) {
CordzUpdateScope scope(contents_.cordz_info(), method);
CordRep::ExtractResult result = ExtractAppendBuffer(tree, min_capacity);
if (result.extracted != nullptr) {
contents_.SetTreeOrEmpty(result.tree, scope);
return CordBuffer(result.extracted->flat());
}
return block_size ? CordBuffer::CreateWithCustomLimit(block_size, capacity)
: CordBuffer::CreateWithDefaultLimit(capacity);
}
return CreateAppendBuffer(contents_.data_, block_size, capacity);
}
void Cord::Append(const Cord& src) { AppendImpl(src); }
void Cord::Append(Cord&& src) { AppendImpl(std::move(src)); }
template <typename T, Cord::EnableIfString<T>>
void Cord::Append(T&& src) {
if (src.size() <= kMaxBytesToCopy) {
Append(absl::string_view(src));
} else {
CordRep* rep = CordRepFromString(std::forward<T>(src));
contents_.AppendTree(rep, CordzUpdateTracker::kAppendString);
}
}
template void Cord::Append(std::string&& src);
void Cord::Prepend(const Cord& src) {
contents_.MaybeRemoveEmptyCrcNode();
if (src.empty()) return;
CordRep* src_tree = src.contents_.tree();
if (src_tree != nullptr) {
CordRep::Ref(src_tree);
contents_.PrependTree(cord_internal::RemoveCrcNode(src_tree),
CordzUpdateTracker::kPrependCord);
return;
}
absl::string_view src_contents(src.contents_.data(), src.contents_.size());
return Prepend(src_contents);
}
void Cord::PrependArray(absl::string_view src, MethodIdentifier method) {
contents_.MaybeRemoveEmptyCrcNode();
if (src.empty()) return;
if (!contents_.is_tree()) {
size_t cur_size = contents_.inline_size();
if (cur_size + src.size() <= InlineRep::kMaxInline) {
InlineData data;
data.set_inline_size(cur_size + src.size());
memcpy(data.as_chars(), src.data(), src.size());
memcpy(data.as_chars() + src.size(), contents_.data(), cur_size);
contents_.data_ = data;
return;
}
}
CordRep* rep = NewTree(src.data(), src.size(), 0);
contents_.PrependTree(rep, method);
}
void Cord::AppendPrecise(absl::string_view src, MethodIdentifier method) {
assert(!src.empty());
assert(src.size() <= cord_internal::kMaxFlatLength);
if (contents_.remaining_inline_capacity() >= src.size()) {
const size_t inline_length = contents_.inline_size();
contents_.set_inline_size(inline_length + src.size());
memcpy(contents_.data_.as_chars() + inline_length, src.data(), src.size());
} else {
contents_.AppendTree(CordRepFlat::Create(src), method);
}
}
void Cord::PrependPrecise(absl::string_view src, MethodIdentifier method) {
assert(!src.empty());
assert(src.size() <= cord_internal::kMaxFlatLength);
if (contents_.remaining_inline_capacity() >= src.size()) {
const size_t cur_size = contents_.inline_size();
InlineData data;
data.set_inline_size(cur_size + src.size());
memcpy(data.as_chars(), src.data(), src.size());
memcpy(data.as_chars() + src.size(), contents_.data(), cur_size);
contents_.data_ = data;
} else {
contents_.PrependTree(CordRepFlat::Create(src), method);
}
}
template <typename T, Cord::EnableIfString<T>>
inline void Cord::Prepend(T&& src) {
if (src.size() <= kMaxBytesToCopy) {
Prepend(absl::string_view(src));
} else {
CordRep* rep = CordRepFromString(std::forward<T>(src));
contents_.PrependTree(rep, CordzUpdateTracker::kPrependString);
}
}
template void Cord::Prepend(std::string&& src);
void Cord::RemovePrefix(size_t n) {
ABSL_INTERNAL_CHECK(n <= size(),
absl::StrCat("Requested prefix size ", n,
" exceeds Cord's size ", size()));
contents_.MaybeRemoveEmptyCrcNode();
CordRep* tree = contents_.tree();
if (tree == nullptr) {
contents_.remove_prefix(n);
} else {
auto constexpr method = CordzUpdateTracker::kRemovePrefix;
CordzUpdateScope scope(contents_.cordz_info(), method);
tree = cord_internal::RemoveCrcNode(tree);
if (n >= tree->length) {
CordRep::Unref(tree);
tree = nullptr;
} else if (tree->IsBtree()) {
CordRep* old = tree;
tree = tree->btree()->SubTree(n, tree->length - n);
CordRep::Unref(old);
} else if (tree->IsSubstring() && tree->refcount.IsOne()) {
tree->substring()->start += n;
tree->length -= n;
} else {
CordRep* rep = CordRepSubstring::Substring(tree, n, tree->length - n);
CordRep::Unref(tree);
tree = rep;
}
contents_.SetTreeOrEmpty(tree, scope);
}
}
void Cord::RemoveSuffix(size_t n) {
ABSL_INTERNAL_CHECK(n <= size(),
absl::StrCat("Requested suffix size ", n,
" exceeds Cord's size ", size()));
contents_.MaybeRemoveEmptyCrcNode();
CordRep* tree = contents_.tree();
if (tree == nullptr) {
contents_.reduce_size(n);
} else {
auto constexpr method = CordzUpdateTracker::kRemoveSuffix;
CordzUpdateScope scope(contents_.cordz_info(), method);
tree = cord_internal::RemoveCrcNode(tree);
if (n >= tree->length) {
CordRep::Unref(tree);
tree = nullptr;
} else if (tree->IsBtree()) {
tree = CordRepBtree::RemoveSuffix(tree->btree(), n);
} else if (!tree->IsExternal() && tree->refcount.IsOne()) {
assert(tree->IsFlat() || tree->IsSubstring());
tree->length -= n;
} else {
CordRep* rep = CordRepSubstring::Substring(tree, 0, tree->length - n);
CordRep::Unref(tree);
tree = rep;
}
contents_.SetTreeOrEmpty(tree, scope);
}
}
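// Out-of-range `pos` / `new_size` are clamped to the cord's bounds. Results
// that fit inline are materialized by copying chunks rather than by sharing
// tree nodes.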
Cord Cord::Subcord(size_t pos, size_t new_size) const {
Cord sub_cord;
size_t length = size();
if (pos > length) pos = length;
if (new_size > length - pos) new_size = length - pos;
if (new_size == 0) return sub_cord;
CordRep* tree = contents_.tree();
if (tree == nullptr) {
sub_cord.contents_.set_data(contents_.data() + pos, new_size);
return sub_cord;
}
if (new_size <= InlineRep::kMaxInline) {
sub_cord.contents_.set_inline_size(new_size);
char* dest = sub_cord.contents_.data_.as_chars();
Cord::ChunkIterator it = chunk_begin();
it.AdvanceBytes(pos);
size_t remaining_size = new_size;
while (remaining_size > it->size()) {
cord_internal::SmallMemmove(dest, it->data(), it->size());
remaining_size -= it->size();
dest += it->size();
++it;
}
cord_internal::SmallMemmove(dest, it->data(), remaining_size);
return sub_cord;
}
tree = cord_internal::SkipCrcNode(tree);
if (tree->IsBtree()) {
tree = tree->btree()->SubTree(pos, new_size);
} else {
tree = CordRepSubstring::Substring(tree, pos, new_size);
}
sub_cord.contents_.EmplaceTree(tree, contents_.data_,
CordzUpdateTracker::kSubCord);
return sub_cord;
}
namespace {
int ClampResult(int memcmp_res) {
return static_cast<int>(memcmp_res > 0) - static_cast<int>(memcmp_res < 0);
}
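// Compares the common prefix of *lhs and *rhs, consuming the compared bytes
// from both views and from *size_to_compare. Returns the raw memcmp result.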
int CompareChunks(absl::Nonnull<absl::string_view*> lhs,
absl::Nonnull<absl::string_view*> rhs,
absl::Nonnull<size_t*> size_to_compare) {
size_t compared_size = std::min(lhs->size(), rhs->size());
assert(*size_to_compare >= compared_size);
*size_to_compare -= compared_size;
int memcmp_res = ::memcmp(lhs->data(), rhs->data(), compared_size);
if (memcmp_res != 0) return memcmp_res;
lhs->remove_prefix(compared_size);
rhs->remove_prefix(compared_size);
return 0;
}
template <typename ResultType>
ResultType ComputeCompareResult(int memcmp_res) {
return ClampResult(memcmp_res);
}
template <>
bool ComputeCompareResult<bool>(int memcmp_res) {
return memcmp_res == 0;
}
}
inline absl::string_view Cord::InlineRep::FindFlatStartPiece() const {
if (!is_tree()) {
return absl::string_view(data_.as_chars(), data_.inline_size());
}
CordRep* node = cord_internal::SkipCrcNode(tree());
if (node->IsFlat()) {
return absl::string_view(node->flat()->Data(), node->length);
}
if (node->IsExternal()) {
return absl::string_view(node->external()->base, node->length);
}
if (node->IsBtree()) {
CordRepBtree* tree = node->btree();
int height = tree->height();
while (--height >= 0) {
tree = tree->Edge(CordRepBtree::kFront)->btree();
}
return tree->Data(tree->begin());
}
size_t offset = 0;
size_t length = node->length;
assert(length != 0);
if (node->IsSubstring()) {
offset = node->substring()->start;
node = node->substring()->child;
}
if (node->IsFlat()) {
return absl::string_view(node->flat()->Data() + offset, length);
}
assert(node->IsExternal() && "Expect FLAT or EXTERNAL node here");
return absl::string_view(node->external()->base + offset, length);
}
void Cord::SetCrcCordState(crc_internal::CrcCordState state) {
auto constexpr method = CordzUpdateTracker::kSetExpectedChecksum;
if (empty()) {
contents_.MaybeRemoveEmptyCrcNode();
CordRep* rep = CordRepCrc::New(nullptr, std::move(state));
contents_.EmplaceTree(rep, method);
} else if (!contents_.is_tree()) {
CordRep* rep = contents_.MakeFlatWithExtraCapacity(0);
rep = CordRepCrc::New(rep, std::move(state));
contents_.EmplaceTree(rep, method);
} else {
const CordzUpdateScope scope(contents_.data_.cordz_info(), method);
CordRep* rep = CordRepCrc::New(contents_.data_.as_tree(), std::move(state));
contents_.SetTree(rep, scope);
}
}
void Cord::SetExpectedChecksum(uint32_t crc) {
crc_internal::CrcCordState state;
state.mutable_rep()->prefix_crc.push_back(
crc_internal::CrcCordState::PrefixCrc(size(), absl::crc32c_t{crc}));
SetCrcCordState(std::move(state));
}
absl::Nullable<const crc_internal::CrcCordState*> Cord::MaybeGetCrcCordState()
const {
if (!contents_.is_tree() || !contents_.tree()->IsCrc()) {
return nullptr;
}
return &contents_.tree()->crc()->crc_cord_state;
}
absl::optional<uint32_t> Cord::ExpectedChecksum() const {
if (!contents_.is_tree() || !contents_.tree()->IsCrc()) {
return absl::nullopt;
}
return static_cast<uint32_t>(
contents_.tree()->crc()->crc_cord_state.Checksum());
}
inline int Cord::CompareSlowPath(absl::string_view rhs, size_t compared_size,
size_t size_to_compare) const {
auto advance = [](absl::Nonnull<Cord::ChunkIterator*> it,
absl::Nonnull<absl::string_view*> chunk) {
if (!chunk->empty()) return true;
++*it;
if (it->bytes_remaining_ == 0) return false;
*chunk = **it;
return true;
};
Cord::ChunkIterator lhs_it = chunk_begin();
absl::string_view lhs_chunk =
(lhs_it.bytes_remaining_ != 0) ? *lhs_it : absl::string_view();
assert(compared_size <= lhs_chunk.size());
assert(compared_size <= rhs.size());
lhs_chunk.remove_prefix(compared_size);
rhs.remove_prefix(compared_size);
size_to_compare -= compared_size;
while (advance(&lhs_it, &lhs_chunk) && !rhs.empty()) {
int comparison_result = CompareChunks(&lhs_chunk, &rhs, &size_to_compare);
if (comparison_result != 0) return comparison_result;
if (size_to_compare == 0) return 0;
}
return static_cast<int>(rhs.empty()) - static_cast<int>(lhs_chunk.empty());
}
inline int Cord::CompareSlowPath(const Cord& rhs, size_t compared_size,
size_t size_to_compare) const {
auto advance = [](absl::Nonnull<Cord::ChunkIterator*> it,
absl::Nonnull<absl::string_view*> chunk) {
if (!chunk->empty()) return true;
++*it;
if (it->bytes_remaining_ == 0) return false;
*chunk = **it;
return true;
};
Cord::ChunkIterator lhs_it = chunk_begin();
Cord::ChunkIterator rhs_it = rhs.chunk_begin();
absl::string_view lhs_chunk =
(lhs_it.bytes_remaining_ != 0) ? *lhs_it : absl::string_view();
absl::string_view rhs_chunk =
(rhs_it.bytes_remaining_ != 0) ? *rhs_it : absl::string_view();
assert(compared_size <= lhs_chunk.size());
assert(compared_size <= rhs_chunk.size());
lhs_chunk.remove_prefix(compared_size);
rhs_chunk.remove_prefix(compared_size);
size_to_compare -= compared_size;
while (advance(&lhs_it, &lhs_chunk) && advance(&rhs_it, &rhs_chunk)) {
int memcmp_res = CompareChunks(&lhs_chunk, &rhs_chunk, &size_to_compare);
if (memcmp_res != 0) return memcmp_res;
if (size_to_compare == 0) return 0;
}
return static_cast<int>(rhs_chunk.empty()) -
static_cast<int>(lhs_chunk.empty());
}
inline absl::string_view Cord::GetFirstChunk(const Cord& c) {
if (c.empty()) return {};
return c.contents_.FindFlatStartPiece();
}
inline absl::string_view Cord::GetFirstChunk(absl::string_view sv) {
return sv;
}
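// Compares the leading flat pieces of both sides with a single memcmp and
// falls back to the chunk-walking slow path only when those pieces are
// equal and more data remains to compare.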
template <typename ResultType, typename RHS>
ResultType GenericCompare(const Cord& lhs, const RHS& rhs,
size_t size_to_compare) {
absl::string_view lhs_chunk = Cord::GetFirstChunk(lhs);
absl::string_view rhs_chunk = Cord::GetFirstChunk(rhs);
size_t compared_size = std::min(lhs_chunk.size(), rhs_chunk.size());
assert(size_to_compare >= compared_size);
int memcmp_res = ::memcmp(lhs_chunk.data(), rhs_chunk.data(), compared_size);
if (compared_size == size_to_compare || memcmp_res != 0) {
return ComputeCompareResult<ResultType>(memcmp_res);
}
return ComputeCompareResult<ResultType>(
lhs.CompareSlowPath(rhs, compared_size, size_to_compare));
}
bool Cord::EqualsImpl(absl::string_view rhs, size_t size_to_compare) const {
return GenericCompare<bool>(*this, rhs, size_to_compare);
}
bool Cord::EqualsImpl(const Cord& rhs, size_t size_to_compare) const {
return GenericCompare<bool>(*this, rhs, size_to_compare);
}
template <typename RHS>
inline int SharedCompareImpl(const Cord& lhs, const RHS& rhs) {
size_t lhs_size = lhs.size();
size_t rhs_size = rhs.size();
if (lhs_size == rhs_size) {
return GenericCompare<int>(lhs, rhs, lhs_size);
}
if (lhs_size < rhs_size) {
auto data_comp_res = GenericCompare<int>(lhs, rhs, lhs_size);
return data_comp_res == 0 ? -1 : data_comp_res;
}
auto data_comp_res = GenericCompare<int>(lhs, rhs, rhs_size);
return data_comp_res == 0 ? +1 : data_comp_res;
}
int Cord::Compare(absl::string_view rhs) const {
return SharedCompareImpl(*this, rhs);
}
int Cord::CompareImpl(const Cord& rhs) const {
return SharedCompareImpl(*this, rhs);
}
bool Cord::EndsWith(absl::string_view rhs) const {
size_t my_size = size();
size_t rhs_size = rhs.size();
if (my_size < rhs_size) return false;
Cord tmp(*this);
tmp.RemovePrefix(my_size - rhs_size);
return tmp.EqualsImpl(rhs, rhs_size);
}
bool Cord::EndsWith(const Cord& rhs) const {
size_t my_size = size();
size_t rhs_size = rhs.size();
if (my_size < rhs_size) return false;
Cord tmp(*this);
tmp.RemovePrefix(my_size - rhs_size);
return tmp.EqualsImpl(rhs, rhs_size);
}
Cord::operator std::string() const {
std::string s;
absl::CopyCordToString(*this, &s);
return s;
}
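// Copies `src` into `*dst`, reusing `dst`'s existing capacity when it is
// large enough: inlined cords copy directly, tree cords resize `*dst`
// uninitialized and fill it chunk by chunk.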
void CopyCordToString(const Cord& src, absl::Nonnull<std::string*> dst) {
if (!src.contents_.is_tree()) {
src.contents_.CopyTo(dst);
} else {
absl::strings_internal::STLStringResizeUninitialized(dst, src.size());
src.CopyToArraySlowPath(&(*dst)[0]);
}
}
void AppendCordToString(const Cord& src, absl::Nonnull<std::string*> dst) {
const size_t cur_dst_size = dst->size();
const size_t new_dst_size = cur_dst_size + src.size();
absl::strings_internal::STLStringResizeUninitializedAmortized(dst,
new_dst_size);
char* append_ptr = &(*dst)[cur_dst_size];
src.CopyToArrayImpl(append_ptr);
}
void Cord::CopyToArraySlowPath(absl::Nonnull<char*> dst) const {
assert(contents_.is_tree());
absl::string_view fragment;
if (GetFlatAux(contents_.tree(), &fragment)) {
memcpy(dst, fragment.data(), fragment.size());
return;
}
for (absl::string_view chunk : Chunks()) {
memcpy(dst, chunk.data(), chunk.size());
dst += chunk.size();
}
}
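// Reads `n` bytes at the current position into a new Cord and advances the
// iterator. Reads that fit inline are copied byte by byte; larger reads
// share the underlying data via btree reads or substring nodes.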
Cord Cord::ChunkIterator::AdvanceAndReadBytes(size_t n) {
ABSL_HARDENING_ASSERT(bytes_remaining_ >= n &&
"Attempted to iterate past `end()`");
Cord subcord;
auto constexpr method = CordzUpdateTracker::kCordReader;
if (n <= InlineRep::kMaxInline) {
char* data = subcord.contents_.set_data(n);
while (n > current_chunk_.size()) {
memcpy(data, current_chunk_.data(), current_chunk_.size());
data += current_chunk_.size();
n -= current_chunk_.size();
++*this;
}
memcpy(data, current_chunk_.data(), n);
if (n < current_chunk_.size()) {
RemoveChunkPrefix(n);
} else if (n > 0) {
++*this;
}
return subcord;
}
if (btree_reader_) {
size_t chunk_size = current_chunk_.size();
if (n <= chunk_size && n <= kMaxBytesToCopy) {
subcord = Cord(current_chunk_.substr(0, n), method);
if (n < chunk_size) {
current_chunk_.remove_prefix(n);
} else {
current_chunk_ = btree_reader_.Next();
}
} else {
CordRep* rep;
current_chunk_ = btree_reader_.Read(n, chunk_size, rep);
subcord.contents_.EmplaceTree(rep, method);
}
bytes_remaining_ -= n;
return subcord;
}
assert(current_leaf_ != nullptr);
if (n == current_leaf_->length) {
bytes_remaining_ = 0;
current_chunk_ = {};
CordRep* tree = CordRep::Ref(current_leaf_);
subcord.contents_.EmplaceTree(VerifyTree(tree), method);
return subcord;
}
CordRep* payload = current_leaf_->IsSubstring()
? current_leaf_->substring()->child
: current_leaf_;
const char* data = payload->IsExternal() ? payload->external()->base
: payload->flat()->Data();
const size_t offset = static_cast<size_t>(current_chunk_.data() - data);
auto* tree = CordRepSubstring::Substring(payload, offset, n);
subcord.contents_.EmplaceTree(VerifyTree(tree), method);
bytes_remaining_ -= n;
current_chunk_.remove_prefix(n);
return subcord;
}
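// Random access by index: walks down the tree, folding substring offsets
// into `offset` until a flat, btree, or external leaf is reached.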
char Cord::operator[](size_t i) const {
ABSL_HARDENING_ASSERT(i < size());
size_t offset = i;
const CordRep* rep = contents_.tree();
if (rep == nullptr) {
return contents_.data()[i];
}
rep = cord_internal::SkipCrcNode(rep);
while (true) {
assert(rep != nullptr);
assert(offset < rep->length);
if (rep->IsFlat()) {
return rep->flat()->Data()[offset];
} else if (rep->IsBtree()) {
return rep->btree()->GetCharacter(offset);
} else if (rep->IsExternal()) {
return rep->external()->base[offset];
} else {
assert(rep->IsSubstring());
offset += rep->substring()->start;
rep = rep->substring()->child;
}
}
}
namespace {
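// Returns true if `needle` occurs in the haystack starting exactly at
// `position`. The caller must guarantee at least needle.size() bytes remain.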
bool IsSubstringInCordAt(absl::Cord::CharIterator position,
absl::string_view needle) {
auto haystack_chunk = absl::Cord::ChunkRemaining(position);
while (true) {
assert(!haystack_chunk.empty());
auto min_length = std::min(haystack_chunk.size(), needle.size());
if (!absl::ConsumePrefix(&needle, haystack_chunk.substr(0, min_length))) {
return false;
}
if (needle.empty()) {
return true;
}
absl::Cord::Advance(&position, min_length);
haystack_chunk = absl::Cord::ChunkRemaining(position);
}
}
}
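// Scans forward from `it` for the first byte of `needle` within each chunk,
// then verifies the full needle at every candidate position.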
absl::Cord::CharIterator absl::Cord::FindImpl(CharIterator it,
absl::string_view needle) const {
assert(!needle.empty());
assert(it.chunk_iterator_.bytes_remaining_ >= needle.size());
while (it.chunk_iterator_.bytes_remaining_ >= needle.size()) {
auto haystack_chunk = Cord::ChunkRemaining(it);
assert(!haystack_chunk.empty());
auto idx = haystack_chunk.find(needle.front());
if (idx == absl::string_view::npos) {
Cord::Advance(&it, haystack_chunk.size());
continue;
}
Cord::Advance(&it, idx);
if (it.chunk_iterator_.bytes_remaining_ < needle.size()) {
break;
}
if (IsSubstringInCordAt(it, needle)) {
return it;
}
Cord::Advance(&it, 1);
}
return char_end();
}
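// Returns an iterator to the first occurrence of `needle`, or char_end() if
// it is absent. An empty needle matches at char_begin().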
absl::Cord::CharIterator absl::Cord::Find(absl::string_view needle) const {
if (needle.empty()) {
return char_begin();
}
if (needle.size() > size()) {
return char_end();
}
if (needle.size() == size()) {
return *this == needle ? char_begin() : char_end();
}
return FindImpl(char_begin(), needle);
}
namespace {
bool IsSubcordInCordAt(absl::Cord::CharIterator haystack,
absl::Cord::CharIterator needle_begin,
absl::Cord::CharIterator needle_end) {
while (needle_begin != needle_end) {
auto haystack_chunk = absl::Cord::ChunkRemaining(haystack);
assert(!haystack_chunk.empty());
auto needle_chunk = absl::Cord::ChunkRemaining(needle_begin);
auto min_length = std::min(haystack_chunk.size(), needle_chunk.size());
if (haystack_chunk.substr(0, min_length) !=
needle_chunk.substr(0, min_length)) {
return false;
}
absl::Cord::Advance(&haystack, min_length);
absl::Cord::Advance(&needle_begin, min_length);
}
return true;
}
bool IsSubcordInCordAt(absl::Cord::CharIterator position,
const absl::Cord& needle) {
return IsSubcordInCordAt(position, needle.char_begin(), needle.char_end());
}
}
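// Cord-needle overload: locates candidates by searching for the needle's
// first chunk, then verifies the remaining chunks at each candidate.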
absl::Cord::CharIterator absl::Cord::Find(const absl::Cord& needle) const {
if (needle.empty()) {
return char_begin();
}
const auto needle_size = needle.size();
if (needle_size > size()) {
return char_end();
}
if (needle_size == size()) {
return *this == needle ? char_begin() : char_end();
}
const auto needle_chunk = Cord::ChunkRemaining(needle.char_begin());
auto haystack_it = char_begin();
while (true) {
haystack_it = FindImpl(haystack_it, needle_chunk);
if (haystack_it == char_end() ||
haystack_it.chunk_iterator_.bytes_remaining_ < needle_size) {
break;
}
auto haystack_advanced_it = haystack_it;
auto needle_it = needle.char_begin();
Cord::Advance(&haystack_advanced_it, needle_chunk.size());
Cord::Advance(&needle_it, needle_chunk.size());
if (IsSubcordInCordAt(haystack_advanced_it, needle_it, needle.char_end())) {
return haystack_it;
}
Cord::Advance(&haystack_it, 1);
if (haystack_it.chunk_iterator_.bytes_remaining_ < needle_size) {
break;
}
if (haystack_it.chunk_iterator_.bytes_remaining_ == needle_size) {
if (IsSubcordInCordAt(haystack_it, needle)) {
return haystack_it;
}
break;
}
}
return char_end();
}
bool Cord::Contains(absl::string_view rhs) const {
return rhs.empty() || Find(rhs) != char_end();
}
bool Cord::Contains(const absl::Cord& rhs) const {
return rhs.empty() || Find(rhs) != char_end();
}
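// Flattens a tree cord into one contiguous buffer: a flat node when the
// contents fit in kMaxFlatLength, otherwise a heap allocation wrapped in an
// external node.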
absl::string_view Cord::FlattenSlowPath() {
assert(contents_.is_tree());
size_t total_size = size();
CordRep* new_rep;
char* new_buffer;
if (total_size <= kMaxFlatLength) {
new_rep = CordRepFlat::New(total_size);
new_rep->length = total_size;
new_buffer = new_rep->flat()->Data();
CopyToArraySlowPath(new_buffer);
} else {
new_buffer = std::allocator<char>().allocate(total_size);
CopyToArraySlowPath(new_buffer);
new_rep = absl::cord_internal::NewExternalRep(
absl::string_view(new_buffer, total_size), [](absl::string_view s) {
std::allocator<char>().deallocate(const_cast<char*>(s.data()),
s.size());
});
}
CordzUpdateScope scope(contents_.cordz_info(), CordzUpdateTracker::kFlatten);
CordRep::Unref(contents_.as_tree());
contents_.SetTree(new_rep, scope);
return absl::string_view(new_buffer, total_size);
}
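// If `rep`'s contents are contiguous in memory, points `*fragment` at them
// and returns true; returns false for genuinely fragmented trees. An empty
// rep is trivially flat.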
bool Cord::GetFlatAux(absl::Nonnull<CordRep*> rep,
absl::Nonnull<absl::string_view*> fragment) {
assert(rep != nullptr);
if (rep->length == 0) {
*fragment = absl::string_view();
return true;
}
rep = cord_internal::SkipCrcNode(rep);
if (rep->IsFlat()) {
*fragment = absl::string_view(rep->flat()->Data(), rep->length);
return true;
} else if (rep->IsExternal()) {
*fragment = absl::string_view(rep->external()->base, rep->length);
return true;
} else if (rep->IsBtree()) {
return rep->btree()->IsFlat(fragment);
} else if (rep->IsSubstring()) {
CordRep* child = rep->substring()->child;
if (child->IsFlat()) {
*fragment = absl::string_view(
child->flat()->Data() + rep->substring()->start, rep->length);
return true;
} else if (child->IsExternal()) {
*fragment = absl::string_view(
child->external()->base + rep->substring()->start, rep->length);
return true;
} else if (child->IsBtree()) {
return child->btree()->IsFlat(rep->substring()->start, rep->length,
fragment);
}
}
return false;
}
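// Invokes `callback` once per contiguous chunk of `rep`, in order; makes no
// calls for an empty rep.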
void Cord::ForEachChunkAux(
absl::Nonnull<absl::cord_internal::CordRep*> rep,
absl::FunctionRef<void(absl::string_view)> callback) {
assert(rep != nullptr);
if (rep->length == 0) return;
rep = cord_internal::SkipCrcNode(rep);
if (rep->IsBtree()) {
ChunkIterator it(rep), end;
while (it != end) {
callback(*it);
++it;
}
return;
}
  // `rep` was already stripped of any CRC wrapper above, so it is a leaf
  // (flat, external, or substring) and always yields a single chunk.
  absl::string_view chunk;
  bool success = GetFlatAux(rep, &chunk);
assert(success);
if (success) {
callback(chunk);
}
}
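// Debug helper: prints refcount, length, and kind for `rep` and its
// descendants, indenting one step per level of nesting.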
static void DumpNode(absl::Nonnull<CordRep*> nonnull_rep, bool include_data,
                     absl::Nonnull<std::ostream*> os, int indent = 0) {
CordRep* rep = nonnull_rep;
const int kIndentStep = 1;
for (;;) {
*os << std::setw(3) << (rep == nullptr ? 0 : rep->refcount.Get());
*os << " " << std::setw(7) << (rep == nullptr ? 0 : rep->length);
*os << " [";
if (include_data) *os << static_cast<void*>(rep);
*os << "]";
*os << " " << std::setw(indent) << "";
bool leaf = false;
if (rep == nullptr) {
*os << "NULL\n";
leaf = true;
} else if (rep->IsCrc()) {
*os << "CRC crc=" << rep->crc()->crc_cord_state.Checksum() << "\n";
indent += kIndentStep;
rep = rep->crc()->child;
} else if (rep->IsSubstring()) {
*os << "SUBSTRING @ " << rep->substring()->start << "\n";
indent += kIndentStep;
rep = rep->substring()->child;
} else {
leaf = true;
if (rep->IsExternal()) {
*os << "EXTERNAL [";
if (include_data)
*os << absl::CEscape(
absl::string_view(rep->external()->base, rep->length));
*os << "]\n";
} else if (rep->IsFlat()) {
*os << "FLAT cap=" << rep->flat()->Capacity() << " [";
if (include_data)
*os << absl::CEscape(
absl::string_view(rep->flat()->Data(), rep->length));
*os << "]\n";
} else {
CordRepBtree::Dump(rep, "", include_data, *os);
}
}
if (leaf) {
break;
}
}
}
static std::string ReportError(absl::Nonnull<CordRep*> root,
absl::Nonnull<CordRep*> node) {
std::ostringstream buf;
buf << "Error at node " << node << " in:";
DumpNode(root, true, &buf);
return buf.str();
}
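// Checks structural invariants (non-zero lengths, in-bounds substrings,
// consistent CRC wrappers) for `start_node` and any CRC children,
// CHECK-failing with a dump of `root` on violation.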
static bool VerifyNode(absl::Nonnull<CordRep*> root,
absl::Nonnull<CordRep*> start_node) {
absl::InlinedVector<absl::Nonnull<CordRep*>, 2> worklist;
worklist.push_back(start_node);
do {
CordRep* node = worklist.back();
worklist.pop_back();
ABSL_INTERNAL_CHECK(node != nullptr, ReportError(root, node));
if (node != root) {
ABSL_INTERNAL_CHECK(node->length != 0, ReportError(root, node));
ABSL_INTERNAL_CHECK(!node->IsCrc(), ReportError(root, node));
}
if (node->IsFlat()) {
ABSL_INTERNAL_CHECK(node->length <= node->flat()->Capacity(),
ReportError(root, node));
} else if (node->IsExternal()) {
ABSL_INTERNAL_CHECK(node->external()->base != nullptr,
ReportError(root, node));
} else if (node->IsSubstring()) {
ABSL_INTERNAL_CHECK(
node->substring()->start < node->substring()->child->length,
ReportError(root, node));
ABSL_INTERNAL_CHECK(node->substring()->start + node->length <=
node->substring()->child->length,
ReportError(root, node));
} else if (node->IsCrc()) {
ABSL_INTERNAL_CHECK(
node->crc()->child != nullptr || node->crc()->length == 0,
ReportError(root, node));
if (node->crc()->child != nullptr) {
ABSL_INTERNAL_CHECK(node->crc()->length == node->crc()->child->length,
ReportError(root, node));
worklist.push_back(node->crc()->child);
}
}
} while (!worklist.empty());
return true;
}
std::ostream& operator<<(std::ostream& out, const Cord& cord) {
for (absl::string_view chunk : cord.Chunks()) {
out.write(chunk.data(), static_cast<std::streamsize>(chunk.size()));
}
return out;
}
namespace strings_internal {
size_t CordTestAccess::FlatOverhead() { return cord_internal::kFlatOverhead; }
size_t CordTestAccess::MaxFlatLength() { return cord_internal::kMaxFlatLength; }
size_t CordTestAccess::FlatTagToLength(uint8_t tag) {
return cord_internal::TagToLength(tag);
}
uint8_t CordTestAccess::LengthToTag(size_t s) {
ABSL_INTERNAL_CHECK(s <= kMaxFlatLength, absl::StrCat("Invalid length ", s));
return cord_internal::AllocatedSizeToTag(s + cord_internal::kFlatOverhead);
}
size_t CordTestAccess::SizeofCordRepExternal() {
return sizeof(CordRepExternal);
}
size_t CordTestAccess::SizeofCordRepSubstring() {
return sizeof(CordRepSubstring);
}
}
ABSL_NAMESPACE_END
} | #include "absl/strings/cord.h"
#include <algorithm>
#include <array>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstring>
#include <iostream>
#include <iterator>
#include <limits>
#include <random>
#include <set>
#include <sstream>
#include <string>
#include <type_traits>
#include <utility>
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/base/attributes.h"
#include "absl/base/config.h"
#include "absl/base/internal/endian.h"
#include "absl/base/macros.h"
#include "absl/base/no_destructor.h"
#include "absl/base/options.h"
#include "absl/container/fixed_array.h"
#include "absl/functional/function_ref.h"
#include "absl/hash/hash.h"
#include "absl/hash/hash_testing.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/random/random.h"
#include "absl/strings/cord_buffer.h"
#include "absl/strings/cord_test_helpers.h"
#include "absl/strings/cordz_test_helpers.h"
#include "absl/strings/internal/cord_internal.h"
#include "absl/strings/internal/cord_rep_crc.h"
#include "absl/strings/internal/cord_rep_flat.h"
#include "absl/strings/internal/cordz_statistics.h"
#include "absl/strings/internal/cordz_update_tracker.h"
#include "absl/strings/internal/string_constant.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "absl/types/compare.h"
#include "absl/types/optional.h"
static constexpr auto FLAT = absl::cord_internal::FLAT;
static constexpr auto MAX_FLAT_TAG = absl::cord_internal::MAX_FLAT_TAG;
typedef std::mt19937_64 RandomEngine;
using absl::cord_internal::CordRep;
using absl::cord_internal::CordRepBtree;
using absl::cord_internal::CordRepConcat;
using absl::cord_internal::CordRepCrc;
using absl::cord_internal::CordRepExternal;
using absl::cord_internal::CordRepFlat;
using absl::cord_internal::CordRepSubstring;
using absl::cord_internal::CordzUpdateTracker;
using absl::cord_internal::kFlatOverhead;
using absl::cord_internal::kMaxFlatLength;
using ::testing::ElementsAre;
using ::testing::Le;
static std::string RandomLowercaseString(RandomEngine* rng);
static std::string RandomLowercaseString(RandomEngine* rng, size_t length);
static int GetUniformRandomUpTo(RandomEngine* rng, int upper_bound) {
if (upper_bound > 0) {
std::uniform_int_distribution<int> uniform(0, upper_bound - 1);
return uniform(*rng);
} else {
return 0;
}
}
static size_t GetUniformRandomUpTo(RandomEngine* rng, size_t upper_bound) {
if (upper_bound > 0) {
std::uniform_int_distribution<size_t> uniform(0, upper_bound - 1);
return uniform(*rng);
} else {
return 0;
}
}
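// Draws a value whose bit width is itself uniform in [0, max_log], which
// skews the distribution heavily toward small numbers.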
static int32_t GenerateSkewedRandom(RandomEngine* rng, int max_log) {
const uint32_t base = (*rng)() % (max_log + 1);
const uint32_t mask = ((base < 32) ? (1u << base) : 0u) - 1u;
return (*rng)() & mask;
}
static std::string RandomLowercaseString(RandomEngine* rng) {
int length;
std::bernoulli_distribution one_in_1k(0.001);
std::bernoulli_distribution one_in_10k(0.0001);
if (one_in_10k(*rng)) {
length = GetUniformRandomUpTo(rng, 1048576);
} else if (one_in_1k(*rng)) {
length = GetUniformRandomUpTo(rng, 10000);
} else {
length = GenerateSkewedRandom(rng, 10);
}
return RandomLowercaseString(rng, length);
}
static std::string RandomLowercaseString(RandomEngine* rng, size_t length) {
std::string result(length, '\0');
std::uniform_int_distribution<int> chars('a', 'z');
std::generate(result.begin(), result.end(),
[&]() { return static_cast<char>(chars(*rng)); });
return result;
}
static void DoNothing(absl::string_view /*data*/, void* /*arg*/) {}
static void DeleteExternalString(absl::string_view data, void* arg) {
std::string* s = reinterpret_cast<std::string*>(arg);
EXPECT_EQ(data, *s);
delete s;
}
static void AddExternalMemory(absl::string_view s, absl::Cord* dst) {
std::string* str = new std::string(s.data(), s.size());
dst->Append(absl::MakeCordFromExternal(*str, [str](absl::string_view data) {
DeleteExternalString(data, str);
}));
}
static void DumpGrowth() {
absl::Cord str;
for (int i = 0; i < 1000; i++) {
char c = 'a' + i % 26;
str.Append(absl::string_view(&c, 1));
}
}
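// Appends `s` to `cord` in randomly sized pieces, flipping a coin between
// external and flat appends; returns the smallest fragment size used.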
static size_t AppendWithFragments(const std::string& s, RandomEngine* rng,
absl::Cord* cord) {
size_t j = 0;
const size_t max_size = s.size() / 5;
size_t min_size = max_size;
while (j < s.size()) {
size_t N = 1 + GetUniformRandomUpTo(rng, max_size);
if (N > (s.size() - j)) {
N = s.size() - j;
}
if (N < min_size) {
min_size = N;
}
std::bernoulli_distribution coin_flip(0.5);
if (coin_flip(*rng)) {
AddExternalMemory(absl::string_view(s.data() + j, N), cord);
} else {
cord->Append(absl::string_view(s.data() + j, N));
}
j += N;
}
return min_size;
}
static void AddNewStringBlock(const std::string& str, absl::Cord* dst) {
char* data = new char[str.size()];
memcpy(data, str.data(), str.size());
dst->Append(absl::MakeCordFromExternal(
absl::string_view(data, str.size()),
[](absl::string_view s) { delete[] s.data(); }));
}
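// Builds a cord mixing flat, external, and substring representations so
// tests exercise several node kinds at once.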
static absl::Cord MakeComposite() {
absl::Cord cord;
cord.Append("the");
AddExternalMemory(" quick brown", &cord);
AddExternalMemory(" fox jumped", &cord);
absl::Cord full(" over");
AddExternalMemory(" the lazy", &full);
AddNewStringBlock(" dog slept the whole day away", &full);
absl::Cord substring = full.Subcord(0, 18);
substring.Append(std::string(1000, '.'));
cord.Append(substring);
cord = cord.Subcord(0, cord.size() - 998);
return cord;
}
namespace absl {
ABSL_NAMESPACE_BEGIN
class CordTestPeer {
public:
static void ForEachChunk(
const Cord& c, absl::FunctionRef<void(absl::string_view)> callback) {
c.ForEachChunk(callback);
}
static bool IsTree(const Cord& c) { return c.contents_.is_tree(); }
static CordRep* Tree(const Cord& c) { return c.contents_.tree(); }
static cord_internal::CordzInfo* GetCordzInfo(const Cord& c) {
return c.contents_.cordz_info();
}
static Cord MakeSubstring(Cord src, size_t offset, size_t length) {
CHECK(src.contents_.is_tree()) << "Can not be inlined";
CHECK(!src.ExpectedChecksum().has_value()) << "Can not be hardened";
Cord cord;
auto* tree = cord_internal::SkipCrcNode(src.contents_.tree());
auto* rep = CordRepSubstring::Create(CordRep::Ref(tree), offset, length);
cord.contents_.EmplaceTree(rep, CordzUpdateTracker::kSubCord);
return cord;
}
};
ABSL_NAMESPACE_END
}
class CordTest : public testing::TestWithParam<bool> {
public:
bool UseCrc() const { return GetParam(); }
void MaybeHarden(absl::Cord& c) {
if (UseCrc()) {
c.SetExpectedChecksum(1);
}
}
absl::Cord MaybeHardened(absl::Cord c) {
MaybeHarden(c);
return c;
}
static std::string ToString(testing::TestParamInfo<bool> useCrc) {
if (useCrc.param) {
return "BtreeHardened";
} else {
return "Btree";
}
}
};
INSTANTIATE_TEST_SUITE_P(WithParam, CordTest, testing::Bool(),
CordTest::ToString);
TEST(CordRepFlat, AllFlatCapacities) {
static_assert(absl::cord_internal::kFlatOverhead < 32, "");
static_assert(absl::cord_internal::kMinFlatSize == 32, "");
static_assert(absl::cord_internal::kMaxLargeFlatSize == 256 << 10, "");
EXPECT_EQ(absl::cord_internal::TagToAllocatedSize(FLAT), 32);
EXPECT_EQ(absl::cord_internal::TagToAllocatedSize(MAX_FLAT_TAG), 256 << 10);
size_t last_size = 0;
for (int tag = FLAT; tag <= MAX_FLAT_TAG; ++tag) {
size_t size = absl::cord_internal::TagToAllocatedSize(tag);
ASSERT_GT(size, last_size);
ASSERT_EQ(absl::cord_internal::TagToAllocatedSize(tag), size);
last_size = size;
}
for (size_t size = 32; size <= 512; size += 8) {
ASSERT_EQ(absl::cord_internal::RoundUpForTag(size), size);
uint8_t tag = absl::cord_internal::AllocatedSizeToTag(size);
ASSERT_EQ(absl::cord_internal::TagToAllocatedSize(tag), size);
}
for (size_t size = 512; size <= 8192; size += 64) {
ASSERT_EQ(absl::cord_internal::RoundUpForTag(size), size);
uint8_t tag = absl::cord_internal::AllocatedSizeToTag(size);
ASSERT_EQ(absl::cord_internal::TagToAllocatedSize(tag), size);
}
for (size_t size = 8192; size <= 256 * 1024; size += 4 * 1024) {
ASSERT_EQ(absl::cord_internal::RoundUpForTag(size), size);
uint8_t tag = absl::cord_internal::AllocatedSizeToTag(size);
ASSERT_EQ(absl::cord_internal::TagToAllocatedSize(tag), size);
}
}
TEST(CordRepFlat, MaxFlatSize) {
CordRepFlat* flat = CordRepFlat::New(kMaxFlatLength);
EXPECT_EQ(flat->Capacity(), kMaxFlatLength);
CordRep::Unref(flat);
flat = CordRepFlat::New(kMaxFlatLength * 4);
EXPECT_EQ(flat->Capacity(), kMaxFlatLength);
CordRep::Unref(flat);
}
TEST(CordRepFlat, MaxLargeFlatSize) {
const size_t size = 256 * 1024 - kFlatOverhead;
CordRepFlat* flat = CordRepFlat::New(CordRepFlat::Large(), size);
EXPECT_GE(flat->Capacity(), size);
CordRep::Unref(flat);
}
TEST(CordRepFlat, AllFlatSizes) {
const size_t kMaxSize = 256 * 1024;
  for (size_t size = 32; size <= kMaxSize; size *= 2) {
const size_t length = size - kFlatOverhead - 1;
CordRepFlat* flat = CordRepFlat::New(CordRepFlat::Large(), length);
EXPECT_GE(flat->Capacity(), length);
memset(flat->Data(), 0xCD, flat->Capacity());
CordRep::Unref(flat);
}
}
TEST_P(CordTest, AllFlatSizes) {
using absl::strings_internal::CordTestAccess;
for (size_t s = 0; s < CordTestAccess::MaxFlatLength(); s++) {
std::string src;
while (src.size() < s) {
src.push_back('a' + (src.size() % 26));
}
absl::Cord dst(src);
MaybeHarden(dst);
EXPECT_EQ(std::string(dst), src) << s;
}
}
TEST_P(CordTest, GigabyteCordFromExternal) {
const size_t one_gig = 1024U * 1024U * 1024U;
size_t max_size = 2 * one_gig;
if (sizeof(max_size) > 4) max_size = 128 * one_gig;
size_t length = 128 * 1024;
char* data = new char[length];
absl::Cord from = absl::MakeCordFromExternal(
absl::string_view(data, length),
[](absl::string_view sv) { delete[] sv.data(); });
absl::Cord c;
c.Append(from);
while (c.size() < max_size) {
c.Append(c);
c.Append(from);
c.Append(from);
c.Append(from);
c.Append(from);
MaybeHarden(c);
}
for (int i = 0; i < 1024; ++i) {
c.Append(from);
}
LOG(INFO) << "Made a Cord with " << c.size() << " bytes!";
}
static absl::Cord MakeExternalCord(int size) {
char* buffer = new char[size];
memset(buffer, 'x', size);
absl::Cord cord;
cord.Append(absl::MakeCordFromExternal(
absl::string_view(buffer, size),
[](absl::string_view s) { delete[] s.data(); }));
return cord;
}
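// Given external linkage so the compiler cannot constant-fold the aliasing
// branches in the self-assignment test below.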
extern bool my_unique_true_boolean;
bool my_unique_true_boolean = true;
TEST_P(CordTest, Assignment) {
absl::Cord x(absl::string_view("hi there"));
absl::Cord y(x);
MaybeHarden(y);
ASSERT_EQ(x.ExpectedChecksum(), absl::nullopt);
ASSERT_EQ(std::string(x), "hi there");
ASSERT_EQ(std::string(y), "hi there");
ASSERT_TRUE(x == y);
ASSERT_TRUE(x <= y);
ASSERT_TRUE(y <= x);
x = absl::string_view("foo");
ASSERT_EQ(std::string(x), "foo");
ASSERT_EQ(std::string(y), "hi there");
ASSERT_TRUE(x < y);
ASSERT_TRUE(y > x);
ASSERT_TRUE(x != y);
ASSERT_TRUE(x <= y);
ASSERT_TRUE(y >= x);
x = "foo";
ASSERT_EQ(x, "foo");
std::vector<std::pair<absl::string_view, absl::string_view>>
test_string_pairs = {{"hi there", "foo"},
{"loooooong coooooord", "short cord"},
{"short cord", "loooooong coooooord"},
{"loooooong coooooord1", "loooooong coooooord2"}};
for (std::pair<absl::string_view, absl::string_view> test_strings :
test_string_pairs) {
absl::Cord tmp(test_strings.first);
absl::Cord z(std::move(tmp));
ASSERT_EQ(std::string(z), test_strings.first);
tmp = test_strings.second;
z = std::move(tmp);
ASSERT_EQ(std::string(z), test_strings.second);
}
{
absl::Cord my_small_cord("foo");
absl::Cord my_big_cord("loooooong coooooord");
absl::Cord* my_small_alias =
my_unique_true_boolean ? &my_small_cord : &my_big_cord;
absl::Cord* my_big_alias =
!my_unique_true_boolean ? &my_small_cord : &my_big_cord;
*my_small_alias = std::move(my_small_cord);
*my_big_alias = std::move(my_big_cord);
}
}
TEST_P(CordTest, StartsEndsWith) {
absl::Cord x(absl::string_view("abcde"));
MaybeHarden(x);
absl::Cord empty("");
ASSERT_TRUE(x.StartsWith(absl::Cord("abcde")));
ASSERT_TRUE(x.StartsWith(absl::Cord("abc")));
ASSERT_TRUE(x.StartsWith(absl::Cord("")));
ASSERT_TRUE(empty.StartsWith(absl::Cord("")));
ASSERT_TRUE(x.EndsWith(absl::Cord("abcde")));
ASSERT_TRUE(x.EndsWith(absl::Cord("cde")));
ASSERT_TRUE(x.EndsWith(absl::Cord("")));
ASSERT_TRUE(empty.EndsWith(absl::Cord("")));
ASSERT_TRUE(!x.StartsWith(absl::Cord("xyz")));
ASSERT_TRUE(!empty.StartsWith(absl::Cord("xyz")));
ASSERT_TRUE(!x.EndsWith(absl::Cord("xyz")));
ASSERT_TRUE(!empty.EndsWith(absl::Cord("xyz")));
ASSERT_TRUE(x.StartsWith("abcde"));
ASSERT_TRUE(x.StartsWith("abc"));
ASSERT_TRUE(x.StartsWith(""));
ASSERT_TRUE(empty.StartsWith(""));
ASSERT_TRUE(x.EndsWith("abcde"));
ASSERT_TRUE(x.EndsWith("cde"));
ASSERT_TRUE(x.EndsWith(""));
ASSERT_TRUE(empty.EndsWith(""));
ASSERT_TRUE(!x.StartsWith("xyz"));
ASSERT_TRUE(!empty.StartsWith("xyz"));
ASSERT_TRUE(!x.EndsWith("xyz"));
ASSERT_TRUE(!empty.EndsWith("xyz"));
}
TEST_P(CordTest, Contains) {
auto flat_haystack = absl::Cord("this is a flat cord");
auto fragmented_haystack = absl::MakeFragmentedCord(
{"this", " ", "is", " ", "a", " ", "fragmented", " ", "cord"});
EXPECT_TRUE(flat_haystack.Contains(""));
EXPECT_TRUE(fragmented_haystack.Contains(""));
EXPECT_TRUE(flat_haystack.Contains(absl::Cord("")));
EXPECT_TRUE(fragmented_haystack.Contains(absl::Cord("")));
EXPECT_TRUE(absl::Cord("").Contains(""));
EXPECT_TRUE(absl::Cord("").Contains(absl::Cord("")));
EXPECT_FALSE(absl::Cord("").Contains(flat_haystack));
EXPECT_FALSE(absl::Cord("").Contains(fragmented_haystack));
EXPECT_FALSE(flat_haystack.Contains("z"));
EXPECT_FALSE(fragmented_haystack.Contains("z"));
EXPECT_FALSE(flat_haystack.Contains(absl::Cord("z")));
EXPECT_FALSE(fragmented_haystack.Contains(absl::Cord("z")));
EXPECT_FALSE(flat_haystack.Contains("is an"));
EXPECT_FALSE(fragmented_haystack.Contains("is an"));
EXPECT_FALSE(flat_haystack.Contains(absl::Cord("is an")));
EXPECT_FALSE(fragmented_haystack.Contains(absl::Cord("is an")));
EXPECT_FALSE(
flat_haystack.Contains(absl::MakeFragmentedCord({"is", " ", "an"})));
EXPECT_FALSE(fragmented_haystack.Contains(
absl::MakeFragmentedCord({"is", " ", "an"})));
EXPECT_TRUE(flat_haystack.Contains("is a"));
EXPECT_TRUE(fragmented_haystack.Contains("is a"));
EXPECT_TRUE(flat_haystack.Contains(absl::Cord("is a")));
EXPECT_TRUE(fragmented_haystack.Contains(absl::Cord("is a")));
EXPECT_TRUE(
flat_haystack.Contains(absl::MakeFragmentedCord({"is", " ", "a"})));
EXPECT_TRUE(
fragmented_haystack.Contains(absl::MakeFragmentedCord({"is", " ", "a"})));
}
TEST_P(CordTest, Find) {
auto flat_haystack = absl::Cord("this is a flat cord");
auto fragmented_haystack = absl::MakeFragmentedCord(
{"this", " ", "is", " ", "a", " ", "fragmented", " ", "cord"});
auto empty_haystack = absl::Cord("");
EXPECT_EQ(flat_haystack.Find(""), flat_haystack.char_begin());
EXPECT_EQ(fragmented_haystack.Find(""), fragmented_haystack.char_begin());
EXPECT_EQ(flat_haystack.Find(absl::Cord("")), flat_haystack.char_begin());
EXPECT_EQ(fragmented_haystack.Find(absl::Cord("")),
fragmented_haystack.char_begin());
EXPECT_EQ(empty_haystack.Find(""), empty_haystack.char_begin());
EXPECT_EQ(empty_haystack.Find(absl::Cord("")), empty_haystack.char_begin());
EXPECT_EQ(empty_haystack.Find(flat_haystack), empty_haystack.char_end());
EXPECT_EQ(empty_haystack.Find(fragmented_haystack),
empty_haystack.char_end());
EXPECT_EQ(flat_haystack.Find("z"), flat_haystack.char_end());
EXPECT_EQ(fragmented_haystack.Find("z"), fragmented_haystack.char_end());
EXPECT_EQ(flat_haystack.Find(absl::Cord("z")), flat_haystack.char_end());
EXPECT_EQ(fragmented_haystack.Find(absl::Cord("z")),
fragmented_haystack.char_end());
EXPECT_EQ(flat_haystack.Find("is an"), flat_haystack.char_end());
EXPECT_EQ(fragmented_haystack.Find("is an"), fragmented_haystack.char_end());
EXPECT_EQ(flat_haystack.Find(absl::Cord("is an")), flat_haystack.char_end());
EXPECT_EQ(fragmented_haystack.Find(absl::Cord("is an")),
fragmented_haystack.char_end());
EXPECT_EQ(flat_haystack.Find(absl::MakeFragmentedCord({"is", " ", "an"})),
flat_haystack.char_end());
EXPECT_EQ(
fragmented_haystack.Find(absl::MakeFragmentedCord({"is", " ", "an"})),
fragmented_haystack.char_end());
EXPECT_EQ(flat_haystack.Find("is a"),
std::next(flat_haystack.char_begin(), 5));
EXPECT_EQ(fragmented_haystack.Find("is a"),
std::next(fragmented_haystack.char_begin(), 5));
EXPECT_EQ(flat_haystack.Find(absl::Cord("is a")),
std::next(flat_haystack.char_begin(), 5));
EXPECT_EQ(fragmented_haystack.Find(absl::Cord("is a")),
std::next(fragmented_haystack.char_begin(), 5));
EXPECT_EQ(flat_haystack.Find(absl::MakeFragmentedCord({"is", " ", "a"})),
std::next(flat_haystack.char_begin(), 5));
EXPECT_EQ(
fragmented_haystack.Find(absl::MakeFragmentedCord({"is", " ", "a"})),
std::next(fragmented_haystack.char_begin(), 5));
}
TEST_P(CordTest, Subcord) {
RandomEngine rng(GTEST_FLAG_GET(random_seed));
const std::string s = RandomLowercaseString(&rng, 1024);
absl::Cord a;
AppendWithFragments(s, &rng, &a);
MaybeHarden(a);
ASSERT_EQ(s, std::string(a));
std::set<size_t> positions;
for (int i = 0; i <= 32; ++i) {
positions.insert(i);
positions.insert(i * 32 - 1);
positions.insert(i * 32);
positions.insert(i * 32 + 1);
positions.insert(a.size() - i);
}
positions.insert(237);
positions.insert(732);
for (size_t pos : positions) {
if (pos > a.size()) continue;
for (size_t end_pos : positions) {
if (end_pos < pos || end_pos > a.size()) continue;
absl::Cord sa = a.Subcord(pos, end_pos - pos);
ASSERT_EQ(absl::string_view(s).substr(pos, end_pos - pos),
std::string(sa))
<< a;
if (pos != 0 || end_pos != a.size()) {
ASSERT_EQ(sa.ExpectedChecksum(), absl::nullopt);
}
}
}
const std::string sh = "short";
absl::Cord c(sh);
for (size_t pos = 0; pos <= sh.size(); ++pos) {
for (size_t n = 0; n <= sh.size() - pos; ++n) {
absl::Cord sc = c.Subcord(pos, n);
ASSERT_EQ(sh.substr(pos, n), std::string(sc)) << c;
}
}
absl::Cord sa = a.Subcord(0, a.size());
std::string ss = s.substr(0, s.size());
while (sa.size() > 1) {
sa = sa.Subcord(1, sa.size() - 2);
ss = ss.substr(1, ss.size() - 2);
ASSERT_EQ(ss, std::string(sa)) << a;
if (HasFailure()) break;
}
sa = a.Subcord(0, a.size() + 1);
EXPECT_EQ(s, std::string(sa));
sa = a.Subcord(a.size() + 1, 0);
EXPECT_TRUE(sa.empty());
sa = a.Subcord(a.size() + 1, 1);
EXPECT_TRUE(sa.empty());
}
TEST_P(CordTest, Swap) {
absl::string_view a("Dexter");
absl::string_view b("Mandark");
absl::Cord x(a);
absl::Cord y(b);
MaybeHarden(x);
swap(x, y);
if (UseCrc()) {
ASSERT_EQ(x.ExpectedChecksum(), absl::nullopt);
ASSERT_EQ(y.ExpectedChecksum(), 1);
}
ASSERT_EQ(x, absl::Cord(b));
ASSERT_EQ(y, absl::Cord(a));
x.swap(y);
if (UseCrc()) {
ASSERT_EQ(x.ExpectedChecksum(), 1);
ASSERT_EQ(y.ExpectedChecksum(), absl::nullopt);
}
ASSERT_EQ(x, absl::Cord(a));
ASSERT_EQ(y, absl::Cord(b));
}
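// Exercises CopyCordToString both into an empty string and into one with
// preexisting capacity, verifying no reallocation when capacity suffices.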
static void VerifyCopyToString(const absl::Cord& cord) {
std::string initially_empty;
absl::CopyCordToString(cord, &initially_empty);
EXPECT_EQ(initially_empty, cord);
constexpr size_t kInitialLength = 1024;
std::string has_initial_contents(kInitialLength, 'x');
const char* address_before_copy = has_initial_contents.data();
absl::CopyCordToString(cord, &has_initial_contents);
EXPECT_EQ(has_initial_contents, cord);
if (cord.size() <= kInitialLength) {
EXPECT_EQ(has_initial_contents.data(), address_before_copy)
<< "CopyCordToString allocated new string storage; "
"has_initial_contents = \""
<< has_initial_contents << "\"";
}
}
TEST_P(CordTest, CopyToString) {
VerifyCopyToString(absl::Cord());
VerifyCopyToString(MaybeHardened(absl::Cord("small cord")));
VerifyCopyToString(MaybeHardened(
absl::MakeFragmentedCord({"fragmented ", "cord ", "to ", "test ",
"copying ", "to ", "a ", "string."})));
}
static void VerifyAppendCordToString(const absl::Cord& cord) {
std::string initially_empty;
absl::AppendCordToString(cord, &initially_empty);
EXPECT_EQ(initially_empty, cord);
const absl::string_view kInitialContents = "initial contents.";
std::string expected_after_append =
absl::StrCat(kInitialContents, std::string(cord));
std::string no_reserve(kInitialContents);
absl::AppendCordToString(cord, &no_reserve);
EXPECT_EQ(no_reserve, expected_after_append);
std::string has_reserved_capacity(kInitialContents);
has_reserved_capacity.reserve(has_reserved_capacity.size() + cord.size());
const char* address_before_copy = has_reserved_capacity.data();
absl::AppendCordToString(cord, &has_reserved_capacity);
EXPECT_EQ(has_reserved_capacity, expected_after_append);
EXPECT_EQ(has_reserved_capacity.data(), address_before_copy)
<< "AppendCordToString allocated new string storage; "
"has_reserved_capacity = \""
<< has_reserved_capacity << "\"";
}
TEST_P(CordTest, AppendToString) {
VerifyAppendCordToString(absl::Cord());
VerifyAppendCordToString(MaybeHardened(absl::Cord("small cord")));
VerifyAppendCordToString(MaybeHardened(
absl::MakeFragmentedCord({"fragmented ", "cord ", "to ", "test ",
"appending ", "to ", "a ", "string."})));
}
TEST_P(CordTest, AppendEmptyBuffer) {
absl::Cord cord;
cord.Append(absl::CordBuffer());
cord.Append(absl::CordBuffer::CreateWithDefaultLimit(2000));
}
TEST_P(CordTest, AppendEmptyBufferToFlat) {
absl::Cord cord(std::string(2000, 'x'));
cord.Append(absl::CordBuffer());
cord.Append(absl::CordBuffer::CreateWithDefaultLimit(2000));
}
TEST_P(CordTest, AppendEmptyBufferToTree) {
absl::Cord cord(std::string(2000, 'x'));
cord.Append(std::string(2000, 'y'));
cord.Append(absl::CordBuffer());
cord.Append(absl::CordBuffer::CreateWithDefaultLimit(2000));
}
TEST_P(CordTest, AppendSmallBuffer) {
absl::Cord cord;
absl::CordBuffer buffer = absl::CordBuffer::CreateWithDefaultLimit(3);
ASSERT_THAT(buffer.capacity(), Le(15));
memcpy(buffer.data(), "Abc", 3);
buffer.SetLength(3);
cord.Append(std::move(buffer));
EXPECT_EQ(buffer.length(), 0);
EXPECT_GT(buffer.capacity(), 0);
buffer = absl::CordBuffer::CreateWithDefaultLimit(3);
memcpy(buffer.data(), "defgh", 5);
buffer.SetLength(5);
cord.Append(std::move(buffer));
EXPECT_EQ(buffer.length(), 0);
EXPECT_GT(buffer.capacity(), 0);
EXPECT_THAT(cord.Chunks(), ElementsAre("Abcdefgh"));
}
TEST_P(CordTest, AppendAndPrependBufferArePrecise) {
std::string test_data(absl::cord_internal::kMaxFlatLength * 10, 'x');
absl::Cord cord1(test_data);
absl::Cord cord2(test_data);
const size_t size1 = cord1.EstimatedMemoryUsage();
const size_t size2 = cord2.EstimatedMemoryUsage();
absl::CordBuffer buffer = absl::CordBuffer::CreateWithDefaultLimit(3);
memcpy(buffer.data(), "Abc", 3);
buffer.SetLength(3);
cord1.Append(std::move(buffer));
buffer = absl::CordBuffer::CreateWithDefaultLimit(3);
memcpy(buffer.data(), "Abc", 3);
buffer.SetLength(3);
cord2.Prepend(std::move(buffer));
#ifndef NDEBUG
constexpr size_t kMaxDelta = 128 + 32;
#else
constexpr size_t kMaxDelta = 128 + 32 + 256;
#endif
EXPECT_LE(cord1.EstimatedMemoryUsage() - size1, kMaxDelta);
EXPECT_LE(cord2.EstimatedMemoryUsage() - size2, kMaxDelta);
EXPECT_EQ(cord1, absl::StrCat(test_data, "Abc"));
EXPECT_EQ(cord2, absl::StrCat("Abc", test_data));
}
TEST_P(CordTest, PrependSmallBuffer) {
absl::Cord cord;
absl::CordBuffer buffer = absl::CordBuffer::CreateWithDefaultLimit(3);
ASSERT_THAT(buffer.capacity(), Le(15));
memcpy(buffer.data(), "Abc", 3);
buffer.SetLength(3);
cord.Prepend(std::move(buffer));
EXPECT_EQ(buffer.length(), 0);
EXPECT_GT(buffer.capacity(), 0);
buffer = absl::CordBuffer::CreateWithDefaultLimit(3);
memcpy(buffer.data(), "defgh", 5);
buffer.SetLength(5);
cord.Prepend(std::move(buffer));
EXPECT_EQ(buffer.length(), 0);
EXPECT_GT(buffer.capacity(), 0);
EXPECT_THAT(cord.Chunks(), ElementsAre("defghAbc"));
}
TEST_P(CordTest, AppendLargeBuffer) {
absl::Cord cord;
std::string s1(700, '1');
absl::CordBuffer buffer = absl::CordBuffer::CreateWithDefaultLimit(s1.size());
memcpy(buffer.data(), s1.data(), s1.size());
buffer.SetLength(s1.size());
cord.Append(std::move(buffer));
EXPECT_EQ(buffer.length(), 0);
EXPECT_GT(buffer.capacity(), 0);
std::string s2(1000, '2');
buffer = absl::CordBuffer::CreateWithDefaultLimit(s2.size());
memcpy(buffer.data(), s2.data(), s2.size());
buffer.SetLength(s2.size());
cord.Append(std::move(buffer));
EXPECT_EQ(buffer.length(), 0);
EXPECT_GT(buffer.capacity(), 0);
EXPECT_THAT(cord.Chunks(), ElementsAre(s1, s2));
}
TEST_P(CordTest, PrependLargeBuffer) {
absl::Cord cord;
std::string s1(700, '1');
absl::CordBuffer buffer = absl::CordBuffer::CreateWithDefaultLimit(s1.size());
memcpy(buffer.data(), s1.data(), s1.size());
buffer.SetLength(s1.size());
cord.Prepend(std::move(buffer));
EXPECT_EQ(buffer.length(), 0);
EXPECT_GT(buffer.capacity(), 0);
std::string s2(1000, '2');
buffer = absl::CordBuffer::CreateWithDefaultLimit(s2.size());
memcpy(buffer.data(), s2.data(), s2.size());
buffer.SetLength(s2.size());
cord.Prepend(std::move(buffer));
EXPECT_EQ(buffer.length(), 0);
EXPECT_GT(buffer.capacity(), 0);
EXPECT_THAT(cord.Chunks(), ElementsAre(s2, s1));
}
class CordAppendBufferTest : public testing::TestWithParam<bool> {
public:
  bool is_default() const { return GetParam(); }
static std::string ToString(testing::TestParamInfo<bool> param) {
return param.param ? "DefaultLimit" : "CustomLimit";
}
size_t limit() const {
return is_default() ? absl::CordBuffer::kDefaultLimit
: absl::CordBuffer::kCustomLimit;
}
size_t maximum_payload() const {
return is_default() ? absl::CordBuffer::MaximumPayload()
: absl::CordBuffer::MaximumPayload(limit());
}
absl::CordBuffer GetAppendBuffer(absl::Cord& cord, size_t capacity,
size_t min_capacity = 16) {
return is_default()
? cord.GetAppendBuffer(capacity, min_capacity)
: cord.GetCustomAppendBuffer(limit(), capacity, min_capacity);
}
};
INSTANTIATE_TEST_SUITE_P(WithParam, CordAppendBufferTest, testing::Bool(),
CordAppendBufferTest::ToString);
TEST_P(CordAppendBufferTest, GetAppendBufferOnEmptyCord) {
absl::Cord cord;
absl::CordBuffer buffer = GetAppendBuffer(cord, 1000);
EXPECT_GE(buffer.capacity(), 1000);
EXPECT_EQ(buffer.length(), 0);
}
TEST_P(CordAppendBufferTest, GetAppendBufferOnInlinedCord) {
static constexpr int kInlinedSize = sizeof(absl::CordBuffer) - 1;
for (int size : {6, kInlinedSize - 3, kInlinedSize - 2, 1000}) {
absl::Cord cord("Abc");
absl::CordBuffer buffer = GetAppendBuffer(cord, size, 1);
EXPECT_GE(buffer.capacity(), 3 + size);
EXPECT_EQ(buffer.length(), 3);
EXPECT_EQ(absl::string_view(buffer.data(), buffer.length()), "Abc");
EXPECT_TRUE(cord.empty());
}
}
TEST_P(CordAppendBufferTest, GetAppendBufferOnInlinedCordCapacityCloseToMax) {
for (size_t dist_from_max = 0; dist_from_max <= 4; ++dist_from_max) {
absl::Cord cord("Abc");
size_t size = std::numeric_limits<size_t>::max() - dist_from_max;
absl::CordBuffer buffer = GetAppendBuffer(cord, size, 1);
EXPECT_GE(buffer.capacity(), maximum_payload());
EXPECT_EQ(buffer.length(), 3);
EXPECT_EQ(absl::string_view(buffer.data(), buffer.length()), "Abc");
EXPECT_TRUE(cord.empty());
}
}
TEST_P(CordAppendBufferTest, GetAppendBufferOnFlat) {
absl::Cord cord;
absl::CordBuffer buffer = absl::CordBuffer::CreateWithDefaultLimit(500);
const size_t expected_capacity = buffer.capacity();
buffer.SetLength(3);
memcpy(buffer.data(), "Abc", 3);
cord.Append(std::move(buffer));
buffer = GetAppendBuffer(cord, 6);
EXPECT_EQ(buffer.capacity(), expected_capacity);
EXPECT_EQ(buffer.length(), 3);
EXPECT_EQ(absl::string_view(buffer.data(), buffer.length()), "Abc");
EXPECT_TRUE(cord.empty());
}
TEST_P(CordAppendBufferTest, GetAppendBufferOnFlatWithoutMinCapacity) {
absl::Cord cord;
absl::CordBuffer buffer = absl::CordBuffer::CreateWithDefaultLimit(500);
buffer.SetLength(30);
memset(buffer.data(), 'x', 30);
cord.Append(std::move(buffer));
buffer = GetAppendBuffer(cord, 1000, 900);
EXPECT_GE(buffer.capacity(), 1000);
EXPECT_EQ(buffer.length(), 0);
EXPECT_EQ(cord, std::string(30, 'x'));
}
TEST_P(CordAppendBufferTest, GetAppendBufferOnTree) {
RandomEngine rng;
for (int num_flats : {2, 3, 100}) {
absl::Cord cord;
std::string prefix;
std::string last;
for (int i = 0; i < num_flats - 1; ++i) {
prefix += last;
last = RandomLowercaseString(&rng, 10);
absl::CordBuffer buffer = absl::CordBuffer::CreateWithDefaultLimit(500);
buffer.SetLength(10);
memcpy(buffer.data(), last.data(), 10);
cord.Append(std::move(buffer));
}
absl::CordBuffer buffer = GetAppendBuffer(cord, 6);
EXPECT_GE(buffer.capacity(), 500);
EXPECT_EQ(buffer.length(), 10);
EXPECT_EQ(absl::string_view(buffer.data(), buffer.length()), last);
EXPECT_EQ(cord, prefix);
}
}
TEST_P(CordAppendBufferTest, GetAppendBufferOnTreeWithoutMinCapacity) {
absl::Cord cord;
for (int i = 0; i < 2; ++i) {
absl::CordBuffer buffer = absl::CordBuffer::CreateWithDefaultLimit(500);
buffer.SetLength(3);
memcpy(buffer.data(), i ? "def" : "Abc", 3);
cord.Append(std::move(buffer));
}
absl::CordBuffer buffer = GetAppendBuffer(cord, 1000, 900);
EXPECT_GE(buffer.capacity(), 1000);
EXPECT_EQ(buffer.length(), 0);
EXPECT_EQ(cord, "Abcdef");
}
TEST_P(CordAppendBufferTest, GetAppendBufferOnSubstring) {
absl::Cord cord;
absl::CordBuffer buffer = absl::CordBuffer::CreateWithDefaultLimit(500);
buffer.SetLength(450);
memset(buffer.data(), 'x', 450);
cord.Append(std::move(buffer));
cord.RemovePrefix(1);
buffer = GetAppendBuffer(cord, 6);
EXPECT_EQ(buffer.length(), 0);
EXPECT_EQ(cord, std::string(449, 'x'));
}
TEST_P(CordAppendBufferTest, GetAppendBufferOnSharedCord) {
absl::Cord cord;
absl::CordBuffer buffer = absl::CordBuffer::CreateWithDefaultLimit(500);
buffer.SetLength(3);
memcpy(buffer.data(), "Abc", 3);
cord.Append(std::move(buffer));
absl::Cord shared_cord = cord;
buffer = GetAppendBuffer(cord, 6);
EXPECT_EQ(buffer.length(), 0);
EXPECT_EQ(cord, "Abc");
buffer = absl::CordBuffer::CreateWithDefaultLimit(500);
buffer.SetLength(3);
memcpy(buffer.data(), "def", 3);
cord.Append(std::move(buffer));
shared_cord = cord;
buffer = GetAppendBuffer(cord, 6);
EXPECT_EQ(buffer.length(), 0);
EXPECT_EQ(cord, "Abcdef");
}
TEST_P(CordTest, TryFlatEmpty) {
absl::Cord c;
EXPECT_EQ(c.TryFlat(), "");
}
TEST_P(CordTest, TryFlatFlat) {
absl::Cord c("hello");
MaybeHarden(c);
EXPECT_EQ(c.TryFlat(), "hello");
}
TEST_P(CordTest, TryFlatSubstrInlined) {
absl::Cord c("hello");
c.RemovePrefix(1);
MaybeHarden(c);
EXPECT_EQ(c.TryFlat(), "ello");
}
TEST_P(CordTest, TryFlatSubstrFlat) {
absl::Cord c("longer than 15 bytes");
absl::Cord sub = absl::CordTestPeer::MakeSubstring(c, 1, c.size() - 1);
MaybeHarden(sub);
EXPECT_EQ(sub.TryFlat(), "onger than 15 bytes");
}
TEST_P(CordTest, TryFlatConcat) {
absl::Cord c = absl::MakeFragmentedCord({"hel", "lo"});
MaybeHarden(c);
EXPECT_EQ(c.TryFlat(), absl::nullopt);
}
TEST_P(CordTest, TryFlatExternal) {
absl::Cord c = absl::MakeCordFromExternal("hell", [](absl::string_view) {});
MaybeHarden(c);
EXPECT_EQ(c.TryFlat(), "hell");
}
TEST_P(CordTest, TryFlatSubstrExternal) {
absl::Cord c = absl::MakeCordFromExternal("hell", [](absl::string_view) {});
absl::Cord sub = absl::CordTestPeer::MakeSubstring(c, 1, c.size() - 1);
MaybeHarden(sub);
EXPECT_EQ(sub.TryFlat(), "ell");
}
TEST_P(CordTest, TryFlatCommonlyAssumedInvariants) {
absl::string_view fragments[] = {"A fragmented test",
" cord",
" to test subcords",
" of ",
"a",
" cord for",
" each chunk "
"returned by the ",
"iterator"};
absl::Cord c = absl::MakeFragmentedCord(fragments);
MaybeHarden(c);
int fragment = 0;
int offset = 0;
absl::Cord::CharIterator itc = c.char_begin();
for (absl::string_view sv : c.Chunks()) {
absl::string_view expected = fragments[fragment];
absl::Cord subcord1 = c.Subcord(offset, sv.length());
absl::Cord subcord2 = absl::Cord::AdvanceAndRead(&itc, sv.size());
EXPECT_EQ(subcord1.TryFlat(), expected);
EXPECT_EQ(subcord2.TryFlat(), expected);
++fragment;
offset += sv.length();
}
}
static bool IsFlat(const absl::Cord& c) {
return c.chunk_begin() == c.chunk_end() || ++c.chunk_begin() == c.chunk_end();
}
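// Flattens a copy of `c` and checks that contents are preserved and that
// an already-flat, non-empty cord is not reallocated.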
static void VerifyFlatten(absl::Cord c) {
std::string old_contents(c);
absl::string_view old_flat;
bool already_flat_and_non_empty = IsFlat(c) && !c.empty();
if (already_flat_and_non_empty) {
old_flat = *c.chunk_begin();
}
absl::string_view new_flat = c.Flatten();
EXPECT_EQ(new_flat, old_contents);
EXPECT_EQ(std::string(c), old_contents);
if (already_flat_and_non_empty) {
EXPECT_EQ(old_flat.data(), new_flat.data())
<< "Allocated new memory even though the Cord was already flat.";
}
EXPECT_TRUE(IsFlat(c));
}
TEST_P(CordTest, Flatten) {
VerifyFlatten(absl::Cord());
VerifyFlatten(MaybeHardened(absl::Cord("small cord")));
VerifyFlatten(
MaybeHardened(absl::Cord("larger than small buffer optimization")));
VerifyFlatten(MaybeHardened(
absl::MakeFragmentedCord({"small ", "fragmented ", "cord"})));
RandomEngine rng(GTEST_FLAG_GET(random_seed));
VerifyFlatten(MaybeHardened(absl::Cord(RandomLowercaseString(&rng, 8192))));
}
namespace {
class TestData {
private:
std::vector<std::string> data_;
static std::string MakeString(int length) {
std::string result;
char buf[30];
snprintf(buf, sizeof(buf), "(%d)", length);
    while (result.size() < static_cast<size_t>(length)) {
result += buf;
}
result.resize(length);
return result;
}
public:
TestData() {
for (int i = 0; i < 30; i++) {
data_.push_back(MakeString(i));
}
static const int kMaxFlatLength = 4096 - 9;
static const int kHalf = kMaxFlatLength / 2;
for (int i = -10; i <= +10; i++) {
data_.push_back(MakeString(kHalf + i));
}
for (int i = -10; i <= +10; i++) {
data_.push_back(MakeString(kMaxFlatLength + i));
}
}
size_t size() const { return data_.size(); }
const std::string& data(size_t i) const { return data_[i]; }
};
}
TEST_P(CordTest, MultipleLengths) {
TestData d;
for (size_t i = 0; i < d.size(); i++) {
std::string a = d.data(i);
{
absl::Cord tmp(a);
absl::Cord x(tmp);
MaybeHarden(x);
EXPECT_EQ(a, std::string(x)) << "'" << a << "'";
}
{
absl::Cord x(a);
MaybeHarden(x);
EXPECT_EQ(a, std::string(x)) << "'" << a << "'";
}
{
absl::Cord self(a);
MaybeHarden(self);
self.Append(self);
EXPECT_EQ(a + a, std::string(self)) << "'" << a << "' + '" << a << "'";
}
{
absl::Cord self(a);
MaybeHarden(self);
self.Prepend(self);
EXPECT_EQ(a + a, std::string(self)) << "'" << a << "' + '" << a << "'";
}
for (size_t j = 0; j < d.size(); j++) {
std::string b = d.data(j);
{
absl::Cord x(a);
absl::Cord y(b);
MaybeHarden(x);
x = y;
EXPECT_EQ(b, std::string(x)) << "'" << a << "' + '" << b << "'";
}
{
absl::Cord x(a);
MaybeHarden(x);
x = b;
EXPECT_EQ(b, std::string(x)) << "'" << a << "' + '" << b << "'";
}
{
absl::Cord x(a);
absl::Cord y(b);
MaybeHarden(x);
x.Append(y);
EXPECT_EQ(a + b, std::string(x)) << "'" << a << "' + '" << b << "'";
}
{
absl::Cord x(a);
MaybeHarden(x);
x.Append(b);
EXPECT_EQ(a + b, std::string(x)) << "'" << a << "' + '" << b << "'";
}
{
absl::Cord x(a);
absl::Cord y(b);
MaybeHarden(x);
x.Prepend(y);
EXPECT_EQ(b + a, std::string(x)) << "'" << b << "' + '" << a << "'";
}
{
absl::Cord x(a);
MaybeHarden(x);
x.Prepend(b);
EXPECT_EQ(b + a, std::string(x)) << "'" << b << "' + '" << a << "'";
}
}
}
}
namespace {
TEST_P(CordTest, RemoveSuffixWithExternalOrSubstring) {
absl::Cord cord = absl::MakeCordFromExternal(
"foo bar baz", [](absl::string_view s) { DoNothing(s, nullptr); });
EXPECT_EQ("foo bar baz", std::string(cord));
MaybeHarden(cord);
cord.RemoveSuffix(4);
EXPECT_EQ("foo bar", std::string(cord));
MaybeHarden(cord);
cord.RemoveSuffix(4);
EXPECT_EQ("foo", std::string(cord));
}
TEST_P(CordTest, RemoveSuffixMakesZeroLengthNode) {
absl::Cord c;
c.Append(absl::Cord(std::string(100, 'x')));
absl::Cord other_ref = c;
EXPECT_THAT(other_ref, testing::Eq(c));
MaybeHarden(c);
c.Append(absl::Cord(std::string(200, 'y')));
c.RemoveSuffix(200);
EXPECT_EQ(std::string(100, 'x'), std::string(c));
}
}
namespace {
absl::Cord CordWithZedBlock(size_t size) {
char* data = new char[size];
if (size > 0) {
memset(data, 'z', size);
}
absl::Cord cord = absl::MakeCordFromExternal(
absl::string_view(data, size),
[](absl::string_view s) { delete[] s.data(); });
return cord;
}
TEST_P(CordTest, CordSpliceTestZedBlock) {
absl::Cord blob = CordWithZedBlock(10);
MaybeHarden(blob);
EXPECT_EQ(10, blob.size());
std::string s;
absl::CopyCordToString(blob, &s);
EXPECT_EQ("zzzzzzzzzz", s);
}
TEST_P(CordTest, CordSpliceTestZedBlock0) {
absl::Cord blob = CordWithZedBlock(0);
MaybeHarden(blob);
EXPECT_EQ(0, blob.size());
std::string s;
absl::CopyCordToString(blob, &s);
EXPECT_EQ("", s);
}
TEST_P(CordTest, CordSpliceTestZedBlockSuffix1) {
absl::Cord blob = CordWithZedBlock(10);
MaybeHarden(blob);
EXPECT_EQ(10, blob.size());
absl::Cord suffix(blob);
suffix.RemovePrefix(9);
EXPECT_EQ(1, suffix.size());
std::string s;
absl::CopyCordToString(suffix, &s);
EXPECT_EQ("z", s);
}
TEST_P(CordTest, CordSpliceTestZedBlockSuffix0) {
absl::Cord blob = CordWithZedBlock(10);
MaybeHarden(blob);
EXPECT_EQ(10, blob.size());
absl::Cord suffix(blob);
suffix.RemovePrefix(10);
EXPECT_EQ(0, suffix.size());
std::string s;
absl::CopyCordToString(suffix, &s);
EXPECT_EQ("", s);
}
absl::Cord BigCord(size_t len, char v) {
std::string s(len, v);
return absl::Cord(s);
}
absl::Cord SpliceCord(const absl::Cord& blob, int64_t offset,
const absl::Cord& block) {
CHECK_GE(offset, 0);
CHECK_LE(static_cast<size_t>(offset) + block.size(), blob.size());
absl::Cord result(blob);
result.RemoveSuffix(blob.size() - offset);
result.Append(block);
absl::Cord suffix(blob);
suffix.RemovePrefix(offset + block.size());
result.Append(suffix);
CHECK_EQ(blob.size(), result.size());
return result;
}
TEST_P(CordTest, CordSpliceTestRemoveEntireBlock1) {
absl::Cord zero = CordWithZedBlock(10);
MaybeHarden(zero);
absl::Cord suffix(zero);
suffix.RemovePrefix(10);
absl::Cord result;
result.Append(suffix);
}
TEST_P(CordTest, CordSpliceTestRemoveEntireBlock2) {
absl::Cord zero = CordWithZedBlock(10);
MaybeHarden(zero);
absl::Cord prefix(zero);
prefix.RemoveSuffix(10);
absl::Cord suffix(zero);
suffix.RemovePrefix(10);
absl::Cord result(prefix);
result.Append(suffix);
}
TEST_P(CordTest, CordSpliceTestRemoveEntireBlock3) {
absl::Cord blob = CordWithZedBlock(10);
absl::Cord block = BigCord(10, 'b');
MaybeHarden(blob);
MaybeHarden(block);
blob = SpliceCord(blob, 0, block);
}
struct CordCompareTestCase {
template <typename LHS, typename RHS>
CordCompareTestCase(const LHS& lhs, const RHS& rhs, bool use_crc)
: lhs_cord(lhs), rhs_cord(rhs) {
if (use_crc) {
lhs_cord.SetExpectedChecksum(1);
}
}
absl::Cord lhs_cord;
absl::Cord rhs_cord;
};
const auto sign = [](int x) { return x == 0 ? 0 : (x > 0 ? 1 : -1); };
void VerifyComparison(const CordCompareTestCase& test_case) {
std::string lhs_string(test_case.lhs_cord);
std::string rhs_string(test_case.rhs_cord);
int expected = sign(lhs_string.compare(rhs_string));
EXPECT_EQ(expected, test_case.lhs_cord.Compare(test_case.rhs_cord))
<< "LHS=" << lhs_string << "; RHS=" << rhs_string;
EXPECT_EQ(expected, test_case.lhs_cord.Compare(rhs_string))
<< "LHS=" << lhs_string << "; RHS=" << rhs_string;
EXPECT_EQ(-expected, test_case.rhs_cord.Compare(test_case.lhs_cord))
<< "LHS=" << rhs_string << "; RHS=" << lhs_string;
EXPECT_EQ(-expected, test_case.rhs_cord.Compare(lhs_string))
<< "LHS=" << rhs_string << "; RHS=" << lhs_string;
}
TEST_P(CordTest, Compare) {
absl::Cord subcord("aaaaaBBBBBcccccDDDDD");
subcord = subcord.Subcord(3, 10);
absl::Cord tmp("aaaaaaaaaaaaaaaa");
tmp.Append("BBBBBBBBBBBBBBBB");
absl::Cord concat = absl::Cord("cccccccccccccccc");
concat.Append("DDDDDDDDDDDDDDDD");
concat.Prepend(tmp);
absl::Cord concat2("aaaaaaaaaaaaa");
concat2.Append("aaaBBBBBBBBBBBBBBBBccccc");
concat2.Append("cccccccccccDDDDDDDDDDDDDD");
concat2.Append("DD");
const bool use_crc = UseCrc();
std::vector<CordCompareTestCase> test_cases = {{
{"abcdef", "abcdef", use_crc},
{"abcdef", "abcdee", use_crc},
{"abcdef", "abcdeg", use_crc},
{"bbcdef", "abcdef", use_crc},
{"bbcdef", "abcdeg", use_crc},
{"abcdefa", "abcdef", use_crc},
{"abcdef", "abcdefa", use_crc},
{"aaaaaBBBBBcccccDDDDD", "aaaaaBBBBBcccccDDDDD", use_crc},
{"aaaaaBBBBBcccccDDDDD", "aaaaaBBBBBxccccDDDDD", use_crc},
{"aaaaaBBBBBcxcccDDDDD", "aaaaaBBBBBcccccDDDDD", use_crc},
{"aaaaaBBBBBxccccDDDDD", "aaaaaBBBBBcccccDDDDX", use_crc},
{"aaaaaBBBBBcccccDDDDDa", "aaaaaBBBBBcccccDDDDD", use_crc},
{"aaaaaBBBBBcccccDDDDD", "aaaaaBBBBBcccccDDDDDa", use_crc},
{subcord, subcord, use_crc},
{subcord, "aaBBBBBccc", use_crc},
{subcord, "aaBBBBBccd", use_crc},
{subcord, "aaBBBBBccb", use_crc},
{subcord, "aaBBBBBxcb", use_crc},
{subcord, "aaBBBBBccca", use_crc},
{subcord, "aaBBBBBcc", use_crc},
{concat, concat, use_crc},
{concat,
"aaaaaaaaaaaaaaaaBBBBBBBBBBBBBBBBccccccccccccccccDDDDDDDDDDDDDDDD",
use_crc},
{concat,
"aaaaaaaaaaaaaaaaBBBBBBBBBBBBBBBBcccccccccccccccxDDDDDDDDDDDDDDDD",
use_crc},
{concat,
"aaaaaaaaaaaaaaaaBBBBBBBBBBBBBBBBacccccccccccccccDDDDDDDDDDDDDDDD",
use_crc},
{concat,
"aaaaaaaaaaaaaaaaBBBBBBBBBBBBBBBBccccccccccccccccDDDDDDDDDDDDDDD",
use_crc},
{concat,
"aaaaaaaaaaaaaaaaBBBBBBBBBBBBBBBBccccccccccccccccDDDDDDDDDDDDDDDDe",
use_crc},
{concat, concat2, use_crc},
}};
for (const auto& tc : test_cases) {
VerifyComparison(tc);
}
}
TEST_P(CordTest, CompareAfterAssign) {
absl::Cord a("aaaaaa1111111");
absl::Cord b("aaaaaa2222222");
MaybeHarden(a);
a = "cccccc";
b = "cccccc";
EXPECT_EQ(a, b);
EXPECT_FALSE(a < b);
a = "aaaa";
b = "bbbbb";
a = "";
b = "";
EXPECT_EQ(a, b);
EXPECT_FALSE(a < b);
}
static void TestCompare(const absl::Cord& c, const absl::Cord& d,
RandomEngine* rng) {
int expected = sign(std::string(c).compare(std::string(d)));
EXPECT_EQ(expected, sign(c.Compare(d))) << c << ", " << d;
}
TEST_P(CordTest, CompareComparisonIsUnsigned) {
RandomEngine rng(GTEST_FLAG_GET(random_seed));
std::uniform_int_distribution<uint32_t> uniform_uint8(0, 255);
char x = static_cast<char>(uniform_uint8(rng));
TestCompare(
absl::Cord(std::string(GetUniformRandomUpTo(&rng, 100), x)),
absl::Cord(std::string(GetUniformRandomUpTo(&rng, 100), x ^ 0x80)), &rng);
}
TEST_P(CordTest, CompareRandomComparisons) {
const int kIters = 5000;
RandomEngine rng(GTEST_FLAG_GET(random_seed));
int n = GetUniformRandomUpTo(&rng, 5000);
absl::Cord a[] = {MakeExternalCord(n),
absl::Cord("ant"),
absl::Cord("elephant"),
absl::Cord("giraffe"),
absl::Cord(std::string(GetUniformRandomUpTo(&rng, 100),
GetUniformRandomUpTo(&rng, 100))),
absl::Cord(""),
absl::Cord("x"),
absl::Cord("A"),
absl::Cord("B"),
absl::Cord("C")};
for (int i = 0; i < kIters; i++) {
absl::Cord c, d;
for (int j = 0; j < (i % 7) + 1; j++) {
c.Append(a[GetUniformRandomUpTo(&rng, ABSL_ARRAYSIZE(a))]);
d.Append(a[GetUniformRandomUpTo(&rng, ABSL_ARRAYSIZE(a))]);
}
std::bernoulli_distribution coin_flip(0.5);
MaybeHarden(c);
MaybeHarden(d);
TestCompare(coin_flip(rng) ? c : absl::Cord(std::string(c)),
coin_flip(rng) ? d : absl::Cord(std::string(d)), &rng);
}
}
template <typename T1, typename T2>
void CompareOperators() {
const T1 a("a");
const T2 b("b");
EXPECT_TRUE(a == a);
EXPECT_TRUE(std::is_pointer<T1>::value || a == T1("a"));
EXPECT_TRUE(std::is_pointer<T2>::value || a == T2("a"));
EXPECT_FALSE(a == b);
EXPECT_TRUE(a != b);
EXPECT_FALSE(a != a);
EXPECT_TRUE(a < b);
EXPECT_FALSE(b < a);
EXPECT_TRUE(b > a);
EXPECT_FALSE(a > b);
EXPECT_TRUE(a >= a);
EXPECT_TRUE(b >= a);
EXPECT_FALSE(a >= b);
EXPECT_TRUE(a <= a);
EXPECT_TRUE(a <= b);
EXPECT_FALSE(b <= a);
}
TEST_P(CordTest, ComparisonOperators_Cord_Cord) {
CompareOperators<absl::Cord, absl::Cord>();
}
TEST_P(CordTest, ComparisonOperators_Cord_StringPiece) {
CompareOperators<absl::Cord, absl::string_view>();
}
TEST_P(CordTest, ComparisonOperators_StringPiece_Cord) {
CompareOperators<absl::string_view, absl::Cord>();
}
TEST_P(CordTest, ComparisonOperators_Cord_string) {
CompareOperators<absl::Cord, std::string>();
}
TEST_P(CordTest, ComparisonOperators_string_Cord) {
CompareOperators<std::string, absl::Cord>();
}
TEST_P(CordTest, ComparisonOperators_stdstring_Cord) {
CompareOperators<std::string, absl::Cord>();
}
TEST_P(CordTest, ComparisonOperators_Cord_stdstring) {
CompareOperators<absl::Cord, std::string>();
}
TEST_P(CordTest, ComparisonOperators_charstar_Cord) {
CompareOperators<const char*, absl::Cord>();
}
TEST_P(CordTest, ComparisonOperators_Cord_charstar) {
CompareOperators<absl::Cord, const char*>();
}
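// MakeCordFromExternal must invoke its releaser exactly once: immediately if
// the bytes are copied inline (e.g. empty input), otherwise when the last
// cord referencing the external storage is destroyed.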
TEST_P(CordTest, ConstructFromExternalReleaserInvoked) {
{
bool invoked = false;
auto releaser = [&invoked](absl::string_view) { invoked = true; };
{
auto c = absl::MakeCordFromExternal("", releaser);
EXPECT_THAT(c, testing::Eq(""));
EXPECT_TRUE(invoked);
}
}
std::string large_dummy(2048, 'c');
{
bool invoked = false;
auto releaser = [&invoked](absl::string_view) { invoked = true; };
{
auto c = absl::MakeCordFromExternal(large_dummy, releaser);
EXPECT_THAT(c, testing::Eq(large_dummy));
EXPECT_FALSE(invoked);
}
EXPECT_TRUE(invoked);
}
{
bool invoked = false;
auto releaser = [&invoked](absl::string_view) { invoked = true; };
{
absl::Cord copy;
{
auto c = absl::MakeCordFromExternal(large_dummy, releaser);
copy = c;
EXPECT_FALSE(invoked);
}
EXPECT_FALSE(invoked);
}
EXPECT_TRUE(invoked);
}
}
TEST_P(CordTest, ConstructFromExternalCompareContents) {
RandomEngine rng(GTEST_FLAG_GET(random_seed));
for (int length = 1; length <= 2048; length *= 2) {
std::string data = RandomLowercaseString(&rng, length);
auto* external = new std::string(data);
auto cord =
absl::MakeCordFromExternal(*external, [external](absl::string_view sv) {
EXPECT_EQ(external->data(), sv.data());
EXPECT_EQ(external->size(), sv.size());
delete external;
});
MaybeHarden(cord);
EXPECT_EQ(data, cord);
}
}
TEST_P(CordTest, ConstructFromExternalLargeReleaser) {
RandomEngine rng(GTEST_FLAG_GET(random_seed));
constexpr size_t kLength = 256;
std::string data = RandomLowercaseString(&rng, kLength);
std::array<char, kLength> data_array;
for (size_t i = 0; i < kLength; ++i) data_array[i] = data[i];
bool invoked = false;
auto releaser = [data_array, &invoked](absl::string_view data) {
EXPECT_EQ(data, absl::string_view(data_array.data(), data_array.size()));
invoked = true;
};
(void)MaybeHardened(absl::MakeCordFromExternal(data, releaser));
EXPECT_TRUE(invoked);
}
TEST_P(CordTest, ConstructFromExternalFunctionPointerReleaser) {
static absl::string_view data("hello world");
static bool invoked;
auto* releaser =
static_cast<void (*)(absl::string_view)>([](absl::string_view sv) {
EXPECT_EQ(data, sv);
invoked = true;
});
invoked = false;
(void)MaybeHardened(absl::MakeCordFromExternal(data, releaser));
EXPECT_TRUE(invoked);
invoked = false;
(void)MaybeHardened(absl::MakeCordFromExternal(data, *releaser));
EXPECT_TRUE(invoked);
}
TEST_P(CordTest, ConstructFromExternalMoveOnlyReleaser) {
struct Releaser {
explicit Releaser(bool* invoked) : invoked(invoked) {}
Releaser(Releaser&& other) noexcept : invoked(other.invoked) {}
void operator()(absl::string_view) const { *invoked = true; }
bool* invoked;
};
bool invoked = false;
(void)MaybeHardened(absl::MakeCordFromExternal("dummy", Releaser(&invoked)));
EXPECT_TRUE(invoked);
}
TEST_P(CordTest, ConstructFromExternalNoArgLambda) {
bool invoked = false;
(void)MaybeHardened(
absl::MakeCordFromExternal("dummy", [&invoked]() { invoked = true; }));
EXPECT_TRUE(invoked);
}
TEST_P(CordTest, ConstructFromExternalStringViewArgLambda) {
bool invoked = false;
(void)MaybeHardened(absl::MakeCordFromExternal(
"dummy", [&invoked](absl::string_view) { invoked = true; }));
EXPECT_TRUE(invoked);
}
TEST_P(CordTest, ConstructFromExternalNonTrivialReleaserDestructor) {
struct Releaser {
explicit Releaser(bool* destroyed) : destroyed(destroyed) {}
~Releaser() { *destroyed = true; }
void operator()(absl::string_view) const {}
bool* destroyed;
};
bool destroyed = false;
Releaser releaser(&destroyed);
(void)MaybeHardened(absl::MakeCordFromExternal("dummy", releaser));
EXPECT_TRUE(destroyed);
}
TEST_P(CordTest, ConstructFromExternalReferenceQualifierOverloads) {
enum InvokedAs { kMissing, kLValue, kRValue };
enum CopiedAs { kNone, kMove, kCopy };
struct Tracker {
CopiedAs copied_as = kNone;
InvokedAs invoked_as = kMissing;
void Record(InvokedAs rhs) {
ASSERT_EQ(invoked_as, kMissing);
invoked_as = rhs;
}
void Record(CopiedAs rhs) {
if (copied_as == kNone || rhs == kCopy) copied_as = rhs;
}
} tracker;
class Releaser {
public:
explicit Releaser(Tracker* tracker) : tr_(tracker) { *tracker = Tracker(); }
Releaser(Releaser&& rhs) : tr_(rhs.tr_) { tr_->Record(kMove); }
Releaser(const Releaser& rhs) : tr_(rhs.tr_) { tr_->Record(kCopy); }
void operator()(absl::string_view) & { tr_->Record(kLValue); }
void operator()(absl::string_view) && { tr_->Record(kRValue); }
private:
Tracker* tr_;
};
const Releaser releaser1(&tracker);
(void)MaybeHardened(absl::MakeCordFromExternal("", releaser1));
EXPECT_EQ(tracker.copied_as, kCopy);
EXPECT_EQ(tracker.invoked_as, kRValue);
const Releaser releaser2(&tracker);
(void)MaybeHardened(absl::MakeCordFromExternal("", releaser2));
EXPECT_EQ(tracker.copied_as, kCopy);
EXPECT_EQ(tracker.invoked_as, kRValue);
Releaser releaser3(&tracker);
(void)MaybeHardened(absl::MakeCordFromExternal("", std::move(releaser3)));
EXPECT_EQ(tracker.copied_as, kMove);
EXPECT_EQ(tracker.invoked_as, kRValue);
Releaser releaser4(&tracker);
(void)MaybeHardened(absl::MakeCordFromExternal("dummy", releaser4));
EXPECT_EQ(tracker.copied_as, kCopy);
EXPECT_EQ(tracker.invoked_as, kRValue);
const Releaser releaser5(&tracker);
(void)MaybeHardened(absl::MakeCordFromExternal("dummy", releaser5));
EXPECT_EQ(tracker.copied_as, kCopy);
EXPECT_EQ(tracker.invoked_as, kRValue);
Releaser releaser6(&tracker);
(void)MaybeHardened(absl::MakeCordFromExternal("foo", std::move(releaser6)));
EXPECT_EQ(tracker.copied_as, kMove);
EXPECT_EQ(tracker.invoked_as, kRValue);
}
TEST_P(CordTest, ExternalMemoryBasicUsage) {
static const char* strings[] = {"", "hello", "there"};
for (const char* str : strings) {
absl::Cord dst("(prefix)");
MaybeHarden(dst);
AddExternalMemory(str, &dst);
MaybeHarden(dst);
dst.Append("(suffix)");
EXPECT_EQ((std::string("(prefix)") + str + std::string("(suffix)")),
std::string(dst));
}
}
TEST_P(CordTest, ExternalMemoryRemovePrefixSuffix) {
absl::Cord cord = MakeComposite();
std::string s = std::string(cord);
for (int offset = 0; offset <= s.size(); offset++) {
for (int length = 0; length <= s.size() - offset; length++) {
absl::Cord result(cord);
MaybeHarden(result);
result.RemovePrefix(offset);
MaybeHarden(result);
result.RemoveSuffix(result.size() - length);
EXPECT_EQ(s.substr(offset, length), std::string(result))
<< offset << " " << length;
}
}
}
TEST_P(CordTest, ExternalMemoryGet) {
absl::Cord cord("hello");
AddExternalMemory(" world!", &cord);
MaybeHarden(cord);
AddExternalMemory(" how are ", &cord);
cord.Append(" you?");
MaybeHarden(cord);
std::string s = std::string(cord);
for (int i = 0; i < s.size(); i++) {
EXPECT_EQ(s[i], cord[i]);
}
}
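// The memory-accounting tests below cover all three accounting modes: the
// default total, kTotalMorePrecise (nodes reachable more than once counted
// a single time), and kFairShare (shared node costs split across references).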
constexpr auto kFairShare = absl::CordMemoryAccounting::kFairShare;
constexpr auto kTotalMorePrecise =
absl::CordMemoryAccounting::kTotalMorePrecise;
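// Helper: builds a cord from a string of `n` copies of `c`.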
absl::Cord MakeCord(size_t n, char c) {
const std::string s(n, c);
return absl::Cord(s);
}
TEST(CordTest, CordMemoryUsageEmpty) {
absl::Cord cord;
EXPECT_EQ(sizeof(absl::Cord), cord.EstimatedMemoryUsage());
EXPECT_EQ(sizeof(absl::Cord), cord.EstimatedMemoryUsage(kFairShare));
EXPECT_EQ(sizeof(absl::Cord), cord.EstimatedMemoryUsage(kTotalMorePrecise));
}
TEST(CordTest, CordMemoryUsageInlined) {
absl::Cord a("hello");
EXPECT_EQ(a.EstimatedMemoryUsage(), sizeof(absl::Cord));
EXPECT_EQ(a.EstimatedMemoryUsage(kFairShare), sizeof(absl::Cord));
EXPECT_EQ(a.EstimatedMemoryUsage(kTotalMorePrecise), sizeof(absl::Cord));
}
TEST(CordTest, CordMemoryUsageExternalMemory) {
absl::Cord cord;
AddExternalMemory(std::string(1000, 'x'), &cord);
const size_t expected =
sizeof(absl::Cord) + 1000 + sizeof(CordRepExternal) + sizeof(intptr_t);
EXPECT_EQ(cord.EstimatedMemoryUsage(), expected);
EXPECT_EQ(cord.EstimatedMemoryUsage(kFairShare), expected);
EXPECT_EQ(cord.EstimatedMemoryUsage(kTotalMorePrecise), expected);
}
TEST(CordTest, CordMemoryUsageFlat) {
absl::Cord cord = MakeCord(1000, 'a');
const size_t flat_size =
absl::CordTestPeer::Tree(cord)->flat()->AllocatedSize();
EXPECT_EQ(cord.EstimatedMemoryUsage(), sizeof(absl::Cord) + flat_size);
EXPECT_EQ(cord.EstimatedMemoryUsage(kFairShare),
sizeof(absl::Cord) + flat_size);
EXPECT_EQ(cord.EstimatedMemoryUsage(kTotalMorePrecise),
sizeof(absl::Cord) + flat_size);
}
TEST(CordTest, CordMemoryUsageSubStringSharedFlat) {
absl::Cord flat = MakeCord(2000, 'a');
const size_t flat_size =
absl::CordTestPeer::Tree(flat)->flat()->AllocatedSize();
absl::Cord cord = flat.Subcord(500, 1000);
EXPECT_EQ(cord.EstimatedMemoryUsage(),
sizeof(absl::Cord) + sizeof(CordRepSubstring) + flat_size);
EXPECT_EQ(cord.EstimatedMemoryUsage(kTotalMorePrecise),
sizeof(absl::Cord) + sizeof(CordRepSubstring) + flat_size);
EXPECT_EQ(cord.EstimatedMemoryUsage(kFairShare),
sizeof(absl::Cord) + sizeof(CordRepSubstring) + flat_size / 2);
}
TEST(CordTest, CordMemoryUsageFlatShared) {
absl::Cord shared = MakeCord(1000, 'a');
absl::Cord cord(shared);
const size_t flat_size =
absl::CordTestPeer::Tree(cord)->flat()->AllocatedSize();
EXPECT_EQ(cord.EstimatedMemoryUsage(), sizeof(absl::Cord) + flat_size);
EXPECT_EQ(cord.EstimatedMemoryUsage(kTotalMorePrecise),
sizeof(absl::Cord) + flat_size);
EXPECT_EQ(cord.EstimatedMemoryUsage(kFairShare),
sizeof(absl::Cord) + flat_size / 2);
}
TEST(CordTest, CordMemoryUsageFlatHardenedAndShared) {
absl::Cord shared = MakeCord(1000, 'a');
absl::Cord cord(shared);
const size_t flat_size =
absl::CordTestPeer::Tree(cord)->flat()->AllocatedSize();
cord.SetExpectedChecksum(1);
EXPECT_EQ(cord.EstimatedMemoryUsage(),
sizeof(absl::Cord) + sizeof(CordRepCrc) + flat_size);
EXPECT_EQ(cord.EstimatedMemoryUsage(kFairShare),
sizeof(absl::Cord) + sizeof(CordRepCrc) + flat_size / 2);
absl::Cord cord2(cord);
EXPECT_EQ(cord2.EstimatedMemoryUsage(),
sizeof(absl::Cord) + sizeof(CordRepCrc) + flat_size);
EXPECT_EQ(cord2.EstimatedMemoryUsage(kTotalMorePrecise),
sizeof(absl::Cord) + sizeof(CordRepCrc) + flat_size);
EXPECT_EQ(cord2.EstimatedMemoryUsage(kFairShare),
sizeof(absl::Cord) + (sizeof(CordRepCrc) + flat_size / 2) / 2);
}
TEST(CordTest, CordMemoryUsageBTree) {
absl::Cord cord1;
size_t flats1_size = 0;
absl::Cord flats1[4] = {MakeCord(1000, 'a'), MakeCord(1100, 'a'),
MakeCord(1200, 'a'), MakeCord(1300, 'a')};
for (absl::Cord flat : flats1) {
flats1_size += absl::CordTestPeer::Tree(flat)->flat()->AllocatedSize();
cord1.Append(std::move(flat));
}
if (!absl::CordTestPeer::Tree(cord1)->IsBtree()) {
LOG(WARNING) << "Cord library code not respecting btree flag";
return;
}
size_t rep1_size = sizeof(CordRepBtree) + flats1_size;
size_t rep1_shared_size = sizeof(CordRepBtree) + flats1_size / 2;
EXPECT_EQ(cord1.EstimatedMemoryUsage(), sizeof(absl::Cord) + rep1_size);
EXPECT_EQ(cord1.EstimatedMemoryUsage(kTotalMorePrecise),
sizeof(absl::Cord) + rep1_size);
EXPECT_EQ(cord1.EstimatedMemoryUsage(kFairShare),
sizeof(absl::Cord) + rep1_shared_size);
absl::Cord cord2;
size_t flats2_size = 0;
absl::Cord flats2[4] = {MakeCord(600, 'a'), MakeCord(700, 'a'),
MakeCord(800, 'a'), MakeCord(900, 'a')};
for (absl::Cord& flat : flats2) {
flats2_size += absl::CordTestPeer::Tree(flat)->flat()->AllocatedSize();
cord2.Append(std::move(flat));
}
size_t rep2_size = sizeof(CordRepBtree) + flats2_size;
EXPECT_EQ(cord2.EstimatedMemoryUsage(), sizeof(absl::Cord) + rep2_size);
EXPECT_EQ(cord2.EstimatedMemoryUsage(kTotalMorePrecise),
sizeof(absl::Cord) + rep2_size);
EXPECT_EQ(cord2.EstimatedMemoryUsage(kFairShare),
sizeof(absl::Cord) + rep2_size);
absl::Cord cord(cord1);
cord.Append(std::move(cord2));
EXPECT_EQ(cord.EstimatedMemoryUsage(),
sizeof(absl::Cord) + sizeof(CordRepBtree) + rep1_size + rep2_size);
EXPECT_EQ(cord.EstimatedMemoryUsage(kTotalMorePrecise),
sizeof(absl::Cord) + sizeof(CordRepBtree) + rep1_size + rep2_size);
EXPECT_EQ(cord.EstimatedMemoryUsage(kFairShare),
sizeof(absl::Cord) + sizeof(CordRepBtree) + rep1_shared_size / 2 +
rep2_size);
}
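// A cord's hash must not depend on how its bytes are fragmented; these cases
// deliberately straddle the 1024-byte piecewise-hashing chunk boundary.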
TEST(CordTest, TestHashFragmentation) {
EXPECT_EQ(1024, absl::hash_internal::PiecewiseChunkSize());
EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly({
absl::Cord(),
absl::MakeFragmentedCord({std::string(600, 'a'), std::string(600, 'a')}),
absl::MakeFragmentedCord({std::string(1200, 'a')}),
absl::MakeFragmentedCord({std::string(900, 'b'), std::string(900, 'b')}),
absl::MakeFragmentedCord({std::string(1800, 'b')}),
absl::MakeFragmentedCord(
{std::string(2000, 'c'), std::string(2000, 'c')}),
absl::MakeFragmentedCord({std::string(4000, 'c')}),
absl::MakeFragmentedCord({std::string(1024, 'd')}),
absl::MakeFragmentedCord({std::string(1023, 'd'), "d"}),
absl::MakeFragmentedCord({std::string(1025, 'e')}),
absl::MakeFragmentedCord({std::string(1024, 'e'), "e"}),
absl::MakeFragmentedCord({std::string(1023, 'e'), "e", "e"}),
}));
}
TEST_P(CordTest, CordMemoryUsageInlineRep) {
constexpr size_t kMaxInline = 15;
const std::string small_string(kMaxInline, 'x');
absl::Cord c1(small_string);
absl::Cord c2;
c2.Append(small_string);
EXPECT_EQ(c1, c2);
EXPECT_EQ(c1.EstimatedMemoryUsage(), c2.EstimatedMemoryUsage());
}
TEST_P(CordTest, CordMemoryUsageTotalMorePreciseMode) {
constexpr size_t kChunkSize = 2000;
std::string tmp_str(kChunkSize, 'x');
const absl::Cord flat(std::move(tmp_str));
absl::Cord fragmented(flat);
fragmented.Append(flat);
const size_t flat_internal_usage =
flat.EstimatedMemoryUsage() - sizeof(absl::Cord);
EXPECT_EQ(fragmented.EstimatedMemoryUsage(kTotalMorePrecise),
sizeof(absl::Cord) +
sizeof(CordRepBtree) +
flat_internal_usage);
EXPECT_EQ(fragmented.EstimatedMemoryUsage(),
sizeof(absl::Cord) +
sizeof(CordRepBtree) +
2 * flat_internal_usage);
}
TEST_P(CordTest, CordMemoryUsageTotalMorePreciseModeWithSubstring) {
constexpr size_t kChunkSize = 2000;
std::string tmp_str(kChunkSize, 'x');
const absl::Cord flat(std::move(tmp_str));
absl::Cord fragmented;
fragmented.Append(flat.Subcord(1, kChunkSize - 2));
fragmented.Append(flat.Subcord(1, kChunkSize - 2));
const size_t flat_internal_usage =
flat.EstimatedMemoryUsage() - sizeof(absl::Cord);
EXPECT_EQ(fragmented.EstimatedMemoryUsage(kTotalMorePrecise),
sizeof(absl::Cord) +
sizeof(CordRepBtree) +
2 * sizeof(CordRepSubstring) +
flat_internal_usage);
EXPECT_EQ(fragmented.EstimatedMemoryUsage(),
sizeof(absl::Cord) +
sizeof(CordRepBtree) +
2 * sizeof(CordRepSubstring) +
2 * flat_internal_usage);
}
}
TEST_P(CordTest, Concat_Append) {
absl::Cord s1("foobarbarbarbarbar");
MaybeHarden(s1);
s1.Append("abcdefgabcdefgabcdefgabcdefgabcdefgabcdefgabcdefg");
size_t size = s1.size();
absl::Cord s2 = s1;
MaybeHarden(s2);
s2.Append("x");
EXPECT_EQ(s1.size(), size);
EXPECT_EQ(s2.size(), size + 1);
}
TEST_P(CordTest, DiabolicalGrowth) {
RandomEngine rng(GTEST_FLAG_GET(random_seed));
const std::string expected = RandomLowercaseString(&rng, 5000);
absl::Cord cord;
for (char c : expected) {
absl::Cord shared(cord);
EXPECT_THAT(cord, testing::Eq(shared));
cord.Append(absl::string_view(&c, 1));
MaybeHarden(cord);
}
std::string value;
absl::CopyCordToString(cord, &value);
EXPECT_EQ(value, expected);
LOG(INFO) << "Diabolical size allocated = " << cord.EstimatedMemoryUsage();
}
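// Builds a cord larger than 4 GiB (about 2 GiB on 32-bit targets) out of
// bogus external string_views, so huge-size bookkeeping can be tested
// without actually allocating the bytes.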
static absl::Cord MakeHuge(absl::string_view prefix) {
absl::Cord cord;
if (sizeof(size_t) > 4) {
const size_t size =
static_cast<size_t>(std::numeric_limits<uint32_t>::max()) + 314;
cord.Append(absl::MakeCordFromExternal(
absl::string_view(prefix.data(), size),
[](absl::string_view s) { DoNothing(s, nullptr); }));
} else {
const size_t s1 = (1u << 31) - 1;
const size_t s2 = 600;
cord.Append(absl::MakeCordFromExternal(
absl::string_view(prefix.data(), s1),
[](absl::string_view s) { DoNothing(s, nullptr); }));
cord.Append(absl::MakeCordFromExternal(
absl::string_view("", s2),
[](absl::string_view s) { DoNothing(s, nullptr); }));
}
return cord;
}
TEST_P(CordTest, HugeCord) {
absl::Cord cord = MakeHuge("huge cord");
MaybeHarden(cord);
const size_t acceptable_delta =
100 + (UseCrc() ? sizeof(absl::cord_internal::CordRepCrc) : 0);
EXPECT_LE(cord.size(), cord.EstimatedMemoryUsage());
EXPECT_GE(cord.size() + acceptable_delta, cord.EstimatedMemoryUsage());
}
TEST_P(CordTest, AppendSelf) {
absl::Cord empty;
MaybeHarden(empty);
empty.Append(empty);
ASSERT_EQ(empty, "");
std::string control_data = "Abc";
absl::Cord data(control_data);
while (control_data.length() < 0x4000) {
MaybeHarden(data);
data.Append(data);
control_data.append(control_data);
ASSERT_EQ(control_data, data);
}
}
TEST_P(CordTest, MakeFragmentedCordFromInitializerList) {
absl::Cord fragmented =
absl::MakeFragmentedCord({"A ", "fragmented ", "Cord"});
MaybeHarden(fragmented);
EXPECT_EQ("A fragmented Cord", fragmented);
auto chunk_it = fragmented.chunk_begin();
ASSERT_TRUE(chunk_it != fragmented.chunk_end());
EXPECT_EQ("A ", *chunk_it);
ASSERT_TRUE(++chunk_it != fragmented.chunk_end());
EXPECT_EQ("fragmented ", *chunk_it);
ASSERT_TRUE(++chunk_it != fragmented.chunk_end());
EXPECT_EQ("Cord", *chunk_it);
ASSERT_TRUE(++chunk_it == fragmented.chunk_end());
}
TEST_P(CordTest, MakeFragmentedCordFromVector) {
std::vector<absl::string_view> chunks = {"A ", "fragmented ", "Cord"};
absl::Cord fragmented = absl::MakeFragmentedCord(chunks);
MaybeHarden(fragmented);
EXPECT_EQ("A fragmented Cord", fragmented);
auto chunk_it = fragmented.chunk_begin();
ASSERT_TRUE(chunk_it != fragmented.chunk_end());
EXPECT_EQ("A ", *chunk_it);
ASSERT_TRUE(++chunk_it != fragmented.chunk_end());
EXPECT_EQ("fragmented ", *chunk_it);
ASSERT_TRUE(++chunk_it != fragmented.chunk_end());
EXPECT_EQ("Cord", *chunk_it);
ASSERT_TRUE(++chunk_it == fragmented.chunk_end());
}
TEST_P(CordTest, CordChunkIteratorTraits) {
static_assert(std::is_copy_constructible<absl::Cord::ChunkIterator>::value,
"");
static_assert(std::is_copy_assignable<absl::Cord::ChunkIterator>::value, "");
static_assert(std::is_move_constructible<absl::Cord::ChunkIterator>::value,
"");
static_assert(std::is_move_assignable<absl::Cord::ChunkIterator>::value, "");
static_assert(
std::is_same<
std::iterator_traits<absl::Cord::ChunkIterator>::iterator_category,
std::input_iterator_tag>::value,
"");
static_assert(
std::is_same<std::iterator_traits<absl::Cord::ChunkIterator>::value_type,
absl::string_view>::value,
"");
static_assert(
std::is_same<
std::iterator_traits<absl::Cord::ChunkIterator>::difference_type,
ptrdiff_t>::value,
"");
static_assert(
std::is_same<std::iterator_traits<absl::Cord::ChunkIterator>::pointer,
const absl::string_view*>::value,
"");
static_assert(
std::is_same<std::iterator_traits<absl::Cord::ChunkIterator>::reference,
absl::string_view>::value,
"");
}
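// Walks the cord with both pre- and post-increment chunk iterators, checking
// each chunk against the corresponding slice of the flattened contents and
// that exactly `expected_chunks` chunks are visited.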
static void VerifyChunkIterator(const absl::Cord& cord,
size_t expected_chunks) {
EXPECT_EQ(cord.chunk_begin() == cord.chunk_end(), cord.empty()) << cord;
EXPECT_EQ(cord.chunk_begin() != cord.chunk_end(), !cord.empty());
absl::Cord::ChunkRange range = cord.Chunks();
EXPECT_EQ(range.begin() == range.end(), cord.empty());
EXPECT_EQ(range.begin() != range.end(), !cord.empty());
std::string content(cord);
size_t pos = 0;
auto pre_iter = cord.chunk_begin(), post_iter = cord.chunk_begin();
size_t n_chunks = 0;
while (pre_iter != cord.chunk_end() && post_iter != cord.chunk_end()) {
EXPECT_FALSE(pre_iter == cord.chunk_end());
EXPECT_FALSE(post_iter == cord.chunk_end());
EXPECT_EQ(pre_iter, post_iter);
EXPECT_EQ(*pre_iter, *post_iter);
EXPECT_EQ(pre_iter->data(), (*pre_iter).data());
EXPECT_EQ(pre_iter->size(), (*pre_iter).size());
absl::string_view chunk = *pre_iter;
EXPECT_FALSE(chunk.empty());
EXPECT_LE(pos + chunk.size(), content.size());
EXPECT_EQ(absl::string_view(content.c_str() + pos, chunk.size()), chunk);
int n_equal_iterators = 0;
for (absl::Cord::ChunkIterator it = range.begin(); it != range.end();
++it) {
n_equal_iterators += static_cast<int>(it == pre_iter);
}
EXPECT_EQ(n_equal_iterators, 1);
++pre_iter;
EXPECT_EQ(*post_iter++, chunk);
pos += chunk.size();
++n_chunks;
}
EXPECT_EQ(expected_chunks, n_chunks);
EXPECT_EQ(pos, content.size());
EXPECT_TRUE(pre_iter == cord.chunk_end());
EXPECT_TRUE(post_iter == cord.chunk_end());
}
TEST_P(CordTest, CordChunkIteratorOperations) {
absl::Cord empty_cord;
VerifyChunkIterator(empty_cord, 0);
absl::Cord small_buffer_cord("small cord");
MaybeHarden(small_buffer_cord);
VerifyChunkIterator(small_buffer_cord, 1);
absl::Cord flat_node_cord("larger than small buffer optimization");
MaybeHarden(flat_node_cord);
VerifyChunkIterator(flat_node_cord, 1);
VerifyChunkIterator(MaybeHardened(absl::MakeFragmentedCord(
{"a ", "small ", "fragmented ", "cord ", "for ",
"testing ", "chunk ", "iterations."})),
8);
absl::Cord reused_nodes_cord(std::string(40, 'c'));
reused_nodes_cord.Prepend(absl::Cord(std::string(40, 'b')));
MaybeHarden(reused_nodes_cord);
reused_nodes_cord.Prepend(absl::Cord(std::string(40, 'a')));
size_t expected_chunks = 3;
for (int i = 0; i < 8; ++i) {
reused_nodes_cord.Prepend(reused_nodes_cord);
MaybeHarden(reused_nodes_cord);
expected_chunks *= 2;
VerifyChunkIterator(reused_nodes_cord, expected_chunks);
}
RandomEngine rng(GTEST_FLAG_GET(random_seed));
absl::Cord flat_cord(RandomLowercaseString(&rng, 256));
absl::Cord subcords;
for (int i = 0; i < 128; ++i) subcords.Prepend(flat_cord.Subcord(i, 128));
VerifyChunkIterator(subcords, 128);
}
TEST_P(CordTest, AdvanceAndReadOnDataEdge) {
RandomEngine rng(GTEST_FLAG_GET(random_seed));
const std::string data = RandomLowercaseString(&rng, 2000);
for (bool as_flat : {true, false}) {
SCOPED_TRACE(as_flat ? "Flat" : "External");
absl::Cord cord =
as_flat ? absl::Cord(data)
: absl::MakeCordFromExternal(data, [](absl::string_view) {});
auto it = cord.Chars().begin();
#if !defined(NDEBUG) || ABSL_OPTION_HARDENED
EXPECT_DEATH_IF_SUPPORTED(cord.AdvanceAndRead(&it, 2001), ".*");
#endif
it = cord.Chars().begin();
absl::Cord frag = cord.AdvanceAndRead(&it, 2000);
EXPECT_EQ(frag, data);
EXPECT_TRUE(it == cord.Chars().end());
it = cord.Chars().begin();
frag = cord.AdvanceAndRead(&it, 200);
EXPECT_EQ(frag, data.substr(0, 200));
EXPECT_FALSE(it == cord.Chars().end());
frag = cord.AdvanceAndRead(&it, 1500);
EXPECT_EQ(frag, data.substr(200, 1500));
EXPECT_FALSE(it == cord.Chars().end());
frag = cord.AdvanceAndRead(&it, 300);
EXPECT_EQ(frag, data.substr(1700, 300));
EXPECT_TRUE(it == cord.Chars().end());
}
}
TEST_P(CordTest, AdvanceAndReadOnSubstringDataEdge) {
RandomEngine rng(GTEST_FLAG_GET(random_seed));
const std::string data = RandomLowercaseString(&rng, 2500);
for (bool as_flat : {true, false}) {
SCOPED_TRACE(as_flat ? "Flat" : "External");
absl::Cord cord =
as_flat ? absl::Cord(data)
: absl::MakeCordFromExternal(data, [](absl::string_view) {});
cord = cord.Subcord(200, 2000);
const std::string substr = data.substr(200, 2000);
auto it = cord.Chars().begin();
#if !defined(NDEBUG) || ABSL_OPTION_HARDENED
EXPECT_DEATH_IF_SUPPORTED(cord.AdvanceAndRead(&it, 2001), ".*");
#endif
it = cord.Chars().begin();
absl::Cord frag = cord.AdvanceAndRead(&it, 2000);
EXPECT_EQ(frag, substr);
EXPECT_TRUE(it == cord.Chars().end());
it = cord.Chars().begin();
frag = cord.AdvanceAndRead(&it, 200);
EXPECT_EQ(frag, substr.substr(0, 200));
EXPECT_FALSE(it == cord.Chars().end());
frag = cord.AdvanceAndRead(&it, 1500);
EXPECT_EQ(frag, substr.substr(200, 1500));
EXPECT_FALSE(it == cord.Chars().end());
frag = cord.AdvanceAndRead(&it, 300);
EXPECT_EQ(frag, substr.substr(1700, 300));
EXPECT_TRUE(it == cord.Chars().end());
}
}
TEST_P(CordTest, CharIteratorTraits) {
static_assert(std::is_copy_constructible<absl::Cord::CharIterator>::value,
"");
static_assert(std::is_copy_assignable<absl::Cord::CharIterator>::value, "");
static_assert(std::is_move_constructible<absl::Cord::CharIterator>::value,
"");
static_assert(std::is_move_assignable<absl::Cord::CharIterator>::value, "");
static_assert(
std::is_same<
std::iterator_traits<absl::Cord::CharIterator>::iterator_category,
std::input_iterator_tag>::value,
"");
static_assert(
std::is_same<std::iterator_traits<absl::Cord::CharIterator>::value_type,
char>::value,
"");
static_assert(
std::is_same<
std::iterator_traits<absl::Cord::CharIterator>::difference_type,
ptrdiff_t>::value,
"");
static_assert(
std::is_same<std::iterator_traits<absl::Cord::CharIterator>::pointer,
const char*>::value,
"");
static_assert(
std::is_same<std::iterator_traits<absl::Cord::CharIterator>::reference,
const char&>::value,
"");
}
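// Character-level analogue of VerifyChunkIterator; additionally validates
// Cord::Advance and Cord::AdvanceAndRead from every position in the cord.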
static void VerifyCharIterator(const absl::Cord& cord) {
EXPECT_EQ(cord.char_begin() == cord.char_end(), cord.empty());
EXPECT_EQ(cord.char_begin() != cord.char_end(), !cord.empty());
absl::Cord::CharRange range = cord.Chars();
EXPECT_EQ(range.begin() == range.end(), cord.empty());
EXPECT_EQ(range.begin() != range.end(), !cord.empty());
size_t i = 0;
absl::Cord::CharIterator pre_iter = cord.char_begin();
absl::Cord::CharIterator post_iter = cord.char_begin();
std::string content(cord);
while (pre_iter != cord.char_end() && post_iter != cord.char_end()) {
EXPECT_FALSE(pre_iter == cord.char_end());
EXPECT_FALSE(post_iter == cord.char_end());
EXPECT_LT(i, cord.size());
EXPECT_EQ(content[i], *pre_iter);
EXPECT_EQ(pre_iter, post_iter);
EXPECT_EQ(*pre_iter, *post_iter);
EXPECT_EQ(&*pre_iter, &*post_iter);
const char* character_address = &*pre_iter;
absl::Cord::CharIterator copy = pre_iter;
++copy;
EXPECT_EQ(character_address, &*pre_iter);
int n_equal_iterators = 0;
for (absl::Cord::CharIterator it = range.begin(); it != range.end(); ++it) {
n_equal_iterators += static_cast<int>(it == pre_iter);
}
EXPECT_EQ(n_equal_iterators, 1);
absl::Cord::CharIterator advance_iter = range.begin();
absl::Cord::Advance(&advance_iter, i);
EXPECT_EQ(pre_iter, advance_iter);
advance_iter = range.begin();
EXPECT_EQ(absl::Cord::AdvanceAndRead(&advance_iter, i), cord.Subcord(0, i));
EXPECT_EQ(pre_iter, advance_iter);
advance_iter = pre_iter;
absl::Cord::Advance(&advance_iter, cord.size() - i);
EXPECT_EQ(range.end(), advance_iter);
advance_iter = pre_iter;
EXPECT_EQ(absl::Cord::AdvanceAndRead(&advance_iter, cord.size() - i),
cord.Subcord(i, cord.size() - i));
EXPECT_EQ(range.end(), advance_iter);
++i;
++pre_iter;
post_iter++;
}
EXPECT_EQ(i, cord.size());
EXPECT_TRUE(pre_iter == cord.char_end());
EXPECT_TRUE(post_iter == cord.char_end());
absl::Cord::CharIterator zero_advanced_end = cord.char_end();
absl::Cord::Advance(&zero_advanced_end, 0);
EXPECT_EQ(zero_advanced_end, cord.char_end());
absl::Cord::CharIterator it = cord.char_begin();
for (absl::string_view chunk : cord.Chunks()) {
while (!chunk.empty()) {
EXPECT_EQ(absl::Cord::ChunkRemaining(it), chunk);
chunk.remove_prefix(1);
++it;
}
}
}
TEST_P(CordTest, CharIteratorOperations) {
absl::Cord empty_cord;
VerifyCharIterator(empty_cord);
absl::Cord small_buffer_cord("small cord");
MaybeHarden(small_buffer_cord);
VerifyCharIterator(small_buffer_cord);
absl::Cord flat_node_cord("larger than small buffer optimization");
MaybeHarden(flat_node_cord);
VerifyCharIterator(flat_node_cord);
VerifyCharIterator(MaybeHardened(
absl::MakeFragmentedCord({"a ", "small ", "fragmented ", "cord ", "for ",
"testing ", "character ", "iteration."})));
absl::Cord reused_nodes_cord("ghi");
reused_nodes_cord.Prepend(absl::Cord("def"));
reused_nodes_cord.Prepend(absl::Cord("abc"));
for (int i = 0; i < 4; ++i) {
reused_nodes_cord.Prepend(reused_nodes_cord);
MaybeHarden(reused_nodes_cord);
VerifyCharIterator(reused_nodes_cord);
}
RandomEngine rng(GTEST_FLAG_GET(random_seed));
absl::Cord flat_cord(RandomLowercaseString(&rng, 256));
absl::Cord subcords;
for (int i = 0; i < 4; ++i) {
subcords.Prepend(flat_cord.Subcord(16 * i, 128));
MaybeHarden(subcords);
}
VerifyCharIterator(subcords);
}
TEST_P(CordTest, CharIteratorAdvanceAndRead) {
constexpr int kBlocks = 6;
constexpr size_t kBlockSize = 2500;
constexpr size_t kChunkSize1 = 1500;
constexpr size_t kChunkSize2 = 2500;
constexpr size_t kChunkSize3 = 3000;
constexpr size_t kChunkSize4 = 150;
RandomEngine rng;
std::string data = RandomLowercaseString(&rng, kBlocks * kBlockSize);
absl::Cord cord;
for (int i = 0; i < kBlocks; ++i) {
const std::string block = data.substr(i * kBlockSize, kBlockSize);
cord.Append(absl::Cord(block));
}
MaybeHarden(cord);
for (size_t chunk_size :
{kChunkSize1, kChunkSize2, kChunkSize3, kChunkSize4}) {
absl::Cord::CharIterator it = cord.char_begin();
size_t offset = 0;
while (offset < data.length()) {
const size_t n = std::min<size_t>(data.length() - offset, chunk_size);
absl::Cord chunk = cord.AdvanceAndRead(&it, n);
ASSERT_EQ(chunk.size(), n);
ASSERT_EQ(chunk.Compare(data.substr(offset, n)), 0);
offset += n;
}
}
}
TEST_P(CordTest, StreamingOutput) {
absl::Cord c =
absl::MakeFragmentedCord({"A ", "small ", "fragmented ", "Cord", "."});
MaybeHarden(c);
std::stringstream output;
output << c;
EXPECT_EQ("A small fragmented Cord.", output.str());
}
TEST_P(CordTest, ForEachChunk) {
for (int num_elements : {1, 10, 200}) {
SCOPED_TRACE(num_elements);
std::vector<std::string> cord_chunks;
for (int i = 0; i < num_elements; ++i) {
cord_chunks.push_back(absl::StrCat("[", i, "]"));
}
absl::Cord c = absl::MakeFragmentedCord(cord_chunks);
MaybeHarden(c);
std::vector<std::string> iterated_chunks;
absl::CordTestPeer::ForEachChunk(c,
[&iterated_chunks](absl::string_view sv) {
iterated_chunks.emplace_back(sv);
});
EXPECT_EQ(iterated_chunks, cord_chunks);
}
}
TEST_P(CordTest, SmallBufferAssignFromOwnData) {
constexpr size_t kMaxInline = 15;
std::string contents = "small buff cord";
EXPECT_EQ(contents.size(), kMaxInline);
for (size_t pos = 0; pos < contents.size(); ++pos) {
for (size_t count = contents.size() - pos; count > 0; --count) {
absl::Cord c(contents);
MaybeHarden(c);
absl::string_view flat = c.Flatten();
c = flat.substr(pos, count);
EXPECT_EQ(c, contents.substr(pos, count))
<< "pos = " << pos << "; count = " << count;
}
}
}
TEST_P(CordTest, Format) {
absl::Cord c;
absl::Format(&c, "There were %04d little %s.", 3, "pigs");
EXPECT_EQ(c, "There were 0003 little pigs.");
MaybeHarden(c);
absl::Format(&c, "And %-3llx bad wolf!", 1);
MaybeHarden(c);
EXPECT_EQ(c, "There were 0003 little pigs.And 1 bad wolf!");
}
TEST_P(CordTest, Stringify) {
absl::Cord c =
absl::MakeFragmentedCord({"A ", "small ", "fragmented ", "Cord", "."});
MaybeHarden(c);
EXPECT_EQ(absl::StrCat(c), "A small fragmented Cord.");
}
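// Probes at runtime whether ABSL_HARDENING_ASSERT is active and only then
// exercises the out-of-bounds accesses that should die under hardening.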
TEST_P(CordTest, Hardening) {
absl::Cord cord("hello");
MaybeHarden(cord);
EXPECT_DEATH_IF_SUPPORTED(cord.RemovePrefix(6), "");
EXPECT_DEATH_IF_SUPPORTED(cord.RemoveSuffix(6), "");
bool test_hardening = false;
ABSL_HARDENING_ASSERT([&]() {
test_hardening = true;
return true;
}());
if (!test_hardening) return;
EXPECT_DEATH_IF_SUPPORTED(cord[5], "");
EXPECT_DEATH_IF_SUPPORTED(*cord.chunk_end(), "");
EXPECT_DEATH_IF_SUPPORTED(static_cast<void>(cord.chunk_end()->empty()), "");
EXPECT_DEATH_IF_SUPPORTED(++cord.chunk_end(), "");
}
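// Stress test: repeatedly splits a ~1 GB btree cord at random offsets and
// splices fresh data back in, a split/insert/join pattern that is hostile to
// the btree representation.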
TEST_P(CordTest, BtreeHostileSplitInsertJoin) {
absl::BitGen bitgen;
std::string data(1 << 10, 'x');
absl::Cord buffer(data);
absl::Cord cord;
for (int i = 0; i < 1000000; ++i) {
cord.Append(buffer);
}
for (int j = 0; j < 1000; ++j) {
MaybeHarden(cord);
size_t offset = absl::Uniform(bitgen, 0u, cord.size());
size_t length = absl::Uniform(bitgen, 100u, data.size());
if (cord.size() == offset) {
cord.Append(absl::string_view(data.data(), length));
} else {
absl::Cord suffix;
if (offset + length < cord.size()) {
suffix = cord;
suffix.RemovePrefix(offset + length);
}
if (cord.size() > offset) {
cord.RemoveSuffix(cord.size() - offset);
}
cord.Append(absl::string_view(data.data(), length));
if (!suffix.empty()) {
cord.Append(suffix);
}
}
}
}
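// Checks that a NoDestructor-wrapped global cord is still readable during
// static destruction, by comparing its contents from another static object's
// destructor.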
class AfterExitCordTester {
public:
bool Set(absl::Cord* cord, absl::string_view expected) {
cord_ = cord;
expected_ = expected;
return true;
}
~AfterExitCordTester() {
EXPECT_EQ(*cord_, expected_);
}
private:
absl::Cord* cord_;
absl::string_view expected_;
};
template <typename Str>
void TestAfterExit(Str) {
const auto expected = Str::value;
static AfterExitCordTester exit_tester;
static absl::NoDestructor<absl::Cord> cord_leaker(Str{});
static absl::Cord& cord = *cord_leaker;
static bool init_exit_tester = exit_tester.Set(&cord, expected);
(void)init_exit_tester;
EXPECT_EQ(cord, expected);
{
absl::Cord copy = cord;
EXPECT_EQ(copy, expected);
}
EXPECT_EQ(cord, expected);
{
absl::Cord copy = cord;
std::string expected_copy(expected);
for (int i = 0; i < 10; ++i) {
copy.Append(cord);
absl::StrAppend(&expected_copy, expected);
EXPECT_EQ(copy, expected_copy);
}
}
EXPECT_EQ(absl::CordTestPeer::IsTree(cord), cord.size() >= 16);
for (int i = 0; i < 10; ++i) {
EXPECT_EQ(expected, absl::Cord(Str{}));
}
}
constexpr int SimpleStrlen(const char* p) {
return *p ? 1 + SimpleStrlen(p + 1) : 0;
}
struct ShortView {
constexpr absl::string_view operator()() const {
return absl::string_view("SSO string", SimpleStrlen("SSO string"));
}
};
struct LongView {
constexpr absl::string_view operator()() const {
return absl::string_view("String that does not fit SSO.",
SimpleStrlen("String that does not fit SSO."));
}
};
TEST_P(CordTest, AfterExit) {
TestAfterExit(absl::strings_internal::MakeStringConstant(ShortView{}));
TestAfterExit(absl::strings_internal::MakeStringConstant(LongView{}));
}
namespace {
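// Produces named cords covering every storage shape (inlined, flat,
// external, substring, fragmented) so tests can iterate over all of them.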
class PopulatedCordFactory {
public:
constexpr PopulatedCordFactory(absl::string_view name,
absl::Cord (*generator)())
: name_(name), generator_(generator) {}
absl::string_view Name() const { return name_; }
absl::Cord Generate() const { return generator_(); }
private:
absl::string_view name_;
absl::Cord (*generator_)();
};
PopulatedCordFactory cord_factories[] = {
{"sso", [] { return absl::Cord("abcde"); }},
{"flat", [] {
absl::Cord flat(absl::StrCat("abcde", std::string(1000, 'x')));
flat.Flatten();
return flat;
}},
{"external", [] {
return absl::MakeCordFromExternal("abcde External!", []{});
}},
{"external substring", [] {
absl::Cord ext = absl::MakeCordFromExternal("-abcde External!", []{});
return absl::CordTestPeer::MakeSubstring(ext, 1, ext.size() - 1);
}},
{"substring", [] {
absl::Cord flat(absl::StrCat("-abcde", std::string(1000, 'x')));
flat.Flatten();
return flat.Subcord(1, 998);
}},
{"fragmented", [] {
std::string fragment = absl::StrCat("abcde", std::string(195, 'x'));
std::vector<std::string> fragments(200, fragment);
absl::Cord cord = absl::MakeFragmentedCord(fragments);
assert(cord.size() == 40000);
return cord;
}},
};
class CordMutator {
public:
constexpr CordMutator(absl::string_view name, void (*mutate)(absl::Cord&),
void (*undo)(absl::Cord&) = nullptr)
: name_(name), mutate_(mutate), undo_(undo) {}
absl::string_view Name() const { return name_; }
void Mutate(absl::Cord& cord) const { mutate_(cord); }
bool CanUndo() const { return undo_ != nullptr; }
void Undo(absl::Cord& cord) const { undo_(cord); }
private:
absl::string_view name_;
void (*mutate_)(absl::Cord&);
void (*undo_)(absl::Cord&);
};
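// Each mutator perturbs a cord and may know how to undo itself; the checksum
// tests assert that any observable mutation clears the expected checksum.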
CordMutator cord_mutators[] = {
{"clear", [](absl::Cord& c) { c.Clear(); }},
{"overwrite", [](absl::Cord& c) { c = "overwritten"; }},
{
"append string",
[](absl::Cord& c) { c.Append("0123456789"); },
[](absl::Cord& c) { c.RemoveSuffix(10); }
},
{
"append cord",
[](absl::Cord& c) {
c.Append(absl::MakeFragmentedCord({"12345", "67890"}));
},
[](absl::Cord& c) { c.RemoveSuffix(10); }
},
{
"append checksummed cord",
[](absl::Cord& c) {
absl::Cord to_append = absl::MakeFragmentedCord({"12345", "67890"});
to_append.SetExpectedChecksum(999);
c.Append(to_append);
},
[](absl::Cord& c) { c.RemoveSuffix(10); }
},
{
"append self",
[](absl::Cord& c) { c.Append(c); },
[](absl::Cord& c) { c.RemoveSuffix(c.size() / 2); }
},
{
"append empty string",
[](absl::Cord& c) { c.Append(""); },
[](absl::Cord& c) { }
},
{
"append empty cord",
[](absl::Cord& c) { c.Append(absl::Cord()); },
[](absl::Cord& c) { }
},
{
"append empty checksummed cord",
[](absl::Cord& c) {
absl::Cord to_append;
to_append.SetExpectedChecksum(999);
c.Append(to_append);
},
[](absl::Cord& c) { }
},
{
"prepend string",
[](absl::Cord& c) { c.Prepend("9876543210"); },
[](absl::Cord& c) { c.RemovePrefix(10); }
},
{
"prepend cord",
[](absl::Cord& c) {
c.Prepend(absl::MakeFragmentedCord({"98765", "43210"}));
},
[](absl::Cord& c) { c.RemovePrefix(10); }
},
{
"prepend checksummed cord",
[](absl::Cord& c) {
absl::Cord to_prepend = absl::MakeFragmentedCord({"98765", "43210"});
to_prepend.SetExpectedChecksum(999);
c.Prepend(to_prepend);
},
[](absl::Cord& c) { c.RemovePrefix(10); }
},
{
"prepend empty string",
[](absl::Cord& c) { c.Prepend(""); },
[](absl::Cord& c) { }
},
{
"prepend empty cord",
[](absl::Cord& c) { c.Prepend(absl::Cord()); },
[](absl::Cord& c) { }
},
{
"prepend empty checksummed cord",
[](absl::Cord& c) {
absl::Cord to_prepend;
to_prepend.SetExpectedChecksum(999);
c.Prepend(to_prepend);
},
[](absl::Cord& c) { }
},
{
"prepend self",
[](absl::Cord& c) { c.Prepend(c); },
[](absl::Cord& c) { c.RemovePrefix(c.size() / 2); }
},
{"remove prefix", [](absl::Cord& c) { c.RemovePrefix(c.size() / 2); }},
{"remove suffix", [](absl::Cord& c) { c.RemoveSuffix(c.size() / 2); }},
{"remove 0-prefix", [](absl::Cord& c) { c.RemovePrefix(0); }},
{"remove 0-suffix", [](absl::Cord& c) { c.RemoveSuffix(0); }},
{"subcord", [](absl::Cord& c) { c = c.Subcord(1, c.size() - 2); }},
{
"swap inline",
[](absl::Cord& c) {
absl::Cord other("swap");
c.swap(other);
}
},
{
"swap tree",
[](absl::Cord& c) {
absl::Cord other(std::string(10000, 'x'));
c.swap(other);
}
},
};
}
TEST_P(CordTest, ExpectedChecksum) {
for (const PopulatedCordFactory& factory : cord_factories) {
SCOPED_TRACE(factory.Name());
for (bool shared : {false, true}) {
SCOPED_TRACE(shared);
absl::Cord shared_cord_source = factory.Generate();
auto make_instance = [=] {
return shared ? shared_cord_source : factory.Generate();
};
const absl::Cord base_value = factory.Generate();
const std::string base_value_as_string(factory.Generate().Flatten());
absl::Cord c1 = make_instance();
EXPECT_FALSE(c1.ExpectedChecksum().has_value());
c1.SetExpectedChecksum(12345);
EXPECT_EQ(c1.ExpectedChecksum().value_or(0), 12345);
EXPECT_EQ(c1, base_value);
c1.SetExpectedChecksum(12345);
EXPECT_EQ(c1.ExpectedChecksum().value_or(0), 12345);
EXPECT_EQ(c1, base_value);
absl::Cord c1_copy_construct = c1;
EXPECT_EQ(c1_copy_construct.ExpectedChecksum().value_or(0), 12345);
absl::Cord c1_copy_assign;
c1_copy_assign = c1;
EXPECT_EQ(c1_copy_assign.ExpectedChecksum().value_or(0), 12345);
absl::Cord c1_move(std::move(c1_copy_assign));
EXPECT_EQ(c1_move.ExpectedChecksum().value_or(0), 12345);
EXPECT_EQ(c1.ExpectedChecksum().value_or(0), 12345);
EXPECT_EQ(c1, make_instance());
for (const CordMutator& mutator : cord_mutators) {
SCOPED_TRACE(mutator.Name());
absl::Cord c2 = make_instance();
c2.SetExpectedChecksum(24680);
mutator.Mutate(c2);
if (c1 == c2) {
continue;
}
EXPECT_EQ(c2.ExpectedChecksum(), absl::nullopt);
if (mutator.CanUndo()) {
mutator.Undo(c2);
EXPECT_EQ(c2, base_value);
EXPECT_EQ(c2.ExpectedChecksum(), absl::nullopt);
}
}
absl::Cord c3 = make_instance();
c3.SetExpectedChecksum(999);
const absl::Cord& cc3 = c3;
ASSERT_TRUE(cc3.StartsWith("abcde"));
EXPECT_EQ(cc3.size(), base_value_as_string.size());
EXPECT_FALSE(cc3.empty());
EXPECT_EQ(cc3.Compare(base_value), 0);
EXPECT_EQ(cc3.Compare(base_value_as_string), 0);
EXPECT_EQ(cc3.Compare("wxyz"), -1);
EXPECT_EQ(cc3.Compare(absl::Cord("wxyz")), -1);
EXPECT_EQ(cc3.Compare("aaaa"), 1);
EXPECT_EQ(cc3.Compare(absl::Cord("aaaa")), 1);
EXPECT_EQ(absl::Cord("wxyz").Compare(cc3), 1);
EXPECT_EQ(absl::Cord("aaaa").Compare(cc3), -1);
EXPECT_TRUE(cc3.StartsWith("abcd"));
EXPECT_EQ(std::string(cc3), base_value_as_string);
std::string dest;
absl::CopyCordToString(cc3, &dest);
EXPECT_EQ(dest, base_value_as_string);
bool first_pass = true;
for (absl::string_view chunk : cc3.Chunks()) {
if (first_pass) {
EXPECT_TRUE(absl::StartsWith(chunk, "abcde"));
}
first_pass = false;
}
first_pass = true;
for (char ch : cc3.Chars()) {
if (first_pass) {
EXPECT_EQ(ch, 'a');
}
first_pass = false;
}
EXPECT_TRUE(absl::StartsWith(*cc3.chunk_begin(), "abcde"));
EXPECT_EQ(*cc3.char_begin(), 'a');
auto char_it = cc3.char_begin();
absl::Cord::Advance(&char_it, 2);
EXPECT_EQ(absl::Cord::AdvanceAndRead(&char_it, 2), "cd");
EXPECT_EQ(*char_it, 'e');
char_it = cc3.char_begin();
absl::Cord::Advance(&char_it, 2);
EXPECT_TRUE(absl::StartsWith(absl::Cord::ChunkRemaining(char_it), "cde"));
EXPECT_EQ(cc3[0], 'a');
EXPECT_EQ(cc3[4], 'e');
EXPECT_EQ(absl::HashOf(cc3), absl::HashOf(base_value));
EXPECT_EQ(absl::HashOf(cc3), absl::HashOf(base_value_as_string));
}
}
}
TEST_P(CordTest, ChecksummedEmptyCord) {
absl::Cord c1;
EXPECT_FALSE(c1.ExpectedChecksum().has_value());
c1.SetExpectedChecksum(12345);
EXPECT_EQ(c1.ExpectedChecksum().value_or(0), 12345);
EXPECT_EQ(c1, "");
EXPECT_TRUE(c1.empty());
c1.SetExpectedChecksum(12345);
EXPECT_EQ(c1.ExpectedChecksum().value_or(0), 12345);
EXPECT_EQ(c1, "");
EXPECT_TRUE(c1.empty());
absl::Cord c1_copy_construct = c1;
EXPECT_EQ(c1_copy_construct.ExpectedChecksum().value_or(0), 12345);
absl::Cord c1_copy_assign;
c1_copy_assign = c1;
EXPECT_EQ(c1_copy_assign.ExpectedChecksum().value_or(0), 12345);
absl::Cord c1_move(std::move(c1_copy_assign));
EXPECT_EQ(c1_move.ExpectedChecksum().value_or(0), 12345);
EXPECT_EQ(c1.ExpectedChecksum().value_or(0), 12345);
EXPECT_EQ(c1, absl::Cord());
for (const CordMutator& mutator : cord_mutators) {
SCOPED_TRACE(mutator.Name());
absl::Cord c2;
c2.SetExpectedChecksum(24680);
mutator.Mutate(c2);
if (c2.empty()) {
continue;
}
EXPECT_EQ(c2.ExpectedChecksum(), absl::nullopt);
if (mutator.CanUndo()) {
mutator.Undo(c2);
}
}
absl::Cord c3;
c3.SetExpectedChecksum(999);
const absl::Cord& cc3 = c3;
EXPECT_TRUE(cc3.StartsWith(""));
EXPECT_TRUE(cc3.EndsWith(""));
EXPECT_TRUE(cc3.empty());
EXPECT_EQ(cc3, "");
EXPECT_EQ(cc3, absl::Cord());
EXPECT_EQ(cc3.size(), 0);
EXPECT_EQ(cc3.Compare(absl::Cord()), 0);
EXPECT_EQ(cc3.Compare(c1), 0);
EXPECT_EQ(cc3.Compare(cc3), 0);
EXPECT_EQ(cc3.Compare(""), 0);
EXPECT_EQ(cc3.Compare("wxyz"), -1);
EXPECT_EQ(cc3.Compare(absl::Cord("wxyz")), -1);
EXPECT_EQ(absl::Cord("wxyz").Compare(cc3), 1);
EXPECT_EQ(std::string(cc3), "");
std::string dest;
absl::CopyCordToString(cc3, &dest);
EXPECT_EQ(dest, "");
for (absl::string_view chunk : cc3.Chunks()) {
static_cast<void>(chunk);
GTEST_FAIL() << "no chunks expected";
}
EXPECT_TRUE(cc3.chunk_begin() == cc3.chunk_end());
for (char ch : cc3.Chars()) {
static_cast<void>(ch);
GTEST_FAIL() << "no chars expected";
}
EXPECT_TRUE(cc3.char_begin() == cc3.char_end());
EXPECT_EQ(cc3.TryFlat(), "");
EXPECT_EQ(absl::HashOf(c3), absl::HashOf(absl::Cord()));
EXPECT_EQ(absl::HashOf(c3), absl::HashOf(absl::string_view()));
}
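// Declared weak and defined noinline so the compiler cannot fold the branch
// on `f` away; used below to check that sanitizer annotations on cord
// internals do not produce false positives.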
ABSL_ATTRIBUTE_WEAK
size_t FalseReport(const absl::Cord& a, bool f);
ABSL_ATTRIBUTE_NOINLINE
size_t FalseReport(const absl::Cord& a, bool f) {
absl::Cord b;
const absl::Cord& ref = f ? b : a;
return ref.size();
}
TEST(CordSanitizerTest, SanitizesCordFalseReport) {
absl::Cord c;
for (int i = 0; i < 1000; ++i) c.Append("a");
FalseReport(c, false);
}
TEST(CrcCordTest, ChecksummedEmptyCordEstimateMemoryUsage) {
absl::Cord cord;
cord.SetExpectedChecksum(0);
EXPECT_NE(cord.EstimatedMemoryUsage(), 0);
}
TEST(CordThreeWayComparisonTest, CompareCords) {
#ifndef __cpp_impl_three_way_comparison
GTEST_SKIP() << "C++20 three-way <=> comparison not supported";
#else
EXPECT_EQ(absl::Cord("a") <=> absl::Cord("a"), std::strong_ordering::equal);
EXPECT_EQ(absl::Cord("aaaa") <=> absl::Cord("aaab"),
std::strong_ordering::less);
EXPECT_EQ(absl::Cord("baaa") <=> absl::Cord("a"),
std::strong_ordering::greater);
#endif
}
TEST(CordThreeWayComparisonTest, CompareCordsAndStringViews) {
#ifndef __cpp_impl_three_way_comparison
GTEST_SKIP() << "C++20 three-way <=> comparison not supported";
#else
EXPECT_EQ(absl::string_view("a") <=> absl::Cord("a"),
std::strong_ordering::equal);
EXPECT_EQ(absl::Cord("a") <=> absl::string_view("b"),
std::strong_ordering::less);
EXPECT_EQ(absl::string_view("b") <=> absl::Cord("a"),
std::strong_ordering::greater);
#endif
}
#if defined(GTEST_HAS_DEATH_TEST) && defined(ABSL_INTERNAL_CORD_HAVE_SANITIZER)
const char* MASanDeathExpr() {
return "(use-after-poison|use-of-uninitialized-value)";
}
TEST(CordSanitizerTest, SanitizesEmptyCord) {
absl::Cord cord;
const char* data = cord.Flatten().data();
EXPECT_DEATH(EXPECT_EQ(data[0], 0), MASanDeathExpr());
}
TEST(CordSanitizerTest, SanitizesSmallCord) {
absl::Cord cord("Hello");
const char* data = cord.Flatten().data();
EXPECT_DEATH(EXPECT_EQ(data[5], 0), MASanDeathExpr());
}
TEST(CordSanitizerTest, SanitizesCordOnSetSSOValue) {
absl::Cord cord("String that is too big to be an SSO value");
cord = "Hello";
const char* data = cord.Flatten().data();
EXPECT_DEATH(EXPECT_EQ(data[5], 0), MASanDeathExpr());
}
TEST(CordSanitizerTest, SanitizesCordOnCopyCtor) {
absl::Cord src("hello");
absl::Cord dst(src);
const char* data = dst.Flatten().data();
EXPECT_DEATH(EXPECT_EQ(data[5], 0), MASanDeathExpr());
}
TEST(CordSanitizerTest, SanitizesCordOnMoveCtor) {
absl::Cord src("hello");
absl::Cord dst(std::move(src));
const char* data = dst.Flatten().data();
EXPECT_DEATH(EXPECT_EQ(data[5], 0), MASanDeathExpr());
}
TEST(CordSanitizerTest, SanitizesCordOnAssign) {
absl::Cord src("hello");
absl::Cord dst;
dst = src;
const char* data = dst.Flatten().data();
EXPECT_DEATH(EXPECT_EQ(data[5], 0), MASanDeathExpr());
}
TEST(CordSanitizerTest, SanitizesCordOnMoveAssign) {
absl::Cord src("hello");
absl::Cord dst;
dst = std::move(src);
const char* data = dst.Flatten().data();
EXPECT_DEATH(EXPECT_EQ(data[5], 0), MASanDeathExpr());
}
TEST(CordSanitizerTest, SanitizesCordOnSsoAssign) {
absl::Cord src("hello");
absl::Cord dst("String that is too big to be an SSO value");
dst = src;
const char* data = dst.Flatten().data();
EXPECT_DEATH(EXPECT_EQ(data[5], 0), MASanDeathExpr());
}
#endif | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/strings/cord.cc | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/strings/cord_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4 |
8e51eff6-da41-42b0-ae49-1405c1a5da7d | cpp | tensorflow/tensorflow | mfcc_op | tensorflow/core/kernels/mfcc_op.cc | tensorflow/core/kernels/mfcc_op_test.cc | #include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/kernels/mfcc.h"
#include "tensorflow/core/lib/core/status.h"
namespace tensorflow {
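// Computes mel-frequency cepstral coefficients (MFCCs) for every
// [channel, sample] slice of a 3-D spectrogram, emitting
// dct_coefficient_count values per slice.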
class MfccOp : public OpKernel {
public:
explicit MfccOp(OpKernelConstruction* context) : OpKernel(context) {
OP_REQUIRES_OK(context, context->GetAttr("upper_frequency_limit",
&upper_frequency_limit_));
OP_REQUIRES_OK(context, context->GetAttr("lower_frequency_limit",
&lower_frequency_limit_));
OP_REQUIRES_OK(context, context->GetAttr("filterbank_channel_count",
&filterbank_channel_count_));
OP_REQUIRES_OK(context, context->GetAttr("dct_coefficient_count",
&dct_coefficient_count_));
}
void Compute(OpKernelContext* context) override {
const Tensor& spectrogram = context->input(0);
OP_REQUIRES(context, spectrogram.dims() == 3,
errors::InvalidArgument("spectrogram must be 3-dimensional",
spectrogram.shape().DebugString()));
const Tensor& sample_rate_tensor = context->input(1);
OP_REQUIRES(context, TensorShapeUtils::IsScalar(sample_rate_tensor.shape()),
errors::InvalidArgument(
"Input sample_rate should be a scalar tensor, got ",
sample_rate_tensor.shape().DebugString(), " instead."));
const int32_t sample_rate = sample_rate_tensor.scalar<int32>()();
const int spectrogram_channels = spectrogram.dim_size(2);
const int spectrogram_samples = spectrogram.dim_size(1);
const int audio_channels = spectrogram.dim_size(0);
Mfcc mfcc;
mfcc.set_upper_frequency_limit(upper_frequency_limit_);
mfcc.set_lower_frequency_limit(lower_frequency_limit_);
mfcc.set_filterbank_channel_count(filterbank_channel_count_);
mfcc.set_dct_coefficient_count(dct_coefficient_count_);
OP_REQUIRES(
context, mfcc.Initialize(spectrogram_channels, sample_rate),
errors::InvalidArgument("Mfcc initialization failed for channel count ",
spectrogram_channels, ", sample rate ",
sample_rate, " and filterbank_channel_count ",
filterbank_channel_count_));
Tensor* output_tensor = nullptr;
OP_REQUIRES_OK(context,
context->allocate_output(
0,
TensorShape({audio_channels, spectrogram_samples,
dct_coefficient_count_}),
&output_tensor));
const float* spectrogram_flat = spectrogram.flat<float>().data();
float* output_flat = output_tensor->flat<float>().data();
for (int audio_channel = 0; audio_channel < audio_channels;
++audio_channel) {
for (int spectrogram_sample = 0; spectrogram_sample < spectrogram_samples;
++spectrogram_sample) {
const float* sample_data =
spectrogram_flat +
(audio_channel * spectrogram_samples * spectrogram_channels) +
(spectrogram_sample * spectrogram_channels);
std::vector<double> mfcc_input(sample_data,
sample_data + spectrogram_channels);
std::vector<double> mfcc_output;
mfcc.Compute(mfcc_input, &mfcc_output);
DCHECK_EQ(dct_coefficient_count_, mfcc_output.size());
float* output_data =
output_flat +
(audio_channel * spectrogram_samples * dct_coefficient_count_) +
(spectrogram_sample * dct_coefficient_count_);
for (int i = 0; i < dct_coefficient_count_; ++i) {
output_data[i] = mfcc_output[i];
}
}
}
}
private:
float upper_frequency_limit_;
float lower_frequency_limit_;
int32 filterbank_channel_count_;
int32 dct_coefficient_count_;
};
REGISTER_KERNEL_BUILDER(Name("Mfcc").Device(DEVICE_CPU), MfccOp);
} | #define EIGEN_USE_THREADS
#include <functional>
#include <memory>
#include <vector>
#include "tensorflow/cc/client/client_session.h"
#include "tensorflow/cc/ops/audio_ops.h"
#include "tensorflow/cc/ops/const_op.h"
#include "tensorflow/cc/ops/math_ops.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace ops {
namespace {
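// Runs the Mfcc op on a 1x1x513 ramp spectrogram at a 22050 Hz sample rate
// and checks the 13 resulting coefficients against golden values.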
TEST(MfccOpTest, SimpleTest) {
Scope root = Scope::DisabledShapeInferenceScope();
Tensor spectrogram_tensor(DT_FLOAT, TensorShape({1, 1, 513}));
test::FillIota<float>(&spectrogram_tensor, 1.0f);
Output spectrogram_const_op = Const(root.WithOpName("spectrogram_const_op"),
Input::Initializer(spectrogram_tensor));
Output sample_rate_const_op =
Const(root.WithOpName("sample_rate_const_op"), 22050);
Mfcc mfcc_op = Mfcc(root.WithOpName("mfcc_op"), spectrogram_const_op,
sample_rate_const_op);
TF_ASSERT_OK(root.status());
ClientSession session(root);
std::vector<Tensor> outputs;
TF_EXPECT_OK(
session.Run(ClientSession::FeedType(), {mfcc_op.output}, &outputs));
const Tensor& mfcc_tensor = outputs[0];
EXPECT_EQ(3, mfcc_tensor.dims());
EXPECT_EQ(13, mfcc_tensor.dim_size(2));
EXPECT_EQ(1, mfcc_tensor.dim_size(1));
EXPECT_EQ(1, mfcc_tensor.dim_size(0));
test::ExpectTensorNear<float>(
mfcc_tensor,
test::AsTensor<float>(
{29.13970072, -6.41568601, -0.61903012, -0.96778652, -0.26819878,
-0.40907028, -0.15614748, -0.23203119, -0.10481487, -0.1543029,
-0.0769791, -0.10806114, -0.06047613},
TensorShape({1, 1, 13})),
1e-3);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/mfcc_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/mfcc_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
cfd1497b-0282-4c8e-ba59-e0bbdda6328f | cpp | google/quiche | quic_test_utils | quiche/quic/test_tools/quic_test_utils.cc | quiche/quic/test_tools/quic_test_utils_test.cc | #include "quiche/quic/test_tools/quic_test_utils.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <limits>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/base/macros.h"
#include "absl/strings/string_view.h"
#include "openssl/chacha.h"
#include "openssl/sha.h"
#include "quiche/quic/core/crypto/crypto_framer.h"
#include "quiche/quic/core/crypto/crypto_handshake.h"
#include "quiche/quic/core/crypto/crypto_utils.h"
#include "quiche/quic/core/crypto/null_decrypter.h"
#include "quiche/quic/core/crypto/null_encrypter.h"
#include "quiche/quic/core/crypto/quic_decrypter.h"
#include "quiche/quic/core/crypto/quic_encrypter.h"
#include "quiche/quic/core/http/quic_spdy_client_session.h"
#include "quiche/quic/core/quic_config.h"
#include "quiche/quic/core/quic_data_writer.h"
#include "quiche/quic/core/quic_framer.h"
#include "quiche/quic/core/quic_packet_creator.h"
#include "quiche/quic/core/quic_packets.h"
#include "quiche/quic/core/quic_time.h"
#include "quiche/quic/core/quic_types.h"
#include "quiche/quic/core/quic_utils.h"
#include "quiche/quic/core/quic_versions.h"
#include "quiche/quic/platform/api/quic_flags.h"
#include "quiche/quic/platform/api/quic_logging.h"
#include "quiche/quic/test_tools/crypto_test_utils.h"
#include "quiche/quic/test_tools/quic_config_peer.h"
#include "quiche/quic/test_tools/quic_connection_peer.h"
#include "quiche/common/quiche_buffer_allocator.h"
#include "quiche/common/quiche_endian.h"
#include "quiche/common/simple_buffer_allocator.h"
using testing::_;
using testing::Invoke;
using testing::Return;
namespace quic {
namespace test {
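// Deterministic helpers for constructing connection IDs, ACK frames, and
// packets used throughout the QUIC test suite.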
QuicConnectionId TestConnectionId() {
return TestConnectionId(42);
}
QuicConnectionId TestConnectionId(uint64_t connection_number) {
const uint64_t connection_id64_net =
quiche::QuicheEndian::HostToNet64(connection_number);
return QuicConnectionId(reinterpret_cast<const char*>(&connection_id64_net),
sizeof(connection_id64_net));
}
QuicConnectionId TestConnectionIdNineBytesLong(uint64_t connection_number) {
const uint64_t connection_number_net =
quiche::QuicheEndian::HostToNet64(connection_number);
char connection_id_bytes[9] = {};
static_assert(
sizeof(connection_id_bytes) == 1 + sizeof(connection_number_net),
"bad lengths");
memcpy(connection_id_bytes + 1, &connection_number_net,
sizeof(connection_number_net));
return QuicConnectionId(connection_id_bytes, sizeof(connection_id_bytes));
}
uint64_t TestConnectionIdToUInt64(QuicConnectionId connection_id) {
QUICHE_DCHECK_EQ(connection_id.length(), kQuicDefaultConnectionIdLength);
uint64_t connection_id64_net = 0;
memcpy(&connection_id64_net, connection_id.data(),
std::min<size_t>(static_cast<size_t>(connection_id.length()),
sizeof(connection_id64_net)));
return quiche::QuicheEndian::NetToHost64(connection_id64_net);
}
std::vector<uint8_t> CreateStatelessResetTokenForTest() {
static constexpr uint8_t kStatelessResetTokenDataForTest[16] = {
0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
0x98, 0x99, 0x9A, 0x9B, 0x9C, 0x9D, 0x9E, 0x9F};
return std::vector<uint8_t>(kStatelessResetTokenDataForTest,
kStatelessResetTokenDataForTest +
sizeof(kStatelessResetTokenDataForTest));
}
std::string TestHostname() { return "test.example.com"; }
QuicServerId TestServerId() { return QuicServerId(TestHostname(), kTestPort); }
QuicAckFrame InitAckFrame(const std::vector<QuicAckBlock>& ack_blocks) {
QUICHE_DCHECK_GT(ack_blocks.size(), 0u);
QuicAckFrame ack;
QuicPacketNumber end_of_previous_block(1);
for (const QuicAckBlock& block : ack_blocks) {
QUICHE_DCHECK_GE(block.start, end_of_previous_block);
QUICHE_DCHECK_GT(block.limit, block.start);
ack.packets.AddRange(block.start, block.limit);
end_of_previous_block = block.limit;
}
ack.largest_acked = ack.packets.Max();
return ack;
}
QuicAckFrame InitAckFrame(uint64_t largest_acked) {
return InitAckFrame(QuicPacketNumber(largest_acked));
}
QuicAckFrame InitAckFrame(QuicPacketNumber largest_acked) {
return InitAckFrame({{QuicPacketNumber(1), largest_acked + 1}});
}
QuicAckFrame MakeAckFrameWithAckBlocks(size_t num_ack_blocks,
uint64_t least_unacked) {
QuicAckFrame ack;
ack.largest_acked = QuicPacketNumber(2 * num_ack_blocks + least_unacked);
for (QuicPacketNumber i = QuicPacketNumber(2);
i < QuicPacketNumber(2 * num_ack_blocks + 1); i += 2) {
ack.packets.Add(i + least_unacked);
}
return ack;
}
QuicAckFrame MakeAckFrameWithGaps(uint64_t gap_size, size_t max_num_gaps,
uint64_t largest_acked) {
QuicAckFrame ack;
ack.largest_acked = QuicPacketNumber(largest_acked);
ack.packets.Add(QuicPacketNumber(largest_acked));
for (size_t i = 0; i < max_num_gaps; ++i) {
if (largest_acked <= gap_size) {
break;
}
largest_acked -= gap_size;
ack.packets.Add(QuicPacketNumber(largest_acked));
}
return ack;
}
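// Maps a packet header's form and long-header type to the encryption level a
// real connection would use for it.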
EncryptionLevel HeaderToEncryptionLevel(const QuicPacketHeader& header) {
if (header.form == IETF_QUIC_SHORT_HEADER_PACKET) {
return ENCRYPTION_FORWARD_SECURE;
} else if (header.form == IETF_QUIC_LONG_HEADER_PACKET) {
if (header.long_packet_type == HANDSHAKE) {
return ENCRYPTION_HANDSHAKE;
} else if (header.long_packet_type == ZERO_RTT_PROTECTED) {
return ENCRYPTION_ZERO_RTT;
}
}
return ENCRYPTION_INITIAL;
}
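// Serializes |frames| into an unencrypted QuicPacket, sizing the buffer from
// the header length plus the serialized length of each frame.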
std::unique_ptr<QuicPacket> BuildUnsizedDataPacket(
QuicFramer* framer, const QuicPacketHeader& header,
const QuicFrames& frames) {
const size_t max_plaintext_size =
framer->GetMaxPlaintextSize(kMaxOutgoingPacketSize);
size_t packet_size = GetPacketHeaderSize(framer->transport_version(), header);
for (size_t i = 0; i < frames.size(); ++i) {
QUICHE_DCHECK_LE(packet_size, max_plaintext_size);
bool first_frame = i == 0;
bool last_frame = i == frames.size() - 1;
const size_t frame_size = framer->GetSerializedFrameLength(
frames[i], max_plaintext_size - packet_size, first_frame, last_frame,
header.packet_number_length);
QUICHE_DCHECK(frame_size);
packet_size += frame_size;
}
return BuildUnsizedDataPacket(framer, header, frames, packet_size);
}
std::unique_ptr<QuicPacket> BuildUnsizedDataPacket(
QuicFramer* framer, const QuicPacketHeader& header,
const QuicFrames& frames, size_t packet_size) {
char* buffer = new char[packet_size];
EncryptionLevel level = HeaderToEncryptionLevel(header);
size_t length =
framer->BuildDataPacket(header, frames, buffer, packet_size, level);
if (length == 0) {
delete[] buffer;
return nullptr;
}
return std::make_unique<QuicPacket>(
buffer, length, true,
GetIncludedDestinationConnectionIdLength(header),
GetIncludedSourceConnectionIdLength(header), header.version_flag,
header.nonce != nullptr, header.packet_number_length,
header.retry_token_length_length, header.retry_token.length(),
header.length_length);
}
std::string Sha1Hash(absl::string_view data) {
char buffer[SHA_DIGEST_LENGTH];
SHA1(reinterpret_cast<const uint8_t*>(data.data()), data.size(),
reinterpret_cast<uint8_t*>(buffer));
return std::string(buffer, ABSL_ARRAYSIZE(buffer));
}
bool ClearControlFrame(const QuicFrame& frame) {
DeleteFrame(&const_cast<QuicFrame&>(frame));
return true;
}
bool ClearControlFrameWithTransmissionType(const QuicFrame& frame,
TransmissionType ) {
return ClearControlFrame(frame);
}
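// SimpleRandom: a seedable, deterministic RNG (ChaCha20 keystream) so tests
// that need "random" bytes stay reproducible.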
uint64_t SimpleRandom::RandUint64() {
uint64_t result;
RandBytes(&result, sizeof(result));
return result;
}
void SimpleRandom::RandBytes(void* data, size_t len) {
uint8_t* data_bytes = reinterpret_cast<uint8_t*>(data);
while (len > 0) {
const size_t buffer_left = sizeof(buffer_) - buffer_offset_;
const size_t to_copy = std::min(buffer_left, len);
memcpy(data_bytes, buffer_ + buffer_offset_, to_copy);
data_bytes += to_copy;
buffer_offset_ += to_copy;
len -= to_copy;
if (buffer_offset_ == sizeof(buffer_)) {
FillBuffer();
}
}
}
void SimpleRandom::InsecureRandBytes(void* data, size_t len) {
RandBytes(data, len);
}
uint64_t SimpleRandom::InsecureRandUint64() { return RandUint64(); }
void SimpleRandom::FillBuffer() {
uint8_t nonce[12];
memcpy(nonce, buffer_, sizeof(nonce));
CRYPTO_chacha_20(buffer_, buffer_, sizeof(buffer_), key_, nonce, 0);
buffer_offset_ = 0;
}
void SimpleRandom::set_seed(uint64_t seed) {
static_assert(sizeof(key_) == SHA256_DIGEST_LENGTH, "Key has to be 256 bits");
SHA256(reinterpret_cast<const uint8_t*>(&seed), sizeof(seed), key_);
memset(buffer_, 0, sizeof(buffer_));
FillBuffer();
}
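// Mock framer visitor whose callbacks default to "keep processing the packet".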
MockFramerVisitor::MockFramerVisitor() {
ON_CALL(*this, OnProtocolVersionMismatch(_))
.WillByDefault(testing::Return(false));
ON_CALL(*this, OnUnauthenticatedHeader(_))
.WillByDefault(testing::Return(true));
ON_CALL(*this, OnUnauthenticatedPublicHeader(_))
.WillByDefault(testing::Return(true));
ON_CALL(*this, OnPacketHeader(_)).WillByDefault(testing::Return(true));
ON_CALL(*this, OnStreamFrame(_)).WillByDefault(testing::Return(true));
ON_CALL(*this, OnCryptoFrame(_)).WillByDefault(testing::Return(true));
ON_CALL(*this, OnStopWaitingFrame(_)).WillByDefault(testing::Return(true));
ON_CALL(*this, OnPaddingFrame(_)).WillByDefault(testing::Return(true));
ON_CALL(*this, OnPingFrame(_)).WillByDefault(testing::Return(true));
ON_CALL(*this, OnRstStreamFrame(_)).WillByDefault(testing::Return(true));
ON_CALL(*this, OnConnectionCloseFrame(_))
.WillByDefault(testing::Return(true));
ON_CALL(*this, OnStopSendingFrame(_)).WillByDefault(testing::Return(true));
ON_CALL(*this, OnPathChallengeFrame(_)).WillByDefault(testing::Return(true));
ON_CALL(*this, OnPathResponseFrame(_)).WillByDefault(testing::Return(true));
ON_CALL(*this, OnGoAwayFrame(_)).WillByDefault(testing::Return(true));
ON_CALL(*this, OnMaxStreamsFrame(_)).WillByDefault(testing::Return(true));
ON_CALL(*this, OnStreamsBlockedFrame(_)).WillByDefault(testing::Return(true));
}
MockFramerVisitor::~MockFramerVisitor() {}
bool NoOpFramerVisitor::OnProtocolVersionMismatch(
ParsedQuicVersion ) {
return false;
}
bool NoOpFramerVisitor::OnUnauthenticatedPublicHeader(
const QuicPacketHeader& ) {
return true;
}
bool NoOpFramerVisitor::OnUnauthenticatedHeader(
const QuicPacketHeader& ) {
return true;
}
bool NoOpFramerVisitor::OnPacketHeader(const QuicPacketHeader& ) {
return true;
}
void NoOpFramerVisitor::OnCoalescedPacket(
const QuicEncryptedPacket& ) {}
void NoOpFramerVisitor::OnUndecryptablePacket(
const QuicEncryptedPacket& , EncryptionLevel ,
bool ) {}
bool NoOpFramerVisitor::OnStreamFrame(const QuicStreamFrame& ) {
return true;
}
bool NoOpFramerVisitor::OnCryptoFrame(const QuicCryptoFrame& ) {
return true;
}
bool NoOpFramerVisitor::OnAckFrameStart(QuicPacketNumber ,
QuicTime::Delta ) {
return true;
}
bool NoOpFramerVisitor::OnAckRange(QuicPacketNumber ,
QuicPacketNumber ) {
return true;
}
bool NoOpFramerVisitor::OnAckTimestamp(QuicPacketNumber ,
QuicTime ) {
return true;
}
bool NoOpFramerVisitor::OnAckFrameEnd(
QuicPacketNumber ,
const std::optional<QuicEcnCounts>& ) {
return true;
}
bool NoOpFramerVisitor::OnStopWaitingFrame(
const QuicStopWaitingFrame& ) {
return true;
}
bool NoOpFramerVisitor::OnPaddingFrame(const QuicPaddingFrame& ) {
return true;
}
bool NoOpFramerVisitor::OnPingFrame(const QuicPingFrame& ) {
return true;
}
bool NoOpFramerVisitor::OnRstStreamFrame(const QuicRstStreamFrame& ) {
return true;
}
bool NoOpFramerVisitor::OnConnectionCloseFrame(
const QuicConnectionCloseFrame& ) {
return true;
}
bool NoOpFramerVisitor::OnNewConnectionIdFrame(
const QuicNewConnectionIdFrame& ) {
return true;
}
bool NoOpFramerVisitor::OnRetireConnectionIdFrame(
const QuicRetireConnectionIdFrame& ) {
return true;
}
bool NoOpFramerVisitor::OnNewTokenFrame(const QuicNewTokenFrame& ) {
return true;
}
bool NoOpFramerVisitor::OnStopSendingFrame(
const QuicStopSendingFrame& ) {
return true;
}
bool NoOpFramerVisitor::OnPathChallengeFrame(
const QuicPathChallengeFrame& ) {
return true;
}
bool NoOpFramerVisitor::OnPathResponseFrame(
const QuicPathResponseFrame& ) {
return true;
}
bool NoOpFramerVisitor::OnGoAwayFrame(const QuicGoAwayFrame& ) {
return true;
}
bool NoOpFramerVisitor::OnMaxStreamsFrame(
const QuicMaxStreamsFrame& ) {
return true;
}
bool NoOpFramerVisitor::OnStreamsBlockedFrame(
const QuicStreamsBlockedFrame& ) {
return true;
}
bool NoOpFramerVisitor::OnWindowUpdateFrame(
const QuicWindowUpdateFrame& ) {
return true;
}
bool NoOpFramerVisitor::OnBlockedFrame(const QuicBlockedFrame& ) {
return true;
}
bool NoOpFramerVisitor::OnMessageFrame(const QuicMessageFrame& ) {
return true;
}
bool NoOpFramerVisitor::OnHandshakeDoneFrame(
const QuicHandshakeDoneFrame& ) {
return true;
}
bool NoOpFramerVisitor::OnAckFrequencyFrame(
const QuicAckFrequencyFrame& ) {
return true;
}
bool NoOpFramerVisitor::OnResetStreamAtFrame(
const QuicResetStreamAtFrame& ) {
return true;
}
bool NoOpFramerVisitor::IsValidStatelessResetToken(
const StatelessResetToken& ) const {
return false;
}
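// Default-constructed mocks for the connection, helper, and alarm machinery.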
MockQuicConnectionVisitor::MockQuicConnectionVisitor() {
ON_CALL(*this, GetFlowControlSendWindowSize(_))
.WillByDefault(Return(std::numeric_limits<QuicByteCount>::max()));
}
MockQuicConnectionVisitor::~MockQuicConnectionVisitor() {}
MockQuicConnectionHelper::MockQuicConnectionHelper() {}
MockQuicConnectionHelper::~MockQuicConnectionHelper() {}
const MockClock* MockQuicConnectionHelper::GetClock() const { return &clock_; }
MockClock* MockQuicConnectionHelper::GetClock() { return &clock_; }
QuicRandom* MockQuicConnectionHelper::GetRandomGenerator() {
return &random_generator_;
}
QuicAlarm* MockAlarmFactory::CreateAlarm(QuicAlarm::Delegate* delegate) {
return new MockAlarmFactory::TestAlarm(
QuicArenaScopedPtr<QuicAlarm::Delegate>(delegate));
}
QuicArenaScopedPtr<QuicAlarm> MockAlarmFactory::CreateAlarm(
QuicArenaScopedPtr<QuicAlarm::Delegate> delegate,
QuicConnectionArena* arena) {
if (arena != nullptr) {
return arena->New<TestAlarm>(std::move(delegate));
} else {
return QuicArenaScopedPtr<TestAlarm>(new TestAlarm(std::move(delegate)));
}
}
quiche::QuicheBufferAllocator*
MockQuicConnectionHelper::GetStreamSendBufferAllocator() {
return &buffer_allocator_;
}
void MockQuicConnectionHelper::AdvanceTime(QuicTime::Delta delta) {
clock_.AdvanceTime(delta);
}
MockQuicConnection::MockQuicConnection(QuicConnectionHelperInterface* helper,
QuicAlarmFactory* alarm_factory,
Perspective perspective)
: MockQuicConnection(TestConnectionId(),
QuicSocketAddress(TestPeerIPAddress(), kTestPort),
helper, alarm_factory, perspective,
ParsedVersionOfIndex(CurrentSupportedVersions(), 0)) {}
MockQuicConnection::MockQuicConnection(QuicSocketAddress address,
QuicConnectionHelperInterface* helper,
QuicAlarmFactory* alarm_factory,
Perspective perspective)
: MockQuicConnection(TestConnectionId(), address, helper, alarm_factory,
perspective,
ParsedVersionOfIndex(CurrentSupportedVersions(), 0)) {}
MockQuicConnection::MockQuicConnection(QuicConnectionId connection_id,
QuicConnectionHelperInterface* helper,
QuicAlarmFactory* alarm_factory,
Perspective perspective)
: MockQuicConnection(connection_id,
QuicSocketAddress(TestPeerIPAddress(), kTestPort),
helper, alarm_factory, perspective,
ParsedVersionOfIndex(CurrentSupportedVersions(), 0)) {}
MockQuicConnection::MockQuicConnection(
QuicConnectionHelperInterface* helper, QuicAlarmFactory* alarm_factory,
Perspective perspective, const ParsedQuicVersionVector& supported_versions)
: MockQuicConnection(
TestConnectionId(), QuicSocketAddress(TestPeerIPAddress(), kTestPort),
helper, alarm_factory, perspective, supported_versions) {}
MockQuicConnection::MockQuicConnection(
QuicConnectionId connection_id, QuicSocketAddress initial_peer_address,
QuicConnectionHelperInterface* helper, QuicAlarmFactory* alarm_factory,
Perspective perspective, const ParsedQuicVersionVector& supported_versions)
: QuicConnection(
connection_id,
          /*self_address=*/QuicSocketAddress(QuicIpAddress::Any4(), 5),
          initial_peer_address, helper, alarm_factory,
          new testing::NiceMock<MockPacketWriter>(),
          /*owns_writer=*/true, perspective, supported_versions,
connection_id_generator_) {
ON_CALL(*this, OnError(_))
.WillByDefault(
Invoke(this, &PacketSavingConnection::QuicConnection_OnError));
ON_CALL(*this, SendCryptoData(_, _, _))
.WillByDefault(
Invoke(this, &MockQuicConnection::QuicConnection_SendCryptoData));
SetSelfAddress(QuicSocketAddress(QuicIpAddress::Any4(), 5));
}
MockQuicConnection::~MockQuicConnection() {}
void MockQuicConnection::AdvanceTime(QuicTime::Delta delta) {
static_cast<MockQuicConnectionHelper*>(helper())->AdvanceTime(delta);
}
bool MockQuicConnection::OnProtocolVersionMismatch(
ParsedQuicVersion ) {
return false;
}
PacketSavingConnection::PacketSavingConnection(MockQuicConnectionHelper* helper,
QuicAlarmFactory* alarm_factory,
Perspective perspective)
: MockQuicConnection(helper, alarm_factory, perspective),
mock_helper_(helper) {}
PacketSavingConnection::PacketSavingConnection(
MockQuicConnectionHelper* helper, QuicAlarmFactory* alarm_factory,
Perspective perspective, const ParsedQuicVersionVector& supported_versions)
: MockQuicConnection(helper, alarm_factory, perspective,
supported_versions),
mock_helper_(helper) {}
PacketSavingConnection::~PacketSavingConnection() {}
SerializedPacketFate PacketSavingConnection::GetSerializedPacketFate(
bool , EncryptionLevel ) {
return SEND_TO_WRITER;
}
void PacketSavingConnection::SendOrQueuePacket(SerializedPacket packet) {
encrypted_packets_.push_back(std::make_unique<QuicEncryptedPacket>(
CopyBuffer(packet), packet.encrypted_length, true));
MockClock& clock = *mock_helper_->GetClock();
clock.AdvanceTime(QuicTime::Delta::FromMilliseconds(10));
OnPacketSent(packet.encryption_level, packet.transmission_type);
QuicConnectionPeer::GetSentPacketManager(this)->OnPacketSent(
&packet, clock.ApproximateNow(), NOT_RETRANSMISSION,
HAS_RETRANSMITTABLE_DATA, true, ECN_NOT_ECT);
}
std::vector<const QuicEncryptedPacket*> PacketSavingConnection::GetPackets()
const {
std::vector<const QuicEncryptedPacket*> packets;
for (size_t i = num_cleared_packets_; i < encrypted_packets_.size(); ++i) {
packets.push_back(encrypted_packets_[i].get());
}
return packets;
}
void PacketSavingConnection::ClearPackets() {
num_cleared_packets_ = encrypted_packets_.size();
}
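// Mock sessions that consume written stream data without real transport I/O.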
MockQuicSession::MockQuicSession(QuicConnection* connection)
: MockQuicSession(connection, true) {}
MockQuicSession::MockQuicSession(QuicConnection* connection,
bool create_mock_crypto_stream)
: QuicSession(connection, nullptr, DefaultQuicConfig(),
connection->supported_versions(),
                  /*num_expected_unidirectional_static_streams=*/0) {
if (create_mock_crypto_stream) {
crypto_stream_ =
std::make_unique<testing::NiceMock<MockQuicCryptoStream>>(this);
}
ON_CALL(*this, WritevData(_, _, _, _, _, _))
.WillByDefault(testing::Return(QuicConsumedData(0, false)));
}
MockQuicSession::~MockQuicSession() { DeleteConnection(); }
QuicCryptoStream* MockQuicSession::GetMutableCryptoStream() {
return crypto_stream_.get();
}
const QuicCryptoStream* MockQuicSession::GetCryptoStream() const {
return crypto_stream_.get();
}
void MockQuicSession::SetCryptoStream(QuicCryptoStream* crypto_stream) {
crypto_stream_.reset(crypto_stream);
}
QuicConsumedData MockQuicSession::ConsumeData(
QuicStreamId id, size_t write_length, QuicStreamOffset offset,
StreamSendingState state, TransmissionType ,
std::optional<EncryptionLevel> ) {
if (write_length > 0) {
auto buf = std::make_unique<char[]>(write_length);
QuicStream* stream = GetOrCreateStream(id);
QUICHE_DCHECK(stream);
QuicDataWriter writer(write_length, buf.get(), quiche::HOST_BYTE_ORDER);
stream->WriteStreamData(offset, write_length, &writer);
} else {
QUICHE_DCHECK(state != NO_FIN);
}
return QuicConsumedData(write_length, state != NO_FIN);
}
MockQuicCryptoStream::MockQuicCryptoStream(QuicSession* session)
: QuicCryptoStream(session), params_(new QuicCryptoNegotiatedParameters) {}
MockQuicCryptoStream::~MockQuicCryptoStream() {}
ssl_early_data_reason_t MockQuicCryptoStream::EarlyDataReason() const {
return ssl_early_data_unknown;
}
bool MockQuicCryptoStream::one_rtt_keys_available() const { return false; }
const QuicCryptoNegotiatedParameters&
MockQuicCryptoStream::crypto_negotiated_params() const {
return *params_;
}
CryptoMessageParser* MockQuicCryptoStream::crypto_message_parser() {
return &crypto_framer_;
}
MockQuicSpdySession::MockQuicSpdySession(QuicConnection* connection)
: MockQuicSpdySession(connection, true) {}
MockQuicSpdySession::MockQuicSpdySession(QuicConnection* connection,
bool create_mock_crypto_stream)
: QuicSpdySession(connection, nullptr, DefaultQuicConfig(),
connection->supported_versions()) {
if (create_mock_crypto_stream) {
crypto_stream_ = std::make_unique<MockQuicCryptoStream>(this);
}
ON_CALL(*this, WritevData(_, _, _, _, _, _))
.WillByDefault(testing::Return(QuicConsumedData(0, false)));
ON_CALL(*this, SendWindowUpdate(_, _))
.WillByDefault([this](QuicStreamId id, QuicStreamOffset byte_offset) {
return QuicSpdySession::SendWindowUpdate(id, byte_offset);
});
ON_CALL(*this, SendBlocked(_, _))
.WillByDefault([this](QuicStreamId id, QuicStreamOffset byte_offset) {
return QuicSpdySession::SendBlocked(id, byte_offset);
});
ON_CALL(*this, OnCongestionWindowChange(_)).WillByDefault(testing::Return());
}
MockQuicSpdySession::~MockQuicSpdySession() { DeleteConnection(); }
QuicCryptoStream* MockQuicSpdySession::GetMutableCryptoStream() {
return crypto_stream_.get();
}
const QuicCryptoStream* MockQuicSpdySession::GetCryptoStream() const {
return crypto_stream_.get();
}
void MockQuicSpdySession::SetCryptoStream(QuicCryptoStream* crypto_stream) {
crypto_stream_.reset(crypto_stream);
}
QuicConsumedData MockQuicSpdySession::ConsumeData(
QuicStreamId id, size_t write_length, QuicStreamOffset offset,
StreamSendingState state, TransmissionType ,
std::optional<EncryptionLevel> ) {
if (write_length > 0) {
auto buf = std::make_unique<char[]>(write_length);
QuicStream* stream = GetOrCreateStream(id);
QUICHE_DCHECK(stream);
QuicDataWriter writer(write_length, buf.get(), quiche::HOST_BYTE_ORDER);
stream->WriteStreamData(offset, write_length, &writer);
} else {
QUICHE_DCHECK(state != NO_FIN);
}
return QuicConsumedData(write_length, state != NO_FIN);
}
TestQuicSpdyServerSession::TestQuicSpdyServerSession(
QuicConnection* connection, const QuicConfig& config,
const ParsedQuicVersionVector& supported_versions,
const QuicCryptoServerConfig* crypto_config,
QuicCompressedCertsCache* compressed_certs_cache)
: QuicServerSessionBase(config, supported_versions, connection, &visitor_,
&helper_, crypto_config, compressed_certs_cache) {
ON_CALL(helper_, CanAcceptClientHello(_, _, _, _, _))
.WillByDefault(testing::Return(true));
}
TestQuicSpdyServerSession::~TestQuicSpdyServerSession() { DeleteConnection(); }
std::unique_ptr<QuicCryptoServerStreamBase>
TestQuicSpdyServerSession::CreateQuicCryptoServerStream(
const QuicCryptoServerConfig* crypto_config,
QuicCompressedCertsCache* compressed_certs_cache) {
return CreateCryptoServerStream(crypto_config, compressed_certs_cache, this,
&helper_);
}
QuicCryptoServerStreamBase*
TestQuicSpdyServerSession::GetMutableCryptoStream() {
return QuicServerSessionBase::GetMutableCryptoStream();
}
const QuicCryptoServerStreamBase* TestQuicSpdyServerSession::GetCryptoStream()
const {
return QuicServerSessionBase::GetCryptoStream();
}
TestQuicSpdyClientSession::TestQuicSpdyClientSession(
QuicConnection* connection, const QuicConfig& config,
const ParsedQuicVersionVector& supported_versions,
const QuicServerId& server_id, QuicCryptoClientConfig* crypto_config,
std::optional<QuicSSLConfig> ssl_config)
: QuicSpdyClientSessionBase(connection, nullptr, config,
supported_versions),
ssl_config_(std::move(ssl_config)) {
crypto_stream_ = std::make_unique<QuicCryptoClientStream>(
server_id, this, crypto_test_utils::ProofVerifyContextForTesting(),
      crypto_config, this, /*has_application_state=*/false);
Initialize();
ON_CALL(*this, OnConfigNegotiated())
.WillByDefault(
Invoke(this, &TestQuicSpdyClientSession::RealOnConfigNegotiated));
}
TestQuicSpdyClientSession::~TestQuicSpdyClientSession() {}
QuicCryptoClientStream* TestQuicSpdyClientSession::GetMutableCryptoStream() {
return crypto_stream_.get();
}
const QuicCryptoClientStream* TestQuicSpdyClientSession::GetCryptoStream()
const {
return crypto_stream_.get();
}
void TestQuicSpdyClientSession::RealOnConfigNegotiated() {
QuicSpdyClientSessionBase::OnConfigNegotiated();
}
MockPacketWriter::MockPacketWriter() {
ON_CALL(*this, GetMaxPacketSize(_))
.WillByDefault(testing::Return(kMaxOutgoingPacketSize));
ON_CALL(*this, IsBatchMode()).WillByDefault(testing::Return(false));
ON_CALL(*this, GetNextWriteLocation(_, _))
.WillByDefault(testing::Return(QuicPacketBuffer()));
ON_CALL(*this, Flush())
.WillByDefault(testing::Return(WriteResult(WRITE_STATUS_OK, 0)));
ON_CALL(*this, SupportsReleaseTime()).WillByDefault(testing::Return(false));
}
MockPacketWriter::~MockPacketWriter() {}
MockSendAlgorithm::MockSendAlgorithm() {
ON_CALL(*this, PacingRate(_))
.WillByDefault(testing::Return(QuicBandwidth::Zero()));
ON_CALL(*this, BandwidthEstimate())
.WillByDefault(testing::Return(QuicBandwidth::Zero()));
}
MockSendAlgorithm::~MockSendAlgorithm() {}
MockLossAlgorithm::MockLossAlgorithm() {}
MockLossAlgorithm::~MockLossAlgorithm() {}
MockAckListener::MockAckListener() {}
MockAckListener::~MockAckListener() {}
MockNetworkChangeVisitor::MockNetworkChangeVisitor() {}
MockNetworkChangeVisitor::~MockNetworkChangeVisitor() {}
QuicIpAddress TestPeerIPAddress() { return QuicIpAddress::Loopback4(); }
ParsedQuicVersion QuicVersionMax() { return AllSupportedVersions().front(); }
ParsedQuicVersion QuicVersionMin() { return AllSupportedVersions().back(); }
void DisableQuicVersionsWithTls() {
for (const ParsedQuicVersion& version : AllSupportedVersionsWithTls()) {
QuicDisableVersion(version);
}
}
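// ConstructEncryptedPacket overloads: build, frame, pad, and encrypt a test
// packet, defaulting to a client-perspective packet.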
QuicEncryptedPacket* ConstructEncryptedPacket(
QuicConnectionId destination_connection_id,
QuicConnectionId source_connection_id, bool version_flag, bool reset_flag,
uint64_t packet_number, const std::string& data) {
return ConstructEncryptedPacket(
destination_connection_id, source_connection_id, version_flag, reset_flag,
packet_number, data, CONNECTION_ID_PRESENT, CONNECTION_ID_ABSENT,
PACKET_4BYTE_PACKET_NUMBER);
}
QuicEncryptedPacket* ConstructEncryptedPacket(
QuicConnectionId destination_connection_id,
QuicConnectionId source_connection_id, bool version_flag, bool reset_flag,
uint64_t packet_number, const std::string& data,
QuicConnectionIdIncluded destination_connection_id_included,
QuicConnectionIdIncluded source_connection_id_included,
QuicPacketNumberLength packet_number_length) {
return ConstructEncryptedPacket(
destination_connection_id, source_connection_id, version_flag, reset_flag,
packet_number, data, destination_connection_id_included,
source_connection_id_included, packet_number_length, nullptr);
}
QuicEncryptedPacket* ConstructEncryptedPacket(
QuicConnectionId destination_connection_id,
QuicConnectionId source_connection_id, bool version_flag, bool reset_flag,
uint64_t packet_number, const std::string& data,
QuicConnectionIdIncluded destination_connection_id_included,
QuicConnectionIdIncluded source_connection_id_included,
QuicPacketNumberLength packet_number_length,
ParsedQuicVersionVector* versions) {
return ConstructEncryptedPacket(
destination_connection_id, source_connection_id, version_flag, reset_flag,
      packet_number, data, /*full_padding=*/false,
      destination_connection_id_included,
source_connection_id_included, packet_number_length, versions,
Perspective::IS_CLIENT);
}
QuicEncryptedPacket* ConstructEncryptedPacket(
QuicConnectionId destination_connection_id,
QuicConnectionId source_connection_id, bool version_flag, bool reset_flag,
uint64_t packet_number, const std::string& data, bool full_padding,
QuicConnectionIdIncluded destination_connection_id_included,
QuicConnectionIdIncluded source_connection_id_included,
QuicPacketNumberLength packet_number_length,
ParsedQuicVersionVector* versions) {
return ConstructEncryptedPacket(
destination_connection_id, source_connection_id, version_flag, reset_flag,
packet_number, data, full_padding, destination_connection_id_included,
source_connection_id_included, packet_number_length, versions,
Perspective::IS_CLIENT);
}
QuicEncryptedPacket* ConstructEncryptedPacket(
QuicConnectionId destination_connection_id,
QuicConnectionId source_connection_id, bool version_flag, bool reset_flag,
uint64_t packet_number, const std::string& data, bool full_padding,
QuicConnectionIdIncluded destination_connection_id_included,
QuicConnectionIdIncluded source_connection_id_included,
QuicPacketNumberLength packet_number_length,
ParsedQuicVersionVector* versions, Perspective perspective) {
QuicPacketHeader header;
header.destination_connection_id = destination_connection_id;
header.destination_connection_id_included =
destination_connection_id_included;
header.source_connection_id = source_connection_id;
header.source_connection_id_included = source_connection_id_included;
header.version_flag = version_flag;
header.reset_flag = reset_flag;
header.packet_number_length = packet_number_length;
header.packet_number = QuicPacketNumber(packet_number);
ParsedQuicVersionVector supported_versions = CurrentSupportedVersions();
if (!versions) {
versions = &supported_versions;
}
EXPECT_FALSE(versions->empty());
ParsedQuicVersion version = (*versions)[0];
if (QuicVersionHasLongHeaderLengths(version.transport_version) &&
version_flag) {
header.retry_token_length_length = quiche::VARIABLE_LENGTH_INTEGER_LENGTH_1;
header.length_length = quiche::VARIABLE_LENGTH_INTEGER_LENGTH_2;
}
QuicFrames frames;
QuicFramer framer(*versions, QuicTime::Zero(), perspective,
kQuicDefaultConnectionIdLength);
framer.SetInitialObfuscators(destination_connection_id);
EncryptionLevel level =
header.version_flag ? ENCRYPTION_INITIAL : ENCRYPTION_FORWARD_SECURE;
if (level != ENCRYPTION_INITIAL) {
framer.SetEncrypter(level, std::make_unique<TaggingEncrypter>(level));
}
if (!QuicVersionUsesCryptoFrames(version.transport_version)) {
QuicFrame frame(
QuicStreamFrame(QuicUtils::GetCryptoStreamId(version.transport_version),
false, 0, absl::string_view(data)));
frames.push_back(frame);
} else {
QuicFrame frame(new QuicCryptoFrame(level, 0, data));
frames.push_back(frame);
}
if (full_padding) {
frames.push_back(QuicFrame(QuicPaddingFrame(-1)));
} else {
size_t min_plaintext_size = QuicPacketCreator::MinPlaintextPacketSize(
version, packet_number_length);
if (data.length() < min_plaintext_size) {
size_t padding_length = min_plaintext_size - data.length();
frames.push_back(QuicFrame(QuicPaddingFrame(padding_length)));
}
}
std::unique_ptr<QuicPacket> packet(
BuildUnsizedDataPacket(&framer, header, frames));
EXPECT_TRUE(packet != nullptr);
char* buffer = new char[kMaxOutgoingPacketSize];
size_t encrypted_length =
framer.EncryptPayload(level, QuicPacketNumber(packet_number), *packet,
buffer, kMaxOutgoingPacketSize);
EXPECT_NE(0u, encrypted_length);
DeleteFrames(&frames);
return new QuicEncryptedPacket(buffer, encrypted_length, true);
}
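// Builds a 0-RTT packet the server cannot yet decrypt, for tests that queue
// undecryptable packets.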
std::unique_ptr<QuicEncryptedPacket> GetUndecryptableEarlyPacket(
const ParsedQuicVersion& version,
const QuicConnectionId& server_connection_id) {
QuicPacketHeader header;
header.destination_connection_id = server_connection_id;
header.destination_connection_id_included = CONNECTION_ID_PRESENT;
header.source_connection_id = EmptyQuicConnectionId();
header.source_connection_id_included = CONNECTION_ID_PRESENT;
if (!version.SupportsClientConnectionIds()) {
header.source_connection_id_included = CONNECTION_ID_ABSENT;
}
header.version_flag = true;
header.reset_flag = false;
header.packet_number_length = PACKET_4BYTE_PACKET_NUMBER;
header.packet_number = QuicPacketNumber(33);
header.long_packet_type = ZERO_RTT_PROTECTED;
if (version.HasLongHeaderLengths()) {
header.retry_token_length_length = quiche::VARIABLE_LENGTH_INTEGER_LENGTH_1;
header.length_length = quiche::VARIABLE_LENGTH_INTEGER_LENGTH_2;
}
QuicFrames frames;
frames.push_back(QuicFrame(QuicPingFrame()));
frames.push_back(QuicFrame(QuicPaddingFrame(100)));
QuicFramer framer({version}, QuicTime::Zero(), Perspective::IS_CLIENT,
kQuicDefaultConnectionIdLength);
framer.SetInitialObfuscators(server_connection_id);
framer.SetEncrypter(ENCRYPTION_ZERO_RTT,
std::make_unique<TaggingEncrypter>(ENCRYPTION_ZERO_RTT));
std::unique_ptr<QuicPacket> packet(
BuildUnsizedDataPacket(&framer, header, frames));
EXPECT_TRUE(packet != nullptr);
char* buffer = new char[kMaxOutgoingPacketSize];
size_t encrypted_length =
framer.EncryptPayload(ENCRYPTION_ZERO_RTT, header.packet_number, *packet,
buffer, kMaxOutgoingPacketSize);
EXPECT_NE(0u, encrypted_length);
DeleteFrames(&frames);
return std::make_unique<QuicEncryptedPacket>(buffer, encrypted_length,
true);
}
QuicReceivedPacket* ConstructReceivedPacket(
const QuicEncryptedPacket& encrypted_packet, QuicTime receipt_time) {
return ConstructReceivedPacket(encrypted_packet, receipt_time, ECN_NOT_ECT);
}
QuicReceivedPacket* ConstructReceivedPacket(
const QuicEncryptedPacket& encrypted_packet, QuicTime receipt_time,
QuicEcnCodepoint ecn) {
char* buffer = new char[encrypted_packet.length()];
memcpy(buffer, encrypted_packet.data(), encrypted_packet.length());
return new QuicReceivedPacket(buffer, encrypted_packet.length(), receipt_time,
                                /*owns_buffer=*/true, /*ttl=*/0,
                                /*ttl_valid=*/true, /*packet_headers=*/nullptr,
                                /*headers_length=*/0,
                                /*owns_header_buffer=*/false, ecn);
}
QuicEncryptedPacket* ConstructMisFramedEncryptedPacket(
QuicConnectionId destination_connection_id,
QuicConnectionId source_connection_id, bool version_flag, bool reset_flag,
uint64_t packet_number, const std::string& data,
QuicConnectionIdIncluded destination_connection_id_included,
QuicConnectionIdIncluded source_connection_id_included,
QuicPacketNumberLength packet_number_length, ParsedQuicVersion version,
Perspective perspective) {
QuicPacketHeader header;
header.destination_connection_id = destination_connection_id;
header.destination_connection_id_included =
destination_connection_id_included;
header.source_connection_id = source_connection_id;
header.source_connection_id_included = source_connection_id_included;
header.version_flag = version_flag;
header.reset_flag = reset_flag;
header.packet_number_length = packet_number_length;
header.packet_number = QuicPacketNumber(packet_number);
if (QuicVersionHasLongHeaderLengths(version.transport_version) &&
version_flag) {
header.retry_token_length_length = quiche::VARIABLE_LENGTH_INTEGER_LENGTH_1;
header.length_length = quiche::VARIABLE_LENGTH_INTEGER_LENGTH_2;
}
QuicFrame frame(QuicStreamFrame(1, false, 0, absl::string_view(data)));
QuicFrames frames;
frames.push_back(frame);
QuicFramer framer({version}, QuicTime::Zero(), perspective,
kQuicDefaultConnectionIdLength);
framer.SetInitialObfuscators(destination_connection_id);
EncryptionLevel level =
version_flag ? ENCRYPTION_INITIAL : ENCRYPTION_FORWARD_SECURE;
if (level != ENCRYPTION_INITIAL) {
framer.SetEncrypter(level, std::make_unique<TaggingEncrypter>(level));
}
if (data.length() < 7) {
size_t padding_length = 7 - data.length();
frames.push_back(QuicFrame(QuicPaddingFrame(padding_length)));
}
std::unique_ptr<QuicPacket> packet(
BuildUnsizedDataPacket(&framer, header, frames));
EXPECT_TRUE(packet != nullptr);
reinterpret_cast<unsigned char*>(
packet->mutable_data())[GetStartOfEncryptedData(
framer.transport_version(),
GetIncludedDestinationConnectionIdLength(header),
GetIncludedSourceConnectionIdLength(header), version_flag,
             /*include_diversification_nonce=*/false, packet_number_length,
header.retry_token_length_length, 0, header.length_length)] = 0x1F;
char* buffer = new char[kMaxOutgoingPacketSize];
size_t encrypted_length =
framer.EncryptPayload(level, QuicPacketNumber(packet_number), *packet,
buffer, kMaxOutgoingPacketSize);
EXPECT_NE(0u, encrypted_length);
return new QuicEncryptedPacket(buffer, encrypted_length, true);
}
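// Test default config: generous flow-control windows plus the NSTP option.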
QuicConfig DefaultQuicConfig() {
QuicConfig config;
config.SetInitialMaxStreamDataBytesIncomingBidirectionalToSend(
kInitialStreamFlowControlWindowForTest);
config.SetInitialMaxStreamDataBytesOutgoingBidirectionalToSend(
kInitialStreamFlowControlWindowForTest);
config.SetInitialMaxStreamDataBytesUnidirectionalToSend(
kInitialStreamFlowControlWindowForTest);
config.SetInitialStreamFlowControlWindowToSend(
kInitialStreamFlowControlWindowForTest);
config.SetInitialSessionFlowControlWindowToSend(
kInitialSessionFlowControlWindowForTest);
QuicConfigPeer::SetReceivedMaxBidirectionalStreams(
&config, kDefaultMaxStreamsPerConnection);
if (!config.HasClientSentConnectionOption(quic::kNSTP,
quic::Perspective::IS_CLIENT)) {
quic::QuicTagVector connection_options;
connection_options.push_back(quic::kNSTP);
config.SetConnectionOptionsToSend(connection_options);
}
return config;
}
ParsedQuicVersionVector SupportedVersions(ParsedQuicVersion version) {
ParsedQuicVersionVector versions;
versions.push_back(version);
return versions;
}
MockQuicConnectionDebugVisitor::MockQuicConnectionDebugVisitor() {}
MockQuicConnectionDebugVisitor::~MockQuicConnectionDebugVisitor() {}
MockReceivedPacketManager::MockReceivedPacketManager(QuicConnectionStats* stats)
: QuicReceivedPacketManager(stats) {}
MockReceivedPacketManager::~MockReceivedPacketManager() {}
MockPacketCreatorDelegate::MockPacketCreatorDelegate() {}
MockPacketCreatorDelegate::~MockPacketCreatorDelegate() {}
MockSessionNotifier::MockSessionNotifier() {}
MockSessionNotifier::~MockSessionNotifier() {}
QuicCryptoClientStream::HandshakerInterface*
QuicCryptoClientStreamPeer::GetHandshaker(QuicCryptoClientStream* stream) {
return stream->handshaker_.get();
}
void CreateClientSessionForTest(
QuicServerId server_id, QuicTime::Delta connection_start_time,
const ParsedQuicVersionVector& supported_versions,
MockQuicConnectionHelper* helper, QuicAlarmFactory* alarm_factory,
QuicCryptoClientConfig* crypto_client_config,
PacketSavingConnection** client_connection,
TestQuicSpdyClientSession** client_session) {
QUICHE_CHECK(crypto_client_config);
QUICHE_CHECK(client_connection);
QUICHE_CHECK(client_session);
QUICHE_CHECK(!connection_start_time.IsZero())
<< "Connections must start at non-zero times, otherwise the "
<< "strike-register will be unhappy.";
QuicConfig config = DefaultQuicConfig();
*client_connection = new PacketSavingConnection(
helper, alarm_factory, Perspective::IS_CLIENT, supported_versions);
*client_session = new TestQuicSpdyClientSession(*client_connection, config,
supported_versions, server_id,
crypto_client_config);
(*client_connection)->AdvanceTime(connection_start_time);
}
void CreateServerSessionForTest(
    QuicServerId /*server_id*/, QuicTime::Delta connection_start_time,
ParsedQuicVersionVector supported_versions,
MockQuicConnectionHelper* helper, QuicAlarmFactory* alarm_factory,
QuicCryptoServerConfig* server_crypto_config,
QuicCompressedCertsCache* compressed_certs_cache,
PacketSavingConnection** server_connection,
TestQuicSpdyServerSession** server_session) {
QUICHE_CHECK(server_crypto_config);
QUICHE_CHECK(server_connection);
QUICHE_CHECK(server_session);
QUICHE_CHECK(!connection_start_time.IsZero())
<< "Connections must start at non-zero times, otherwise the "
<< "strike-register will be unhappy.";
*server_connection =
new PacketSavingConnection(helper, alarm_factory, Perspective::IS_SERVER,
ParsedVersionOfIndex(supported_versions, 0));
*server_session = new TestQuicSpdyServerSession(
*server_connection, DefaultQuicConfig(), supported_versions,
server_crypto_config, compressed_certs_cache);
(*server_session)->Initialize();
(*server_connection)->AdvanceTime(connection_start_time);
}
QuicStreamId GetNthClientInitiatedBidirectionalStreamId(
QuicTransportVersion version, int n) {
int num = n;
if (!VersionUsesHttp3(version)) {
num++;
}
return QuicUtils::GetFirstBidirectionalStreamId(version,
Perspective::IS_CLIENT) +
QuicUtils::StreamIdDelta(version) * num;
}
QuicStreamId GetNthServerInitiatedBidirectionalStreamId(
QuicTransportVersion version, int n) {
return QuicUtils::GetFirstBidirectionalStreamId(version,
Perspective::IS_SERVER) +
QuicUtils::StreamIdDelta(version) * n;
}
QuicStreamId GetNthServerInitiatedUnidirectionalStreamId(
QuicTransportVersion version, int n) {
return QuicUtils::GetFirstUnidirectionalStreamId(version,
Perspective::IS_SERVER) +
QuicUtils::StreamIdDelta(version) * n;
}
QuicStreamId GetNthClientInitiatedUnidirectionalStreamId(
QuicTransportVersion version, int n) {
return QuicUtils::GetFirstUnidirectionalStreamId(version,
Perspective::IS_CLIENT) +
QuicUtils::StreamIdDelta(version) * n;
}
StreamType DetermineStreamType(QuicStreamId id, ParsedQuicVersion version,
Perspective perspective, bool is_incoming,
StreamType default_type) {
return version.HasIetfQuicFrames()
? QuicUtils::GetStreamType(id, perspective, is_incoming, version)
: default_type;
}
quiche::QuicheMemSlice MemSliceFromString(absl::string_view data) {
if (data.empty()) {
return quiche::QuicheMemSlice();
}
static quiche::SimpleBufferAllocator* allocator =
new quiche::SimpleBufferAllocator();
return quiche::QuicheMemSlice(quiche::QuicheBuffer::Copy(allocator, data));
}
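// TaggingEncrypter/TaggingDecrypter: stand-ins for real crypto that append
// (and check) kTagSize copies of a one-byte tag instead of encrypting.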
bool TaggingEncrypter::EncryptPacket(uint64_t ,
absl::string_view ,
absl::string_view plaintext, char* output,
size_t* output_length,
size_t max_output_length) {
const size_t len = plaintext.size() + kTagSize;
if (max_output_length < len) {
return false;
}
memmove(output, plaintext.data(), plaintext.size());
output += plaintext.size();
memset(output, tag_, kTagSize);
*output_length = len;
return true;
}
bool TaggingDecrypter::DecryptPacket(uint64_t ,
absl::string_view ,
absl::string_view ciphertext, char* output,
size_t* output_length,
size_t ) {
if (ciphertext.size() < kTagSize) {
return false;
}
if (!CheckTag(ciphertext, GetTag(ciphertext))) {
return false;
}
*output_length = ciphertext.size() - kTagSize;
memcpy(output, ciphertext.data(), *output_length);
return true;
}
bool TaggingDecrypter::CheckTag(absl::string_view ciphertext, uint8_t tag) {
for (size_t i = ciphertext.size() - kTagSize; i < ciphertext.size(); i++) {
if (ciphertext.data()[i] != tag) {
return false;
}
}
return true;
}
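// TestPacketWriter: captures written packets, re-parses them with a framer,
// and can simulate blocked, failed, or oversized writes.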
TestPacketWriter::TestPacketWriter(ParsedQuicVersion version, MockClock* clock,
Perspective perspective)
: version_(version),
framer_(SupportedVersions(version_),
QuicUtils::InvertPerspective(perspective)),
clock_(clock) {
QuicFramerPeer::SetLastSerializedServerConnectionId(framer_.framer(),
TestConnectionId());
framer_.framer()->SetInitialObfuscators(TestConnectionId());
for (int i = 0; i < 128; ++i) {
PacketBuffer* p = new PacketBuffer();
packet_buffer_pool_.push_back(p);
packet_buffer_pool_index_[p->buffer] = p;
packet_buffer_free_list_.push_back(p);
}
}
TestPacketWriter::~TestPacketWriter() {
EXPECT_EQ(packet_buffer_pool_.size(), packet_buffer_free_list_.size())
<< packet_buffer_pool_.size() - packet_buffer_free_list_.size()
<< " out of " << packet_buffer_pool_.size()
<< " packet buffers have been leaked.";
for (auto p : packet_buffer_pool_) {
delete p;
}
}
WriteResult TestPacketWriter::WritePacket(
const char* buffer, size_t buf_len, const QuicIpAddress& self_address,
const QuicSocketAddress& peer_address, PerPacketOptions* ,
const QuicPacketWriterParams& params) {
last_write_source_address_ = self_address;
last_write_peer_address_ = peer_address;
if (packet_buffer_pool_index_.find(const_cast<char*>(buffer)) !=
packet_buffer_pool_index_.end()) {
FreePacketBuffer(buffer);
}
QuicEncryptedPacket packet(buffer, buf_len);
++packets_write_attempts_;
if (packet.length() >= sizeof(final_bytes_of_last_packet_)) {
final_bytes_of_previous_packet_ = final_bytes_of_last_packet_;
memcpy(&final_bytes_of_last_packet_, packet.data() + packet.length() - 4,
sizeof(final_bytes_of_last_packet_));
}
if (framer_.framer()->version().KnowsWhichDecrypterToUse()) {
framer_.framer()->InstallDecrypter(ENCRYPTION_HANDSHAKE,
std::make_unique<TaggingDecrypter>());
framer_.framer()->InstallDecrypter(ENCRYPTION_ZERO_RTT,
std::make_unique<TaggingDecrypter>());
framer_.framer()->InstallDecrypter(ENCRYPTION_FORWARD_SECURE,
std::make_unique<TaggingDecrypter>());
} else if (!framer_.framer()->HasDecrypterOfEncryptionLevel(
ENCRYPTION_FORWARD_SECURE) &&
!framer_.framer()->HasDecrypterOfEncryptionLevel(
ENCRYPTION_ZERO_RTT)) {
framer_.framer()->SetAlternativeDecrypter(
ENCRYPTION_FORWARD_SECURE,
std::make_unique<StrictTaggingDecrypter>(ENCRYPTION_FORWARD_SECURE),
false);
}
EXPECT_EQ(next_packet_processable_, framer_.ProcessPacket(packet))
<< framer_.framer()->detailed_error() << " perspective "
<< framer_.framer()->perspective();
next_packet_processable_ = true;
if (block_on_next_write_) {
write_blocked_ = true;
block_on_next_write_ = false;
}
if (next_packet_too_large_) {
next_packet_too_large_ = false;
return WriteResult(WRITE_STATUS_ERROR, *MessageTooBigErrorCode());
}
if (always_get_packet_too_large_) {
return WriteResult(WRITE_STATUS_ERROR, *MessageTooBigErrorCode());
}
if (IsWriteBlocked()) {
return WriteResult(is_write_blocked_data_buffered_
? WRITE_STATUS_BLOCKED_DATA_BUFFERED
: WRITE_STATUS_BLOCKED,
0);
}
if (ShouldWriteFail()) {
return WriteResult(WRITE_STATUS_ERROR, write_error_code_);
}
last_packet_size_ = packet.length();
total_bytes_written_ += packet.length();
last_packet_header_ = framer_.header();
if (!framer_.connection_close_frames().empty()) {
++connection_close_packets_;
}
if (!write_pause_time_delta_.IsZero()) {
clock_->AdvanceTime(write_pause_time_delta_);
}
if (is_batch_mode_) {
bytes_buffered_ += last_packet_size_;
return WriteResult(WRITE_STATUS_OK, 0);
}
last_ecn_sent_ = params.ecn_codepoint;
return WriteResult(WRITE_STATUS_OK, last_packet_size_);
}
QuicPacketBuffer TestPacketWriter::GetNextWriteLocation(
const QuicIpAddress& ,
const QuicSocketAddress& ) {
return {AllocPacketBuffer(), [this](const char* p) { FreePacketBuffer(p); }};
}
WriteResult TestPacketWriter::Flush() {
flush_attempts_++;
if (block_on_next_flush_) {
block_on_next_flush_ = false;
SetWriteBlocked();
return WriteResult(WRITE_STATUS_BLOCKED, -1);
}
if (write_should_fail_) {
return WriteResult(WRITE_STATUS_ERROR, -1);
}
int bytes_flushed = bytes_buffered_;
bytes_buffered_ = 0;
return WriteResult(WRITE_STATUS_OK, bytes_flushed);
}
char* TestPacketWriter::AllocPacketBuffer() {
PacketBuffer* p = packet_buffer_free_list_.front();
EXPECT_FALSE(p->in_use);
p->in_use = true;
packet_buffer_free_list_.pop_front();
return p->buffer;
}
void TestPacketWriter::FreePacketBuffer(const char* buffer) {
auto iter = packet_buffer_pool_index_.find(const_cast<char*>(buffer));
ASSERT_TRUE(iter != packet_buffer_pool_index_.end());
PacketBuffer* p = iter->second;
ASSERT_TRUE(p->in_use);
p->in_use = false;
packet_buffer_free_list_.push_back(p);
}
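// Writes a version negotiation packet into a caller-provided buffer, for
// exercising version negotiation probes.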
bool WriteServerVersionNegotiationProbeResponse(
char* packet_bytes, size_t* packet_length_out,
const char* source_connection_id_bytes,
uint8_t source_connection_id_length) {
if (packet_bytes == nullptr) {
QUIC_BUG(quic_bug_10256_1) << "Invalid packet_bytes";
return false;
}
if (packet_length_out == nullptr) {
QUIC_BUG(quic_bug_10256_2) << "Invalid packet_length_out";
return false;
}
QuicConnectionId source_connection_id(source_connection_id_bytes,
source_connection_id_length);
std::unique_ptr<QuicEncryptedPacket> encrypted_packet =
QuicFramer::BuildVersionNegotiationPacket(
source_connection_id, EmptyQuicConnectionId(),
            /*ietf_quic=*/true, /*use_length_prefix=*/true,
ParsedQuicVersionVector{});
if (!encrypted_packet) {
QUIC_BUG(quic_bug_10256_3) << "Failed to create version negotiation packet";
return false;
}
if (*packet_length_out < encrypted_packet->length()) {
QUIC_BUG(quic_bug_10256_4)
<< "Invalid *packet_length_out " << *packet_length_out << " < "
<< encrypted_packet->length();
return false;
}
*packet_length_out = encrypted_packet->length();
memcpy(packet_bytes, encrypted_packet->data(), *packet_length_out);
return true;
}
bool ParseClientVersionNegotiationProbePacket(
const char* packet_bytes, size_t packet_length,
char* destination_connection_id_bytes,
uint8_t* destination_connection_id_length_out) {
if (packet_bytes == nullptr) {
QUIC_BUG(quic_bug_10256_5) << "Invalid packet_bytes";
return false;
}
if (packet_length < kMinPacketSizeForVersionNegotiation ||
packet_length > 65535) {
QUIC_BUG(quic_bug_10256_6) << "Invalid packet_length";
return false;
}
if (destination_connection_id_bytes == nullptr) {
QUIC_BUG(quic_bug_10256_7) << "Invalid destination_connection_id_bytes";
return false;
}
if (destination_connection_id_length_out == nullptr) {
QUIC_BUG(quic_bug_10256_8)
<< "Invalid destination_connection_id_length_out";
return false;
}
QuicEncryptedPacket encrypted_packet(packet_bytes, packet_length);
PacketHeaderFormat format;
QuicLongHeaderType long_packet_type;
bool version_present, has_length_prefix;
QuicVersionLabel version_label;
ParsedQuicVersion parsed_version = ParsedQuicVersion::Unsupported();
QuicConnectionId destination_connection_id, source_connection_id;
std::optional<absl::string_view> retry_token;
std::string detailed_error;
QuicErrorCode error = QuicFramer::ParsePublicHeaderDispatcher(
encrypted_packet,
      /*expected_destination_connection_id_length=*/0, &format,
&long_packet_type, &version_present, &has_length_prefix, &version_label,
&parsed_version, &destination_connection_id, &source_connection_id,
&retry_token, &detailed_error);
if (error != QUIC_NO_ERROR) {
QUIC_BUG(quic_bug_10256_9) << "Failed to parse packet: " << detailed_error;
return false;
}
if (!version_present) {
QUIC_BUG(quic_bug_10256_10) << "Packet is not a long header";
return false;
}
if (*destination_connection_id_length_out <
destination_connection_id.length()) {
QUIC_BUG(quic_bug_10256_11)
<< "destination_connection_id_length_out too small";
return false;
}
*destination_connection_id_length_out = destination_connection_id.length();
memcpy(destination_connection_id_bytes, destination_connection_id.data(),
*destination_connection_id_length_out);
return true;
}
}
} | #include "quiche/quic/test_tools/quic_test_utils.h"
#include <string>
#include "quiche/quic/platform/api/quic_test.h"
namespace quic {
namespace test {
class QuicTestUtilsTest : public QuicTest {};
TEST_F(QuicTestUtilsTest, ConnectionId) {
EXPECT_NE(EmptyQuicConnectionId(), TestConnectionId());
EXPECT_NE(EmptyQuicConnectionId(), TestConnectionId(1));
EXPECT_EQ(TestConnectionId(), TestConnectionId());
EXPECT_EQ(TestConnectionId(33), TestConnectionId(33));
EXPECT_NE(TestConnectionId(0xdead), TestConnectionId(0xbeef));
EXPECT_EQ(0x1337u, TestConnectionIdToUInt64(TestConnectionId(0x1337)));
EXPECT_NE(0xdeadu, TestConnectionIdToUInt64(TestConnectionId(0xbeef)));
}
TEST_F(QuicTestUtilsTest, BasicApproxEq) {
EXPECT_APPROX_EQ(10, 10, 1e-6f);
EXPECT_APPROX_EQ(1000, 1001, 0.01f);
EXPECT_NONFATAL_FAILURE(EXPECT_APPROX_EQ(1000, 1100, 0.01f), "");
EXPECT_APPROX_EQ(64, 31, 0.55f);
EXPECT_NONFATAL_FAILURE(EXPECT_APPROX_EQ(31, 64, 0.55f), "");
}
TEST_F(QuicTestUtilsTest, QuicTimeDelta) {
EXPECT_APPROX_EQ(QuicTime::Delta::FromMicroseconds(1000),
QuicTime::Delta::FromMicroseconds(1003), 0.01f);
EXPECT_NONFATAL_FAILURE(
EXPECT_APPROX_EQ(QuicTime::Delta::FromMicroseconds(1000),
QuicTime::Delta::FromMicroseconds(1200), 0.01f),
"");
}
TEST_F(QuicTestUtilsTest, QuicBandwidth) {
EXPECT_APPROX_EQ(QuicBandwidth::FromBytesPerSecond(1000),
QuicBandwidth::FromBitsPerSecond(8005), 0.01f);
EXPECT_NONFATAL_FAILURE(
EXPECT_APPROX_EQ(QuicBandwidth::FromBytesPerSecond(1000),
QuicBandwidth::FromBitsPerSecond(9005), 0.01f),
"");
}
TEST_F(QuicTestUtilsTest, SimpleRandomStability) {
SimpleRandom rng;
rng.set_seed(UINT64_C(0x1234567800010001));
EXPECT_EQ(UINT64_C(12589383305231984671), rng.RandUint64());
EXPECT_EQ(UINT64_C(17775425089941798664), rng.RandUint64());
}
TEST_F(QuicTestUtilsTest, SimpleRandomChunks) {
SimpleRandom rng;
std::string reference(16 * 1024, '\0');
rng.RandBytes(&reference[0], reference.size());
for (size_t chunk_size : {3, 4, 7, 4096}) {
rng.set_seed(0);
size_t chunks = reference.size() / chunk_size;
std::string buffer(chunks * chunk_size, '\0');
for (size_t i = 0; i < chunks; i++) {
rng.RandBytes(&buffer[i * chunk_size], chunk_size);
}
EXPECT_EQ(reference.substr(0, buffer.size()), buffer)
<< "Failed for chunk_size = " << chunk_size;
}
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/test_tools/quic_test_utils.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/test_tools/quic_test_utils_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
3a502638-f7f6-4331-a6e7-35592c04f18e | cpp | google/quiche | quic_bandwidth | quiche/quic/core/quic_bandwidth.cc | quiche/quic/core/quic_bandwidth_test.cc | #include "quiche/quic/core/quic_bandwidth.h"
#include <cinttypes>
#include <string>
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
namespace quic {
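// Renders the bandwidth with a human-friendly unit: raw bits/s below 80 kbps,
// otherwise kilo/mega/giga with two decimals, always alongside bytes/s.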
std::string QuicBandwidth::ToDebuggingValue() const {
if (bits_per_second_ < 80000) {
return absl::StrFormat("%d bits/s (%d bytes/s)", bits_per_second_,
bits_per_second_ / 8);
}
double divisor;
char unit;
if (bits_per_second_ < 8 * 1000 * 1000) {
divisor = 1e3;
unit = 'k';
} else if (bits_per_second_ < INT64_C(8) * 1000 * 1000 * 1000) {
divisor = 1e6;
unit = 'M';
} else {
divisor = 1e9;
unit = 'G';
}
double bits_per_second_with_unit = bits_per_second_ / divisor;
double bytes_per_second_with_unit = bits_per_second_with_unit / 8;
return absl::StrFormat("%.2f %cbits/s (%.2f %cbytes/s)",
bits_per_second_with_unit, unit,
bytes_per_second_with_unit, unit);
}
} | #include "quiche/quic/core/quic_bandwidth.h"
#include <limits>
#include "quiche/quic/core/quic_time.h"
#include "quiche/quic/platform/api/quic_test.h"
namespace quic {
namespace test {
class QuicBandwidthTest : public QuicTest {};
TEST_F(QuicBandwidthTest, FromTo) {
EXPECT_EQ(QuicBandwidth::FromKBitsPerSecond(1),
QuicBandwidth::FromBitsPerSecond(1000));
EXPECT_EQ(QuicBandwidth::FromKBytesPerSecond(1),
QuicBandwidth::FromBytesPerSecond(1000));
EXPECT_EQ(QuicBandwidth::FromBitsPerSecond(8000),
QuicBandwidth::FromBytesPerSecond(1000));
EXPECT_EQ(QuicBandwidth::FromKBitsPerSecond(8),
QuicBandwidth::FromKBytesPerSecond(1));
EXPECT_EQ(0, QuicBandwidth::Zero().ToBitsPerSecond());
EXPECT_EQ(0, QuicBandwidth::Zero().ToKBitsPerSecond());
EXPECT_EQ(0, QuicBandwidth::Zero().ToBytesPerSecond());
EXPECT_EQ(0, QuicBandwidth::Zero().ToKBytesPerSecond());
EXPECT_EQ(1, QuicBandwidth::FromBitsPerSecond(1000).ToKBitsPerSecond());
EXPECT_EQ(1000, QuicBandwidth::FromKBitsPerSecond(1).ToBitsPerSecond());
EXPECT_EQ(1, QuicBandwidth::FromBytesPerSecond(1000).ToKBytesPerSecond());
EXPECT_EQ(1000, QuicBandwidth::FromKBytesPerSecond(1).ToBytesPerSecond());
}
TEST_F(QuicBandwidthTest, Add) {
  QuicBandwidth bandwidth_1 = QuicBandwidth::FromKBitsPerSecond(1);
  QuicBandwidth bandwidth_2 = QuicBandwidth::FromKBytesPerSecond(1);
  EXPECT_EQ(9000, (bandwidth_1 + bandwidth_2).ToBitsPerSecond());
  EXPECT_EQ(9000, (bandwidth_2 + bandwidth_1).ToBitsPerSecond());
}
TEST_F(QuicBandwidthTest, Subtract) {
  QuicBandwidth bandwidth_1 = QuicBandwidth::FromKBitsPerSecond(1);
  QuicBandwidth bandwidth_2 = QuicBandwidth::FromKBytesPerSecond(1);
  EXPECT_EQ(7000, (bandwidth_2 - bandwidth_1).ToBitsPerSecond());
}
TEST_F(QuicBandwidthTest, TimeDelta) {
EXPECT_EQ(QuicBandwidth::FromKBytesPerSecond(1000),
QuicBandwidth::FromBytesAndTimeDelta(
1000, QuicTime::Delta::FromMilliseconds(1)));
EXPECT_EQ(QuicBandwidth::FromKBytesPerSecond(10),
QuicBandwidth::FromBytesAndTimeDelta(
1000, QuicTime::Delta::FromMilliseconds(100)));
EXPECT_EQ(QuicBandwidth::Zero(), QuicBandwidth::FromBytesAndTimeDelta(
0, QuicTime::Delta::FromSeconds(9)));
EXPECT_EQ(
QuicBandwidth::FromBitsPerSecond(1),
QuicBandwidth::FromBytesAndTimeDelta(1, QuicTime::Delta::FromSeconds(9)));
}
TEST_F(QuicBandwidthTest, Scale) {
EXPECT_EQ(QuicBandwidth::FromKBytesPerSecond(500),
QuicBandwidth::FromKBytesPerSecond(1000) * 0.5f);
EXPECT_EQ(QuicBandwidth::FromKBytesPerSecond(750),
0.75f * QuicBandwidth::FromKBytesPerSecond(1000));
EXPECT_EQ(QuicBandwidth::FromKBytesPerSecond(1250),
QuicBandwidth::FromKBytesPerSecond(1000) * 1.25f);
EXPECT_EQ(QuicBandwidth::FromBitsPerSecond(5),
QuicBandwidth::FromBitsPerSecond(9) * 0.5f);
EXPECT_EQ(QuicBandwidth::FromBitsPerSecond(2),
QuicBandwidth::FromBitsPerSecond(12) * 0.2f);
}
TEST_F(QuicBandwidthTest, BytesPerPeriod) {
EXPECT_EQ(2000, QuicBandwidth::FromKBytesPerSecond(2000).ToBytesPerPeriod(
QuicTime::Delta::FromMilliseconds(1)));
EXPECT_EQ(2, QuicBandwidth::FromKBytesPerSecond(2000).ToKBytesPerPeriod(
QuicTime::Delta::FromMilliseconds(1)));
EXPECT_EQ(200000, QuicBandwidth::FromKBytesPerSecond(2000).ToBytesPerPeriod(
QuicTime::Delta::FromMilliseconds(100)));
EXPECT_EQ(200, QuicBandwidth::FromKBytesPerSecond(2000).ToKBytesPerPeriod(
QuicTime::Delta::FromMilliseconds(100)));
EXPECT_EQ(200, QuicBandwidth::FromBitsPerSecond(1599).ToBytesPerPeriod(
QuicTime::Delta::FromMilliseconds(1001)));
EXPECT_EQ(200, QuicBandwidth::FromBitsPerSecond(1599).ToKBytesPerPeriod(
QuicTime::Delta::FromSeconds(1001)));
}
TEST_F(QuicBandwidthTest, TransferTime) {
EXPECT_EQ(QuicTime::Delta::FromSeconds(1),
QuicBandwidth::FromKBytesPerSecond(1).TransferTime(1000));
EXPECT_EQ(QuicTime::Delta::Zero(), QuicBandwidth::Zero().TransferTime(1000));
}
TEST_F(QuicBandwidthTest, RelOps) {
const QuicBandwidth b1 = QuicBandwidth::FromKBitsPerSecond(1);
const QuicBandwidth b2 = QuicBandwidth::FromKBytesPerSecond(2);
EXPECT_EQ(b1, b1);
EXPECT_NE(b1, b2);
EXPECT_LT(b1, b2);
EXPECT_GT(b2, b1);
EXPECT_LE(b1, b1);
EXPECT_LE(b1, b2);
EXPECT_GE(b1, b1);
EXPECT_GE(b2, b1);
}
TEST_F(QuicBandwidthTest, DebuggingValue) {
EXPECT_EQ("128 bits/s (16 bytes/s)",
QuicBandwidth::FromBytesPerSecond(16).ToDebuggingValue());
EXPECT_EQ("4096 bits/s (512 bytes/s)",
QuicBandwidth::FromBytesPerSecond(512).ToDebuggingValue());
QuicBandwidth bandwidth = QuicBandwidth::FromBytesPerSecond(1000 * 50);
EXPECT_EQ("400.00 kbits/s (50.00 kbytes/s)", bandwidth.ToDebuggingValue());
bandwidth = bandwidth * 1000;
EXPECT_EQ("400.00 Mbits/s (50.00 Mbytes/s)", bandwidth.ToDebuggingValue());
bandwidth = bandwidth * 1000;
EXPECT_EQ("400.00 Gbits/s (50.00 Gbytes/s)", bandwidth.ToDebuggingValue());
}
TEST_F(QuicBandwidthTest, SpecialValues) {
EXPECT_EQ(0, QuicBandwidth::Zero().ToBitsPerSecond());
EXPECT_EQ(std::numeric_limits<int64_t>::max(),
QuicBandwidth::Infinite().ToBitsPerSecond());
EXPECT_TRUE(QuicBandwidth::Zero().IsZero());
EXPECT_FALSE(QuicBandwidth::Zero().IsInfinite());
EXPECT_TRUE(QuicBandwidth::Infinite().IsInfinite());
EXPECT_FALSE(QuicBandwidth::Infinite().IsZero());
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/quic_bandwidth.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/quic_bandwidth_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
1ecf36f2-7ab5-4fb0-8321-4f385b395019 | cpp | tensorflow/tensorflow | backend_async_kernel_interface | tensorflow/lite/async/backend_async_kernel_interface.cc | tensorflow/lite/async/backend_async_kernel_interface_test.cc | #include "tensorflow/lite/async/backend_async_kernel_interface.h"
#include <vector>
#include "tensorflow/lite/async/c/async_kernel.h"
#include "tensorflow/lite/async/c/types.h"
namespace tflite {
namespace delegates {
namespace internal {
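// C-style trampolines: each one recovers the BackendAsyncKernelInterface*
// stored as kernel data and forwards the call to the matching virtual method.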
TfLiteStatus RegisterBuffer(TfLiteAsyncKernel* async_kernel,
TfLiteOpaqueContext* context, TfLiteIoType io_type,
const TfLiteBackendBuffer* buffer,
const TfLiteAttributeMap* attrs,
TfLiteBufferHandle handle) {
return reinterpret_cast<BackendAsyncKernelInterface*>(
TfLiteAsyncKernelGetKernelData(async_kernel))
->RegisterBuffer(context, io_type, buffer, attrs, handle);
}
TfLiteStatus RegisterBufferSlice(TfLiteAsyncKernel* async_kernel,
TfLiteOpaqueContext* context,
TfLiteBufferHandle buffer,
const TfLiteAttributeMap* attrs,
TfLiteBufferHandle handle) {
return reinterpret_cast<BackendAsyncKernelInterface*>(
TfLiteAsyncKernelGetKernelData(async_kernel))
->RegisterBufferSlice(context, buffer, attrs, handle);
}
TfLiteStatus UnregisterBuffer(TfLiteAsyncKernel* async_kernel,
TfLiteOpaqueContext* context,
const TfLiteBufferHandle handle) {
return reinterpret_cast<BackendAsyncKernelInterface*>(
TfLiteAsyncKernelGetKernelData(async_kernel))
->UnregisterBuffer(context, handle);
}
void SupportedBufferTypes(const TfLiteAsyncKernel* async_kernel,
TfLiteIoType io_type, const char* const** types,
size_t* n_types) {
if (types == nullptr || n_types == nullptr) return;
const auto& buf_types = reinterpret_cast<const BackendAsyncKernelInterface*>(
TfLiteAsyncKernelGetKernelData(async_kernel))
->SupportedBufferTypes(io_type);
*types = buf_types.data();
*n_types = buf_types.size();
}
void SupportedSynchronizations(const TfLiteAsyncKernel* async_kernel,
TfLiteIoType io_type, const char* const** types,
size_t* n_types) {
if (types == nullptr || n_types == nullptr) return;
const auto& sync_types = reinterpret_cast<const BackendAsyncKernelInterface*>(
TfLiteAsyncKernelGetKernelData(async_kernel))
->SupportedSynchronizations(io_type);
*types = sync_types.data();
*n_types = sync_types.size();
}
bool ReconcileRestrictions(const TfLiteAsyncKernel* async_kernel,
const TfLiteOpaqueContext* context,
const TfLiteOpaqueNode* node, int tensor_index,
const TfLiteAttributeMap* user_provided_attributes,
TfLiteAttributeMap* merged,
TfLiteAttributeMap* conflict) {
return reinterpret_cast<const BackendAsyncKernelInterface*>(
TfLiteAsyncKernelGetKernelData(async_kernel))
->ReconcileRestrictions(context, node, tensor_index,
user_provided_attributes, merged, conflict);
}
TfLiteStatus SetAttributes(TfLiteAsyncKernel* async_kernel,
TfLiteOpaqueContext* context, TfLiteOpaqueNode* node,
int tensor_index, const TfLiteAttributeMap* attrs) {
return reinterpret_cast<BackendAsyncKernelInterface*>(
TfLiteAsyncKernelGetKernelData(async_kernel))
->SetAttributes(context, node, tensor_index, attrs);
}
TfLiteStatus Prepare(TfLiteAsyncKernel* async_kernel,
TfLiteOpaqueContext* context, TfLiteOpaqueNode* node) {
return reinterpret_cast<BackendAsyncKernelInterface*>(
TfLiteAsyncKernelGetKernelData(async_kernel))
->Prepare(context, node);
}
TfLiteStatus Eval(TfLiteAsyncKernel* async_kernel, TfLiteOpaqueContext* context,
TfLiteOpaqueNode* node, TfLiteExecutionTask* task) {
return reinterpret_cast<BackendAsyncKernelInterface*>(
TfLiteAsyncKernelGetKernelData(async_kernel))
->Eval(context, node, task);
}
TfLiteStatus Wait(TfLiteAsyncKernel* async_kernel, TfLiteOpaqueContext* context,
TfLiteExecutionTask* task) {
return reinterpret_cast<BackendAsyncKernelInterface*>(
TfLiteAsyncKernelGetKernelData(async_kernel))
->Wait(context, task);
}
TfLiteStatus Finish(TfLiteAsyncKernel* async_kernel,
TfLiteOpaqueContext* context, TfLiteExecutionTask* task) {
return reinterpret_cast<BackendAsyncKernelInterface*>(
TfLiteAsyncKernelGetKernelData(async_kernel))
->Finish(context, task);
}
TfLiteStatus SetBufferAttributes(TfLiteAsyncKernel* async_kernel,
const TfLiteBackendBuffer* buffer,
const TfLiteAttributeMap* attrs) {
return reinterpret_cast<BackendAsyncKernelInterface*>(
TfLiteAsyncKernelGetKernelData(async_kernel))
->SetBufferAttributes(buffer, attrs);
}
TfLiteStatus GetBufferAttributes(TfLiteAsyncKernel* async_kernel,
const TfLiteBackendBuffer* buffer,
TfLiteAttributeMap* attrs) {
return reinterpret_cast<BackendAsyncKernelInterface*>(
TfLiteAsyncKernelGetKernelData(async_kernel))
->GetBufferAttributes(buffer, attrs);
}
}
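// Creates the underlying TfLiteAsyncKernel and wires every C entry point to
// the trampolines in `internal`, passing `this` as the kernel's opaque data.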
BackendAsyncKernelInterface::BackendAsyncKernelInterface() {
kernel_ = TfLiteAsyncKernelCreate(this);
TfLiteAsyncKernelSetRegisterBuffer(kernel_, internal::RegisterBuffer);
TfLiteAsyncKernelSetRegisterBufferSlice(kernel_,
internal::RegisterBufferSlice);
TfLiteAsyncKernelSetUnregisterBuffer(kernel_, internal::UnregisterBuffer);
TfLiteAsyncKernelSetSupportedBufferTypes(kernel_,
internal::SupportedBufferTypes);
TfLiteAsyncKernelSetSupportedSynchronizations(
kernel_, internal::SupportedSynchronizations);
TfLiteAsyncKernelSetReconcileRestrictions(kernel_,
internal::ReconcileRestrictions);
TfLiteAsyncKernelSetSetAttributes(kernel_, internal::SetAttributes);
TfLiteAsyncKernelSetSetBufferAttributes(kernel_,
internal::SetBufferAttributes);
TfLiteAsyncKernelSetGetBufferAttributes(kernel_,
internal::GetBufferAttributes);
TfLiteAsyncKernelSetPrepare(kernel_, internal::Prepare);
TfLiteAsyncKernelSetEval(kernel_, internal::Eval);
TfLiteAsyncKernelSetWait(kernel_, internal::Wait);
TfLiteAsyncKernelSetFinish(kernel_, internal::Finish);
}
}
} | #include "tensorflow/lite/async/backend_async_kernel_interface.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/async/c/types.h"
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/core/async/async_kernel_internal.h"
#include "tensorflow/lite/core/async/testing/mock_async_kernel.h"
using ::testing::_;
namespace tflite::delegates {
namespace {
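// Verifies that every C entry point on the TfLiteAsyncKernel forwards to the
// corresponding mocked virtual method exactly once.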
TEST(BackendAsyncKernelInterfaceTest, BasicTest) {
testing::StrictMock<async::testing::MockAsyncKernel> kernel;
EXPECT_CALL(kernel, RegisterBuffer(_, _, _, _, _));
EXPECT_CALL(kernel, RegisterBufferSlice(_, _, _, _));
EXPECT_CALL(kernel, UnregisterBuffer(_, _));
EXPECT_CALL(kernel, ReconcileRestrictions(_, _, _, _, _, _));
EXPECT_CALL(kernel, SetAttributes(_, _, _, _));
EXPECT_CALL(kernel, SetBufferAttributes(_, _));
EXPECT_CALL(kernel, GetBufferAttributes(_, _));
EXPECT_CALL(kernel, Prepare(_, _));
EXPECT_CALL(kernel, Eval(_, _, _));
EXPECT_CALL(kernel, Wait(_, _));
EXPECT_CALL(kernel, Finish(_, _));
auto* tflite_kernel = kernel.kernel();
tflite_kernel->register_buffer(tflite_kernel, nullptr, kTfLiteIoTypeInput,
nullptr, nullptr, 0);
tflite_kernel->register_buffer_slice(tflite_kernel, nullptr, 0, nullptr, 0);
tflite_kernel->unregister_buffer(tflite_kernel, nullptr, 0);
tflite_kernel->reconcile_restrictions(tflite_kernel, nullptr, nullptr, 0,
nullptr, nullptr, nullptr);
tflite_kernel->set_attributes(tflite_kernel, nullptr, nullptr, 0, nullptr);
tflite_kernel->set_buffer_attributes(tflite_kernel, nullptr, nullptr);
tflite_kernel->get_buffer_attributes(tflite_kernel, nullptr, nullptr);
tflite_kernel->prepare(tflite_kernel, nullptr, nullptr);
tflite_kernel->eval(tflite_kernel, nullptr, nullptr, nullptr);
tflite_kernel->wait(tflite_kernel, nullptr, nullptr);
tflite_kernel->finish(tflite_kernel, nullptr, nullptr);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/async/backend_async_kernel_interface.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/async/backend_async_kernel_interface_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
68532c9a-3dc4-4766-8829-0e780474d83e | cpp | tensorflow/tensorflow | error_collector_inst | tensorflow/compiler/mlir/lite/metrics/error_collector_inst.cc | tensorflow/compiler/mlir/lite/metrics/error_collector_inst_test.cc | #include "tensorflow/compiler/mlir/lite/metrics/error_collector_inst.h"
#include <memory>
#include <string>
#include <vector>
#include "absl/strings/match.h"
#include "absl/strings/str_split.h"
#include "mlir/IR/Diagnostics.h"
#include "mlir/IR/Location.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Support/LogicalResult.h"
#include "tensorflow/compiler/mlir/lite/metrics/error_collector.h"
#include "tensorflow/compiler/mlir/lite/metrics/types_util.h"
namespace mlir {
namespace TFL {
namespace {
inline std::string extract_pass_name(const std::string &signature) {
const std::vector<std::string> &v = absl::StrSplit(signature, "::");
return v.back();
}
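// Extracts the op name from an error message of the form "'tf.Foo' op ...";
// returns an empty string when the message does not match that pattern.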
inline std::string extract_op_name_from_error_message(
const std::string &error_message) {
int end_pos = error_message.find("' op");
if ((absl::StartsWith(error_message, "'tf.") ||
absl::StartsWith(error_message, "'tfl.")) &&
end_pos != std::string::npos) {
return error_message.substr(1, end_pos - 1);
}
return "";
}
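// Diagnostic notes longer than this are truncated so a single collected
// error stays bounded in size.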
const int kMaxAcceptedNoteSize = 1024;
}
ErrorCollectorInstrumentation::ErrorCollectorInstrumentation(
MLIRContext *context)
: error_collector_(ErrorCollector::GetErrorCollector()) {
handler_ = std::make_unique<ScopedDiagnosticHandler>(
context, [this](Diagnostic &diag) {
if (diag.getSeverity() == DiagnosticSeverity::Error) {
Location loc = diag.getLocation();
std::string error_message = diag.str();
std::string op_name, error_code;
if (loc_to_name_.count(loc)) {
op_name = loc_to_name_[loc];
} else {
op_name = extract_op_name_from_error_message(diag.str());
}
for (const auto ¬e : diag.getNotes()) {
const std::string note_str = note.str();
if (absl::StartsWith(note_str, kErrorCodePrefix)) {
error_code = note_str.substr(sizeof(kErrorCodePrefix) - 1);
}
error_message += "\n";
if (note_str.size() <= kMaxAcceptedNoteSize) {
error_message += note_str;
} else {
error_message += note_str.substr(0, kMaxAcceptedNoteSize);
error_message += "...";
}
}
ErrorCode error_code_enum = ConverterErrorData::UNKNOWN;
bool has_valid_error_code =
ConverterErrorData::ErrorCode_Parse(error_code, &error_code_enum);
if (!op_name.empty() || has_valid_error_code) {
error_collector_->ReportError(NewConverterErrorData(
pass_name_, error_message, error_code_enum, op_name, loc));
} else {
common_error_message_ += diag.str();
common_error_message_ += "\n";
}
}
return failure();
});
}
void ErrorCollectorInstrumentation::runBeforePass(Pass *pass,
Operation *module) {
auto collectOps = [this](Operation *op) {
const auto &op_name = op->getName().getStringRef().str();
if (absl::StartsWith(op_name, "tf.") || absl::StartsWith(op_name, "tfl.")) {
loc_to_name_.emplace(op->getLoc(), op_name);
}
};
for (auto ®ion : module->getRegions()) {
region.walk(collectOps);
}
pass_name_ = extract_pass_name(pass->getName().str());
error_collector_->Clear();
}
void ErrorCollectorInstrumentation::runAfterPass(Pass *pass,
Operation *module) {
loc_to_name_.clear();
pass_name_.clear();
common_error_message_.clear();
error_collector_->Clear();
}
void ErrorCollectorInstrumentation::runAfterPassFailed(Pass *pass,
Operation *module) {
if (error_collector_->CollectedErrors().empty() &&
!common_error_message_.empty()) {
error_collector_->ReportError(NewConverterErrorData(
pass_name_, common_error_message_, ConverterErrorData::UNKNOWN,
"", module->getLoc()));
}
loc_to_name_.clear();
pass_name_.clear();
common_error_message_.clear();
}
}
} | #include "tensorflow/compiler/mlir/lite/metrics/error_collector_inst.h"
#include <cstddef>
#include <memory>
#include <set>
#include <string>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/statusor.h"
#include "llvm/Support/SMLoc.h"
#include "llvm/Support/SourceMgr.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/DialectRegistry.h"
#include "mlir/IR/Location.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/Parser/Parser.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Support/FileUtilities.h"
#include "mlir/Support/LogicalResult.h"
#include "mlir/Support/TypeID.h"
#include "tensorflow/compiler/mlir/lite/metrics/converter_error_data.pb.h"
#include "tensorflow/compiler/mlir/lite/metrics/error_collector.h"
#include "tensorflow/compiler/mlir/lite/metrics/types_util.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_dialect.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/resource_loader.h"
#include "tensorflow/core/platform/test.h"
#include "tsl/platform/statusor.h"
namespace mlir {
namespace TFL {
namespace {
using tsl::StatusOr;
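// Pass that emits an error diagnostic for every op but does not signal
// failure, so its collected errors are expected to be discarded.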
class MockSuccessPass
: public PassWrapper<MockSuccessPass, OperationPass<ModuleOp>> {
void getDependentDialects(DialectRegistry& registry) const override {
registry.insert<TF::TensorFlowDialect>();
}
public:
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(MockSuccessPass)
explicit MockSuccessPass() = default;
private:
void runOnOperation() override {
getOperation().walk([](Operation* nestedOp) {
nestedOp->emitError()
<< "Error at " << nestedOp->getName().getStringRef().str() << " op";
});
};
};
class MockFailurePass
: public PassWrapper<MockFailurePass, OperationPass<ModuleOp>> {
void getDependentDialects(DialectRegistry& registry) const override {
registry.insert<TF::TensorFlowDialect>();
}
public:
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(MockFailurePass)
explicit MockFailurePass() = default;
private:
void runOnOperation() override {
getOperation().walk([](Operation* nestedOp) {
      if (nestedOp->getName().getStringRef().str().rfind("tf.") !=
          std::string::npos) {
AttachErrorCode(
nestedOp->emitError()
<< "Failed at " << nestedOp->getName().getStringRef().str()
<< " op",
tflite::metrics::ConverterErrorData::ERROR_NEEDS_FLEX_OPS);
}
});
signalPassFailure();
};
};
absl::StatusOr<OwningOpRef<mlir::ModuleOp>> LoadModule(
MLIRContext* context, const std::string& file_name) {
std::string error_message;
auto file = openInputFile(file_name, &error_message);
if (!file) {
return tensorflow::errors::InvalidArgument("fail to open input file");
}
llvm::SourceMgr source_mgr;
source_mgr.AddNewSourceBuffer(std::move(file), llvm::SMLoc());
return OwningOpRef<mlir::ModuleOp>(
parseSourceFile<mlir::ModuleOp>(source_mgr, context));
}
TEST(ErrorCollectorTest, TessSuccessPass) {
std::string input_file = tensorflow::GetDataDependencyFilepath(
"tensorflow/compiler/mlir/lite/metrics/testdata/strided_slice.mlir");
MLIRContext context;
context.getOrLoadDialect<mlir::func::FuncDialect>();
context.getOrLoadDialect<TF::TensorFlowDialect>();
context.enableMultithreading();
auto module = LoadModule(&context, input_file);
EXPECT_EQ(module.ok(), true);
PassManager pm(module.value().get()->getName(),
OpPassManager::Nesting::Implicit);
pm.addPass(std::make_unique<MockSuccessPass>());
pm.addInstrumentation(
std::make_unique<ErrorCollectorInstrumentation>(&context));
EXPECT_EQ(succeeded(pm.run(module.value().get())), true);
auto collected_errors =
ErrorCollector::GetErrorCollector()->CollectedErrors();
EXPECT_EQ(collected_errors.size(), 0);
}
TEST(ErrorCollectorTest, TessFailurePass) {
using tflite::metrics::ConverterErrorData;
MLIRContext context;
context.getOrLoadDialect<mlir::func::FuncDialect>();
context.getOrLoadDialect<TF::TensorFlowDialect>();
const std::string input_file =
"tensorflow/compiler/mlir/lite/metrics/testdata/strided_slice.mlir";
auto input_file_id = StringAttr::get(&context, input_file);
context.enableMultithreading();
auto module =
LoadModule(&context, tensorflow::GetDataDependencyFilepath(input_file));
EXPECT_EQ(module.ok(), true);
PassManager pm(module.value().get()->getName(),
OpPassManager::Nesting::Implicit);
pm.addPass(std::make_unique<MockSuccessPass>());
pm.addPass(std::make_unique<MockFailurePass>());
pm.addInstrumentation(
std::make_unique<ErrorCollectorInstrumentation>(&context));
EXPECT_EQ(succeeded(pm.run(module.value().get())), false);
auto collected_errors =
ErrorCollector::GetErrorCollector()->CollectedErrors();
EXPECT_EQ(collected_errors.size(), 3);
EXPECT_EQ(collected_errors.count(NewConverterErrorData(
"MockFailurePass",
"Failed at tf.Const op\nsee current operation: %0 = "
"\"tf.Const\"() <{value = dense<1> : tensor<4xi32>}> : () -> "
"tensor<4xi32>\nError code: ERROR_NEEDS_FLEX_OPS",
ConverterErrorData::ERROR_NEEDS_FLEX_OPS, "tf.Const",
mlir::FileLineColLoc::get(input_file_id, 2, 9))),
1);
EXPECT_EQ(collected_errors.count(NewConverterErrorData(
"MockFailurePass",
"Failed at tf.Const op\nsee current operation: %1 = "
"\"tf.Const\"() <{value = dense<0> : tensor<4xi32>}> : () -> "
"tensor<4xi32>\nError code: ERROR_NEEDS_FLEX_OPS",
ConverterErrorData::ERROR_NEEDS_FLEX_OPS, "tf.Const",
mlir::FileLineColLoc::get(input_file_id, 2, 9))),
1);
EXPECT_EQ(
collected_errors.count(NewConverterErrorData(
"MockFailurePass",
"Failed at tf.StridedSlice op\nsee current operation: %2 = "
"\"tf.StridedSlice\"(%arg0, %1, %1, %0) <{begin_mask = 11 : "
"i64, ellipsis_mask = 0 : i64, end_mask = 11 : i64, new_axis_mask = "
"4 : i64, shrink_axis_mask = 0 : i64}> {device = \"\"} : "
"(tensor<*xf32>, tensor<4xi32>, tensor<4xi32>, tensor<4xi32>) "
"-> tensor<*xf32>\nError code: ERROR_NEEDS_FLEX_OPS",
ConverterErrorData::ERROR_NEEDS_FLEX_OPS, "tf.StridedSlice",
mlir::FileLineColLoc::get(input_file_id, 4, 10))),
1);
std::vector<std::string> locations;
for (const auto& error : collected_errors) {
EXPECT_TRUE(error.has_location());
locations.push_back(error.location().DebugString());
}
EXPECT_THAT(locations, Each(testing::HasSubstr("CALLSITELOC")));
EXPECT_THAT(locations, Each(testing::HasSubstr(input_file)));
EXPECT_THAT(locations, Contains(testing::HasSubstr("line: 2")));
EXPECT_THAT(locations, Contains(testing::HasSubstr("column: 9")));
EXPECT_THAT(locations, Contains(testing::HasSubstr("line: 4")));
EXPECT_THAT(locations, Contains(testing::HasSubstr("column: 10")));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/lite/metrics/error_collector_inst.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/lite/metrics/error_collector_inst_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
0df8f891-2efd-4dd7-ba09-7e8edd2d97e0 | cpp | google/tensorstore | s3_key_value_store | tensorstore/kvstore/s3/s3_key_value_store.cc | tensorstore/kvstore/s3/s3_key_value_store_test.cc | #include <stddef.h>
#include <stdint.h>
#include <atomic>
#include <cassert>
#include <memory>
#include <optional>
#include <string>
#include <string_view>
#include <utility>
#include <variant>
#include "absl/base/attributes.h"
#include "absl/log/absl_log.h"
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "absl/strings/escaping.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/synchronization/mutex.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "tensorstore/context.h"
#include "tensorstore/internal/data_copy_concurrency_resource.h"
#include "tensorstore/internal/digest/sha256.h"
#include "tensorstore/internal/http/curl_transport.h"
#include "tensorstore/internal/http/http_request.h"
#include "tensorstore/internal/http/http_response.h"
#include "tensorstore/internal/http/http_transport.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/internal/log/verbose_flag.h"
#include "tensorstore/internal/metrics/counter.h"
#include "tensorstore/internal/metrics/histogram.h"
#include "tensorstore/internal/rate_limiter/rate_limiter.h"
#include "tensorstore/internal/source_location.h"
#include "tensorstore/internal/thread/schedule_at.h"
#include "tensorstore/internal/uri_utils.h"
#include "tensorstore/kvstore/batch_util.h"
#include "tensorstore/kvstore/byte_range.h"
#include "tensorstore/kvstore/common_metrics.h"
#include "tensorstore/kvstore/generation.h"
#include "tensorstore/kvstore/generic_coalescing_batch_util.h"
#include "tensorstore/kvstore/http/byte_range_util.h"
#include "tensorstore/kvstore/key_range.h"
#include "tensorstore/kvstore/operations.h"
#include "tensorstore/kvstore/read_result.h"
#include "tensorstore/kvstore/registry.h"
#include "tensorstore/kvstore/s3/aws_credentials_resource.h"
#include "tensorstore/kvstore/s3/credentials/aws_credentials.h"
#include "tensorstore/kvstore/s3/s3_endpoint.h"
#include "tensorstore/kvstore/s3/s3_metadata.h"
#include "tensorstore/kvstore/s3/s3_request_builder.h"
#include "tensorstore/kvstore/s3/s3_resource.h"
#include "tensorstore/kvstore/s3/s3_uri_utils.h"
#include "tensorstore/kvstore/s3/validate.h"
#include "tensorstore/kvstore/spec.h"
#include "tensorstore/kvstore/url_registry.h"
#include "tensorstore/util/execution/any_receiver.h"
#include "tensorstore/util/execution/execution.h"
#include "tensorstore/util/executor.h"
#include "tensorstore/util/future.h"
#include "tensorstore/util/garbage_collection/fwd.h"
#include "tensorstore/util/quote_string.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/str_cat.h"
#include "tinyxml2.h"
#include "tensorstore/internal/cache_key/std_optional.h"
#include "tensorstore/internal/json_binding/std_array.h"
#include "tensorstore/internal/json_binding/std_optional.h"
#include "tensorstore/serialization/fwd.h"
#include "tensorstore/serialization/std_optional.h"
#include "tensorstore/util/garbage_collection/std_optional.h"
using ::tensorstore::internal::DataCopyConcurrencyResource;
using ::tensorstore::internal::IntrusivePtr;
using ::tensorstore::internal::RateLimiter;
using ::tensorstore::internal::RateLimiterNode;
using ::tensorstore::internal::ScheduleAt;
using ::tensorstore::internal::SHA256Digester;
using ::tensorstore::internal_http::HttpRequest;
using ::tensorstore::internal_http::HttpResponse;
using ::tensorstore::internal_http::HttpTransport;
using ::tensorstore::internal_kvstore_s3::AwsCredentials;
using ::tensorstore::internal_kvstore_s3::AwsCredentialsResource;
using ::tensorstore::internal_kvstore_s3::AwsHttpResponseToStatus;
using ::tensorstore::internal_kvstore_s3::GetNodeInt;
using ::tensorstore::internal_kvstore_s3::GetNodeText;
using ::tensorstore::internal_kvstore_s3::IsValidBucketName;
using ::tensorstore::internal_kvstore_s3::IsValidObjectName;
using ::tensorstore::internal_kvstore_s3::IsValidStorageGeneration;
using ::tensorstore::internal_kvstore_s3::S3ConcurrencyResource;
using ::tensorstore::internal_kvstore_s3::S3EndpointRegion;
using ::tensorstore::internal_kvstore_s3::S3RateLimiterResource;
using ::tensorstore::internal_kvstore_s3::S3RequestBuilder;
using ::tensorstore::internal_kvstore_s3::S3RequestRetries;
using ::tensorstore::internal_kvstore_s3::S3UriEncode;
using ::tensorstore::internal_kvstore_s3::StorageGenerationFromHeaders;
using ::tensorstore::kvstore::Key;
using ::tensorstore::kvstore::ListEntry;
using ::tensorstore::kvstore::ListOptions;
using ::tensorstore::kvstore::ListReceiver;
namespace tensorstore {
namespace {
namespace jb = tensorstore::internal_json_binding;
struct S3Metrics : public internal_kvstore::CommonMetrics {
internal_metrics::Counter<int64_t>& retries;
};
auto s3_metrics = []() -> S3Metrics {
return {
TENSORSTORE_KVSTORE_COMMON_METRICS(s3),
TENSORSTORE_KVSTORE_COUNTER_IMPL(
s3, retries, "count of all retried requests (read/write/delete)")};
}();
ABSL_CONST_INIT internal_log::VerboseFlag s3_logging("s3");
static constexpr char kUriScheme[] = "s3";
static constexpr char kEmptySha256[] =
"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855";
static constexpr char kEmptyEtag[] = "\"\"";
static constexpr size_t kMaxS3PutSize = size_t{5} * 1024 * 1024 * 1024;
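// Translates a StorageGeneration (which wraps the object's S3 ETag) into an
// "if-match"/"if-none-match" conditional header. Returns false, adding no
// header, when the generation is unknown.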
bool AddGenerationHeader(S3RequestBuilder* builder, std::string_view header,
const StorageGeneration& gen) {
if (StorageGeneration::IsUnknown(gen)) {
return false;
}
auto etag = StorageGeneration::IsNoValue(gen)
? kEmptyEtag
: StorageGeneration::DecodeString(gen);
builder->AddHeader(absl::StrCat(header, ": ", etag));
return true;
}
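// Lowercase hex SHA-256 of the request payload, used as the
// x-amz-content-sha256 value when signing requests.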
std::string payload_sha256(const absl::Cord& cord = absl::Cord()) {
SHA256Digester sha256;
sha256.Write(cord);
auto digest = sha256.Digest();
auto digest_sv = std::string_view(reinterpret_cast<const char*>(&digest[0]),
digest.size());
return absl::BytesToHexString(digest_sv);
}
bool DefaultIsRetryableCode(absl::StatusCode code) {
return code == absl::StatusCode::kDeadlineExceeded ||
code == absl::StatusCode::kUnavailable;
}
struct S3KeyValueStoreSpecData {
std::string bucket;
bool requester_pays;
std::optional<std::string> endpoint;
std::optional<std::string> host_header;
std::string aws_region;
Context::Resource<AwsCredentialsResource> aws_credentials;
Context::Resource<S3ConcurrencyResource> request_concurrency;
std::optional<Context::Resource<S3RateLimiterResource>> rate_limiter;
Context::Resource<S3RequestRetries> retries;
Context::Resource<DataCopyConcurrencyResource> data_copy_concurrency;
constexpr static auto ApplyMembers = [](auto& x, auto f) {
return f(x.bucket, x.requester_pays, x.endpoint, x.host_header,
x.aws_region, x.aws_credentials, x.request_concurrency,
x.rate_limiter, x.retries, x.data_copy_concurrency);
};
constexpr static auto default_json_binder = jb::Object(
jb::Member("bucket",
jb::Projection<&S3KeyValueStoreSpecData::bucket>(jb::Validate(
[](const auto& options, const std::string* x) {
if (!IsValidBucketName(*x)) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Invalid S3 bucket name: ", QuoteString(*x)));
}
return absl::OkStatus();
}))),
jb::Member("requester_pays",
jb::Projection<&S3KeyValueStoreSpecData::requester_pays>(
jb::DefaultValue([](auto* v) { *v = false; }))),
jb::Member("host_header",
jb::Projection<&S3KeyValueStoreSpecData::host_header>()),
jb::Member("endpoint",
jb::Projection<&S3KeyValueStoreSpecData::endpoint>()),
jb::Member("aws_region",
jb::Projection<&S3KeyValueStoreSpecData::aws_region>(
jb::DefaultValue([](auto* v) { *v = ""; }))),
jb::Member(AwsCredentialsResource::id,
jb::Projection<&S3KeyValueStoreSpecData::aws_credentials>()),
jb::Member(
S3ConcurrencyResource::id,
jb::Projection<&S3KeyValueStoreSpecData::request_concurrency>()),
jb::Member(S3RateLimiterResource::id,
jb::Projection<&S3KeyValueStoreSpecData::rate_limiter>()),
jb::Member(S3RequestRetries::id,
jb::Projection<&S3KeyValueStoreSpecData::retries>()),
jb::Member(DataCopyConcurrencyResource::id,
jb::Projection<
&S3KeyValueStoreSpecData::data_copy_concurrency>())
);
};
std::string GetS3Url(std::string_view bucket, std::string_view path) {
  return tensorstore::StrCat(kUriScheme, "://", bucket, "/", S3UriEncode(path));
}
class S3KeyValueStoreSpec
: public internal_kvstore::RegisteredDriverSpec<S3KeyValueStoreSpec,
S3KeyValueStoreSpecData> {
public:
static constexpr char id[] = "s3";
Future<kvstore::DriverPtr> DoOpen() const override;
Result<std::string> ToUrl(std::string_view path) const override {
return GetS3Url(data_.bucket, path);
}
};
class S3KeyValueStore
: public internal_kvstore::RegisteredDriver<S3KeyValueStore,
S3KeyValueStoreSpec> {
public:
S3KeyValueStore(std::shared_ptr<HttpTransport> transport,
S3KeyValueStoreSpecData spec)
: transport_(std::move(transport)),
spec_(std::move(spec)),
host_header_(spec_.host_header.value_or(std::string())) {}
internal_kvstore_batch::CoalescingOptions GetBatchReadCoalescingOptions()
const {
internal_kvstore_batch::CoalescingOptions options;
options.max_extra_read_bytes = 4095;
options.target_coalesced_size = 128 * 1024 * 1024;
return options;
}
Future<ReadResult> Read(Key key, ReadOptions options) override;
Future<ReadResult> ReadImpl(Key&& key, ReadOptions&& options);
Future<TimestampedStorageGeneration> Write(Key key,
std::optional<Value> value,
WriteOptions options) override;
void ListImpl(ListOptions options, ListReceiver receiver) override;
Future<const void> DeleteRange(KeyRange range) override;
absl::Status GetBoundSpecData(SpecData& spec) const {
spec = spec_;
return absl::OkStatus();
}
const Executor& executor() const {
return spec_.data_copy_concurrency->executor;
}
RateLimiter& read_rate_limiter() {
if (spec_.rate_limiter.has_value()) {
return *(spec_.rate_limiter.value()->read_limiter);
}
return no_rate_limiter_;
}
RateLimiter& write_rate_limiter() {
if (spec_.rate_limiter.has_value()) {
return *(spec_.rate_limiter.value()->write_limiter);
}
return no_rate_limiter_;
}
RateLimiter& admission_queue() { return *spec_.request_concurrency->queue; }
Result<std::optional<AwsCredentials>> GetCredentials() {
return spec_.aws_credentials->GetCredentials();
}
Future<const S3EndpointRegion> MaybeResolveRegion();
template <typename Task>
absl::Status BackoffForAttemptAsync(
absl::Status status, int attempt, Task* task,
SourceLocation loc = ::tensorstore::SourceLocation::current()) {
assert(task != nullptr);
auto delay = spec_.retries->BackoffForAttempt(attempt);
if (!delay) {
return MaybeAnnotateStatus(std::move(status),
absl::StrFormat("All %d retry attempts failed",
spec_.retries->max_retries),
absl::StatusCode::kAborted, loc);
}
s3_metrics.retries.Increment();
ScheduleAt(absl::Now() + *delay,
WithExecutor(executor(), [task = IntrusivePtr<Task>(task)] {
task->Retry();
}));
return absl::OkStatus();
}
internal::NoRateLimiter no_rate_limiter_;
std::shared_ptr<HttpTransport> transport_;
S3KeyValueStoreSpecData spec_;
std::string host_header_;
absl::Mutex mutex_;
Future<const S3EndpointRegion> resolve_ehr_;
};
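// Rate-limited, reference-counted task for a single conditional object read.
// Zero-length byte ranges are served with a HEAD request instead of a GET,
// and retryable failures are re-issued with exponential backoff.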
struct ReadTask : public RateLimiterNode,
public internal::AtomicReferenceCount<ReadTask> {
IntrusivePtr<S3KeyValueStore> owner;
std::string object_name;
kvstore::ReadOptions options;
Promise<kvstore::ReadResult> promise;
std::string read_url_;
ReadyFuture<const S3EndpointRegion> endpoint_region_;
int attempt_ = 0;
absl::Time start_time_;
ReadTask(IntrusivePtr<S3KeyValueStore> owner, std::string object_name,
kvstore::ReadOptions options, Promise<kvstore::ReadResult> promise)
: owner(std::move(owner)),
object_name(std::move(object_name)),
options(std::move(options)),
promise(std::move(promise)) {}
~ReadTask() { owner->admission_queue().Finish(this); }
static void Start(void* task) {
auto* self = reinterpret_cast<ReadTask*>(task);
self->owner->read_rate_limiter().Finish(self);
self->owner->admission_queue().Admit(self, &ReadTask::Admit);
}
static void Admit(void* task) {
auto* self = reinterpret_cast<ReadTask*>(task);
self->owner->executor()(
[state = IntrusivePtr<ReadTask>(self, internal::adopt_object_ref)] {
state->Retry();
});
}
void Retry() {
if (!promise.result_needed()) {
return;
}
AwsCredentials credentials;
if (auto maybe_credentials = owner->GetCredentials();
!maybe_credentials.ok()) {
promise.SetResult(maybe_credentials.status());
return;
} else if (maybe_credentials.value().has_value()) {
credentials = std::move(*maybe_credentials.value());
}
auto request_builder = S3RequestBuilder(
options.byte_range.size() == 0 ? "HEAD" : "GET", read_url_);
AddGenerationHeader(&request_builder, "if-none-match",
options.generation_conditions.if_not_equal);
AddGenerationHeader(&request_builder, "if-match",
options.generation_conditions.if_equal);
if (options.byte_range.size() != 0) {
request_builder.MaybeAddRangeHeader(options.byte_range);
}
const auto& ehr = endpoint_region_.value();
start_time_ = absl::Now();
auto request = request_builder.EnableAcceptEncoding()
.MaybeAddRequesterPayer(owner->spec_.requester_pays)
.BuildRequest(owner->host_header_, credentials,
ehr.aws_region, kEmptySha256, start_time_);
ABSL_LOG_IF(INFO, s3_logging) << "ReadTask: " << request;
auto future = owner->transport_->IssueRequest(request, {});
future.ExecuteWhenReady([self = IntrusivePtr<ReadTask>(this)](
ReadyFuture<HttpResponse> response) {
self->OnResponse(response.result());
});
}
void OnResponse(const Result<HttpResponse>& response) {
if (!promise.result_needed()) {
return;
}
ABSL_LOG_IF(INFO, s3_logging.Level(1) && response.ok())
<< "ReadTask " << *response;
bool is_retryable = false;
absl::Status status = [&]() -> absl::Status {
if (!response.ok()) {
is_retryable = DefaultIsRetryableCode(response.status().code());
return response.status();
}
switch (response.value().status_code) {
case 412:
case 404:
case 304:
return absl::OkStatus();
}
return AwsHttpResponseToStatus(response.value(), is_retryable);
}();
if (!status.ok() && is_retryable) {
status =
owner->BackoffForAttemptAsync(std::move(status), attempt_++, this);
if (status.ok()) {
return;
}
}
if (!status.ok()) {
promise.SetResult(status);
} else {
promise.SetResult(FinishResponse(response.value()));
}
}
Result<kvstore::ReadResult> FinishResponse(const HttpResponse& httpresponse) {
s3_metrics.bytes_read.IncrementBy(httpresponse.payload.size());
auto latency = absl::Now() - start_time_;
s3_metrics.read_latency_ms.Observe(absl::ToInt64Milliseconds(latency));
switch (httpresponse.status_code) {
case 204:
case 404:
return kvstore::ReadResult::Missing(start_time_);
case 412:
return kvstore::ReadResult::Unspecified(TimestampedStorageGeneration{
StorageGeneration::Unknown(), start_time_});
case 304:
return kvstore::ReadResult::Unspecified(TimestampedStorageGeneration{
options.generation_conditions.if_not_equal, start_time_});
}
absl::Cord value;
if (options.byte_range.size() != 0) {
ByteRange byte_range;
int64_t total_size;
TENSORSTORE_RETURN_IF_ERROR(internal_http::ValidateResponseByteRange(
httpresponse, options.byte_range, value, byte_range, total_size));
}
TENSORSTORE_ASSIGN_OR_RETURN(
auto generation, StorageGenerationFromHeaders(httpresponse.headers));
return kvstore::ReadResult::Value(
std::move(value),
TimestampedStorageGeneration{std::move(generation), start_time_});
}
};
Future<kvstore::ReadResult> S3KeyValueStore::Read(Key key,
ReadOptions options) {
s3_metrics.read.Increment();
if (!IsValidObjectName(key)) {
return absl::InvalidArgumentError("Invalid S3 object name");
}
if (!IsValidStorageGeneration(options.generation_conditions.if_equal) ||
!IsValidStorageGeneration(options.generation_conditions.if_not_equal)) {
return absl::InvalidArgumentError("Malformed StorageGeneration");
}
return internal_kvstore_batch::HandleBatchRequestByGenericByteRangeCoalescing(
*this, std::move(key), std::move(options));
}
Future<kvstore::ReadResult> S3KeyValueStore::ReadImpl(Key&& key,
ReadOptions&& options) {
s3_metrics.batch_read.Increment();
auto op = PromiseFuturePair<ReadResult>::Make();
auto state = internal::MakeIntrusivePtr<ReadTask>(
internal::IntrusivePtr<S3KeyValueStore>(this), key, std::move(options),
std::move(op.promise));
MaybeResolveRegion().ExecuteWhenReady(
[state = std::move(state)](ReadyFuture<const S3EndpointRegion> ready) {
if (!ready.status().ok()) {
state->promise.SetResult(ready.status());
return;
}
state->read_url_ = tensorstore::StrCat(ready.value().endpoint, "/",
state->object_name);
state->endpoint_region_ = std::move(ready);
intrusive_ptr_increment(state.get());
state->owner->read_rate_limiter().Admit(state.get(), &ReadTask::Start);
});
return std::move(op.future);
}
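// CRTP base shared by WriteTask and DeleteTask: when an if-match generation
// is supplied, a HEAD request is issued first so the precondition can be
// checked before the mutation is sent.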
template <typename Base>
struct ConditionTask : public RateLimiterNode,
public internal::AtomicReferenceCount<Base> {
using Self = ConditionTask<Base>;
IntrusivePtr<S3KeyValueStore> owner;
kvstore::WriteOptions options_;
ReadyFuture<const S3EndpointRegion> endpoint_region_;
std::string object_url_;
AwsCredentials credentials_;
ConditionTask(IntrusivePtr<S3KeyValueStore> owner,
kvstore::WriteOptions options,
ReadyFuture<const S3EndpointRegion> endpoint_region,
std::string object_url)
: owner(std::move(owner)),
options_(std::move(options)),
endpoint_region_(std::move(endpoint_region)),
object_url_(std::move(object_url)) {}
static void Start(void* task) {
auto* self = reinterpret_cast<Base*>(task);
self->owner->write_rate_limiter().Finish(self);
self->owner->admission_queue().Admit(self, &Base::Admit);
}
static void Admit(void* task) {
auto* self = reinterpret_cast<Base*>(task);
self->owner->executor()(
[state = IntrusivePtr<Base>(self, internal::adopt_object_ref)] {
state->Retry();
});
}
void Retry() {
if (static_cast<Base*>(this)->IsCancelled()) {
return;
}
if (auto maybe_credentials = owner->GetCredentials();
!maybe_credentials.ok()) {
static_cast<Base*>(this)->Fail(maybe_credentials.status());
return;
} else if (maybe_credentials.value().has_value()) {
credentials_ = std::move(*maybe_credentials.value());
}
if (StorageGeneration::IsUnknown(options_.generation_conditions.if_equal)) {
static_cast<Base*>(this)->AfterHeadRequest();
return;
}
auto builder = S3RequestBuilder("HEAD", object_url_);
AddGenerationHeader(&builder, "if-match",
options_.generation_conditions.if_equal);
auto now = absl::Now();
const auto& ehr = endpoint_region_.value();
auto request = builder.MaybeAddRequesterPayer(owner->spec_.requester_pays)
.BuildRequest(owner->host_header_, credentials_,
ehr.aws_region, kEmptySha256, now);
ABSL_LOG_IF(INFO, s3_logging) << "Peek: " << request;
auto future = owner->transport_->IssueRequest(request, {});
future.ExecuteWhenReady([self = IntrusivePtr<Base>(static_cast<Base*>(
this))](ReadyFuture<HttpResponse> response) {
ABSL_LOG_IF(INFO, s3_logging.Level(1) && response.result().ok())
<< "Peek (Response): " << response.value();
if (self->IsCancelled()) return;
self->OnHeadResponse(response.result());
});
}
};
struct WriteTask : public ConditionTask<WriteTask> {
using Base = ConditionTask<WriteTask>;
absl::Cord value_;
Promise<TimestampedStorageGeneration> promise;
int attempt_ = 0;
absl::Time start_time_;
WriteTask(IntrusivePtr<S3KeyValueStore> o, kvstore::WriteOptions options,
ReadyFuture<const S3EndpointRegion> endpoint_region,
std::string object_url, absl::Cord value,
Promise<TimestampedStorageGeneration> promise)
: Base(std::move(o), std::move(options), std::move(endpoint_region),
std::move(object_url)),
value_(std::move(value)),
promise(std::move(promise)) {}
~WriteTask() { owner->admission_queue().Finish(this); }
bool IsCancelled() { return !promise.result_needed(); }
void Fail(absl::Status status) { promise.SetResult(std::move(status)); }
void OnHeadResponse(const Result<HttpResponse>& response) {
if (!response.ok()) {
Fail(response.status());
return;
}
TimestampedStorageGeneration r;
r.time = absl::Now();
switch (response.value().status_code) {
case 304:
[[fallthrough]];
case 412:
r.generation = StorageGeneration::Unknown();
promise.SetResult(r);
return;
case 404:
if (!options_.generation_conditions.MatchesNoValue()) {
r.generation = StorageGeneration::Unknown();
promise.SetResult(r);
return;
}
break;
default:
break;
}
AfterHeadRequest();
}
void AfterHeadRequest() {
start_time_ = absl::Now();
auto content_sha256 = payload_sha256(value_);
const auto& ehr = endpoint_region_.value();
auto request =
S3RequestBuilder("PUT", object_url_)
.AddHeader("Content-Type: application/octet-stream")
.AddHeader(absl::StrCat("Content-Length: ", value_.size()))
.MaybeAddRequesterPayer(owner->spec_.requester_pays)
.BuildRequest(owner->host_header_, credentials_, ehr.aws_region,
content_sha256, start_time_);
ABSL_LOG_IF(INFO, s3_logging)
<< "WriteTask: " << request << " size=" << value_.size();
auto future = owner->transport_->IssueRequest(
request, internal_http::IssueRequestOptions(value_));
future.ExecuteWhenReady([self = IntrusivePtr<WriteTask>(this)](
ReadyFuture<HttpResponse> response) {
self->OnResponse(response.result());
});
}
void OnResponse(const Result<HttpResponse>& response) {
if (!promise.result_needed()) {
return;
}
ABSL_LOG_IF(INFO, s3_logging.Level(1) && response.ok())
<< "WriteTask " << *response;
bool is_retryable = false;
absl::Status status = [&]() -> absl::Status {
if (!response.ok()) {
is_retryable = DefaultIsRetryableCode(response.status().code());
return response.status();
}
return AwsHttpResponseToStatus(response.value(), is_retryable);
}();
if (!status.ok() && is_retryable) {
status =
owner->BackoffForAttemptAsync(std::move(status), attempt_++, this);
if (status.ok()) {
return;
}
}
if (!status.ok()) {
promise.SetResult(status);
return;
}
promise.SetResult(FinishResponse(response.value()));
}
Result<TimestampedStorageGeneration> FinishResponse(
const HttpResponse& response) {
TimestampedStorageGeneration r;
r.time = start_time_;
switch (response.status_code) {
case 404:
if (!StorageGeneration::IsUnknown(
options_.generation_conditions.if_equal)) {
r.generation = StorageGeneration::Unknown();
return r;
}
}
auto latency = absl::Now() - start_time_;
s3_metrics.write_latency_ms.Observe(absl::ToInt64Milliseconds(latency));
s3_metrics.bytes_written.IncrementBy(value_.size());
TENSORSTORE_ASSIGN_OR_RETURN(
r.generation, StorageGenerationFromHeaders(response.headers));
return r;
}
};
struct DeleteTask : public ConditionTask<DeleteTask> {
using Base = ConditionTask<DeleteTask>;
Promise<TimestampedStorageGeneration> promise;
int attempt_ = 0;
absl::Time start_time_;
DeleteTask(IntrusivePtr<S3KeyValueStore> o, kvstore::WriteOptions options,
ReadyFuture<const S3EndpointRegion> endpoint_region,
std::string object_url,
Promise<TimestampedStorageGeneration> promise)
: Base(std::move(o), std::move(options), std::move(endpoint_region),
std::move(object_url)),
promise(std::move(promise)) {}
~DeleteTask() { owner->admission_queue().Finish(this); }
bool IsCancelled() { return !promise.result_needed(); }
void Fail(absl::Status status) { promise.SetResult(std::move(status)); }
void OnHeadResponse(const Result<HttpResponse>& response) {
if (!response.ok()) {
promise.SetResult(response.status());
return;
}
TimestampedStorageGeneration r;
r.time = absl::Now();
switch (response.value().status_code) {
case 412:
r.generation = StorageGeneration::Unknown();
promise.SetResult(std::move(r));
return;
case 404:
if (!options_.generation_conditions.MatchesNoValue()) {
r.generation = StorageGeneration::Unknown();
promise.SetResult(std::move(r));
return;
}
break;
default:
break;
}
AfterHeadRequest();
}
void AfterHeadRequest() {
start_time_ = absl::Now();
const auto& ehr = endpoint_region_.value();
auto request = S3RequestBuilder("DELETE", object_url_)
.MaybeAddRequesterPayer(owner->spec_.requester_pays)
.BuildRequest(owner->host_header_, credentials_,
ehr.aws_region, kEmptySha256, start_time_);
ABSL_LOG_IF(INFO, s3_logging) << "DeleteTask: " << request;
auto future = owner->transport_->IssueRequest(request, {});
future.ExecuteWhenReady([self = IntrusivePtr<DeleteTask>(this)](
ReadyFuture<HttpResponse> response) {
self->OnResponse(response.result());
});
}
void OnResponse(const Result<HttpResponse>& response) {
if (!promise.result_needed()) {
return;
}
ABSL_LOG_IF(INFO, s3_logging.Level(1) && response.ok())
<< "DeleteTask " << *response;
bool is_retryable = false;
absl::Status status = [&]() -> absl::Status {
if (!response.ok()) {
is_retryable = DefaultIsRetryableCode(response.status().code());
return response.status();
}
switch (response.value().status_code) {
case 404:
return absl::OkStatus();
default:
break;
}
return AwsHttpResponseToStatus(response.value(), is_retryable);
}();
if (!status.ok() && is_retryable) {
status =
owner->BackoffForAttemptAsync(std::move(status), attempt_++, this);
if (status.ok()) {
return;
}
}
if (!status.ok()) {
promise.SetResult(status);
return;
}
TimestampedStorageGeneration r;
r.time = start_time_;
switch (response.value().status_code) {
case 404:
if (!StorageGeneration::IsNoValue(
options_.generation_conditions.if_equal) &&
!StorageGeneration::IsUnknown(
options_.generation_conditions.if_equal)) {
r.generation = StorageGeneration::Unknown();
break;
}
[[fallthrough]];
default:
r.generation = StorageGeneration::NoValue();
break;
}
promise.SetResult(std::move(r));
}
};
Future<TimestampedStorageGeneration> S3KeyValueStore::Write(
Key key, std::optional<Value> value, WriteOptions options) {
s3_metrics.write.Increment();
if (!IsValidObjectName(key)) {
return absl::InvalidArgumentError("Invalid S3 object name");
}
if (!IsValidStorageGeneration(options.generation_conditions.if_equal)) {
return absl::InvalidArgumentError("Malformed StorageGeneration");
}
if (value && value->size() > kMaxS3PutSize) {
return absl::InvalidArgumentError(absl::StrCat(
"Object size ", value->size(), " exceeds S3 limit of ", kMaxS3PutSize));
}
auto op = PromiseFuturePair<TimestampedStorageGeneration>::Make();
MaybeResolveRegion().ExecuteWhenReady(
[self = IntrusivePtr<S3KeyValueStore>(this),
promise = std::move(op.promise), key = std::move(key),
value = std::move(value), options = std::move(options)](
ReadyFuture<const S3EndpointRegion> ready) {
if (!ready.status().ok()) {
promise.SetResult(ready.status());
return;
}
std::string object_url =
tensorstore::StrCat(ready.value().endpoint, "/", key);
if (!value) {
auto state = internal::MakeIntrusivePtr<DeleteTask>(
std::move(self), std::move(options), std::move(ready),
std::move(object_url), std::move(promise));
          intrusive_ptr_increment(state.get());
state->owner->write_rate_limiter().Admit(state.get(),
&DeleteTask::Start);
return;
}
auto state = internal::MakeIntrusivePtr<WriteTask>(
std::move(self), std::move(options), std::move(ready),
std::move(object_url), *std::move(value), std::move(promise));
intrusive_ptr_increment(state.get());
state->owner->write_rate_limiter().Admit(state.get(),
&WriteTask::Start);
});
return std::move(op.future);
}
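// Lists objects via the ListObjectsV2 API, following NextContinuationToken
// pagination and streaming matching entries to the receiver.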
struct ListTask : public RateLimiterNode,
public internal::AtomicReferenceCount<ListTask> {
internal::IntrusivePtr<S3KeyValueStore> owner_;
ListOptions options_;
ListReceiver receiver_;
std::string resource_;
ReadyFuture<const S3EndpointRegion> endpoint_region_;
std::string continuation_token_;
absl::Time start_time_;
int attempt_ = 0;
bool has_query_parameters_;
std::atomic<bool> cancelled_{false};
ListTask(internal::IntrusivePtr<S3KeyValueStore>&& owner,
ListOptions&& options, ListReceiver&& receiver)
: owner_(std::move(owner)),
options_(std::move(options)),
receiver_(std::move(receiver)) {
execution::set_starting(receiver_, [this] {
cancelled_.store(true, std::memory_order_relaxed);
});
}
~ListTask() {
execution::set_stopping(receiver_);
owner_->admission_queue().Finish(this);
}
inline bool is_cancelled() {
return cancelled_.load(std::memory_order_relaxed);
}
static void Start(void* task) {
auto* self = reinterpret_cast<ListTask*>(task);
self->owner_->read_rate_limiter().Finish(self);
self->owner_->admission_queue().Admit(self, &ListTask::Admit);
}
static void Admit(void* task) {
auto* self = reinterpret_cast<ListTask*>(task);
self->owner_->executor()(
[state = IntrusivePtr<ListTask>(self, internal::adopt_object_ref)] {
state->IssueRequest();
});
}
void Retry() { IssueRequest(); }
void IssueRequest() {
if (is_cancelled()) {
execution::set_done(receiver_);
return;
}
auto request_builder =
S3RequestBuilder("GET", resource_).AddQueryParameter("list-type", "2");
if (auto prefix = LongestPrefix(options_.range); !prefix.empty()) {
request_builder.AddQueryParameter("prefix", std::string(prefix));
}
if (!continuation_token_.empty()) {
request_builder.AddQueryParameter("continuation-token",
continuation_token_);
}
AwsCredentials credentials;
if (auto maybe_credentials = owner_->GetCredentials();
!maybe_credentials.ok()) {
execution::set_error(receiver_, std::move(maybe_credentials).status());
return;
} else if (maybe_credentials.value().has_value()) {
credentials = std::move(*maybe_credentials.value());
}
const auto& ehr = endpoint_region_.value();
start_time_ = absl::Now();
auto request =
request_builder.BuildRequest(owner_->host_header_, credentials,
ehr.aws_region, kEmptySha256, start_time_);
ABSL_LOG_IF(INFO, s3_logging) << "List: " << request;
auto future = owner_->transport_->IssueRequest(request, {});
future.ExecuteWhenReady(WithExecutor(
owner_->executor(), [self = IntrusivePtr<ListTask>(this)](
ReadyFuture<HttpResponse> response) {
self->OnResponse(response.result());
}));
}
void OnResponse(const Result<HttpResponse>& response) {
auto status = OnResponseImpl(response);
if (absl::IsCancelled(status)) {
execution::set_done(receiver_);
return;
}
if (!status.ok()) {
execution::set_error(receiver_, std::move(status));
return;
}
}
absl::Status OnResponseImpl(const Result<HttpResponse>& response) {
if (is_cancelled()) {
return absl::CancelledError();
}
ABSL_LOG_IF(INFO, s3_logging.Level(1) && response.ok())
<< "List " << *response;
bool is_retryable = false;
absl::Status status = [&]() -> absl::Status {
if (!response.ok()) {
is_retryable = DefaultIsRetryableCode(response.status().code());
return response.status();
}
return AwsHttpResponseToStatus(response.value(), is_retryable);
}();
if (!status.ok() && is_retryable) {
return owner_->BackoffForAttemptAsync(std::move(status), attempt_++,
this);
}
auto cord = response->payload;
auto payload = cord.Flatten();
tinyxml2::XMLDocument xmlDocument;
if (int xmlcode = xmlDocument.Parse(payload.data(), payload.size());
xmlcode != tinyxml2::XML_SUCCESS) {
return absl::InvalidArgumentError(
absl::StrCat("Malformed List response: ", xmlcode));
}
auto* root = xmlDocument.FirstChildElement("ListBucketResult");
if (root == nullptr) {
return absl::InvalidArgumentError(
"Malformed List response: missing <ListBucketResult>");
}
for (auto* contents = root->FirstChildElement("Contents");
contents != nullptr;
contents = contents->NextSiblingElement("Contents")) {
if (is_cancelled()) {
return absl::CancelledError();
}
auto* key_node = contents->FirstChildElement("Key");
if (key_node == nullptr) {
return absl::InvalidArgumentError(
"Malformed List response: missing <Key> in <Contents>");
}
std::string key = GetNodeText(key_node);
if (key < options_.range.inclusive_min) continue;
if (KeyRange::CompareKeyAndExclusiveMax(
key, options_.range.exclusive_max) >= 0) {
execution::set_done(receiver_);
return absl::OkStatus();
}
int64_t size =
GetNodeInt(contents->FirstChildElement("Size")).value_or(-1);
if (key.size() > options_.strip_prefix_length) {
execution::set_value(
receiver_,
ListEntry{key.substr(options_.strip_prefix_length), size});
}
}
attempt_ = 0;
if (GetNodeText(root->FirstChildElement("IsTruncated")) == "true") {
auto* next_continuation_token =
root->FirstChildElement("NextContinuationToken");
if (next_continuation_token == nullptr) {
return absl::InvalidArgumentError(
"Malformed List response: missing <NextContinuationToken>");
}
continuation_token_ = GetNodeText(next_continuation_token);
IssueRequest();
} else {
execution::set_done(receiver_);
}
return absl::OkStatus();
}
};
void S3KeyValueStore::ListImpl(ListOptions options, ListReceiver receiver) {
s3_metrics.list.Increment();
if (options.range.empty()) {
execution::set_starting(receiver, [] {});
execution::set_done(receiver);
execution::set_stopping(receiver);
return;
}
auto state = internal::MakeIntrusivePtr<ListTask>(
IntrusivePtr<S3KeyValueStore>(this), std::move(options),
std::move(receiver));
MaybeResolveRegion().ExecuteWhenReady(
[state = std::move(state)](ReadyFuture<const S3EndpointRegion> ready) {
if (!ready.status().ok()) {
execution::set_error(state->receiver_, ready.status());
return;
}
state->resource_ = tensorstore::StrCat(ready.value().endpoint, "/");
state->endpoint_region_ = std::move(ready);
intrusive_ptr_increment(state.get());
state->owner_->read_rate_limiter().Admit(state.get(), &ListTask::Start);
});
}
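// Receiver used by DeleteRange: issues an individual Delete for every key
// returned by List, since S3 offers no range-based delete.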
struct DeleteRangeListReceiver {
IntrusivePtr<S3KeyValueStore> owner_;
Promise<void> promise_;
FutureCallbackRegistration cancel_registration_;
void set_starting(AnyCancelReceiver cancel) {
cancel_registration_ = promise_.ExecuteWhenNotNeeded(std::move(cancel));
}
void set_value(ListEntry entry) {
assert(!entry.key.empty());
if (!entry.key.empty()) {
LinkError(promise_, owner_->Delete(std::move(entry.key)));
}
}
void set_error(absl::Status error) {
SetDeferredResult(promise_, std::move(error));
promise_ = Promise<void>();
}
void set_done() { promise_ = Promise<void>(); }
void set_stopping() { cancel_registration_.Unregister(); }
};
Future<const void> S3KeyValueStore::DeleteRange(KeyRange range) {
s3_metrics.delete_range.Increment();
if (range.empty()) return absl::OkStatus();
auto op = PromiseFuturePair<void>::Make(tensorstore::MakeResult());
ListOptions list_options;
list_options.range = std::move(range);
ListImpl(list_options, DeleteRangeListReceiver{
internal::IntrusivePtr<S3KeyValueStore>(this),
std::move(op.promise)});
return std::move(op.future);
}
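// Lazily resolves the bucket's endpoint and AWS region on first use and
// caches the resulting future for all later operations.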
Future<const S3EndpointRegion> S3KeyValueStore::MaybeResolveRegion() {
absl::MutexLock l(&mutex_);
if (!resolve_ehr_.null()) return resolve_ehr_;
resolve_ehr_ = internal_kvstore_s3::ResolveEndpointRegion(
spec_.bucket,
!spec_.endpoint.has_value() || spec_.endpoint.value().empty()
? std::string_view{}
: std::string_view(spec_.endpoint.value()),
spec_.host_header.value_or(std::string{}), transport_);
resolve_ehr_.ExecuteWhenReady([](ReadyFuture<const S3EndpointRegion> ready) {
if (!ready.status().ok()) {
ABSL_LOG_IF(INFO, s3_logging)
<< "S3 driver failed to resolve endpoint: " << ready.status();
} else {
ABSL_LOG_IF(INFO, s3_logging)
<< "S3 driver using endpoint [" << ready.value() << "]";
}
});
return resolve_ehr_;
}
Future<kvstore::DriverPtr> S3KeyValueStoreSpec::DoOpen() const {
auto driver = internal::MakeIntrusivePtr<S3KeyValueStore>(
internal_http::GetDefaultHttpTransport(), data_);
if (data_.rate_limiter.has_value()) {
ABSL_LOG_IF(INFO, s3_logging) << "Using experimental_s3_rate_limiter";
}
auto result = internal_kvstore_s3::ValidateEndpoint(
data_.bucket, data_.aws_region, data_.endpoint.value_or(std::string{}),
driver->host_header_);
if (auto* status = std::get_if<absl::Status>(&result);
status != nullptr && !status->ok()) {
return std::move(*status);
}
if (auto* ehr = std::get_if<S3EndpointRegion>(&result); ehr != nullptr) {
ABSL_LOG_IF(INFO, s3_logging)
<< "S3 driver using endpoint [" << *ehr << "]";
driver->resolve_ehr_ = MakeReadyFuture<S3EndpointRegion>(std::move(*ehr));
}
return driver;
}
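// Parses an "s3://bucket/path" URL into a driver spec with default context
// resources.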
Result<kvstore::Spec> ParseS3Url(std::string_view url) {
auto parsed = internal::ParseGenericUri(url);
assert(parsed.scheme == kUriScheme);
if (!parsed.query.empty()) {
return absl::InvalidArgumentError("Query string not supported");
}
if (!parsed.fragment.empty()) {
return absl::InvalidArgumentError("Fragment identifier not supported");
}
if (!IsValidBucketName(parsed.authority)) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Invalid S3 bucket name: ", QuoteString(parsed.authority)));
}
auto decoded_path = parsed.path.empty()
? std::string()
: internal::PercentDecode(parsed.path.substr(1));
auto driver_spec = internal::MakeIntrusivePtr<S3KeyValueStoreSpec>();
driver_spec->data_.bucket = std::string(parsed.authority);
driver_spec->data_.requester_pays = false;
driver_spec->data_.aws_credentials =
Context::Resource<AwsCredentialsResource>::DefaultSpec();
driver_spec->data_.request_concurrency =
Context::Resource<S3ConcurrencyResource>::DefaultSpec();
driver_spec->data_.retries =
Context::Resource<S3RequestRetries>::DefaultSpec();
driver_spec->data_.data_copy_concurrency =
Context::Resource<DataCopyConcurrencyResource>::DefaultSpec();
return {std::in_place, std::move(driver_spec), std::move(decoded_path)};
}
const tensorstore::internal_kvstore::DriverRegistration<
tensorstore::S3KeyValueStoreSpec>
registration;
const tensorstore::internal_kvstore::UrlSchemeRegistration
url_scheme_registration{kUriScheme, tensorstore::ParseS3Url};
}
}
TENSORSTORE_DECLARE_GARBAGE_COLLECTION_NOT_REQUIRED(
tensorstore::S3KeyValueStore) | #include <memory>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "absl/strings/match.h"
#include "tensorstore/context.h"
#include "tensorstore/internal/http/curl_transport.h"
#include "tensorstore/internal/http/http_request.h"
#include "tensorstore/internal/http/http_response.h"
#include "tensorstore/internal/http/http_transport.h"
#include "tensorstore/internal/http/mock_http_transport.h"
#include "tensorstore/kvstore/generation.h"
#include "tensorstore/kvstore/key_range.h"
#include "tensorstore/kvstore/kvstore.h"
#include "tensorstore/kvstore/operations.h"
#include "tensorstore/kvstore/test_matchers.h"
#include "tensorstore/kvstore/test_util.h"
#include "tensorstore/util/future.h"
#include "tensorstore/util/status_testutil.h"
namespace kvstore = ::tensorstore::kvstore;
using ::tensorstore::Context;
using ::tensorstore::MatchesStatus;
using ::tensorstore::StatusIs;
using ::tensorstore::StorageGeneration;
using ::tensorstore::internal::MatchesKvsReadResult;
using ::tensorstore::internal::MatchesListEntry;
using ::tensorstore::internal::MatchesTimestampedStorageGeneration;
using ::tensorstore::internal_http::DefaultMockHttpTransport;
using ::tensorstore::internal_http::HttpResponse;
using ::tensorstore::internal_http::HttpTransport;
using ::tensorstore::internal_http::SetDefaultHttpTransport;
namespace {
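// Context with fast, bounded retries so retry paths can be exercised without
// slowing the tests down.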
Context DefaultTestContext() {
return Context{Context::Spec::FromJson({
{"s3_request_retries",
{{"max_retries", 2},
{"initial_delay", "1ms"},
{"max_delay", "2ms"}}},
})
.value()};
}
TEST(S3KeyValueStoreTest, BadBucketNames) {
auto context = DefaultTestContext();
for (auto bucket : {"a", "_abc", "abc_", "a..b", "a.-.b"}) {
EXPECT_FALSE(kvstore::Open({{"driver", "s3"},
{"bucket", bucket},
{"endpoint", "https:
context)
.result())
<< "bucket: " << bucket;
}
for (auto bucket :
{"abc", "abc.1-2-3.abc",
"a."
"0123456789123456789012345678912345678901234567891234567890"
"1234567891234567890123456789123456789012345678912345678901"
"23456789123456789.B"}) {
EXPECT_TRUE(kvstore::Open({{"driver", "s3"},
{"bucket", bucket},
{"endpoint", "https:
{"aws_region", "us-east-1"}},
context)
.result())
<< "bucket: " << bucket;
}
}
TEST(S3KeyValueStoreTest, SpecRoundtrip) {
tensorstore::internal::KeyValueStoreSpecRoundtripOptions options;
options.check_write_read = false;
options.check_data_persists = false;
options.check_data_after_serialization = false;
options.full_spec = {{"driver", "s3"}, {"bucket", "mybucket"}};
tensorstore::internal::TestKeyValueStoreSpecRoundtrip(options);
}
TEST(S3KeyValueStoreTest, InvalidSpec) {
auto context = DefaultTestContext();
EXPECT_THAT(kvstore::Open(
{{"driver", "s3"}, {"bucket", "my-bucket"}, {"extra", "key"}},
context)
.result(),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(kvstore::Open({{"driver", "s3"}}, context).result(),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(
kvstore::Open({{"driver", "s3"}, {"bucket", "a"}}, context).result(),
MatchesStatus(absl::StatusCode::kInvalidArgument));
}
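// RAII helper that installs a mock transport as the process-wide default
// HTTP transport for the duration of a test.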
struct DefaultHttpTransportSetter {
DefaultHttpTransportSetter(std::shared_ptr<HttpTransport> transport) {
SetDefaultHttpTransport(transport);
}
~DefaultHttpTransportSetter() { SetDefaultHttpTransport(nullptr); }
};
TEST(S3KeyValueStoreTest, SimpleMock_VirtualHost) {
absl::flat_hash_map<std::string, HttpResponse> url_to_response{
{"HEAD https:
HttpResponse{200, absl::Cord(), {{"x-amz-bucket-region", "us-east-1"}}}},
{"GET https:
HttpResponse{200,
absl::Cord("abcd"),
{{"etag", "900150983cd24fb0d6963f7d28e17f72"}}}},
{"PUT https:
HttpResponse{
200, absl::Cord(), {{"etag", "900150983cd24fb0d6963f7d28e17f72"}}}},
};
auto mock_transport =
std::make_shared<DefaultMockHttpTransport>(url_to_response);
DefaultHttpTransportSetter mock_transport_setter{mock_transport};
auto context = DefaultTestContext();
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto store,
kvstore::Open(
{{"driver", "s3"}, {"bucket", "my-bucket"}, {"path", "tmp:1/"}},
context)
.result());
auto read_result = kvstore::Read(store, "key_read").result();
EXPECT_THAT(read_result,
MatchesKvsReadResult(absl::Cord("abcd"),
StorageGeneration::FromString(
"900150983cd24fb0d6963f7d28e17f72")));
EXPECT_THAT(kvstore::Write(store, "key_write", absl::Cord("xyz")).result(),
MatchesTimestampedStorageGeneration(StorageGeneration::FromString(
"900150983cd24fb0d6963f7d28e17f72")));
TENSORSTORE_EXPECT_OK(kvstore::Delete(store, "key_delete"));
int host_header_validated = 0;
for (const auto& request : mock_transport->requests()) {
if (absl::StartsWith(request.url,
"https:
host_header_validated++;
EXPECT_THAT(
request.headers,
testing::Contains("host: my-bucket.s3.us-east-1.amazonaws.com"));
}
}
EXPECT_THAT(host_header_validated, testing::Ge(2));
}
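// A bucket name containing dots is addressed path-style against the regional
// endpoint instead of as a virtual host, as the host-header check below
// confirms.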
TEST(S3KeyValueStoreTest, SimpleMock_NoVirtualHost) {
absl::flat_hash_map<std::string, HttpResponse> url_to_response{
{"HEAD https:
HttpResponse{200, absl::Cord(), {{"x-amz-bucket-region", "us-east-1"}}}},
{"GET https:
HttpResponse{200,
absl::Cord("abcd"),
{{"etag", "900150983cd24fb0d6963f7d28e17f72"}}}},
{"PUT https:
HttpResponse{
200, absl::Cord(), {{"etag", "900150983cd24fb0d6963f7d28e17f72"}}}},
};
auto mock_transport =
std::make_shared<DefaultMockHttpTransport>(url_to_response);
DefaultHttpTransportSetter mock_transport_setter{mock_transport};
auto context = DefaultTestContext();
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto store,
kvstore::Open({{"driver", "s3"},
{"bucket", "my.bucket"},
{"aws_region", "us-east-1"}},
context)
.result());
auto read_result = kvstore::Read(store, "key_read").result();
EXPECT_THAT(read_result,
MatchesKvsReadResult(absl::Cord("abcd"),
StorageGeneration::FromString(
"900150983cd24fb0d6963f7d28e17f72")));
EXPECT_THAT(kvstore::Write(store, "key_write", absl::Cord("xyz")).result(),
MatchesTimestampedStorageGeneration(StorageGeneration::FromString(
"900150983cd24fb0d6963f7d28e17f72")));
TENSORSTORE_EXPECT_OK(kvstore::Delete(store, "key_delete"));
int host_header_validated = 0;
for (const auto& request : mock_transport->requests()) {
if (absl::StartsWith(request.url, "https:
host_header_validated++;
EXPECT_THAT(request.headers,
testing::Contains("host: s3.us-east-1.amazonaws.com"));
}
}
EXPECT_THAT(host_header_validated, testing::Ge(2));
}
TEST(S3KeyValueStoreTest, SimpleMock_Endpoint) {
absl::flat_hash_map<std::string, HttpResponse> url_to_response{
{"HEAD https:
HttpResponse{200, absl::Cord(), {{"x-amz-bucket-region", "us-east-1"}}}},
{"GET https:
HttpResponse{200,
absl::Cord("abcd"),
{{"etag", "900150983cd24fb0d6963f7d28e17f72"}}}},
{"PUT https:
HttpResponse{
200, absl::Cord(), {{"etag", "900150983cd24fb0d6963f7d28e17f72"}}}},
};
auto mock_transport =
std::make_shared<DefaultMockHttpTransport>(url_to_response);
DefaultHttpTransportSetter mock_transport_setter{mock_transport};
auto context = DefaultTestContext();
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto store, kvstore::Open({{"driver", "s3"},
{"bucket", "my-bucket"},
{"endpoint", "https:
{"aws_credentials", {{"anonymous", true}}},
{"path", "tmp:1/"}},
context)
.result());
auto read_result = kvstore::Read(store, "key_read").result();
EXPECT_THAT(read_result,
MatchesKvsReadResult(absl::Cord("abcd"),
StorageGeneration::FromString(
"900150983cd24fb0d6963f7d28e17f72")));
EXPECT_THAT(kvstore::Write(store, "key_write", absl::Cord("xyz")).result(),
MatchesTimestampedStorageGeneration(StorageGeneration::FromString(
"900150983cd24fb0d6963f7d28e17f72")));
TENSORSTORE_EXPECT_OK(kvstore::Delete(store, "key_delete"));
int host_header_validated = 0;
for (const auto& request : mock_transport->requests()) {
if (absl::StartsWith(request.url, "https:
host_header_validated++;
EXPECT_THAT(request.headers, testing::Contains("host: localhost:1234"));
}
}
EXPECT_THAT(host_header_validated, testing::Ge(2));
}
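// Listing is paginated: the first ListObjectsV2 response is truncated and
// carries a NextContinuationToken, which the driver must follow to fetch the
// remaining keys before completing the list operation.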
TEST(S3KeyValueStoreTest, SimpleMock_List) {
const auto kListResultA =
"<?xml version=\"1.0\" encoding=\"UTF-8\"?>"
"<ListBucketResult xmlns=\"http:
"<Name>bucket</Name>"
"<Prefix></Prefix>"
"<KeyCount>3</KeyCount>"
"<MaxKeys>1000</MaxKeys>"
"<IsTruncated>true</IsTruncated>"
"<NextContinuationToken>CONTINUE</NextContinuationToken>"
"<Contents><Key>a</Key>"
"<LastModified>2023-09-06T17:53:27.000Z</LastModified>"
"<ETag>"d41d8cd98f00b204e9800998ecf8427e"</ETag>"
"<Size>0</Size><StorageClass>STANDARD</StorageClass></Contents>"
"<Contents><Key>b</Key>"
"<LastModified>2023-09-06T17:53:28.000Z</LastModified>"
"<ETag>"d41d8cd98f00b204e9800998ecf8427e"</ETag>"
"<Size>0</Size><StorageClass>STANDARD</StorageClass></Contents>"
"<Contents><Key>b/a</Key>"
"<LastModified>2023-09-06T17:53:28.000Z</LastModified>"
"<ETag>"d41d8cd98f00b204e9800998ecf8427e"</ETag>"
"<Size>0</Size><StorageClass>STANDARD</StorageClass></Contents>"
"</ListBucketResult>";
const auto kListResultB =
"<?xml version=\"1.0\" encoding=\"UTF-8\"?>"
"<ListBucketResult xmlns=\"http:
"<Name>bucket</Name>"
"<Prefix></Prefix>"
"<KeyCount>2</KeyCount>"
"<MaxKeys>1000</MaxKeys>"
"<IsTruncated>false</IsTruncated>"
"<Contents><Key>b/b</Key>"
"<LastModified>2023-09-06T17:53:28.000Z</LastModified>"
"<ETag>"d41d8cd98f00b204e9800998ecf8427e"</ETag>"
"<Size>0</Size><StorageClass>STANDARD</StorageClass></Contents>"
"<Contents><Key>c</Key>"
"<LastModified>2023-09-06T17:53:28.000Z</LastModified>"
"<ETag>"d41d8cd98f00b204e9800998ecf8427e"</ETag>"
"<Size>0</Size><StorageClass>STANDARD</StorageClass></Contents>"
"</ListBucketResult>";
absl::flat_hash_map<std::string, HttpResponse> url_to_response{
{"HEAD https:
HttpResponse{200, absl::Cord(), {{"x-amz-bucket-region", "us-east-1"}}}},
{"GET https:
HttpResponse{200, absl::Cord(kListResultA), {}}},
{"GET "
"https:
"?continuation-token=CONTINUE&list-type=2",
HttpResponse{200, absl::Cord(kListResultB), {}}},
};
auto mock_transport =
std::make_shared<DefaultMockHttpTransport>(url_to_response);
DefaultHttpTransportSetter mock_transport_setter{mock_transport};
auto context = DefaultTestContext();
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto store,
kvstore::Open({{"driver", "s3"}, {"bucket", "my-bucket"}}, context)
.result());
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto list_result,
kvstore::ListFuture(store, {}).result());
EXPECT_THAT(list_result, ::testing::ElementsAre(
MatchesListEntry("a"), MatchesListEntry("b"),
MatchesListEntry("b/a"), MatchesListEntry("b/b"),
MatchesListEntry("c")));
}
TEST(S3KeyValueStoreTest, SimpleMock_ListPrefix) {
const auto kListResult =
"<?xml version=\"1.0\" encoding=\"UTF-8\"?>"
"<ListBucketResult xmlns=\"http:
"<Name>bucket</Name>"
"<Prefix>b</Prefix>"
"<KeyCount>4</KeyCount>"
"<MaxKeys>1000</MaxKeys>"
"<IsTruncated>false</IsTruncated>"
"<Contents><Key>b</Key>"
"<LastModified>2023-09-06T17:53:28.000Z</LastModified>"
"<ETag>"d41d8cd98f00b204e9800998ecf8427e"</ETag>"
"<Size>0</Size><StorageClass>STANDARD</StorageClass></Contents>"
"<Contents><Key>b/a</Key>"
"<LastModified>2023-09-06T17:53:28.000Z</LastModified>"
"<ETag>"d41d8cd98f00b204e9800998ecf8427e"</ETag>"
"<Size>0</Size><StorageClass>STANDARD</StorageClass></Contents>"
"<Contents><Key>b/b</Key>"
"<LastModified>2023-09-06T17:53:28.000Z</LastModified>"
"<ETag>"d41d8cd98f00b204e9800998ecf8427e"</ETag>"
"<Size>0</Size><StorageClass>STANDARD</StorageClass></Contents>"
"<Contents><Key>c</Key>"
"<LastModified>2023-09-06T17:53:28.000Z</LastModified>"
"<ETag>"d41d8cd98f00b204e9800998ecf8427e"</ETag>"
"<Size>0</Size><StorageClass>STANDARD</StorageClass></Contents>"
"</ListBucketResult>";
absl::flat_hash_map<std::string, HttpResponse> url_to_response{
{"HEAD https:
HttpResponse{200, absl::Cord(), {{"x-amz-bucket-region", "us-east-1"}}}},
{"GET "
"https:
"?list-type=2&prefix=b",
HttpResponse{200,
absl::Cord(kListResult),
{{"etag", "900150983cd24fb0d6963f7d28e17f72"}}}},
};
auto mock_transport =
std::make_shared<DefaultMockHttpTransport>(url_to_response);
DefaultHttpTransportSetter mock_transport_setter{mock_transport};
auto context = DefaultTestContext();
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto store,
kvstore::Open({{"driver", "s3"}, {"bucket", "my-bucket"}}, context)
.result());
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto list_result,
kvstore::ListFuture(store, {::tensorstore::KeyRange::Prefix("b")})
.result());
EXPECT_THAT(list_result, ::testing::ElementsAre(MatchesListEntry("b"),
MatchesListEntry("b/a"),
MatchesListEntry("b/b")));
}
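// Every GET returns a throttling error, so the driver exhausts its retry
// budget and the read surfaces an aborted status.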
TEST(S3KeyValueStoreTest, SimpleMock_RetryTimesOut) {
absl::flat_hash_map<std::string, HttpResponse> url_to_response{
{"HEAD https:
HttpResponse{200, absl::Cord(), {{"x-amz-bucket-region", "us-east-1"}}}},
{"GET https:
HttpResponse{400,
absl::Cord(R"(<?xml version="1.0" encoding="UTF-8"?>
<Error>
<Code>ThrottledException</Code>
<Message>Endless retry</Message>
<Resource>/my-bucket/tmp:1/key_read</Resource>
<RequestId>4442587FB7D0A2F9</RequestId>
</Error>
)"),
{{"etag", "900150983cd24fb0d6963f7d28e17f72"}}}},
};
auto mock_transport =
std::make_shared<DefaultMockHttpTransport>(url_to_response);
DefaultHttpTransportSetter mock_transport_setter{mock_transport};
auto context = DefaultTestContext();
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto store, kvstore::Open({{"driver", "s3"},
{"bucket", "my-bucket"},
{"endpoint", "https:
{"path", "tmp:1/"}},
context)
.result());
auto read_result = kvstore::Read(store, "key_read").result();
EXPECT_THAT(read_result, StatusIs(absl::StatusCode::kAborted));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/s3/s3_key_value_store.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/s3/s3_key_value_store_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
02993a93-84f7-455b-baaa-72db6f6534c1 | cpp | tensorflow/tensorflow | sqrt | tensorflow/lite/experimental/shlo/ops/sqrt.cc | tensorflow/lite/delegates/xnnpack/sqrt_test.cc | #include "tensorflow/lite/experimental/shlo/ops/sqrt.h"
#include <cmath>
#include "absl/status/status.h"
#include "tensorflow/lite/experimental/shlo/bf16.h"
#include "tensorflow/lite/experimental/shlo/dispatch.h"
#include "tensorflow/lite/experimental/shlo/f16.h"
#include "tensorflow/lite/experimental/shlo/ops/unary_elementwise.h"
#include "tensorflow/lite/experimental/shlo/ops/util.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
namespace shlo_ref {
struct Sqrt {
template <class T>
T operator()(T v) const {
return std::sqrt(v);
}
};
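// F16 and BF16 have no std::sqrt overload, so compute in float and narrow the
// result back to the half-precision type.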
template <>
F16 Sqrt::operator()<F16>(F16 val) const {
return F16(operator()(static_cast<float>(val)));
}
template <>
BF16 Sqrt::operator()<BF16>(BF16 val) const {
return BF16(operator()(static_cast<float>(val)));
}
SqrtOp Create(SqrtOp::Attributes) { return {}; }
absl::Status Prepare(SqrtOp& op, const Tensor& input, Tensor& output) {
SHLO_REF_RETURN_ON_ERROR(Propagate(input.shape(), output.shape()));
SHLO_REF_RETURN_ON_ERROR(CheckSupportedTypes(
CheckCtx("sqrt"), input, IsFloatTensor, IsQuantizedPerTensorTensor));
SHLO_REF_RETURN_ON_ERROR(
CheckSameBaselineType(CheckCtx("sqrt"), input, output));
return absl::OkStatus();
}
absl::Status Evaluate(SqrtOp& op, const Tensor& input, Tensor& output) {
Sqrt sqrt;
if (input.IsPerTensorQuantized()) {
DISPATCH_QUANTIZED(
detail::DequantizeOpQuantizePerTensor,
input.quantized_per_tensor_element_type().StorageType(),
input.quantized_per_tensor_element_type().ExpressedType(), sqrt, input,
output)
} else if (IsFloatTensor(input)) {
DISPATCH_FLOAT(detail::EvaluateNoQuantization, input.tensor_element_type(),
sqrt, input, output);
}
return absl::FailedPreconditionError(
"stablehlo.sqrt: Unsupported tensor type.");
}
}; | #include <cstdint>
#include <functional>
#include <memory>
#include <random>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/xnnpack/unary_elementwise_tester.h"
#include "tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h"
namespace tflite {
namespace xnnpack {
TEST(Sqrt, 4D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
UnaryElementwiseTester()
.Shape({batch, height, width, channels})
.Test(BuiltinOperator_SQRT, xnnpack_delegate.get());
}
TEST(Sqrt, 3D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
UnaryElementwiseTester()
.Shape({batch, width, channels})
.Test(BuiltinOperator_SQRT, xnnpack_delegate.get());
}
TEST(Sqrt, 2D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto channels = shape_rng();
UnaryElementwiseTester()
.Shape({batch, channels})
.Test(BuiltinOperator_SQRT, xnnpack_delegate.get());
}
TEST(Sqrt, 1D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
UnaryElementwiseTester().Shape({batch}).Test(BuiltinOperator_SQRT,
xnnpack_delegate.get());
}
TEST(Sqrt, MultiThreading) {
TfLiteXNNPackDelegateOptions delegate_options =
TfLiteXNNPackDelegateOptionsDefault();
delegate_options.num_threads = 2;
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(&delegate_options),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
UnaryElementwiseTester()
.Shape({batch, height, width, channels})
.Test(BuiltinOperator_SQRT, xnnpack_delegate.get());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/ops/sqrt.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/xnnpack/sqrt_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ae8ab56f-beae-43b8-9489-c0242595dbcc | cpp | tensorflow/tensorflow | join | tensorflow/lite/testing/join.h | tensorflow/lite/testing/join_test.cc | #ifndef TENSORFLOW_LITE_TESTING_JOIN_H_
#define TENSORFLOW_LITE_TESTING_JOIN_H_
#include <cstdint>
#include <cstdlib>
#include <iomanip>
#include <sstream>
#include "tensorflow/lite/string_type.h"
namespace tflite {
namespace testing {
template <typename T>
string JoinDefault(T* data, size_t len, const string& delimiter) {
if (len == 0 || data == nullptr) {
return "";
}
std::stringstream result;
result << data[0];
for (int i = 1; i < len; i++) {
result << delimiter << data[i];
}
return result.str();
}
template <typename T>
string Join(T* data, size_t len, const string& delimiter) {
if (len == 0 || data == nullptr) {
return "";
}
std::stringstream result;
result << std::setprecision(9) << data[0];
for (int i = 1; i < len; i++) {
result << std::setprecision(9) << delimiter << data[i];
}
return result.str();
}
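// int8_t/uint8_t would otherwise be streamed as characters, so widen them to
// int to keep the joined string numeric.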
template <>
inline string Join<uint8_t>(uint8_t* data, size_t len,
const string& delimiter) {
if (len == 0 || data == nullptr) {
return "";
}
std::stringstream result;
result << static_cast<int>(data[0]);
for (int i = 1; i < len; i++) {
result << delimiter << static_cast<int>(data[i]);
}
return result.str();
}
template <>
inline string Join<int8_t>(int8_t* data, size_t len, const string& delimiter) {
if (len == 0 || data == nullptr) {
return "";
}
std::stringstream result;
result << static_cast<int>(data[0]);
for (int i = 1; i < len; i++) {
result << delimiter << static_cast<int>(data[i]);
}
return result.str();
}
}
}
#endif | #include "tensorflow/lite/testing/join.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
namespace tflite {
namespace testing {
namespace {
TEST(JoinTest, JoinInt) {
std::vector<int> data = {1, 2, 3};
EXPECT_EQ(Join(data.data(), data.size(), ","), "1,2,3");
}
TEST(JoinDefaultTest, JoinFloat) {
float data[] = {1.0, -3, 2.3, 1e-5};
EXPECT_EQ(JoinDefault(data, 4, " "), "1 -3 2.3 1e-05");
}
TEST(JoinTest, JoinFloat) {
float data[] = {1.0, -3, 2.3, 1e-5};
EXPECT_EQ(Join(data, 4, " "), "1 -3 2.29999995 9.99999975e-06");
}
TEST(JoinTest, JoinNullData) { EXPECT_THAT(Join<int>(nullptr, 3, ","), ""); }
TEST(JoinTest, JoinZeroData) {
std::vector<int> data;
EXPECT_THAT(Join(data.data(), 0, ","), "");
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/testing/join.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/testing/join_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
2bd5b8cb-2a69-4e2d-8ee5-48abd5be59bf | cpp | google/tensorstore | curl_wrappers | tensorstore/internal/http/curl_wrappers.cc | tensorstore/internal/http/curl_wrappers_test.cc | #include "tensorstore/internal/http/curl_wrappers.h"
#include <string>
#include <string_view>
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include <curl/curl.h>
#include "tensorstore/internal/source_location.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal_http {
void CurlPtrCleanup::operator()(CURL* c) { curl_easy_cleanup(c); }
void CurlMultiCleanup::operator()(CURLM* m) { curl_multi_cleanup(m); }
void CurlSlistCleanup::operator()(curl_slist* s) { curl_slist_free_all(s); }
std::string GetCurlUserAgentSuffix() {
static std::string agent =
tensorstore::StrCat("tensorstore/0.1 ", curl_version());
return agent;
}
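// Maps libcurl error codes onto absl::Status codes: transient network failures
// become kUnavailable, timeouts kDeadlineExceeded, malformed URLs
// kInvalidArgument, callback-driven cancellation kCancelled/kAborted, and
// libcurl misuse or resource exhaustion kInternal; unrecognized codes stay
// kUnknown.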
absl::Status CurlCodeToStatus(CURLcode code, std::string_view detail,
SourceLocation loc) {
auto error_code = absl::StatusCode::kUnknown;
switch (code) {
case CURLE_OK:
return absl::OkStatus();
case CURLE_COULDNT_RESOLVE_PROXY:
error_code = absl::StatusCode::kUnavailable;
if (detail.empty()) detail = "Failed to resolve proxy";
break;
case CURLE_OPERATION_TIMEDOUT:
error_code = absl::StatusCode::kDeadlineExceeded;
if (detail.empty()) detail = "Timed out";
break;
case CURLE_COULDNT_CONNECT:
case CURLE_COULDNT_RESOLVE_HOST:
case CURLE_GOT_NOTHING:
case CURLE_HTTP2:
case CURLE_HTTP2_STREAM:
case CURLE_PARTIAL_FILE:
case CURLE_RECV_ERROR:
case CURLE_SEND_ERROR:
case CURLE_SSL_CONNECT_ERROR:
case CURLE_UNSUPPORTED_PROTOCOL:
error_code = absl::StatusCode::kUnavailable;
break;
case CURLE_URL_MALFORMAT:
error_code = absl::StatusCode::kInvalidArgument;
break;
case CURLE_WRITE_ERROR:
error_code = absl::StatusCode::kCancelled;
break;
case CURLE_ABORTED_BY_CALLBACK:
error_code = absl::StatusCode::kAborted;
break;
case CURLE_REMOTE_ACCESS_DENIED:
error_code = absl::StatusCode::kPermissionDenied;
break;
case CURLE_SEND_FAIL_REWIND:
case CURLE_RANGE_ERROR:
error_code = absl::StatusCode::kInternal;
break;
case CURLE_BAD_FUNCTION_ARGUMENT:
case CURLE_OUT_OF_MEMORY:
case CURLE_NOT_BUILT_IN:
case CURLE_UNKNOWN_OPTION:
case CURLE_BAD_DOWNLOAD_RESUME:
error_code = absl::StatusCode::kInternal;
break;
default:
break;
}
absl::Status status(
error_code, tensorstore::StrCat("CURL error ", curl_easy_strerror(code),
detail.empty() ? "" : ": ", detail));
status.SetPayload("curl_code", absl::Cord(tensorstore::StrCat(code)));
MaybeAddSourceLocation(status, loc);
return status;
}
absl::Status CurlMCodeToStatus(CURLMcode code, std::string_view detail,
SourceLocation loc) {
if (code == CURLM_OK) {
return absl::OkStatus();
}
absl::Status status(
absl::StatusCode::kInternal,
tensorstore::StrCat("CURLM error ", curl_multi_strerror(code),
detail.empty() ? "" : ": ", detail));
status.SetPayload("curlm_code", absl::Cord(tensorstore::StrCat(code)));
MaybeAddSourceLocation(status, loc);
return status;
}
}
} | #include "tensorstore/internal/http/curl_wrappers.h"
#include <gtest/gtest.h>
namespace {
using ::tensorstore::internal_http::CurlCodeToStatus;
using ::tensorstore::internal_http::CurlMCodeToStatus;
TEST(CurlFactoryTest, CurlCodeToStatus) {
struct {
CURLcode curl;
absl::StatusCode expected;
} expected_codes[]{
{CURLE_OK, absl::StatusCode::kOk},
{CURLE_RECV_ERROR, absl::StatusCode::kUnavailable},
{CURLE_SEND_ERROR, absl::StatusCode::kUnavailable},
{CURLE_PARTIAL_FILE, absl::StatusCode::kUnavailable},
{CURLE_SSL_CONNECT_ERROR, absl::StatusCode::kUnavailable},
{CURLE_COULDNT_RESOLVE_HOST, absl::StatusCode::kUnavailable},
{CURLE_COULDNT_RESOLVE_PROXY, absl::StatusCode::kUnavailable},
{CURLE_COULDNT_CONNECT, absl::StatusCode::kUnavailable},
{CURLE_REMOTE_ACCESS_DENIED, absl::StatusCode::kPermissionDenied},
{CURLE_OPERATION_TIMEDOUT, absl::StatusCode::kDeadlineExceeded},
{CURLE_ABORTED_BY_CALLBACK, absl::StatusCode::kAborted},
{CURLE_FAILED_INIT, absl::StatusCode::kUnknown},
{CURLE_GOT_NOTHING, absl::StatusCode::kUnavailable},
{CURLE_AGAIN, absl::StatusCode::kUnknown},
{CURLE_HTTP2, absl::StatusCode::kUnavailable},
{CURLE_BAD_DOWNLOAD_RESUME, absl::StatusCode::kInternal},
{CURLE_RANGE_ERROR, absl::StatusCode::kInternal},
{CURLE_UNSUPPORTED_PROTOCOL, absl::StatusCode::kUnavailable},
};
for (auto const& t : expected_codes) {
auto actual = CurlCodeToStatus(t.curl, {});
EXPECT_EQ(t.expected, actual.code()) << "CURL code=" << t.curl;
}
}
TEST(CurlFactoryTest, CurlMCodeToStatus) {
struct {
CURLMcode curl;
absl::StatusCode expected;
} expected_codes[]{
{CURLM_OK, absl::StatusCode::kOk},
{CURLM_BAD_HANDLE, absl::StatusCode::kInternal},
{CURLM_BAD_EASY_HANDLE, absl::StatusCode::kInternal},
{CURLM_OUT_OF_MEMORY, absl::StatusCode::kInternal},
{CURLM_INTERNAL_ERROR, absl::StatusCode::kInternal},
};
for (auto const& t : expected_codes) {
auto actual = CurlMCodeToStatus(t.curl, {});
EXPECT_EQ(t.expected, actual.code()) << "CURLM code=" << t.curl;
}
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/http/curl_wrappers.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/http/curl_wrappers_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
3896e059-52e8-44fa-aa37-6d162956acdf | cpp | tensorflow/tensorflow | hlo_rematerialization | third_party/xla/xla/service/hlo_rematerialization.cc | third_party/xla/xla/service/hlo_rematerialization_test.cc | #include "xla/service/hlo_rematerialization.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <limits>
#include <memory>
#include <optional>
#include <set>
#include <string>
#include <string_view>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/functional/function_ref.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_clone_context.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_schedule.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/layout_util.h"
#include "xla/map_util.h"
#include "xla/service/call_graph.h"
#include "xla/service/hlo_cost_analysis.h"
#include "xla/service/hlo_dataflow_analysis.h"
#include "xla/service/hlo_dce.h"
#include "xla/service/logical_buffer.h"
#include "xla/service/tuple_points_to_analysis.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/numbers.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
using ::tsl::strings::HumanReadableNumBytes;
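// Returns true if the instruction may legally be recomputed. Layout-preserving
// copies, layout-constrained collectives, and side-effecting instructions
// (plus calls, constants, conditionals, custom calls, parameters, and whiles)
// are never rematerialized.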
bool IsRematerializable(const HloInstruction* instruction) {
if (instruction->opcode() == HloOpcode::kCopy) {
if (LayoutUtil::Equal(instruction->shape().layout(),
instruction->operand(0)->shape().layout())) {
return false;
}
}
if (auto collective = DynCast<HloCollectiveInstruction>(instruction)) {
return !collective->constrain_layout();
}
switch (instruction->opcode()) {
case HloOpcode::kCall:
case HloOpcode::kConstant:
case HloOpcode::kConditional:
case HloOpcode::kCustomCall:
case HloOpcode::kParameter:
case HloOpcode::kWhile:
return false;
default:
return !instruction->HasSideEffect();
}
}
bool CanBeRematerialized(
const HloInstruction* instruction,
absl::flat_hash_map<const HloInstruction*, bool>* rematerializable_map) {
auto it = rematerializable_map->find(instruction);
if (it != rematerializable_map->end()) {
return it->second;
}
bool rematerializable = IsRematerializable(instruction);
(*rematerializable_map)[instruction] = rematerializable;
return rematerializable;
}
bool IsSupportedIndirectUser(const HloInstruction* instruction) {
return instruction->opcode() == HloOpcode::kBitcast ||
instruction->opcode() == HloOpcode::kGetTupleElement;
}
using BufferId = int64_t;
using BufferIdList = absl::InlinedVector<BufferId, 3>;
struct RematStrategy {
enum {
kRecompute,
kCompress,
kHostOffload,
} kind;
Shape compact_shape;
};
struct Item {
HloInstruction* instruction;
bool placed = false;
bool denylisted = false;
BufferIdList buffers_defined;
BufferIdList buffers_output;
BufferIdList buffers_used;
bool is_skip_node = false;
private:
friend class InstructionList;
Item* next = nullptr;
Item* prev = nullptr;
Item* prev_skip_node = nullptr;
Item* next_skip_node = nullptr;
int64_t position;
};
struct ItemUse {
Item* user;
int64_t operand_number;
std::optional<int64_t> index;
ItemUse(Item* user, int64_t op_num, std::optional<int64_t> index)
: user(user), operand_number(op_num), index(index) {}
bool operator==(const ItemUse& other) const {
return user == other.user && operand_number == other.operand_number &&
index == other.index;
}
};
using ItemList = absl::InlinedVector<Item*, 3>;
using UsesList = absl::InlinedVector<ItemUse, 3>;
class InstructionList {
public:
explicit InstructionList(const HloInstructionSequence& order) {
int64_t position = 0;
Item* last = nullptr;
last_skip_node_ = nullptr;
first_skip_node_ = nullptr;
for (HloInstruction* inst : order.instructions()) {
Item* item = new Item;
item->next = nullptr;
item->prev = last;
if (last == nullptr) {
first_ = item;
} else {
last->next = item;
}
last = item;
item->instruction = inst;
item->position = position;
position++;
item_map_[inst] = item;
}
}
~InstructionList() {
for (Item* item = first_; item != nullptr;) {
Item* next = item->next;
delete item;
item = next;
}
}
size_t size() const { return item_map_.size(); }
Item* first() const { return first_; }
Item* next(Item* item) const { return item->next; }
const Item* next(const Item* item) const { return item->next; }
Item* prev(Item* item) const { return item->prev; }
const Item* prev(const Item* item) const { return item->prev; }
Item* first_skip_node() const { return first_skip_node_; }
Item* next_skip_node(Item* item) const { return item->next_skip_node; }
Item* CreateItem(HloInstruction* inst) {
Item* item = new Item;
item->instruction = inst;
CHECK(item_map_.insert({inst, item}).second)
<< "inserting inst twice " << inst->name();
return item;
}
Item* GetItem(const HloInstruction* inst) const {
auto iter = item_map_.find(inst);
CHECK(iter != item_map_.end()) << "Did not find " << inst->name();
return iter->second;
}
void InsertBeforeInstructions(Item* to_insert,
absl::Span<Item* const> before_instructions) {
VLOG(3) << "InsertBeforeInstructions: " << to_insert->instruction->name()
<< " before {"
<< absl::StrJoin(before_instructions, ", ",
[](std::string* out, Item* item) {
absl::StrAppend(out, item->instruction->name());
})
<< "}";
CHECK(!before_instructions.empty());
Item* min_position_item = nullptr;
for (Item* item : before_instructions) {
if (min_position_item == nullptr ||
item->position < min_position_item->position) {
min_position_item = item;
}
}
while (min_position_item->prev != nullptr &&
min_position_item->position == min_position_item->prev->position) {
min_position_item = min_position_item->prev;
}
while (!absl::c_linear_search(before_instructions, min_position_item)) {
min_position_item = min_position_item->next;
}
return InsertBefore(to_insert, min_position_item);
}
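// Builds the "express lane": a secondary linked list threaded through selected
// items so rematerialization scans can skip long stretches of the schedule.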
void PromoteNodesToSkip(absl::FunctionRef<bool(Item*)> should_promote) {
int64_t count = 0;
for (auto* item = first(); item != nullptr; item = next(item)) {
if (should_promote(item)) {
count += 1;
if (first_skip_node_ == nullptr) {
first_skip_node_ = item;
}
item->is_skip_node = true;
item->prev_skip_node = last_skip_node_;
if (last_skip_node_ != nullptr) {
last_skip_node_->next_skip_node = item;
}
last_skip_node_ = item;
}
}
VLOG(1) << " Rematerialization has " << count << " items in express lane";
}
void InsertAfterInstructions(Item* to_insert,
absl::Span<Item* const> after_instructions) {
VLOG(3) << "InsertAfterInstructions: " << to_insert->instruction->name()
<< " after {"
<< absl::StrJoin(after_instructions, ", ",
[](std::string* out, Item* item) {
absl::StrAppend(out, item->instruction->name());
})
<< "}";
CHECK(!after_instructions.empty());
Item* max_position_item = nullptr;
for (Item* item : after_instructions) {
if (max_position_item == nullptr ||
item->position > max_position_item->position) {
max_position_item = item;
}
}
CHECK(max_position_item->next != nullptr);
InsertBeforeInstructions(to_insert, {max_position_item->next});
}
void Denylist(const HloInstruction* inst) {
GetItem(inst)->denylisted = true;
}
private:
void InsertBefore(Item* item, Item* before) {
VLOG(3) << "InsertBefore: " << item->instruction->name() << " before "
<< before->instruction->name();
item->is_skip_node = true;
Item* cursor = before;
while (cursor != nullptr && !cursor->is_skip_node) {
cursor = cursor->next;
}
CHECK(cursor == nullptr || cursor->is_skip_node);
if (cursor == nullptr) {
item->prev_skip_node = last_skip_node_;
item->next_skip_node = nullptr;
last_skip_node_ = item;
} else {
CHECK(cursor->is_skip_node);
item->prev_skip_node = cursor->prev_skip_node;
if (item->prev_skip_node != nullptr) {
item->prev_skip_node->next_skip_node = item;
}
item->next_skip_node = cursor;
cursor->prev_skip_node = item;
}
if (first_skip_node_ == cursor) {
first_skip_node_ = item;
}
item->prev = before->prev;
item->next = before;
before->prev = item;
if (item->prev != nullptr) {
item->prev->next = item;
} else {
first_ = item;
}
item->position = before->position;
}
Item* first_;
Item* first_skip_node_;
Item* last_skip_node_;
absl::flat_hash_map<const HloInstruction*, Item*> item_map_;
};
UsesList GetUsers(const InstructionList& instruction_list,
const LogicalBuffer* logical_buffer,
const TuplePointsToAnalysis& points_to_analysis,
bool* has_indirect_users) {
UsesList users;
*has_indirect_users = false;
for (const BufferAlias& buffer_alias :
points_to_analysis.GetBufferAliases(*logical_buffer)) {
for (const HloInstruction* user : buffer_alias.instruction()->users()) {
if (points_to_analysis.DoesNotUseOperandBuffer(
buffer_alias.instruction(), buffer_alias.index(), user)) {
continue;
}
if (buffer_alias.instruction() != logical_buffer->instruction() &&
!IsSupportedIndirectUser(buffer_alias.instruction())) {
*has_indirect_users = true;
}
Item* user_item = instruction_list.GetItem(user);
std::optional<int64_t> user_index =
logical_buffer->index().size() != 1
? std::nullopt
: std::make_optional(logical_buffer->index().back());
for (int64_t op_idx : user->OperandIndices(buffer_alias.instruction())) {
if (!absl::c_linear_search(
users,
ItemUse{user_item, static_cast<int>(op_idx), user_index})) {
users.push_back(
ItemUse{user_item, static_cast<int>(op_idx), user_index});
}
}
}
}
return users;
}
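// Tracks which buffers are live, and how many bytes they occupy, at the
// current point of the schedule walk so that candidate rematerializations can
// be costed against the memory limit.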
class MemoryUsageTracker {
public:
MemoryUsageTracker(const HloRematerialization::Options& options,
const HloComputation* computation,
const TuplePointsToAnalysis& points_to_analysis,
const InstructionList& instruction_list);
absl::Status BeginInstruction(Item* item);
int64_t RematerializationCost(const std::vector<Item*>& items,
int64_t memory_reduced,
int64_t memory_limit_bytes) const {
bool zero_cost_move = true;
for (auto* item : items) {
auto* instruction = item->instruction;
if (absl::c_any_of(
instruction->users(),
[this](const HloInstruction* inst) { return IsPlaced(inst); })) {
zero_cost_move = false;
break;
}
}
if (zero_cost_move) {
return 0;
}
CHECK_GT(memory_reduced, 0);
return memory_limit_bytes / memory_reduced;
}
absl::Status EndInstruction();
int64_t MemoryReducedIfCompressed(const Item* item,
const Shape& compact_shape) const;
int64_t MemoryReducedIfRematerialized(
absl::Span<const Item* const> items) const;
absl::Status AddCompressInstructions(Item* original_item,
Item* compressed_item,
Item* uncompressed_item);
absl::Status AddRematerializedInstruction(Item* original_item,
Item* remat_item,
absl::Span<Item*> indirect_users);
std::tuple<UsesList, UsesList> GetPlacedAndUnplacedUsers(
const UsesList& uses) const;
public:
absl::Status AddHostOffloadCopyInstructions(Item* original_item,
Item* copy_start_to_host_item,
Item* copy_done_to_host_item,
Item* copy_start_to_device_item,
Item* copy_done_to_device_item);
int64_t BytesUsedByBuffers(const Item* item,
bool only_count_unplaced_users) const;
std::optional<int64_t> GetCostOfCompression(const Item* candidate_item,
int64_t memory_limit_bytes,
int64_t peak_memory_bytes);
std::optional<int64_t> GetCostOfHostOffload(const Item* candidate_item,
int64_t memory_limit_bytes) const;
std::optional<int64_t> GetCostOfRecompute(
const std::vector<Item*>& candidate_items,
int64_t memory_limit_bytes) const;
std::tuple<std::vector<Item*>, RematStrategy, int>
PickRematerializationCandidates(
const InstructionList& instruction_list, int64_t memory_limit_bytes,
absl::flat_hash_map<const HloInstruction*, bool>* rematerializable_map,
int min_block_size, int max_block_size, int64_t peak_memory_bytes);
bool IsPlaced(const HloInstruction* instruction) const {
return instruction_list_.GetItem(instruction)->placed;
}
bool HasUnplacedUsers(Item* item) const;
UsesList GetItemUses(Item* item) const;
bool IsInProgressItem(Item* item) const { return item == in_progress_item_; }
int64_t memory_usage() const { return memory_usage_; }
int64_t AllocatedSize(Item* item) const {
int64_t size = 0;
for (auto buffer_id : item->buffers_defined) {
size += AllocatedSize(buffer_id);
}
return size;
}
const HloComputation* computation() const { return computation_; }
const HloRematerialization::Options& options() const { return options_; }
bool Check() const;
std::string ToString() const;
private:
struct Buffer {
const BufferId id;
Item* defining_instruction;
const int64_t size;
Shape shape;
bool live_out;
bool has_indirect_uses;
ShapeIndex index;
UsesList users;
int64_t unfinished_user_count;
std::string ToString() const {
return absl::StrCat("Buffer ", id, " (defined by ",
defining_instruction->instruction->name(), ", size ",
size, " bytes)");
}
};
void CountAllocatedMemory(Item* item);
absl::Status CountFreedMemory(Item* item);
void ReplaceUsesInUsersOfBuffer(Buffer& buffer, BufferId old_id) const;
absl::StatusOr<const Shape*> GetCompactShape(const HloInstruction* hlo);
Buffer& CreateBufferFromLogicalBuffer(
const LogicalBuffer* logical_buffer,
const TuplePointsToAnalysis& points_to_analysis, bool live_out) {
bool has_indirect_uses = false;
UsesList users = GetUsers(instruction_list_, logical_buffer,
points_to_analysis, &has_indirect_uses);
return NewBuffer(instruction_list_.GetItem(logical_buffer->instruction()),
logical_buffer->shape(), logical_buffer->index(),
std::move(users), live_out, has_indirect_uses);
}
Buffer& RematerializeBuffer(const Buffer& original_buffer, Item* remat_item,
UsesList&& rematerialized_uses) {
CHECK(original_buffer.defining_instruction->placed)
<< original_buffer.defining_instruction->instruction->name();
CHECK(!original_buffer.has_indirect_uses) << original_buffer.ToString();
CHECK(!original_buffer.live_out) << original_buffer.ToString();
for (ItemUse& use : rematerialized_uses) {
CHECK(!use.user->placed) << use.user->instruction->name();
}
return NewBuffer(remat_item, original_buffer.shape, original_buffer.index,
std::move(rematerialized_uses), false,
false);
}
int64_t AllocatedSize(BufferId buffer_id) const {
const Buffer& buffer = buffers_.at(buffer_id);
HloInstruction* inst = buffer.defining_instruction->instruction;
HloOpcode def_opcode = inst->opcode();
if (buffer.live_out || def_opcode == HloOpcode::kParameter) {
return 0;
} else {
if (options_.host_memory_offload_config && buffer.shape.has_layout() &&
buffer.shape.layout().memory_space() ==
options_.host_memory_offload_config->host_memory_space) {
return 0;
}
return buffer.size;
}
}
bool IsFinished(Item* item) const {
return item->placed && item != in_progress_item_;
}
bool IsInUse(BufferId buffer_id) const {
if (in_progress_item_ == nullptr) {
return false;
}
const BufferIdList& in_progress_uses = in_progress_item_->buffers_used;
return absl::c_linear_search(in_progress_uses, buffer_id);
}
bool IsCurrentlyLive(BufferId buffer_id) const {
const Buffer& buffer = buffers_[buffer_id];
return (buffer.defining_instruction->placed &&
buffer.unfinished_user_count > 0);
}
bool IsInstructionCurrentlyLive(const Item* instruction) const {
if (!IsPlaced(instruction->instruction)) {
return false;
}
for (const HloInstruction* user : instruction->instruction->users()) {
if (!IsPlaced(user)) {
return true;
}
}
return false;
}
Buffer& NewBuffer(Item* defining_instruction, const Shape& shape,
const ShapeIndex& index, UsesList&& uses, bool live_out,
bool has_indirect_uses) {
int buffer_id = buffers_.size();
auto get_num_of_unique_users = [](const UsesList& uses) -> int64_t {
absl::flat_hash_set<Item*> users_set;
for (const ItemUse& use : uses) {
users_set.insert(use.user);
}
return users_set.size();
};
buffers_.push_back(Buffer{buffer_id, defining_instruction,
options_.hlo_cost_analysis.GetShapeSize(shape),
shape, live_out, has_indirect_uses, index, uses,
get_num_of_unique_users(uses)});
return buffers_.back();
}
const HloRematerialization::Options& options_;
const HloComputation* computation_;
const InstructionList& instruction_list_;
absl::flat_hash_map<const HloInstruction*, Shape> compact_shape_;
int64_t memory_usage_ = 0;
Item* in_progress_item_ = nullptr;
std::vector<Buffer> buffers_;
};
MemoryUsageTracker::MemoryUsageTracker(
const HloRematerialization::Options& options,
const HloComputation* computation,
const TuplePointsToAnalysis& points_to_analysis,
const InstructionList& instruction_list)
: options_(options),
computation_(computation),
instruction_list_(instruction_list) {
PointsToSet::BufferSet live_out_set =
points_to_analysis.GetPointsToSet(computation_->root_instruction())
.CreateFlattenedSet();
absl::flat_hash_map<const LogicalBuffer*, BufferId>
logical_buffer_to_buffer_id;
for (auto* item = instruction_list_.first(); item != nullptr;
item = instruction_list_.next(item)) {
const HloInstruction* const instruction = item->instruction;
for (const LogicalBuffer* logical_buffer :
points_to_analysis.GetBuffersDefinedByInstruction(instruction)) {
Buffer* buffer;
if (instruction->opcode() == HloOpcode::kWhile) {
const PointsToSet& operand_points_to =
points_to_analysis.GetPointsToSet(instruction->operand(0));
CHECK_EQ(operand_points_to.element(logical_buffer->index()).size(), 1);
const LogicalBuffer* source_logical_buffer =
operand_points_to.element(logical_buffer->index())[0];
buffer =
&buffers_.at(logical_buffer_to_buffer_id.at(source_logical_buffer));
buffer->has_indirect_uses = true;
buffer->live_out =
buffer->live_out || ContainsKey(live_out_set, logical_buffer);
bool unused;
for (ItemUse& user_item : GetUsers(instruction_list_, logical_buffer,
points_to_analysis, &unused)) {
auto existing_user_it = absl::c_find_if(
buffer->users,
[&](const ItemUse& use) { return user_item.user == use.user; });
if (existing_user_it == buffer->users.end()) {
buffer->unfinished_user_count++;
user_item.user->buffers_used.push_back(buffer->id);
buffer->users.push_back(user_item);
}
}
} else {
buffer = &CreateBufferFromLogicalBuffer(
logical_buffer, points_to_analysis,
ContainsKey(live_out_set, logical_buffer));
item->buffers_defined.push_back(buffer->id);
for (ItemUse& user : buffer->users) {
if (!absl::c_linear_search(user.user->buffers_used, buffer->id)) {
user.user->buffers_used.push_back(buffer->id);
}
}
}
logical_buffer_to_buffer_id[logical_buffer] = buffer->id;
}
for (const LogicalBuffer* logical_buffer :
points_to_analysis.GetPointsToSet(instruction).CreateFlattenedSet()) {
item->buffers_output.push_back(
logical_buffer_to_buffer_id[logical_buffer]);
}
}
XLA_VLOG_LINES(10, ToString());
DCHECK(Check());
}
void MemoryUsageTracker::CountAllocatedMemory(Item* item) {
for (BufferId buffer_id : item->buffers_defined) {
VLOG(3) << " Buffer " << buffers_.at(buffer_id).ToString()
<< " is now live.";
memory_usage_ += AllocatedSize(buffer_id);
}
}
absl::Status MemoryUsageTracker::CountFreedMemory(Item* item) {
for (BufferId buffer_id : item->buffers_used) {
Buffer& buffer = buffers_.at(buffer_id);
buffer.unfinished_user_count--;
TF_RET_CHECK(buffer.unfinished_user_count >= 0)
<< buffer.ToString() << " has negative unfinished user count.";
if (buffer.unfinished_user_count == 0) {
VLOG(3) << " " << buffer.ToString() << " is now dead.";
memory_usage_ -= AllocatedSize(buffer_id);
}
}
for (BufferId buffer_id : item->buffers_defined) {
const Buffer& buffer = buffers_.at(buffer_id);
if (buffer.unfinished_user_count == 0) {
VLOG(3) << " " << buffer.ToString() << " is immediately dead.";
memory_usage_ -= AllocatedSize(buffer_id);
}
}
return absl::OkStatus();
}
absl::Status MemoryUsageTracker::BeginInstruction(Item* item) {
const HloInstruction* instruction = item->instruction;
VLOG(3) << "BeginInstruction " << instruction->name();
TF_RET_CHECK(in_progress_item_ == nullptr);
in_progress_item_ = item;
item->placed = true;
CountAllocatedMemory(item);
VLOG(3) << " memory usage = " << memory_usage_;
VLOG(10) << ToString();
if (VLOG_IS_ON(1)) {
DCHECK(Check());
}
return absl::OkStatus();
}
absl::Status MemoryUsageTracker::EndInstruction() {
TF_RET_CHECK(in_progress_item_ != nullptr);
VLOG(3) << "EndInstruction " << in_progress_item_->instruction->name();
TF_RETURN_IF_ERROR(CountFreedMemory(in_progress_item_));
in_progress_item_ = nullptr;
VLOG(3) << " memory usage = " << memory_usage_;
VLOG(10) << ToString();
if (VLOG_IS_ON(1)) {
DCHECK(Check());
}
return absl::OkStatus();
}
int64_t MemoryUsageTracker::MemoryReducedIfCompressed(
const Item* item, const Shape& compact_shape) const {
CHECK_NE(in_progress_item_, nullptr);
if (!item->placed || item == in_progress_item_) {
return 0;
}
int64_t memory_reduced = 0;
CHECK_EQ(item->buffers_output.size(), 1);
BufferId buffer_id = item->buffers_output[0];
if (IsCurrentlyLive(buffer_id) && !IsInUse(buffer_id) &&
IsInstructionCurrentlyLive(item)) {
const Buffer& buffer = buffers_.at(buffer_id);
memory_reduced += buffer.size;
int64_t compact_shape_size =
options_.hlo_cost_analysis.GetShapeSize(compact_shape);
memory_reduced -= compact_shape_size;
}
return memory_reduced;
}
int64_t MemoryUsageTracker::MemoryReducedIfRematerialized(
absl::Span<const Item* const> items) const {
CHECK_NE(in_progress_item_, nullptr);
int64_t memory_reduced = 0;
absl::flat_hash_set<const Item*> remat_candidates;
for (const Item* item : items) {
if (!item->placed || item == in_progress_item_) {
LOG(WARNING) << "Unplaced item or in progress item being checked for "
"rematerialization.";
return 0;
}
for (BufferId buffer_id : item->buffers_defined) {
const Buffer& buffer = buffers_.at(buffer_id);
if (buffer.has_indirect_uses || buffer.live_out ||
buffer.index.size() > 1) {
return 0;
}
if (IsInUse(buffer_id)) {
return 0;
}
if (IsCurrentlyLive(buffer_id)) {
memory_reduced += AllocatedSize(buffer_id);
}
}
for (BufferId buffer_id : item->buffers_used) {
if (!IsCurrentlyLive(buffer_id)) {
Item* defining_instruction =
buffers_.at(buffer_id).defining_instruction;
if (!remat_candidates.contains(defining_instruction)) {
memory_reduced -= AllocatedSize(buffer_id);
}
}
}
remat_candidates.insert(item);
}
return memory_reduced;
}
std::tuple<UsesList, UsesList> MemoryUsageTracker::GetPlacedAndUnplacedUsers(
const UsesList& uses) const {
UsesList placed_users, unplaced_users;
for (const ItemUse& use : uses) {
if (use.user->placed) {
DCHECK(IsFinished(use.user)) << use.user->instruction->name();
placed_users.push_back(use);
} else {
unplaced_users.push_back(use);
}
}
return {placed_users, unplaced_users};
}
void MemoryUsageTracker::ReplaceUsesInUsersOfBuffer(Buffer& buffer,
BufferId old_id) const {
for (ItemUse& use : buffer.users) {
BufferIdList& buffers_used = use.user->buffers_used;
absl::c_replace(buffers_used, old_id, buffer.id);
}
}
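// Accounts for a compress/uncompress pair: already-placed users keep reading
// the original buffer, while unplaced users are rewired to the uncompressed
// result, so only the compact buffer is counted as live in between.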
absl::Status MemoryUsageTracker::AddCompressInstructions(
Item* original_item, Item* compressed_item, Item* uncompressed_item) {
CHECK(original_item->placed)
<< "Compressing instruction, but the original is not yet placed.";
CHECK_EQ(original_item->buffers_output.size(), 1)
<< "Only compressing items which have a single output buffer";
memory_usage_ -= options_.hlo_cost_analysis.GetShapeSize(
original_item->instruction->shape());
memory_usage_ += options_.hlo_cost_analysis.GetShapeSize(
compressed_item->instruction->shape());
BufferId original_buffer_id = original_item->buffers_output[0];
Buffer& original_buffer = buffers_.at(original_buffer_id);
auto [placed_users, unplaced_users] =
GetPlacedAndUnplacedUsers(original_buffer.users);
original_buffer.users = std::move(placed_users);
original_buffer.unfinished_user_count = 0;
original_buffer.users.push_back(ItemUse{compressed_item, 0, std::nullopt});
ShapeIndex copied_index = original_buffer.index;
Buffer& compressed_buffer =
NewBuffer(compressed_item, compressed_item->instruction->shape(),
copied_index, {ItemUse{uncompressed_item, 0, std::nullopt}},
false,
false);
compressed_item->buffers_used = original_item->buffers_output;
compressed_item->buffers_output = {compressed_buffer.id};
compressed_item->buffers_defined.push_back(compressed_buffer.id);
Buffer& uncompressed_buffer =
NewBuffer(uncompressed_item, uncompressed_item->instruction->shape(),
copied_index, std::move(unplaced_users), false,
false);
uncompressed_item->buffers_used = {compressed_item->buffers_output[0]};
uncompressed_item->buffers_output = {uncompressed_buffer.id};
uncompressed_item->buffers_defined = {uncompressed_buffer.id};
ReplaceUsesInUsersOfBuffer(uncompressed_buffer, original_buffer_id);
return absl::OkStatus();
}
absl::Status MemoryUsageTracker::AddRematerializedInstruction(
Item* original_item, Item* remat_item, absl::Span<Item*> indirect_users) {
VLOG(3) << "AddRematerializedInstruction: original_instruction = "
<< original_item->instruction->name()
<< ", remat_instruction = " << remat_item->instruction->name();
TF_RET_CHECK(in_progress_item_ != nullptr);
TF_RET_CHECK(original_item->placed) << original_item->instruction->name();
TF_RET_CHECK(!remat_item->placed) << remat_item->instruction->name();
remat_item->buffers_used = original_item->buffers_used;
for (BufferId buffer_id : original_item->buffers_used) {
Buffer& buffer = buffers_.at(buffer_id);
if (buffer.unfinished_user_count == 0) {
memory_usage_ += AllocatedSize(buffer.id);
}
buffer.unfinished_user_count++;
absl::InlinedVector<ItemUse, 2> filtered_users;
std::copy_if(buffer.users.begin(), buffer.users.end(),
std::back_inserter(filtered_users),
[&](const ItemUse& iu) { return iu.user == original_item; });
for (ItemUse& u : filtered_users) {
buffer.users.push_back(ItemUse{remat_item, u.operand_number, u.index});
}
}
const absl::flat_hash_set<Item*> indirect_users_set(indirect_users.begin(),
indirect_users.end());
for (BufferId old_buffer_id : original_item->buffers_defined) {
Buffer& old_buffer = buffers_.at(old_buffer_id);
UsesList placed_users;
UsesList unplaced_users;
for (ItemUse& user : old_buffer.users) {
if (user.user->placed) {
placed_users.push_back(user);
} else {
if (!IsSupportedIndirectUser(user.user->instruction) ||
indirect_users_set.contains(user.user)) {
unplaced_users.push_back(user);
} else {
CHECK(user.user->buffers_defined.empty())
<< "Buffers defined expected to be empty for use passthrough "
"instructions";
user.user->buffers_output.clear();
user.user->buffers_used.clear();
}
}
}
old_buffer.users = std::move(placed_users);
old_buffer.unfinished_user_count = 0;
memory_usage_ -= AllocatedSize(old_buffer.id);
Buffer& new_buffer =
RematerializeBuffer(old_buffer, remat_item, std::move(unplaced_users));
remat_item->buffers_defined.push_back(new_buffer.id);
remat_item->buffers_output.push_back(new_buffer.id);
auto update_buffers = [old_buffer_id, new_buffer_id = new_buffer.id](
BufferIdList& to_update) {
std::replace(to_update.begin(), to_update.end(), old_buffer_id,
new_buffer_id);
};
for (ItemUse& user : new_buffer.users) {
update_buffers(user.user->buffers_used);
update_buffers(user.user->buffers_output);
}
}
for (Item* indirect_user : indirect_users) {
const Item* source_item =
instruction_list_.GetItem(indirect_user->instruction->operand(0));
switch (indirect_user->instruction->opcode()) {
case HloOpcode::kBitcast: {
if (IsSupportedIndirectUser(source_item->instruction)) {
indirect_user->buffers_used = source_item->buffers_output;
indirect_user->buffers_output = source_item->buffers_output;
} else {
indirect_user->buffers_used = source_item->buffers_defined;
indirect_user->buffers_output = source_item->buffers_defined;
}
break;
}
case HloOpcode::kGetTupleElement: {
const HloGetTupleElementInstruction* gte =
Cast<HloGetTupleElementInstruction>(indirect_user->instruction);
for (BufferId buffer_id : source_item->buffers_defined) {
const Buffer& def_buffer = buffers_.at(buffer_id);
if (def_buffer.index == ShapeIndex{gte->tuple_index()}) {
indirect_user->buffers_output.push_back(buffer_id);
}
if (def_buffer.index.empty()) {
indirect_user->buffers_used.push_back(buffer_id);
}
}
break;
}
default: {
LOG(FATAL) << "Unsupported indirect instruction with opcode "
<< indirect_user->instruction->opcode();
break;
}
}
for (BufferId buffer_id : indirect_user->buffers_used) {
Buffer& buffer = buffers_.at(buffer_id);
buffer.unfinished_user_count++;
buffer.users.push_back(ItemUse{indirect_user, 0, std::nullopt});
}
}
VLOG(3) << " memory usage = " << memory_usage_;
XLA_VLOG_LINES(10, ToString());
DCHECK(Check());
return absl::OkStatus();
}
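// Rewrites the tracker for a host offload: the value is moved to host memory
// via one copy-start/copy-done pair and restored via a second pair, and any
// not-yet-placed users of the original buffer are rewired to the copied-back
// device buffer.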
absl::Status MemoryUsageTracker::AddHostOffloadCopyInstructions(
Item* original_item, Item* copy_start_to_host_item,
Item* copy_done_to_host_item, Item* copy_start_to_device_item,
Item* copy_done_to_device_item) {
CHECK_EQ(original_item->buffers_defined.size(), 1);
CHECK_EQ(original_item->buffers_output.size(), 1);
BufferId original_buffer_id = original_item->buffers_output[0];
Buffer& original_buffer = buffers_.at(original_buffer_id);
auto [placed_users, unplaced_users] =
GetPlacedAndUnplacedUsers(original_buffer.users);
original_buffer.users = std::move(placed_users);
original_buffer.users.emplace_back(copy_start_to_host_item, 0, std::nullopt);
original_buffer.unfinished_user_count = 1;
CHECK_EQ(copy_start_to_host_item->instruction->shape().tuple_shapes_size(), 3)
<< "copy_start_to_host_item's shape is "
<< copy_start_to_host_item->instruction->shape().ToString();
CHECK_EQ(copy_start_to_device_item->instruction->shape().tuple_shapes_size(),
3)
<< "copy_start_to_device_item's shape is "
<< copy_start_to_device_item->instruction->shape().ToString();
BufferId copy_start_to_host_device_buffer_id =
NewBuffer(copy_start_to_host_item,
copy_start_to_host_item->instruction->shape().tuple_shapes(1),
ShapeIndex(),
UsesList{ItemUse{copy_done_to_host_item, 0, std::nullopt}},
false, false)
.id;
BufferId copy_start_to_host_context_buffer_id =
NewBuffer(copy_start_to_host_item,
copy_start_to_host_item->instruction->shape().tuple_shapes(2),
ShapeIndex(),
UsesList{ItemUse{copy_done_to_host_item, 0, std::nullopt}},
false, false)
.id;
BufferId copy_start_to_device_device_buffer_id =
NewBuffer(copy_start_to_device_item,
copy_start_to_device_item->instruction->shape().tuple_shapes(0),
ShapeIndex(),
UsesList{ItemUse{copy_done_to_device_item, 0, std::nullopt}},
false, false)
.id;
BufferId copy_start_to_device_context_buffer_id =
NewBuffer(copy_start_to_device_item,
copy_start_to_device_item->instruction->shape().tuple_shapes(2),
ShapeIndex(),
UsesList{ItemUse{copy_done_to_device_item, 0, std::nullopt}},
false, false)
.id;
BufferId copy_done_to_device_buffer_id =
NewBuffer(copy_done_to_device_item,
copy_done_to_device_item->instruction->shape(), ShapeIndex(),
std::move(unplaced_users), false,
false)
.id;
copy_start_to_host_item->buffers_used = original_item->buffers_output;
copy_start_to_host_item->buffers_output = {
copy_start_to_host_device_buffer_id,
copy_start_to_host_context_buffer_id};
copy_start_to_host_item->buffers_defined = {
copy_start_to_host_device_buffer_id,
copy_start_to_host_context_buffer_id};
copy_done_to_host_item->buffers_used =
copy_start_to_host_item->buffers_output;
copy_done_to_host_item->buffers_output = {};
copy_done_to_host_item->buffers_defined = {};
copy_start_to_device_item->buffers_used =
copy_done_to_host_item->buffers_output;
copy_start_to_device_item->buffers_output = {
copy_start_to_device_device_buffer_id,
copy_start_to_device_context_buffer_id};
copy_start_to_device_item->buffers_defined = {
copy_start_to_device_device_buffer_id,
copy_start_to_device_context_buffer_id};
copy_done_to_device_item->buffers_used =
copy_start_to_device_item->buffers_output;
copy_done_to_device_item->buffers_output = {copy_done_to_device_buffer_id};
copy_done_to_device_item->buffers_defined = {copy_done_to_device_buffer_id};
Buffer& copy_done_to_device_buffer =
buffers_.at(copy_done_to_device_buffer_id);
ReplaceUsesInUsersOfBuffer(copy_done_to_device_buffer, original_buffer_id);
if (copy_start_to_host_item->placed) {
CountAllocatedMemory(copy_start_to_host_item);
TF_RETURN_IF_ERROR(CountFreedMemory(copy_start_to_host_item));
if (copy_done_to_host_item->placed) {
CountAllocatedMemory(copy_done_to_host_item);
TF_RETURN_IF_ERROR(CountFreedMemory(copy_done_to_host_item));
if (copy_start_to_device_item->placed) {
CountAllocatedMemory(copy_start_to_device_item);
TF_RETURN_IF_ERROR(CountFreedMemory(copy_start_to_device_item));
if (copy_done_to_device_item->placed) {
CountAllocatedMemory(copy_done_to_device_item);
TF_RETURN_IF_ERROR(CountFreedMemory(copy_done_to_device_item));
}
}
}
}
return absl::OkStatus();
}
std::string MemoryUsageTracker::ToString() const {
std::string output =
absl::StrCat("MemoryUsageTracker for ", computation_->name(), "\n");
absl::StrAppend(&output,
"Memory usage: ", HumanReadableNumBytes(memory_usage()), " (",
memory_usage(), " bytes)");
for (auto* item = instruction_list_.first(); item != nullptr;
item = instruction_list_.next(item)) {
const HloInstruction* instruction = item->instruction;
absl::string_view inprogress =
item == in_progress_item_ ? " in-progress" : "";
absl::string_view placed = item->placed ? " placed" : "";
absl::StrAppend(&output, " ", instruction->name(), inprogress, placed,
"\n Defines:\n");
for (BufferId buffer_id : item->buffers_defined) {
const Buffer& buffer = buffers_[buffer_id];
absl::string_view live = IsCurrentlyLive(buffer_id) ? " live" : "";
absl::StrAppend(&output, " ", buffer.ToString(), live, ", ",
buffer.unfinished_user_count, " unfinished uses\n");
}
absl::StrAppend(&output, " Outputs:\n");
for (BufferId buffer_id : item->buffers_output) {
absl::StrAppend(&output, " ", buffers_[buffer_id].ToString(), "\n");
}
absl::StrAppend(&output, " Uses:\n");
for (BufferId buffer_id : item->buffers_used) {
absl::StrAppend(&output, " ", buffers_[buffer_id].ToString(), "\n");
}
}
return output;
}
absl::StatusOr<const Shape*> MemoryUsageTracker::GetCompactShape(
const HloInstruction* hlo) {
auto it = compact_shape_.find(hlo);
if (it != compact_shape_.end()) {
return &it->second;
}
const Shape& original_shape = hlo->shape();
TF_ASSIGN_OR_RETURN(Shape min_shape,
options_.compact_shape_function(original_shape));
return &compact_shape_.emplace(hlo, min_shape).first->second;
}
bool MemoryUsageTracker::Check() const {
auto elements_are_unique = [](const BufferIdList& vec) {
return vec.size() == std::set<BufferId>(vec.begin(), vec.end()).size();
};
for (auto* instruction : computation_->instructions()) {
const BufferIdList& defined_buffers =
instruction_list_.GetItem(instruction)->buffers_defined;
CHECK(elements_are_unique(defined_buffers))
<< "Instruction " << instruction->name()
<< " does not have unique defined buffers: "
<< absl::StrJoin(defined_buffers, ", ",
[this](std::string* out, BufferId buffer_id) {
absl::StrAppend(out,
buffers_.at(buffer_id).ToString());
});
for (const Buffer& buffer : buffers_) {
if (buffer.defining_instruction->instruction == instruction) {
CHECK(absl::c_linear_search(defined_buffers, buffer.id))
<< "Instruction " << instruction->name()
<< " defined buffers is missing: " << buffer.ToString();
}
}
}
for (auto* instruction : computation_->instructions()) {
const BufferIdList& used_buffers =
instruction_list_.GetItem(instruction)->buffers_used;
CHECK(elements_are_unique(used_buffers))
<< "Instruction " << instruction->name()
<< " does not have unique used buffers: "
<< absl::StrJoin(used_buffers, ", ",
[this](std::string* out, BufferId buffer_id) {
absl::StrAppend(out,
buffers_.at(buffer_id).ToString());
});
}
for (const Buffer& buffer : buffers_) {
int64_t unfinished_uses = 0;
absl::flat_hash_set<Item*> already_counted_user;
for (const ItemUse& user : buffer.users) {
const BufferIdList& used_buffers = user.user->buffers_used;
CHECK(absl::c_linear_search(used_buffers, buffer.id))
<< "Instruction " << user.user->instruction->name()
<< " used buffers is missing " << buffer.ToString();
if (!IsFinished(user.user) &&
already_counted_user.insert(user.user).second) {
unfinished_uses++;
}
}
CHECK_EQ(buffer.unfinished_user_count, unfinished_uses)
<< "Incorrect unplaced use count for " << buffer.ToString();
}
return true;
}
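// Collects up to `min_block_size` consecutive placed instructions starting at
// `start_item`; stops early at an unplaced or in-progress instruction, so the
// returned block may be shorter than requested.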
std::vector<Item*> GetInitialBlock(const InstructionList& instruction_list,
const MemoryUsageTracker& tracker,
Item* start_item, int min_block_size) {
std::vector<Item*> item_block;
Item* curr_item = start_item;
for (int i = 0; i < min_block_size; ++i) {
if (curr_item == nullptr || !curr_item->placed ||
tracker.IsInProgressItem(curr_item)) {
break;
}
item_block.push_back(curr_item);
curr_item = instruction_list.next(curr_item);
}
return item_block;
}
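// Returns true if any instruction in `block` is denylisted or cannot be
// rematerialized, disqualifying the whole block as a candidate.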
bool AnyDenylistedOrNonRematerializable(
const std::vector<Item*>& block,
absl::flat_hash_map<const HloInstruction*, bool>* rematerializable_map) {
for (auto* item : block) {
if (item->denylisted) {
return true;
}
if (!CanBeRematerialized(item->instruction, rematerializable_map)) {
return true;
}
}
return false;
}
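// Returns the total allocated size of the buffers defined by `item`. With
// `only_count_unplaced_users`, a buffer contributes only if it still has at
// least one unplaced user.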
int64_t MemoryUsageTracker::BytesUsedByBuffers(
const Item* item, bool only_count_unplaced_users) const {
int64_t bytes_used_by_buffers = 0;
for (const auto& buffer_id : item->buffers_defined) {
VLOG(3) << " buffer " << buffer_id << "'s users are "
<< absl::StrJoin(buffers_.at(buffer_id).users, ", ",
[](std::string* str, const auto& use) {
str->append(use.user->instruction->name());
});
for (const auto& use : buffers_.at(buffer_id).users) {
if (!only_count_unplaced_users || !use.user->placed) {
bytes_used_by_buffers += AllocatedSize(buffer_id);
break;
}
}
}
return bytes_used_by_buffers;
}
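// Returns the cost of rematerializing `candidate_item` via compression to its
// compact shape, or nullopt when compression does not apply: multiple output
// buffers, unplaced/in-progress/live-out, a non-array shape, no memory
// saving, or the original plus compressed copies not fitting under the peak.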
std::optional<int64_t> MemoryUsageTracker::GetCostOfCompression(
const Item* candidate_item, int64_t memory_limit_bytes,
int64_t peak_memory_bytes) {
CHECK(candidate_item != nullptr);
if (candidate_item->buffers_output.size() != 1) {
HloInstruction* candidate_instruction = candidate_item->instruction;
    VLOG(2) << " " << candidate_instruction->name()
            << " has more than one output buffer; cannot compress.";
return {};
}
const Buffer& output_buffer = buffers_.at(candidate_item->buffers_output[0]);
if (!candidate_item->placed || candidate_item == in_progress_item_ ||
output_buffer.live_out) {
return {};
}
const Shape& original_shape = candidate_item->instruction->shape();
if (!original_shape.IsArray()) {
return {};
}
const Shape* compact_shape =
GetCompactShape(candidate_item->instruction).value();
const int64_t memory_reduced =
MemoryReducedIfCompressed(candidate_item, *compact_shape);
const int64_t size = options_.hlo_cost_analysis.GetShapeSize(
candidate_item->instruction->shape());
const int64_t reduced_size =
options_.hlo_cost_analysis.GetShapeSize(*compact_shape);
if (memory_reduced > 0 && size + reduced_size < peak_memory_bytes) {
return memory_limit_bytes / memory_reduced;
} else {
return {};
}
}
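// Returns the cost of rematerializing `candidate_item` by offloading its
// output to host memory, or nullopt when offloading is illegal or
// unprofitable: bitcast or tuple users, use by the in-progress instruction,
// zero bytes saved, or too little compute between uses to hide the round-trip
// copy time.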
std::optional<int64_t> MemoryUsageTracker::GetCostOfHostOffload(
const Item* candidate_item, int64_t memory_limit_bytes) const {
CHECK(candidate_item != nullptr);
HloInstruction* candidate_instruction = candidate_item->instruction;
VLOG(2)
<< "Considering host offload as an option for remat. looking at instr "
<< candidate_instruction->name();
if (candidate_item->buffers_output.size() != 1) {
VLOG(2) << " " << candidate_instruction->name()
<< " has more than one output buffer; cannot offload to host.";
return {};
}
for (auto buffer_id : candidate_item->buffers_defined) {
for (auto use : buffers_.at(buffer_id).users) {
if (use.user->instruction->opcode() == HloOpcode::kBitcast) {
VLOG(3) << " " << candidate_item->instruction->name()
<< " has a user which is a bitcast instruction("
<< use.user->instruction->name()
<< "); cannot offload "
"to host.";
return {};
} else if (use.user->instruction->opcode() == HloOpcode::kTuple) {
VLOG(3) << " " << candidate_item->instruction->name()
<< " has a user which is a tuple instruction("
<< use.user->instruction->name()
<< "); cannot offload "
"to host.";
return {};
}
}
}
const Buffer& output_buffer = buffers_.at(candidate_item->buffers_output[0]);
if (!candidate_item->placed || candidate_item == in_progress_item_ ||
output_buffer.live_out) {
VLOG(2) << " " << candidate_instruction->name()
<< " is not yet placed, is in progress, or is \"live_out\"; cannot "
"offload to host.";
return {};
}
const bool current_instruction_uses_this_item = [&]() {
if (in_progress_item_ == nullptr) {
return false;
}
const auto& output_buffer_ids = candidate_item->buffers_output;
for (const auto& output_buffer_id : output_buffer_ids) {
const Buffer& output_buffer = buffers_.at(output_buffer_id);
for (const auto& use : output_buffer.users) {
if (use.user == in_progress_item_) {
return true;
}
}
}
return false;
}();
if (current_instruction_uses_this_item) {
VLOG(2) << " " << candidate_instruction->name()
<< " is used by the current instruction in mem tracker ("
<< in_progress_item_->instruction->name()
<< "); cannot offload to host.";
return {};
}
const int64_t bytes_used_by_buffers =
BytesUsedByBuffers(candidate_item, true);
if (bytes_used_by_buffers == 0) {
VLOG(2) << " " << candidate_instruction->name()
<< " consumes no memory; no point in offloading.";
return {};
}
const auto [placed_uses, unplaced_uses] =
GetPlacedAndUnplacedUsers(output_buffer.users);
const Item* last_placed_user = nullptr;
const Item* first_unplaced_user = nullptr;
for (const auto* item = instruction_list_.first(); item != nullptr;
item = instruction_list_.next(item)) {
if (absl::c_find_if(placed_uses, [&](const auto& use) {
return use.user == item;
}) != placed_uses.end()) {
last_placed_user = item;
}
if (first_unplaced_user == nullptr &&
absl::c_find_if(unplaced_uses, [&](const auto& use) {
return use.user == item;
}) != unplaced_uses.end()) {
first_unplaced_user = item;
break;
}
}
if (last_placed_user == nullptr) {
VLOG(3) << " " << candidate_instruction->name()
<< " has no placed users, starting search at self.";
last_placed_user = candidate_item;
}
CHECK(first_unplaced_user != nullptr)
<< "Didn't find any unplaced user for instruction \""
<< candidate_instruction->name()
<< "\". There must be a "
"bug in how we calculate how much memory this item uses.";
float time_spent_before_next_use = 0.0;
for (auto* item = last_placed_user; item != first_unplaced_user;
item = instruction_list_.next(item)) {
time_spent_before_next_use += std::max(
0.0f, options_.hlo_cost_analysis.optimal_seconds(*item->instruction));
}
if (time_spent_before_next_use <= 0.0) {
return {};
}
const float time_spent_on_copies =
bytes_used_by_buffers / options_.host_memory_offload_config
->bandwidth_to_host_bytes_per_second +
bytes_used_by_buffers / options_.host_memory_offload_config
->bandwidth_from_host_bytes_per_second;
if (time_spent_before_next_use < time_spent_on_copies) {
return {};
}
VLOG(3) << " " << candidate_instruction->name() << " has enough time ("
<< time_spent_before_next_use
<< ") between itself and next use. The memcpy out and back will take "
<< time_spent_on_copies << "s";
return memory_limit_bytes / bytes_used_by_buffers;
}
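// Returns the cost of rematerializing the block `candidate_items` by
// recomputation, or nullopt if any candidate has an already-placed control
// successor or recomputation would not reduce memory.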
std::optional<int64_t> MemoryUsageTracker::GetCostOfRecompute(
const std::vector<Item*>& candidate_items,
int64_t memory_limit_bytes) const {
for (auto* item : candidate_items) {
HloInstruction* candidate = item->instruction;
if (std::any_of(
candidate->control_successors().begin(),
candidate->control_successors().end(),
[this](const HloInstruction* inst) { return IsPlaced(inst); })) {
return {};
}
}
VLOG(5) << "Block contains:";
for (auto* hlo : candidate_items) {
VLOG(5) << hlo->instruction->name();
}
const int64_t memory_reduced = MemoryReducedIfRematerialized(candidate_items);
if (memory_reduced <= 0) {
return {};
}
return RematerializationCost(candidate_items, memory_reduced,
memory_limit_bytes);
}
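// Scans the instruction list for the cheapest rematerialization candidate:
// compression and host offload are considered for single-instruction blocks,
// recomputation for blocks of size min_block_size..max_block_size. Returns
// the best block, the winning strategy, and the search effort expended.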
std::tuple<std::vector<Item*>, RematStrategy, int>
MemoryUsageTracker::PickRematerializationCandidates(
const InstructionList& instruction_list, int64_t memory_limit_bytes,
absl::flat_hash_map<const HloInstruction*, bool>* rematerializable_map,
int min_block_size, int max_block_size, int64_t peak_memory_bytes) {
std::vector<Item*> best_items;
int64_t best_cost = std::numeric_limits<int64_t>::max();
RematStrategy best_strategy;
int effort = 0;
VLOG(5) << "Picking candidate block with size in [" << min_block_size << ", "
<< max_block_size << "]";
for (auto* start_item = instruction_list.first_skip_node();
start_item != nullptr;
start_item = instruction_list.next_skip_node(start_item)) {
std::vector<Item*> block =
GetInitialBlock(instruction_list, *this, start_item, min_block_size);
if (block.size() < min_block_size) {
break;
}
if (AnyDenylistedOrNonRematerializable(block, rematerializable_map)) {
continue;
}
if (options_.remat_mode_config.compress && block.size() == 1) {
auto cost =
GetCostOfCompression(block[0], memory_limit_bytes, peak_memory_bytes);
++effort;
if (cost && *cost < best_cost) {
VLOG(1) << "Found new best cost; from " << best_cost << " to " << *cost
<< " with strategy kCompress on block of size " << block.size();
best_strategy.kind = RematStrategy::kCompress;
best_strategy.compact_shape =
*GetCompactShape(block[0]->instruction).value();
best_items = block;
best_cost = *cost;
}
}
if (options_.remat_mode_config.host_offload && block.size() == 1) {
auto cost = GetCostOfHostOffload(block[0], memory_limit_bytes);
++effort;
if (cost && *cost < best_cost) {
VLOG(1) << "Found new best cost; from " << best_cost << " to " << *cost
<< " with strategy kHostOffload on block of size "
<< block.size();
best_strategy.kind = RematStrategy::kHostOffload;
best_items = block;
best_cost = *cost;
}
}
if (!options_.remat_mode_config.recompute) {
continue;
}
while (block.size() <= max_block_size) {
auto cost = GetCostOfRecompute(block, memory_limit_bytes);
++effort;
if (cost && *cost < best_cost) {
VLOG(1) << "Found new best cost; from " << best_cost << " to " << *cost
<< " with strategy kRecompute on block of size "
<< block.size();
best_strategy.kind = RematStrategy::kRecompute;
best_items = block;
best_cost = *cost;
}
auto* last_item = block[block.size() - 1];
auto* next_item = instruction_list.next(last_item);
if (next_item == nullptr || next_item->denylisted || !next_item->placed ||
next_item == in_progress_item_ ||
!CanBeRematerialized(next_item->instruction, rematerializable_map)) {
break;
}
block.push_back(next_item);
}
}
return {best_items, best_strategy, effort};
}
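// Returns true if any buffer defined by `item` still has an unplaced user,
// i.e. rematerializing `item` can still benefit a later instruction.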
bool MemoryUsageTracker::HasUnplacedUsers(Item* item) const {
for (BufferId buffer_id : item->buffers_defined) {
const Buffer& buffer = buffers_.at(buffer_id);
for (const ItemUse& user : buffer.users) {
if (!user.user->placed) {
return true;
}
}
}
return false;
}
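// Returns the concatenated uses of all buffers defined by `item`.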
UsesList MemoryUsageTracker::GetItemUses(Item* item) const {
UsesList combined_users;
for (BufferId buffer_id : item->buffers_defined) {
const Buffer& buffer = buffers_.at(buffer_id);
for (const ItemUse& user : buffer.users) {
combined_users.push_back(user);
}
}
return combined_users;
}
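// Rematerializes `best_items` (processed in reverse order) by cloning each
// instruction with a ".remat" suffix and rewiring its unplaced users to the
// clone, inserting get-tuple-element and bitcast shims where shapes differ.
// Dead async start/done originals are removed to preserve the one-start to
// one-done invariant. Returns the net number of instructions added.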
absl::StatusOr<int64_t> RematerializeInstructions(
MemoryUsageTracker* memory_tracker, std::vector<Item*>* best_items,
absl::flat_hash_set<const HloInstruction*>* remat_move_instructions,
InstructionList* instruction_list, HloSchedule* schedule,
HloRematerialization* rematerialization) {
int64_t net_instructions_added = 0;
std::vector<std::string> instruction_names(best_items->size());
for (int i = best_items->size() - 1; i >= 0; --i) {
Item* best_item = (*best_items)[i];
HloInstruction* best = best_item->instruction;
instruction_names[i] = best->name();
HloComputation* computation = best->parent();
if (!memory_tracker->HasUnplacedUsers(best_item)) {
continue;
}
HloCloneContext context(computation->parent());
HloInstruction* remat =
computation->AddInstruction(best->Clone("remat", &context));
for (auto& cloned_computation_pair : context.cloned_computations()) {
if (!schedule->is_computation_scheduled(cloned_computation_pair.first)) {
continue;
}
HloInstructionSequence& sequence =
schedule->GetOrCreateSequence(cloned_computation_pair.second);
HloInstructionSequence& old_sequence =
schedule->GetOrCreateSequence(cloned_computation_pair.first);
for (HloInstruction* instr : old_sequence.instructions()) {
sequence.push_back(instr);
}
}
if (DynCast<HloChannelInstruction>(best) &&
DynCast<HloChannelInstruction>(best)->channel_id()) {
remat->set_channel_id(rematerialization->NextChannelId());
}
TF_RETURN_IF_ERROR(remat->CopyAllControlDepsFrom(best));
Item* remat_item = instruction_list->CreateItem(remat);
absl::InlinedVector<Item*, 4> indirect_users;
absl::flat_hash_map<int64_t, HloInstruction*> gte_cache;
for (auto& user : memory_tracker->GetItemUses(best_item)) {
if (!memory_tracker->IsPlaced(user.user->instruction)) {
VLOG(2) << " Replacing use of " << best->name() << " in "
<< user.user->instruction->name() << " with " << remat->name();
HloInstruction* remat_use = remat;
HloInstruction* const user_operand =
user.user->instruction->mutable_operand(user.operand_number);
if (remat_use == user_operand) {
continue;
}
if (user.index && remat_use->shape() != user_operand->shape()) {
auto cached_gte = gte_cache.find(*user.index);
if (cached_gte == gte_cache.end()) {
remat_use = computation->AddInstruction(
HloInstruction::CreateGetTupleElement(
ShapeUtil::GetTupleElementShape(remat_use->shape(),
*user.index),
remat_use, *user.index),
"gte.remat");
indirect_users.push_back(instruction_list->CreateItem(remat_use));
gte_cache[*user.index] = remat_use;
} else {
remat_use = cached_gte->second;
}
}
if (user_operand->shape() != remat_use->shape()) {
remat_use = computation->AddInstruction(
HloInstruction::CreateBitcast(user_operand->shape(), remat_use),
"bitcast.remat");
indirect_users.push_back(instruction_list->CreateItem(remat_use));
}
TF_RETURN_IF_ERROR(user.user->instruction->ReplaceOperandWith(
user.operand_number, remat_use));
}
}
TF_RETURN_IF_ERROR(memory_tracker->AddRematerializedInstruction(
best_item, remat_item, absl::MakeSpan(indirect_users)));
ItemList place_before;
const absl::flat_hash_set<Item*> indirect_users_set(indirect_users.begin(),
indirect_users.end());
for (auto user : remat->users()) {
if (!indirect_users_set.contains(instruction_list->GetItem(user))) {
place_before.push_back(instruction_list->GetItem(user));
}
}
for (auto* indirect_user : indirect_users) {
for (auto user : indirect_user->instruction->users()) {
if (!indirect_users_set.contains(instruction_list->GetItem(user))) {
place_before.push_back(instruction_list->GetItem(user));
}
}
}
for (auto* operand : remat->operands()) {
for (auto* operand_user : operand->users()) {
if (operand_user != remat) {
Item* operand_user_item = instruction_list->GetItem(operand_user);
if (!operand_user_item->placed) {
place_before.push_back(operand_user_item);
}
}
}
}
for (auto successor : remat->control_successors()) {
Item* successor_item = instruction_list->GetItem(successor);
CHECK(!successor_item->placed) << successor_item->instruction->name();
place_before.push_back(successor_item);
}
instruction_list->InsertBeforeInstructions(remat_item, place_before);
for (auto* bitcast : indirect_users) {
instruction_list->InsertBeforeInstructions(bitcast, place_before);
}
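  // `best` is effectively dead if every transitive user is a supported
  // indirect user (e.g. a get-tuple-element or bitcast) with no real uses.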
std::function<bool(HloInstruction*)> uses_empty = [&](HloInstruction* i) {
for (auto* u : i->users()) {
if (!IsSupportedIndirectUser(u) || !uses_empty(u)) {
return false;
}
}
return true;
};
if (uses_empty(best)) {
VLOG(2) << best->name() << " is now dead";
if (ContainsKey(*remat_move_instructions, best)) {
instruction_list->Denylist(remat);
}
remat_move_instructions->insert(remat);
net_instructions_added += indirect_users.size();
} else {
net_instructions_added += indirect_users.size() + 1;
}
for (auto* indirect_user : indirect_users) {
instruction_list->Denylist(indirect_user->instruction);
}
if (HloDataflowAnalysis::IsAsynchronousOperationStart(best->opcode()) ||
HloDataflowAnalysis::IsAsynchronousOperationDone(best->opcode())) {
VLOG(2) << "The old instruction " << best->name()
<< " is an async op. Removing to maintain one start to one done "
"invariant to keep the HLO valid.";
TF_RETURN_IF_ERROR(best->DropAllControlDeps());
TF_RETURN_IF_ERROR(computation->RemoveInstruction(best));
}
}
return net_instructions_added;
}
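// Rematerializes `best_item` via compression: inserts a copy to
// `compact_shape` right after it and a decompressing copy before its unplaced
// users, which are rewired to the latter. Always adds two instructions.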
absl::StatusOr<int64_t> CompressInstruction(MemoryUsageTracker* memory_tracker,
Item* best_item,
const Shape& compact_shape,
InstructionList* instruction_list) {
HloInstruction* best = best_item->instruction;
VLOG(5) << "Transposing instruction " << best->name() << " (saving "
<< HumanReadableNumBytes(memory_tracker->MemoryReducedIfCompressed(
best_item, compact_shape))
<< ") to" << compact_shape.ToString(true);
HloComputation* computation = best->parent();
HloInstruction* compressed = computation->AddInstruction(
HloInstruction::CreateUnary(compact_shape, HloOpcode::kCopy, best),
absl::StrCat(best->name(), ".remat_compressed"));
HloInstruction* uncompressed = computation->AddInstruction(
HloInstruction::CreateUnary(best->shape(), HloOpcode::kCopy, compressed),
absl::StrCat(best->name(), ".remat_uncompressed"));
Item* compressed_item = instruction_list->CreateItem(compressed);
compressed_item->placed = true;
Item* uncompressed_item = instruction_list->CreateItem(uncompressed);
std::vector<HloInstruction*> best_users_copy = best->users();
for (HloInstruction* user : best_users_copy) {
if (!memory_tracker->IsPlaced(user)) {
VLOG(5) << " Replacing use of " << best->name() << " in " << user->name()
<< " with " << uncompressed->name();
TF_RETURN_IF_ERROR(best->ReplaceUseWith(user, uncompressed));
}
}
TF_RETURN_IF_ERROR(memory_tracker->AddCompressInstructions(
best_item, compressed_item, uncompressed_item));
ItemList place_before;
for (auto user : uncompressed->users()) {
place_before.push_back(instruction_list->GetItem(user));
}
instruction_list->Denylist(compressed_item->instruction);
instruction_list->Denylist(uncompressed_item->instruction);
instruction_list->InsertBeforeInstructions(uncompressed_item, place_before);
instruction_list->InsertAfterInstructions(compressed_item, {best_item});
return 2;
}
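// Rematerializes `best_item` via host offload: inserts copy-start/copy-done
// pairs that move its output to host memory and back, spaced across enough
// compute to hide the transfer time, then rewires unplaced users to the
// device-side copy-done. Always adds four instructions.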
absl::StatusOr<int64_t> OffloadInstruction(MemoryUsageTracker* memory_tracker,
Item* best_item,
InstructionList* instruction_list) {
HloInstruction* best_instruction = best_item->instruction;
HloComputation* computation = best_instruction->parent();
VLOG(2) << "Best_instruction's users: "
<< absl::StrJoin(best_instruction->users(), ", ",
[](std::string* str, const auto* x) {
return str->append(x->name());
});
Shape instruction_shape_device = best_instruction->shape();
Shape instruction_shape_host = best_instruction->shape();
instruction_shape_host.mutable_layout()->set_memory_space(
memory_tracker->options().host_memory_offload_config->host_memory_space);
Shape context_shape = ShapeUtil::MakeShape(U32, {});
HloInstruction* copy_start_to_host =
computation->AddInstruction(HloInstruction::CreateCopyStart(
ShapeUtil::MakeTupleShape({instruction_shape_host,
instruction_shape_device, context_shape}),
best_instruction));
HloInstruction* copy_done_to_host =
computation->AddInstruction(HloInstruction::CreateUnary(
instruction_shape_host, HloOpcode::kCopyDone, copy_start_to_host));
HloInstruction* copy_start_to_device =
computation->AddInstruction(HloInstruction::CreateCopyStart(
ShapeUtil::MakeTupleShape({instruction_shape_device,
instruction_shape_host, context_shape}),
copy_done_to_host));
HloInstruction* copy_done_to_device = computation->AddInstruction(
HloInstruction::CreateUnary(instruction_shape_device,
HloOpcode::kCopyDone, copy_start_to_device));
VLOG(3) << "Created copy_start_to_host instr: "
<< copy_start_to_host->ToString();
VLOG(3) << "Created copy_done_to_host instr: "
<< copy_done_to_host->ToString();
VLOG(3) << "Created copy_start_to_device instr: "
<< copy_start_to_device->ToString();
VLOG(3) << "Created copy_done_to_device instr: "
<< copy_done_to_device->ToString();
TF_RETURN_IF_ERROR(
copy_start_to_host->Visit(&memory_tracker->options().hlo_cost_analysis));
TF_RETURN_IF_ERROR(
copy_done_to_host->Visit(&memory_tracker->options().hlo_cost_analysis));
TF_RETURN_IF_ERROR(copy_start_to_device->Visit(
&memory_tracker->options().hlo_cost_analysis));
TF_RETURN_IF_ERROR(
copy_done_to_device->Visit(&memory_tracker->options().hlo_cost_analysis));
Item* copy_start_to_host_item =
instruction_list->CreateItem(copy_start_to_host);
Item* copy_done_to_host_item =
instruction_list->CreateItem(copy_done_to_host);
Item* copy_start_to_device_item =
instruction_list->CreateItem(copy_start_to_device);
Item* copy_done_to_device_item =
instruction_list->CreateItem(copy_done_to_device);
instruction_list->Denylist(copy_start_to_host);
instruction_list->Denylist(copy_done_to_host);
instruction_list->Denylist(copy_start_to_device);
instruction_list->Denylist(copy_done_to_device);
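  // Find the earliest unplaced user of the offloaded value; the device-side
  // copy-done must be scheduled before it.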
Item* place_before{nullptr};
{
ItemList place_before_list;
for (auto user : best_instruction->users()) {
if (user == copy_start_to_host) {
continue;
}
auto item_of_user = instruction_list->GetItem(user);
if (item_of_user->placed) {
continue;
}
place_before_list.push_back(item_of_user);
}
CHECK(!place_before_list.empty()) << "Have nothing to place this before!";
for (auto* item = instruction_list->first(); item != nullptr;
item = instruction_list->next(item)) {
if (absl::c_linear_search(place_before_list, item)) {
place_before = item;
break;
}
}
}
CHECK_NE(place_before, nullptr)
<< "Could not find an item to place this before.";
auto get_first_item_after_compute_time = [&](Item* start_item, Item* end_item,
auto successor_func,
float time_spent_on_copy) {
float time_so_far = 0.0;
auto* current_item = start_item;
while (time_so_far < time_spent_on_copy) {
auto next_item = successor_func(current_item);
if (next_item == end_item) {
LOG(WARNING) << "Didn't find enough computation before end of window";
break;
}
current_item = next_item;
CHECK_NE(current_item, nullptr) << "current_item is null";
CHECK_NE(current_item->instruction, nullptr)
<< "current_item's instruction is null";
time_so_far += std::max(
0.0f, memory_tracker->options().hlo_cost_analysis.optimal_seconds(
*current_item->instruction));
}
return current_item;
};
const int64_t bytes_used_by_buffers = memory_tracker->BytesUsedByBuffers(
best_item, false);
const float copy_to_host_time_seconds =
bytes_used_by_buffers /
memory_tracker->options()
.host_memory_offload_config->bandwidth_to_host_bytes_per_second;
const float copy_from_host_time_seconds =
bytes_used_by_buffers /
memory_tracker->options()
.host_memory_offload_config->bandwidth_from_host_bytes_per_second;
VLOG(2) << "Item uses " << bytes_used_by_buffers << "B and will take "
<< copy_to_host_time_seconds << "s to copy to host and "
<< copy_from_host_time_seconds << "s to copy from host.";
VLOG(2) << "Inserting " << copy_start_to_host_item->instruction->name()
<< " immediately after " << best_item->instruction->name();
instruction_list->InsertAfterInstructions(copy_start_to_host_item,
{best_item});
VLOG(2) << "Inserting " << copy_done_to_device_item->instruction->name()
<< " immediately before " << place_before->instruction->name();
instruction_list->InsertBeforeInstructions(copy_done_to_device_item,
{place_before});
auto first_item_after_to_host_copy = get_first_item_after_compute_time(
copy_start_to_host_item, copy_done_to_device_item,
[&instruction_list](Item* item) { return instruction_list->next(item); },
copy_to_host_time_seconds);
VLOG(2) << "Inserting " << copy_done_to_host_item->instruction->name()
<< " immediately after "
<< first_item_after_to_host_copy->instruction->name();
instruction_list->InsertAfterInstructions(copy_done_to_host_item,
{first_item_after_to_host_copy});
auto first_item_before_from_host_copy = get_first_item_after_compute_time(
copy_done_to_device_item, copy_done_to_host_item,
[&instruction_list](Item* item) { return instruction_list->prev(item); },
copy_from_host_time_seconds);
VLOG(2) << "Inserting " << copy_start_to_device_item->instruction->name()
<< " immediately before "
<< first_item_before_from_host_copy->instruction->name();
instruction_list->InsertBeforeInstructions(
copy_start_to_device_item, {first_item_before_from_host_copy});
{
auto item = instruction_list->first();
while (item != nullptr) {
if (item == copy_start_to_host_item || item == copy_done_to_host_item ||
item == copy_start_to_device_item ||
item == copy_done_to_device_item) {
item->placed = true;
} else if (memory_tracker->IsInProgressItem(item)) {
break;
}
item = instruction_list->next(item);
}
}
std::vector<HloInstruction*> best_users_copy = best_instruction->users();
for (HloInstruction* user : best_users_copy) {
if (!memory_tracker->IsPlaced(user)) {
VLOG(3) << " Replacing use of " << best_instruction->name() << " in "
<< user->name() << " with " << copy_done_to_device->name();
TF_RETURN_IF_ERROR(
best_instruction->ReplaceUseWith(user, copy_done_to_device));
} else {
VLOG(3) << user->name() << " is placed, not going to update";
}
}
TF_RETURN_IF_ERROR(memory_tracker->AddHostOffloadCopyInstructions(
best_item, copy_start_to_host_item, copy_done_to_host_item,
copy_start_to_device_item, copy_done_to_device_item));
return 4;
}
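// Counters returned by RematerializeBestBlock: instructions rematerialized,
// net change in instruction count, and search effort expended.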
struct InstructionsAdded {
int remat_count;
int net_instructions_added;
int effort;
};
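// Picks the best candidate block with size in [min_block_size,
// max_block_size] and applies the chosen strategy (compress, host offload, or
// recompute). Returns counters describing the work done.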
absl::StatusOr<InstructionsAdded> RematerializeBestBlock(
int min_block_size, int max_block_size, MemoryUsageTracker* memory_tracker,
InstructionList* instruction_list, HloSchedule* schedule,
int64_t memory_limit_bytes,
absl::flat_hash_map<const HloInstruction*, bool>* rematerializable_map,
absl::flat_hash_set<const HloInstruction*>* remat_move_instructions,
HloRematerialization* rematerialization) {
  CHECK(min_block_size > 0) << "Non-positive block size.";
std::vector<Item*> best_items;
RematStrategy best_strategy;
int effort;
std::tie(best_items, best_strategy, effort) =
memory_tracker->PickRematerializationCandidates(
*instruction_list, memory_limit_bytes, rematerializable_map,
min_block_size, max_block_size,
rematerialization->ComputationPeakMemory(
memory_tracker->computation()));
InstructionsAdded num_instructions_added;
num_instructions_added.remat_count = best_items.size();
num_instructions_added.effort = effort;
if (best_items.empty()) {
num_instructions_added.net_instructions_added = 0;
return num_instructions_added;
}
if (best_strategy.kind == RematStrategy::kCompress) {
CHECK(best_items.size() == 1)
<< "More than one instruction compressed simultaneously.";
HloInstruction* best = best_items[0]->instruction;
VLOG(1) << "Remat via compression: " << best->name() << " (saving "
<< HumanReadableNumBytes(memory_tracker->MemoryReducedIfCompressed(
best_items[0], best_strategy.compact_shape))
<< ")";
TF_ASSIGN_OR_RETURN(
num_instructions_added.net_instructions_added,
CompressInstruction(memory_tracker, best_items[0],
best_strategy.compact_shape, instruction_list));
} else if (best_strategy.kind == RematStrategy::kHostOffload) {
CHECK_EQ(best_items.size(), 1)
<< "More than one buffer offloaded simultaneously.";
VLOG(1) << "Remat via offload: " << best_items[0]->instruction->name();
TF_ASSIGN_OR_RETURN(
num_instructions_added.net_instructions_added,
OffloadInstruction(memory_tracker, best_items[0], instruction_list));
VLOG(4) << "Offload done, hlo computation:\n"
<< memory_tracker->computation()->ToString();
VLOG(6) << "Memory tracker:\n" << memory_tracker->ToString();
} else {
CHECK_EQ(best_strategy.kind, RematStrategy::kRecompute)
<< "Expecting strategy to be Recompute";
VLOG(1) << "Remat via recomputation: {"
<< absl::StrJoin(best_items, ", ",
[](std::string* out, Item* item) {
absl::StrAppend(out, item->instruction->name());
})
<< '}';
TF_ASSIGN_OR_RETURN(
num_instructions_added.net_instructions_added,
RematerializeInstructions(memory_tracker, &best_items,
remat_move_instructions, instruction_list,
schedule, rematerialization));
}
return num_instructions_added;
}
}
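// Computes the peak memory of `computation` under the schedule `order`,
// including the peak usage of any computations it calls.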
absl::StatusOr<int64_t> HloRematerialization::ComputePeakMemory(
const HloComputation* computation, const HloInstructionSequence& order,
const absl::flat_hash_set<absl::string_view>& execution_threads) const {
InstructionList instruction_list(order);
MemoryUsageTracker tracker(options_, computation, *points_to_analysis_,
instruction_list);
int64_t peak_memory = tracker.memory_usage();
for (auto* item = instruction_list.first(); item != nullptr;
item = instruction_list.next(item)) {
const HloInstruction* instruction = item->instruction;
TF_RETURN_IF_ERROR(tracker.BeginInstruction(item));
TF_ASSIGN_OR_RETURN(
int64_t callee_usage,
CalledComputationsMemoryUsage(instruction, execution_threads));
peak_memory =
std::max<int64_t>(peak_memory, tracker.memory_usage() + callee_usage);
TF_RETURN_IF_ERROR(tracker.EndInstruction());
}
VLOG(1) << "Peak memory for " << computation->name() << ": "
<< HumanReadableNumBytes(peak_memory);
return peak_memory;
}
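// Sums the precomputed peak memory of all computations called by
// `instruction` on the included execution threads; embedded call contexts
// count as zero.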
absl::StatusOr<int64_t> HloRematerialization::CalledComputationsMemoryUsage(
const HloInstruction* instruction,
const absl::flat_hash_set<absl::string_view>& execution_threads) const {
const CallSite* callsite =
call_graph_->GetNode(instruction->parent()).GetCallSite(instruction);
if (callsite == nullptr || callsite->context() == CallContext::kEmbedded) {
return 0;
}
int64_t callee_usage = 0;
for (const HloComputation* computation : callsite->called_computations()) {
if (!HloInstruction::IsThreadIncluded(computation->execution_thread(),
execution_threads)) {
continue;
}
TF_RET_CHECK(ContainsKey(computation_peak_memory_, computation));
callee_usage += computation_peak_memory_.at(computation);
}
return callee_usage;
}
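// Rematerializes instructions in `computation` (recursing into control-flow
// callees when needed) until its peak memory fits within memory_limit_bytes
// or no further progress can be made. Rewrites the schedule in place and
// returns whether anything changed.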
absl::StatusOr<bool> HloRematerialization::RematerializeComputation(
HloComputation* computation, HloSchedule* schedule,
int64_t memory_limit_bytes, int64_t min_remat_size,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
const auto peak_memory_usage = computation_peak_memory_.at(computation);
if (peak_memory_usage <= memory_limit_bytes) {
VLOG(1) << "Asked to rematerialize computation of size "
<< peak_memory_usage
<< " but it already fits within the given memory limit ("
<< memory_limit_bytes << ")";
return false;
}
VLOG(1) << "Rematerializing computation " << computation->name()
<< " with limit " << HumanReadableNumBytes(memory_limit_bytes);
VLOG(1) << "peak memory usage is "
<< HumanReadableNumBytes(peak_memory_usage);
CHECK(!ContainsKey(rematerialized_computations_, computation));
InstructionList instruction_list(schedule->sequence(computation));
MemoryUsageTracker memory_tracker(options_, computation, *points_to_analysis_,
instruction_list);
instruction_list.PromoteNodesToSkip([&](Item* item) {
return memory_tracker.AllocatedSize(item) >= min_remat_size;
});
bool changed = false;
absl::flat_hash_set<const HloInstruction*> remat_move_instructions;
absl::flat_hash_map<const HloInstruction*, bool> rematerializable_map;
int64_t peak_memory = memory_tracker.memory_usage();
int64_t remat_count = 0;
int64_t net_instructions_added = 0;
const CallGraphNode& call_graph_node = call_graph_->GetNode(computation);
int64_t instruction_index = 0;
for (auto* item = instruction_list.first(); item != nullptr;
item = instruction_list.next(item)) {
const HloInstruction* instruction = item->instruction;
TF_ASSIGN_OR_RETURN(
int64_t callee_usage,
CalledComputationsMemoryUsage(instruction, execution_threads));
TF_RETURN_IF_ERROR(memory_tracker.BeginInstruction(item));
VLOG(2) << "Program point at " << instruction->name()
<< ", memory usage = " << memory_tracker.memory_usage()
<< ", callee usage = " << callee_usage << ", [" << instruction_index
<< "/" << instruction_list.size() << "]";
instruction_index++;
int min_block_size = 1;
int max_block_size = 1;
if (memory_tracker.AllocatedSize(item) + callee_usage > 0) {
bool is_first_phase = true;
int64_t first_phase_effort = 0;
int64_t second_phase_effort = 0;
while (memory_tracker.memory_usage() + callee_usage >
memory_limit_bytes) {
VLOG(2) << "Over memory limit at instruction " << instruction->name()
<< ", using "
<< HumanReadableNumBytes(memory_tracker.memory_usage() +
callee_usage)
<< ", limit is " << HumanReadableNumBytes(memory_limit_bytes);
TF_ASSIGN_OR_RETURN(
InstructionsAdded instructions_added,
RematerializeBestBlock(min_block_size, max_block_size,
&memory_tracker, &instruction_list, schedule,
memory_limit_bytes, &rematerializable_map,
&remat_move_instructions, this));
net_instructions_added += instructions_added.net_instructions_added;
remat_count += instructions_added.remat_count;
if (is_first_phase) {
first_phase_effort += instructions_added.effort;
} else {
second_phase_effort += instructions_added.effort;
}
if (instructions_added.net_instructions_added > 0) {
VLOG(1) << "memory_usage after rematerialization = "
<< HumanReadableNumBytes(memory_tracker.memory_usage());
}
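        // No candidate found at this block size: widen the search window
        // exponentially; after a success, reset to single-instruction blocks.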
if (instructions_added.remat_count == 0) {
min_block_size = max_block_size + 1;
max_block_size = 2 * max_block_size;
is_first_phase = false;
} else {
max_rematerialized_block_size_ =
std::max(max_rematerialized_block_size_, max_block_size);
changed = true;
min_block_size = 1;
max_block_size = 1;
}
if (max_block_size > options_.block_size_limit ||
second_phase_effort >
options_.block_rematerialization_factor * first_phase_effort) {
break;
}
}
}
const CallSite* callsite = call_graph_node.GetCallSite(instruction);
if (callsite != nullptr &&
callsite->context() == CallContext::kControlFlow &&
memory_tracker.memory_usage() + callee_usage > memory_limit_bytes) {
VLOG(1) << "Memory usage still over the limit ("
<< (memory_tracker.memory_usage() + callee_usage) << " > "
<< memory_limit_bytes
<< "). Rematerializing computations called by "
<< instruction->name();
for (HloComputation* called_computation :
callsite->called_computations()) {
if (!ContainsKey(rematerialized_computations_, called_computation) &&
HloInstruction::IsThreadIncluded(
called_computation->execution_thread(), execution_threads)) {
int64_t subcomputation_memory_limit_bytes = std::max<int64_t>(
0, memory_limit_bytes - memory_tracker.memory_usage());
TF_ASSIGN_OR_RETURN(
bool subcomputation_changed,
RematerializeComputation(called_computation, schedule,
subcomputation_memory_limit_bytes,
min_remat_size, execution_threads));
changed |= subcomputation_changed;
}
}
TF_ASSIGN_OR_RETURN(callee_usage, CalledComputationsMemoryUsage(
instruction, execution_threads));
}
peak_memory = std::max<int64_t>(
peak_memory, memory_tracker.memory_usage() + callee_usage);
VLOG(3) << "peak memory usage = " << HumanReadableNumBytes(peak_memory);
TF_RETURN_IF_ERROR(memory_tracker.EndInstruction());
}
for (auto* instruction : computation->instructions()) {
CHECK(memory_tracker.IsPlaced(instruction)) << instruction->name();
}
VLOG(1) << "In computation " << computation->name() << " rematerialized "
<< remat_count << " instructions; " << net_instructions_added
<< " net instructions added";
VLOG(1) << " peak memory usage now " << HumanReadableNumBytes(peak_memory)
<< " (was "
<< HumanReadableNumBytes(computation_peak_memory_.at(computation))
<< ")";
computation_peak_memory_.at(computation) = peak_memory;
HloInstructionSequence& sequence = schedule->GetOrCreateSequence(computation);
sequence.clear();
for (auto* item = instruction_list.first(); item != nullptr;
item = instruction_list.next(item)) {
HloInstruction* instruction = item->instruction;
sequence.push_back(instruction);
}
rematerialized_computations_.insert(computation);
instructions_rematerialized_ += remat_count;
net_instructions_added_ += net_instructions_added;
return changed;
}
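// Pass entry point: computes per-computation peak memory, adjusts the memory
// limit for module outputs and async computations, rematerializes the entry
// computation, then runs DCE and restores an updated schedule.
//
// Minimal usage sketch, mirroring the setup in the unit tests below
// (argument names are illustrative):
//
//   HloCostAnalysis cost_analysis(shape_size_fn);
//   HloRematerialization::RematerializationModeConfig config(
//       true, true, false);
//   HloRematerialization::Options options(cost_analysis, config,
//                                         memory_limit_bytes, 1, 1,
//                                         /*min_remat_size=*/0, nullptr,
//                                         std::nullopt, {});
//   HloRematerialization::RematerializationSizes sizes;
//   HloRematerialization remat(options, sizes);
//   TF_ASSIGN_OR_RETURN(bool changed, remat.Run(module));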
absl::StatusOr<bool> HloRematerialization::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
if (options_.remat_mode_config.host_offload) {
CHECK(options_.host_memory_offload_config.has_value())
<< "Host memory config is required when host memory offload strategy "
"is specified";
}
VLOG(1) << "HloRematerialization() with memory limit of "
<< HumanReadableNumBytes(options_.memory_limit_bytes);
if (!options_.remat_mode_config.compress &&
!options_.remat_mode_config.recompute &&
!options_.remat_mode_config.host_offload) {
VLOG(1) << "All rematerialization strategies are disabled. Skipping.";
return false;
}
VLOG(2) << "HloRemat mode: compress: " << options_.remat_mode_config.compress
<< ", host_offload: " << options_.remat_mode_config.host_offload
<< ", recompute: " << options_.remat_mode_config.recompute;
XLA_VLOG_LINES(3, "Before HloRematerialization:\n" + module->ToString());
computation_peak_memory_.clear();
rematerialized_computations_.clear();
instructions_rematerialized_ = 0;
net_instructions_added_ = 0;
TF_RET_CHECK(module->has_schedule());
TF_ASSIGN_OR_RETURN(points_to_analysis_, TuplePointsToAnalysis::Run(module));
next_channel_id_ = hlo_query::NextChannelId(*module);
int64_t module_output_size = 0;
ShapeUtil::ForEachSubshape(
module->result_shape(),
[&module_output_size, this](const Shape& subshape,
const ShapeIndex& output_index) {
module_output_size += options_.hlo_cost_analysis.GetShapeSize(subshape);
});
int64_t adjusted_memory_limit_bytes =
std::max<int64_t>(0, options_.memory_limit_bytes - module_output_size);
VLOG(1) << "Adjusted memory limit accounting for output ("
<< HumanReadableNumBytes(module_output_size)
<< "): " << HumanReadableNumBytes(adjusted_memory_limit_bytes);
call_graph_ = CallGraph::Build(module);
int64_t total_async_peak_memory = 0;
if (!options_.async_computation_parallelism.empty()) {
absl::flat_hash_set<std::string_view> async_threads;
for (const auto& [computation, _] :
options_.async_computation_parallelism) {
async_threads.insert(computation->execution_thread());
}
TF_RETURN_IF_ERROR(call_graph_->VisitNodes(
[this, module,
&async_threads](const CallGraphNode& node) -> absl::Status {
auto callee_thread = node.computation()->execution_thread();
if (node.context() == CallContext::kControlFlow &&
HloInstruction::IsThreadIncluded(callee_thread, async_threads)) {
TF_ASSIGN_OR_RETURN(computation_peak_memory_[node.computation()],
ComputePeakMemory(node.computation(),
module->schedule().sequence(
node.computation()),
{callee_thread}));
}
return absl::OkStatus();
},
false));
int64_t async_peak_memory = 0;
for (const auto [entry_computation, parallel_threads] :
options_.async_computation_parallelism) {
const int64_t peak_memory =
computation_peak_memory_.at(entry_computation);
const int64_t parallel_peak_memory = peak_memory * parallel_threads;
async_peak_memory = std::max(async_peak_memory, parallel_peak_memory);
}
adjusted_memory_limit_bytes =
std::max<int64_t>(0, adjusted_memory_limit_bytes - async_peak_memory);
total_async_peak_memory += async_peak_memory;
VLOG(1) << "Adjusted memory limit accounting for async computations ("
<< HumanReadableNumBytes(async_peak_memory)
<< "): " << HumanReadableNumBytes(adjusted_memory_limit_bytes);
computation_peak_memory_.clear();
}
TF_RETURN_IF_ERROR(call_graph_->VisitNodes(
[this, module,
&execution_threads](const CallGraphNode& node) -> absl::Status {
if (node.context() == CallContext::kControlFlow &&
HloInstruction::IsThreadIncluded(
node.computation()->execution_thread(), execution_threads)) {
TF_ASSIGN_OR_RETURN(
computation_peak_memory_[node.computation()],
ComputePeakMemory(node.computation(),
module->schedule().sequence(node.computation()),
execution_threads));
}
return absl::OkStatus();
},
false));
const int64_t before_peak_memory =
computation_peak_memory_.at(module->entry_computation()) +
module_output_size + total_async_peak_memory;
VLOG(1) << "Peak memory usage of module (before): "
<< HumanReadableNumBytes(before_peak_memory);
for (auto* computation :
module->MakeComputationPostOrder(execution_threads)) {
TF_RETURN_IF_ERROR(computation->Accept(&options_.hlo_cost_analysis));
}
TF_ASSIGN_OR_RETURN(
bool changed,
RematerializeComputation(module->entry_computation(), &module->schedule(),
adjusted_memory_limit_bytes,
options_.min_remat_size, execution_threads));
HloSchedule saved_schedule = module->schedule();
module->clear_schedule();
TF_ASSIGN_OR_RETURN(bool dead_code_removed, HloDCE().Run(module));
changed |= dead_code_removed;
TF_RETURN_IF_ERROR(saved_schedule.Update(execution_threads));
TF_RETURN_IF_ERROR(module->set_schedule(std::move(saved_schedule)));
VLOG(1) << "Rematerialized " << instructions_rematerialized_
<< " instructions in module " << module->name() << "; "
<< net_instructions_added_ << " net instructions added";
const int64_t current_peak_memory =
computation_peak_memory_.at(module->entry_computation()) +
module_output_size + total_async_peak_memory;
VLOG(1) << "Peak memory usage of module now "
<< HumanReadableNumBytes(current_peak_memory) << " ("
<< current_peak_memory << " bytes), was "
<< HumanReadableNumBytes(before_peak_memory) << " ("
<< before_peak_memory << " bytes)";
const int64_t reduced_peak_memory = before_peak_memory - current_peak_memory;
VLOG(1) << "Reduced peak memory by "
<< HumanReadableNumBytes(reduced_peak_memory) << " ("
<< reduced_peak_memory << " bytes)";
sizes_.before_bytes = before_peak_memory;
sizes_.after_bytes = current_peak_memory;
XLA_VLOG_LINES(5, "After HloRematerialization:\n" + module->ToString());
if (current_peak_memory > options_.memory_limit_bytes) {
LOG(WARNING) << absl::StrFormat(
"Can't reduce memory use below %s (%d bytes) by rematerialization; "
"only reduced to %s (%d bytes), down from %s (%d bytes) originally",
HumanReadableNumBytes(options_.memory_limit_bytes),
options_.memory_limit_bytes, HumanReadableNumBytes(current_peak_memory),
current_peak_memory, HumanReadableNumBytes(before_peak_memory),
before_peak_memory);
}
return changed;
}
} | #include "xla/service/hlo_rematerialization.h"
#include <algorithm>
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <gmock/gmock.h>
#include "absl/container/flat_hash_set.h"
#include "absl/strings/match.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/layout.h"
#include "xla/service/hlo_cost_analysis.h"
#include "xla/service/hlo_memory_scheduler.h"
#include "xla/service/hlo_rematerialization_test_utils.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/util.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla {
namespace {
namespace op = xla::testing::opcode_matchers;
using ::testing::_;
class AsyncRematerializationTest : public RematerializationTestBase {
protected:
absl::StatusOr<bool> RunHloRematerialization(
int64_t memory_limit_bytes, HloModule* module,
const absl::flat_hash_map<HloComputation*, int64_t>&
async_computation_parallelism,
int64_t min_remat_size = 0) {
TF_EXPECT_OK(verifier().Run(module).status());
if (!module->has_schedule()) {
HloMemoryScheduler scheduler(
[](const BufferValue& buffer) { return ByteSizeOf(buffer.shape()); },
ComputationSchedulerToModuleScheduler(DefaultMemoryScheduler));
TF_EXPECT_OK(scheduler.Run(module).status());
}
HloRematerialization::RematerializationModeConfig config(
true, true, false);
auto shape_size_func = [](const Shape& shape) { return ByteSizeOf(shape); };
HloCostAnalysis cost_analysis(shape_size_func);
HloRematerialization::Options options(
cost_analysis, config, memory_limit_bytes,
1, 1,
min_remat_size, nullptr,
std::nullopt,
async_computation_parallelism);
HloRematerialization::RematerializationSizes sizes;
HloRematerialization remat(options, sizes);
return remat.Run(module, {HloInstruction::kMainExecutionThread});
}
static constexpr int64_t kNumParallelThreads = 16;
};
TEST_F(AsyncRematerializationTest, AsyncComputation) {
constexpr std::string_view hlo = R"(
HloModule async, is_scheduled=true
%offload_computation {
%param = f32[1]{0} parameter(0)
%reshape = f32[] reshape(f32[1]{0} %param)
%broadcast = f32[1024]{0} broadcast(f32[] %reshape), dimensions={}
%negate = f32[1024]{0} negate(f32[1024]{0} %broadcast)
%concatenate = f32[2048]{0} concatenate(f32[1024]{0} %negate, f32[1024]{0} %negate), dimensions={0}
%slice = f32[1]{0} slice(f32[2048]{0} %concatenate), slice={[0:1]}
%concatenate.1 = f32[1025]{0} concatenate(f32[1024]{0} %broadcast, f32[1]{0} %slice), dimensions={0}
ROOT %slice.1 = f32[1]{0} slice(f32[1025]{0} %concatenate.1), slice={[0:1]}
}
%main_computation {
%param = f32[1]{0} parameter(0)
%reshape = f32[] reshape(f32[1]{0} %param)
%broadcast = f32[1024]{0} broadcast(f32[] %reshape), dimensions={}
%negate = f32[1024]{0} negate(f32[1024]{0} %broadcast)
%concatenate = f32[2048]{0} concatenate(f32[1024]{0} %negate, f32[1024]{0} %negate), dimensions={0}
%slice = f32[1]{0} slice(f32[2048]{0} %concatenate), slice={[0:1]}
%concatenate.1 = f32[1025]{0} concatenate(f32[1024]{0} %broadcast, f32[1]{0} %slice), dimensions={0}
ROOT %slice.1 = f32[1]{0} slice(f32[1025]{0} %concatenate.1), slice={[0:1]}
}
ENTRY %main {
%param = f32[1]{0} parameter(0)
%call-start = ((f32[1]{0}), f32[1]{0}, s32[]) call-start(f32[1]{0} %param), to_apply=%offload_computation, async_execution_thread="offload"
%call-done = f32[1]{0} call-done(((f32[1]{0}), f32[1]{0}, s32[]) %call-start)
ROOT %call = f32[1]{0} call(f32[1]{0} %call-done), to_apply=%main_computation
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo));
HloInstruction* call_start = FindInstruction(module.get(), "call-start");
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
RunHloRematerialization(
kNumParallelThreads * 16 * 1024 + 14 * 1024,
module.get(),
{{call_start->async_wrapped_computation(), kNumParallelThreads}}));
EXPECT_TRUE(changed);
}
class RecomputeAndCompressHloRematerializationTest
: public RematerializationTestBase {
protected:
absl::StatusOr<bool> RunHloRematerialization(int64_t memory_limit_bytes,
HloModule* module,
int64_t min_remat_size = 0) {
TF_EXPECT_OK(verifier().Run(module).status());
if (!module->has_schedule()) {
HloMemoryScheduler scheduler(
[](const BufferValue& buffer) { return ByteSizeOf(buffer.shape()); },
ComputationSchedulerToModuleScheduler(DefaultMemoryScheduler));
TF_EXPECT_OK(scheduler.Run(module).status());
}
for (const HloComputation* computation : module->computations()) {
before_computation_names_.insert(computation->name());
for (const HloInstruction* instruction : computation->instructions()) {
before_instruction_names_.insert(instruction->name());
}
}
HloRematerialization::RematerializationModeConfig config(
true, true, false);
auto shape_size_func = [](const Shape& shape) { return ByteSizeOf(shape); };
HloCostAnalysis cost_analysis(shape_size_func);
HloRematerialization::Options options(
cost_analysis, config, memory_limit_bytes,
1, 1,
min_remat_size, nullptr,
std::nullopt,
{});
HloRematerialization::RematerializationSizes sizes;
HloRematerialization remat(options, sizes);
absl::StatusOr<bool> result = remat.Run(module);
for (const HloComputation* computation : module->computations()) {
if (!before_computation_names_.contains(computation->name())) {
continue;
}
for (const HloInstruction* instruction : computation->instructions()) {
after_instruction_names_.insert(instruction->name());
}
}
return result;
}
void CheckForRematInInstructionNames(absl::string_view test_case_name) {
constexpr const absl::string_view kRematInstructionNameMustContain =
".remat";
for (const auto& instruction_name : after_instruction_names_) {
if (!before_instruction_names_.contains(instruction_name)) {
EXPECT_TRUE(absl::StrContains(instruction_name,
kRematInstructionNameMustContain))
<< "[" << test_case_name << "] Instruction \"" << instruction_name
<< "\" must contain \"" << kRematInstructionNameMustContain << "\"";
}
}
}
private:
absl::flat_hash_set<absl::string_view> before_computation_names_;
absl::flat_hash_set<absl::string_view> before_instruction_names_;
absl::flat_hash_set<absl::string_view> after_instruction_names_;
};
TEST_F(RecomputeAndCompressHloRematerializationTest, SingleComputation) {
auto module = CreateNewVerifiedModule();
HloComputation* computation =
module->AddEntryComputation(MakeRematerializableComputation());
const HloInstruction* slice = computation->root_instruction();
ASSERT_THAT(slice, op::Slice(op::Concatenate(op::Broadcast(_), _)));
const HloInstruction* concat = slice->operand(0);
const HloInstruction* bcast = concat->operand(0);
TF_ASSERT_OK_AND_ASSIGN(bool changed,
RunHloRematerialization(
14 * 1024, module.get()));
EXPECT_TRUE(changed);
EXPECT_EQ(computation->root_instruction(), slice);
const HloInstruction* remat_bcast = concat->operand(0);
EXPECT_THAT(remat_bcast, op::Broadcast(::testing::Ne(bcast)));
EXPECT_EQ(module->schedule()
.sequence(computation)
.instructions()[computation->instruction_count() - 2],
concat);
EXPECT_EQ(module->schedule()
.sequence(computation)
.instructions()[computation->instruction_count() - 3],
remat_bcast);
CheckForRematInInstructionNames(
::testing::UnitTest::GetInstance()->current_test_info()->name());
}
TEST_F(RecomputeAndCompressHloRematerializationTest,
SingleComputationNoWorthRemat) {
auto module = CreateNewVerifiedModule();
HloComputation* computation =
module->AddEntryComputation(MakeRematerializableComputation());
const HloInstruction* slice = computation->root_instruction();
ASSERT_THAT(slice, op::Slice(op::Concatenate(op::Broadcast(_), _)));
TF_ASSERT_OK_AND_ASSIGN(bool changed,
RunHloRematerialization(
14 * 1024, module.get(),
14 * 1024));
EXPECT_FALSE(changed);
}
TEST_F(RecomputeAndCompressHloRematerializationTest,
SingleComputationNoRematerialization) {
auto module = CreateNewVerifiedModule();
HloComputation* computation =
module->AddEntryComputation(MakeRematerializableComputation());
EXPECT_EQ(computation->instruction_count(), 8);
TF_ASSERT_OK_AND_ASSIGN(bool changed,
RunHloRematerialization(
20 * 1024, module.get()));
EXPECT_FALSE(changed);
EXPECT_EQ(computation->instruction_count(), 8);
}
TEST_F(RecomputeAndCompressHloRematerializationTest, RematerializeAroundWhile) {
auto module = CreateNewVerifiedModule();
auto cond_builder = HloComputation::Builder(TestName() + ".cond");
cond_builder.AddInstruction(
HloInstruction::CreateParameter(0, vec1_shape_, "param"));
cond_builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(true)));
HloComputation* while_cond =
module->AddEmbeddedComputation(cond_builder.Build());
HloComputation* body_computation = module->AddEmbeddedComputation(
MakeRematerializableComputation(".body"));
HloComputation* entry_computation =
module->AddEntryComputation(MakeRematerializableWhileComputation(
while_cond, body_computation));
EXPECT_EQ(entry_computation->instruction_count(), 7);
EXPECT_EQ(body_computation->instruction_count(), 8);
TF_ASSERT_OK_AND_ASSIGN(bool changed,
RunHloRematerialization(
17 * 1024, module.get()));
EXPECT_TRUE(changed);
EXPECT_EQ(entry_computation->instruction_count(), 8);
EXPECT_EQ(body_computation->instruction_count(), 8);
CheckForRematInInstructionNames(
::testing::UnitTest::GetInstance()->current_test_info()->name());
}
TEST_F(RecomputeAndCompressHloRematerializationTest,
RematerializeEntryAndWhileBody) {
auto module = CreateNewVerifiedModule();
auto cond_builder = HloComputation::Builder(TestName() + ".cond");
cond_builder.AddInstruction(
HloInstruction::CreateParameter(0, vec1_shape_, "param"));
cond_builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(true)));
HloComputation* while_cond =
module->AddEmbeddedComputation(cond_builder.Build());
HloComputation* body_computation = module->AddEmbeddedComputation(
MakeRematerializableComputation(".body"));
HloComputation* entry_computation =
module->AddEntryComputation(MakeRematerializableWhileComputation(
while_cond, body_computation));
EXPECT_EQ(entry_computation->instruction_count(), 7);
EXPECT_EQ(body_computation->instruction_count(), 8);
TF_ASSERT_OK_AND_ASSIGN(bool changed,
RunHloRematerialization(
15 * 1024, module.get()));
EXPECT_TRUE(changed);
EXPECT_EQ(entry_computation->instruction_count(), 9);
EXPECT_EQ(body_computation->instruction_count(), 9);
CheckForRematInInstructionNames(
::testing::UnitTest::GetInstance()->current_test_info()->name());
}
TEST_F(RecomputeAndCompressHloRematerializationTest,
RematerializeNestedComputations) {
auto module = CreateNewVerifiedModule();
auto cond_builder = HloComputation::Builder(TestName() + ".cond");
cond_builder.AddInstruction(
HloInstruction::CreateParameter(0, vec1_shape_, "param"));
cond_builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(true)));
HloComputation* while_cond =
module->AddEmbeddedComputation(cond_builder.Build());
HloComputation* while_cond_copy =
module->AddEmbeddedComputation(while_cond->Clone());
HloComputation* inner_computation = module->AddEmbeddedComputation(
MakeRematerializableComputation(".inner"));
HloComputation* middle_computation =
module->AddEmbeddedComputation(MakeRematerializableWhileComputation(
while_cond, inner_computation,
".middle"));
HloComputation* entry_computation =
module->AddEntryComputation(MakeRematerializableWhileComputation(
while_cond_copy, middle_computation));
EXPECT_EQ(entry_computation->instruction_count(), 7);
EXPECT_EQ(middle_computation->instruction_count(), 7);
EXPECT_EQ(inner_computation->instruction_count(), 8);
TF_ASSERT_OK_AND_ASSIGN(bool changed,
RunHloRematerialization(
13 * 1024, module.get()));
EXPECT_TRUE(changed);
EXPECT_EQ(entry_computation->instruction_count(), 9);
EXPECT_EQ(middle_computation->instruction_count(), 9);
EXPECT_EQ(inner_computation->instruction_count(), 9);
CheckForRematInInstructionNames(
::testing::UnitTest::GetInstance()->current_test_info()->name());
}
TEST_F(RecomputeAndCompressHloRematerializationTest, RngNotRematerialized) {
auto module = CreateNewVerifiedModule();
auto builder = HloComputation::Builder(TestName());
auto param = builder.AddInstruction(
HloInstruction::CreateParameter(0, scalar_shape_, "param"));
auto rng = builder.AddInstruction(HloInstruction::CreateRng(
vec1024_shape_, RandomDistribution::RNG_UNIFORM, {param, param}));
auto tanh = builder.AddInstruction(
HloInstruction::CreateUnary(vec1024_shape_, HloOpcode::kTanh, rng));
auto exp = builder.AddInstruction(
HloInstruction::CreateUnary(vec1024_shape_, HloOpcode::kExp, rng));
auto add_0 = builder.AddInstruction(
HloInstruction::CreateBinary(vec1024_shape_, HloOpcode::kAdd, rng, tanh));
auto add_1 = builder.AddInstruction(HloInstruction::CreateBinary(
vec1024_shape_, HloOpcode::kAdd, rng,
builder.AddInstruction(HloInstruction::CreateBinary(
vec1024_shape_, HloOpcode::kAdd, exp, add_0))));
builder.AddInstruction(HloInstruction::CreateBinary(
vec1024_shape_, HloOpcode::kAdd, rng,
builder.AddInstruction(HloInstruction::CreateBinary(
vec1024_shape_, HloOpcode::kAdd, tanh, add_1))));
HloComputation* entry_computation =
module->AddEntryComputation(builder.Build());
auto count_rngs = [](const HloComputation* computation) {
int64_t rng_count = 0;
for (auto* instruction : computation->instructions()) {
if (instruction->opcode() == HloOpcode::kRng) {
++rng_count;
}
}
return rng_count;
};
ASSERT_EQ(count_rngs(entry_computation), 1);
const int64_t original_instruction_count =
entry_computation->instruction_count();
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
RunHloRematerialization(
4 * ByteSizeOf(vec1024_shape_), module.get()));
EXPECT_TRUE(changed);
EXPECT_EQ(count_rngs(entry_computation), 1);
EXPECT_GT(entry_computation->instruction_count(), original_instruction_count);
CheckForRematInInstructionNames(
::testing::UnitTest::GetInstance()->current_test_info()->name());
}
TEST_F(RecomputeAndCompressHloRematerializationTest,
InstructionRematerializedMultipleTimes) {
auto module = CreateNewVerifiedModule();
HloComputation* subcomputation = nullptr;
{
auto builder = HloComputation::Builder(TestName() + ".subcomputation");
auto param = builder.AddInstruction(
HloInstruction::CreateParameter(0, vec1024_shape_, "param"));
auto concat = builder.AddInstruction(HloInstruction::CreateConcatenate(
ShapeUtil::MakeShape(xla::F32, {2048}), {param, param},
0));
builder.AddInstruction(HloInstruction::CreateSlice(
vec1024_shape_, concat, {0},
{1024}, {1}));
subcomputation = module->AddEmbeddedComputation(builder.Build());
}
auto builder = HloComputation::Builder(TestName());
auto param = builder.AddInstruction(
HloInstruction::CreateParameter(0, scalar_shape_, "param"));
auto bcast = builder.AddInstruction(
HloInstruction::CreateBroadcast(vec1024_shape_, param, {}));
auto add_1 = builder.AddInstruction(HloInstruction::CreateBinary(
vec1024_shape_, HloOpcode::kAdd, bcast, bcast));
auto call_1 = builder.AddInstruction(
HloInstruction::CreateCall(vec1024_shape_, {add_1}, subcomputation));
auto add_2 = builder.AddInstruction(HloInstruction::CreateBinary(
vec1024_shape_, HloOpcode::kAdd, bcast, call_1));
auto call_2 = builder.AddInstruction(
HloInstruction::CreateCall(vec1024_shape_, {add_2}, subcomputation));
auto add_3 = builder.AddInstruction(HloInstruction::CreateBinary(
vec1024_shape_, HloOpcode::kAdd, bcast, call_2));
auto call_3 = builder.AddInstruction(
HloInstruction::CreateCall(vec1024_shape_, {add_3}, subcomputation));
auto add_4 = builder.AddInstruction(HloInstruction::CreateBinary(
vec1024_shape_, HloOpcode::kAdd, bcast, call_3));
HloComputation* entry_computation =
module->AddEntryComputation(builder.Build());
auto count_broadcasts = [](const HloComputation* computation) {
int64_t bcast_count = 0;
for (auto* instruction : computation->instructions()) {
if (instruction->opcode() == HloOpcode::kBroadcast) {
bcast_count++;
}
}
return bcast_count;
};
EXPECT_EQ(count_broadcasts(entry_computation), 1);
EXPECT_EQ(entry_computation->instruction_count(), 9);
EXPECT_EQ(add_2->operand(0), bcast);
EXPECT_EQ(add_3->operand(0), bcast);
EXPECT_EQ(add_4->operand(0), bcast);
TF_ASSERT_OK_AND_ASSIGN(bool changed,
RunHloRematerialization(
                              /*memory_limit_bytes=*/22 * 1024, module.get()));
EXPECT_TRUE(changed);
EXPECT_EQ(count_broadcasts(entry_computation), 4);
EXPECT_EQ(entry_computation->instruction_count(), 12);
EXPECT_NE(add_2->operand(0), bcast);
EXPECT_THAT(add_2->operand(0), op::Broadcast(param));
EXPECT_NE(add_3->operand(0), bcast);
EXPECT_THAT(add_3->operand(0), op::Broadcast(param));
EXPECT_NE(add_4->operand(0), bcast);
EXPECT_THAT(add_4->operand(0), op::Broadcast(param));
CheckForRematInInstructionNames(
::testing::UnitTest::GetInstance()->current_test_info()->name());
}
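// Copies are never rematerialized; exactly one kCopy remains after the pass.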
TEST_F(RecomputeAndCompressHloRematerializationTest, CopyNotRematerialized) {
auto module = CreateNewVerifiedModule();
auto builder = HloComputation::Builder(TestName());
auto param = builder.AddInstruction(
HloInstruction::CreateParameter(0, vec1024_shape_, "param"));
auto copy = builder.AddInstruction(
HloInstruction::CreateUnary(vec1024_shape_, HloOpcode::kCopy, param));
auto negate_a_1 = builder.AddInstruction(
HloInstruction::CreateUnary(vec1024_shape_, HloOpcode::kNegate, copy));
auto negate_a_2 = builder.AddInstruction(HloInstruction::CreateUnary(
vec1024_shape_, HloOpcode::kNegate, negate_a_1));
auto negate_b_1 = builder.AddInstruction(
HloInstruction::CreateUnary(vec1024_shape_, HloOpcode::kNegate, copy));
auto negate_b_2 = builder.AddInstruction(HloInstruction::CreateUnary(
vec1024_shape_, HloOpcode::kNegate, negate_b_1));
builder.AddInstruction(HloInstruction::CreateTuple({negate_a_2, negate_b_2}));
HloComputation* entry_computation =
module->AddEntryComputation(builder.Build());
TF_ASSERT_OK_AND_ASSIGN(bool changed,
RunHloRematerialization(
                              /*memory_limit_bytes=*/1 * 1024, module.get()));
auto count_copies = [](const HloComputation* computation) {
int64_t copy_count = 0;
for (auto* instruction : computation->instructions()) {
if (instruction->opcode() == HloOpcode::kCopy) {
copy_count++;
}
}
return copy_count;
};
EXPECT_TRUE(changed);
EXPECT_EQ(count_copies(entry_computation), 1);
CheckForRematInInstructionNames(
::testing::UnitTest::GetInstance()->current_test_info()->name());
}
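// A broadcast that is only live through a bitcast is rematerialized together
// with the bitcast, and both clones are rescheduled right before the
// consuming concatenate.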
TEST_F(RecomputeAndCompressHloRematerializationTest, ThroughBitcastRemat) {
const std::string& hlo_string = R"(
HloModule fusion, is_scheduled=true
ENTRY %mycomp (param: f32[1]) -> f32[1] {
%param = f32[1]{0} parameter(0)
%reshape = f32[] reshape(f32[1]{0} %param)
%broadcast = f32[1024,1]{1,0} broadcast(f32[] %reshape), dimensions={}
%bitcast = f32[1024]{0} bitcast(f32[1024,1]{1,0} %broadcast)
%negate = f32[1024,1]{1,0} negate(f32[1024,1]{1,0} %broadcast)
%concatenate = f32[2048,1]{1,0} concatenate(f32[1024,1]{1,0} %negate, f32[1024,1]{1,0} %negate), dimensions={0}
%slice = f32[1,1]{1,0} slice(f32[2048,1]{1,0} %concatenate), slice={[0:1], [0:1]}
%bitcast.1 = f32[1]{0} bitcast(f32[1,1]{1,0} %slice)
%concatenate.1 = f32[1025]{0} concatenate(f32[1024]{0} %bitcast, f32[1]{0} %bitcast.1), dimensions={0}
ROOT %slice.1 = f32[1]{0} slice(f32[1025]{0} %concatenate.1), slice={[0:1]}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
auto* computation = module->entry_computation();
const HloInstruction* slice = computation->root_instruction();
ASSERT_THAT(slice,
op::Slice(op::Concatenate(op::Bitcast(op::Broadcast(_)), _)));
const HloInstruction* concat = slice->operand(0);
const HloInstruction* bcast = concat->operand(0)->operand(0);
TF_ASSERT_OK_AND_ASSIGN(bool changed,
RunHloRematerialization(
                              /*memory_limit_bytes=*/14 * 1024, module.get()));
EXPECT_TRUE(changed);
EXPECT_EQ(computation->root_instruction(), slice);
const HloInstruction* remat_bitcast = concat->operand(0);
const HloInstruction* remat_broadcast = remat_bitcast->operand(0);
EXPECT_THAT(remat_broadcast, op::Broadcast(::testing::Ne(bcast)));
EXPECT_EQ(module->schedule()
.sequence(computation)
.instructions()[computation->instruction_count() - 2],
concat);
EXPECT_EQ(module->schedule()
.sequence(computation)
.instructions()[computation->instruction_count() - 3],
remat_bitcast);
EXPECT_EQ(module->schedule()
.sequence(computation)
.instructions()[computation->instruction_count() - 4],
remat_broadcast);
CheckForRematInInstructionNames(
::testing::UnitTest::GetInstance()->current_test_info()->name());
}
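// Regression test: with a memory limit far below the live set, the pass must
// still terminate instead of endlessly rematerializing the broadcast/bitcast
// pairs feeding the root add.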
TEST_F(RecomputeAndCompressHloRematerializationTest,
ThroughBitcastRematInfiniteLoop) {
const std::string& hlo_string = R"(
HloModule fusion, is_scheduled=true
ENTRY %mycomp (param: f32[1]) -> f32[1024] {
%param = f32[1]{0} parameter(0)
%reshape = f32[] reshape(f32[1]{0} %param)
%broadcast = f32[1024,1]{1,0} broadcast(f32[] %reshape), dimensions={}
%bitcast = f32[1024]{0} bitcast(f32[1024,1]{1,0} %broadcast)
%broadcast2 = f32[1024,1]{1,0} broadcast(f32[] %reshape), dimensions={}
%bitcast2 = f32[1024]{0} bitcast(f32[1024,1]{1,0} %broadcast2)
ROOT %add = f32[1024]{0} add(f32[1024]{0} %bitcast, f32[1024]{0} %bitcast2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
auto* computation = module->entry_computation();
const HloInstruction* add = computation->root_instruction();
TF_ASSERT_OK_AND_ASSIGN(bool changed,
RunHloRematerialization(
                              /*memory_limit_bytes=*/1024, module.get()));
ASSERT_THAT(add, op::Add(op::Bitcast(op::Broadcast(_)),
op::Bitcast(op::Broadcast(_))));
EXPECT_TRUE(changed);
CheckForRematInInstructionNames(
::testing::UnitTest::GetInstance()->current_test_info()->name());
}
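// A fusion with a tuple-shaped result can be rematerialized: the late
// get-tuple-element at the root is replaced by a fresh fusion clone.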
TEST_F(RecomputeAndCompressHloRematerializationTest, RematTupleShape) {
const std::string& hlo_string = R"(
HloModule fusion, is_scheduled=true
%add_mul_comp {
%p0 = f32[] parameter(0)
%p1 = f32[] parameter(1)
%x = f32[1024]{0} broadcast(f32[] %p0), dimensions={}
%y = f32[1024]{0} broadcast(f32[] %p1), dimensions={}
%add = f32[1024] add(%x, %y)
%mul = f32[1024] multiply(%x, %y)
ROOT %out = (f32[1024], f32[1024]) tuple(%add, %mul)
}
ENTRY %entry {
%param.0 = f32[] parameter(0)
%param.1 = f32[] parameter(1)
%fus = (f32[1024]{0}, f32[1024]{0}) fusion(%param.0, %param.1), kind=kLoop,
calls=%add_mul_comp
%gte.1 = f32[1024]{0} get-tuple-element(%fus), index=0
%add = f32[1024]{0} add(f32[1024]{0} %gte.1, f32[1024]{0} %gte.1)
%broadcast.1 = f32[1024]{0} broadcast(f32[] %param.0), dimensions={}
%mul = f32[1024]{0} multiply(f32[1024]{0} %add, f32[1024]{0} %broadcast.1)
%gte.2 = f32[1024]{0} get-tuple-element(%fus), index=1
ROOT %add.2 = f32[1024]{0} add(f32[1024]{0} %mul, f32[1024]{0} %gte.2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
const HloComputation* computation = module->entry_computation();
const HloInstruction* add = computation->root_instruction();
ASSERT_THAT(add, op::Add(op::Multiply(), op::GetTupleElement(op::Fusion())));
const HloInstruction* fusion = add->operand(0)->operand(0);
TF_ASSERT_OK_AND_ASSIGN(bool changed,
RunHloRematerialization(
                              /*memory_limit_bytes=*/11 * 1024, module.get()));
EXPECT_TRUE(changed);
ASSERT_THAT(
add, op::Add(op::Multiply(), AllOf(op::Fusion(), ::testing::Ne(fusion))));
CheckForRematInInstructionNames(
::testing::UnitTest::GetInstance()->current_test_info()->name());
}
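// Two elements of the tuple-shaped fusion are consumed late; both consumers
// must share a single rematerialized fusion rather than getting one clone
// each.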
TEST_F(RecomputeAndCompressHloRematerializationTest, RematTupleShapeDoubleUse) {
const std::string& hlo_string = R"(
HloModule fusion, is_scheduled=true
%add_mul_comp {
%p0 = f32[] parameter(0)
%p1 = f32[] parameter(1)
%x = f32[1024]{0} broadcast(f32[] %p0), dimensions={}
%y = f32[1024]{0} broadcast(f32[] %p1), dimensions={}
%add = f32[1024] add(%x, %y)
%mul = f32[1024] multiply(%x, %y)
ROOT %out = (f32[1024], f32[1024]) tuple(%add, %mul)
}
ENTRY %entry {
%param.0 = f32[] parameter(0)
%param.1 = f32[] parameter(1)
%fus = (f32[1024]{0}, f32[1024]{0}) fusion(%param.0, %param.1), kind=kLoop,
calls=%add_mul_comp
%gte.1 = f32[1024]{0} get-tuple-element(%fus), index=0
%add = f32[1024]{0} add(f32[1024]{0} %gte.1, f32[1024]{0} %gte.1)
%broadcast.1 = f32[1024]{0} broadcast(f32[] %param.0), dimensions={}
%mul = f32[1024]{0} multiply(f32[1024]{0} %add, f32[1024]{0} %broadcast.1)
%gte.2 = f32[1024]{0} get-tuple-element(%fus), index=1
%gte.3 = f32[1024]{0} get-tuple-element(%fus), index=0
%add.2 = f32[1024]{0} add(f32[1024]{0} %mul, f32[1024]{0} %gte.2)
ROOT %mul.2 = f32[1024]{0} multiply(f32[1024]{0} %add.2, f32[1024]{0} %gte.3)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
const HloComputation* computation = module->entry_computation();
const HloInstruction* add = computation->root_instruction();
ASSERT_THAT(add, op::Multiply(op::Add(op::Multiply(),
op::GetTupleElement(op::Fusion())),
op::GetTupleElement(op::Fusion())));
const HloInstruction* fusion = add->operand(0)->operand(0);
TF_ASSERT_OK_AND_ASSIGN(bool changed,
RunHloRematerialization(
                              /*memory_limit_bytes=*/11 * 1024, module.get()));
EXPECT_TRUE(changed);
ASSERT_THAT(
add,
op::Multiply(
op::Add(op::Multiply(), op::GetTupleElement(AllOf(
op::Fusion(), ::testing::Ne(fusion)))),
op::GetTupleElement(AllOf(op::Fusion(), ::testing::Ne(fusion)))));
EXPECT_EQ(add->operand(0)->operand(1)->operand(0),
add->operand(1)->operand(0));
CheckForRematInInstructionNames(
::testing::UnitTest::GetInstance()->current_test_info()->name());
}
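// Tuple-shaped fusion rematerialization still applies when the late use is
// reached through bitcasts.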
TEST_F(RecomputeAndCompressHloRematerializationTest,
RematTupleShapeThroughBitcasts) {
const std::string& hlo_string = R"(
HloModule fusion, is_scheduled=true
%add_mul_comp {
%p0 = f32[] parameter(0)
%p1 = f32[] parameter(1)
%x = f32[1024]{0} broadcast(f32[] %p0), dimensions={}
%y = f32[1024]{0} broadcast(f32[] %p1), dimensions={}
%add = f32[1024] add(%x, %y)
%mul = f32[1024] multiply(%x, %y)
ROOT %out = (f32[1024], f32[1024]) tuple(%add, %mul)
}
ENTRY %entry {
%param.0 = f32[] parameter(0)
%param.1 = f32[] parameter(1)
%fus = (f32[1024]{0}, f32[1024]{0}) fusion(%param.0, %param.1), kind=kLoop,
calls=%add_mul_comp
%gte.1 = f32[1024]{0} get-tuple-element(%fus), index=0
%add = f32[1024]{0} add(f32[1024]{0} %gte.1, f32[1024]{0} %gte.1)
%broadcast.1 = f32[1024]{0} broadcast(f32[] %param.0), dimensions={}
%mul = f32[1024]{0} multiply(f32[1024]{0} %add, f32[1024]{0} %broadcast.1)
%gte.2 = f32[1024]{0} get-tuple-element(%fus), index=1
%bc.1 = f32[1024,1]{0,1} bitcast(%mul)
%bc.2 = f32[1024,1]{0,1} bitcast(%gte.2)
ROOT %add.2 = f32[1024,1]{0,1} add(f32[1024,1]{0,1} %bc.1,
f32[1024,1]{0,1} %bc.2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
const HloComputation* computation = module->entry_computation();
const HloInstruction* add = computation->root_instruction();
ASSERT_THAT(add, op::Add(op::Bitcast(op::Multiply()),
op::Bitcast(op::GetTupleElement(op::Fusion()))));
const HloInstruction* fusion = add->operand(0)->operand(0)->operand(0);
TF_ASSERT_OK_AND_ASSIGN(bool changed,
RunHloRematerialization(
                              /*memory_limit_bytes=*/11 * 1024, module.get()));
EXPECT_TRUE(changed);
ASSERT_THAT(add,
op::Add(op::Bitcast(op::Multiply()),
op::Bitcast(AllOf(op::Fusion(), ::testing::Ne(fusion)))));
CheckForRematInInstructionNames(
::testing::UnitTest::GetInstance()->current_test_info()->name());
}
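// A value routed through a tuple/get-tuple-element pair: rematerialization
// recomputes the producing fusion and bypasses the tuple entirely.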
TEST_F(RecomputeAndCompressHloRematerializationTest, RematThroughTuple) {
const std::string& hlo_string = R"(
HloModule fusion, is_scheduled=true
%add_mul_comp {
%p0 = f32[] parameter(0)
%p1 = f32[] parameter(1)
%x = f32[1024]{0} broadcast(f32[] %p0), dimensions={}
%y = f32[1024]{0} broadcast(f32[] %p1), dimensions={}
%add = f32[1024] add(%x, %y)
%mul = f32[1024] multiply(%x, %y)
ROOT %out = (f32[1024], f32[1024]) tuple(%add, %mul)
}
ENTRY %entry {
%param.0 = f32[] parameter(0)
%param.1 = f32[] parameter(1)
%fus = (f32[1024]{0}, f32[1024]{0}) fusion(%param.0, %param.1), kind=kLoop,
calls=%add_mul_comp
%gte.1 = f32[1024]{0} get-tuple-element(%fus), index=0
%gte.3 = f32[1024]{0} get-tuple-element(%fus), index=1
%add = f32[1024]{0} add(f32[1024]{0} %gte.1, f32[1024]{0} %gte.3)
%broadcast.1 = f32[1024]{0} broadcast(f32[] %param.0), dimensions={}
%mul = f32[1024]{0} multiply(f32[1024]{0} %add, f32[1024]{0} %broadcast.1)
%tpl = (f32[1024]{0}, f32[1024]{0}) tuple(%gte.1, %add)
%bc.1 = f32[1024,1]{0,1} bitcast(%mul)
%gte.2 = f32[1024]{0} get-tuple-element(%tpl), index=0
ROOT %add.2 = f32[1024]{0} add(f32[1024]{0} %gte.2, f32[1024]{0} %add)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
const HloComputation* computation = module->entry_computation();
const HloInstruction* add = computation->root_instruction();
ASSERT_THAT(add, op::Add(op::GetTupleElement(
op::Tuple(op::GetTupleElement(op::Fusion()), _)),
op::Add()));
const HloInstruction* tuple = add->operand(0)->operand(0);
const HloInstruction* fusion = tuple->operand(0)->operand(0);
TF_ASSERT_OK_AND_ASSIGN(bool changed,
RunHloRematerialization(
                              /*memory_limit_bytes=*/11 * 1024, module.get()));
EXPECT_TRUE(changed);
ASSERT_THAT(add, op::Add(AllOf(op::Fusion(), ::testing::Ne(tuple),
::testing::Ne(fusion)),
op::Add()));
CheckForRematInInstructionNames(
::testing::UnitTest::GetInstance()->current_test_info()->name());
}
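// Rematerializing an all-gather must assign the clone a fresh channel id
// (here the original id plus one) instead of reusing the existing one.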
TEST_F(RecomputeAndCompressHloRematerializationTest, AllGatherChannelId) {
const std::string& hlo_string = R"(
HloModule fusion, is_scheduled=true
ENTRY %mycomp (param: f32[1]) -> f32[1] {
%param = f32[1]{0} parameter(0)
%reshape = f32[] reshape(f32[1]{0} %param)
%broadcast = f32[256,1]{1,0} broadcast(f32[] %reshape), dimensions={}
%ag = f32[1024,1]{1,0} all-gather(f32[256,1]{1,0} %broadcast), dimensions={0},
channel_id=1, replica_groups={{0,1,2,3}}, use_global_device_ids=true
%bitcast = f32[1024]{0} bitcast(f32[1024,1]{1,0} %ag)
%negate = f32[1024,1]{1,0} negate(f32[1024,1]{1,0} %ag)
%concatenate = f32[2048,1]{1,0} concatenate(f32[1024,1]{1,0} %negate,
f32[1024,1]{1,0} %negate), dimensions={0}
%slice = f32[1,1]{1,0} slice(f32[2048,1]{1,0} %concatenate),
slice={[0:1], [0:1]}
%bitcast.1 = f32[1]{0} bitcast(f32[1,1]{1,0} %slice)
%concatenate.1 = f32[1025]{0} concatenate(f32[1024]{0} %bitcast,
f32[1]{0} %bitcast.1), dimensions={0}
ROOT %slice.1 = f32[1]{0} slice(f32[1025]{0} %concatenate.1), slice={[0:1]}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
auto* computation = module->entry_computation();
const HloInstruction* slice = computation->root_instruction();
ASSERT_THAT(slice, op::Slice(op::Concatenate(
op::Bitcast(op::AllGather(op::Broadcast(_))), _)));
TF_ASSERT_OK_AND_ASSIGN(bool changed,
RunHloRematerialization(
                              /*memory_limit_bytes=*/14 * 1024, module.get()));
EXPECT_TRUE(changed);
EXPECT_EQ(computation->root_instruction(), slice);
const HloInstruction* original_ag = FindInstruction(module.get(), "ag");
const HloInstruction* remat_ag = FindInstruction(module.get(), "ag.remat");
EXPECT_NE(remat_ag, nullptr);
EXPECT_TRUE(original_ag->channel_id().has_value());
EXPECT_TRUE(remat_ag->channel_id().has_value());
EXPECT_EQ(*remat_ag->channel_id(), *original_ag->channel_id() + 1);
CheckForRematInInstructionNames(
::testing::UnitTest::GetInstance()->current_test_info()->name());
}
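// The tuple result of one fusion is consumed wholesale by another fusion;
// the producer is rematerialized for that late consumer.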
TEST_F(RecomputeAndCompressHloRematerializationTest, RematTupleArgFusion) {
const std::string& hlo_string = R"(
HloModule fusion, is_scheduled=true
%add_mul_comp {
%p0 = f32[] parameter(0)
%p1 = f32[] parameter(1)
%x = f32[1024]{0} broadcast(f32[] %p0), dimensions={}
%y = f32[1024]{0} broadcast(f32[] %p1), dimensions={}
%add = f32[1024] add(%x, %y)
%mul = f32[1024] multiply(%x, %y)
ROOT %out = (f32[1024], f32[1024]) tuple(%add, %mul)
}
%add_comp {
%p0 = f32[] parameter(0)
%p1 = f32[] parameter(1)
ROOT %add = add(%p0, %p1)
}
%add_tuple_comp {
%p = (f32[1024]{0}, f32[1024]{0}) parameter(0)
%p0 = get-tuple-element(%p), index=0
%p1 = get-tuple-element(%p), index=1
ROOT %add = add(%p0, %p1)
}
ENTRY %entry {
%param.0 = f32[] parameter(0)
%param.1 = f32[] parameter(1)
%fus = (f32[1024]{0}, f32[1024]{0}) fusion(%param.0, %param.1), kind=kLoop,
calls=%add_mul_comp
%gte.1 = f32[1024]{0} get-tuple-element(%fus), index=0
%gte.3 = f32[1024]{0} get-tuple-element(%fus), index=1
%add.0 = f32[1024]{0} add(f32[1024]{0} %gte.1, f32[1024]{0} %gte.3)
%broadcast.1 = f32[1024]{0} broadcast(f32[] %param.0), dimensions={}
%add.1 = f32[1024]{0} add(f32[1024]{0} %add.0, f32[1024]{0} %broadcast.1)
%c = f32[] constant(0)
%reduce = f32[] reduce(%add.1, %c), dimensions={0}, to_apply=add_comp
%fus.1 = f32[1024]{0} fusion(%fus), kind=kLoop, calls=%add_tuple_comp
ROOT %tuple = tuple(%reduce, %fus.1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
const HloComputation* computation = module->entry_computation();
const HloInstruction* root = computation->root_instruction();
ASSERT_THAT(root, op::Tuple(op::Reduce(), op::Fusion(op::Fusion())));
const HloInstruction* fusion1 = root->operand(1);
const HloInstruction* fusion0 = fusion1->operand(0);
TF_ASSERT_OK_AND_ASSIGN(bool changed,
RunHloRematerialization(
                              /*memory_limit_bytes=*/11 * 1024, module.get()));
EXPECT_TRUE(changed);
ASSERT_THAT(
root, op::Tuple(op::Reduce(),
op::Fusion(AllOf(op::Fusion(), ::testing::Ne(fusion0)))));
CheckForRematInInstructionNames(
::testing::UnitTest::GetInstance()->current_test_info()->name());
}
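// Rematerializing a fusion containing a custom-call keeps the computations
// called by both the original and the cloned custom-call in the schedule.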
TEST_F(RecomputeAndCompressHloRematerializationTest,
RematFusionUpdateSchedule) {
const std::string& hlo_string = R"(
HloModule fusion, is_scheduled=true
%custom_call_comp {
%p = f32[1024]{0} parameter(0)
ROOT %n = f32[1024]{0} negate(p)
}
%add_mul_comp {
%p0 = f32[] parameter(0)
%p1 = f32[] parameter(1)
%x = f32[1024]{0} broadcast(f32[] %p0), dimensions={}
%y = f32[1024]{0} broadcast(f32[] %p1), dimensions={}
%add = f32[1024] add(%x, %y)
%mul = f32[1024] multiply(%x, %y)
%c = f32[1024] custom-call(%mul), custom_call_target="SomeCall", called_computations={custom_call_comp}
ROOT %out = (f32[1024], f32[1024]) tuple(%add, %c)
}
ENTRY %entry {
%param.0 = f32[] parameter(0)
%param.1 = f32[] parameter(1)
%fus = (f32[1024]{0}, f32[1024]{0}) fusion(%param.0, %param.1), kind=kLoop,
calls=%add_mul_comp
%gte.1 = f32[1024]{0} get-tuple-element(%fus), index=0
%add = f32[1024]{0} add(f32[1024]{0} %gte.1, f32[1024]{0} %gte.1)
%broadcast.1 = f32[1024]{0} broadcast(f32[] %param.0), dimensions={}
%mul = f32[1024]{0} multiply(f32[1024]{0} %add, f32[1024]{0} %broadcast.1)
%gte.2 = f32[1024]{0} get-tuple-element(%fus), index=1
%gte.3 = f32[1024]{0} get-tuple-element(%fus), index=0
%add.2 = f32[1024]{0} add(f32[1024]{0} %mul, f32[1024]{0} %gte.2)
ROOT %mul.2 = f32[1024]{0} multiply(f32[1024]{0} %add.2, f32[1024]{0} %gte.3)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
const HloComputation* computation = module->entry_computation();
TF_ASSERT_OK_AND_ASSIGN(bool changed,
RunHloRematerialization(
                              /*memory_limit_bytes=*/11 * 1024, module.get()));
EXPECT_TRUE(changed);
XLA_VLOG_LINES(1, module->ToString());
const HloInstruction* add = computation->root_instruction();
const HloInstruction* fusion = add->operand(0)->operand(0);
ASSERT_THAT(
add,
op::Multiply(
op::Add(op::Multiply(), op::GetTupleElement(AllOf(
op::Fusion(), ::testing::Ne(fusion)))),
op::GetTupleElement(AllOf(op::Fusion(), ::testing::Ne(fusion)))));
const HloInstruction* fusion0 = add->operand(0)->operand(1)->operand(0);
const HloInstruction* fusion1 = add->operand(1)->operand(0);
auto it = std::find_if(fusion0->fused_instructions().begin(),
fusion0->fused_instructions().end(),
[](const HloInstruction* instr) {
return instr->opcode() == HloOpcode::kCustomCall;
});
ASSERT_NE(it, fusion0->fused_instructions().end());
auto it2 = std::find_if(fusion1->fused_instructions().begin(),
fusion1->fused_instructions().end(),
[](const HloInstruction* instr) {
return instr->opcode() == HloOpcode::kCustomCall;
});
ASSERT_NE(it2, fusion1->fused_instructions().end());
EXPECT_TRUE(module->schedule().is_computation_scheduled(
(*it)->called_computations()[0]));
EXPECT_TRUE(module->schedule().is_computation_scheduled(
(*it2)->called_computations()[0]));
CheckForRematInInstructionNames(
::testing::UnitTest::GetInstance()->current_test_info()->name());
}
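// Fixture for compression-based rematerialization. Buffer sizes pad the
// physical minor-most dimension to a multiple of 64, and for rank-2 shapes
// whose minor dimension is the smaller one ChooseCompactLayoutForShape
// proposes the transposed layout, so compression can actually save memory.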
class CompressingRematerializationTest : public RematerializationTestBase {
protected:
static int64_t ShapeSizePadMinorTo64(const Shape& shape) {
if (shape.IsTuple()) {
return 4;
}
Shape descending_shape =
ShapeUtil::MakeShapeWithDescendingLayoutAndSamePhysicalLayout(shape);
int64_t size =
ShapeUtil::ByteSizeOfPrimitiveType(descending_shape.element_type());
for (int64_t i = 0; i < descending_shape.rank(); ++i) {
int64_t dim = descending_shape.dimensions(i);
if (i == descending_shape.rank() - 1) {
dim = RoundUpTo<int64_t>(dim, 64);
}
size *= dim;
}
return size;
}
static absl::StatusOr<Shape> ChooseCompactLayoutForShape(const Shape& shape) {
if (shape.rank() != 2) {
return shape;
}
Shape result = shape;
Layout layout = result.layout();
int64_t most_minor_index = layout.minor_to_major()[0];
int64_t second_minor_index = layout.minor_to_major()[1];
int64_t most_minor = result.dimensions(most_minor_index);
int64_t second_minor = result.dimensions(second_minor_index);
if (most_minor < second_minor) {
Layout new_layout = layout;
new_layout.set_minor_to_major(0, second_minor_index);
new_layout.set_minor_to_major(1, most_minor_index);
*result.mutable_layout() = new_layout;
}
return result;
}
absl::StatusOr<bool> RunHloRematerialization(int64_t memory_limit_bytes,
HloModule* module,
int64_t min_remat_size = 0) {
TF_EXPECT_OK(verifier().Run(module).status());
HloRematerialization::RematerializationModeConfig config(
        /*recompute=*/false, /*compress=*/true, /*host_offload=*/false);
auto shape_size_func = [](const Shape& shape) {
return ShapeSizePadMinorTo64(shape);
};
HloCostAnalysis cost_analysis(shape_size_func);
HloRematerialization::Options options(
cost_analysis, config, memory_limit_bytes,
        /*block_size_limit=*/1, /*block_rematerialization_factor=*/1,
min_remat_size, ChooseCompactLayoutForShape,
        /*host_memory_offload_config=*/std::nullopt,
        /*async_computation_parallelism=*/{});
HloRematerialization::RematerializationSizes sizes;
HloRematerialization remat(options, sizes);
return remat.Run(module);
}
};
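// With min_remat_size at 10KiB only the large broadcast qualifies: reduce.1
// reads it through a compress/decompress copy pair, while the small
// broadcast feeding reduce.2 is left in place.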
TEST_F(CompressingRematerializationTest, OnlyRematBigBuffer) {
const std::string& hlo_string = R"(
HloModule fusion, is_scheduled=true
%add_float {
%x = f32[] parameter(0)
%y = f32[] parameter(1)
ROOT %add = f32[] add(f32[] %x, f32[] %y)
}
ENTRY %entry {
%param.0 = f32[] parameter(0)
%constant = f32[] constant(0)
%broadcast.0 = f32[64,2]{1,0} broadcast(f32[] %param.0), dimensions={}
%broadcast.1 = f32[10,2]{1,0} broadcast(f32[] %param.0), dimensions={}
%negate = f32[64,2]{1,0} negate(f32[64,2]{1,0} broadcast.0)
%reduce.0 = f32[] reduce(f32[64,2]{1,0} %negate, f32[] %constant), dimensions={1, 0}, to_apply=%add_float
%reduce.1 = f32[] reduce(f32[64,2]{1,0} %broadcast.0, f32[] %constant), dimensions={1, 0}, to_apply=%add_float
%reduce.2 = f32[] reduce(f32[10,2]{1,0} %broadcast.1, f32[] %constant), dimensions={1, 0}, to_apply=%add_float
%add = f32[] add(f32[] %reduce.0, f32[] %reduce.1)
ROOT %add.2 = f32[] add(f32[] %add, f32[] %reduce.2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloRematerialization(
                                            /*memory_limit_bytes=*/30 * 1024,
                                            module.get(),
                                            /*min_remat_size=*/10 * 1024));
EXPECT_TRUE(changed);
HloInstruction* broadcast =
module->entry_computation()->GetInstructionWithName("broadcast.0");
HloInstruction* broadcast_2 =
module->entry_computation()->GetInstructionWithName("broadcast.1");
HloInstruction* reduce =
module->entry_computation()->GetInstructionWithName("reduce.1");
HloInstruction* reduce_2 =
module->entry_computation()->GetInstructionWithName("reduce.2");
EXPECT_THAT(reduce,
op::Reduce(op::Copy(op::Copy(broadcast)), op::Constant()));
EXPECT_THAT(reduce_2, op::Reduce(broadcast_2, op::Constant()));
}
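// Basic compression: the long-lived broadcast is compressed after its first
// uses and decompressed again for reduce.1, visible as a copy-of-copy chain.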
TEST_F(CompressingRematerializationTest, SingleRemat) {
const std::string& hlo_string = R"(
HloModule fusion, is_scheduled=true
%add_float {
%x = f32[] parameter(0)
%y = f32[] parameter(1)
ROOT %add = f32[] add(f32[] %x, f32[] %y)
}
ENTRY %entry {
%param.0 = f32[] parameter(0)
%constant = f32[] constant(0)
%broadcast.0 = f32[64,2]{1,0} broadcast(f32[] %param.0), dimensions={}
%negate = f32[64,2]{1,0} negate(f32[64,2]{1,0} broadcast.0)
%reduce.0 = f32[] reduce(f32[64,2]{1,0} %negate, f32[] %constant), dimensions={1, 0}, to_apply=%add_float
%reduce.1 = f32[] reduce(f32[64,2]{1,0} %broadcast.0, f32[] %constant), dimensions={1, 0}, to_apply=%add_float
%add = f32[] add(f32[] %reduce.0, f32[] %reduce.1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed,
RunHloRematerialization(
                              /*memory_limit_bytes=*/30 * 1024, module.get()));
EXPECT_TRUE(changed);
HloInstruction* broadcast =
module->entry_computation()->GetInstructionWithName("broadcast.0");
HloInstruction* reduce =
module->entry_computation()->GetInstructionWithName("reduce.1");
EXPECT_THAT(reduce,
op::Reduce(op::Copy(op::Copy(broadcast)), op::Constant()));
}
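// Compression that cannot reduce the peak (padding makes the compact layout
// about as large as the original) must be rejected; the module is unchanged.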
TEST_F(CompressingRematerializationTest, AvoidPathologicalCompress) {
const std::string& hlo_string = R"(
HloModule fusion, is_scheduled=true
%add_float {
%x = f32[] parameter(0)
%y = f32[] parameter(1)
ROOT %add = f32[] add(f32[] %x, f32[] %y)
}
ENTRY %entry {
%param.0 = f32[] parameter(0)
%constant = f32[] constant(0)
%broadcast.0 = f32[63,60]{1,0} broadcast(f32[] %param.0), dimensions={}
%broadcast.1 = f32[16,64]{1,0} broadcast(f32[] %param.0), dimensions={}
%reduce.0 = f32[] reduce(%broadcast.1, f32[] %constant), dimensions={1, 0}, to_apply=%add_float
%reduce.1 = f32[] reduce(%broadcast.0, f32[] %constant), dimensions={1, 0}, to_apply=%add_float
%add = f32[] add(f32[] %reduce.0, f32[] %reduce.1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed,
RunHloRematerialization(
                              /*memory_limit_bytes=*/16 * 1024, module.get()));
EXPECT_FALSE(changed);
HloInstruction* broadcast =
module->entry_computation()->GetInstructionWithName("broadcast.0");
HloInstruction* reduce =
module->entry_computation()->GetInstructionWithName("reduce.1");
EXPECT_THAT(reduce, op::Reduce(broadcast, op::Constant()));
}
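// Several late uses of the same compressed value should share one
// compress/decompress pair: reduce.2 and reduce.3 both read the broadcast
// through the same copy-of-copy chain.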
TEST_F(CompressingRematerializationTest, AllUsersUseSameCopy) {
const std::string& hlo_string = R"(
HloModule fusion, is_scheduled=true
%add_float {
%x = f32[] parameter(0)
%y = f32[] parameter(1)
ROOT %add = f32[] add(f32[] %x, f32[] %y)
}
ENTRY %entry {
%param.0 = f32[] parameter(0)
%constant = f32[] constant(0)
%broadcast.0 = f32[64,2]{1,0} broadcast(f32[] %param.0), dimensions={}
%negate = f32[64,2]{1,0} negate(f32[64,2]{1,0} broadcast.0)
%reduce.0 = f32[] reduce(f32[64,2]{1,0} %negate, f32[] %constant), dimensions={1, 0}, to_apply=%add_float
%reduce.1 = f32[] reduce(f32[64,2]{1,0} %negate, f32[] %constant), dimensions={1, 0}, to_apply=%add_float
%reduce.2 = f32[] reduce(f32[64,2]{1,0} %broadcast.0, f32[] %constant), dimensions={1, 0}, to_apply=%add_float
%add = f32[] add(f32[] %reduce.0, f32[] %reduce.1)
%reduce.3 = f32[] reduce(f32[64,2]{1,0} %broadcast.0, f32[] %constant), dimensions={1, 0}, to_apply=%add_float
%add.2 = f32[] add(f32[] %reduce.2, f32[] %reduce.3)
ROOT %tuple = (f32[], f32[]) tuple (f32[] add, f32[] add.2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed,
RunHloRematerialization(
                              /*memory_limit_bytes=*/30 * 1024, module.get()));
EXPECT_TRUE(changed);
HloInstruction* broadcast =
module->entry_computation()->GetInstructionWithName("broadcast.0");
HloInstruction* reduce_2 =
module->entry_computation()->GetInstructionWithName("reduce.2");
HloInstruction* reduce_3 =
module->entry_computation()->GetInstructionWithName("reduce.3");
EXPECT_THAT(reduce_2,
op::Reduce(op::Copy(op::Copy(broadcast)), op::Constant()));
EXPECT_THAT(reduce_3,
op::Reduce(op::Copy(op::Copy(broadcast)), op::Constant()));
}
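// Fixture for host-offloading rematerialization: long-lived values are
// copied into host memory (space color 5) and copied back before their late
// uses. Copy bandwidths and compute rates are configurable so tests can
// decide when offloading is profitable.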
class OffloadingRematerializationTest : public RematerializationTestBase {
protected:
absl::StatusOr<bool> RunHloRematerialization(int64_t memory_limit_bytes,
HloModule* module,
int64_t min_remat_size = 0) {
TF_EXPECT_OK(verifier().Run(module).status());
if (!module->has_schedule()) {
HloMemoryScheduler scheduler(
[](const BufferValue& buffer) { return ByteSizeOf(buffer.shape()); },
ComputationSchedulerToModuleScheduler(DefaultMemoryScheduler));
TF_EXPECT_OK(scheduler.Run(module).status());
}
HloCostAnalysis::Options hlo_cost_analysis_options;
hlo_cost_analysis_options.shape_size = [](const Shape& shape) {
return ByteSizeOf(shape);
};
hlo_cost_analysis_options.set_flops_per_second(flops_per_second_);
hlo_cost_analysis_options.set_transcendentals_per_second(
transcendentals_per_second_);
HloCostAnalysis cost_analysis(hlo_cost_analysis_options);
HloRematerialization::RematerializationModeConfig config(
        /*recompute=*/false, /*compress=*/false, /*host_offload=*/true);
HloRematerialization::HostMemoryOffloadConfig host_memory_offload_config(
kHostMemorySpaceColor, copy_to_host_speed_, copy_from_host_speed_);
HloRematerialization::Options options(
cost_analysis, config, memory_limit_bytes,
        /*block_size_limit=*/1, /*block_rematerialization_factor=*/1,
        min_remat_size, /*compact_shape_function=*/nullptr,
host_memory_offload_config,
        /*async_computation_parallelism=*/{});
HloRematerialization::RematerializationSizes sizes;
HloRematerialization remat(options, sizes);
return remat.Run(module);
}
void SetCopyToHostSpeed(float val) { copy_to_host_speed_ = val; }
void SetCopyFromHostSpeed(float val) { copy_from_host_speed_ = val; }
void SetFlopsPerSecond(float val) { flops_per_second_ = val; }
void SetTranscendentalsPerSecond(float val) {
transcendentals_per_second_ = val;
}
static constexpr const int64_t kHostMemorySpaceColor{5};
private:
float copy_to_host_speed_{1.0f};
float copy_from_host_speed_{1.0f};
float flops_per_second_{1.0f};
float transcendentals_per_second_{1.0f};
};
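// res_3 and res_4 stay live across a long stretch of compute; with host
// copies fast relative to compute, both are offloaded and brought back
// (async copy to host memory and back) just before res_9 and res_10 need
// them.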
TEST_F(OffloadingRematerializationTest, BasicSuccessfulHostOffload) {
const std::string& hlo_string = R"(
HloModule MyModule, is_scheduled=true, entry_computation_layout={(f32[1024]{0}, f32[1024]{0})->f32[1024]{0}}
ENTRY MyModule {
param_0 = f32[1024]{0} parameter(0)
param_1 = f32[1024]{0} parameter(1)
res_3 = f32[1024]{0} add(param_0, param_1)
res_4 = f32[1024]{0} tanh(res_3)
res_5 = f32[1024]{0} tanh(res_4)
res_6 = f32[1024]{0} tanh(res_5)
res_7 = f32[1024]{0} add(res_6, res_6)
res_8 = f32[1024]{0} add(res_7, res_5)
res_9 = f32[1024]{0} add(res_8, res_4)
res_10 = f32[1024]{0} add(res_9, res_3)
ROOT res_11 = f32[1024]{0} tanh(res_10)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
SetCopyToHostSpeed(4.0 * 1024);
SetCopyFromHostSpeed(4.0 * 1024);
SetFlopsPerSecond(2 * 1024);
SetTranscendentalsPerSecond(2 * 1024);
TF_ASSERT_OK_AND_ASSIGN(bool changed,
RunHloRematerialization(
                              /*memory_limit_bytes=*/10 * 1024, module.get()));
ASSERT_TRUE(changed);
ASSERT_TRUE(module->has_schedule());
auto res_3_matcher = op::Add(op::Parameter(), op::Parameter());
auto res_3_rematted_matcher = op::AsyncCopy(
xla::Layout::kDefaultMemorySpace, kHostMemorySpaceColor,
op::AsyncCopy(kHostMemorySpaceColor, xla::Layout::kDefaultMemorySpace,
res_3_matcher));
auto res_4_matcher = op::Tanh(res_3_matcher);
auto res_4_rematted_matcher = op::AsyncCopy(
xla::Layout::kDefaultMemorySpace, kHostMemorySpaceColor,
op::AsyncCopy(kHostMemorySpaceColor, xla::Layout::kDefaultMemorySpace,
res_4_matcher));
auto res_5_matcher = op::Tanh(res_4_matcher);
auto res_6_matcher = op::Tanh(res_5_matcher);
auto res_7_matcher = op::Add(res_6_matcher, res_6_matcher);
auto res_8_matcher = op::Add(res_7_matcher, res_5_matcher);
auto res_9_matcher = op::Add(res_8_matcher, res_4_rematted_matcher);
auto res_10_matcher = op::Add(res_9_matcher, res_3_rematted_matcher);
const auto instruction_sequence =
module->schedule().sequence(module->entry_computation());
ASSERT_THAT(instruction_sequence.instructions().back(),
op::Tanh(res_10_matcher));
}
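// Same program, but res_3 is also consumed through a bitcast, so it is not
// offloaded; only res_4 gets the host round trip.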
TEST_F(OffloadingRematerializationTest, SkipOffloadWhenBitcastIsInvolved) {
const std::string& hlo_string = R"(
HloModule MyModule, is_scheduled=true, entry_computation_layout={(f32[1024]{0}, f32[1024]{0})->f32[1024]{0}}
ENTRY MyModule {
param_0 = f32[1024]{0} parameter(0)
param_1 = f32[1024]{0} parameter(1)
res_3 = f32[1024]{0} add(param_0, param_1)
bitcast = f32[1024]{0} bitcast(res_3)
res_4 = f32[1024]{0} tanh(res_3)
res_5 = f32[1024]{0} tanh(res_4)
res_6 = f32[1024]{0} tanh(res_5)
res_7 = f32[1024]{0} add(res_6, res_6)
res_8 = f32[1024]{0} add(res_7, res_5)
res_9 = f32[1024]{0} add(res_8, res_4)
res_10 = f32[1024]{0} add(res_9, bitcast)
ROOT res_11 = f32[1024]{0} tanh(res_10)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
SetCopyToHostSpeed(4.0 * 1024);
SetCopyFromHostSpeed(4.0 * 1024);
SetFlopsPerSecond(2 * 1024);
SetTranscendentalsPerSecond(2 * 1024);
TF_ASSERT_OK_AND_ASSIGN(bool changed,
RunHloRematerialization(
                              /*memory_limit_bytes=*/10 * 1024, module.get()));
ASSERT_TRUE(changed);
ASSERT_TRUE(module->has_schedule());
auto res_3_matcher = op::Add(op::Parameter(), op::Parameter());
auto res_4_matcher = op::Tanh(res_3_matcher);
auto res_4_rematted_matcher = op::AsyncCopy(
xla::Layout::kDefaultMemorySpace, kHostMemorySpaceColor,
op::AsyncCopy(kHostMemorySpaceColor, xla::Layout::kDefaultMemorySpace,
res_4_matcher));
auto res_5_matcher = op::Tanh(res_4_matcher);
auto res_6_matcher = op::Tanh(res_5_matcher);
auto res_7_matcher = op::Add(res_6_matcher, res_6_matcher);
auto res_8_matcher = op::Add(res_7_matcher, res_5_matcher);
auto res_9_matcher = op::Add(res_8_matcher, res_4_rematted_matcher);
auto res_10_matcher = op::Add(res_9_matcher, op::Bitcast(res_3_matcher));
const auto instruction_sequence =
module->schedule().sequence(module->entry_computation());
ASSERT_THAT(instruction_sequence.instructions().back(),
op::Tanh(res_10_matcher));
}
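// Parameterized over whether the large broadcast escapes through the tuple
// (indirect use). Indirectly used, remat plus DCE collapses the computation
// to three instructions; directly used, the broadcast is rematerialized and
// the count grows from eight to nine.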
class IndirectUseTest : public RecomputeAndCompressHloRematerializationTest,
public ::testing::WithParamInterface<bool> {};
TEST_P(IndirectUseTest, IndirectUseRematerialized) {
const bool indirectly_used = GetParam();
auto module = CreateNewVerifiedModule();
HloComputation* subcomputation = nullptr;
{
auto builder = HloComputation::Builder(TestName() + ".subcomputation");
auto param = builder.AddInstruction(
HloInstruction::CreateParameter(0, vec1024_shape_, "param"));
auto concat = builder.AddInstruction(HloInstruction::CreateConcatenate(
ShapeUtil::MakeShape(xla::F32, {2048}), {param, param},
        /*dimension=*/0));
builder.AddInstruction(HloInstruction::CreateSlice(
        vec1024_shape_, concat, /*start_indices=*/{0},
        /*limit_indices=*/{1024}, /*strides=*/{1}));
subcomputation = module->AddEmbeddedComputation(builder.Build());
}
auto builder = HloComputation::Builder(TestName());
auto param = builder.AddInstruction(
HloInstruction::CreateParameter(0, scalar_shape_, "param"));
auto bcast = builder.AddInstruction(
HloInstruction::CreateBroadcast(vec1024_shape_, param, {}));
auto add_1 = builder.AddInstruction(HloInstruction::CreateBinary(
vec1024_shape_, HloOpcode::kAdd, bcast, bcast));
auto call_1 = builder.AddInstruction(
HloInstruction::CreateCall(vec1024_shape_, {add_1}, subcomputation));
auto add_2 = builder.AddInstruction(HloInstruction::CreateBinary(
vec1024_shape_, HloOpcode::kAdd, bcast, call_1));
auto tuple =
builder.AddInstruction(HloInstruction::CreateTuple({bcast, add_2}));
auto gte = builder.AddInstruction(HloInstruction::CreateGetTupleElement(
vec1024_shape_, tuple, indirectly_used ? 0 : 1));
builder.AddInstruction(
HloInstruction::CreateUnary(vec1024_shape_, HloOpcode::kNegate, gte));
HloComputation* entry_computation =
module->AddEntryComputation(builder.Build());
EXPECT_EQ(entry_computation->instruction_count(), 8);
TF_ASSERT_OK_AND_ASSIGN(bool changed,
RunHloRematerialization(
                              /*memory_limit_bytes=*/22 * 1024, module.get()));
if (indirectly_used) {
EXPECT_TRUE(changed);
EXPECT_EQ(entry_computation->instruction_count(), 3);
} else {
EXPECT_TRUE(changed);
EXPECT_EQ(entry_computation->instruction_count(), 9);
}
CheckForRematInInstructionNames(
::testing::UnitTest::GetInstance()->current_test_info()->name());
}
INSTANTIATE_TEST_SUITE_P(IndirectUseTestInstantiation, IndirectUseTest,
::testing::Values(true, false));
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_rematerialization.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_rematerialization_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
e9469ed1-6c77-4323-9ef8-b4a9ff678ff6 | cpp | tensorflow/tensorflow | type_to_shape | third_party/xla/xla/hlo/translate/mhlo_to_hlo/type_to_shape.cc | third_party/xla/xla/hlo/translate/mhlo_to_hlo/type_to_shape_test.cc | #include "xla/hlo/translate/mhlo_to_hlo/type_to_shape.h"
#include <algorithm>
#include <cstdint>
#include <numeric>
#include <optional>
#include <tuple>
#include <utility>
#include <vector>
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/LogicalResult.h"
#include "mlir/Dialect/SparseTensor/IR/Enums.h"
#include "mlir/Dialect/SparseTensor/IR/SparseTensor.h"
#include "mlir/IR/AffineMap.h"
#include "mlir/IR/BuiltinTypeInterfaces.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/Diagnostics.h"
#include "mlir/IR/Location.h"
#include "mlir/Support/DebugStringHelper.h"
#include "mlir/Support/LLVM.h"
#include "stablehlo/dialect/StablehloOps.h"
#include "xla/mlir/utils/type_util.h"
#include "xla/mlir_hlo/mhlo/IR/hlo_ops.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/xla_data.pb.h"
using ::int64_t;
using mlir::MemRefType;
using mlir::RankedTensorType;
using mlir::ShapedType;
using mlir::VectorType;
using mlir::mhlo::TypeExtensionsAttr;
using xla::PrimitiveType;
namespace xla {
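// Maps an MLIR sparse-tensor level type to the matching XLA DimLevelType
// plus its unique/ordered bits; returns nullopt for unsupported formats.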
std::optional<std::tuple<DimLevelType, bool, bool>> ConvertDimLevelType(
mlir::sparse_tensor::LevelType lt) {
auto f = mlir::sparse_tensor::getLevelFormat(lt);
if (!f) return std::nullopt;
bool unique = mlir::sparse_tensor::isUniqueLT(lt);
bool ordered = mlir::sparse_tensor::isOrderedLT(lt);
switch (*f) {
case mlir::sparse_tensor::LevelFormat::Singleton:
return std::make_tuple(DimLevelType::DIM_SINGLETON, unique, ordered);
case mlir::sparse_tensor::LevelFormat::Compressed:
return std::make_tuple(DimLevelType::DIM_COMPRESSED, unique, ordered);
case mlir::sparse_tensor::LevelFormat::Dense:
return std::make_tuple(DimLevelType::DIM_DENSE, unique, ordered);
case mlir::sparse_tensor::LevelFormat::LooseCompressed:
return std::make_tuple(DimLevelType::DIM_LOOSE_COMPRESSED, unique,
ordered);
default:
return std::nullopt;
}
}
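// Converts an MLIR type to an XLA Shape. Types that cannot be represented
// (unsupported element types, unranked tensors, odd layouts) convert to the
// default empty Shape.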
Shape TypeToShape(mlir::Type type) {
PrimitiveType ptype = ConvertMlirTypeToPrimitiveType(type);
if (ptype != PrimitiveType::PRIMITIVE_TYPE_INVALID)
return ShapeUtil::MakeShape(ptype, {});
if (type.isIntOrFloat()) {
auto* context = type.getContext();
mlir::emitError(mlir::UnknownLoc::get(context))
<< "lowering should have been handled by primitive type lowering for "
<< debugString(type);
} else if (auto v = mlir::dyn_cast<mlir::VectorType>(type)) {
llvm::SmallVector<int64_t, 4> span(v.getShape().begin(),
v.getShape().end());
mlir::Type element_type = v.getElementType();
PrimitiveType primitive_type = ConvertMlirTypeToPrimitiveType(element_type);
if (primitive_type != PrimitiveType::PRIMITIVE_TYPE_INVALID)
return ShapeUtil::MakeShape(primitive_type, span);
} else if (auto m = mlir::dyn_cast<mlir::MemRefType>(type)) {
llvm::SmallVector<int64_t, 6> span(m.getShape().begin(),
m.getShape().end());
mlir::Type element_type = m.getElementType();
if (auto v = mlir::dyn_cast<mlir::VectorType>(element_type)) {
element_type = v.getElementType();
span.insert(span.end(), v.getShape().begin(), v.getShape().end());
}
PrimitiveType primitive_type = ConvertMlirTypeToPrimitiveType(element_type);
if (primitive_type == PrimitiveType::PRIMITIVE_TYPE_INVALID) return {};
if (m.getLayout().isIdentity())
return ShapeUtil::MakeShape(primitive_type, span);
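    // Non-identity layout: recover a minor-to-major ordering from the
    // strides, giving up (empty Shape) unless they describe a dense layout
    // (size-1 dimensions may have arbitrary strides).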
llvm::SmallVector<int64_t, 4> strides;
int64_t offset;
if (failed(mlir::getStridesAndOffset(m, strides, offset))) return {};
llvm::SmallVector<std::pair<int64_t, int>, 4> strides_with_indices;
for (const auto& e : llvm::enumerate(strides)) {
strides_with_indices.push_back({e.value(), e.index()});
}
std::stable_sort(strides_with_indices.begin(), strides_with_indices.end());
llvm::SmallVector<int64_t, 4> minor_to_major;
int64_t stride = 1;
for (const auto& pr : strides_with_indices) {
minor_to_major.push_back(pr.second);
if (stride != pr.first && m.getShape()[pr.second] != 1) return {};
stride *= m.getShape()[pr.second];
}
llvm::SmallVector<int64_t, 4> dimensions(m.getShape().begin(),
m.getShape().end());
return ::xla::ShapeUtil::MakeShapeWithDenseLayout(
primitive_type, dimensions, minor_to_major);
} else if (auto t = mlir::dyn_cast<mlir::RankedTensorType>(type)) {
int64_t rank = t.getRank();
llvm::SmallVector<int64_t, 4> bounds;
if (auto extn =
mlir::dyn_cast_or_null<TypeExtensionsAttr>(t.getEncoding())) {
bounds = llvm::to_vector<4>(extn.getBounds());
} else {
bounds.assign(rank, ShapedType::kDynamic);
}
llvm::SmallVector<int64_t, 4> shape(rank, mlir::ShapedType::kDynamic);
std::vector<bool> is_dynamic(rank, false);
for (int64_t dim = 0; dim < rank; ++dim) {
int64_t size = t.getDimSize(dim);
if (size == ShapedType::kDynamic) {
shape[dim] = bounds[dim] != ShapedType::kDynamic
? bounds[dim]
: Shape::kUnboundedSize;
is_dynamic[dim] = true;
} else {
if (bounds[dim] != ShapedType::kDynamic) return {};
shape[dim] = size;
}
}
PrimitiveType primitive_type =
ConvertMlirTypeToPrimitiveType(t.getElementType());
if (primitive_type == PrimitiveType::PRIMITIVE_TYPE_INVALID) return {};
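    // Sparse tensors: only static shapes with 32-bit position/coordinate
    // widths are supported; the level ordering comes from the encoding's
    // dimension-to-level map.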
if (auto sparse = mlir::sparse_tensor::getSparseTensorEncoding(type)) {
if (!t.hasStaticShape()) return {};
if (sparse.getPosWidth() != 32 || sparse.getCrdWidth() != 32) return {};
llvm::SmallVector<DimLevelType, 3> lvl_types;
llvm::SmallVector<bool, 3> level_unique;
llvm::SmallVector<bool, 3> level_ordered;
for (auto lt : sparse.getLvlTypes()) {
auto new_lt = ConvertDimLevelType(lt);
if (!new_lt) return {};
lvl_types.push_back(std::get<0>(*new_lt));
level_unique.push_back(std::get<1>(*new_lt));
level_ordered.push_back(std::get<2>(*new_lt));
}
std::vector<int64_t> ordering(rank);
std::iota(ordering.rbegin(), ordering.rend(), 0);
auto dimToLvl = sparse.getDimToLvl()
? sparse.getDimToLvl()
: mlir::AffineMap::getMultiDimIdentityMap(
rank, sparse.getContext());
auto final_ordering = mlir::applyPermutationMap(
dimToLvl, llvm::ArrayRef<int64_t>(ordering));
auto sparse_shape = ::xla::ShapeUtil::MakeShapeWithSparseLayout(
primitive_type, shape, final_ordering, lvl_types, level_unique,
level_ordered);
return sparse_shape;
}
return ShapeUtil::MakeShape(primitive_type, shape, is_dynamic);
} else if (auto tuple_type = mlir::dyn_cast<mlir::TupleType>(type)) {
llvm::SmallVector<Shape, 4> shapes;
shapes.reserve(tuple_type.size());
for (mlir::Type sub_type : tuple_type.getTypes()) {
shapes.push_back(TypeToShape(sub_type));
}
return ShapeUtil::MakeTupleShape(shapes);
} else if (mlir::isa<mlir::mhlo::TokenType>(type) ||
mlir::isa<mlir::stablehlo::TokenType>(type)) {
return ShapeUtil::MakeTokenShape();
} else if (auto bundle_type =
mlir::dyn_cast<mlir::mhlo::AsyncBundleType>(type)) {
auto tuple_type =
mlir::TupleType::get(type.getContext(), bundle_type.getTypes());
return TypeToShape(tuple_type);
}
return {};
}
} | #include "xla/hlo/translate/mhlo_to_hlo/type_to_shape.h"
#include <iostream>
#include <utility>
#include "absl/status/statusor.h"
#include "llvm/ADT/SmallVector.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinTypeInterfaces.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/MLIRContext.h"
#include "xla/hlo/translate/hlo_to_mhlo/hlo_utils.h"
#include "xla/mlir_hlo/mhlo/IR/hlo_ops.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/protobuf.h"
using mlir::Builder;
using mlir::MemRefType;
using mlir::MLIRContext;
using mlir::RankedTensorType;
using mlir::UnrankedTensorType;
using mlir::VectorType;
namespace xla {
namespace {
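// Matches a protobuf message by its serialized form, so Shape protos can be
// compared with EqualsProto(expected.ToProto()).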
class ProtoStringMatcher {
public:
explicit ProtoStringMatcher(const tsl::protobuf::Message& expected)
: expected_(expected.SerializeAsString()) {}
template <typename Message>
bool MatchAndExplain(const Message& p, testing::MatchResultListener*) const {
return p.SerializeAsString() == expected_;
}
void DescribeTo(::std::ostream* os) const { *os << expected_; }
void DescribeNegationTo(::std::ostream* os) const {
*os << "not equal to expected message: " << expected_;
}
private:
const std::string expected_;
};
inline ::testing::PolymorphicMatcher<ProtoStringMatcher> EqualsProto(
const tsl::protobuf::Message& x) {
return ::testing::MakePolymorphicMatcher(ProtoStringMatcher(x));
}
TEST(TypeToShapeTest, ConvertBasicTypesToTypes) {
MLIRContext context;
Builder b(&context);
EXPECT_TRUE(
ShapeUtil::IsScalarWithElementType(TypeToShape(b.getF32Type()), F32));
EXPECT_THAT(
TypeToShape(VectorType::get({8, 128}, b.getIntegerType(32))).ToProto(),
EqualsProto(
ShapeUtil::MakeShape(PrimitiveType::S32, {8, 128}).ToProto()));
EXPECT_THAT(
TypeToShape(VectorType::get({8, 128}, b.getF32Type())).ToProto(),
EqualsProto(
ShapeUtil::MakeShape(PrimitiveType::F32, {8, 128}).ToProto()));
EXPECT_THAT(
TypeToShape(VectorType::get({8, 128}, b.getIntegerType(17))).ToProto(),
EqualsProto(Shape().ToProto()));
}
TEST(TypeToShapeTest, ConvertMemRefTypeToTypes) {
MLIRContext context;
Builder b(&context);
EXPECT_THAT(
TypeToShape(MemRefType::get({8, 128}, b.getF32Type())).ToProto(),
EqualsProto(
ShapeUtil::MakeShape(PrimitiveType::F32, {8, 128}).ToProto()));
EXPECT_THAT(
TypeToShape(MemRefType::get({100, 13, 210}, b.getF32Type())).ToProto(),
EqualsProto(
ShapeUtil::MakeShape(PrimitiveType::F32, {100, 13, 210}).ToProto()));
EXPECT_THAT(
TypeToShape(MemRefType::get({100, 13, 210},
VectorType::get({8, 128}, b.getF32Type())))
.ToProto(),
EqualsProto(
ShapeUtil::MakeShape(PrimitiveType::F32, {100, 13, 210, 8, 128})
.ToProto()));
}
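// Ranked tensors map directly to shapes; TypeExtensionsAttr bounds turn
// dynamic dimensions into bounded-dynamic ones, plain dynamic dimensions
// become unbounded (Shape::kUnboundedSize), and unranked or vector-element
// tensors convert to the empty Shape.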
TEST(TypeToShapeTest, ConvertTensorTypeToTypes) {
mlir::MLIRContext context;
context.loadDialect<mlir::mhlo::MhloDialect>();
Builder b(&context);
EXPECT_THAT(
TypeToShape(RankedTensorType::get({8, 128}, b.getF32Type())).ToProto(),
EqualsProto(
ShapeUtil::MakeShape(PrimitiveType::F32, {8, 128}).ToProto()));
llvm::SmallVector<int64_t, 4> bounds = {8, mlir::ShapedType::kDynamic};
auto extensions = mlir::mhlo::TypeExtensionsAttr::get(&context, bounds);
EXPECT_THAT(
TypeToShape(RankedTensorType::get({mlir::ShapedType::kDynamic, 128},
b.getF32Type(), extensions))
.ToProto(),
EqualsProto(
ShapeUtil::MakeShape(PrimitiveType::F32, {8, 128}, {true, false})
.ToProto()));
EXPECT_THAT(
TypeToShape(RankedTensorType::get({mlir::ShapedType::kDynamic, 784},
b.getF32Type()))
.ToProto(),
EqualsProto(ShapeUtil::MakeShape(PrimitiveType::F32,
{Shape::kUnboundedSize, 784},
{true, false})
.ToProto()));
EXPECT_THAT(TypeToShape(UnrankedTensorType::get(b.getF32Type())).ToProto(),
EqualsProto(Shape().ToProto()));
EXPECT_THAT(
TypeToShape(RankedTensorType::get(
{8, 128}, VectorType::get({16, 16}, b.getF32Type())))
.ToProto(),
EqualsProto(Shape().ToProto()));
}
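// Round trips a shape with a non-default layout through MemRefType and back,
// checking that the layout survives.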
TEST(TypeToShapeTest, ConvertMemRefToShape) {
Shape shape = ShapeUtil::MakeShapeWithDenseLayout(PrimitiveType::F32,
{10, 20, 30}, {2, 0, 1});
MLIRContext context;
mlir::Builder builder(&context);
absl::StatusOr<mlir::Type> mlir_type =
ConvertShapeToType<MemRefType>(shape, builder);
ASSERT_TRUE(mlir_type.ok());
mlir::Type type = std::move(mlir_type).value();
Shape converted = TypeToShape(type);
EXPECT_TRUE(ShapeUtil::Equal(
converted, ShapeUtil::MakeShapeWithDenseLayout(PrimitiveType::F32,
{10, 20, 30}, {2, 0, 1})));
EXPECT_TRUE(ShapeUtil::Equal(converted, shape));
}
TEST(TypeToShapeTest, ConvertMemRefToShape2) {
Shape shape = ShapeUtil::MakeShapeWithDenseLayout(PrimitiveType::C64,
{2, 4, 3, 3}, {2, 3, 1, 0});
MLIRContext context;
mlir::Builder builder(&context);
absl::StatusOr<mlir::Type> mlir_type =
ConvertShapeToType<MemRefType>(shape, builder);
ASSERT_TRUE(mlir_type.ok());
mlir::Type type = std::move(mlir_type).value();
Shape converted = TypeToShape(type);
EXPECT_TRUE(ShapeUtil::Equal(
converted, ShapeUtil::MakeShapeWithDenseLayout(
PrimitiveType::C64, {2, 4, 3, 3}, {2, 3, 1, 0})));
EXPECT_TRUE(ShapeUtil::Equal(converted, shape));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/translate/mhlo_to_hlo/type_to_shape.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/translate/mhlo_to_hlo/type_to_shape_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
79bd5411-3f18-46a2-ad61-ae5728c586c1 | cpp | tensorflow/tensorflow | call_inliner | third_party/xla/xla/service/call_inliner.cc | third_party/xla/xla/service/call_inliner_test.cc | #include "xla/service/call_inliner.h"
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/match.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_sharding_metadata.h"
#include "xla/service/call_graph.h"
#include "xla/service/hlo_dce.h"
#include "xla/service/hlo_domain_isolator.h"
#include "xla/service/spmd/shardy/constants.h"
#include "xla/status_macros.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
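// Visits the callee in postorder, cloning every instruction into the caller
// with operands remapped through the accumulated clone map. Parameters
// resolve to the call's operands, control dependencies are carried over, and
// the callee root finally replaces the call itself.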
class SubcomputationInsertionVisitor : public DfsHloVisitorWithDefault {
public:
explicit SubcomputationInsertionVisitor(HloInstruction* call)
: call_(call), outer_(call->parent()) {
CHECK_EQ(HloOpcode::kCall, call_->opcode());
}
absl::Status DefaultAction(HloInstruction* hlo) override {
std::vector<HloInstruction*> new_operands;
for (HloInstruction* operand : hlo->operands()) {
TF_ASSIGN_OR_RETURN(HloInstruction * new_operand, Resolve(operand));
new_operands.push_back(new_operand);
}
VLOG(1) << "Cloning HLO and adding to caller: " << hlo->ToString();
auto new_hlo = hlo->CloneWithNewOperands(hlo->shape(), new_operands);
HloInstruction* new_hlo_pointer =
outer_->AddInstruction(std::move(new_hlo));
TF_RETURN_IF_ERROR(NoteMapping(hlo, new_hlo_pointer));
for (HloInstruction* control_predecessor : hlo->control_predecessors()) {
TF_ASSIGN_OR_RETURN(HloInstruction * new_control_predecessor,
Resolve(control_predecessor));
TF_RETURN_IF_ERROR(
new_control_predecessor->AddControlDependencyTo(new_hlo_pointer));
}
return absl::OkStatus();
}
absl::Status HandleParameter(HloInstruction* parameter) override {
TF_RETURN_IF_ERROR(NoteMapping(
parameter, call_->mutable_operand(parameter->parameter_number())));
return absl::OkStatus();
}
absl::Status FinishVisit(HloInstruction* root) override {
TF_ASSIGN_OR_RETURN(HloInstruction * new_root, Resolve(root));
VLOG(1) << "Replacing all uses of " << call_->ToString()
<< " with new root " << new_root->ToString();
return outer_->ReplaceInstruction(call_, new_root);
}
CallInliner::InlinedInstructionMap ConsumeInstructionMap() {
return std::move(subcomputation_hlo_to_new_hlo_);
}
private:
absl::StatusOr<HloInstruction*> Resolve(HloInstruction* subcomputation_hlo) {
auto it = subcomputation_hlo_to_new_hlo_.find(subcomputation_hlo);
if (it == subcomputation_hlo_to_new_hlo_.end()) {
return NotFound(
"Could not find mapping from subcomputation HLO %s to a cloned HLO.",
subcomputation_hlo->ToString());
}
return it->second;
}
absl::Status NoteMapping(HloInstruction* subcomputation_hlo,
HloInstruction* new_hlo) {
auto result = subcomputation_hlo_to_new_hlo_.insert(
std::make_pair(subcomputation_hlo, new_hlo));
TF_RET_CHECK(result.second)
<< "A mapping for the subcomputation HLO is already present.";
return absl::OkStatus();
}
HloInstruction* call_;
HloComputation* outer_;
CallInliner::InlinedInstructionMap subcomputation_hlo_to_new_hlo_;
};
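// Under the Shardy partitioner, the bodies of shmap/manual computations must
// stay outlined, so calls to them are excluded from inlining.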
bool InlineUnderShardy(HloInstruction* instruction) {
return !(instruction->GetModule()->config().use_shardy_partitioner() &&
(absl::StrContains(instruction->to_apply()->name(), "shmap_body") ||
absl::StartsWith(instruction->to_apply()->name(),
sdy::kManualComputationBodyFuncName.str())));
}
}
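// Inlines `call` (a kCall with exactly one callee) into its parent
// computation. Composite metadata is stripped from the call first, and any
// MUST_FUSE/MAXIMAL_FUSE frontend attributes are pushed down onto fusible
// callee instructions before cloning.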
absl::StatusOr<CallInliner::InlinedInstructionMap>
CallInliner::Inline(HloInstruction* call) {
TF_RET_CHECK(call->opcode() == HloOpcode::kCall)
<< "Instruction was not a call op: " << call->opcode();
if (call->is_composite()) {
FrontendAttributes frontend_attributes = call->frontend_attributes();
frontend_attributes.mutable_map()->erase("composite.name");
frontend_attributes.mutable_map()->erase("composite.attributes");
frontend_attributes.mutable_map()->erase("composite.version");
call->set_frontend_attributes(frontend_attributes);
}
const auto& callees = call->called_computations();
TF_RET_CHECK(callees.size() == 1);
HloComputation* callee = callees[0];
if (call->has_frontend_attributes()) {
const FrontendAttributes& call_attributes = call->frontend_attributes();
std::string has_fuse =
call_attributes.map().contains("MUST_FUSE") ? "MUST_FUSE"
: call_attributes.map().contains("MAXIMAL_FUSE") ? "MAXIMAL_FUSE"
: "";
if (!has_fuse.empty()) {
for (auto instruction : callee->instructions()) {
if (instruction->IsFusible()) {
FrontendAttributes frontend_attributes =
instruction->frontend_attributes();
frontend_attributes.mutable_map()->insert(
{has_fuse, call_attributes.map().at(has_fuse)});
instruction->set_frontend_attributes(frontend_attributes);
}
}
}
}
SubcomputationInsertionVisitor visitor(call);
TF_RETURN_IF_ERROR(callee->Accept(&visitor));
return visitor.ConsumeInstructionMap();
}
bool CallInliner::IsInlineableCallOp(HloInstruction* instruction) const {
return instruction->opcode() == HloOpcode::kCall &&
!instruction->has_backend_config() &&
!instruction->parent()->IsAsyncComputation() &&
InlineUnderShardy(instruction);
}
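// Walks the call graph, inlining every inlineable kCall on the included
// execution threads; with single_call_site_ set, only callees with a single
// caller are inlined. Dead computations are swept up by HloDCE afterwards.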
absl::StatusOr<bool> CallInliner::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
std::unique_ptr<CallGraph> call_graph = CallGraph::Build(module);
bool did_mutate = false;
TF_RETURN_IF_ERROR(call_graph->VisitNodes([&](const CallGraphNode& node)
-> absl::Status {
if (!HloInstruction::IsThreadIncluded(
node.computation()->execution_thread(), execution_threads)) {
return absl::OkStatus();
}
VLOG(1) << "Visiting node: " << node.ToString();
for (HloInstruction* instruction :
node.computation()->MakeInstructionPostOrder()) {
if (IsInlineableCallOp(instruction)) {
const auto& callees = instruction->called_computations();
TF_RET_CHECK(callees.size() == 1);
if (!single_call_site_ || call_graph->GetNode(instruction->to_apply())
.caller_callsites()
.size() == 1) {
TF_ASSIGN_OR_RETURN(CallInliner::InlinedInstructionMap inline_map,
Inline(instruction));
if (update_domain_) {
HloDomainIsolator isolator(
[]() { return ShardingDomainCreator{}; });
for (const auto& [call_inst, inlined_inst] : inline_map) {
TF_RETURN_IF_ERROR(isolator.UpdateDomains(inlined_inst).status());
}
}
did_mutate = true;
}
}
}
return absl::OkStatus();
}));
if (did_mutate) {
TF_RETURN_IF_ERROR(HloDCE().Run(module, execution_threads).status());
}
return did_mutate;
}
} | #include "xla/service/call_inliner.h"
#include <cstdint>
#include <string>
#include "absl/log/log.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/literal_util.h"
#include "xla/service/hlo_parser.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/statusor.h"
namespace op = xla::testing::opcode_matchers;
namespace xla {
namespace {
using CallInlinerTest = HloTestBase;
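// Control dependencies inside the callee must be carried over to the cloned
// instructions in the caller.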
TEST_F(CallInlinerTest, ControlDependenciesAreCarriedToCaller) {
HloComputation::Builder inner(TestName() + ".inner");
HloInstruction* zero = inner.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(24.0f)));
HloInstruction* one = inner.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f)));
TF_ASSERT_OK(zero->AddControlDependencyTo(one));
auto module = CreateNewVerifiedModule();
HloComputation* inner_computation =
module->AddEmbeddedComputation(inner.Build());
HloComputation::Builder outer(TestName() + ".outer");
Shape r0f32 = ShapeUtil::MakeShape(F32, {});
outer.AddInstruction(
HloInstruction::CreateCall(r0f32, {}, inner_computation));
auto computation = module->AddEntryComputation(outer.Build());
CallInliner call_inliner;
TF_ASSERT_OK_AND_ASSIGN(bool mutated, call_inliner.Run(module.get()));
ASSERT_TRUE(mutated);
EXPECT_THAT(computation->root_instruction(), op::Constant());
EXPECT_EQ(computation->root_instruction()->literal().GetFirstElement<float>(),
42);
ASSERT_EQ(1, computation->root_instruction()->control_predecessors().size());
auto prior = computation->root_instruction()->control_predecessors()[0];
EXPECT_THAT(prior, op::Constant());
EXPECT_EQ(prior->literal().GetFirstElement<float>(), 24);
}
TEST_F(CallInlinerTest, CallsWithinWhileBodiesAreInlined) {
const Shape pred = ShapeUtil::MakeShape(PRED, {});
auto module = CreateNewVerifiedModule();
HloComputation::Builder just_false(TestName() + ".false");
just_false.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
HloComputation* false_computation =
module->AddEmbeddedComputation(just_false.Build());
HloComputation::Builder call_false_builder(TestName() + ".call_false");
call_false_builder.AddInstruction(
HloInstruction::CreateParameter(0, pred, "param"));
call_false_builder.AddInstruction(
HloInstruction::CreateCall(pred, {}, false_computation));
HloComputation* call_false =
module->AddEmbeddedComputation(call_false_builder.Build());
HloComputation::Builder outer(TestName() + ".outer");
HloInstruction* init_value = outer.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
outer.AddInstruction(
HloInstruction::CreateWhile(pred, call_false, call_false, init_value));
auto computation = module->AddEntryComputation(outer.Build());
CallInliner call_inliner;
TF_ASSERT_OK_AND_ASSIGN(bool mutated, call_inliner.Run(module.get()));
ASSERT_TRUE(mutated);
EXPECT_THAT(
computation->root_instruction()->while_condition()->root_instruction(),
op::Constant());
EXPECT_THAT(computation->root_instruction()->while_body()->root_instruction(),
op::Constant());
}
TEST_F(CallInlinerTest, InlineWithoutRunningPass) {
const Shape pred = ShapeUtil::MakeShape(PRED, {});
auto module = CreateNewVerifiedModule();
HloComputation::Builder just_false(TestName() + ".false");
auto* true_constant = just_false.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR1<bool>({true})));
auto* false_constant = just_false.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
TF_ASSERT_OK(false_constant->AddControlDependencyTo(true_constant));
HloComputation* false_computation =
module->AddEmbeddedComputation(just_false.Build());
HloComputation::Builder call_false_builder(TestName() + ".call_false");
HloInstruction* call = call_false_builder.AddInstruction(
HloInstruction::CreateCall(pred, {}, false_computation));
auto computation = module->AddEntryComputation(call_false_builder.Build());
TF_ASSERT_OK(CallInliner::Inline(call).status());
EXPECT_THAT(computation->root_instruction(), op::Constant());
EXPECT_THAT(computation->root_instruction()->control_successors(),
ElementsAre(op::Constant()));
}
TEST_F(CallInlinerTest, InlineWithEmptyComputation) {
const Shape pred = ShapeUtil::MakeShape(PRED, {});
auto module = CreateNewVerifiedModule();
Shape r0s32 = ShapeUtil::MakeShape(S32, {});
HloComputation::Builder empty(TestName() + ".empty");
empty.AddInstruction(HloInstruction::CreateParameter(0, r0s32, "A"));
empty.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(0)));
HloComputation* empty_computation =
module->AddEmbeddedComputation(empty.Build());
HloComputation::Builder empty2(TestName() + ".empty");
empty2.AddInstruction(HloInstruction::CreateParameter(0, r0s32, "A"));
empty2.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(0)));
HloComputation* empty2_computation =
module->AddEmbeddedComputation(empty2.Build());
HloComputation::Builder entry("entry");
auto zero = entry.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(0)));
entry.AddInstruction(
HloInstruction::CreateCall(r0s32, {zero}, empty_computation));
HloInstruction* call1 = entry.AddInstruction(
HloInstruction::CreateCall(r0s32, {zero}, empty2_computation));
entry.AddInstruction(
HloInstruction::CreateCall(r0s32, {call1}, empty_computation));
auto computation = module->AddEntryComputation(entry.Build());
CallInliner call_inliner;
TF_ASSERT_OK_AND_ASSIGN(bool mutated, call_inliner.Run(module.get()));
ASSERT_TRUE(mutated);
EXPECT_THAT(computation->root_instruction(), op::Constant());
}
TEST_F(CallInlinerTest, CallToOutfeedComputationIsInlined) {
const Shape f32 = ShapeUtil::MakeShape(F32, {});
auto module = CreateNewVerifiedModule();
HloComputation::Builder outfeeder(TestName() + ".outfeeder");
auto value = outfeeder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0)));
auto token = outfeeder.AddInstruction(HloInstruction::CreateToken());
outfeeder.AddInstruction(
HloInstruction::CreateOutfeed(f32, value, token, ""));
auto outfeed_computation = module->AddEmbeddedComputation(outfeeder.Build());
HloComputation::Builder outer(TestName() + ".outer");
outer.AddInstruction(HloInstruction::CreateCall(
outfeed_computation->root_instruction()->shape(), {},
outfeed_computation));
module->AddEntryComputation(outer.Build());
CallInliner call_inliner;
TF_ASSERT_OK_AND_ASSIGN(bool mutated, call_inliner.Run(module.get()));
ASSERT_TRUE(mutated);
}
TEST_F(CallInlinerTest, InlineSingleUseCalleesOnly) {
const absl::string_view hlo_string = R"(
HloModule inline_module
a {
ROOT tuple = () tuple()
}
b {
ROOT tuple.1 = () tuple()
}
ENTRY inline {
a = () call(), to_apply=a
b = () call(), to_apply=a
c = () call(), to_apply=b
ROOT tuple = ((), (), ()) tuple(a, b, c)
})";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
CallInliner call_inliner(true);
TF_ASSERT_OK_AND_ASSIGN(bool mutated, call_inliner.Run(module.get()));
ASSERT_TRUE(mutated);
ASSERT_EQ(module->entry_computation()->instruction_count(), 4);
auto inst = module->entry_computation()->instructions().begin();
EXPECT_THAT(*inst, op::Call());
++inst;
EXPECT_THAT(*inst, op::Call());
++inst;
EXPECT_THAT(*inst, op::Tuple());
++inst;
EXPECT_THAT(*inst, op::Tuple());
}
TEST_F(CallInlinerTest, InliningPerformedInsideSpecifiedThreadsOnly) {
const std::string hlo_string = R"(
HloModule inline_specified_threads_only
%secondary_inner () -> u32[] {
ROOT %co.2 = u32[] constant(2)
}, execution_thread="secondary_thread"
%secondary_outer () -> u32[] {
%co.1 = u32[] constant(1)
%call.1 = u32[] call(), to_apply=%secondary_inner
ROOT %add.1 = add(%co.1, %call.1)
}, execution_thread="secondary_thread"
%main_inner () -> u32[] {
%co.0 = u32[] constant(0)
%async-start = ((), u32[], u32[]) call-start(), async_execution_thread="secondary_thread", to_apply=secondary_outer
%async-done = u32[] call-done(((), u32[], u32[]) %async-start)
ROOT %add.2 = add(%co.0, %async-done)
}
ENTRY %main_outer (p0: u32[]) -> u32[] {
%p.0 = u32[] parameter(0)
%call.0 = u32[] call(), to_apply=%main_inner
ROOT %add.3 = add(%p.0, %call.0)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
auto module_clone = module->Clone("");
{
VLOG(1) << "Module BEFORE CallInliner\n" << module->ToString();
CallInliner call_inliner;
TF_ASSERT_OK_AND_ASSIGN(bool mutated, call_inliner.Run(module.get()));
VLOG(1) << "Module AFTER CallInliner\n" << module->ToString();
EXPECT_TRUE(mutated);
EXPECT_THAT(
module->entry_computation()->root_instruction(),
op::Add(op::Parameter(0),
op::Add(op::Constant(LiteralUtil::CreateR0<uint32_t>(0)),
op::AsyncDone())));
EXPECT_THAT(module->entry_computation()
->root_instruction()
->operand(1)
->operand(1)
->async_wrapped_instruction()
->called_computations()
.at(0)
->root_instruction(),
op::Add(op::Constant(LiteralUtil::CreateR0<uint32_t>(1)),
op::Constant(LiteralUtil::CreateR0<uint32_t>(2))));
}
VLOG(1) << "Restricting CallInliner to the secondary thread.";
{
CallInliner call_inliner;
TF_ASSERT_OK_AND_ASSIGN(
bool mutated,
call_inliner.Run(module_clone.get(), {"secondary_thread"}));
VLOG(1) << "Module AFTER CallInliner\n" << module_clone->ToString();
EXPECT_TRUE(mutated);
EXPECT_THAT(module_clone->entry_computation()->root_instruction(),
op::Add(op::Parameter(0), op::Call()));
EXPECT_THAT(module_clone->entry_computation()
->root_instruction()
->operand(1)
->called_computations()
.at(0)
->root_instruction(),
op::Add(op::Constant(LiteralUtil::CreateR0<uint32_t>(0)),
op::AsyncDone()));
EXPECT_THAT(module_clone->entry_computation()
->root_instruction()
->operand(1)
->called_computations()
.at(0)
->root_instruction()
->operand(1)
->async_wrapped_instruction()
->called_computations()
.at(0)
->root_instruction(),
op::Add(op::Constant(LiteralUtil::CreateR0<uint32_t>(1)),
op::Constant(LiteralUtil::CreateR0<uint32_t>(2))));
}
}
TEST_F(CallInlinerTest, InlineCompositeCall) {
const absl::string_view hlo_string = R"(
HloModule composite
%add (lhs: f32[]) -> f32[] {
%lhs = f32[] parameter(0)
%rhs = f32[] constant(2)
ROOT %add = f32[] add(f32[] %lhs, f32[] %rhs)
}
ENTRY %main () -> f32[] {
%lhs = f32[] constant(42)
ROOT %call = f32[] call(f32[] %lhs), to_apply=%add, is_composite=true, frontend_attributes={composite.attributes={n = 1 : i32, tensor = dense<1> : tensor<i32>},composite.name="foo.bar",composite.version="1"}
})";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
CallInliner call_inliner(true);
TF_ASSERT_OK_AND_ASSIGN(bool mutated, call_inliner.Run(module.get()));
ASSERT_TRUE(mutated);
ASSERT_EQ(module->entry_computation()->instruction_count(), 3);
auto inst = module->entry_computation()->instructions().begin();
EXPECT_THAT(*inst, op::Constant());
++inst;
EXPECT_THAT(*inst, op::Constant());
++inst;
EXPECT_THAT(*inst, op::Add());
EXPECT_TRUE((*inst)->frontend_attributes().map().empty());
}
TEST_F(CallInlinerTest, UseShardyMhloToHloShmapBodyNotInlined) {
const char* const hloString = R"(
HloModule jit_f, entry_computation_layout={(f32[8,8]{1,0})->f32[8,8]{1,0}}
%prefix_shmap_body_suffix.4 (Arg_0.5: f32[1,8]) -> f32[1,8] {
%Arg_0.5 = f32[1,8]{1,0} parameter(0)
ROOT %add.6 = f32[1,8]{1,0} add(f32[1,8]{1,0} %Arg_0.5, f32[1,8]{1,0} %Arg_0.5), metadata={source_file="-" source_line=11}
}
ENTRY %main.10 (Arg_0.1: f32[8,8]) -> f32[8,8] {
%Arg_0.1 = f32[8,8]{1,0} parameter(0)
%custom-call.2 = f32[8,8]{1,0} custom-call(f32[8,8]{1,0} %Arg_0.1), custom_call_target="Sharding", sharding={devices=[8,1]<=[8]}, metadata={source_file="-" source_line=3}
%custom-call.3 = f32[1,8]{1,0} custom-call(f32[8,8]{1,0} %custom-call.2), custom_call_target="SPMDFullToShardShape", sharding={manual}, metadata={source_file="-" source_line=4}
%call.7 = f32[1,8]{1,0} call(f32[1,8]{1,0} %custom-call.3), to_apply=%prefix_shmap_body_suffix.4
%custom-call.8 = f32[1,8]{1,0} custom-call(f32[1,8]{1,0} %call.7), custom_call_target="Sharding", sharding={manual}, metadata={source_file="-" source_line=6}
ROOT %custom-call.9 = f32[8,8]{1,0} custom-call(f32[1,8]{1,0} %custom-call.8), custom_call_target="SPMDShardToFullShape", sharding={devices=[8,1]<=[8]}, metadata={source_file="-" source_line=7}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hloString));
module->mutable_config().set_use_shardy_partitioner(true);
TF_ASSERT_OK_AND_ASSIGN(bool changed, CallInliner().Run(module.get()));
VLOG(1) << module->ToString();
EXPECT_FALSE(changed);
HloInstruction* call = FindInstruction(module.get(), xla::HloOpcode::kCall);
EXPECT_NE(call, nullptr);
EXPECT_TRUE(call->has_to_apply());
EXPECT_EQ(call->to_apply()->name(), "prefix_shmap_body_suffix.4");
}
TEST_F(CallInlinerTest, UseShardManualComputationBodyNotInlined) {
const char* const hloString = R"(
HloModule jit_f, entry_computation_layout={(f32[8,8]{1,0})->f32[8,8]{1,0}}
%xla.sdy.manual_computation_body.4 (Arg_0.5: f32[1,8]) -> f32[1,8] {
%Arg_0.5 = f32[1,8]{1,0} parameter(0)
ROOT %add.6 = f32[1,8]{1,0} add(f32[1,8]{1,0} %Arg_0.5, f32[1,8]{1,0} %Arg_0.5), metadata={source_file="-" source_line=11}
}
ENTRY %main.10 (Arg_0.1: f32[8,8]) -> f32[8,8] {
%Arg_0.1 = f32[8,8]{1,0} parameter(0)
%custom-call.3 = f32[1,8]{1,0} custom-call(f32[8,8]{1,0} %Arg_0.1), custom_call_target="SPMDFullToShardShape", sharding={manual}, metadata={source_file="-" source_line=4}
%call.7 = f32[1,8]{1,0} call(f32[1,8]{1,0} %custom-call.3), to_apply=%xla.sdy.manual_computation_body.4
ROOT %custom-call.9 = f32[8,8]{1,0} custom-call(f32[1,8]{1,0} %call.7), custom_call_target="SPMDShardToFullShape", sharding={devices=[8,1]<=[8]}, metadata={source_file="-" source_line=7}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hloString));
module->mutable_config().set_use_shardy_partitioner(true);
TF_ASSERT_OK_AND_ASSIGN(bool changed, CallInliner().Run(module.get()));
VLOG(1) << module->ToString();
EXPECT_FALSE(changed);
HloInstruction* call = FindInstruction(module.get(), xla::HloOpcode::kCall);
EXPECT_NE(call, nullptr);
EXPECT_TRUE(call->has_to_apply());
EXPECT_EQ(call->to_apply()->name(), "xla.sdy.manual_computation_body.4");
}
TEST_F(CallInlinerTest, UseShardManualComputationBodyInlined) {
const char* const hloString = R"(
HloModule jit_f, entry_computation_layout={(f32[8,8]{1,0})->f32[8,8]{1,0}}
%prefix_xla.sdy.manual_computation_body.4 (Arg_0.5: f32[1,8]) -> f32[1,8] {
%Arg_0.5 = f32[1,8]{1,0} parameter(0)
ROOT %add.6 = f32[1,8]{1,0} add(f32[1,8]{1,0} %Arg_0.5, f32[1,8]{1,0} %Arg_0.5), metadata={source_file="-" source_line=11}
}
ENTRY %main.10 (Arg_0.1: f32[8,8]) -> f32[8,8] {
%Arg_0.1 = f32[8,8]{1,0} parameter(0)
%custom-call.3 = f32[1,8]{1,0} custom-call(f32[8,8]{1,0} %Arg_0.1), custom_call_target="SPMDFullToShardShape", sharding={manual}, metadata={source_file="-" source_line=4}
%call.7 = f32[1,8]{1,0} call(f32[1,8]{1,0} %custom-call.3), to_apply=%prefix_xla.sdy.manual_computation_body.4
ROOT %custom-call.9 = f32[8,8]{1,0} custom-call(f32[1,8]{1,0} %call.7), custom_call_target="SPMDShardToFullShape", sharding={devices=[8,1]<=[8]}, metadata={source_file="-" source_line=7}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hloString));
module->mutable_config().set_use_shardy_partitioner(true);
TF_ASSERT_OK_AND_ASSIGN(bool changed, CallInliner().Run(module.get()));
VLOG(1) << module->ToString();
EXPECT_TRUE(changed);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/call_inliner.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/call_inliner_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ed544b03-b54a-491f-ae4b-4e50be0c4758 | cpp | tensorflow/tensorflow | partial_run_mgr | tensorflow/core/distributed_runtime/partial_run_mgr.cc | tensorflow/core/distributed_runtime/partial_run_mgr_test.cc | #include "tensorflow/core/distributed_runtime/partial_run_mgr.h"
namespace tensorflow {
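// Returns true if a new partial-run state was created for `step_id`, false if
// one already existed; in both cases `*cancellation_manager` points at the
// CancellationManager owned by that state. Typical usage (sketch, mirroring
// the unit test below):
//   CancellationManager* cm = nullptr;
//   bool is_new = partial_run_mgr.FindOrCreate(step_id, &cm);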
bool PartialRunMgr::FindOrCreate(int step_id,
CancellationManager** cancellation_manager) {
mutex_lock l(mu_);
auto it = step_id_to_partial_run_.find(step_id);
if (it != step_id_to_partial_run_.end()) {
*cancellation_manager = it->second->cancellation_manager.get();
return false;
}
std::unique_ptr<PartialRunState> partial_run =
std::make_unique<PartialRunState>();
partial_run->cancellation_manager = std::make_unique<CancellationManager>();
*cancellation_manager = partial_run->cancellation_manager.get();
step_id_to_partial_run_[step_id] = std::move(partial_run);
return true;
}
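// Records that the executor finished `step_id`. If PartialRunDone already
// registered the final callback, it is invoked here (outside the lock) with
// the merged status and the per-step state is erased; otherwise the status is
// stashed until PartialRunDone arrives.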
void PartialRunMgr::ExecutorDone(int step_id, const Status& executor_status) {
StatusCallback done;
Status callback_status;
{
mutex_lock l(mu_);
auto run_it = step_id_to_partial_run_.find(step_id);
if (run_it == step_id_to_partial_run_.end()) {
return;
}
done = std::move(run_it->second->final_callback);
if (!executor_status.ok()) {
run_it->second->final_status = executor_status;
}
callback_status = run_it->second->final_status;
run_it->second->executor_done = true;
}
if (done != nullptr) {
done(callback_status);
mutex_lock l(mu_);
step_id_to_partial_run_.erase(step_id);
}
}
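// Completes the client side of a partial run. If the executor has not
// finished yet, `done` is stashed and fires later from ExecutorDone;
// otherwise it is invoked immediately (outside the lock) and the step state
// is erased. When both sides fail, the executor's error takes precedence
// (see the StatusPropagationTest cases in the unit test below).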
void PartialRunMgr::PartialRunDone(int step_id, StatusCallback done,
const Status& status) {
Status callback_status;
{
mutex_lock l(mu_);
auto run_it = step_id_to_partial_run_.find(step_id);
if (run_it == step_id_to_partial_run_.end()) {
return;
}
run_it->second->final_status.Update(status);
if (!run_it->second->executor_done) {
run_it->second->final_callback = std::move(done);
return;
}
callback_status = run_it->second->final_status;
}
done(callback_status);
mutex_lock l(mu_);
step_id_to_partial_run_.erase(step_id);
}
} | #include "tensorflow/core/distributed_runtime/partial_run_mgr.h"
#include "tensorflow/core/lib/core/notification.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
TEST(PartialRunMgrFindOrCreate, Create) {
PartialRunMgr partial_run_mgr;
int step_id = 1;
CancellationManager* cancellation_manager;
partial_run_mgr.FindOrCreate(step_id, &cancellation_manager);
EXPECT_TRUE(cancellation_manager != nullptr);
}
TEST(PartialRunMgrFindOrCreate, Find) {
PartialRunMgr partial_run_mgr;
int step_id = 1;
CancellationManager* cancellation_manager;
partial_run_mgr.FindOrCreate(step_id, &cancellation_manager);
CancellationManager* found_cancellation_manager;
partial_run_mgr.FindOrCreate(step_id, &found_cancellation_manager);
EXPECT_EQ(cancellation_manager, found_cancellation_manager);
}
TEST(PartialRunMgrFindOrCreate, NewCreate) {
PartialRunMgr partial_run_mgr;
int step_id = 1;
CancellationManager* cancellation_manager;
partial_run_mgr.FindOrCreate(step_id, &cancellation_manager);
int new_step_id = 2;
CancellationManager* new_cancellation_manager;
partial_run_mgr.FindOrCreate(new_step_id, &new_cancellation_manager);
EXPECT_NE(cancellation_manager, new_cancellation_manager);
}
TEST(PartialRunMgr, PartialRunRemoved) {
PartialRunMgr partial_run_mgr;
int step_id = 1;
CancellationManager* cancellation_manager;
partial_run_mgr.FindOrCreate(step_id, &cancellation_manager);
int called = 0;
partial_run_mgr.PartialRunDone(
step_id, [&called](Status status) { called++; }, absl::OkStatus());
partial_run_mgr.ExecutorDone(step_id, absl::OkStatus());
partial_run_mgr.PartialRunDone(
step_id, [&called](Status status) { called++; }, absl::OkStatus());
partial_run_mgr.ExecutorDone(step_id, absl::OkStatus());
EXPECT_EQ(1, called);
}
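// Exercises every combination of executor status, partial-run status, and
// completion order; the executor's error is expected to win whenever both
// sides fail.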
struct StatusTestParam {
Status executor_status;
Status partial_run_status;
Status expected_status;
};
class StatusPropagationTest : public ::testing::TestWithParam<StatusTestParam> {
protected:
PartialRunMgr partial_run_mgr_;
Notification invoked_;
Status status_;
void set_status(const Status& status) {
status_ = status;
invoked_.Notify();
}
Status status() {
invoked_.WaitForNotification();
return status_;
}
};
TEST_P(StatusPropagationTest, ExecutorDoneFirst) {
StatusTestParam param = GetParam();
int step_id = 1;
CancellationManager* cancellation_manager;
partial_run_mgr_.FindOrCreate(step_id, &cancellation_manager);
partial_run_mgr_.ExecutorDone(step_id, param.executor_status);
partial_run_mgr_.PartialRunDone(step_id,
[this](Status status) { set_status(status); },
param.partial_run_status);
EXPECT_EQ(status(), param.expected_status);
}
TEST_P(StatusPropagationTest, PartialRunDoneFirst) {
StatusTestParam param = GetParam();
int step_id = 1;
CancellationManager* cancellation_manager;
partial_run_mgr_.FindOrCreate(step_id, &cancellation_manager);
partial_run_mgr_.PartialRunDone(step_id,
[this](Status status) { set_status(status); },
param.partial_run_status);
partial_run_mgr_.ExecutorDone(step_id, param.executor_status);
EXPECT_EQ(status(), param.expected_status);
}
Status ExecutorError() { return errors::Internal("executor error"); }
Status PartialRunError() { return errors::Internal("partial run error"); }
INSTANTIATE_TEST_SUITE_P(
PartialRunMgr, StatusPropagationTest,
::testing::Values(
StatusTestParam{absl::OkStatus(), absl::OkStatus(), absl::OkStatus()},
StatusTestParam{ExecutorError(), absl::OkStatus(), ExecutorError()},
StatusTestParam{absl::OkStatus(), PartialRunError(), PartialRunError()},
StatusTestParam{ExecutorError(), PartialRunError(), ExecutorError()}));
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/distributed_runtime/partial_run_mgr.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/distributed_runtime/partial_run_mgr_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
96adaac4-6e20-4641-ba4a-c5b9cc5e9ff3 | cpp | tensorflow/tensorflow | dcn_analysis | tensorflow/core/profiler/convert/dcn_analysis.cc | tensorflow/core/profiler/convert/dcn_analysis_test.cc | #include "tensorflow/core/profiler/convert/dcn_analysis.h"
#include <algorithm>
#include <functional>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/strings/string_view.h"
#include "xla/tsl/profiler/utils/math_utils.h"
#include "xla/tsl/profiler/utils/tpu_xplane_utils.h"
#include "xla/tsl/profiler/utils/xplane_schema.h"
#include "tensorflow/core/profiler/convert/dcn_utils.h"
#include "tensorflow/core/profiler/utils/xplane_builder.h"
namespace tensorflow {
namespace profiler {
using tsl::profiler::kMaxCollectivesToDisplay;
using tsl::profiler::kMegaScaleDcnReceive;
using tsl::profiler::LineIdType;
using tsl::profiler::MicroToNano;
void DcnBurstManager::ResetBurstState() {
active_burst_messages_ = 0;
straggler_idx_ = 0;
active_burst_.num_messages = 0;
active_burst_.max_overlapping_messages = 0;
active_burst_.start_timestamp_ns = 0;
active_burst_.end_timestamp_ns = 0;
active_burst_.burst_size_bytes = 0;
}
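// Scans timestamp events in time order while keeping a running count of
// in-flight messages (+1 at each message start, -1 at each end). A burst
// spans from the event that lifts the count above zero to the event that
// returns it to zero; message-end events also populate the per-burst
// straggler ring buffer, and completed bursts accumulate total_latency_.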
void DcnBurstManager::CreateBursts(const TimestampMap& tm_events) {
ResetBurstState();
for (const auto& tm_event : tm_events) {
if (active_burst_messages_ < 0) {
LOG_FIRST_N(WARNING, 10)
<< "Negative messages in burst, bursts will be incorrect.";
}
if (active_burst_messages_ == 0) {
active_burst_.start_timestamp_ns = tm_event.first;
}
active_burst_messages_ += tm_event.second->message_diff;
if (tm_event.second->message_diff > 0) {
active_burst_.num_messages += tm_event.second->message_diff;
active_burst_.burst_size_bytes += tm_event.second->size_diff;
} else {
Straggler straggler = {tm_event.second->duration_ns,
tm_event.second->timestamp_ns,
tm_event.second->size_diff * (-1),
tm_event.second->src_slice_id};
active_burst_.stragglers[straggler_idx_] = straggler;
straggler_idx_ = (straggler_idx_ + 1) % kMaxStragglersPerBurst;
}
active_burst_.max_overlapping_messages =
std::max(active_burst_.max_overlapping_messages,
static_cast<uint64_t>(active_burst_messages_));
if (active_burst_messages_ == 0) {
active_burst_.end_timestamp_ns = tm_event.first;
total_latency_ +=
(active_burst_.end_timestamp_ns - active_burst_.start_timestamp_ns);
bursts_.emplace_back(std::move(active_burst_));
ResetBurstState();
}
}
}
DcnEventsProcessor::DcnEventsProcessor(uint32_t num_tpu_tensor_cores,
bool is_megacore)
: num_tpu_tensor_cores_(num_tpu_tensor_cores), is_megacore_(is_megacore) {
registered_dcn_messages_.push_back(kMegaScaleDcnReceive);
tpu_collective_ts_map_.resize(num_tpu_tensor_cores_);
tpu_collective_bursts_.resize(num_tpu_tensor_cores_);
}
void DcnEventsProcessor::SetupMessageInfo(const XPlaneVisitor& plane) {
plane.ForEachEventMetadata([&](const XEventMetadataVisitor& event_metadata) {
if (std::find(registered_dcn_messages_.begin(),
registered_dcn_messages_.end(),
event_metadata.Name()) != registered_dcn_messages_.end()) {
megascale_msg_[event_metadata.Name()] = event_metadata.Id();
}
});
}
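// Maps a TPU device number to an index into the per-tensor-core vectors.
// Under megacore two tensor cores share one device, so the device count is
// halved and the result is scaled back to an even tensor-core index (e.g.
// with 4 tensor cores and megacore, TPUs 0 and 1 map to indices 0 and 2).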
uint32_t DcnEventsProcessor::FindTpuIdx(int tpu) {
uint32_t num_tpus = num_tpu_tensor_cores_;
if (is_megacore_) {
num_tpus /= 2;
}
uint32_t tpu_idx = tpu % num_tpus;
if (is_megacore_) {
tpu_idx = tpu_idx * 2;
}
return tpu_idx;
}
void DcnEventsProcessor::GenerateTimestampEvents(
const DcnMessage& dcn_message) {
std::shared_ptr<TimestampEvent> start_event(
new TimestampEvent{dcn_message.start_timestamp_ns, 0, 1,
dcn_message.size_bytes, dcn_message.slice_src});
std::shared_ptr<TimestampEvent> end_event(new TimestampEvent{
dcn_message.end_timestamp_ns,
static_cast<uint64_t>(MicroToNano(dcn_message.duration_us)), -1,
-1 * dcn_message.size_bytes, dcn_message.slice_src});
std::pair<uint64_t, std::shared_ptr<TimestampEvent>> start_event_entry =
std::make_pair(dcn_message.start_timestamp_ns, start_event);
std::pair<uint64_t, std::shared_ptr<TimestampEvent>> end_event_entry =
std::make_pair(dcn_message.end_timestamp_ns, end_event);
host_ts_map_.insert(start_event_entry);
host_ts_map_.insert(end_event_entry);
const std::string& collective_name = dcn_message.collective_name;
uint32_t tpu_idx = FindTpuIdx(dcn_message.tpu_dst);
auto& m = tpu_collective_ts_map_[tpu_idx][collective_name];
m.insert(start_event_entry);
m.insert(end_event_entry);
}
void DcnEventsProcessor::PrintTimestampEvents() {
for (const auto& host_ts : host_ts_map_) {
LOG(INFO) << host_ts.first << ": " << host_ts.second->timestamp_ns << " "
<< host_ts.second->duration_ns << " "
<< host_ts.second->message_diff << " "
<< host_ts.second->size_diff << " "
<< host_ts.second->src_slice_id;
}
for (uint32_t tpu_idx = 0; tpu_idx < num_tpu_tensor_cores_; tpu_idx++) {
LOG(INFO) << "TPU: " << tpu_idx;
for (const auto& col_id : tpu_collective_ts_map_[tpu_idx]) {
LOG(INFO) << col_id.first;
for (const auto& tpu_col_ts :
tpu_collective_ts_map_[tpu_idx][col_id.first]) {
LOG(INFO) << tpu_col_ts.first << ": " << tpu_col_ts.second->timestamp_ns
<< " " << tpu_col_ts.second->duration_ns << " "
<< tpu_col_ts.second->message_diff << " "
<< tpu_col_ts.second->size_diff << " "
<< tpu_col_ts.second->src_slice_id;
}
}
}
}
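// Decides how many of the latency-sorted collectives deserve their own trace
// line: a collective qualifies while it contributes at least 5% of the total
// host DCN latency, at least 20% once half of the kMaxCollectivesToDisplay-1
// slots are taken, and only while slots remain. The rest are lumped into the
// "Remaining collectives" line.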
uint32_t DcnEventsProcessor::NumCollectivesQualified(
const std::vector<uint64_t>& latencies) {
uint32_t num_collectives_qualified = 0;
uint32_t max_collectives = kMaxCollectivesToDisplay - 1;
for (const auto& lat : latencies) {
if (lat < host_dcn_bursts_.TotalLatency() * 0.05) {
return num_collectives_qualified;
} else if (lat < host_dcn_bursts_.TotalLatency() * 0.2 &&
num_collectives_qualified >= (max_collectives / 2)) {
return num_collectives_qualified;
} else if (num_collectives_qualified >= max_collectives) {
return num_collectives_qualified;
} else {
num_collectives_qualified++;
}
}
return latencies.size();
}
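// For each tensor core, orders its collectives by total latency and marks the
// qualifying ones for display on dedicated trace lines.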
void DcnEventsProcessor::QualifyCollectives() {
  for (uint32_t tpu_idx = 0; tpu_idx < num_tpu_tensor_cores_; tpu_idx++) {
std::vector<uint64_t> latency_to_order;
latency_to_order.reserve(tpu_collective_bursts_[tpu_idx].size());
for (const auto& col_info : tpu_collective_bursts_[tpu_idx]) {
latency_to_order.emplace_back(col_info.second.TotalLatency());
}
std::sort(latency_to_order.begin(), latency_to_order.end(),
std::greater<uint64_t>());
uint32_t num_collectives_qualified =
NumCollectivesQualified(latency_to_order);
if (num_collectives_qualified > 0) {
      // Latencies are 64-bit nanosecond totals; keep the full width so the
      // qualification threshold is not truncated.
      uint64_t min_latency_to_qualify =
          latency_to_order[num_collectives_qualified - 1];
uint32_t col_num = 0;
for (auto& col_info : tpu_collective_bursts_[tpu_idx]) {
if (col_info.second.TotalLatency() >= min_latency_to_qualify) {
col_info.second.SetToDisplay(true);
if (++col_num == kMaxCollectivesToDisplay - 1) break;
}
}
}
}
}
void DcnEventsProcessor::GenerateBursts() {
host_dcn_bursts_.CreateBursts(host_ts_map_);
host_dcn_bursts_.SetToDisplay(true);
  for (uint32_t tpu_idx = 0; tpu_idx < num_tpu_tensor_cores_; tpu_idx++) {
for (const auto& col_info : tpu_collective_ts_map_[tpu_idx]) {
tpu_collective_bursts_[tpu_idx][col_info.first].CreateBursts(
tpu_collective_ts_map_[tpu_idx][col_info.first]);
}
}
QualifyCollectives();
}
void DcnEventsProcessor::ProcessReceiveMessages(const XPlaneVisitor& plane) {
plane.ForEachLine([&](const XLineVisitor& line) {
uint32_t recv_msg_id = megascale_msg_[kMegaScaleDcnReceive];
line.ForEachEvent([&](const XEventVisitor& event) {
if (event.Id() == recv_msg_id) {
DcnMessage dcn_message = GetDcnMessageFromXEvent(event);
if (dcn_message.validity_info == DCN_MESSAGE_VALID) {
GenerateTimestampEvents(dcn_message);
}
received_messages_.emplace_back(std::move(dcn_message));
}
});
});
GenerateBursts();
}
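// Computes a burst's mean bandwidth in GB/s (bytes divided by nanoseconds)
// and buckets it into "Low BW" / "Med BW" / "High BW". For per-TPU collective
// bursts the host-level thresholds are split across tensor cores (or across
// core pairs under megacore).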
absl::string_view DcnEventsProcessor::GetBwInfo(bool is_per_tpu,
const DcnBurst& burst,
float& burst_mean_bw,
float& burst_bw_utilization) {
absl::string_view bw_level;
uint32_t bw_divider = 1;
burst_mean_bw = static_cast<float>(burst.burst_size_bytes) /
(burst.end_timestamp_ns - burst.start_timestamp_ns);
if (is_per_tpu) {
bw_divider = num_tpu_tensor_cores_;
if (is_megacore_) {
bw_divider /= 2;
}
}
if (burst_mean_bw < kLimitLowHostDcnBw / bw_divider) {
bw_level = "Low BW";
} else if (burst_mean_bw < kLimitMedHostDcnBw / bw_divider) {
bw_level = "Med BW";
} else {
bw_level = "High BW";
}
burst_bw_utilization = burst_mean_bw / (kMaxHostDcnBw / bw_divider);
return bw_level;
}
void DcnEventsProcessor::AddHostDcnTrafficToXPlane(XPlane* host_xplane) {
if (!host_dcn_bursts_.ToDisplay()) return;
XPlaneBuilder plane_builder(host_xplane);
XLineBuilder line =
plane_builder.GetOrCreateLine(LineIdType::kDcnHostTraffic);
line.SetNameIfEmpty("DCN Host Bandwidth");
line.SetTimestampNs(0);
XStatMetadata* bw_stat_metadata =
plane_builder.GetOrCreateStatMetadata("Bandwidth (GBytes/sec)");
XStatMetadata* bw_util_stat_metadata =
plane_builder.GetOrCreateStatMetadata("Bandwidth Utilization");
XStatMetadata* num_msg_stat_metadata =
plane_builder.GetOrCreateStatMetadata("Total Messages");
XStatMetadata* max_overlap_msg_stat_metadata =
plane_builder.GetOrCreateStatMetadata("Max Overlapping Messages");
XStatMetadata* avg_msg_size_stat_metadata =
plane_builder.GetOrCreateStatMetadata("Average Message Size (Bytes)");
for (const auto& host_burst : host_dcn_bursts_.GetBursts()) {
float burst_mean_bw, bw_utilization;
absl::string_view bw_level =
GetBwInfo(false, host_burst, burst_mean_bw, bw_utilization);
XEventMetadata* event_metadata =
plane_builder.GetOrCreateEventMetadata(bw_level);
XEventBuilder event = line.AddEvent(*event_metadata);
event.SetOffsetNs(host_burst.start_timestamp_ns);
event.SetDurationNs(host_burst.end_timestamp_ns -
host_burst.start_timestamp_ns);
event.ParseAndAddStatValue(*bw_stat_metadata,
std::to_string(burst_mean_bw));
event.ParseAndAddStatValue(*bw_util_stat_metadata,
std::to_string(bw_utilization));
event.AddStatValue(*num_msg_stat_metadata, host_burst.num_messages);
event.AddStatValue(*max_overlap_msg_stat_metadata,
host_burst.max_overlapping_messages);
uint32_t avg_message_size =
host_burst.burst_size_bytes / host_burst.num_messages;
event.AddStatValue(*avg_msg_size_stat_metadata, avg_message_size);
}
}
void DcnEventsProcessor::AddUnqualifiedCollectivesToXPlane(
XPlaneBuilder& plane_builder, uint32_t tpu_idx) {
XLineBuilder line =
plane_builder.GetOrCreateLine(LineIdType::kDcnCollectiveTrafficMax);
line.SetNameIfEmpty("Remaining collectives");
line.SetTimestampNs(0);
for (const auto& col_item : tpu_collective_bursts_[tpu_idx]) {
if (col_item.second.ToDisplay()) continue;
for (const auto& col_burst : col_item.second.GetBursts()) {
XEventMetadata* straggler_event_metadata =
plane_builder.GetOrCreateEventMetadata(col_item.first);
uint32_t stragglers_processed = 0;
XStatMetadata* straggler_src_slice_stat_metadata =
plane_builder.GetOrCreateStatMetadata("Source slice");
XStatMetadata* straggler_duration_ns_stat_metadata =
plane_builder.GetOrCreateStatMetadata("Duration ns");
XStatMetadata* straggler_send_time_ns_stat_metadata =
plane_builder.GetOrCreateStatMetadata("Send timestamp ns");
XStatMetadata* straggler_recv_time_ns_stat_metadata =
plane_builder.GetOrCreateStatMetadata("Recv timestamp ns");
for (const auto& straggler : col_burst.stragglers) {
XEventBuilder straggler_event =
line.AddEvent(*straggler_event_metadata);
        // Render each straggler as a fixed 10us marker ending at its receive
        // timestamp.
        straggler_event.SetOffsetNs(straggler.end_timestamp_ns - 10000);
        straggler_event.SetDurationNs(10000);
straggler_event.AddStatValue(*straggler_src_slice_stat_metadata,
straggler.src_slice_id);
straggler_event.AddStatValue(*straggler_duration_ns_stat_metadata,
straggler.duration_ns);
straggler_event.AddStatValue(
*straggler_send_time_ns_stat_metadata,
straggler.end_timestamp_ns - straggler.duration_ns);
straggler_event.AddStatValue(*straggler_recv_time_ns_stat_metadata,
straggler.end_timestamp_ns);
if (++stragglers_processed >= col_burst.num_messages) break;
}
}
}
}
void DcnEventsProcessor::AddQualifiedCollectivesToXPlane(
XPlaneBuilder& plane_builder, uint32_t tpu_idx) {
uint32_t total_collectives = 0;
for (const auto& col_item : tpu_collective_bursts_[tpu_idx]) {
if (!col_item.second.ToDisplay()) continue;
const std::string& col_name = col_item.first;
XLineBuilder line = plane_builder.GetOrCreateLine(
LineIdType::kDcnCollectiveTraffic + total_collectives++);
line.SetNameIfEmpty(col_name);
line.SetTimestampNs(0);
XStatMetadata* bw_stat_metadata =
plane_builder.GetOrCreateStatMetadata("Bandwidth (GBytes/sec)");
XStatMetadata* bw_util_stat_metadata =
plane_builder.GetOrCreateStatMetadata("Bandwidth Utilization");
XStatMetadata* num_msg_stat_metadata =
plane_builder.GetOrCreateStatMetadata("Total Messages");
XStatMetadata* max_overlap_msg_stat_metadata =
plane_builder.GetOrCreateStatMetadata("Max Overlapping Messages");
XStatMetadata* avg_msg_size_stat_metadata =
plane_builder.GetOrCreateStatMetadata("Average Message Size (Bytes)");
XStatMetadata* straggler_details_metadata =
plane_builder.GetOrCreateStatMetadata("Straggler info:");
XStatMetadata* straggler_src_slice_stat_metadata =
plane_builder.GetOrCreateStatMetadata("Source slice");
XStatMetadata* straggler_duration_ns_stat_metadata =
plane_builder.GetOrCreateStatMetadata("Duration ns");
XStatMetadata* straggler_send_time_ns_stat_metadata =
plane_builder.GetOrCreateStatMetadata("Send timestamp ns");
XStatMetadata* straggler_recv_time_ns_stat_metadata =
plane_builder.GetOrCreateStatMetadata("Recv timestamp ns");
for (const auto& col_burst : col_item.second.GetBursts()) {
float burst_mean_bw, bw_utilization;
absl::string_view bw_level =
GetBwInfo(true, col_burst, burst_mean_bw, bw_utilization);
XEventMetadata* event_metadata =
plane_builder.GetOrCreateEventMetadata(bw_level);
XEventBuilder event = line.AddEvent(*event_metadata);
event.SetOffsetNs(col_burst.start_timestamp_ns);
event.SetDurationNs(col_burst.end_timestamp_ns -
col_burst.start_timestamp_ns);
event.ParseAndAddStatValue(*bw_stat_metadata,
std::to_string(burst_mean_bw));
event.ParseAndAddStatValue(*bw_util_stat_metadata,
std::to_string(bw_utilization));
event.AddStatValue(*num_msg_stat_metadata, col_burst.num_messages);
event.AddStatValue(*max_overlap_msg_stat_metadata,
col_burst.max_overlapping_messages);
event.AddStatValue(*avg_msg_size_stat_metadata,
col_burst.burst_size_bytes / col_burst.num_messages);
XEventMetadata* straggler_event_metadata =
plane_builder.GetOrCreateEventMetadata("Straggler");
uint32_t stragglers_processed = 0;
std::string straggler_details = "Stragglers:\n";
for (const auto& straggler : col_burst.stragglers) {
        // Only the straggler that closes the burst gets its own event; every
        // straggler is listed in the text summary below.
        if (straggler.end_timestamp_ns == col_burst.end_timestamp_ns) {
XEventBuilder straggler_event =
line.AddEvent(*straggler_event_metadata);
straggler_event.SetOffsetNs(straggler.end_timestamp_ns -
straggler.duration_ns);
straggler_event.SetDurationNs(straggler.duration_ns);
straggler_event.AddStatValue(*straggler_src_slice_stat_metadata,
straggler.src_slice_id);
straggler_event.AddStatValue(*straggler_duration_ns_stat_metadata,
straggler.duration_ns);
straggler_event.AddStatValue(
*straggler_send_time_ns_stat_metadata,
straggler.end_timestamp_ns - straggler.duration_ns);
straggler_event.AddStatValue(*straggler_recv_time_ns_stat_metadata,
straggler.end_timestamp_ns);
}
straggler_details +=
" Src slice: " + std::to_string(straggler.src_slice_id) +
" -- Duration (ns): " + std::to_string(straggler.duration_ns) +
" -- [Send Timestamp, Recv Timestamp]: [" +
std::to_string(straggler.end_timestamp_ns - straggler.duration_ns) +
", " + std::to_string(straggler.end_timestamp_ns) + "]\n";
if (++stragglers_processed >= col_burst.num_messages) break;
}
event.AddStatValue(*straggler_details_metadata, straggler_details);
}
}
}
void DcnEventsProcessor::AddTpuCollectiveDcnTrafficToXPlane(
XPlane* device_xplane) {
XPlaneBuilder plane_builder(device_xplane);
auto tpu = tsl::profiler::GetTensorCoreId(plane_builder.Name());
if (!tpu.has_value()) return;
uint32_t tpu_idx = FindTpuIdx(tpu.value());
AddQualifiedCollectivesToXPlane(plane_builder, tpu_idx);
AddUnqualifiedCollectivesToXPlane(plane_builder, tpu_idx);
}
}
} | #include "tensorflow/core/profiler/convert/dcn_analysis.h"
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "xla/tsl/profiler/utils/tf_xplane_visitor.h"
#include "xla/tsl/profiler/utils/xplane_schema.h"
#include "tensorflow/core/profiler/convert/dcn_utils.h"
namespace tensorflow {
namespace profiler {
namespace {
using tensorflow::profiler::DCN_MESSAGE_INVALID_BAD_KEY;
using tensorflow::profiler::DCN_MESSAGE_INVALID_CLOCK_SKEW;
using tensorflow::profiler::DCN_MESSAGE_VALID;
using tensorflow::profiler::DCN_MESSAGE_VALID_LOOPBACK;
using tensorflow::profiler::XEventBuilder;
using tensorflow::profiler::XEventMetadata;
using tensorflow::profiler::XLineBuilder;
using tensorflow::profiler::XPlane;
using tensorflow::profiler::XPlaneBuilder;
using tensorflow::profiler::XPlaneVisitor;
using tensorflow::profiler::XSpace;
using ::testing::FieldsAre;
using tsl::profiler::kMegaScaleDcnReceive;
using tsl::profiler::kMegaScaleDcnSend;
TEST(DcnAnalysis, SetupMessageInfoTest) {
XSpace space;
XPlane *host_trace = space.add_planes();
XPlaneBuilder host_trace_builder(host_trace);
XEventMetadata *event_metadata_1 =
host_trace_builder.GetOrCreateEventMetadata(1);
event_metadata_1->set_name(std::string(kMegaScaleDcnReceive));
XEventMetadata *event_metadata_2 =
host_trace_builder.GetOrCreateEventMetadata(2);
event_metadata_2->set_name(std::string(kMegaScaleDcnSend));
XPlaneVisitor plane = tsl::profiler::CreateTfXPlaneVisitor(host_trace);
  DcnEventsProcessor dcn_events_processor(/*num_tpu_tensor_cores=*/4,
                                          /*is_megacore=*/false);
dcn_events_processor.SetupMessageInfo(plane);
ASSERT_FALSE(dcn_events_processor.HasDcnMessages(kMegaScaleDcnSend));
ASSERT_TRUE(dcn_events_processor.HasDcnMessages(kMegaScaleDcnReceive));
ASSERT_FALSE(dcn_events_processor.HasDcnMessages("Another Message"));
ASSERT_EQ(dcn_events_processor.MegaScaleMessageId(kMegaScaleDcnReceive), 1);
ASSERT_EQ(dcn_events_processor.MegaScaleMessageId(kMegaScaleDcnSend),
std::nullopt);
}
TEST(DcnAnalysis, CreateMessageTestValidMessages) {
XSpace space;
XPlane *host_trace = space.add_planes();
XPlaneBuilder xplane_builder(host_trace);
XEventMetadata *event_metadata_1 = xplane_builder.GetOrCreateEventMetadata(1);
event_metadata_1->set_name(std::string(kMegaScaleDcnReceive));
XLineBuilder xline_builder_0 = xplane_builder.GetOrCreateLine(0);
XLineBuilder xline_builder_1 = xplane_builder.GetOrCreateLine(1);
XEventBuilder event_builder = xline_builder_0.AddEvent(*event_metadata_1);
event_builder.SetOffsetNs(100000);
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("dcn_label"),
"all-reduce.273_312");
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("dcn_source_slice_id"), 2);
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("dcn_source_per_slice_device_id"),
3);
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("dcn_destination_slice_id"), 1);
event_builder.AddStatValue(*xplane_builder.GetOrCreateStatMetadata(
"dcn_destination_per_slice_device_id"),
3);
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("dcn_chunk"), 0);
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("dcn_loop_index"), 24);
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("duration_us"), 50);
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("payload_size_bytes"), 32768);
event_builder = xline_builder_0.AddEvent(*event_metadata_1);
event_builder.SetOffsetNs(175000);
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("dcn_label"),
"super-collective.1234");
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("dcn_source_slice_id"), 112);
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("dcn_source_per_slice_device_id"),
1);
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("dcn_destination_slice_id"), 34);
event_builder.AddStatValue(*xplane_builder.GetOrCreateStatMetadata(
"dcn_destination_per_slice_device_id"),
2);
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("dcn_chunk"), 4);
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("dcn_loop_index"), 0);
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("duration_us"), 50);
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("payload_size_bytes"), 1);
event_builder = xline_builder_1.AddEvent(*event_metadata_1);
event_builder.SetOffsetNs(150000);
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("dcn_label"), "super-collective");
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("dcn_source_slice_id"), 9);
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("dcn_source_per_slice_device_id"),
3);
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("dcn_destination_slice_id"), 0);
event_builder.AddStatValue(*xplane_builder.GetOrCreateStatMetadata(
"dcn_destination_per_slice_device_id"),
0);
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("duration_us"), 75);
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("payload_size_bytes"), 10);
XPlaneVisitor plane = tsl::profiler::CreateTfXPlaneVisitor(host_trace);
DcnEventsProcessor dcn_events_processor(4, false);
dcn_events_processor.SetupMessageInfo(plane);
dcn_events_processor.ProcessReceiveMessages(plane);
ASSERT_EQ(dcn_events_processor.NumReceivedMessages(), 3);
  EXPECT_THAT(dcn_events_processor.GetMessage(0),
              FieldsAre("all-reduce.273_312",
                        /*source slice*/ 2, /*source tpu*/ 3,
                        /*destination slice*/ 1, /*destination tpu*/ 3,
                        /*start ns*/ 50000, /*end ns*/ 100000,
                        /*duration us*/ 50,
                        /*size bytes*/ 32768, /*chunk*/ 0, /*loop index*/ 24,
                        DCN_MESSAGE_VALID));
  EXPECT_THAT(dcn_events_processor.GetMessage(1),
              FieldsAre("super-collective.1234",
                        /*source slice*/ 112, /*source tpu*/ 1,
                        /*destination slice*/ 34, /*destination tpu*/ 2,
                        /*start ns*/ 125000, /*end ns*/ 175000,
                        /*duration us*/ 50,
                        /*size bytes*/ 1, /*chunk*/ 4, /*loop index*/ 0,
                        DCN_MESSAGE_VALID));
  EXPECT_THAT(
      dcn_events_processor.GetMessage(2),
      FieldsAre("super-collective",
                /*source slice*/ 9, /*source tpu*/ 3,
                /*destination slice*/ 0, /*destination tpu*/ 0,
                /*start ns*/ 75000, /*end ns*/ 150000,
                /*duration us*/ 75,
                /*size bytes*/ 10, /*chunk*/ -1, /*loop index*/ -1,
                DCN_MESSAGE_VALID));
TimestampMap host_ts_map = dcn_events_processor.HostTsMap();
ASSERT_EQ(host_ts_map.size(), 6);
for (const auto &ts_map_item : host_ts_map) {
ASSERT_EQ(ts_map_item.first, ts_map_item.second->timestamp_ns);
if (ts_map_item.first == 50000) {
ASSERT_EQ(ts_map_item.second->duration_ns, 0);
ASSERT_EQ(ts_map_item.second->message_diff, 1);
ASSERT_EQ(ts_map_item.second->size_diff, 32768);
} else if (ts_map_item.first == 125000) {
ASSERT_EQ(ts_map_item.second->duration_ns, 0);
ASSERT_EQ(ts_map_item.second->message_diff, 1);
ASSERT_EQ(ts_map_item.second->size_diff, 1);
} else if (ts_map_item.first == 75000) {
ASSERT_EQ(ts_map_item.second->duration_ns, 0);
ASSERT_EQ(ts_map_item.second->message_diff, 1);
ASSERT_EQ(ts_map_item.second->size_diff, 10);
} else if (ts_map_item.first == 100000) {
ASSERT_EQ(ts_map_item.second->duration_ns, 50000);
ASSERT_EQ(ts_map_item.second->message_diff, -1);
ASSERT_EQ(ts_map_item.second->size_diff, -32768);
} else if (ts_map_item.first == 175000) {
ASSERT_EQ(ts_map_item.second->duration_ns, 50000);
ASSERT_EQ(ts_map_item.second->message_diff, -1);
ASSERT_EQ(ts_map_item.second->size_diff, -1);
} else if (ts_map_item.first == 150000) {
ASSERT_EQ(ts_map_item.second->duration_ns, 75000);
ASSERT_EQ(ts_map_item.second->message_diff, -1);
ASSERT_EQ(ts_map_item.second->size_diff, -10);
} else {
FAIL() << "Unexpected timestamp entry.";
}
}
const std::vector<DcnBurst> &host_bursts =
dcn_events_processor.GetHostBursts();
ASSERT_EQ(host_bursts.size(), 1);
ASSERT_EQ(host_bursts[0].num_messages, 3);
ASSERT_EQ(host_bursts[0].start_timestamp_ns, 50000);
ASSERT_EQ(host_bursts[0].end_timestamp_ns, 175000);
ASSERT_EQ(host_bursts[0].burst_size_bytes, 32779);
ASSERT_EQ(host_bursts[0].max_overlapping_messages, 2);
}
TEST(DcnAnalysis, CreateLoopBackMessageTest) {
XSpace space;
XPlane *host_trace = space.add_planes();
XPlaneBuilder xplane_builder(host_trace);
XEventMetadata *event_metadata_1 = xplane_builder.GetOrCreateEventMetadata(1);
event_metadata_1->set_name(std::string(kMegaScaleDcnReceive));
XLineBuilder xline_builder = xplane_builder.GetOrCreateLine(0);
XEventBuilder event_builder = xline_builder.AddEvent(*event_metadata_1);
event_builder.SetOffsetNs(5000000);
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("dcn_label"), "all-gather.1234");
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("dcn_source_slice_id"), 2);
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("dcn_source_per_slice_device_id"),
3);
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("dcn_destination_slice_id"), 2);
event_builder.AddStatValue(*xplane_builder.GetOrCreateStatMetadata(
"dcn_destination_per_slice_device_id"),
1);
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("dcn_chunk"), 4);
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("dcn_loop_index"), 40);
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("duration_us"), 1000);
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("payload_size_bytes"), 1000);
XPlaneVisitor plane = tsl::profiler::CreateTfXPlaneVisitor(host_trace);
DcnEventsProcessor dcn_events_processor(4, false);
dcn_events_processor.SetupMessageInfo(plane);
dcn_events_processor.ProcessReceiveMessages(plane);
ASSERT_EQ(dcn_events_processor.NumReceivedMessages(), 1);
  EXPECT_THAT(dcn_events_processor.GetMessage(0),
              FieldsAre("all-gather.1234",
                        /*source slice*/ 2, /*source tpu*/ 3,
                        /*destination slice*/ 2, /*destination tpu*/ 1,
                        /*start ns*/ 4000000, /*end ns*/ 5000000,
                        /*duration us*/ 1000,
                        /*size bytes*/ 1000, /*chunk*/ 4, /*loop index*/ 40,
                        DCN_MESSAGE_VALID_LOOPBACK));
}
TEST(DcnAnalysis, CreateZeroDurationMessageTest) {
XSpace space;
XPlane *host_trace = space.add_planes();
XPlaneBuilder xplane_builder(host_trace);
XEventMetadata *event_metadata_1 = xplane_builder.GetOrCreateEventMetadata(1);
event_metadata_1->set_name(std::string(kMegaScaleDcnReceive));
XLineBuilder xline_builder = xplane_builder.GetOrCreateLine(0);
XEventBuilder event_builder = xline_builder.AddEvent(*event_metadata_1);
event_builder.SetOffsetNs(20000);
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("dcn_label"),
"all-reduce.273_312");
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("dcn_source_slice_id"), 2);
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("dcn_source_per_slice_device_id"),
3);
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("dcn_destination_slice_id"), 1);
event_builder.AddStatValue(*xplane_builder.GetOrCreateStatMetadata(
"dcn_destination_per_slice_device_id"),
1);
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("dcn_chunk"), 0);
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("dcn_loop_index"), 25);
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("duration_us"), 0);
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("payload_size_bytes"), 512);
XPlaneVisitor plane = tsl::profiler::CreateTfXPlaneVisitor(host_trace);
DcnEventsProcessor dcn_events_processor(4, false);
dcn_events_processor.SetupMessageInfo(plane);
dcn_events_processor.ProcessReceiveMessages(plane);
  EXPECT_THAT(
      dcn_events_processor.GetMessage(0),
      FieldsAre("all-reduce.273_312",
                /*source slice*/ 2, /*source tpu*/ 3,
                /*destination slice*/ 1, /*destination tpu*/ 1,
                /*start ns*/ 20000, /*end ns*/ 20000,
                /*duration us*/ 0,
                /*size bytes*/ 512, /*chunk*/ 0, /*loop index*/ 25,
                DCN_MESSAGE_INVALID_CLOCK_SKEW));
}
TEST(DcnAnalysis, CreateMissingKeyTest) {
XSpace space;
XPlane *host_trace = space.add_planes();
XPlaneBuilder xplane_builder(host_trace);
XEventMetadata *event_metadata_1 = xplane_builder.GetOrCreateEventMetadata(1);
event_metadata_1->set_name(std::string(kMegaScaleDcnReceive));
XLineBuilder xline_builder = xplane_builder.GetOrCreateLine(0);
XEventBuilder event_builder = xline_builder.AddEvent(*event_metadata_1);
event_builder.SetOffsetNs(50000);
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("duration_us"), 10);
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("payload_size_bytes"), 100);
XPlaneVisitor plane = tsl::profiler::CreateTfXPlaneVisitor(host_trace);
DcnEventsProcessor dcn_events_processor(4, false);
dcn_events_processor.SetupMessageInfo(plane);
dcn_events_processor.ProcessReceiveMessages(plane);
  EXPECT_THAT(
      dcn_events_processor.GetMessage(0),
      FieldsAre(/*collective*/ "",
                /*source slice*/ -1, /*source tpu*/ -1,
                /*destination slice*/ -1, /*destination tpu*/ -1,
                /*start ns*/ 40000, /*end ns*/ 50000,
                /*duration us*/ 10,
                /*size bytes*/ 100, /*chunk*/ -1, /*loop index*/ -1,
                DCN_MESSAGE_INVALID_BAD_KEY));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/profiler/convert/dcn_analysis.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/profiler/convert/dcn_analysis_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
15e7ba7c-a336-42e7-96e0-75df36916842 | cpp | google/tensorstore | dimension_labels | tensorstore/internal/dimension_labels.cc | tensorstore/internal/dimension_labels_test.cc | #include "tensorstore/internal/dimension_labels.h"
#include <stddef.h>
#include <algorithm>
#include <string>
#include <string_view>
#include "absl/container/fixed_array.h"
#include "absl/status/status.h"
#include "tensorstore/rank.h"
#include "tensorstore/util/quote_string.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal {
namespace {
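// Sorts the labels in place, skips the leading run of empty labels (empty
// labels may repeat freely since they sort first), then reports adjacent
// duplicates. Note that a label occurring n times is listed n - 1 times in
// the error message.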
absl::Status ValidateDimensionLabelsAreUniqueImpl(
tensorstore::span<std::string_view> sorted_labels) {
std::sort(sorted_labels.begin(), sorted_labels.end());
size_t i;
for (i = 1; i < sorted_labels.size() && sorted_labels[i].empty(); ++i)
continue;
std::string error;
for (; i < sorted_labels.size(); ++i) {
std::string_view label = sorted_labels[i];
if (label == sorted_labels[i - 1]) {
tensorstore::StrAppend(&error, error.empty() ? "" : ", ",
QuoteString(label));
}
}
if (!error.empty()) {
return absl::InvalidArgumentError(
tensorstore::StrCat("Dimension label(s) ", error, " not unique"));
}
return absl::OkStatus();
}
}
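// Both public overloads copy the labels into a fixed-capacity scratch buffer
// (up to kMaxRank entries inline) so the caller's span is never reordered.
// Sketch:
//   std::vector<std::string> labels{"x", "y", "x"};
//   auto status = ValidateDimensionLabelsAreUnique(labels);  // "x" not unique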
absl::Status ValidateDimensionLabelsAreUnique(
tensorstore::span<const std::string> labels) {
absl::FixedArray<std::string_view, kMaxRank> sorted_labels(labels.begin(),
labels.end());
return ValidateDimensionLabelsAreUniqueImpl(sorted_labels);
}
absl::Status ValidateDimensionLabelsAreUnique(
tensorstore::span<const std::string_view> labels) {
absl::FixedArray<std::string_view, kMaxRank> sorted_labels(labels.begin(),
labels.end());
return ValidateDimensionLabelsAreUniqueImpl(sorted_labels);
}
}
} | #include "tensorstore/internal/dimension_labels.h"
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::MatchesStatus;
using ::tensorstore::internal::ValidateDimensionLabelsAreUnique;
TEST(ValidateDimensionLabelsAreUniqueTest, Basic) {
TENSORSTORE_EXPECT_OK(ValidateDimensionLabelsAreUnique(
std::vector<std::string>{"a", "b", "c"}));
TENSORSTORE_EXPECT_OK(
ValidateDimensionLabelsAreUnique(std::vector<std::string>{"", "", ""}));
TENSORSTORE_EXPECT_OK(ValidateDimensionLabelsAreUnique(
std::vector<std::string>{"a", "b", "", "d", ""}));
TENSORSTORE_EXPECT_OK(
ValidateDimensionLabelsAreUnique(std::vector<std::string>{}));
EXPECT_THAT(ValidateDimensionLabelsAreUnique(
std::vector<std::string>{"a", "b", "c", "a"}),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Dimension label.* \"a\" not unique"));
EXPECT_THAT(ValidateDimensionLabelsAreUnique(
std::vector<std::string>{"a", "b", "c", "b"}),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Dimension label.* \"b\" not unique"));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/dimension_labels.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/dimension_labels_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
2812a750-abac-41e2-8608-97c9f26bc2c8 | cpp | tensorflow/tensorflow | group_events | third_party/xla/xla/tsl/profiler/utils/group_events.cc | third_party/xla/xla/tsl/profiler/utils/group_events_test.cc | #include "xla/tsl/profiler/utils/group_events.h"
#include <algorithm>
#include <cstdint>
#include <functional>
#include <iterator>
#include <map>
#include <memory>
#include <optional>
#include <queue>
#include <string>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/functional/bind_front.h"
#include "absl/strings/str_cat.h"
#include "xla/tsl/lib/gtl/map_util.h"
#include "xla/tsl/profiler/utils/tf_xplane_visitor.h"
#include "xla/tsl/profiler/utils/xplane_builder.h"
#include "xla/tsl/profiler/utils/xplane_schema.h"
#include "xla/tsl/profiler/utils/xplane_utils.h"
#include "xla/tsl/profiler/utils/xplane_visitor.h"
#include "tsl/platform/dso_loader.h"
#include "tsl/platform/env.h"
#include "tsl/platform/types.h"
namespace tsl {
namespace profiler {
void CreateStatMetadata(XPlane* plane) {
XPlaneBuilder builder(plane);
builder.GetOrCreateStatMetadata(GetStatTypeStr(StatType::kGroupId));
builder.GetOrCreateStatMetadata(GetStatTypeStr(StatType::kStepName));
builder.GetOrCreateStatMetadata(GetStatTypeStr(StatType::kIsEager));
}
std::optional<int64_t> GetKernelEventType(bool is_host_plane,
const XEventVisitor& event) {
if (event.GetStat(StatType::kCorrelationId).has_value()) {
return is_host_plane ? HostEventType::kKernelLaunch
: HostEventType::kKernelExecute;
}
return std::nullopt;
}
int64_t GetEventType(bool is_host_plane, const XEventVisitor& event) {
if (std::optional<int64_t> event_type = event.Type()) {
return *event_type;
} else if (std::optional<int64_t> kernel_event_type =
GetKernelEventType(is_host_plane, event)) {
return *kernel_event_type;
} else {
return HostEventType::kUnknownHostEventType;
}
}
bool IsLegacyRootEvent(const XEventVisitor& event) {
return event.Type() == HostEventType::kTraceContext;
}
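// Stats relevant to grouping, extracted from a single XEvent: producer and
// consumer context (type, id) pairs, the explicit root level (defaulted to 1
// for legacy TraceContext roots), and whether the event is asynchronous.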
struct GroupingEventStats {
explicit GroupingEventStats(const XEventVisitor& event);
std::optional<int> producer_type;
std::optional<uint64_t> producer_id;
std::optional<int> consumer_type;
std::optional<uint64_t> consumer_id;
std::optional<int> root_level;
bool is_async = false;
};
GroupingEventStats::GroupingEventStats(const XEventVisitor& event) {
  std::optional<int64_t> step_id;  // Extracted below but currently unused.
event.ForEachStat([&](const XStatVisitor& stat) {
if (!stat.Type().has_value()) return;
switch (*stat.Type()) {
case StatType::kProducerType:
producer_type = stat.IntValue();
break;
case StatType::kProducerId:
producer_id = stat.IntOrUintValue();
break;
case StatType::kConsumerType:
consumer_type = stat.IntValue();
break;
case StatType::kConsumerId:
consumer_id = stat.IntOrUintValue();
break;
case StatType::kIsRoot:
root_level = stat.IntValue();
break;
case StatType::kIsAsync:
is_async = stat.BoolValue();
break;
case StatType::kStepId:
step_id = stat.IntValue();
break;
default:
break;
}
});
if (!root_level.has_value() && IsLegacyRootEvent(event)) {
root_level = 1;
}
}
void SetContextGroup(const GroupingEventStats& stats, EventNode* event,
ContextGroupMap* context_groups) {
if (stats.producer_type.has_value() && stats.producer_id.has_value()) {
((*context_groups)[*stats.producer_type][*stats.producer_id])
.producers.push_back(event);
}
if (stats.consumer_type.has_value() && stats.consumer_id.has_value()) {
((*context_groups)[*stats.consumer_type][*stats.consumer_id])
.consumers.push_back(event);
}
}
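// Links every producer event to every consumer event sharing a
// (context type, context id) pair. Groups where both sides reach 64+ events
// are skipped, with a rate-limited warning, to avoid a quadratic edge
// blow-up.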
void ConnectContextGroups(const ContextGroupMap& context_groups) {
for (auto& type_id_group : context_groups) {
for (auto& id_group : type_id_group.second) {
const ContextGroup& group = id_group.second;
if (group.producers.size() >= 64 && group.consumers.size() >= 64) {
LOG_EVERY_N(WARNING, 1000)
<< "id:" << id_group.first
<< " producers:" << group.producers.size() << " : "
<< group.producers[0]->GetEventVisitor().Name()
<< " consumers:" << group.consumers.size() << " : "
<< group.consumers[0]->GetEventVisitor().Name();
continue;
}
for (EventNode* parent : group.producers) {
for (EventNode* child : group.consumers) {
parent->AddChild(child);
}
}
}
}
}
bool IsImplicitRootEvent(const XEventVisitor& event) {
static const auto* const kImplicitRootEvents =
new absl::flat_hash_set<int64_t>{
HostEventType::kFunctionRun, HostEventType::kSessionRun,
HostEventType::kRunGraph, HostEventType::kExecutorStateProcess};
return event.Type().has_value() &&
kImplicitRootEvents->contains(*event.Type());
}
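// Propagates `group_id` from a root event through its subtree, records the
// resulting group name in `group_metadata_map`, and attaches a step-name
// stat to explicit (non-implicit) roots.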
void ProcessRootEvent(int64_t group_id, EventNode* root_event,
GroupMetadataMap* group_metadata_map) {
root_event->PropagateGroupId(group_id, group_metadata_map);
std::string group_name = root_event->GetGroupName();
if (!IsImplicitRootEvent(root_event->GetEventVisitor())) {
root_event->AddStepName(group_name);
}
(*group_metadata_map)[group_id].name = std::move(group_name);
}
using Comparator = std::function<bool(const EventNode*)>;
const EventNode* FindParentWithComparator(const Comparator& comparator,
const EventNode* node,
bool include_self) {
std::queue<const EventNode*> nodes;
absl::flat_hash_set<const EventNode*> seen = {node};
if (include_self) {
nodes.push(node);
} else {
for (const EventNode* parent : node->GetParents()) {
nodes.push(parent);
seen.insert(parent);
}
}
while (!nodes.empty()) {
const EventNode* node = nodes.front();
nodes.pop();
if (comparator(node)) return node;
for (const EventNode* parent : node->GetParents()) {
if (seen.contains(parent)) continue;
nodes.push(parent);
seen.insert(parent);
}
}
return nullptr;
}
bool IsIteratorEventType(std::optional<int64_t> event_type) {
return event_type == HostEventType::kIterator ||
event_type == HostEventType::kDeviceInputPipelineSecondIterator;
}
bool CheckLoopOp(const XSpace& space) {
for (const XPlane& plane : space.planes()) {
for (const auto& event_metadata : plane.event_metadata()) {
std::optional<int64_t> event_type =
FindHostEventType(event_metadata.second.name());
if (!event_type.has_value()) continue;
switch (*event_type) {
case HostEventType::kWhileOpEvalCond:
case HostEventType::kWhileOpStartBody:
case HostEventType::kForOp:
case HostEventType::kParallelForOp:
case HostEventType::kForeverOp:
return true;
default:
break;
}
}
}
return false;
}
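// Breadth-first search up the parent chain (starting at this event) for the
// first stat of `stat_type`.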
std::optional<XStatVisitor> EventNode::GetContextStat(int64_t stat_type) const {
std::queue<const EventNode*> nodes;
absl::flat_hash_set<const EventNode*> seen = {this};
nodes.push(this);
while (!nodes.empty()) {
const EventNode* node = nodes.front();
nodes.pop();
if (std::optional<XStatVisitor> stat = node->visitor_.GetStat(stat_type)) {
return stat;
}
for (const EventNode* parent : node->GetParents()) {
if (seen.contains(parent)) continue;
nodes.push(parent);
seen.insert(parent);
}
}
return std::nullopt;
}
std::string EventNode::GetGroupName() const {
std::string name;
if (std::optional<XStatVisitor> stat = GetContextStat(StatType::kGraphType)) {
absl::StrAppend(&name, stat->StrOrRefValue(), " ");
} else if (!(IsImplicitRootEvent(visitor_))) {
absl::StrAppend(&name, GetEventVisitor().Name(), " ");
}
int64_t step_num = group_id_.value_or(0);
if (std::optional<XStatVisitor> stat = GetContextStat(StatType::kIterNum)) {
step_num = stat->IntValue();
} else if (std::optional<XStatVisitor> stat =
GetContextStat(StatType::kStepNum)) {
step_num = stat->IntValue();
}
absl::StrAppend(&name, step_num);
return name;
}
XStat* EventNode::FindOrAddStatByType(int64_t stat_type) {
const XPlaneVisitor& plane = visitor_.Plane();
const XStatMetadata* stat_metadata = plane.GetStatMetadataByType(stat_type);
DCHECK(stat_metadata != nullptr);
auto* raw_event = const_cast<XEvent*>(&visitor_.RawEvent());
return FindOrAddMutableStat(*stat_metadata, raw_event);
}
void EventNode::SetGroupId(int64_t group_id) {
group_id_ = group_id;
FindOrAddStatByType(StatType::kGroupId)->set_int64_value(group_id);
}
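// Breadth-first traversal over descendants assigning `group_id`. A node that
// already belongs to a different group is not descended into; instead a
// parent/child relation between the two groups is recorded.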
void EventNode::PropagateGroupId(int64_t group_id,
GroupMetadataMap* group_metadata_map) {
std::queue<EventNode*> nodes;
absl::flat_hash_set<EventNode*> seen = {this};
nodes.push(this);
while (!nodes.empty()) {
EventNode* node = nodes.front();
nodes.pop();
std::optional<int64_t> node_group_id = node->GetGroupId();
if (node_group_id.has_value()) {
if (*node_group_id != group_id) {
(*group_metadata_map)[group_id].children.insert(*node_group_id);
(*group_metadata_map)[*node_group_id].parents.insert(group_id);
}
} else {
node->SetGroupId(group_id);
for (EventNode* child : node->GetChildren()) {
if (seen.contains(child)) continue;
nodes.push(child);
seen.insert(child);
}
}
}
}
void EventNode::AddStepName(absl::string_view step_name) {
FindOrAddStatByType(StatType::kStepName)
->set_str_value(step_name.data(), step_name.size());
}
void EventNode::SetIsEager(bool is_eager) {
FindOrAddStatByType(StatType::kIsEager)->set_int64_value(is_eager ? 1 : 0);
}
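// Eagerness: an event is eager iff it runs under an EagerKernelExecute whose
// kIsFunc stat is 0. A missing kIsFunc stat is treated as a compiled
// function, i.e. not eager.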
bool EventNode::IsCompiledFunc() const {
auto is_func = visitor_.GetStat(StatType::kIsFunc);
return !is_func || is_func->IntValue();
}
bool EventNode::IsEager() const {
const EventNode* node = FindParent(HostEventType::kEagerKernelExecute);
if (node == nullptr) {
return false;
}
return !node->IsCompiledFunc();
}
const EventNode* EventNode::FindParent(int64_t event_type) const {
return FindParentWithComparator(
[event_type](const EventNode* node) {
return node->GetEventVisitor().Type() == event_type;
},
this, /*include_self=*/true);
}
void EventForest::FindEventNodeAndApply(
const int64_t event_type, const std::vector<int64_t>& stat_types,
const std::function<void(EventNode&, const std::vector<uint64>&)>& cb) {
if (auto* event_node_list = gtl::FindOrNull(event_node_map_, event_type)) {
for (EventNode& event_node : *event_node_list) {
std::vector<uint64> stats;
for (const auto stat_type : stat_types) {
std::optional<XStatVisitor> stat =
event_node.GetEventVisitor().GetStat(stat_type);
if (!stat) break;
stats.push_back(stat->IntOrUintValue());
}
if (stats.size() == stat_types.size()) {
cb(event_node, stats);
}
}
}
}
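// Nests the events of each line using a stack of open ancestors: an event
// becomes a child of the innermost earlier event whose timespan includes it.
// Async events are attached only via context groups, never by nesting.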
void EventForest::ConnectIntraThread(XPlane* plane, XPlaneVisitor* visitor,
ContextGroupMap* context_groups) {
bool is_host_plane = (visitor->Name() == kHostThreadsPlaneName);
for (auto& line : *plane->mutable_lines()) {
std::vector<EventNode*> parent_nodes;
for (auto& event : *line.mutable_events()) {
XEventVisitor event_visitor(visitor, &line, &event);
int64_t event_type = GetEventType(is_host_plane, event_visitor);
EventNode* cur_node =
&event_node_map_[event_type].emplace_back(std::move(event_visitor));
GroupingEventStats stats(cur_node->GetEventVisitor());
if (stats.root_level.has_value()) {
cur_node->SetRootLevel(*stats.root_level);
}
SetContextGroup(stats, cur_node, context_groups);
if (!stats.is_async) {
while (!parent_nodes.empty()) {
EventNode* parent_node = parent_nodes.back();
if (parent_node->GetEventVisitor().GetTimespan().Includes(
cur_node->GetEventVisitor().GetTimespan())) {
parent_node->AddChild(cur_node);
break;
} else {
parent_nodes.pop_back();
}
}
parent_nodes.push_back(cur_node);
}
}
}
}
void EventForest::ConnectInterThread(
const std::vector<InterThreadConnectInfo>& connect_info_list) {
for (const auto& connect_info : connect_info_list) {
absl::flat_hash_map<std::vector<uint64>, EventNode*> connect_map;
const std::vector<int64_t>& parent_stat_types =
connect_info.parent_stat_types;
const std::vector<int64_t>* child_stat_types =
&connect_info.child_stat_types;
if (child_stat_types->empty()) {
child_stat_types = &parent_stat_types;
}
FindEventNodeAndApply(connect_info.parent_event_type, parent_stat_types,
[&connect_map](EventNode& event_node,
const std::vector<uint64>& stats) {
connect_map[stats] = &event_node;
});
FindEventNodeAndApply(
connect_info.child_event_type, *child_stat_types,
[&connect_map](EventNode& event_node,
const std::vector<uint64>& stats) {
if (auto parent_event_node = gtl::FindPtrOrNull(connect_map, stats)) {
parent_event_node->AddChild(&event_node);
}
});
}
}
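// A root event starts a new group only if it is not already grouped and has
// no ancestor at the same root level that would subsume it.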
bool RootNeedsGrouping(const EventNode* root) {
if (root->GetGroupId().has_value()) return false;
const EventNode* root_parent = FindParentWithComparator(
[root](const EventNode* parent) {
return parent->RootLevel() == root->RootLevel();
},
root,
/*include_self=*/false);
return root_parent == nullptr;
}
void SortRootEventList(EventList* event_list) {
absl::c_sort(*event_list, [](const EventNode* e1, const EventNode* e2) {
return e1->RootLevel() == e2->RootLevel()
? *e1 < *e2
: e1->RootLevel() > e2->RootLevel();
});
}
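// If TensorFlow-loop iterations were detected, they alone define the groups.
// Otherwise every root-level event (excluding tf.data steps) is sorted by
// root level and start time, and each ungrouped top-level root starts a new
// group.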
void EventForest::CreateEventGroups() {
int64_t group_id = 0;
if (!tf_loop_root_events_.empty()) {
for (EventNode* root_event : tf_loop_root_events_) {
ProcessRootEvent(group_id++, root_event, &group_metadata_map_);
}
return;
}
EventList root_events;
for (auto& [event_type, events] : event_node_map_) {
for (EventNode& event : events) {
if (!event.RootLevel()) continue;
std::optional<XStatVisitor> step_id_stat =
event.GetEventVisitor().GetStat(StatType::kStepId);
if (step_id_stat && tf_data_step_ids_.contains(step_id_stat->IntValue()))
continue;
root_events.push_back(&event);
}
}
SortRootEventList(&root_events);
for (EventNode* root_event : root_events) {
if (RootNeedsGrouping(root_event)) {
ProcessRootEvent(group_id++, root_event, &group_metadata_map_);
}
}
}
void EventForest::MarkEagerlyExecutedGpuKernels() {
auto kernel_execute_event_node_list =
gtl::FindOrNull(event_node_map_, HostEventType::kKernelExecute);
if (!kernel_execute_event_node_list) return;
for (EventNode& kernel_execute_event_node : *kernel_execute_event_node_list) {
kernel_execute_event_node.SetIsEager(kernel_execute_event_node.IsEager());
}
}
void EventForest::MarkEagerlyExecutedCpuTfOps() {
auto tf_op_run_event_node_list =
gtl::FindOrNull(event_node_map_, HostEventType::kTfOpRun);
if (!tf_op_run_event_node_list) return;
for (EventNode& tf_op_run_event_node : *tf_op_run_event_node_list) {
tf_op_run_event_node.SetIsEager(tf_op_run_event_node.IsEager());
}
}
void EventForest::ProcessTfDataSteps() {
const int64_t tf_data_event_types[] = {
HostEventType::kTfDataCapturedFunctionRun,
HostEventType::kTfDataCapturedFunctionRunAsync,
HostEventType::kTfDataCapturedFunctionRunInstantiated,
HostEventType::kTfDataCapturedFunctionRunWithBorrowedArgs};
for (const int64_t tf_data_event_type : tf_data_event_types) {
auto tf_data_events = gtl::FindOrNull(event_node_map_, tf_data_event_type);
if (!tf_data_events) continue;
for (const EventNode& tf_data_event : *tf_data_events) {
std::optional<XStatVisitor> step_id_stat =
tf_data_event.GetEventVisitor().GetStat(StatType::kStepId);
if (!step_id_stat) continue;
tf_data_step_ids_.insert(step_id_stat->IntValue());
}
}
}
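// Reconstructs TensorFlow while-loop iterations from ExecutorStateProcess
// events keyed by (step_id, iter_num). The earliest event of each iteration
// becomes the iteration's root; the rest become its children. Loops with a
// single iteration numbered 0 are skipped as likely non-loops.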
void EventForest::ProcessTensorFlowLoop() {
struct TensorFlowLoopIteration {
EventNode* first_event = nullptr;
std::vector<EventNode*> events;
};
using TensorFlowLoop =
absl::flat_hash_map<int64_t /*iter_num*/, TensorFlowLoopIteration>;
absl::flat_hash_map<int64_t /*step_id*/, TensorFlowLoop> tf_loops;
auto executor_event_list =
gtl::FindOrNull(event_node_map_, HostEventType::kExecutorStateProcess);
if (!executor_event_list) return;
for (EventNode& executor_event : *executor_event_list) {
std::optional<XStatVisitor> step_id_stat =
executor_event.GetEventVisitor().GetStat(StatType::kStepId);
std::optional<XStatVisitor> iter_num_stat =
executor_event.GetEventVisitor().GetStat(StatType::kIterNum);
if (!step_id_stat || !iter_num_stat) continue;
int64_t step_id = step_id_stat->IntValue();
if (tf_data_step_ids_.contains(step_id)) continue;
TensorFlowLoop& tf_loop = tf_loops[step_id];
TensorFlowLoopIteration& iteration = tf_loop[iter_num_stat->IntValue()];
if (!iteration.first_event || executor_event < *iteration.first_event) {
iteration.first_event = &executor_event;
}
iteration.events.push_back(&executor_event);
}
std::vector<const TensorFlowLoopIteration*> iters;
for (const auto& step_id_and_tf_loop : tf_loops) {
const TensorFlowLoop& tf_loop = step_id_and_tf_loop.second;
if (tf_loop.size() == 1 && tf_loop.contains(0)) continue;
for (const auto& iter_num_and_iter : tf_loop) {
iters.push_back(&iter_num_and_iter.second);
}
}
absl::c_sort(iters, [](const auto& iter1, const auto& iter2) {
return *iter1->first_event < *iter2->first_event;
});
for (const TensorFlowLoopIteration* iter : iters) {
EventNode* root_event = iter->first_event;
tf_loop_root_events_.push_back(root_event);
for (EventNode* event : iter->events) {
if (event == root_event) continue;
root_event->AddChild(event);
}
}
}
void EventForest::AddPlane(
const std::function<XPlaneVisitor(const XPlane*)> visitor_factory,
XPlane* plane) {
CreateStatMetadata(plane);
planes_.push_back({plane, visitor_factory(plane)});
}
void EventForest::AddSpace(
const std::function<XPlaneVisitor(const XPlane*)> visitor_factory,
XSpace* space) {
for (XPlane& plane : *space->mutable_planes()) {
AddPlane(visitor_factory, &plane);
}
}
void EventForest::AddPlanes(
const std::function<XPlaneVisitor(const XPlane*)> visitor_factory,
const std::vector<XPlane*>& planes) {
for (XPlane* plane : planes) {
AddPlane(visitor_factory, plane);
}
}
void EventForest::ConnectEvents(
const std::vector<InterThreadConnectInfo>& connect_info_list) {
ContextGroupMap context_groups;
for (auto& plane_visitor : planes_) {
ConnectIntraThread(plane_visitor.first, &plane_visitor.second,
&context_groups);
}
ConnectInterThread(connect_info_list);
ConnectContextGroups(context_groups);
}
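// Matches tf.data producer and consumer events across threads by
// (iterator_id, element_id) and connects each consumer iterator to the
// producer iterators that generated the element it consumed.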
void EventForest::ConnectTfDataEvents() {
absl::flat_hash_map<
std::pair<int64_t /*iterator_id*/, int64_t /*element_id*/>,
std::vector<EventNode*>>
produce_iterator_map;
uint64 num_producers = 0;
for (HostEventType event_type :
{HostEventType::kPrefetchProduce,
HostEventType::kParallelInterleaveProduce,
HostEventType::kParallelMapProduce, HostEventType::kMapAndBatchProduce,
HostEventType::kParseExampleProduce,
HostEventType::kParallelBatchProduce}) {
auto produce_event_list = gtl::FindOrNull(event_node_map_, event_type);
if (!produce_event_list) continue;
VLOG(1) << produce_event_list->size() << " "
<< GetHostEventTypeStr(event_type) << " events found.";
for (EventNode& produce_event : *produce_event_list) {
std::optional<XStatVisitor> element_id =
produce_event.GetEventVisitor().GetStat(StatType::kElementId);
if (!element_id.has_value()) continue;
for (EventNode* produce_iterator : produce_event.GetChildren()) {
if (IsIteratorEventType(produce_iterator->GetEventVisitor().Type())) {
std::optional<XStatVisitor> iterator_id =
produce_iterator->GetEventVisitor().GetStat(StatType::kParentId);
if (!iterator_id.has_value()) break;
produce_iterator_map[{iterator_id->IntValue(),
element_id->IntValue()}]
.push_back(produce_iterator);
++num_producers;
break;
}
}
}
}
VLOG(1) << num_producers << " producer iterators found.";
uint64 num_matched = 0;
for (HostEventType event_type :
{HostEventType::kPrefetchConsume,
HostEventType::kParallelInterleaveConsume,
HostEventType::kParallelMapConsume, HostEventType::kMapAndBatchConsume,
HostEventType::kParseExampleConsume,
HostEventType::kParallelBatchConsume}) {
auto consume_event_list = gtl::FindOrNull(event_node_map_, event_type);
if (!consume_event_list) continue;
VLOG(1) << consume_event_list->size() << " "
<< GetHostEventTypeStr(event_type) << " events found.";
for (EventNode& consume_event : *consume_event_list) {
std::optional<XStatVisitor> element_id =
consume_event.GetEventVisitor().GetStat(StatType::kElementId);
if (!element_id.has_value()) continue;
if (consume_event.GetParents().empty()) continue;
EventNode* consume_iterator = consume_event.GetParents().at(0);
if (!consume_iterator ||
!IsIteratorEventType(consume_iterator->GetEventVisitor().Type())) {
continue;
}
std::optional<XStatVisitor> iterator_id =
consume_iterator->GetEventVisitor().GetStat(StatType::kStepId);
if (!iterator_id.has_value()) continue;
if (auto produce_iterators = gtl::FindOrNull(
produce_iterator_map, std::make_pair(iterator_id->IntValue(),
element_id->IntValue()))) {
for (EventNode* produce_iterator : *produce_iterators) {
consume_iterator->AddChild(produce_iterator);
++num_matched;
}
}
}
}
VLOG(1) << num_matched << " consumer iterators matched.";
}
void EventForest::GroupEvents() {
ProcessTfDataSteps();
ProcessTensorFlowLoop();
CreateEventGroups();
MarkEagerlyExecutedGpuKernels();
MarkEagerlyExecutedCpuTfOps();
}
std::vector<InterThreadConnectInfo> CreateInterThreadConnectInfoList() {
std::vector<InterThreadConnectInfo> connect_info_list = {
{HostEventType::kExecutorStateProcess,
HostEventType::kIteratorGetNextOp,
{StatType::kStepId, StatType::kIterNum}},
{HostEventType::kExecutorStateProcess,
HostEventType::kIteratorGetNextAsOptionalOp,
{StatType::kStepId, StatType::kIterNum}},
{HostEventType::kKernelLaunch,
HostEventType::kKernelExecute,
{StatType::kCorrelationId}}};
return connect_info_list;
}
void GroupTfEvents(XSpace* space, EventForest* event_forest) {
if (CheckLoopOp(*space)) {
return;
}
std::vector<InterThreadConnectInfo> connect_info_list =
CreateInterThreadConnectInfoList();
event_forest->AddSpace(CreateTfXPlaneVisitor, space);
event_forest->ConnectEvents(connect_info_list);
event_forest->GroupEvents();
}
void GroupTfEvents(XSpace* space) {
EventForest event_forest;
GroupTfEvents(space, &event_forest);
}
void AddGroupMetadataToStepEvents(const GroupMetadataMap& group_metadata_map,
XLineBuilder& line) {
if (group_metadata_map.empty()) return;
XPlaneBuilder* plane = line.Plane();
const XStatMetadata* group_id_stat_metadata =
plane->GetStatMetadata(GetStatTypeStr(StatType::kGroupId));
if (group_id_stat_metadata == nullptr) return;
const XStatMetadata* step_name_stat_metadata =
plane->GetOrCreateStatMetadata(GetStatTypeStr(StatType::kStepName));
line.ForEachEvent([&](XEventBuilder event) {
const XStat* group_id_stat = event.GetStat(*group_id_stat_metadata);
if (group_id_stat != nullptr) {
int64_t group_id = group_id_stat->int64_value();
if (const GroupMetadata* group_metadata =
gtl::FindOrNull(group_metadata_map, group_id)) {
event.AddStatValue(*step_name_stat_metadata, group_metadata->name);
}
}
});
}
std::optional<int64_t> GetGroupId(const XEventVisitor& event,
const XStatMetadata& group_id_stat_metadata) {
if (auto group_id_stat =
event.GetStat(StatType::kGroupId, group_id_stat_metadata)) {
return group_id_stat->IntValue();
}
return std::nullopt;
}
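// Tracks which group-defining event on a line, if any, overlaps a query
// timespan. The last overlapping event is cached so that successive,
// time-ordered queries avoid rescanning the line.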
class GroupQueue {
public:
GroupQueue(const XPlaneVisitor* plane, const XLine* line,
const XStatMetadata* group_id_stat_metadata)
: group_queue_(plane, line),
group_id_stat_metadata_(group_id_stat_metadata) {}
std::optional<int64_t> OverlappingGroupId(Timespan timespan) {
if (!group_event_visitor_ ||
!group_event_visitor_->GetTimespan().Overlaps(timespan)) {
group_event_visitor_ = group_queue_.GetOverlappingEvent(timespan);
if (group_event_visitor_) {
group_id_ = GetGroupId(*group_event_visitor_, *group_id_stat_metadata_);
} else {
group_id_.reset();
}
}
return group_id_;
}
private:
XEventContextTracker group_queue_;
std::optional<XEventVisitor> group_event_visitor_;
std::optional<int64_t> group_id_;
const XStatMetadata* group_id_stat_metadata_;
};
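// Coalesces consecutive step events that share a group id into one event
// (extending its end timestamp) and drops step events without a group id,
// unless doing so would empty the line.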
void MergeHostSteps(const XStatMetadata& group_id_stat_metadata,
const XPlaneVisitor& plane_visitor,
XPlaneBuilder* plane_builder, XLine* step_line) {
std::optional<int64_t> merged_group_id;
std::optional<XEventBuilder> merged_step_builder;
absl::flat_hash_set<const XEvent*> events_to_remove;
for (XEvent& step_event : *step_line->mutable_events()) {
XEventVisitor step_visitor(&plane_visitor, step_line, &step_event);
auto group_id = GetGroupId(step_visitor, group_id_stat_metadata);
if (!group_id) {
merged_group_id.reset();
merged_step_builder.reset();
events_to_remove.insert(&step_event);
} else if (merged_group_id != group_id) {
merged_group_id = group_id;
merged_step_builder.emplace(step_line, plane_builder, &step_event);
} else {
merged_step_builder->SetEndTimestampPs(step_visitor.EndTimestampPs());
events_to_remove.insert(&step_event);
}
}
if (events_to_remove.size() < step_line->events_size()) {
RemoveEvents(step_line, events_to_remove);
}
}
void GroupLine(const XStatMetadata& group_id_stat_metadata,
const XPlaneVisitor& plane_visitor, const XLine& group_line,
XPlaneBuilder* plane_builder, XLine* line) {
GroupQueue group_queue(&plane_visitor, &group_line, &group_id_stat_metadata);
for (XEvent& event : *line->mutable_events()) {
XEventBuilder event_builder(line, plane_builder, &event);
if (auto group_id =
group_queue.OverlappingGroupId(event_builder.GetTimespan())) {
event_builder.AddStatValue(group_id_stat_metadata, *group_id);
}
}
}
void GroupHostAndPlanes(
tensorflow::profiler::XSpace* space,
const std::vector<tensorflow::profiler::XPlane*>& device_traces,
EventForest* event_forest) {
std::vector<InterThreadConnectInfo> connect_info_list =
CreateInterThreadConnectInfoList();
event_forest->AddSpace(CreateTfXPlaneVisitor, space);
event_forest->AddPlanes(CreateTfXPlaneVisitor, device_traces);
event_forest->ConnectEvents(connect_info_list);
event_forest->GroupEvents();
}
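// Device-plane grouping: "XLA Modules" events define the groups; the "Steps"
// line and all other lines inherit the group id of the overlapping module
// event. When the step line has more events than the module line (a
// device-side loop), grouping is skipped entirely.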
void GroupXplaneEvents(tensorflow::profiler::XPlane* plane,
const GroupMetadataMap& group_metadata_map) {
XLine* module_line = nullptr;
XLine* step_line = nullptr;
std::vector<XLine*> other_lines;
for (XLine& line : *plane->mutable_lines()) {
if (line.name() == "XLA Modules") {
module_line = &line;
} else if (line.name() == "Steps") {
step_line = &line;
} else {
other_lines.push_back(&line);
}
}
if (!module_line) return;
XPlaneBuilder plane_builder(plane);
const XStatMetadata* group_id_stat_metadata =
plane_builder.GetOrCreateStatMetadata(GetStatTypeStr(StatType::kGroupId));
XPlaneVisitor plane_visitor = CreateTfXPlaneVisitor(plane);
const XLine* group_line = module_line;
if (step_line) {
bool device_loop = (step_line->events_size() > module_line->events_size());
if (device_loop) {
group_line = nullptr;
} else {
if (group_line) {
GroupLine(*group_id_stat_metadata, plane_visitor, *group_line,
&plane_builder, step_line);
MergeHostSteps(*group_id_stat_metadata, plane_visitor, &plane_builder,
step_line);
XLineBuilder step_line_builder(step_line, &plane_builder);
AddGroupMetadataToStepEvents(group_metadata_map, step_line_builder);
}
}
}
if (group_line) {
for (XLine* line : other_lines) {
GroupLine(*group_id_stat_metadata, plane_visitor, *group_line,
&plane_builder, line);
}
}
}
void GroupTpuEventsOSS(
tensorflow::profiler::XSpace* space,
const std::vector<tensorflow::profiler::XPlane*>& device_traces,
EventForest* event_forest) {
if (CheckLoopOp(*space)) {
return;
}
GroupHostAndPlanes(space, device_traces, event_forest);
if (device_traces.empty()) return;
const GroupMetadataMap& group_metadata_map =
event_forest->GetGroupMetadataMap();
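// Each device plane is grouped on its own worker thread; the Thread
// destructors join all workers before this function returns.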
std::vector<std::unique_ptr<Thread>> threads;
ThreadOptions thread_options;
threads.reserve(device_traces.size());
for (XPlane* plane : device_traces) {
threads.emplace_back(Env::Default()->StartThread(
thread_options, "group_xplane_events",
absl::bind_front(GroupXplaneEvents, plane,
std::ref(group_metadata_map))));
}
}
}
} | #include "xla/tsl/profiler/utils/group_events.h"
#include <optional>
#include "absl/container/flat_hash_map.h"
#include "absl/strings/string_view.h"
#include "xla/tsl/profiler/utils/tf_xplane_visitor.h"
#include "xla/tsl/profiler/utils/xplane_builder.h"
#include "xla/tsl/profiler/utils/xplane_schema.h"
#include "xla/tsl/profiler/utils/xplane_test_utils.h"
#include "xla/tsl/profiler/utils/xplane_visitor.h"
#include "tsl/platform/test.h"
#include "tsl/platform/types.h"
#include "tsl/profiler/protobuf/xplane.pb.h"
namespace tsl {
namespace profiler {
namespace {
constexpr int64_t kTfExecutor = static_cast<int64_t>(ContextType::kTfExecutor);
TEST(GroupEventsTest, GroupGpuTraceLegacyRootTest) {
constexpr int64_t kStepNum = 123;
constexpr int64_t kStepId = 0;
constexpr int64_t kCorrelationId = 100;
XSpace space;
XPlaneBuilder host_plane_builder(GetOrCreateHostXPlane(&space));
host_plane_builder.ReserveLines(2);
auto main_thread = host_plane_builder.GetOrCreateLine(0);
CreateXEvent(
&host_plane_builder, &main_thread, HostEventType::kTraceContext, 0, 100,
{{StatType::kGraphType, "train"}, {StatType::kStepNum, kStepNum}});
CreateXEvent(&host_plane_builder, &main_thread, HostEventType::kFunctionRun,
10, 90,
{{StatType::kStepId, kStepId},
{StatType::kProducerType, kTfExecutor},
{StatType::kProducerId, kStepId}});
auto tf_executor_thread = host_plane_builder.GetOrCreateLine(1);
CreateXEvent(&host_plane_builder, &tf_executor_thread,
HostEventType::kExecutorStateProcess, 20, 80,
{{StatType::kStepId, kStepId},
{StatType::kConsumerType, kTfExecutor},
{StatType::kConsumerId, kStepId}});
CreateXEvent(&host_plane_builder, &tf_executor_thread, "matmul", 30, 70,
{{StatType::kCorrelationId, kCorrelationId}});
XPlane* device_plane = space.add_planes();
XPlaneBuilder device_plane_builder(device_plane);
device_plane_builder.ReserveLines(1);
auto stream = device_plane_builder.GetOrCreateLine(0);
CreateXEvent(&device_plane_builder, &stream, "matmul", 200, 300,
{{StatType::kCorrelationId, kCorrelationId}});
EventForest event_forest;
GroupTfEvents(&space, &event_forest);
const GroupMetadataMap& group_metadata_map =
event_forest.GetGroupMetadataMap();
XPlaneVisitor device_plane_visitor = CreateTfXPlaneVisitor(device_plane);
EXPECT_EQ(device_plane->lines(0).events(0).stats_size(), 3);
EXPECT_EQ(device_plane_visitor.GetStatType(
device_plane->lines(0).events(0).stats(1).metadata_id()),
StatType::kGroupId);
EXPECT_EQ(group_metadata_map.size(), 1);
EXPECT_EQ(group_metadata_map.at(0).name, "train 123");
}
TEST(GroupEventsTest, GroupGpuTraceTest) {
constexpr int64_t kStepNum = 123;
constexpr int64_t kStepId = 0;
constexpr int64_t kCorrelationId = 100;
XSpace space;
XPlaneBuilder host_plane_builder(GetOrCreateHostXPlane(&space));
host_plane_builder.ReserveLines(2);
auto main_thread = host_plane_builder.GetOrCreateLine(0);
CreateXEvent(
&host_plane_builder, &main_thread, "train", 0, 100,
{{StatType::kStepNum, kStepNum}, {StatType::kIsRoot, int64_t{1}}});
CreateXEvent(&host_plane_builder, &main_thread, HostEventType::kFunctionRun,
10, 90,
{{StatType::kStepId, kStepId},
{StatType::kProducerType, kTfExecutor},
{StatType::kProducerId, kStepId}});
auto tf_executor_thread = host_plane_builder.GetOrCreateLine(1);
CreateXEvent(&host_plane_builder, &tf_executor_thread,
HostEventType::kExecutorStateProcess, 20, 80,
{{StatType::kStepId, kStepId},
{StatType::kConsumerType, kTfExecutor},
{StatType::kConsumerId, kStepId}});
CreateXEvent(&host_plane_builder, &tf_executor_thread, "matmul", 30, 70,
{{StatType::kCorrelationId, kCorrelationId}});
XPlane* device_plane = space.add_planes();
XPlaneBuilder device_plane_builder(device_plane);
device_plane_builder.ReserveLines(1);
auto stream = device_plane_builder.GetOrCreateLine(0);
CreateXEvent(&device_plane_builder, &stream, "matmul", 200, 300,
{{StatType::kCorrelationId, kCorrelationId}});
EventForest event_forest;
GroupTfEvents(&space, &event_forest);
const GroupMetadataMap& group_metadata_map =
event_forest.GetGroupMetadataMap();
XPlaneVisitor device_plane_visitor = CreateTfXPlaneVisitor(device_plane);
EXPECT_EQ(device_plane->lines(0).events(0).stats_size(), 3);
EXPECT_EQ(device_plane_visitor.GetStatType(
device_plane->lines(0).events(0).stats(1).metadata_id()),
StatType::kGroupId);
EXPECT_EQ(group_metadata_map.size(), 1);
EXPECT_EQ(group_metadata_map.at(0).name, "train 123");
}
TEST(GroupEventsTest, GroupTensorFlowLoopTest) {
constexpr int64_t kStepId = 0;
constexpr int64_t kIterNum = 10;
constexpr int64_t kCorrelationId = 100;
XSpace space;
XPlaneBuilder host_plane_builder(GetOrCreateHostXPlane(&space));
host_plane_builder.ReserveLines(1);
auto tf_executor_thread = host_plane_builder.GetOrCreateLine(0);
CreateXEvent(&host_plane_builder, &tf_executor_thread,
HostEventType::kExecutorStateProcess, 5, 10,
{{StatType::kStepId, kStepId},
{StatType::kIterNum, kIterNum},
{StatType::kConsumerType, kTfExecutor},
{StatType::kConsumerId, kStepId}});
CreateXEvent(&host_plane_builder, &tf_executor_thread,
HostEventType::kExecutorStateProcess, 20, 80,
{{StatType::kStepId, kStepId},
{StatType::kIterNum, kIterNum},
{StatType::kConsumerType, kTfExecutor},
{StatType::kConsumerId, kStepId}});
CreateXEvent(&host_plane_builder, &tf_executor_thread, "matmul", 30, 70,
{{StatType::kCorrelationId, kCorrelationId}});
XPlane* device_plane = space.add_planes();
XPlaneBuilder device_plane_builder(device_plane);
device_plane_builder.ReserveLines(1);
auto stream = device_plane_builder.GetOrCreateLine(0);
CreateXEvent(&device_plane_builder, &stream, "matmul", 200, 300,
{{StatType::kCorrelationId, kCorrelationId}});
EventForest event_forest;
GroupTfEvents(&space, &event_forest);
const GroupMetadataMap& group_metadata_map =
event_forest.GetGroupMetadataMap();
XPlaneVisitor device_plane_visitor = CreateTfXPlaneVisitor(device_plane);
EXPECT_EQ(device_plane->lines(0).events(0).stats_size(), 3);
EXPECT_EQ(device_plane_visitor.GetStatType(
device_plane->lines(0).events(0).stats(1).metadata_id()),
StatType::kGroupId);
EXPECT_EQ(device_plane->lines(0).events(0).stats(1).int64_value(), 0);
EXPECT_EQ(group_metadata_map.size(), 1);
ASSERT_TRUE(group_metadata_map.contains(0));
EXPECT_EQ(group_metadata_map.at(0).name, "10");
}
TEST(GroupEventsTest, GroupMultipleTensorFlowLoopsTest) {
constexpr int64_t kFirstStepId = 0;
constexpr int64_t kSecondStepId = 1;
constexpr int64_t kFirstIterNumStart = 10;
constexpr int64_t kSecondIterNumStart = 0;
XSpace space;
XPlaneBuilder host_plane_builder(GetOrCreateHostXPlane(&space));
host_plane_builder.ReserveLines(2);
auto first_tf_executor_thread = host_plane_builder.GetOrCreateLine(0);
CreateXEvent(&host_plane_builder, &first_tf_executor_thread,
HostEventType::kExecutorStateProcess, 220, 80,
{{StatType::kStepId, kSecondStepId},
{StatType::kIterNum, kSecondIterNumStart},
{StatType::kConsumerType, kTfExecutor},
{StatType::kConsumerId, kSecondStepId}});
CreateXEvent(&host_plane_builder, &first_tf_executor_thread,
HostEventType::kExecutorStateProcess, 320, 80,
{{StatType::kStepId, kSecondStepId},
{StatType::kIterNum, kSecondIterNumStart + 1},
{StatType::kConsumerType, kTfExecutor},
{StatType::kConsumerId, kSecondStepId}});
auto second_tf_executor_thread = host_plane_builder.GetOrCreateLine(1);
CreateXEvent(&host_plane_builder, &second_tf_executor_thread,
HostEventType::kExecutorStateProcess, 20, 80,
{{StatType::kStepId, kFirstStepId},
{StatType::kIterNum, kFirstIterNumStart},
{StatType::kConsumerType, kTfExecutor},
{StatType::kConsumerId, kFirstStepId}});
CreateXEvent(&host_plane_builder, &second_tf_executor_thread,
HostEventType::kExecutorStateProcess, 120, 80,
{{StatType::kStepId, kFirstStepId},
{StatType::kIterNum, kFirstIterNumStart + 1},
{StatType::kConsumerType, kTfExecutor},
{StatType::kConsumerId, kFirstStepId}});
EventForest event_forest;
GroupTfEvents(&space, &event_forest);
const GroupMetadataMap& group_metadata_map =
event_forest.GetGroupMetadataMap();
EXPECT_EQ(group_metadata_map.size(), 4);
ASSERT_TRUE(group_metadata_map.contains(0));
EXPECT_EQ(group_metadata_map.at(0).name, "10");
ASSERT_TRUE(group_metadata_map.contains(1));
EXPECT_EQ(group_metadata_map.at(1).name, "11");
ASSERT_TRUE(group_metadata_map.contains(2));
EXPECT_EQ(group_metadata_map.at(2).name, "0");
ASSERT_TRUE(group_metadata_map.contains(3));
EXPECT_EQ(group_metadata_map.at(3).name, "1");
}
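// Exercises TF1, legacy, eager-op, and eager-function GPU launches plus
// eager-op and eager-function CPU kernels, verifying that only the truly
// eager executions are marked with kIsEager after grouping.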
TEST(GroupEventsTest, EagerOpTest) {
XSpace space;
XPlane* host_plane = GetOrCreateHostXPlane(&space);
XPlaneBuilder host_plane_builder(host_plane);
host_plane_builder.ReserveLines(1);
auto main_thread = host_plane_builder.GetOrCreateLine(0);
XPlane* device_plane = space.add_planes();
XPlaneBuilder device_plane_builder(device_plane);
device_plane_builder.ReserveLines(1);
auto gpu_stream = device_plane_builder.GetOrCreateLine(0);
int64_t correlation_id = 100;
const char* kTF1GpuLaunchEvent = "tf1 matmul";
const char* kTF1GpuEvent = "tf1_kernel_matmul";
CreateXEvent(&host_plane_builder, &main_thread, kTF1GpuLaunchEvent, 10, 90,
{{StatType::kCorrelationId, correlation_id}});
CreateXEvent(&device_plane_builder, &gpu_stream, kTF1GpuEvent, 200, 300,
{{StatType::kCorrelationId, correlation_id}});
++correlation_id;
const char* kLegacyGpuLaunchEvent = "legacy matmul";
const char* kLegacyGpuEvent = "legacy_kernel_matmul";
CreateXEvent(&host_plane_builder, &main_thread,
HostEventType::kEagerKernelExecute, 100, 200);
CreateXEvent(&host_plane_builder, &main_thread, kLegacyGpuLaunchEvent, 110,
190, {{StatType::kCorrelationId, correlation_id}});
CreateXEvent(&device_plane_builder, &gpu_stream, kLegacyGpuEvent, 300, 400,
{{StatType::kCorrelationId, correlation_id}});
++correlation_id;
const char* kEagerOpGpuLaunchEvent = "eager op matmul";
const char* kEagerOpGpuEvent = "eager_op_kernel_matmul";
CreateXEvent(&host_plane_builder, &main_thread,
HostEventType::kEagerKernelExecute, 200, 300,
{{StatType::kIsFunc, static_cast<int64_t>(0)}});
CreateXEvent(&host_plane_builder, &main_thread, kEagerOpGpuLaunchEvent, 210,
290, {{StatType::kCorrelationId, correlation_id}});
CreateXEvent(&device_plane_builder, &gpu_stream, kEagerOpGpuEvent, 400, 500,
{{StatType::kCorrelationId, correlation_id}});
++correlation_id;
const char* kEagerFuncGpuLaunchEvent = "eager func matmul";
const char* kEagerFuncGpuEvent = "eager_func_kernel_matmul";
CreateXEvent(&host_plane_builder, &main_thread,
HostEventType::kEagerKernelExecute, 300, 400,
{{StatType::kIsFunc, static_cast<int64_t>(1)}});
CreateXEvent(&host_plane_builder, &main_thread, kEagerFuncGpuLaunchEvent, 310,
390, {{StatType::kCorrelationId, correlation_id}});
CreateXEvent(&device_plane_builder, &gpu_stream, kEagerFuncGpuEvent, 500, 600,
{{StatType::kCorrelationId, correlation_id}});
++correlation_id;
const char* kEagerOpCpuEvent = "eager_op_cpu_kernel:Matmul";
CreateXEvent(&host_plane_builder, &main_thread,
HostEventType::kEagerKernelExecute, 400, 500,
{{StatType::kIsFunc, static_cast<int64_t>(0)}});
CreateXEvent(&host_plane_builder, &main_thread, kEagerOpCpuEvent, 410, 490);
const char* kEagerFuncCpuEvent = "eager_func_cpu_kernel:Matmul";
CreateXEvent(&host_plane_builder, &main_thread,
HostEventType::kEagerKernelExecute, 500, 600,
{{StatType::kIsFunc, static_cast<int64_t>(1)}});
CreateXEvent(&host_plane_builder, &main_thread, kEagerFuncCpuEvent, 510, 590);
GroupTfEvents(&space);
auto is_eager = [](const XEventVisitor& event) {
auto eager_stats = event.GetStat(StatType::kIsEager);
return eager_stats && eager_stats->IntValue();
};
XPlaneVisitor host_plane_visitor = CreateTfXPlaneVisitor(host_plane);
int interested_events_encountered = 0;
host_plane_visitor.ForEachLine([&](const XLineVisitor& line) {
line.ForEachEvent([&](const XEventVisitor& event) {
if (event.Name() == kEagerOpCpuEvent) {
interested_events_encountered++;
EXPECT_TRUE(is_eager(event));
} else if (event.Name() == kEagerFuncCpuEvent) {
interested_events_encountered++;
EXPECT_FALSE(is_eager(event));
}
});
});
EXPECT_EQ(interested_events_encountered, 2);
XPlaneVisitor device_plane_visitor = CreateTfXPlaneVisitor(device_plane);
interested_events_encountered = 0;
device_plane_visitor.ForEachLine([&](const XLineVisitor& line) {
line.ForEachEvent([&](const XEventVisitor& event) {
if (event.Name() == kTF1GpuEvent) {
interested_events_encountered++;
EXPECT_FALSE(is_eager(event));
} else if (event.Name() == kLegacyGpuEvent) {
interested_events_encountered++;
EXPECT_FALSE(is_eager(event));
} else if (event.Name() == kEagerOpGpuEvent) {
interested_events_encountered++;
EXPECT_TRUE(is_eager(event));
} else if (event.Name() == kEagerFuncGpuEvent) {
interested_events_encountered++;
EXPECT_FALSE(is_eager(event));
}
});
});
EXPECT_EQ(interested_events_encountered, 4);
}
TEST(GroupEventsTest, FunctionOpTest) {
constexpr int64_t kStepNum = 123;
constexpr int64_t kStepId = 0;
constexpr int64_t kCorrelationId = 100;
XSpace space;
XPlane* host_plane = GetOrCreateHostXPlane(&space);
XPlaneBuilder host_plane_builder(host_plane);
host_plane_builder.ReserveLines(2);
auto main_thread = host_plane_builder.GetOrCreateLine(0);
CreateXEvent(&host_plane_builder, &main_thread, HostEventType::kTraceContext,
0, 100, {{StatType::kStepNum, kStepNum}});
CreateXEvent(&host_plane_builder, &main_thread,
HostEventType::kEagerKernelExecute, 10, 90);
CreateXEvent(&host_plane_builder, &main_thread, HostEventType::kFunctionRun,
10, 90,
{{StatType::kStepId, kStepId},
{StatType::kProducerType, kTfExecutor},
{StatType::kProducerId, kStepId}});
auto tf_executor_thread = host_plane_builder.GetOrCreateLine(1);
CreateXEvent(&host_plane_builder, &tf_executor_thread,
HostEventType::kExecutorStateProcess, 20, 80,
{{StatType::kStepId, kStepId},
{StatType::kConsumerType, kTfExecutor},
{StatType::kConsumerId, kStepId}});
CreateXEvent(&host_plane_builder, &tf_executor_thread, "matmul", 30, 30,
{{StatType::kCorrelationId, kCorrelationId}});
CreateXEvent(&host_plane_builder, &tf_executor_thread, "add:Add", 70, 20);
XPlane* device_plane = space.add_planes();
XPlaneBuilder device_plane_builder(device_plane);
device_plane_builder.ReserveLines(1);
auto stream = device_plane_builder.GetOrCreateLine(0);
CreateXEvent(&device_plane_builder, &stream, "matmul", 200, 300,
{{StatType::kCorrelationId, kCorrelationId}});
GroupTfEvents(&space);
XPlaneVisitor host_plane_visitor = CreateTfXPlaneVisitor(host_plane);
const XEvent& cpu_tf_op = host_plane->lines(1).events(2);
EXPECT_EQ(cpu_tf_op.stats_size(), 2);
EXPECT_EQ(host_plane_visitor.GetStatType(cpu_tf_op.stats(1).metadata_id()),
StatType::kIsEager);
EXPECT_EQ(cpu_tf_op.stats(1).int64_value(), 0);
XPlaneVisitor device_plane_visitor = CreateTfXPlaneVisitor(device_plane);
const XEvent& gpu_kernel = device_plane->lines(0).events(0);
EXPECT_EQ(gpu_kernel.stats_size(), 3);
EXPECT_EQ(device_plane_visitor.GetStatType(gpu_kernel.stats(2).metadata_id()),
StatType::kIsEager);
EXPECT_EQ(gpu_kernel.stats(2).int64_value(), 0);
}
TEST(GroupEventsTest, SemanticArgTest) {
constexpr int64_t kIsRoot = 1;
constexpr int64_t kStepNum = 100;
constexpr int64_t kContextType = 123;
constexpr uint64 kContextId = 456;
XSpace raw_space;
XPlane* raw_plane = raw_space.add_planes();
XPlaneBuilder plane(raw_plane);
plane.ReserveLines(2);
auto root_producer = plane.GetOrCreateLine(0);
CreateXEvent(&plane, &root_producer, HostEventType::kTraceContext, 0, 100,
{{StatType::kIsRoot, kIsRoot}, {StatType::kStepNum, kStepNum}});
CreateXEvent(&plane, &root_producer, HostEventType::kFunctionRun, 10, 90,
{{StatType::kProducerType, kContextType},
{StatType::kProducerId, kContextId}});
auto consumer = plane.GetOrCreateLine(1);
CreateXEvent(&plane, &consumer, HostEventType::kExecutorStateProcess, 20, 80,
{{StatType::kConsumerType, kContextType},
{StatType::kConsumerId, kContextId}});
GroupTfEvents(&raw_space);
int num_events = 0;
CreateTfXPlaneVisitor(raw_plane).ForEachLine([&](const XLineVisitor& line) {
num_events += line.NumEvents();
line.ForEachEvent([&](const XEventVisitor& event) {
std::optional<int64_t> group_id;
if (std::optional<XStatVisitor> stat =
event.GetStat(StatType::kGroupId)) {
group_id = stat->IntValue();
}
EXPECT_TRUE(group_id.has_value());
EXPECT_EQ(*group_id, 0);
});
});
EXPECT_EQ(num_events, 3);
}
TEST(GroupEventsTest, SemanticIntArgNoMatchTest) {
constexpr int64_t kIsRoot = 1;
constexpr int64_t kStepNum = 100;
constexpr int64_t kContextType = 123;
constexpr uint64 kProducerId = 456;
constexpr uint64 kConsumerId = 789;
XSpace raw_space;
XPlane* raw_plane = raw_space.add_planes();
XPlaneBuilder plane(raw_plane);
plane.ReserveLines(2);
auto root_producer = plane.GetOrCreateLine(0);
CreateXEvent(&plane, &root_producer, HostEventType::kTraceContext, 0, 100,
{{StatType::kIsRoot, kIsRoot}, {StatType::kStepNum, kStepNum}});
CreateXEvent(&plane, &root_producer, HostEventType::kFunctionRun, 10, 90,
{{StatType::kProducerType, kContextType},
{StatType::kProducerId, kProducerId}});
auto consumer = plane.GetOrCreateLine(1);
CreateXEvent(&plane, &consumer, HostEventType::kExecutorStateProcess, 20, 80,
{{StatType::kConsumerType, kContextType},
{StatType::kConsumerId, kConsumerId}});
GroupTfEvents(&raw_space);
int num_events = 0;
CreateTfXPlaneVisitor(raw_plane).ForEachLine([&](const XLineVisitor& line) {
num_events += line.NumEvents();
line.ForEachEvent([&](const XEventVisitor& event) {
std::optional<int64_t> group_id;
if (std::optional<XStatVisitor> stat =
event.GetStat(StatType::kGroupId)) {
group_id = stat->IntValue();
}
if (event.Type() == HostEventType::kExecutorStateProcess) {
EXPECT_FALSE(group_id.has_value());
} else {
EXPECT_TRUE(group_id.has_value());
EXPECT_EQ(*group_id, 0);
}
});
});
EXPECT_EQ(num_events, 3);
}
TEST(GroupEventsTest, SemanticUintArgNoMatchTest) {
constexpr int64_t kIsRoot = 1;
constexpr int64_t kStepNum = 100;
constexpr int64_t kContextType = 123;
constexpr uint64 kProducerId = UINT64_MAX;
constexpr uint64 kConsumerId = UINT64_MAX - 1;
XSpace raw_space;
XPlane* raw_plane = raw_space.add_planes();
XPlaneBuilder plane(raw_plane);
plane.ReserveLines(2);
auto root_producer = plane.GetOrCreateLine(0);
CreateXEvent(&plane, &root_producer, HostEventType::kTraceContext, 0, 100,
{{StatType::kIsRoot, kIsRoot}, {StatType::kStepNum, kStepNum}});
CreateXEvent(&plane, &root_producer, HostEventType::kFunctionRun, 10, 90,
{{StatType::kProducerType, kContextType},
{StatType::kProducerId, kProducerId}});
auto consumer = plane.GetOrCreateLine(1);
CreateXEvent(&plane, &consumer, HostEventType::kExecutorStateProcess, 20, 80,
{{StatType::kConsumerType, kContextType},
{StatType::kConsumerId, kConsumerId}});
GroupTfEvents(&raw_space);
int num_events = 0;
CreateTfXPlaneVisitor(raw_plane).ForEachLine([&](const XLineVisitor& line) {
num_events += line.NumEvents();
line.ForEachEvent([&](const XEventVisitor& event) {
std::optional<int64_t> group_id;
if (std::optional<XStatVisitor> stat =
event.GetStat(StatType::kGroupId)) {
group_id = stat->IntValue();
}
if (event.Type() == HostEventType::kExecutorStateProcess) {
EXPECT_FALSE(group_id.has_value());
} else {
EXPECT_TRUE(group_id.has_value());
EXPECT_EQ(*group_id, 0);
}
});
});
EXPECT_EQ(num_events, 3);
}
TEST(GroupEventsTest, AsyncEventTest) {
constexpr int64_t kIsRoot = 1;
constexpr int64_t kIsAsync = 1;
constexpr absl::string_view kParent = "parent";
constexpr absl::string_view kAsync = "async";
constexpr absl::string_view kChild = "child";
XSpace raw_space;
XPlane* raw_plane = raw_space.add_planes();
XPlaneBuilder plane(raw_plane);
plane.ReserveLines(1);
auto line = plane.GetOrCreateLine(0);
CreateXEvent(&plane, &line, kParent, 0, 100, {{StatType::kIsRoot, kIsRoot}});
CreateXEvent(&plane, &line, kAsync, 10, 200,
{{StatType::kIsAsync, kIsAsync}});
CreateXEvent(&plane, &line, kChild, 20, 80);
GroupTfEvents(&raw_space);
CreateTfXPlaneVisitor(raw_plane).ForEachLine([&](const XLineVisitor& line) {
EXPECT_EQ(line.NumEvents(), 3);
line.ForEachEvent([&](const XEventVisitor& event) {
std::optional<int64_t> group_id;
if (std::optional<XStatVisitor> stat =
event.GetStat(StatType::kGroupId)) {
group_id = stat->IntValue();
}
if (event.Name() == kAsync) {
EXPECT_FALSE(group_id.has_value());
} else {
EXPECT_TRUE(group_id.has_value());
EXPECT_EQ(*group_id, 0);
}
});
});
}
TEST(GroupEventsTest, BatchingSessionTest) {
constexpr absl::string_view kSchedule = "Schedule";
constexpr int64_t kBatchContextType =
static_cast<int64_t>(ContextType::kSharedBatchScheduler);
constexpr int64_t kBatchContextId = 123;
constexpr int64_t kBatchingSessionRunRootLevel = 1;
constexpr int64_t kProcessBatchRootLevel = 2;
XSpace raw_space;
XPlane* raw_plane = raw_space.add_planes();
XPlaneBuilder plane(raw_plane);
plane.ReserveLines(2);
auto request_thread = plane.GetOrCreateLine(0);
CreateXEvent(&plane, &request_thread, HostEventType::kBatchingSessionRun, 0,
100, {{StatType::kIsRoot, kBatchingSessionRunRootLevel}});
CreateXEvent(&plane, &request_thread, kSchedule, 0, 100,
{{StatType::kProducerType, kBatchContextType},
{StatType::kProducerId, kBatchContextId}});
CreateXEvent(&plane, &request_thread, HostEventType::kBatchingSessionRun, 200,
100, {{StatType::kIsRoot, kBatchingSessionRunRootLevel}});
CreateXEvent(&plane, &request_thread, kSchedule, 200, 100,
{{StatType::kProducerType, kBatchContextType},
{StatType::kProducerId, kBatchContextId}});
auto batch_thread = plane.GetOrCreateLine(1);
CreateXEvent(&plane, &batch_thread, HostEventType::kProcessBatch, 200, 100,
{{StatType::kConsumerType, kBatchContextType},
{StatType::kConsumerId, kBatchContextId},
{StatType::kIsRoot, kProcessBatchRootLevel}});
EventForest event_forest;
GroupTfEvents(&raw_space, &event_forest);
const GroupMetadataMap& group_metadata_map =
event_forest.GetGroupMetadataMap();
EXPECT_EQ(group_metadata_map.size(), 3);
EXPECT_EQ(group_metadata_map.at(0).parents.size(), 2);
EXPECT_EQ(group_metadata_map.at(1).children.size(), 1);
EXPECT_EQ(group_metadata_map.at(2).children.size(), 1);
uint64 num_checked = 0;
CreateTfXPlaneVisitor(raw_plane).ForEachLine([&](const XLineVisitor& line) {
line.ForEachEvent([&](const XEventVisitor& event) {
std::optional<int64_t> group_id;
if (std::optional<XStatVisitor> stat =
event.GetStat(StatType::kGroupId)) {
group_id = stat->IntValue();
}
EXPECT_TRUE(group_id.has_value());
if (line.Id() == 0 &&
event.Type() == HostEventType::kBatchingSessionRun) {
++num_checked;
} else if (line.Id() == 1 &&
event.Type() == HostEventType::kProcessBatch) {
++num_checked;
}
});
});
EXPECT_EQ(num_checked, 3);
}
TEST(GroupTPUEventsTest, TpuExecuteOpTest) {
tensorflow::profiler::XSpace space;
XPlaneBuilder host_plane_builder(GetOrCreateHostXPlane(&space));
host_plane_builder.ReserveLines(1);
auto main_thread = host_plane_builder.GetOrCreateLine(0);
CreateXEvent(
&host_plane_builder, &main_thread, HostEventType::kExecutorStateProcess,
20, 50,
{{StatType::kStepId, int64_t{123}}, {StatType::kIterNum, int64_t{456}}});
EventForest event_forest;
GroupTpuEventsOSS(&space, {}, &event_forest);
EXPECT_EQ(event_forest.GetGroupMetadataMap().size(), 1);
XPlaneVisitor host_plane_visitor = CreateTfXPlaneVisitor(&space.planes(0));
host_plane_visitor.ForEachLine([&](const XLineVisitor& line) {
line.ForEachEvent([&](const XEventVisitor& event) {
EXPECT_TRUE(event.GetStat(StatType::kGroupId).has_value());
});
});
}
TEST(GroupTPUEventsTest, TpuRequestTest) {
tensorflow::profiler::XSpace space;
XPlaneBuilder host_plane_builder(GetOrCreateHostXPlane(&space));
host_plane_builder.ReserveLines(1);
auto main_thread = host_plane_builder.GetOrCreateLine(0);
CreateXEvent(&host_plane_builder, &main_thread, HostEventType::kSessionRun, 0,
100, {{StatType::kIsRoot, int64_t{1}}});
CreateXEvent(&host_plane_builder, &main_thread,
GetHostEventTypeStr(HostEventType::kEnqueueRequestLocked), 20,
50,
{{StatType::kQueueAddr, int64_t{123}},
{StatType::kRequestId, int64_t{456}}});
EventForest event_forest;
GroupTpuEventsOSS(&space, {}, &event_forest);
EXPECT_EQ(event_forest.GetGroupMetadataMap().size(), 1);
XPlaneVisitor host_plane_visitor = CreateTfXPlaneVisitor(&space.planes(0));
host_plane_visitor.ForEachLine([&](const XLineVisitor& line) {
line.ForEachEvent([&](const XEventVisitor& event) {
EXPECT_TRUE(event.GetStat(StatType::kGroupId).has_value());
});
});
}
TEST(GroupTPUEventsTest, TpuProgramCallbackTest) {
tensorflow::profiler::XSpace space;
XPlaneBuilder host_plane_builder(GetOrCreateHostXPlane(&space));
host_plane_builder.ReserveLines(1);
auto main_thread = host_plane_builder.GetOrCreateLine(0);
CreateXEvent(&host_plane_builder, &main_thread, HostEventType::kSessionRun, 0,
100, {{StatType::kIsRoot, int64_t{1}}});
CreateXEvent(&host_plane_builder, &main_thread,
GetHostEventTypeStr(HostEventType::kDoEnqueueProgram), 20, 50,
{{StatType::kRunId, int64_t{123}},
{StatType::kQueueId, int64_t{0}},
{StatType::kDeviceOrdinal, int64_t{1}}});
EventForest event_forest;
GroupTpuEventsOSS(&space, {}, &event_forest);
EXPECT_EQ(event_forest.GetGroupMetadataMap().size(), 1);
XPlaneVisitor host_plane_visitor = CreateTfXPlaneVisitor(&space.planes(0));
host_plane_visitor.ForEachLine([&](const XLineVisitor& line) {
line.ForEachEvent([&](const XEventVisitor& event) {
EXPECT_TRUE(event.GetStat(StatType::kGroupId).has_value());
});
});
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/profiler/utils/group_events.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/profiler/utils/group_events_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
15fd9202-a8ac-4fc4-9660-18c249c6c32b | cpp | tensorflow/tensorflow | broadcast_args | tensorflow/lite/kernels/broadcast_args.cc | tensorflow/lite/kernels/broadcast_args_test.cc | #include "tensorflow/lite/kernels/internal/reference/broadcast_args.h"
#include <algorithm>
#include <cstdint>
#include <memory>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace broadcast_args {
constexpr int kShape1Tensor = 0;
constexpr int kShape2Tensor = 1;
constexpr int kOutputTensor = 0;
struct BroadcastArgsContext {
BroadcastArgsContext(TfLiteContext* context, TfLiteNode* node) {
shape1 = GetInput(context, node, kShape1Tensor);
shape2 = GetInput(context, node, kShape2Tensor);
output = GetOutput(context, node, kOutputTensor);
}
const TfLiteTensor* shape1;
const TfLiteTensor* shape2;
TfLiteTensor* output;
};
TfLiteStatus EvalImpl(TfLiteContext* context, TfLiteNode* node);
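// Checks that both shape inputs are rank-1 int32/int64 tensors matching the
// output type, sizes the output to the longer of the two inputs, and folds
// the result at prepare time when both inputs are constant.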
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE(context, NumInputs(node) == 2);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
BroadcastArgsContext op_context(context, node);
TF_LITE_ENSURE(context, op_context.shape1->type == kTfLiteInt32 ||
op_context.shape1->type == kTfLiteInt64);
TF_LITE_ENSURE_EQ(context, op_context.shape1->type, op_context.shape2->type);
TF_LITE_ENSURE_EQ(context, op_context.shape1->type, op_context.output->type);
TF_LITE_ENSURE_EQ(context, NumDimensions(op_context.shape1), 1);
TF_LITE_ENSURE_EQ(context, NumDimensions(op_context.shape2), 1);
TfLiteIntArray* output_shape = TfLiteIntArrayCreate(1);
output_shape->data[0] = std::max(SizeOfDimension(op_context.shape1, 0),
SizeOfDimension(op_context.shape2, 0));
if (IsConstantOrPersistentTensor(op_context.shape1) &&
IsConstantOrPersistentTensor(op_context.shape2)) {
SetTensorToPersistentRo(op_context.output);
TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, op_context.output,
output_shape));
return EvalImpl(context, node);
}
return context->ResizeTensor(context, op_context.output, output_shape);
}
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
BroadcastArgsContext op_context(context, node);
if (IsConstantOrPersistentTensor(op_context.output)) {
return kTfLiteOk;
} else {
return EvalImpl(context, node);
}
}
TfLiteStatus EvalImpl(TfLiteContext* context, TfLiteNode* node) {
BroadcastArgsContext op_context(context, node);
#define TF_LITE_BROADCAST_ARG(data_type) \
reference_ops::BroadcastArgs(GetTensorShape(op_context.shape1), \
GetTensorData<data_type>(op_context.shape1), \
GetTensorShape(op_context.shape2), \
GetTensorData<data_type>(op_context.shape2), \
GetTensorShape(op_context.output), \
GetTensorData<data_type>(op_context.output))
if (op_context.output->type == kTfLiteInt32) {
TF_LITE_BROADCAST_ARG(int32_t);
} else {
TF_LITE_BROADCAST_ARG(int64_t);
}
#undef TF_LITE_BROADCAST_ARG
return kTfLiteOk;
}
}
TfLiteRegistration* Register_BROADCAST_ARGS() {
static TfLiteRegistration r = {nullptr, nullptr, broadcast_args::Prepare,
broadcast_args::Eval};
return &r;
}
}
}
} | #include <cstdint>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/lite/core/interpreter.h"
#include "tensorflow/lite/core/kernels/register.h"
#include "tensorflow/lite/core/model.h"
#include "tensorflow/lite/kernels/test_util.h"
namespace tflite {
namespace {
using ::testing::ElementsAreArray;
template <class ShapeType = int32_t>
class BroadcastArgsOpModel : public SingleOpModel {
public:
BroadcastArgsOpModel(std::initializer_list<ShapeType> input1,
std::initializer_list<ShapeType> input2,
bool constant_tensor) {
int input1_length = input1.size();
int input2_length = input2.size();
if (constant_tensor) {
shape1_ =
AddConstInput({GetTensorType<ShapeType>(), {input1_length}}, input1);
shape2_ =
AddConstInput({GetTensorType<ShapeType>(), {input2_length}}, input2);
} else {
shape1_ = AddInput({GetTensorType<ShapeType>(), {input1_length}});
shape2_ = AddInput({GetTensorType<ShapeType>(), {input2_length}});
}
output_ = AddOutput(GetTensorType<ShapeType>());
SetBuiltinOp(BuiltinOperator_BROADCAST_ARGS, BuiltinOptions_NONE, 0);
BuildInterpreter({{input1_length}, {input2_length}});
if (!constant_tensor) {
if (input1.size() > 0) SetInput1(input1);
if (input2.size() > 0) SetInput2(input2);
}
}
void SetInput1(std::initializer_list<ShapeType> data) {
PopulateTensor(shape1_, data);
}
void SetInput2(std::initializer_list<ShapeType> data) {
PopulateTensor(shape2_, data);
}
std::vector<ShapeType> GetOutput() {
return ExtractVector<ShapeType>(output_);
}
std::vector<int> GetOutputShape() { return GetTensorShape(output_); }
protected:
int shape1_;
int shape2_;
int output_;
};
template <typename T>
class BroadcastArgsOpTest : public ::testing::Test {};
using DataTypes = ::testing::Types<int64_t, int32_t>;
TYPED_TEST_SUITE(BroadcastArgsOpTest, DataTypes);
#if GTEST_HAS_DEATH_TEST
TYPED_TEST(BroadcastArgsOpTest, ShapeNotBroadcastableConstant) {
EXPECT_DEATH(BroadcastArgsOpModel<TypeParam> m({2, 3, 4, 4}, {2, 2},
/*constant_tensor=*/true),
"");
}
TYPED_TEST(BroadcastArgsOpTest, ShapeNotBroadcastable) {
BroadcastArgsOpModel<TypeParam> m({2, 3, 4, 4}, {2, 2},
/*constant_tensor=*/false);
EXPECT_DEATH(ASSERT_EQ(m.Invoke(), kTfLiteOk), "");
}
#endif
TYPED_TEST(BroadcastArgsOpTest, BroadcastArgsWithScalar) {
for (bool constant_tensor : {true, false}) {
BroadcastArgsOpModel<TypeParam> m({}, {2, 4}, constant_tensor);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2}));
EXPECT_THAT(m.GetOutput(), ElementsAreArray({2, 4}));
}
}
TYPED_TEST(BroadcastArgsOpTest, BroadcastArgsDifferentDims) {
for (bool constant_tensor : {true, false}) {
BroadcastArgsOpModel<TypeParam> m({1}, {2, 4}, constant_tensor);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2}));
EXPECT_THAT(m.GetOutput(), ElementsAreArray({2, 4}));
}
}
TYPED_TEST(BroadcastArgsOpTest, BroadcastArgsSameDims) {
for (bool constant_tensor : {true, false}) {
BroadcastArgsOpModel<TypeParam> m({1, 4, 6, 3, 1, 5}, {4, 4, 1, 3, 4, 1},
constant_tensor);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({6}));
EXPECT_THAT(m.GetOutput(), ElementsAreArray({4, 4, 6, 3, 4, 5}));
}
}
TYPED_TEST(BroadcastArgsOpTest, BroadcastArgsComplex) {
for (bool constant_tensor : {true, false}) {
BroadcastArgsOpModel<TypeParam> m({6, 3, 1, 5}, {4, 4, 1, 3, 4, 1},
constant_tensor);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({6}));
EXPECT_THAT(m.GetOutput(), ElementsAreArray({4, 4, 6, 3, 4, 5}));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/broadcast_args.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/broadcast_args_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
03d4180a-c3ad-4127-b9e0-08f2506c3923 | cpp | tensorflow/tensorflow | hlo_query | third_party/xla/xla/hlo/utils/hlo_query.cc | third_party/xla/xla/hlo/utils/hlo_query_test.cc | #include "xla/hlo/utils/hlo_query.h"
#include <algorithm>
#include <cstdint>
#include <utility>
#include "absl/algorithm/container.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/literal.h"
#include "xla/service/pattern_matcher.h"
#include "xla/shape_util.h"
namespace xla {
namespace hlo_query {
bool IsCollectiveCommunicationOp(HloOpcode op) {
return op == HloOpcode::kAllReduce || op == HloOpcode::kAllGather ||
op == HloOpcode::kAllToAll || op == HloOpcode::kCollectivePermute ||
op == HloOpcode::kCollectiveBroadcast ||
op == HloOpcode::kReduceScatter || op == HloOpcode::kAllReduceStart ||
op == HloOpcode::kAllGatherStart ||
op == HloOpcode::kCollectivePermuteStart;
}
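// Returns true if `instruction` begins an asynchronous collective: either an
// async-start wrapping a collective op, or one of the dedicated *-start
// opcodes (optionally counting Send/Recv as well).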
bool IsAsyncCollectiveStartOp(const HloInstruction* instruction,
bool include_send_recv) {
HloOpcode op = instruction->opcode();
if (op == HloOpcode::kAsyncStart) {
return IsCollectiveCommunicationOp(instruction->async_wrapped_opcode());
}
return op == HloOpcode::kAllReduceStart || op == HloOpcode::kAllGatherStart ||
op == HloOpcode::kCollectivePermuteStart ||
(include_send_recv &&
(op == HloOpcode::kSend || op == HloOpcode::kRecv));
}
bool IsAsyncCollectiveDoneOp(const HloInstruction* instruction,
bool include_send_recv) {
HloOpcode op = instruction->opcode();
if (op == HloOpcode::kAsyncDone) {
return IsCollectiveCommunicationOp(instruction->async_wrapped_opcode());
}
return op == HloOpcode::kAllReduceDone || op == HloOpcode::kAllGatherDone ||
op == HloOpcode::kCollectivePermuteDone ||
(include_send_recv &&
(op == HloOpcode::kSendDone || op == HloOpcode::kRecvDone));
}
bool IsConstantR0F32(HloInstruction* instruction, float* out) {
if (instruction->opcode() == HloOpcode::kConstant &&
ShapeUtil::IsScalarWithElementType(instruction->shape(), F32)) {
*out = instruction->literal().Get<float>({});
return true;
}
return false;
}
bool AllOperandsAreParametersOrConstants(const HloInstruction& instruction) {
for (const auto& operand : instruction.operands()) {
if (operand->opcode() != HloOpcode::kParameter &&
operand->opcode() != HloOpcode::kConstant) {
return false;
}
}
return true;
}
bool AllOperandsAreParametersOrConstantsWithSingleUser(
const HloInstruction& instruction) {
for (const auto& operand : instruction.operands()) {
if (operand->opcode() != HloOpcode::kParameter &&
operand->opcode() != HloOpcode::kConstant) {
return false;
}
if (operand->user_count() > 1) {
return false;
}
}
return true;
}
bool AllOperandsAreParameters(const HloInstruction& instruction) {
for (const auto& operand : instruction.operands()) {
if (operand->opcode() != HloOpcode::kParameter) {
return false;
}
}
return true;
}
bool AllOperandsAreConstants(const HloInstruction& instruction) {
for (const auto& operand : instruction.operands()) {
if (operand->opcode() != HloOpcode::kConstant) {
return false;
}
}
return true;
}
HloInstruction* GetMatchingOperand(const HloPredicate& matcher,
HloInstruction* instruction) {
for (HloInstruction* op : instruction->operands()) {
if (matcher(op)) {
return op;
}
}
return nullptr;
}
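// Binds *matching_operand and *other_operand for a two-operand instruction;
// when both operands satisfy the predicate, operand 0 wins.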
bool MatchBinaryInstructionOperand(const HloPredicate& matcher,
HloInstruction* instruction,
HloInstruction** matching_operand,
HloInstruction** other_operand) {
CHECK_EQ(instruction->operand_count(), 2);
if (matcher(instruction->operand(0))) {
*matching_operand = instruction->mutable_operand(0);
*other_operand = instruction->mutable_operand(1);
return true;
}
if (matcher(instruction->operand(1))) {
*matching_operand = instruction->mutable_operand(1);
*other_operand = instruction->mutable_operand(0);
return true;
}
return false;
}
bool MatchBinaryInstructionOperandOpcode(HloOpcode opcode,
HloInstruction* instruction,
HloInstruction** matching_operand,
HloInstruction** other_operand) {
return MatchBinaryInstructionOperand(
[opcode](const HloInstruction* instruction) {
return instruction->opcode() == opcode;
},
instruction, matching_operand, other_operand);
}
bool IsScalarConstant(const HloInstruction* instruction) {
return instruction->IsConstant() && ShapeUtil::IsScalar(instruction->shape());
}
bool IsBroadcastedConstantOrScalar(const HloInstruction& instr) {
return instr.IsConstant() || ShapeUtil::IsScalar(instr.shape()) ||
(HloOpcode::kBroadcast == instr.opcode() &&
(instr.operand(0)->IsConstant() ||
ShapeUtil::IsScalar(instr.operand(0)->shape())));
}
bool IsBroadcastOfScalarConstant(const HloInstruction& instr) {
return instr.opcode() == HloOpcode::kBroadcast &&
IsScalarConstant(instr.operand(0));
}
bool IsBroadcastOfParameter(const HloInstruction& instr) {
return instr.opcode() == HloOpcode::kBroadcast &&
instr.operand(0)->opcode() == HloOpcode::kParameter;
}
HloInstruction* GetFirstInstructionWithOpcode(const HloComputation& computation,
const HloOpcode opcode) {
auto instructions = computation.instructions();
auto it = absl::c_find_if(instructions, [&](HloInstruction* instr) {
return instr->opcode() == opcode;
});
return it == instructions.end() ? nullptr : *it;
}
bool ContainsInstrWithOpcode(const HloComputation* comp,
const absl::flat_hash_set<HloOpcode>& opcodes) {
for (const auto* instr : comp->instructions()) {
if (opcodes.count(instr->opcode())) {
return true;
}
for (const HloComputation* subcomp : instr->called_computations()) {
if (ContainsInstrWithOpcode(subcomp, opcodes)) {
return true;
}
}
}
return false;
}
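// Requires `op` to be a collective opcode; returns true if any instance in
// the module was created with constrain_layout=true.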
bool ContainsLayoutConstrainedCollective(const HloModule& module,
HloOpcode op) {
CHECK(IsCollectiveCommunicationOp(op));
for (auto computation : module.computations()) {
for (auto hlo : computation->instructions()) {
if (hlo->opcode() == op &&
DynCast<HloCollectiveInstruction>(hlo)->constrain_layout()) {
return true;
}
}
}
return false;
}
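// Returns one past the largest channel id present in the module, or 1 when
// no channel instruction carries an id.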
int64_t NextChannelId(const HloModule& module) {
int64_t next_channel_id = 1;
for (const HloComputation* comp : module.computations()) {
for (const HloInstruction* hlo : comp->instructions()) {
const HloChannelInstruction* channel_instr =
DynCast<HloChannelInstruction>(hlo);
if (channel_instr && channel_instr->channel_id()) {
next_channel_id =
std::max(next_channel_id, *channel_instr->channel_id() + 1);
}
}
}
return next_channel_id;
}
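// X64 rewriting splits 64-bit host transfers into tuples of two 32-bit
// halves, so a tuple-shaped host-transfer payload indicates the transform
// has run.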
bool HasX64TransformedHostTransfer(const HloModule& module) {
for (auto computation : module.computations()) {
for (auto hlo : computation->instructions()) {
if (hlo->opcode() == HloOpcode::kSend) {
auto send = DynCast<HloSendInstruction>(hlo);
if (send->is_host_transfer() && send->operand(0)->shape().IsTuple()) {
return true;
}
} else if (hlo->opcode() == HloOpcode::kRecv) {
auto recv = DynCast<HloRecvInstruction>(hlo);
if (recv->is_host_transfer() &&
recv->shape().tuple_shapes(0).IsTuple()) {
return true;
}
}
}
}
return false;
}
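// Returns the unique get-tuple-element of `operand` at `index`, or nullptr
// when there are zero or multiple such users.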
HloInstruction* GetUniqueGteInstruction(const HloInstruction* operand,
int64_t index) {
HloInstruction* gte = nullptr;
for (HloInstruction* instr : operand->parent()->MakeInstructionPostOrder()) {
if (!Match(instr, match::GetTupleElement().WithTupleIndex(index))) {
continue;
}
if (instr->operand(0) != operand) {
continue;
}
if (gte != nullptr) {
return nullptr;
}
gte = instr;
}
return gte;
}
HloComputation* FindComputation(HloModule* module, absl::string_view name) {
auto computations = module->computations();
auto it = absl::c_find_if(
computations, [&](HloComputation* c) { return c->name() == name; });
if (it == computations.end()) {
return nullptr;
}
return *it;
}
HloInstruction* FindInstruction(const HloComputation* computation,
absl::string_view name) {
for (HloInstruction* instruction : computation->instructions()) {
if (instruction->name() == name) return instruction;
}
return nullptr;
}
HloInstruction* FindInstruction(const HloComputation* computation,
HloOpcode opcode) {
for (auto* instruction : computation->instructions()) {
if (instruction->opcode() == opcode) return instruction;
}
return nullptr;
}
}
} | #include "xla/hlo/utils/hlo_query.h"
#include <memory>
#include <utility>
#include <gtest/gtest.h>
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/hlo_parser.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
using HloQueryTest = HloTestBase;
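// Counts instructions with `opcode` in a module or computation using
// hlo_query::ForEachInstructionWithOpcode.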
template <typename Hlo>
int CountInstructions(Hlo& module, HloOpcode opcode) {
int counter = 0;
hlo_query::ForEachInstructionWithOpcode(
module, opcode, [&counter](auto& instr) { counter++; });
return counter;
}
constexpr absl::string_view kConstantAdditionHloString = R"(
HloModule test
ENTRY main {
zero = f32[] constant(0)
five = f32[] constant(5)
ROOT out = f32[] add(zero, five)
})";
TEST_F(HloQueryTest,
GetInstructionWithOpCodeReturnsMatchingInstructionForModule) {
constexpr absl::string_view kHloString = R"(
HloModule m
computation.0 {
param.0 = f32[32]{0} parameter(0)
ROOT _ = f32[32]{0} rsqrt(param.0)
}
ENTRY main {
param.0 = f32[32]{0} parameter(0)
param.1 = f32[32]{0} parameter(1)
param.2 = f32[32]{0} parameter(2)
param.3 = f32[32]{0} parameter(3)
add.0 = f32[32]{0} add(param.0,param.1)
add.1 = f32[32]{0} add(param.1,param.2)
sub.0 = f32[32]{0} subtract(param.0,param.1)
mul.0 = f32[32]{0} multiply(param.0,param.1)
mul.1 = f32[32]{0} multiply(param.1,param.2)
mul.2 = f32[32]{0} multiply(param.2,param.3)
comp.0 = call(param.0), to_apply=computation.0
ROOT _ = (f32[32],f32[32],f32[32],f32[32],f32[32],f32[32],f32[32]) tuple(comp.0,add.0,add.1,sub.0,mul.0,mul.1,mul.2)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnUnverifiedModule(kHloString));
EXPECT_EQ(CountInstructions(*module, HloOpcode::kAdd), 2);
EXPECT_EQ(CountInstructions(*module, HloOpcode::kSubtract), 1);
EXPECT_EQ(CountInstructions(*module, HloOpcode::kMultiply), 3);
}
TEST_F(HloQueryTest,
GetInstructionWithOpCodeReturnsMatchingInstructionForComputation) {
constexpr absl::string_view kHloString = R"(
HloModule m
computation.0 {
param.0 = f32[32]{0} parameter(0)
param.1 = f32[32]{0} parameter(1)
param.2 = f32[32]{0} parameter(2)
param.3 = f32[32]{0} parameter(3)
add.0 = f32[32]{0} add(param.0,param.1)
add.1 = f32[32]{0} add(param.1,param.2)
sub.0 = f32[32]{0} subtract(param.0,param.1)
mul.0 = f32[32]{0} multiply(param.0,param.1)
mul.1 = f32[32]{0} multiply(param.1,param.2)
ROOT mul.2 = f32[32]{0} multiply(param.2,param.3)
}
ENTRY main {
param.0 = f32[32]{0} parameter(0)
param.1 = f32[32]{0} parameter(1)
param.2 = f32[32]{0} parameter(2)
param.3 = f32[32]{0} parameter(3)
add.0 = f32[32]{0} add(param.0,param.1)
sub.0 = f32[32]{0} subtract(param.0,param.1)
mul.0 = f32[32]{0} multiply(param.0,param.1)
comp.0 = f32[32]{0} call(param.0,param.1,param.2), to_apply=computation.0
ROOT _ = (f32[32],f32[32],f32[32],f32[32]) tuple(add.0,sub.0,mul.0,comp.0)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnUnverifiedModule(kHloString));
HloComputation* computation = module->GetComputationWithName("computation.0");
EXPECT_EQ(CountInstructions(*computation, HloOpcode::kAdd), 2);
EXPECT_EQ(CountInstructions(*computation, HloOpcode::kSubtract), 1);
EXPECT_EQ(CountInstructions(*computation, HloOpcode::kMultiply), 3);
}
TEST_F(HloQueryTest, GetUniqueGteTest) {
constexpr absl::string_view kHloString = R"(
HloModule m
ENTRY main {
param.0 = (f32[32]{0}, f32[32]{0}, f32[32]{0}, f32[32]{0}) parameter(0)
gte1 = f32[32]{0} get-tuple-element(param.0), index=0
gte2 = f32[32]{0} get-tuple-element(param.0), index=1
dup_gte2 = f32[32]{0} get-tuple-element(param.0), index=1
gte3 = f32[32]{0} get-tuple-element(param.0), index=2
ROOT gte4 = f32[32]{0} get-tuple-element(param.0), index=3
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnUnverifiedModule(kHloString));
HloInstruction* param = module->entry_computation()->parameter_instruction(0);
HloInstruction* gte1 = hlo_query::GetUniqueGteInstruction(param, 0);
EXPECT_NE(gte1, nullptr);
HloInstruction* gte2 = hlo_query::GetUniqueGteInstruction(param, 1);
EXPECT_EQ(gte2, nullptr);
}
TEST_F(HloQueryTest, FindComputationTest) {
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<HloModule> module,
ParseAndReturnUnverifiedModule(kConstantAdditionHloString));
EXPECT_NE(hlo_query::FindComputation(module.get(), "main"), nullptr);
EXPECT_EQ(hlo_query::FindComputation(module.get(), "foo"), nullptr);
}
TEST_F(HloQueryTest, FindInstructionUsingNameTest) {
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<HloModule> module,
ParseAndReturnUnverifiedModule(kConstantAdditionHloString));
const HloComputation* main = hlo_query::FindComputation(module.get(), "main");
EXPECT_NE(hlo_query::FindInstruction(main, "zero"), nullptr);
EXPECT_NE(hlo_query::FindInstruction(main, "five"), nullptr);
EXPECT_NE(hlo_query::FindInstruction(main, "out"), nullptr);
EXPECT_EQ(hlo_query::FindInstruction(main, "foo"), nullptr);
}
void FindInstructionsAndExpectEqual(const HloComputation* main,
absl::string_view name, HloOpcode opcode) {
SCOPED_TRACE(absl::StrCat("Comparing finding by name: ", name,
" and opcode: ", opcode));
HloInstruction* by_name = hlo_query::FindInstruction(main, name);
HloInstruction* by_opcode = hlo_query::FindInstruction(main, opcode);
EXPECT_EQ(by_name, by_opcode);
}
TEST_F(HloQueryTest, FindInstructionUsingOpcodeTest) {
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<HloModule> module,
ParseAndReturnUnverifiedModule(kConstantAdditionHloString));
const HloComputation* main = hlo_query::FindComputation(module.get(), "main");
EXPECT_NE(hlo_query::FindInstruction(main, HloOpcode::kConstant), nullptr);
EXPECT_NE(hlo_query::FindInstruction(main, HloOpcode::kAdd), nullptr);
EXPECT_EQ(hlo_query::FindInstruction(main, HloOpcode::kSelect), nullptr);
}
TEST_F(HloQueryTest, FindInstructionUsingOpcodeAndNameEqualTest) {
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<HloModule> module,
ParseAndReturnUnverifiedModule(kConstantAdditionHloString));
const HloComputation* main = hlo_query::FindComputation(module.get(), "main");
FindInstructionsAndExpectEqual(main, "zero", HloOpcode::kConstant);
FindInstructionsAndExpectEqual(main, "out", HloOpcode::kAdd);
FindInstructionsAndExpectEqual(main, "dummy", HloOpcode::kSelect);
}
TEST_F(HloQueryTest, FindInstructionDoesNotExistTest) {
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<HloModule> module,
ParseAndReturnUnverifiedModule(kConstantAdditionHloString));
const HloComputation* main = hlo_query::FindComputation(module.get(), "main");
EXPECT_NE(main, nullptr);
auto find_beef = hlo_query::FindInstruction(main, "deadbeef");
auto find_nothing = hlo_query::FindInstruction(main, "");
EXPECT_EQ(find_beef, nullptr);
EXPECT_EQ(find_nothing, nullptr);
}
TEST_F(HloQueryTest, NextChannelIdForModuleWithoutChannelIdTest) {
TF_ASSERT_OK_AND_ASSIGN(
auto module, ParseAndReturnUnverifiedModule(kConstantAdditionHloString));
EXPECT_EQ(hlo_query::NextChannelId(*module), 1)
<< "module with no channel id";
}
TEST_F(HloQueryTest, NextChannelIdBasicTest) {
absl::string_view hlo = R"(
HloModule test
ENTRY test_computation {
p = u32[] partition-id()
ROOT start = u32[] collective-permute(p), channel_id=8,
source_target_pairs={{0,1},{1,2},{2,3},{3,0}}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnUnverifiedModule(hlo));
EXPECT_EQ(hlo_query::NextChannelId(*module), 9);
}
TEST_F(HloQueryTest, NextChannelIdTwoIdsTest) {
absl::string_view hlo = R"(
HloModule test
ENTRY test_computation {
p = u32[] partition-id()
l = u32[] collective-permute(p), channel_id=8,
source_target_pairs={{0,1},{1,2}}
r = u32[] collective-permute(p), channel_id=9,
source_target_pairs={{2,3},{3,0}}
ROOT res = u32[] add(l,r)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnUnverifiedModule(hlo));
EXPECT_EQ(hlo_query::NextChannelId(*module), 10);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/utils/hlo_query.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/utils/hlo_query_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
f7d0dad8-0b4f-4a8e-b3dc-80d73139d3c7 | cpp | google/cel-cpp | value | common/value.cc | common/value_test.cc | #include "common/value.h"
#include <array>
#include <cstddef>
#include <cstdint>
#include <memory>
#include <ostream>
#include <string>
#include <type_traits>
#include <utility>
#include "absl/base/nullability.h"
#include "absl/base/optimization.h"
#include "absl/log/absl_check.h"
#include "absl/log/absl_log.h"
#include "absl/meta/type_traits.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/cord.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/types/optional.h"
#include "absl/types/span.h"
#include "absl/types/variant.h"
#include "base/attribute.h"
#include "common/json.h"
#include "common/optional_ref.h"
#include "common/type.h"
#include "common/value_kind.h"
#include "common/values/values.h"
#include "internal/status_macros.h"
#include "runtime/runtime_options.h"
#include "google/protobuf/descriptor.h"
namespace cel {
namespace {
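// Maps each alternative index of common_internal::ValueVariant to its
// ValueKind. Index 0 is absl::monostate (an uninitialized Value); kind()
// DCHECKs that index before consulting this table.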
static constexpr std::array<ValueKind, 25> kValueToKindArray = {
ValueKind::kError, ValueKind::kBool, ValueKind::kBytes,
ValueKind::kDouble, ValueKind::kDuration, ValueKind::kError,
ValueKind::kInt, ValueKind::kList, ValueKind::kList,
ValueKind::kList, ValueKind::kList, ValueKind::kMap,
ValueKind::kMap, ValueKind::kMap, ValueKind::kMap,
ValueKind::kNull, ValueKind::kOpaque, ValueKind::kString,
ValueKind::kStruct, ValueKind::kStruct, ValueKind::kStruct,
ValueKind::kTimestamp, ValueKind::kType, ValueKind::kUint,
ValueKind::kUnknown};
static_assert(kValueToKindArray.size() ==
absl::variant_size<common_internal::ValueVariant>(),
"Kind indexer must match variant declaration for cel::Value.");
}
Type Value::GetRuntimeType() const {
AssertIsValid();
switch (kind()) {
case ValueKind::kNull:
return NullType();
case ValueKind::kBool:
return BoolType();
case ValueKind::kInt:
return IntType();
case ValueKind::kUint:
return UintType();
case ValueKind::kDouble:
return DoubleType();
case ValueKind::kString:
return StringType();
case ValueKind::kBytes:
return BytesType();
case ValueKind::kStruct:
return this->GetStruct().GetRuntimeType();
case ValueKind::kDuration:
return DurationType();
case ValueKind::kTimestamp:
return TimestampType();
case ValueKind::kList:
return ListType();
case ValueKind::kMap:
return MapType();
case ValueKind::kUnknown:
return UnknownType();
case ValueKind::kType:
return TypeType();
case ValueKind::kError:
return ErrorType();
case ValueKind::kOpaque:
return this->GetOpaque().GetRuntimeType();
default:
return cel::Type();
}
}
ValueKind Value::kind() const {
ABSL_DCHECK_NE(variant_.index(), 0)
<< "kind() called on uninitialized cel::Value.";
return kValueToKindArray[variant_.index()];
}
absl::string_view Value::GetTypeName() const {
AssertIsValid();
return absl::visit(
[](const auto& alternative) -> absl::string_view {
if constexpr (std::is_same_v<
absl::remove_cvref_t<decltype(alternative)>,
absl::monostate>) {
return absl::string_view();
} else {
return alternative.GetTypeName();
}
},
variant_);
}
std::string Value::DebugString() const {
AssertIsValid();
return absl::visit(
[](const auto& alternative) -> std::string {
if constexpr (std::is_same_v<
absl::remove_cvref_t<decltype(alternative)>,
absl::monostate>) {
return std::string();
} else {
return alternative.DebugString();
}
},
variant_);
}
absl::Status Value::SerializeTo(AnyToJsonConverter& value_manager,
absl::Cord& value) const {
AssertIsValid();
return absl::visit(
[&value_manager, &value](const auto& alternative) -> absl::Status {
if constexpr (std::is_same_v<
absl::remove_cvref_t<decltype(alternative)>,
absl::monostate>) {
return absl::InternalError("use of invalid Value");
} else {
return alternative.SerializeTo(value_manager, value);
}
},
variant_);
}
absl::StatusOr<Json> Value::ConvertToJson(
AnyToJsonConverter& value_manager) const {
AssertIsValid();
return absl::visit(
[&value_manager](const auto& alternative) -> absl::StatusOr<Json> {
if constexpr (std::is_same_v<
absl::remove_cvref_t<decltype(alternative)>,
absl::monostate>) {
return absl::InternalError("use of invalid Value");
} else {
return alternative.ConvertToJson(value_manager);
}
},
variant_);
}
absl::Status Value::Equal(ValueManager& value_manager, const Value& other,
Value& result) const {
AssertIsValid();
return absl::visit(
[&value_manager, &other,
&result](const auto& alternative) -> absl::Status {
if constexpr (std::is_same_v<
absl::remove_cvref_t<decltype(alternative)>,
absl::monostate>) {
return absl::InternalError("use of invalid Value");
} else {
return alternative.Equal(value_manager, other, result);
}
},
variant_);
}
absl::StatusOr<Value> Value::Equal(ValueManager& value_manager,
const Value& other) const {
Value result;
CEL_RETURN_IF_ERROR(Equal(value_manager, other, result));
return result;
}
bool Value::IsZeroValue() const {
AssertIsValid();
return absl::visit(
[](const auto& alternative) -> bool {
if constexpr (std::is_same_v<
absl::remove_cvref_t<decltype(alternative)>,
absl::monostate>) {
return false;
} else {
return alternative.IsZeroValue();
}
},
variant_);
}
std::ostream& operator<<(std::ostream& out, const Value& value) {
return absl::visit(
[&out](const auto& alternative) -> std::ostream& {
if constexpr (std::is_same_v<
absl::remove_cvref_t<decltype(alternative)>,
absl::monostate>) {
return out << "default ctor Value";
} else {
return out << alternative;
}
},
value.variant_);
}
absl::StatusOr<Value> BytesValue::Equal(ValueManager& value_manager,
const Value& other) const {
Value result;
CEL_RETURN_IF_ERROR(Equal(value_manager, other, result));
return result;
}
absl::StatusOr<Value> ErrorValue::Equal(ValueManager& value_manager,
const Value& other) const {
Value result;
CEL_RETURN_IF_ERROR(Equal(value_manager, other, result));
return result;
}
absl::StatusOr<Value> ListValue::Equal(ValueManager& value_manager,
const Value& other) const {
Value result;
CEL_RETURN_IF_ERROR(Equal(value_manager, other, result));
return result;
}
absl::StatusOr<Value> MapValue::Equal(ValueManager& value_manager,
const Value& other) const {
Value result;
CEL_RETURN_IF_ERROR(Equal(value_manager, other, result));
return result;
}
absl::StatusOr<Value> OpaqueValue::Equal(ValueManager& value_manager,
const Value& other) const {
Value result;
CEL_RETURN_IF_ERROR(Equal(value_manager, other, result));
return result;
}
absl::StatusOr<Value> StringValue::Equal(ValueManager& value_manager,
const Value& other) const {
Value result;
CEL_RETURN_IF_ERROR(Equal(value_manager, other, result));
return result;
}
absl::StatusOr<Value> StructValue::Equal(ValueManager& value_manager,
const Value& other) const {
Value result;
CEL_RETURN_IF_ERROR(Equal(value_manager, other, result));
return result;
}
absl::StatusOr<Value> TypeValue::Equal(ValueManager& value_manager,
const Value& other) const {
Value result;
CEL_RETURN_IF_ERROR(Equal(value_manager, other, result));
return result;
}
absl::StatusOr<Value> UnknownValue::Equal(ValueManager& value_manager,
const Value& other) const {
Value result;
CEL_RETURN_IF_ERROR(Equal(value_manager, other, result));
return result;
}
absl::Status ListValue::Get(ValueManager& value_manager, size_t index,
Value& result) const {
return absl::visit(
[&value_manager, index,
&result](const auto& alternative) -> absl::Status {
return alternative.Get(value_manager, index, result);
},
variant_);
}
absl::StatusOr<Value> ListValue::Get(ValueManager& value_manager,
size_t index) const {
Value result;
CEL_RETURN_IF_ERROR(Get(value_manager, index, result));
return result;
}
absl::Status ListValue::ForEach(ValueManager& value_manager,
ForEachCallback callback) const {
return absl::visit(
[&value_manager, callback](const auto& alternative) -> absl::Status {
return alternative.ForEach(value_manager, callback);
},
variant_);
}
absl::Status ListValue::ForEach(ValueManager& value_manager,
ForEachWithIndexCallback callback) const {
return absl::visit(
[&value_manager, callback](const auto& alternative) -> absl::Status {
return alternative.ForEach(value_manager, callback);
},
variant_);
}
absl::StatusOr<absl::Nonnull<ValueIteratorPtr>> ListValue::NewIterator(
ValueManager& value_manager) const {
return absl::visit(
[&value_manager](const auto& alternative)
-> absl::StatusOr<absl::Nonnull<ValueIteratorPtr>> {
return alternative.NewIterator(value_manager);
},
variant_);
}
absl::Status ListValue::Equal(ValueManager& value_manager, const Value& other,
Value& result) const {
return absl::visit(
[&value_manager, &other,
&result](const auto& alternative) -> absl::Status {
return alternative.Equal(value_manager, other, result);
},
variant_);
}
absl::Status ListValue::Contains(ValueManager& value_manager,
const Value& other, Value& result) const {
return absl::visit(
[&value_manager, &other,
&result](const auto& alternative) -> absl::Status {
return alternative.Contains(value_manager, other, result);
},
variant_);
}
absl::StatusOr<Value> ListValue::Contains(ValueManager& value_manager,
const Value& other) const {
Value result;
CEL_RETURN_IF_ERROR(Contains(value_manager, other, result));
return result;
}
absl::Status MapValue::Get(ValueManager& value_manager, const Value& key,
Value& result) const {
return absl::visit(
[&value_manager, &key, &result](const auto& alternative) -> absl::Status {
return alternative.Get(value_manager, key, result);
},
variant_);
}
absl::StatusOr<Value> MapValue::Get(ValueManager& value_manager,
const Value& key) const {
Value result;
CEL_RETURN_IF_ERROR(Get(value_manager, key, result));
return result;
}
absl::StatusOr<bool> MapValue::Find(ValueManager& value_manager,
const Value& key, Value& result) const {
return absl::visit(
[&value_manager, &key,
&result](const auto& alternative) -> absl::StatusOr<bool> {
return alternative.Find(value_manager, key, result);
},
variant_);
}
absl::StatusOr<std::pair<Value, bool>> MapValue::Find(
ValueManager& value_manager, const Value& key) const {
Value result;
CEL_ASSIGN_OR_RETURN(auto ok, Find(value_manager, key, result));
return std::pair{std::move(result), ok};
}
absl::Status MapValue::Has(ValueManager& value_manager, const Value& key,
Value& result) const {
return absl::visit(
[&value_manager, &key, &result](const auto& alternative) -> absl::Status {
return alternative.Has(value_manager, key, result);
},
variant_);
}
absl::StatusOr<Value> MapValue::Has(ValueManager& value_manager,
const Value& key) const {
Value result;
CEL_RETURN_IF_ERROR(Has(value_manager, key, result));
return result;
}
absl::Status MapValue::ListKeys(ValueManager& value_manager,
ListValue& result) const {
return absl::visit(
[&value_manager, &result](const auto& alternative) -> absl::Status {
return alternative.ListKeys(value_manager, result);
},
variant_);
}
absl::StatusOr<ListValue> MapValue::ListKeys(
ValueManager& value_manager) const {
ListValue result;
CEL_RETURN_IF_ERROR(ListKeys(value_manager, result));
return result;
}
absl::Status MapValue::ForEach(ValueManager& value_manager,
ForEachCallback callback) const {
return absl::visit(
[&value_manager, callback](const auto& alternative) -> absl::Status {
return alternative.ForEach(value_manager, callback);
},
variant_);
}
absl::StatusOr<absl::Nonnull<ValueIteratorPtr>> MapValue::NewIterator(
ValueManager& value_manager) const {
return absl::visit(
[&value_manager](const auto& alternative)
-> absl::StatusOr<absl::Nonnull<ValueIteratorPtr>> {
return alternative.NewIterator(value_manager);
},
variant_);
}
absl::Status MapValue::Equal(ValueManager& value_manager, const Value& other,
Value& result) const {
return absl::visit(
[&value_manager, &other,
&result](const auto& alternative) -> absl::Status {
return alternative.Equal(value_manager, other, result);
},
variant_);
}
absl::Status StructValue::GetFieldByName(
ValueManager& value_manager, absl::string_view name, Value& result,
ProtoWrapperTypeOptions unboxing_options) const {
AssertIsValid();
return absl::visit(
[&value_manager, name, &result,
unboxing_options](const auto& alternative) -> absl::Status {
if constexpr (std::is_same_v<
absl::remove_cvref_t<decltype(alternative)>,
absl::monostate>) {
return absl::InternalError("use of invalid StructValue");
} else {
return alternative.GetFieldByName(value_manager, name, result,
unboxing_options);
}
},
variant_);
}
absl::StatusOr<Value> StructValue::GetFieldByName(
ValueManager& value_manager, absl::string_view name,
ProtoWrapperTypeOptions unboxing_options) const {
Value result;
CEL_RETURN_IF_ERROR(
GetFieldByName(value_manager, name, result, unboxing_options));
return result;
}
absl::Status StructValue::GetFieldByNumber(
ValueManager& value_manager, int64_t number, Value& result,
ProtoWrapperTypeOptions unboxing_options) const {
AssertIsValid();
return absl::visit(
[&value_manager, number, &result,
unboxing_options](const auto& alternative) -> absl::Status {
if constexpr (std::is_same_v<
absl::remove_cvref_t<decltype(alternative)>,
absl::monostate>) {
return absl::InternalError("use of invalid StructValue");
} else {
return alternative.GetFieldByNumber(value_manager, number, result,
unboxing_options);
}
},
variant_);
}
absl::StatusOr<Value> StructValue::GetFieldByNumber(
ValueManager& value_manager, int64_t number,
ProtoWrapperTypeOptions unboxing_options) const {
Value result;
CEL_RETURN_IF_ERROR(
GetFieldByNumber(value_manager, number, result, unboxing_options));
return result;
}
absl::Status StructValue::Equal(ValueManager& value_manager, const Value& other,
Value& result) const {
AssertIsValid();
return absl::visit(
[&value_manager, &other,
&result](const auto& alternative) -> absl::Status {
if constexpr (std::is_same_v<
absl::remove_cvref_t<decltype(alternative)>,
absl::monostate>) {
return absl::InternalError("use of invalid StructValue");
} else {
return alternative.Equal(value_manager, other, result);
}
},
variant_);
}
absl::Status StructValue::ForEachField(ValueManager& value_manager,
ForEachFieldCallback callback) const {
AssertIsValid();
return absl::visit(
[&value_manager, callback](const auto& alternative) -> absl::Status {
if constexpr (std::is_same_v<
absl::remove_cvref_t<decltype(alternative)>,
absl::monostate>) {
return absl::InternalError("use of invalid StructValue");
} else {
return alternative.ForEachField(value_manager, callback);
}
},
variant_);
}
absl::StatusOr<int> StructValue::Qualify(
ValueManager& value_manager, absl::Span<const SelectQualifier> qualifiers,
bool presence_test, Value& result) const {
AssertIsValid();
return absl::visit(
[&value_manager, qualifiers, presence_test,
&result](const auto& alternative) -> absl::StatusOr<int> {
if constexpr (std::is_same_v<
absl::remove_cvref_t<decltype(alternative)>,
absl::monostate>) {
return absl::InternalError("use of invalid StructValue");
} else {
return alternative.Qualify(value_manager, qualifiers, presence_test,
result);
}
},
variant_);
}
absl::StatusOr<std::pair<Value, int>> StructValue::Qualify(
ValueManager& value_manager, absl::Span<const SelectQualifier> qualifiers,
bool presence_test) const {
Value result;
CEL_ASSIGN_OR_RETURN(
auto count, Qualify(value_manager, qualifiers, presence_test, result));
return std::pair{std::move(result), count};
}
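// google.protobuf.NullValue is CEL's null type: its sole enumerator maps to
// NullValue, while every other enum value is represented as an int.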
Value Value::Enum(absl::Nonnull<const google::protobuf::EnumValueDescriptor*> value) {
ABSL_DCHECK(value != nullptr);
if (value->type()->full_name() == "google.protobuf.NullValue") {
ABSL_DCHECK_EQ(value->number(), 0);
return NullValue();
}
return IntValue(value->number());
}
Value Value::Enum(absl::Nonnull<const google::protobuf::EnumDescriptor*> type,
int32_t number) {
ABSL_DCHECK(type != nullptr);
if (type->full_name() == "google.protobuf.NullValue") {
ABSL_DCHECK_EQ(number, 0);
return NullValue();
}
if (type->is_closed()) {
if (ABSL_PREDICT_FALSE(type->FindValueByNumber(number) == nullptr)) {
return ErrorValue(absl::InvalidArgumentError(absl::StrCat(
"closed enum has no such value: ", type->full_name(), ".", number)));
}
}
return IntValue(number);
}
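// The As*() accessors below return an engaged optional only when the held
// alternative matches; no coercion is performed. The list, map, and struct
// accessors accept any of their concrete representations (legacy, parsed,
// field-backed, or JSON-backed).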
absl::optional<BoolValue> Value::AsBool() const {
if (const auto* alternative = absl::get_if<BoolValue>(&variant_);
alternative != nullptr) {
return *alternative;
}
return absl::nullopt;
}
optional_ref<const BytesValue> Value::AsBytes() const& {
if (const auto* alternative = absl::get_if<BytesValue>(&variant_);
alternative != nullptr) {
return *alternative;
}
return absl::nullopt;
}
absl::optional<BytesValue> Value::AsBytes() && {
if (auto* alternative = absl::get_if<BytesValue>(&variant_);
alternative != nullptr) {
return std::move(*alternative);
}
return absl::nullopt;
}
absl::optional<DoubleValue> Value::AsDouble() const {
if (const auto* alternative = absl::get_if<DoubleValue>(&variant_);
alternative != nullptr) {
return *alternative;
}
return absl::nullopt;
}
absl::optional<DurationValue> Value::AsDuration() const {
if (const auto* alternative = absl::get_if<DurationValue>(&variant_);
alternative != nullptr) {
return *alternative;
}
return absl::nullopt;
}
optional_ref<const ErrorValue> Value::AsError() const& {
if (const auto* alternative = absl::get_if<ErrorValue>(&variant_);
alternative != nullptr) {
return *alternative;
}
return absl::nullopt;
}
absl::optional<ErrorValue> Value::AsError() && {
if (auto* alternative = absl::get_if<ErrorValue>(&variant_);
alternative != nullptr) {
return std::move(*alternative);
}
return absl::nullopt;
}
absl::optional<IntValue> Value::AsInt() const {
if (const auto* alternative = absl::get_if<IntValue>(&variant_);
alternative != nullptr) {
return *alternative;
}
return absl::nullopt;
}
absl::optional<ListValue> Value::AsList() const& {
if (const auto* alternative =
absl::get_if<common_internal::LegacyListValue>(&variant_);
alternative != nullptr) {
return *alternative;
}
if (const auto* alternative = absl::get_if<ParsedListValue>(&variant_);
alternative != nullptr) {
return *alternative;
}
if (const auto* alternative =
absl::get_if<ParsedRepeatedFieldValue>(&variant_);
alternative != nullptr) {
return *alternative;
}
if (const auto* alternative = absl::get_if<ParsedJsonListValue>(&variant_);
alternative != nullptr) {
return *alternative;
}
return absl::nullopt;
}
absl::optional<ListValue> Value::AsList() && {
if (auto* alternative =
absl::get_if<common_internal::LegacyListValue>(&variant_);
alternative != nullptr) {
return std::move(*alternative);
}
if (auto* alternative = absl::get_if<ParsedListValue>(&variant_);
alternative != nullptr) {
return std::move(*alternative);
}
if (auto* alternative = absl::get_if<ParsedRepeatedFieldValue>(&variant_);
alternative != nullptr) {
return std::move(*alternative);
}
if (auto* alternative = absl::get_if<ParsedJsonListValue>(&variant_);
alternative != nullptr) {
return std::move(*alternative);
}
return absl::nullopt;
}
absl::optional<MapValue> Value::AsMap() const& {
if (const auto* alternative =
absl::get_if<common_internal::LegacyMapValue>(&variant_);
alternative != nullptr) {
return *alternative;
}
if (const auto* alternative = absl::get_if<ParsedMapValue>(&variant_);
alternative != nullptr) {
return *alternative;
}
if (const auto* alternative = absl::get_if<ParsedMapFieldValue>(&variant_);
alternative != nullptr) {
return *alternative;
}
if (const auto* alternative = absl::get_if<ParsedJsonMapValue>(&variant_);
alternative != nullptr) {
return *alternative;
}
return absl::nullopt;
}
absl::optional<MapValue> Value::AsMap() && {
if (auto* alternative =
absl::get_if<common_internal::LegacyMapValue>(&variant_);
alternative != nullptr) {
return std::move(*alternative);
}
if (auto* alternative = absl::get_if<ParsedMapValue>(&variant_);
alternative != nullptr) {
return std::move(*alternative);
}
if (auto* alternative = absl::get_if<ParsedMapFieldValue>(&variant_);
alternative != nullptr) {
return std::move(*alternative);
}
if (auto* alternative = absl::get_if<ParsedJsonMapValue>(&variant_);
alternative != nullptr) {
return std::move(*alternative);
}
return absl::nullopt;
}
absl::optional<MessageValue> Value::AsMessage() const& {
if (const auto* alternative = absl::get_if<ParsedMessageValue>(&variant_);
alternative != nullptr) {
return *alternative;
}
return absl::nullopt;
}
absl::optional<MessageValue> Value::AsMessage() && {
if (auto* alternative = absl::get_if<ParsedMessageValue>(&variant_);
alternative != nullptr) {
return std::move(*alternative);
}
return absl::nullopt;
}
absl::optional<NullValue> Value::AsNull() const {
if (const auto* alternative = absl::get_if<NullValue>(&variant_);
alternative != nullptr) {
return *alternative;
}
return absl::nullopt;
}
optional_ref<const OpaqueValue> Value::AsOpaque() const& {
if (const auto* alternative = absl::get_if<OpaqueValue>(&variant_);
alternative != nullptr) {
return *alternative;
}
return absl::nullopt;
}
absl::optional<OpaqueValue> Value::AsOpaque() && {
if (auto* alternative = absl::get_if<OpaqueValue>(&variant_);
alternative != nullptr) {
return std::move(*alternative);
}
return absl::nullopt;
}
optional_ref<const OptionalValue> Value::AsOptional() const& {
if (const auto* alternative = absl::get_if<OpaqueValue>(&variant_);
alternative != nullptr && alternative->IsOptional()) {
return static_cast<const OptionalValue&>(*alternative);
}
return absl::nullopt;
}
absl::optional<OptionalValue> Value::AsOptional() && {
if (auto* alternative = absl::get_if<OpaqueValue>(&variant_);
alternative != nullptr && alternative->IsOptional()) {
return static_cast<OptionalValue&&>(*alternative);
}
return absl::nullopt;
}
optional_ref<const ParsedJsonListValue> Value::AsParsedJsonList() const& {
if (const auto* alternative = absl::get_if<ParsedJsonListValue>(&variant_);
alternative != nullptr) {
return *alternative;
}
return absl::nullopt;
}
absl::optional<ParsedJsonListValue> Value::AsParsedJsonList() && {
if (auto* alternative = absl::get_if<ParsedJsonListValue>(&variant_);
alternative != nullptr) {
return std::move(*alternative);
}
return absl::nullopt;
}
optional_ref<const ParsedJsonMapValue> Value::AsParsedJsonMap() const& {
if (const auto* alternative = absl::get_if<ParsedJsonMapValue>(&variant_);
alternative != nullptr) {
return *alternative;
}
return absl::nullopt;
}
absl::optional<ParsedJsonMapValue> Value::AsParsedJsonMap() && {
if (auto* alternative = absl::get_if<ParsedJsonMapValue>(&variant_);
alternative != nullptr) {
return std::move(*alternative);
}
return absl::nullopt;
}
optional_ref<const ParsedListValue> Value::AsParsedList() const& {
if (const auto* alternative = absl::get_if<ParsedListValue>(&variant_);
alternative != nullptr) {
return *alternative;
}
return absl::nullopt;
}
absl::optional<ParsedListValue> Value::AsParsedList() && {
if (auto* alternative = absl::get_if<ParsedListValue>(&variant_);
alternative != nullptr) {
return std::move(*alternative);
}
return absl::nullopt;
}
optional_ref<const ParsedMapValue> Value::AsParsedMap() const& {
if (const auto* alternative = absl::get_if<ParsedMapValue>(&variant_);
alternative != nullptr) {
return *alternative;
}
return absl::nullopt;
}
absl::optional<ParsedMapValue> Value::AsParsedMap() && {
if (auto* alternative = absl::get_if<ParsedMapValue>(&variant_);
alternative != nullptr) {
return std::move(*alternative);
}
return absl::nullopt;
}
optional_ref<const ParsedMapFieldValue> Value::AsParsedMapField() const& {
if (const auto* alternative = absl::get_if<ParsedMapFieldValue>(&variant_);
alternative != nullptr) {
return *alternative;
}
return absl::nullopt;
}
absl::optional<ParsedMapFieldValue> Value::AsParsedMapField() && {
if (auto* alternative = absl::get_if<ParsedMapFieldValue>(&variant_);
alternative != nullptr) {
return std::move(*alternative);
}
return absl::nullopt;
}
optional_ref<const ParsedMessageValue> Value::AsParsedMessage() const& {
if (const auto* alternative = absl::get_if<ParsedMessageValue>(&variant_);
alternative != nullptr) {
return *alternative;
}
return absl::nullopt;
}
absl::optional<ParsedMessageValue> Value::AsParsedMessage() && {
if (auto* alternative = absl::get_if<ParsedMessageValue>(&variant_);
alternative != nullptr) {
return std::move(*alternative);
}
return absl::nullopt;
}
optional_ref<const ParsedRepeatedFieldValue> Value::AsParsedRepeatedField()
const& {
if (const auto* alternative =
absl::get_if<ParsedRepeatedFieldValue>(&variant_);
alternative != nullptr) {
return *alternative;
}
return absl::nullopt;
}
absl::optional<ParsedRepeatedFieldValue> Value::AsParsedRepeatedField() && {
if (auto* alternative = absl::get_if<ParsedRepeatedFieldValue>(&variant_);
alternative != nullptr) {
return std::move(*alternative);
}
return absl::nullopt;
}
optional_ref<const ParsedStructValue> Value::AsParsedStruct() const& {
if (const auto* alternative = absl::get_if<ParsedStructValue>(&variant_);
alternative != nullptr) {
return *alternative;
}
return absl::nullopt;
}
absl::optional<ParsedStructValue> Value::AsParsedStruct() && {
if (auto* alternative = absl::get_if<ParsedStructValue>(&variant_);
alternative != nullptr) {
return std::move(*alternative);
}
return absl::nullopt;
}
optional_ref<const StringValue> Value::AsString() const& {
if (const auto* alternative = absl::get_if<StringValue>(&variant_);
alternative != nullptr) {
return *alternative;
}
return absl::nullopt;
}
absl::optional<StringValue> Value::AsString() && {
if (auto* alternative = absl::get_if<StringValue>(&variant_);
alternative != nullptr) {
return std::move(*alternative);
}
return absl::nullopt;
}
absl::optional<StructValue> Value::AsStruct() const& {
if (const auto* alternative =
absl::get_if<common_internal::LegacyStructValue>(&variant_);
alternative != nullptr) {
return *alternative;
}
if (const auto* alternative = absl::get_if<ParsedStructValue>(&variant_);
alternative != nullptr) {
return *alternative;
}
if (const auto* alternative = absl::get_if<ParsedMessageValue>(&variant_);
alternative != nullptr) {
return *alternative;
}
return absl::nullopt;
}
absl::optional<StructValue> Value::AsStruct() && {
if (auto* alternative =
absl::get_if<common_internal::LegacyStructValue>(&variant_);
alternative != nullptr) {
return std::move(*alternative);
}
if (auto* alternative = absl::get_if<ParsedStructValue>(&variant_);
alternative != nullptr) {
return std::move(*alternative);
}
if (auto* alternative = absl::get_if<ParsedMessageValue>(&variant_);
alternative != nullptr) {
return std::move(*alternative);
}
return absl::nullopt;
}
absl::optional<TimestampValue> Value::AsTimestamp() const {
if (const auto* alternative = absl::get_if<TimestampValue>(&variant_);
alternative != nullptr) {
return *alternative;
}
return absl::nullopt;
}
optional_ref<const TypeValue> Value::AsType() const& {
if (const auto* alternative = absl::get_if<TypeValue>(&variant_);
alternative != nullptr) {
return *alternative;
}
return absl::nullopt;
}
absl::optional<TypeValue> Value::AsType() && {
if (auto* alternative = absl::get_if<TypeValue>(&variant_);
alternative != nullptr) {
return std::move(*alternative);
}
return absl::nullopt;
}
absl::optional<UintValue> Value::AsUint() const {
if (const auto* alternative = absl::get_if<UintValue>(&variant_);
alternative != nullptr) {
return *alternative;
}
return absl::nullopt;
}
optional_ref<const UnknownValue> Value::AsUnknown() const& {
if (const auto* alternative = absl::get_if<UnknownValue>(&variant_);
alternative != nullptr) {
return *alternative;
}
return absl::nullopt;
}
absl::optional<UnknownValue> Value::AsUnknown() && {
if (auto* alternative = absl::get_if<UnknownValue>(&variant_);
alternative != nullptr) {
return std::move(*alternative);
}
return absl::nullopt;
}
BoolValue Value::GetBool() const {
ABSL_DCHECK(IsBool()) << *this;
return absl::get<BoolValue>(variant_);
}
const BytesValue& Value::GetBytes() const& {
ABSL_DCHECK(IsBytes()) << *this;
return absl::get<BytesValue>(variant_);
}
BytesValue Value::GetBytes() && {
ABSL_DCHECK(IsBytes()) << *this;
return absl::get<BytesValue>(std::move(variant_));
}
DoubleValue Value::GetDouble() const {
ABSL_DCHECK(IsDouble()) << *this;
return absl::get<DoubleValue>(variant_);
}
DurationValue Value::GetDuration() const {
ABSL_DCHECK(IsDuration()) << *this;
return absl::get<DurationValue>(variant_);
}
const ErrorValue& Value::GetError() const& {
ABSL_DCHECK(IsError()) << *this;
return absl::get<ErrorValue>(variant_);
}
ErrorValue Value::GetError() && {
ABSL_DCHECK(IsError()) << *this;
return absl::get<ErrorValue>(std::move(variant_));
}
IntValue Value::GetInt() const {
ABSL_DCHECK(IsInt()) << *this;
return absl::get<IntValue>(variant_);
}
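// GetList/GetMap/GetStruct below dispatch over several variant alternatives
// by hand; on a mismatch they mimic absl::get by throwing
// absl::bad_variant_access, or aborting when exceptions are disabled.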
#ifdef ABSL_HAVE_EXCEPTIONS
#define CEL_VALUE_THROW_BAD_VARIANT_ACCESS() throw absl::bad_variant_access()
#else
#define CEL_VALUE_THROW_BAD_VARIANT_ACCESS() \
ABSL_LOG(FATAL) << absl::bad_variant_access().what()
#endif
ListValue Value::GetList() const& {
ABSL_DCHECK(IsList()) << *this;
if (const auto* alternative =
absl::get_if<common_internal::LegacyListValue>(&variant_);
alternative != nullptr) {
return *alternative;
}
if (const auto* alternative = absl::get_if<ParsedListValue>(&variant_);
alternative != nullptr) {
return *alternative;
}
if (const auto* alternative =
absl::get_if<ParsedRepeatedFieldValue>(&variant_);
alternative != nullptr) {
return *alternative;
}
if (const auto* alternative = absl::get_if<ParsedJsonListValue>(&variant_);
alternative != nullptr) {
return *alternative;
}
CEL_VALUE_THROW_BAD_VARIANT_ACCESS();
}
ListValue Value::GetList() && {
ABSL_DCHECK(IsList()) << *this;
if (auto* alternative =
absl::get_if<common_internal::LegacyListValue>(&variant_);
alternative != nullptr) {
return std::move(*alternative);
}
if (auto* alternative = absl::get_if<ParsedListValue>(&variant_);
alternative != nullptr) {
return std::move(*alternative);
}
if (auto* alternative = absl::get_if<ParsedRepeatedFieldValue>(&variant_);
alternative != nullptr) {
return std::move(*alternative);
}
if (auto* alternative = absl::get_if<ParsedJsonListValue>(&variant_);
alternative != nullptr) {
return std::move(*alternative);
}
CEL_VALUE_THROW_BAD_VARIANT_ACCESS();
}
MapValue Value::GetMap() const& {
ABSL_DCHECK(IsMap()) << *this;
if (const auto* alternative =
absl::get_if<common_internal::LegacyMapValue>(&variant_);
alternative != nullptr) {
return *alternative;
}
if (const auto* alternative = absl::get_if<ParsedMapValue>(&variant_);
alternative != nullptr) {
return *alternative;
}
if (const auto* alternative = absl::get_if<ParsedMapFieldValue>(&variant_);
alternative != nullptr) {
return *alternative;
}
if (const auto* alternative = absl::get_if<ParsedJsonMapValue>(&variant_);
alternative != nullptr) {
return *alternative;
}
CEL_VALUE_THROW_BAD_VARIANT_ACCESS();
}
MapValue Value::GetMap() && {
ABSL_DCHECK(IsMap()) << *this;
if (auto* alternative =
absl::get_if<common_internal::LegacyMapValue>(&variant_);
alternative != nullptr) {
return std::move(*alternative);
}
if (auto* alternative = absl::get_if<ParsedMapValue>(&variant_);
alternative != nullptr) {
return std::move(*alternative);
}
if (auto* alternative = absl::get_if<ParsedMapFieldValue>(&variant_);
alternative != nullptr) {
return std::move(*alternative);
}
if (auto* alternative = absl::get_if<ParsedJsonMapValue>(&variant_);
alternative != nullptr) {
return std::move(*alternative);
}
CEL_VALUE_THROW_BAD_VARIANT_ACCESS();
}
MessageValue Value::GetMessage() const& {
ABSL_DCHECK(IsMessage()) << *this;
return absl::get<ParsedMessageValue>(variant_);
}
MessageValue Value::GetMessage() && {
ABSL_DCHECK(IsMessage()) << *this;
return absl::get<ParsedMessageValue>(std::move(variant_));
}
NullValue Value::GetNull() const {
ABSL_DCHECK(IsNull()) << *this;
return absl::get<NullValue>(variant_);
}
const OpaqueValue& Value::GetOpaque() const& {
ABSL_DCHECK(IsOpaque()) << *this;
return absl::get<OpaqueValue>(variant_);
}
OpaqueValue Value::GetOpaque() && {
ABSL_DCHECK(IsOpaque()) << *this;
return absl::get<OpaqueValue>(std::move(variant_));
}
const OptionalValue& Value::GetOptional() const& {
ABSL_DCHECK(IsOptional()) << *this;
return static_cast<const OptionalValue&>(absl::get<OpaqueValue>(variant_));
}
OptionalValue Value::GetOptional() && {
ABSL_DCHECK(IsOptional()) << *this;
return static_cast<OptionalValue&&>(
absl::get<OpaqueValue>(std::move(variant_)));
}
const ParsedJsonListValue& Value::GetParsedJsonList() const& {
ABSL_DCHECK(IsParsedJsonList()) << *this;
return absl::get<ParsedJsonListValue>(variant_);
}
ParsedJsonListValue Value::GetParsedJsonList() && {
ABSL_DCHECK(IsParsedJsonList()) << *this;
return absl::get<ParsedJsonListValue>(std::move(variant_));
}
const ParsedJsonMapValue& Value::GetParsedJsonMap() const& {
ABSL_DCHECK(IsParsedJsonMap()) << *this;
return absl::get<ParsedJsonMapValue>(variant_);
}
ParsedJsonMapValue Value::GetParsedJsonMap() && {
ABSL_DCHECK(IsParsedJsonMap()) << *this;
return absl::get<ParsedJsonMapValue>(std::move(variant_));
}
const ParsedListValue& Value::GetParsedList() const& {
ABSL_DCHECK(IsParsedList()) << *this;
return absl::get<ParsedListValue>(variant_);
}
ParsedListValue Value::GetParsedList() && {
ABSL_DCHECK(IsParsedList()) << *this;
return absl::get<ParsedListValue>(std::move(variant_));
}
const ParsedMapValue& Value::GetParsedMap() const& {
ABSL_DCHECK(IsParsedMap()) << *this;
return absl::get<ParsedMapValue>(variant_);
}
ParsedMapValue Value::GetParsedMap() && {
ABSL_DCHECK(IsParsedMap()) << *this;
return absl::get<ParsedMapValue>(std::move(variant_));
}
const ParsedMapFieldValue& Value::GetParsedMapField() const& {
ABSL_DCHECK(IsParsedMapField()) << *this;
return absl::get<ParsedMapFieldValue>(variant_);
}
ParsedMapFieldValue Value::GetParsedMapField() && {
ABSL_DCHECK(IsParsedMapField()) << *this;
return absl::get<ParsedMapFieldValue>(std::move(variant_));
}
const ParsedMessageValue& Value::GetParsedMessage() const& {
ABSL_DCHECK(IsParsedMessage()) << *this;
return absl::get<ParsedMessageValue>(variant_);
}
ParsedMessageValue Value::GetParsedMessage() && {
ABSL_DCHECK(IsParsedMessage()) << *this;
return absl::get<ParsedMessageValue>(std::move(variant_));
}
const ParsedRepeatedFieldValue& Value::GetParsedRepeatedField() const& {
ABSL_DCHECK(IsParsedRepeatedField()) << *this;
return absl::get<ParsedRepeatedFieldValue>(variant_);
}
ParsedRepeatedFieldValue Value::GetParsedRepeatedField() && {
ABSL_DCHECK(IsParsedRepeatedField()) << *this;
return absl::get<ParsedRepeatedFieldValue>(std::move(variant_));
}
const ParsedStructValue& Value::GetParsedStruct() const& {
  ABSL_DCHECK(IsParsedStruct()) << *this;
return absl::get<ParsedStructValue>(variant_);
}
ParsedStructValue Value::GetParsedStruct() && {
  ABSL_DCHECK(IsParsedStruct()) << *this;
return absl::get<ParsedStructValue>(std::move(variant_));
}
const StringValue& Value::GetString() const& {
ABSL_DCHECK(IsString()) << *this;
return absl::get<StringValue>(variant_);
}
StringValue Value::GetString() && {
ABSL_DCHECK(IsString()) << *this;
return absl::get<StringValue>(std::move(variant_));
}
StructValue Value::GetStruct() const& {
ABSL_DCHECK(IsStruct()) << *this;
if (const auto* alternative =
absl::get_if<common_internal::LegacyStructValue>(&variant_);
alternative != nullptr) {
return *alternative;
}
if (const auto* alternative = absl::get_if<ParsedStructValue>(&variant_);
alternative != nullptr) {
return *alternative;
}
if (const auto* alternative = absl::get_if<ParsedMessageValue>(&variant_);
alternative != nullptr) {
return *alternative;
}
CEL_VALUE_THROW_BAD_VARIANT_ACCESS();
}
StructValue Value::GetStruct() && {
ABSL_DCHECK(IsStruct()) << *this;
if (auto* alternative =
absl::get_if<common_internal::LegacyStructValue>(&variant_);
alternative != nullptr) {
return std::move(*alternative);
}
if (auto* alternative = absl::get_if<ParsedStructValue>(&variant_);
alternative != nullptr) {
return std::move(*alternative);
}
if (auto* alternative = absl::get_if<ParsedMessageValue>(&variant_);
alternative != nullptr) {
return std::move(*alternative);
}
CEL_VALUE_THROW_BAD_VARIANT_ACCESS();
}
TimestampValue Value::GetTimestamp() const {
ABSL_DCHECK(IsTimestamp()) << *this;
return absl::get<TimestampValue>(variant_);
}
const TypeValue& Value::GetType() const& {
ABSL_DCHECK(IsType()) << *this;
return absl::get<TypeValue>(variant_);
}
TypeValue Value::GetType() && {
ABSL_DCHECK(IsType()) << *this;
return absl::get<TypeValue>(std::move(variant_));
}
UintValue Value::GetUint() const {
ABSL_DCHECK(IsUint()) << *this;
return absl::get<UintValue>(variant_);
}
const UnknownValue& Value::GetUnknown() const& {
ABSL_DCHECK(IsUnknown()) << *this;
return absl::get<UnknownValue>(variant_);
}
UnknownValue Value::GetUnknown() && {
ABSL_DCHECK(IsUnknown()) << *this;
return absl::get<UnknownValue>(std::move(variant_));
}
namespace {
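// An iterator over zero elements; calling Next() at all is a precondition
// violation since HasNext() is always false.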
class EmptyValueIterator final : public ValueIterator {
public:
bool HasNext() override { return false; }
absl::Status Next(ValueManager&, Value&) override {
return absl::FailedPreconditionError(
"`ValueIterator::Next` called after `ValueIterator::HasNext` returned "
"false");
}
};
}
absl::Nonnull<std::unique_ptr<ValueIterator>> NewEmptyValueIterator() {
return std::make_unique<EmptyValueIterator>();
}
} | #include "common/value.h"
#include <sstream>
#include "google/protobuf/struct.pb.h"
#include "google/protobuf/type.pb.h"
#include "google/protobuf/descriptor.pb.h"
#include "absl/base/attributes.h"
#include "absl/log/die_if_null.h"
#include "absl/status/status.h"
#include "absl/types/optional.h"
#include "common/native_type.h"
#include "common/type.h"
#include "common/value_testing.h"
#include "internal/parse_text_proto.h"
#include "internal/testing.h"
#include "internal/testing_descriptor_pool.h"
#include "internal/testing_message_factory.h"
#include "proto/test/v1/proto3/test_all_types.pb.h"
#include "google/protobuf/arena.h"
#include "google/protobuf/descriptor.h"
#include "google/protobuf/generated_enum_reflection.h"
namespace cel {
namespace {
using ::absl_testing::StatusIs;
using ::cel::internal::DynamicParseTextProto;
using ::cel::internal::GetTestingDescriptorPool;
using ::cel::internal::GetTestingMessageFactory;
using ::testing::_;
using ::testing::An;
using ::testing::Eq;
using ::testing::NotNull;
using ::testing::Optional;
using TestAllTypesProto3 = ::google::api::expr::test::v1::proto3::TestAllTypes;
TEST(Value, KindDebugDeath) {
Value value;
static_cast<void>(value);
EXPECT_DEBUG_DEATH(static_cast<void>(value.kind()), _);
}
TEST(Value, GetTypeName) {
Value value;
static_cast<void>(value);
EXPECT_DEBUG_DEATH(static_cast<void>(value.GetTypeName()), _);
}
TEST(Value, DebugStringUninitializedValue) {
Value value;
static_cast<void>(value);
std::ostringstream out;
out << value;
EXPECT_EQ(out.str(), "default ctor Value");
}
TEST(Value, NativeValueIdDebugDeath) {
Value value;
static_cast<void>(value);
EXPECT_DEBUG_DEATH(static_cast<void>(NativeTypeId::Of(value)), _);
}
TEST(Value, GeneratedEnum) {
EXPECT_EQ(Value::Enum(google::protobuf::NULL_VALUE), NullValue());
EXPECT_EQ(Value::Enum(google::protobuf::SYNTAX_EDITIONS), IntValue(2));
}
TEST(Value, DynamicEnum) {
EXPECT_THAT(
Value::Enum(google::protobuf::GetEnumDescriptor<google::protobuf::NullValue>(), 0),
test::IsNullValue());
EXPECT_THAT(
Value::Enum(google::protobuf::GetEnumDescriptor<google::protobuf::NullValue>()
->FindValueByNumber(0)),
test::IsNullValue());
EXPECT_THAT(
Value::Enum(google::protobuf::GetEnumDescriptor<google::protobuf::Syntax>(), 2),
test::IntValueIs(2));
EXPECT_THAT(Value::Enum(google::protobuf::GetEnumDescriptor<google::protobuf::Syntax>()
->FindValueByNumber(2)),
test::IntValueIs(2));
}
TEST(Value, DynamicClosedEnum) {
google::protobuf::FileDescriptorProto file_descriptor;
file_descriptor.set_name("test/closed_enum.proto");
file_descriptor.set_package("test");
file_descriptor.set_syntax("editions");
file_descriptor.set_edition(google::protobuf::EDITION_2023);
{
auto* enum_descriptor = file_descriptor.add_enum_type();
enum_descriptor->set_name("ClosedEnum");
enum_descriptor->mutable_options()->mutable_features()->set_enum_type(
google::protobuf::FeatureSet::CLOSED);
auto* enum_value_descriptor = enum_descriptor->add_value();
enum_value_descriptor->set_number(1);
enum_value_descriptor->set_name("FOO");
enum_value_descriptor = enum_descriptor->add_value();
enum_value_descriptor->set_number(2);
enum_value_descriptor->set_name("BAR");
}
google::protobuf::DescriptorPool pool;
ASSERT_THAT(pool.BuildFile(file_descriptor), NotNull());
const auto* enum_descriptor = pool.FindEnumTypeByName("test.ClosedEnum");
ASSERT_THAT(enum_descriptor, NotNull());
EXPECT_THAT(Value::Enum(enum_descriptor, 0),
test::ErrorValueIs(StatusIs(absl::StatusCode::kInvalidArgument)));
}
TEST(Value, Is) {
google::protobuf::Arena arena;
EXPECT_TRUE(Value(BoolValue()).Is<BoolValue>());
EXPECT_TRUE(Value(BoolValue(true)).IsTrue());
EXPECT_TRUE(Value(BoolValue(false)).IsFalse());
EXPECT_TRUE(Value(BytesValue()).Is<BytesValue>());
EXPECT_TRUE(Value(DoubleValue()).Is<DoubleValue>());
EXPECT_TRUE(Value(DurationValue()).Is<DurationValue>());
EXPECT_TRUE(Value(ErrorValue()).Is<ErrorValue>());
EXPECT_TRUE(Value(IntValue()).Is<IntValue>());
EXPECT_TRUE(Value(ListValue()).Is<ListValue>());
EXPECT_TRUE(Value(ParsedListValue()).Is<ListValue>());
EXPECT_TRUE(Value(ParsedListValue()).Is<ParsedListValue>());
EXPECT_TRUE(Value(ParsedJsonListValue()).Is<ListValue>());
EXPECT_TRUE(Value(ParsedJsonListValue()).Is<ParsedJsonListValue>());
{
auto message = DynamicParseTextProto<TestAllTypesProto3>(
&arena, R"pb()pb", GetTestingDescriptorPool(),
GetTestingMessageFactory());
const auto* field = ABSL_DIE_IF_NULL(
message->GetDescriptor()->FindFieldByName("repeated_int32"));
EXPECT_TRUE(
Value(ParsedRepeatedFieldValue(message, field)).Is<ListValue>());
EXPECT_TRUE(Value(ParsedRepeatedFieldValue(message, field))
.Is<ParsedRepeatedFieldValue>());
}
EXPECT_TRUE(Value(MapValue()).Is<MapValue>());
EXPECT_TRUE(Value(ParsedMapValue()).Is<MapValue>());
EXPECT_TRUE(Value(ParsedMapValue()).Is<ParsedMapValue>());
EXPECT_TRUE(Value(ParsedJsonMapValue()).Is<MapValue>());
EXPECT_TRUE(Value(ParsedJsonMapValue()).Is<ParsedJsonMapValue>());
{
auto message = DynamicParseTextProto<TestAllTypesProto3>(
&arena, R"pb()pb", GetTestingDescriptorPool(),
GetTestingMessageFactory());
const auto* field = ABSL_DIE_IF_NULL(
message->GetDescriptor()->FindFieldByName("map_int32_int32"));
EXPECT_TRUE(Value(ParsedMapFieldValue(message, field)).Is<MapValue>());
EXPECT_TRUE(
Value(ParsedMapFieldValue(message, field)).Is<ParsedMapFieldValue>());
}
EXPECT_TRUE(Value(NullValue()).Is<NullValue>());
EXPECT_TRUE(Value(OptionalValue()).Is<OpaqueValue>());
EXPECT_TRUE(Value(OptionalValue()).Is<OptionalValue>());
EXPECT_TRUE(Value(ParsedMessageValue()).Is<StructValue>());
EXPECT_TRUE(Value(ParsedMessageValue()).Is<MessageValue>());
EXPECT_TRUE(Value(ParsedMessageValue()).Is<ParsedMessageValue>());
EXPECT_TRUE(Value(StringValue()).Is<StringValue>());
EXPECT_TRUE(Value(TimestampValue()).Is<TimestampValue>());
EXPECT_TRUE(Value(TypeValue(StringType())).Is<TypeValue>());
EXPECT_TRUE(Value(UintValue()).Is<UintValue>());
EXPECT_TRUE(Value(UnknownValue()).Is<UnknownValue>());
}
template <typename T>
constexpr T& AsLValueRef(T& t ABSL_ATTRIBUTE_LIFETIME_BOUND) {
return t;
}
template <typename T>
constexpr const T& AsConstLValueRef(T& t ABSL_ATTRIBUTE_LIFETIME_BOUND) {
return t;
}
template <typename T>
constexpr T&& AsRValueRef(T& t ABSL_ATTRIBUTE_LIFETIME_BOUND) {
return static_cast<T&&>(t);
}
template <typename T>
constexpr const T&& AsConstRValueRef(T& t ABSL_ATTRIBUTE_LIFETIME_BOUND) {
return static_cast<const T&&>(t);
}
TEST(Value, As) {
google::protobuf::Arena arena;
EXPECT_THAT(Value(BoolValue()).As<BoolValue>(), Optional(An<BoolValue>()));
EXPECT_THAT(Value(BoolValue()).As<ErrorValue>(), Eq(absl::nullopt));
{
Value value(BytesValue{});
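    // Keep a second copy: the && overload may consume `value`, so the
    // const&& check below runs on the untouched copy.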
Value other_value = value;
EXPECT_THAT(AsLValueRef<Value>(value).As<BytesValue>(),
Optional(An<BytesValue>()));
EXPECT_THAT(AsConstLValueRef<Value>(value).As<BytesValue>(),
Optional(An<BytesValue>()));
EXPECT_THAT(AsRValueRef<Value>(value).As<BytesValue>(),
Optional(An<BytesValue>()));
EXPECT_THAT(AsConstRValueRef<Value>(other_value).As<BytesValue>(),
Optional(An<BytesValue>()));
}
EXPECT_THAT(Value(DoubleValue()).As<DoubleValue>(),
Optional(An<DoubleValue>()));
EXPECT_THAT(Value(DoubleValue()).As<ErrorValue>(), Eq(absl::nullopt));
EXPECT_THAT(Value(DurationValue()).As<DurationValue>(),
Optional(An<DurationValue>()));
EXPECT_THAT(Value(DurationValue()).As<ErrorValue>(), Eq(absl::nullopt));
{
Value value(ErrorValue{});
Value other_value = value;
EXPECT_THAT(AsLValueRef<Value>(value).As<ErrorValue>(),
Optional(An<ErrorValue>()));
EXPECT_THAT(AsConstLValueRef<Value>(value).As<ErrorValue>(),
Optional(An<ErrorValue>()));
EXPECT_THAT(AsRValueRef<Value>(value).As<ErrorValue>(),
Optional(An<ErrorValue>()));
EXPECT_THAT(AsConstRValueRef<Value>(other_value).As<ErrorValue>(),
Optional(An<ErrorValue>()));
EXPECT_THAT(Value(ErrorValue()).As<BoolValue>(), Eq(absl::nullopt));
}
EXPECT_THAT(Value(IntValue()).As<IntValue>(), Optional(An<IntValue>()));
EXPECT_THAT(Value(IntValue()).As<ErrorValue>(), Eq(absl::nullopt));
{
Value value(ListValue{});
Value other_value = value;
EXPECT_THAT(AsLValueRef<Value>(value).As<ListValue>(),
Optional(An<ListValue>()));
EXPECT_THAT(AsConstLValueRef<Value>(value).As<ListValue>(),
Optional(An<ListValue>()));
EXPECT_THAT(AsRValueRef<Value>(value).As<ListValue>(),
Optional(An<ListValue>()));
EXPECT_THAT(AsConstRValueRef<Value>(other_value).As<ListValue>(),
Optional(An<ListValue>()));
EXPECT_THAT(Value(ListValue()).As<ErrorValue>(), Eq(absl::nullopt));
}
{
Value value(ParsedJsonListValue{});
Value other_value = value;
EXPECT_THAT(AsLValueRef<Value>(value).As<ListValue>(),
Optional(An<ListValue>()));
EXPECT_THAT(AsConstLValueRef<Value>(value).As<ListValue>(),
Optional(An<ListValue>()));
EXPECT_THAT(AsRValueRef<Value>(value).As<ListValue>(),
Optional(An<ListValue>()));
EXPECT_THAT(AsConstRValueRef<Value>(other_value).As<ListValue>(),
Optional(An<ListValue>()));
EXPECT_THAT(Value(ListValue()).As<ErrorValue>(), Eq(absl::nullopt));
}
{
Value value(ParsedJsonListValue{});
Value other_value = value;
EXPECT_THAT(AsLValueRef<Value>(value).As<ParsedJsonListValue>(),
Optional(An<ParsedJsonListValue>()));
EXPECT_THAT(AsConstLValueRef<Value>(value).As<ParsedJsonListValue>(),
Optional(An<ParsedJsonListValue>()));
EXPECT_THAT(AsRValueRef<Value>(value).As<ParsedJsonListValue>(),
Optional(An<ParsedJsonListValue>()));
EXPECT_THAT(AsConstRValueRef<Value>(other_value).As<ParsedJsonListValue>(),
Optional(An<ParsedJsonListValue>()));
}
{
Value value(ParsedListValue{});
Value other_value = value;
EXPECT_THAT(AsLValueRef<Value>(value).As<ListValue>(),
Optional(An<ListValue>()));
EXPECT_THAT(AsConstLValueRef<Value>(value).As<ListValue>(),
Optional(An<ListValue>()));
EXPECT_THAT(AsRValueRef<Value>(value).As<ListValue>(),
Optional(An<ListValue>()));
EXPECT_THAT(AsConstRValueRef<Value>(other_value).As<ListValue>(),
Optional(An<ListValue>()));
EXPECT_THAT(Value(ListValue()).As<ErrorValue>(), Eq(absl::nullopt));
}
{
Value value(ParsedListValue{});
Value other_value = value;
EXPECT_THAT(AsLValueRef<Value>(value).As<ParsedListValue>(),
Optional(An<ParsedListValue>()));
EXPECT_THAT(AsConstLValueRef<Value>(value).As<ParsedListValue>(),
Optional(An<ParsedListValue>()));
EXPECT_THAT(AsRValueRef<Value>(value).As<ParsedListValue>(),
Optional(An<ParsedListValue>()));
EXPECT_THAT(AsConstRValueRef<Value>(other_value).As<ParsedListValue>(),
Optional(An<ParsedListValue>()));
}
{
auto message = DynamicParseTextProto<TestAllTypesProto3>(
&arena, R"pb()pb", GetTestingDescriptorPool(),
GetTestingMessageFactory());
const auto* field = ABSL_DIE_IF_NULL(
message->GetDescriptor()->FindFieldByName("repeated_int32"));
Value value(ParsedRepeatedFieldValue{message, field});
Value other_value = value;
EXPECT_THAT(AsLValueRef<Value>(value).As<ListValue>(),
Optional(An<ListValue>()));
EXPECT_THAT(AsConstLValueRef<Value>(value).As<ListValue>(),
Optional(An<ListValue>()));
EXPECT_THAT(AsRValueRef<Value>(value).As<ListValue>(),
Optional(An<ListValue>()));
EXPECT_THAT(AsConstRValueRef<Value>(other_value).As<ListValue>(),
Optional(An<ListValue>()));
}
{
auto message = DynamicParseTextProto<TestAllTypesProto3>(
&arena, R"pb()pb", GetTestingDescriptorPool(),
GetTestingMessageFactory());
const auto* field = ABSL_DIE_IF_NULL(
message->GetDescriptor()->FindFieldByName("repeated_int32"));
Value value(ParsedRepeatedFieldValue{message, field});
Value other_value = value;
EXPECT_THAT(AsLValueRef<Value>(value).As<ParsedRepeatedFieldValue>(),
Optional(An<ParsedRepeatedFieldValue>()));
EXPECT_THAT(AsConstLValueRef<Value>(value).As<ParsedRepeatedFieldValue>(),
Optional(An<ParsedRepeatedFieldValue>()));
EXPECT_THAT(AsRValueRef<Value>(value).As<ParsedRepeatedFieldValue>(),
Optional(An<ParsedRepeatedFieldValue>()));
EXPECT_THAT(
AsConstRValueRef<Value>(other_value).As<ParsedRepeatedFieldValue>(),
Optional(An<ParsedRepeatedFieldValue>()));
}
{
Value value(MapValue{});
Value other_value = value;
EXPECT_THAT(AsLValueRef<Value>(value).As<MapValue>(),
Optional(An<MapValue>()));
EXPECT_THAT(AsConstLValueRef<Value>(value).As<MapValue>(),
Optional(An<MapValue>()));
EXPECT_THAT(AsRValueRef<Value>(value).As<MapValue>(),
Optional(An<MapValue>()));
EXPECT_THAT(AsConstRValueRef<Value>(other_value).As<MapValue>(),
Optional(An<MapValue>()));
EXPECT_THAT(Value(MapValue()).As<ErrorValue>(), Eq(absl::nullopt));
}
{
Value value(ParsedJsonMapValue{});
Value other_value = value;
EXPECT_THAT(AsLValueRef<Value>(value).As<MapValue>(),
Optional(An<MapValue>()));
EXPECT_THAT(AsConstLValueRef<Value>(value).As<MapValue>(),
Optional(An<MapValue>()));
EXPECT_THAT(AsRValueRef<Value>(value).As<MapValue>(),
Optional(An<MapValue>()));
EXPECT_THAT(AsConstRValueRef<Value>(other_value).As<MapValue>(),
Optional(An<MapValue>()));
EXPECT_THAT(Value(MapValue()).As<ErrorValue>(), Eq(absl::nullopt));
}
{
Value value(ParsedJsonMapValue{});
Value other_value = value;
EXPECT_THAT(AsLValueRef<Value>(value).As<ParsedJsonMapValue>(),
Optional(An<ParsedJsonMapValue>()));
EXPECT_THAT(AsConstLValueRef<Value>(value).As<ParsedJsonMapValue>(),
Optional(An<ParsedJsonMapValue>()));
EXPECT_THAT(AsRValueRef<Value>(value).As<ParsedJsonMapValue>(),
Optional(An<ParsedJsonMapValue>()));
EXPECT_THAT(AsConstRValueRef<Value>(other_value).As<ParsedJsonMapValue>(),
Optional(An<ParsedJsonMapValue>()));
}
{
Value value(ParsedMapValue{});
Value other_value = value;
EXPECT_THAT(AsLValueRef<Value>(value).As<MapValue>(),
Optional(An<MapValue>()));
EXPECT_THAT(AsConstLValueRef<Value>(value).As<MapValue>(),
Optional(An<MapValue>()));
EXPECT_THAT(AsRValueRef<Value>(value).As<MapValue>(),
Optional(An<MapValue>()));
EXPECT_THAT(AsConstRValueRef<Value>(other_value).As<MapValue>(),
Optional(An<MapValue>()));
EXPECT_THAT(Value(MapValue()).As<ErrorValue>(), Eq(absl::nullopt));
}
{
Value value(ParsedMapValue{});
Value other_value = value;
EXPECT_THAT(AsLValueRef<Value>(value).As<ParsedMapValue>(),
Optional(An<ParsedMapValue>()));
EXPECT_THAT(AsConstLValueRef<Value>(value).As<ParsedMapValue>(),
Optional(An<ParsedMapValue>()));
EXPECT_THAT(AsRValueRef<Value>(value).As<ParsedMapValue>(),
Optional(An<ParsedMapValue>()));
EXPECT_THAT(AsConstRValueRef<Value>(other_value).As<ParsedMapValue>(),
Optional(An<ParsedMapValue>()));
}
{
auto message = DynamicParseTextProto<TestAllTypesProto3>(
&arena, R"pb()pb", GetTestingDescriptorPool(),
GetTestingMessageFactory());
const auto* field = ABSL_DIE_IF_NULL(
message->GetDescriptor()->FindFieldByName("map_int32_int32"));
Value value(ParsedMapFieldValue{message, field});
Value other_value = value;
EXPECT_THAT(AsLValueRef<Value>(value).As<MapValue>(),
Optional(An<MapValue>()));
EXPECT_THAT(AsConstLValueRef<Value>(value).As<MapValue>(),
Optional(An<MapValue>()));
EXPECT_THAT(AsRValueRef<Value>(value).As<MapValue>(),
Optional(An<MapValue>()));
EXPECT_THAT(AsConstRValueRef<Value>(other_value).As<MapValue>(),
Optional(An<MapValue>()));
}
{
auto message = DynamicParseTextProto<TestAllTypesProto3>(
&arena, R"pb()pb", GetTestingDescriptorPool(),
GetTestingMessageFactory());
const auto* field = ABSL_DIE_IF_NULL(
message->GetDescriptor()->FindFieldByName("map_int32_int32"));
Value value(ParsedMapFieldValue{message, field});
Value other_value = value;
EXPECT_THAT(AsLValueRef<Value>(value).As<ParsedMapFieldValue>(),
Optional(An<ParsedMapFieldValue>()));
EXPECT_THAT(AsConstLValueRef<Value>(value).As<ParsedMapFieldValue>(),
Optional(An<ParsedMapFieldValue>()));
EXPECT_THAT(AsRValueRef<Value>(value).As<ParsedMapFieldValue>(),
Optional(An<ParsedMapFieldValue>()));
EXPECT_THAT(AsConstRValueRef<Value>(other_value).As<ParsedMapFieldValue>(),
Optional(An<ParsedMapFieldValue>()));
}
{
Value value(ParsedMessageValue{DynamicParseTextProto<TestAllTypesProto3>(
&arena, R"pb()pb", GetTestingDescriptorPool(),
GetTestingMessageFactory())});
Value other_value = value;
EXPECT_THAT(AsLValueRef<Value>(value).As<MessageValue>(),
Optional(An<MessageValue>()));
EXPECT_THAT(AsConstLValueRef<Value>(value).As<MessageValue>(),
Optional(An<MessageValue>()));
EXPECT_THAT(AsRValueRef<Value>(value).As<MessageValue>(),
Optional(An<MessageValue>()));
EXPECT_THAT(AsConstRValueRef<Value>(other_value).As<MessageValue>(),
Optional(An<MessageValue>()));
EXPECT_THAT(
Value(ParsedMessageValue{DynamicParseTextProto<TestAllTypesProto3>(
&arena, R"pb()pb", GetTestingDescriptorPool(),
GetTestingMessageFactory())})
.As<ErrorValue>(),
Eq(absl::nullopt));
}
EXPECT_THAT(Value(NullValue()).As<NullValue>(), Optional(An<NullValue>()));
EXPECT_THAT(Value(NullValue()).As<ErrorValue>(), Eq(absl::nullopt));
{
Value value(OptionalValue{});
Value other_value = value;
EXPECT_THAT(AsLValueRef<Value>(value).As<OpaqueValue>(),
Optional(An<OpaqueValue>()));
EXPECT_THAT(AsConstLValueRef<Value>(value).As<OpaqueValue>(),
Optional(An<OpaqueValue>()));
EXPECT_THAT(AsRValueRef<Value>(value).As<OpaqueValue>(),
Optional(An<OpaqueValue>()));
EXPECT_THAT(AsConstRValueRef<Value>(other_value).As<OpaqueValue>(),
Optional(An<OpaqueValue>()));
EXPECT_THAT(Value(OpaqueValue(OptionalValue())).As<ErrorValue>(),
Eq(absl::nullopt));
}
{
Value value(OptionalValue{});
Value other_value = value;
EXPECT_THAT(AsLValueRef<Value>(value).As<OptionalValue>(),
Optional(An<OptionalValue>()));
EXPECT_THAT(AsConstLValueRef<Value>(value).As<OptionalValue>(),
Optional(An<OptionalValue>()));
EXPECT_THAT(AsRValueRef<Value>(value).As<OptionalValue>(),
Optional(An<OptionalValue>()));
EXPECT_THAT(AsConstRValueRef<Value>(other_value).As<OptionalValue>(),
Optional(An<OptionalValue>()));
EXPECT_THAT(Value(OptionalValue()).As<ErrorValue>(), Eq(absl::nullopt));
}
{
OpaqueValue value(OptionalValue{});
OpaqueValue other_value = value;
EXPECT_THAT(AsLValueRef<OpaqueValue>(value).As<OptionalValue>(),
Optional(An<OptionalValue>()));
EXPECT_THAT(AsConstLValueRef<OpaqueValue>(value).As<OptionalValue>(),
Optional(An<OptionalValue>()));
EXPECT_THAT(AsRValueRef<OpaqueValue>(value).As<OptionalValue>(),
Optional(An<OptionalValue>()));
EXPECT_THAT(AsConstRValueRef<OpaqueValue>(other_value).As<OptionalValue>(),
Optional(An<OptionalValue>()));
}
{
Value value(ParsedMessageValue{DynamicParseTextProto<TestAllTypesProto3>(
&arena, R"pb()pb", GetTestingDescriptorPool(),
GetTestingMessageFactory())});
Value other_value = value;
EXPECT_THAT(AsLValueRef<Value>(value).As<ParsedMessageValue>(),
Optional(An<ParsedMessageValue>()));
EXPECT_THAT(AsConstLValueRef<Value>(value).As<ParsedMessageValue>(),
Optional(An<ParsedMessageValue>()));
EXPECT_THAT(AsRValueRef<Value>(value).As<ParsedMessageValue>(),
Optional(An<ParsedMessageValue>()));
EXPECT_THAT(AsConstRValueRef<Value>(other_value).As<ParsedMessageValue>(),
Optional(An<ParsedMessageValue>()));
}
{
Value value(StringValue{});
Value other_value = value;
EXPECT_THAT(AsLValueRef<Value>(value).As<StringValue>(),
Optional(An<StringValue>()));
EXPECT_THAT(AsConstLValueRef<Value>(value).As<StringValue>(),
Optional(An<StringValue>()));
EXPECT_THAT(AsRValueRef<Value>(value).As<StringValue>(),
Optional(An<StringValue>()));
EXPECT_THAT(AsConstRValueRef<Value>(other_value).As<StringValue>(),
Optional(An<StringValue>()));
EXPECT_THAT(Value(StringValue()).As<ErrorValue>(), Eq(absl::nullopt));
}
{
Value value(ParsedMessageValue{DynamicParseTextProto<TestAllTypesProto3>(
&arena, R"pb()pb", GetTestingDescriptorPool(),
GetTestingMessageFactory())});
Value other_value = value;
EXPECT_THAT(AsLValueRef<Value>(value).As<StructValue>(),
Optional(An<StructValue>()));
EXPECT_THAT(AsConstLValueRef<Value>(value).As<StructValue>(),
Optional(An<StructValue>()));
EXPECT_THAT(AsRValueRef<Value>(value).As<StructValue>(),
Optional(An<StructValue>()));
EXPECT_THAT(AsConstRValueRef<Value>(other_value).As<StructValue>(),
Optional(An<StructValue>()));
}
EXPECT_THAT(Value(TimestampValue()).As<TimestampValue>(),
Optional(An<TimestampValue>()));
EXPECT_THAT(Value(TimestampValue()).As<ErrorValue>(), Eq(absl::nullopt));
{
Value value(TypeValue(StringType{}));
Value other_value = value;
EXPECT_THAT(AsLValueRef<Value>(value).As<TypeValue>(),
Optional(An<TypeValue>()));
EXPECT_THAT(AsConstLValueRef<Value>(value).As<TypeValue>(),
Optional(An<TypeValue>()));
EXPECT_THAT(AsRValueRef<Value>(value).As<TypeValue>(),
Optional(An<TypeValue>()));
EXPECT_THAT(AsConstRValueRef<Value>(other_value).As<TypeValue>(),
Optional(An<TypeValue>()));
EXPECT_THAT(Value(TypeValue(StringType())).As<ErrorValue>(),
Eq(absl::nullopt));
}
EXPECT_THAT(Value(UintValue()).As<UintValue>(), Optional(An<UintValue>()));
EXPECT_THAT(Value(UintValue()).As<ErrorValue>(), Eq(absl::nullopt));
{
Value value(UnknownValue{});
Value other_value = value;
EXPECT_THAT(AsLValueRef<Value>(value).As<UnknownValue>(),
Optional(An<UnknownValue>()));
EXPECT_THAT(AsConstLValueRef<Value>(value).As<UnknownValue>(),
Optional(An<UnknownValue>()));
EXPECT_THAT(AsRValueRef<Value>(value).As<UnknownValue>(),
Optional(An<UnknownValue>()));
EXPECT_THAT(AsConstRValueRef<Value>(other_value).As<UnknownValue>(),
Optional(An<UnknownValue>()));
EXPECT_THAT(Value(UnknownValue()).As<ErrorValue>(), Eq(absl::nullopt));
}
}
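// Forwards the caller's value category into the ref-qualified Get<T>()
// overloads of the value under test.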
template <typename To, typename From>
decltype(auto) DoGet(From&& from) {
return std::forward<From>(from).template Get<To>();
}
TEST(Value, Get) {
google::protobuf::Arena arena;
EXPECT_THAT(DoGet<BoolValue>(Value(BoolValue())), An<BoolValue>());
{
Value value(BytesValue{});
Value other_value = value;
EXPECT_THAT(DoGet<BytesValue>(AsLValueRef<Value>(value)), An<BytesValue>());
EXPECT_THAT(DoGet<BytesValue>(AsConstLValueRef<Value>(value)),
An<BytesValue>());
EXPECT_THAT(DoGet<BytesValue>(AsRValueRef<Value>(value)), An<BytesValue>());
EXPECT_THAT(DoGet<BytesValue>(AsConstRValueRef<Value>(other_value)),
An<BytesValue>());
}
EXPECT_THAT(DoGet<DoubleValue>(Value(DoubleValue())), An<DoubleValue>());
EXPECT_THAT(DoGet<DurationValue>(Value(DurationValue())),
An<DurationValue>());
{
Value value(ErrorValue{});
Value other_value = value;
EXPECT_THAT(DoGet<ErrorValue>(AsLValueRef<Value>(value)), An<ErrorValue>());
EXPECT_THAT(DoGet<ErrorValue>(AsConstLValueRef<Value>(value)),
An<ErrorValue>());
EXPECT_THAT(DoGet<ErrorValue>(AsRValueRef<Value>(value)), An<ErrorValue>());
EXPECT_THAT(DoGet<ErrorValue>(AsConstRValueRef<Value>(other_value)),
An<ErrorValue>());
}
EXPECT_THAT(DoGet<IntValue>(Value(IntValue())), An<IntValue>());
{
Value value(ListValue{});
Value other_value = value;
EXPECT_THAT(DoGet<ListValue>(AsLValueRef<Value>(value)), An<ListValue>());
EXPECT_THAT(DoGet<ListValue>(AsConstLValueRef<Value>(value)),
An<ListValue>());
EXPECT_THAT(DoGet<ListValue>(AsRValueRef<Value>(value)), An<ListValue>());
EXPECT_THAT(DoGet<ListValue>(AsConstRValueRef<Value>(other_value)),
An<ListValue>());
}
{
Value value(ParsedJsonListValue{});
Value other_value = value;
EXPECT_THAT(DoGet<ListValue>(AsLValueRef<Value>(value)), An<ListValue>());
EXPECT_THAT(DoGet<ListValue>(AsConstLValueRef<Value>(value)),
An<ListValue>());
EXPECT_THAT(DoGet<ListValue>(AsRValueRef<Value>(value)), An<ListValue>());
EXPECT_THAT(DoGet<ListValue>(AsConstRValueRef<Value>(other_value)),
An<ListValue>());
}
{
Value value(ParsedJsonListValue{});
Value other_value = value;
EXPECT_THAT(DoGet<ParsedJsonListValue>(AsLValueRef<Value>(value)),
An<ParsedJsonListValue>());
EXPECT_THAT(DoGet<ParsedJsonListValue>(AsConstLValueRef<Value>(value)),
An<ParsedJsonListValue>());
EXPECT_THAT(DoGet<ParsedJsonListValue>(AsRValueRef<Value>(value)),
An<ParsedJsonListValue>());
EXPECT_THAT(
DoGet<ParsedJsonListValue>(AsConstRValueRef<Value>(other_value)),
An<ParsedJsonListValue>());
}
{
Value value(ParsedListValue{});
Value other_value = value;
EXPECT_THAT(DoGet<ListValue>(AsLValueRef<Value>(value)), An<ListValue>());
EXPECT_THAT(DoGet<ListValue>(AsConstLValueRef<Value>(value)),
An<ListValue>());
EXPECT_THAT(DoGet<ListValue>(AsRValueRef<Value>(value)), An<ListValue>());
EXPECT_THAT(DoGet<ListValue>(AsConstRValueRef<Value>(other_value)),
An<ListValue>());
}
{
Value value(ParsedListValue{});
Value other_value = value;
EXPECT_THAT(DoGet<ParsedListValue>(AsLValueRef<Value>(value)),
An<ParsedListValue>());
EXPECT_THAT(DoGet<ParsedListValue>(AsConstLValueRef<Value>(value)),
An<ParsedListValue>());
EXPECT_THAT(DoGet<ParsedListValue>(AsRValueRef<Value>(value)),
An<ParsedListValue>());
EXPECT_THAT(DoGet<ParsedListValue>(AsConstRValueRef<Value>(other_value)),
An<ParsedListValue>());
}
{
auto message = DynamicParseTextProto<TestAllTypesProto3>(
&arena, R"pb()pb", GetTestingDescriptorPool(),
GetTestingMessageFactory());
const auto* field = ABSL_DIE_IF_NULL(
message->GetDescriptor()->FindFieldByName("repeated_int32"));
Value value(ParsedRepeatedFieldValue{message, field});
Value other_value = value;
EXPECT_THAT(DoGet<ListValue>(AsLValueRef<Value>(value)), An<ListValue>());
EXPECT_THAT(DoGet<ListValue>(AsConstLValueRef<Value>(value)),
An<ListValue>());
EXPECT_THAT(DoGet<ListValue>(AsRValueRef<Value>(value)), An<ListValue>());
EXPECT_THAT(DoGet<ListValue>(AsConstRValueRef<Value>(other_value)),
An<ListValue>());
}
{
auto message = DynamicParseTextProto<TestAllTypesProto3>(
&arena, R"pb()pb", GetTestingDescriptorPool(),
GetTestingMessageFactory());
const auto* field = ABSL_DIE_IF_NULL(
message->GetDescriptor()->FindFieldByName("repeated_int32"));
Value value(ParsedRepeatedFieldValue{message, field});
Value other_value = value;
EXPECT_THAT(DoGet<ParsedRepeatedFieldValue>(AsLValueRef<Value>(value)),
An<ParsedRepeatedFieldValue>());
EXPECT_THAT(DoGet<ParsedRepeatedFieldValue>(AsConstLValueRef<Value>(value)),
An<ParsedRepeatedFieldValue>());
EXPECT_THAT(DoGet<ParsedRepeatedFieldValue>(AsRValueRef<Value>(value)),
An<ParsedRepeatedFieldValue>());
EXPECT_THAT(
DoGet<ParsedRepeatedFieldValue>(AsConstRValueRef<Value>(other_value)),
An<ParsedRepeatedFieldValue>());
}
{
Value value(MapValue{});
Value other_value = value;
EXPECT_THAT(DoGet<MapValue>(AsLValueRef<Value>(value)), An<MapValue>());
EXPECT_THAT(DoGet<MapValue>(AsConstLValueRef<Value>(value)),
An<MapValue>());
EXPECT_THAT(DoGet<MapValue>(AsRValueRef<Value>(value)), An<MapValue>());
EXPECT_THAT(DoGet<MapValue>(AsConstRValueRef<Value>(other_value)),
An<MapValue>());
}
{
Value value(ParsedJsonMapValue{});
Value other_value = value;
EXPECT_THAT(DoGet<MapValue>(AsLValueRef<Value>(value)), An<MapValue>());
EXPECT_THAT(DoGet<MapValue>(AsConstLValueRef<Value>(value)),
An<MapValue>());
EXPECT_THAT(DoGet<MapValue>(AsRValueRef<Value>(value)), An<MapValue>());
EXPECT_THAT(DoGet<MapValue>(AsConstRValueRef<Value>(other_value)),
An<MapValue>());
}
{
Value value(ParsedJsonMapValue{});
Value other_value = value;
EXPECT_THAT(DoGet<ParsedJsonMapValue>(AsLValueRef<Value>(value)),
An<ParsedJsonMapValue>());
EXPECT_THAT(DoGet<ParsedJsonMapValue>(AsConstLValueRef<Value>(value)),
An<ParsedJsonMapValue>());
EXPECT_THAT(DoGet<ParsedJsonMapValue>(AsRValueRef<Value>(value)),
An<ParsedJsonMapValue>());
EXPECT_THAT(DoGet<ParsedJsonMapValue>(AsConstRValueRef<Value>(other_value)),
An<ParsedJsonMapValue>());
}
{
Value value(ParsedMapValue{});
Value other_value = value;
EXPECT_THAT(DoGet<MapValue>(AsLValueRef<Value>(value)), An<MapValue>());
EXPECT_THAT(DoGet<MapValue>(AsConstLValueRef<Value>(value)),
An<MapValue>());
EXPECT_THAT(DoGet<MapValue>(AsRValueRef<Value>(value)), An<MapValue>());
EXPECT_THAT(DoGet<MapValue>(AsConstRValueRef<Value>(other_value)),
An<MapValue>());
}
{
Value value(ParsedMapValue{});
Value other_value = value;
EXPECT_THAT(DoGet<ParsedMapValue>(AsLValueRef<Value>(value)),
An<ParsedMapValue>());
EXPECT_THAT(DoGet<ParsedMapValue>(AsConstLValueRef<Value>(value)),
An<ParsedMapValue>());
EXPECT_THAT(DoGet<ParsedMapValue>(AsRValueRef<Value>(value)),
An<ParsedMapValue>());
EXPECT_THAT(DoGet<ParsedMapValue>(AsConstRValueRef<Value>(other_value)),
An<ParsedMapValue>());
}
{
auto message = DynamicParseTextProto<TestAllTypesProto3>(
&arena, R"pb()pb", GetTestingDescriptorPool(),
GetTestingMessageFactory());
const auto* field = ABSL_DIE_IF_NULL(
message->GetDescriptor()->FindFieldByName("map_int32_int32"));
Value value(ParsedMapFieldValue{message, field});
Value other_value = value;
EXPECT_THAT(DoGet<MapValue>(AsLValueRef<Value>(value)), An<MapValue>());
EXPECT_THAT(DoGet<MapValue>(AsConstLValueRef<Value>(value)),
An<MapValue>());
EXPECT_THAT(DoGet<MapValue>(AsRValueRef<Value>(value)), An<MapValue>());
EXPECT_THAT(DoGet<MapValue>(AsConstRValueRef<Value>(other_value)),
An<MapValue>());
}
{
auto message = DynamicParseTextProto<TestAllTypesProto3>(
&arena, R"pb()pb", GetTestingDescriptorPool(),
GetTestingMessageFactory());
const auto* field = ABSL_DIE_IF_NULL(
message->GetDescriptor()->FindFieldByName("map_int32_int32"));
Value value(ParsedMapFieldValue{message, field});
Value other_value = value;
EXPECT_THAT(DoGet<ParsedMapFieldValue>(AsLValueRef<Value>(value)),
An<ParsedMapFieldValue>());
EXPECT_THAT(DoGet<ParsedMapFieldValue>(AsConstLValueRef<Value>(value)),
An<ParsedMapFieldValue>());
EXPECT_THAT(DoGet<ParsedMapFieldValue>(AsRValueRef<Value>(value)),
An<ParsedMapFieldValue>());
EXPECT_THAT(
DoGet<ParsedMapFieldValue>(AsConstRValueRef<Value>(other_value)),
An<ParsedMapFieldValue>());
}
{
Value value(ParsedMessageValue{DynamicParseTextProto<TestAllTypesProto3>(
&arena, R"pb()pb", GetTestingDescriptorPool(),
GetTestingMessageFactory())});
Value other_value = value;
EXPECT_THAT(DoGet<MessageValue>(AsLValueRef<Value>(value)),
An<MessageValue>());
EXPECT_THAT(DoGet<MessageValue>(AsConstLValueRef<Value>(value)),
An<MessageValue>());
EXPECT_THAT(DoGet<MessageValue>(AsRValueRef<Value>(value)),
An<MessageValue>());
EXPECT_THAT(DoGet<MessageValue>(AsConstRValueRef<Value>(other_value)),
An<MessageValue>());
}
EXPECT_THAT(DoGet<NullValue>(Value(NullValue())), An<NullValue>());
{
Value value(OptionalValue{});
Value other_value = value;
EXPECT_THAT(DoGet<OpaqueValue>(AsLValueRef<Value>(value)),
An<OpaqueValue>());
EXPECT_THAT(DoGet<OpaqueValue>(AsConstLValueRef<Value>(value)),
An<OpaqueValue>());
EXPECT_THAT(DoGet<OpaqueValue>(AsRValueRef<Value>(value)),
An<OpaqueValue>());
EXPECT_THAT(DoGet<OpaqueValue>(AsConstRValueRef<Value>(other_value)),
An<OpaqueValue>());
}
{
Value value(OptionalValue{});
Value other_value = value;
EXPECT_THAT(DoGet<OptionalValue>(AsLValueRef<Value>(value)),
An<OptionalValue>());
EXPECT_THAT(DoGet<OptionalValue>(AsConstLValueRef<Value>(value)),
An<OptionalValue>());
EXPECT_THAT(DoGet<OptionalValue>(AsRValueRef<Value>(value)),
An<OptionalValue>());
EXPECT_THAT(DoGet<OptionalValue>(AsConstRValueRef<Value>(other_value)),
An<OptionalValue>());
}
{
OpaqueValue value(OptionalValue{});
OpaqueValue other_value = value;
EXPECT_THAT(DoGet<OptionalValue>(AsLValueRef<OpaqueValue>(value)),
An<OptionalValue>());
EXPECT_THAT(DoGet<OptionalValue>(AsConstLValueRef<OpaqueValue>(value)),
An<OptionalValue>());
EXPECT_THAT(DoGet<OptionalValue>(AsRValueRef<OpaqueValue>(value)),
An<OptionalValue>());
EXPECT_THAT(
DoGet<OptionalValue>(AsConstRValueRef<OpaqueValue>(other_value)),
An<OptionalValue>());
}
{
Value value(ParsedMessageValue{DynamicParseTextProto<TestAllTypesProto3>(
&arena, R"pb()pb", GetTestingDescriptorPool(),
GetTestingMessageFactory())});
Value other_value = value;
EXPECT_THAT(DoGet<ParsedMessageValue>(AsLValueRef<Value>(value)),
An<ParsedMessageValue>());
EXPECT_THAT(DoGet<ParsedMessageValue>(AsConstLValueRef<Value>(value)),
An<ParsedMessageValue>());
EXPECT_THAT(DoGet<ParsedMessageValue>(AsRValueRef<Value>(value)),
An<ParsedMessageValue>());
EXPECT_THAT(DoGet<ParsedMessageValue>(AsConstRValueRef<Value>(other_value)),
An<ParsedMessageValue>());
}
{
Value value(StringValue{});
Value other_value = value;
EXPECT_THAT(DoGet<StringValue>(AsLValueRef<Value>(value)),
An<StringValue>());
EXPECT_THAT(DoGet<StringValue>(AsConstLValueRef<Value>(value)),
An<StringValue>());
EXPECT_THAT(DoGet<StringValue>(AsRValueRef<Value>(value)),
An<StringValue>());
EXPECT_THAT(DoGet<StringValue>(AsConstRValueRef<Value>(other_value)),
An<StringValue>());
}
{
Value value(ParsedMessageValue{DynamicParseTextProto<TestAllTypesProto3>(
&arena, R"pb()pb", GetTestingDescriptorPool(),
GetTestingMessageFactory())});
Value other_value = value;
EXPECT_THAT(DoGet<StructValue>(AsLValueRef<Value>(value)),
An<StructValue>());
EXPECT_THAT(DoGet<StructValue>(AsConstLValueRef<Value>(value)),
An<StructValue>());
EXPECT_THAT(DoGet<StructValue>(AsRValueRef<Value>(value)),
An<StructValue>());
EXPECT_THAT(DoGet<StructValue>(AsConstRValueRef<Value>(other_value)),
An<StructValue>());
}
EXPECT_THAT(DoGet<TimestampValue>(Value(TimestampValue())),
An<TimestampValue>());
{
Value value(TypeValue(StringType{}));
Value other_value = value;
EXPECT_THAT(DoGet<TypeValue>(AsLValueRef<Value>(value)), An<TypeValue>());
EXPECT_THAT(DoGet<TypeValue>(AsConstLValueRef<Value>(value)),
An<TypeValue>());
EXPECT_THAT(DoGet<TypeValue>(AsRValueRef<Value>(value)), An<TypeValue>());
EXPECT_THAT(DoGet<TypeValue>(AsConstRValueRef<Value>(other_value)),
An<TypeValue>());
}
EXPECT_THAT(DoGet<UintValue>(Value(UintValue())), An<UintValue>());
{
Value value(UnknownValue{});
Value other_value = value;
EXPECT_THAT(DoGet<UnknownValue>(AsLValueRef<Value>(value)),
An<UnknownValue>());
EXPECT_THAT(DoGet<UnknownValue>(AsConstLValueRef<Value>(value)),
An<UnknownValue>());
EXPECT_THAT(DoGet<UnknownValue>(AsRValueRef<Value>(value)),
An<UnknownValue>());
EXPECT_THAT(DoGet<UnknownValue>(AsConstRValueRef<Value>(other_value)),
An<UnknownValue>());
}
}
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/common/value.cc | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/common/value_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
f3ea2835-ae58-4400-aa72-323aa7b250ca | cpp | google/tensorstore | scaling_rate_limiter | tensorstore/internal/rate_limiter/scaling_rate_limiter.cc | tensorstore/internal/rate_limiter/scaling_rate_limiter_test.cc | #include "tensorstore/internal/rate_limiter/scaling_rate_limiter.h"
#include <algorithm>
#include <cassert>
#include <cmath>
#include <functional>
#include <limits>
#include <utility>
#include "absl/log/absl_check.h"
#include "absl/time/time.h"
#include "tensorstore/internal/rate_limiter/token_bucket_rate_limiter.h"
namespace tensorstore {
namespace internal {
namespace {
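// Returns the growth constant a = ln(2) / doubling_time, so that
// e^(a * t) doubles every doubling_time. A non-positive or infinite
// doubling time yields 0, which disables exponential growth.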
double GetLogA(absl::Duration doubling_time) {
if (doubling_time <= absl::ZeroDuration() ||
doubling_time == absl::InfiniteDuration()) {
return 0;
}
return 0.69314718055994530941723212145817656 /
absl::ToDoubleSeconds(doubling_time);
}
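// Caps the token bucket capacity (maximum burst) at 1000 seconds' worth of
// the initial rate, and at 2000 tokens overall.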
double GetMaxAvailable(double initial_rate) {
return std::min(initial_rate * 1000.0, 2000.0);
}
}
DoublingRateLimiter::DoublingRateLimiter(double initial_rate,
absl::Duration doubling_time)
: TokenBucketRateLimiter(GetMaxAvailable(initial_rate)),
initial_rate_(initial_rate),
doubling_time_(doubling_time),
a_(GetLogA(doubling_time)) {
ABSL_CHECK_GT(initial_rate, std::numeric_limits<double>::min());
ABSL_CHECK_GT(a_, 0);
}
DoublingRateLimiter::DoublingRateLimiter(double initial_rate,
absl::Duration doubling_time,
std::function<absl::Time()> clock)
: TokenBucketRateLimiter(GetMaxAvailable(initial_rate), std::move(clock)),
initial_rate_(initial_rate),
doubling_time_(doubling_time),
a_(GetLogA(doubling_time)) {
ABSL_CHECK_GT(initial_rate, std::numeric_limits<double>::min());
ABSL_CHECK_GT(a_, 0);
}
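// The number of tokens accrued over [previous, current] is the closed-form
// integral of the instantaneous rate r(t) = initial_rate * e^(a * t):
//   initial_rate / a * (e^(a*(current-start)) - e^(a*(previous-start))).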
double DoublingRateLimiter::TokensToAdd(absl::Time current,
absl::Time previous) const {
double int_current =
std::exp(a_ * absl::ToDoubleSeconds(current - start_time_));
double int_prev =
std::exp(a_ * absl::ToDoubleSeconds(previous - start_time_));
return initial_rate_ * (int_current - int_prev) / a_;
}
absl::Duration DoublingRateLimiter::GetSchedulerDelay() const {
return absl::Milliseconds(10);
}
ConstantRateLimiter::ConstantRateLimiter(double initial_rate)
: TokenBucketRateLimiter(GetMaxAvailable(initial_rate)),
initial_rate_(initial_rate),
r_(absl::Seconds(1.0 / initial_rate)) {
ABSL_CHECK_GT(initial_rate, std::numeric_limits<double>::min());
}
ConstantRateLimiter::ConstantRateLimiter(double initial_rate,
std::function<absl::Time()> clock)
: TokenBucketRateLimiter(GetMaxAvailable(initial_rate), std::move(clock)),
initial_rate_(initial_rate),
r_(absl::Seconds(1.0 / initial_rate)) {
ABSL_CHECK_GT(initial_rate, std::numeric_limits<double>::min());
}
double ConstantRateLimiter::TokensToAdd(absl::Time current,
absl::Time previous) const {
return initial_rate_ * absl::ToDoubleSeconds(current - previous);
}
absl::Duration ConstantRateLimiter::GetSchedulerDelay() const {
return std::max(r_, absl::Milliseconds(10));
}
}
} | #include "tensorstore/internal/rate_limiter/scaling_rate_limiter.h"
#include <stddef.h>
#include <atomic>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/internal/rate_limiter/rate_limiter.h"
#include "tensorstore/util/executor.h"
namespace {
using ::tensorstore::Executor;
using ::tensorstore::ExecutorTask;
using ::tensorstore::internal::adopt_object_ref;
using ::tensorstore::internal::AtomicReferenceCount;
using ::tensorstore::internal::ConstantRateLimiter;
using ::tensorstore::internal::DoublingRateLimiter;
using ::tensorstore::internal::IntrusivePtr;
using ::tensorstore::internal::MakeIntrusivePtr;
using ::tensorstore::internal::RateLimiter;
using ::tensorstore::internal::RateLimiterNode;
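// Test task node for the rate limiter. An extra reference is taken before
// Admit(); Start() adopts it, so the node is released (and Finish() run via
// the destructor) once the limiter executes the task.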
struct Node : public RateLimiterNode, public AtomicReferenceCount<Node> {
RateLimiter* queue_;
ExecutorTask task_;
Node(RateLimiter* queue, ExecutorTask task)
: queue_(queue), task_(std::move(task)) {}
~Node() { queue_->Finish(this); }
static void Start(void* task) {
IntrusivePtr<Node> self(reinterpret_cast<Node*>(task), adopt_object_ref);
std::move(self->task_)();
}
};
TEST(ConstantRateLimiter, Basic) {
absl::Time now = absl::Now();
ConstantRateLimiter queue(0.2, [&now]() { return now; });
EXPECT_EQ(0.2, queue.initial_rate());
EXPECT_EQ(now, queue.start_time());
EXPECT_EQ(now, queue.last_update());
EXPECT_EQ(0, queue.available());
EXPECT_EQ(0, queue.TokensToAdd(now, now));
EXPECT_EQ(2, queue.TokensToAdd(now + absl::Seconds(10), now));
EXPECT_EQ(60, queue.TokensToAdd(now + absl::Seconds(300), now));
std::atomic<size_t> done{0};
{
for (int i = 0; i < 100; i++) {
auto node = MakeIntrusivePtr<Node>(&queue, [&done] {
done++;
});
intrusive_ptr_increment(node.get());
queue.Admit(node.get(), &Node::Start);
}
}
now += absl::Seconds(10);
queue.PeriodicCallForTesting();
EXPECT_EQ(2, done);
now += absl::Seconds(100);
queue.PeriodicCallForTesting();
EXPECT_EQ(22, done);
now += absl::Seconds(400);
queue.PeriodicCallForTesting();
EXPECT_EQ(100, done);
}
TEST(DoublingRateLimiter, Basic) {
absl::Time now = absl::Now();
DoublingRateLimiter queue(2, absl::Seconds(10), [&now]() { return now; });
EXPECT_EQ(2, queue.initial_rate());
EXPECT_EQ(absl::Seconds(10), queue.doubling_time());
EXPECT_EQ(0, queue.available());
EXPECT_EQ(0, queue.TokensToAdd(now, now));
EXPECT_THAT(
queue.TokensToAdd(now + absl::Seconds(11), now + absl::Seconds(10)),
::testing::Gt(4));
EXPECT_THAT(
queue.TokensToAdd(now + absl::Seconds(21), now + absl::Seconds(20)),
::testing::Gt(8));
std::atomic<size_t> done{0};
{
for (int i = 0; i < 100; i++) {
auto node = MakeIntrusivePtr<Node>(&queue, [&done] {
done++;
});
intrusive_ptr_increment(node.get());
queue.Admit(node.get(), &Node::Start);
}
}
EXPECT_EQ(0, done);
now += absl::Seconds(1);
queue.PeriodicCallForTesting();
EXPECT_EQ(2, done);
now += absl::Seconds(10);
queue.PeriodicCallForTesting();
EXPECT_EQ(32, done);
now += absl::Seconds(20);
queue.PeriodicCallForTesting();
EXPECT_EQ(100, done);
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/rate_limiter/scaling_rate_limiter.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/rate_limiter/scaling_rate_limiter_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
b01051db-62dc-4b93-9e88-82ac8ddbf30d | cpp | tensorflow/tensorflow | concat_op | tensorflow/compiler/tf2xla/kernels/concat_op.cc | tensorflow/core/kernels/concat_op_test.cc | #include <cstdint>
#include <limits>
#include <vector>
#include "tensorflow/compiler/tf2xla/kernels/shape_util.h"
#include "tensorflow/compiler/tf2xla/type_util.h"
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "xla/hlo/builder/xla_builder.h"
#include "xla/literal_util.h"
#include "tensorflow/core/framework/bounds_check.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_types.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace {
class ConcatBaseOp : public XlaOpKernel {
public:
ConcatBaseOp(OpKernelConstruction* c, int64_t axis_index)
: XlaOpKernel(c), axis_index_(axis_index) {}
void Compile(XlaOpKernelContext* ctx) override {
const TensorShape concat_dim_tensor_shape = ctx->InputShape(axis_index_);
OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(concat_dim_tensor_shape),
errors::InvalidArgument(
"Concat dim tensor should be a scalar, but got shape ",
concat_dim_tensor_shape.DebugString()));
int64_t concat_dim;
OP_REQUIRES_OK(ctx,
ctx->ConstantInputAsIntScalar(axis_index_, &concat_dim));
std::vector<xla::XlaOp> values;
std::vector<TensorShape> shapes;
OP_REQUIRES_OK(ctx, ctx->InputList("values", &values, &shapes));
const int N = values.size();
const int input_dims = shapes[0].dims();
const TensorShape& input_shape = shapes[0];
int64_t axis = concat_dim < 0 ? concat_dim + input_dims : concat_dim;
OP_REQUIRES(ctx, 0 <= axis && axis < input_dims,
errors::InvalidArgument(
"ConcatOp : Expected concatenating dimensions in the range "
"[",
-input_dims, ", ", input_dims, "), but got ", concat_dim));
std::vector<xla::XlaOp> input_data;
int output_concat_dim = 0;
for (int i = 0; i < N; ++i) {
xla::XlaOp handle = values[i];
const TensorShape& in_shape = shapes[i];
OP_REQUIRES(
ctx, in_shape.dims() == input_dims,
errors::InvalidArgument(
"ConcatOp : Ranks of all input tensors should match: shape[0] = ",
input_shape.DebugString(), " vs. shape[", i,
"] = ", in_shape.DebugString()));
if (in_shape.dims() == 0) {
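        // Legacy handling: promote a rank-0 input to a 1-element vector so
        // it can participate in the concatenation.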
input_data.push_back(xla::Reshape(handle, {1}));
} else {
input_data.push_back(handle);
}
output_concat_dim += in_shape.dims() > 0 ? in_shape.dim_size(axis) : 1;
}
VLOG(1) << "Concat dim " << concat_dim << " equivalent to " << axis;
ctx->SetOutput(0, xla::ConcatInDim(ctx->builder(), input_data, axis));
}
private:
int axis_index_;
};
class ConcatOp : public ConcatBaseOp {
public:
explicit ConcatOp(OpKernelConstruction* c)
: ConcatBaseOp(c, 0) {}
};
class ConcatV2Op : public ConcatBaseOp {
public:
explicit ConcatV2Op(OpKernelConstruction* c)
: ConcatBaseOp(c, c->num_inputs() - 1) {}
};
REGISTER_XLA_OP(Name("Concat").CompileTimeConstantInput("concat_dim"),
ConcatOp);
REGISTER_XLA_OP(Name("ConcatV2")
.TypeConstraint("Tidx", {DT_INT32, DT_INT64})
.CompileTimeConstantInput("axis"),
ConcatV2Op);
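// ConcatOffset computes, for each input shape, the offset of that input
// within the concatenated output: the concat axis accumulates the sizes of
// the preceding inputs while every other dimension is zero (and must match
// input 0). Shapes must be compile-time constants; upper bounds are used
// for dynamically sized dimensions.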
class ConcatOffsetOp : public XlaOpKernel {
public:
explicit ConcatOffsetOp(OpKernelConstruction* ctx) : XlaOpKernel(ctx) {
OP_REQUIRES_OK(ctx, ctx->GetAttr("shape_type", &shape_type_));
}
void Compile(XlaOpKernelContext* ctx) override {
const TensorShape concat_dim_shape = ctx->InputShape(0);
OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(concat_dim_shape),
errors::InvalidArgument(
"Concat dim tensor should be a scalar, but got shape ",
concat_dim_shape.DebugString()));
for (int i = 1; i < ctx->num_inputs(); ++i) {
OP_REQUIRES(ctx, TensorShapeUtils::IsVector(ctx->InputShape(i)),
errors::InvalidArgument("input ", i,
" should be a vector, but got shape ",
ctx->InputShape(i).DebugString()));
}
const int32_t N = ctx->num_inputs() - 1;
const TensorShape inp0_shape = ctx->InputShape(1);
std::vector<int64_t> inp0_dims;
OP_REQUIRES_OK(ctx,
ctx->ConstantInputAsIntVector(
1, &inp0_dims, xla::ValueInferenceMode::kUpperBound));
const int64_t inp0_rank = inp0_shape.num_elements();
int64_t cdim;
OP_REQUIRES_OK(ctx, ctx->ConstantInputAsIntScalar(0, &cdim));
VLOG(1) << "ConcatOffset " << cdim << "," << inp0_rank;
int32_t axis = cdim < 0 ? cdim + inp0_rank : cdim;
OP_REQUIRES(ctx, FastBoundsCheck(axis, inp0_rank),
errors::InvalidArgument("Concat dim is out of range: ", axis,
" vs. ", inp0_rank));
int64_t offset = 0;
for (int i = 0; i < N; ++i) {
const TensorShape inp_shape = ctx->InputShape(1 + i);
OP_REQUIRES(ctx, inp0_rank == inp_shape.num_elements(),
errors::InvalidArgument("input ", i, " should contain ",
inp0_rank, " elements, but got ",
inp_shape.num_elements()));
std::vector<int64_t> inp_dims;
OP_REQUIRES_OK(
ctx, ctx->ConstantInputAsIntVector(
1 + i, &inp_dims, xla::ValueInferenceMode::kUpperBound));
std::vector<int64_t> output_dims(inp0_rank);
for (int64_t j = 0; j < inp0_rank; ++j) {
if (j == axis) {
output_dims[j] = offset;
offset += inp_dims[j];
} else {
const int64_t inp0_element = inp0_dims[j];
const int64_t inp_element = inp_dims[j];
OP_REQUIRES(ctx, inp0_element == inp_element,
errors::InvalidArgument(
"All dimensions except ", axis, " must match. Input ",
i, " has shape [", absl::StrJoin(inp_dims, " "),
"] and doesn't match input 0 with shape [",
absl::StrJoin(inp0_dims, " "), "]."));
output_dims[j] = 0;
}
}
TensorShape out_shape;
OP_REQUIRES_OK(ctx,
TensorShape::BuildTensorShape(output_dims, &out_shape));
Tensor out_constant(shape_type_, TensorShape({inp0_rank}));
OP_REQUIRES_OK(ctx, TensorShapeToConstant(out_shape, &out_constant));
ctx->SetConstantOutput(i, out_constant);
}
}
private:
DataType shape_type_;
};
REGISTER_XLA_OP(Name("ConcatOffset")
.TypeConstraint("shape_type", {DT_INT32, DT_INT64})
.CompileTimeConstantInput("concat_dim")
.CompileTimeConstantInput("shape"),
ConcatOffsetOp);
}
} | #include <functional>
#include <memory>
#include <vector>
#include "absl/base/prefetch.h"
#include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/graph/testlib.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
namespace tensorflow {
namespace {
template <typename T>
void FillTensorWithRandomValues(Tensor* t, int string_length, int64_t* bytes) {
t->flat<T>().setRandom();
*bytes = t->flat<T>().size() * sizeof(T);
}
template <>
void FillTensorWithRandomValues<tstring>(Tensor* t, int string_length,
int64_t* bytes) {
auto ts = t->flat<tstring>();
*bytes = 0;
for (int i = 0; i < ts.size(); i++) {
ts(i) = tstring(string_length, 'x');
*bytes += sizeof(ts(i)) + ts(i).size();
}
}
template <typename T>
static void ConcatHelper(::testing::benchmark::State& state,
int concat_dimension, int dim2,
int string_length = 0) {
Graph* g = new Graph(OpRegistry::Global());
DataType dt = DataTypeToEnum<T>::v();
const int kDim1 = 100;
Tensor concat_dim(DT_INT32, TensorShape({}));
concat_dim.scalar<int32>()() = concat_dimension;
Tensor in0(dt, TensorShape({kDim1, dim2}));
Tensor in1(dt, TensorShape({kDim1, dim2}));
int64_t in0_bytes, in1_bytes;
FillTensorWithRandomValues<T>(&in0, string_length, &in0_bytes);
FillTensorWithRandomValues<T>(&in1, string_length, &in1_bytes);
Node* node;
TF_CHECK_OK(
NodeBuilder(g->NewName("n"), "Concat")
.Input(test::graph::Constant(g, concat_dim))
.Input({test::graph::Constant(g, in0), test::graph::Constant(g, in1)})
.Attr("N", 2)
.Attr("T", dt)
.Finalize(g, &node));
test::Benchmark("cpu", g, false).Run(state);
state.SetBytesProcessed(static_cast<int64_t>(state.iterations()) *
(in0_bytes + in1_bytes));
}
void BM_ConcatDim0Float(::testing::benchmark::State& state) {
const int dim2 = state.range(0);
ConcatHelper<float>(state, 0, dim2);
}
void BM_ConcatDim1Float(::testing::benchmark::State& state) {
const int dim2 = state.range(0);
ConcatHelper<float>(state, 1, dim2);
}
BENCHMARK(BM_ConcatDim0Float)
->UseRealTime()
->Arg(1000)
->Arg(100000)
->Arg(1000000);
BENCHMARK(BM_ConcatDim1Float)
->UseRealTime()
->Arg(1000)
->Arg(100000)
->Arg(1000000);
void BM_ConcatDim0String(::testing::benchmark::State& state) {
const int dim2 = state.range(0);
const int string_length = state.range(1);
ConcatHelper<tstring>(state, 0, dim2, string_length);
}
BENCHMARK(BM_ConcatDim0String)
->UseRealTime()
->ArgPair(1, 16)
->ArgPair(1, 10000)
->ArgPair(100, 16);
void BM_ConcatDim1uint8(::testing::benchmark::State& state) {
const int dim2 = state.range(0);
ConcatHelper<uint8>(state, 1, dim2);
}
void BM_ConcatDim1int16(::testing::benchmark::State& state) {
const int dim2 = state.range(0);
ConcatHelper<int16>(state, 1, dim2);
}
void BM_ConcatDim1bfloat16(::testing::benchmark::State& state) {
const int dim2 = state.range(0);
ConcatHelper<bfloat16>(state, 1, dim2);
}
BENCHMARK(BM_ConcatDim1uint8)
->UseRealTime()
->Arg(1000)
->Arg(100000)
->Arg(1000000);
BENCHMARK(BM_ConcatDim1int16)
->UseRealTime()
->Arg(1000)
->Arg(100000)
->Arg(1000000);
BENCHMARK(BM_ConcatDim1bfloat16)
->UseRealTime()
->Arg(1000)
->Arg(100000)
->Arg(1000000);
template <typename T>
static void ConcatManyHelper(::testing::benchmark::State& state,
int concat_dimension, int dim2) {
Graph* g = new Graph(OpRegistry::Global());
DataType dt = DataTypeToEnum<T>::v();
const int kDim1 = 40000;
const int kNumInputs = 64;
Tensor concat_dim(DT_INT32, TensorShape({}));
concat_dim.scalar<int32>()() = concat_dimension;
std::vector<NodeBuilder::NodeOut> inputs;
inputs.reserve(kNumInputs);
for (int i = 0; i < kNumInputs; ++i) {
Tensor in(dt, TensorShape({kDim1, dim2}));
in.flat<T>().setRandom();
inputs.push_back(test::graph::Constant(g, in));
}
Node* node;
TF_CHECK_OK(NodeBuilder(g->NewName("n"), "Concat")
.Input(test::graph::Constant(g, concat_dim))
.Input(inputs)
.Attr("N", 64)
.Attr("T", dt)
.Finalize(g, &node));
test::Benchmark("cpu", g, false).Run(state);
state.SetBytesProcessed(static_cast<int64_t>(state.iterations()) * kDim1 *
dim2 * kNumInputs * sizeof(T));
}
void BM_ConcatManyDim1bfloat16(::testing::benchmark::State& state) {
const int dim2 = state.range(0);
ConcatManyHelper<bfloat16>(state, 1, dim2);
}
BENCHMARK(BM_ConcatManyDim1bfloat16)->UseRealTime()->Arg(18)->Arg(34)->Arg(60);
void MemcpyAlternativeHelper(::testing::benchmark::State& state, int dim2) {
const int kDim1 = 100;
std::vector<float> data1(kDim1 * dim2, 1.0f);
std::vector<float> data2(kDim1 * dim2, 2.0f);
for (auto s : state) {
const size_t n0 = data1.size();
const size_t n1 = data2.size();
float* result = new float[n0 + n1];
memcpy(&result[0], &data1[0], n0 * sizeof(float));
memcpy(&result[n0], &data2[0], n1 * sizeof(float));
delete[] result;
}
state.SetBytesProcessed(static_cast<int64_t>(state.iterations()) *
((kDim1 * dim2) + (kDim1 * dim2)) * sizeof(float));
}
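// Note: the Dim0 and Dim1 variants below time identical flat copies; the
// concat dimension does not change this helper's memory access pattern.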
void BM_MemcpyAlternativeDim0(::testing::benchmark::State& state) {
const int dim2 = state.range(0);
MemcpyAlternativeHelper(state, dim2);
}
void BM_MemcpyAlternativeDim1(::testing::benchmark::State& state) {
const int dim2 = state.range(0);
MemcpyAlternativeHelper(state, dim2);
}
BENCHMARK(BM_MemcpyAlternativeDim0)
->UseRealTime()
->Arg(1000)
->Arg(100000)
->Arg(1000000);
BENCHMARK(BM_MemcpyAlternativeDim1)
->UseRealTime()
->Arg(1000)
->Arg(100000)
->Arg(1000000);
typedef Eigen::TensorMap<Eigen::Tensor<bfloat16, 1, Eigen::RowMajor>,
Eigen::Unaligned>
EigenMap;
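// Two copy orders for concatenating 64 inputs along dim 1: Alternative1
// copies one input at a time into strided output rows, while Alternative2
// interleaves across inputs so that output writes stay contiguous.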
void MemcpyManyAlternative1(::testing::benchmark::State& state) {
int dim2 = state.range(0);
const int kDim1 = 40000;
const int kNumCopies = 64;
const int size = kDim1 * dim2 * kNumCopies;
bfloat16* data = new bfloat16[size];
EigenMap map(data, size);
map.setRandom();
for (auto s : state) {
std::vector<bfloat16*> inputs(kNumCopies);
for (int i = 0; i < kNumCopies; ++i) {
inputs[i] = &data[i * kDim1 * dim2];
}
bfloat16* result = new bfloat16[size];
for (int j = 0; j < kNumCopies; ++j) {
bfloat16* output = &result[j * dim2];
for (int i = 0; i < kDim1; ++i) {
if (i + 1 < kDim1) {
absl::PrefetchToLocalCache(inputs[j] + dim2);
}
memcpy(output, inputs[j], dim2 * sizeof(bfloat16));
inputs[j] += dim2;
output += dim2 * kNumCopies;
}
}
delete[] result;
}
delete[] data;
state.SetBytesProcessed(static_cast<int64_t>(state.iterations()) * kDim1 *
dim2 * kNumCopies * sizeof(bfloat16));
}
void MemcpyManyAlternative2(::testing::benchmark::State& state) {
int dim2 = state.range(0);
const int kDim1 = 40000;
const int kNumCopies = 64;
const int size = kDim1 * dim2 * kNumCopies;
bfloat16* data = new bfloat16[size];
EigenMap map(data, size);
map.setRandom();
std::vector<bfloat16*> inputs(kNumCopies);
for (auto s : state) {
bfloat16* result = new bfloat16[size];
for (int i = 0; i < kNumCopies; ++i) {
inputs[i] = &data[i * kDim1 * dim2];
}
bfloat16* output = result;
for (int i = 0; i < kDim1; ++i) {
for (int j = 0; j < kNumCopies; ++j) {
if (j + 1 < kNumCopies) {
absl::PrefetchToLocalCache(inputs[j + 1]);
}
memcpy(output, inputs[j], dim2 * sizeof(bfloat16));
inputs[j] += dim2;
output += dim2;
}
}
delete[] result;
}
delete[] data;
state.SetBytesProcessed(static_cast<int64_t>(state.iterations()) * kDim1 *
dim2 * kNumCopies * sizeof(bfloat16));
}
BENCHMARK(MemcpyManyAlternative1)
->Arg(16)
->Arg(17)
->Arg(18)
->Arg(32)
->Arg(33)
->Arg(34)
->Arg(60)
->Arg(64)
->Arg(65);
BENCHMARK(MemcpyManyAlternative2)
->Arg(16)
->Arg(17)
->Arg(18)
->Arg(32)
->Arg(33)
->Arg(34)
->Arg(60)
->Arg(64)
->Arg(65);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/kernels/concat_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/concat_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
00590ed8-bc69-40a8-87ea-2346409383fe | cpp | google/tsl | stacktrace_handler | tsl/platform/windows/stacktrace_handler.cc | tsl/platform/stacktrace_handler_test.cc | #include "tsl/platform/stacktrace_handler.h"
#include <windows.h>
#include <dbghelp.h>
#include <errno.h>
#include <io.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string>
#include <thread>
#include "tsl/platform/mutex.h"
#include "tsl/platform/stacktrace.h"
#include "tsl/platform/types.h"
namespace tsl {
static mutex alarm_mu(LINKER_INITIALIZED);
static bool alarm_activated = false;
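// Watchdog thread body: blocks until the signal handler sets
// alarm_activated, then allows 60 seconds for stack-trace printing before
// restoring the default SIGABRT disposition and aborting the process.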
static void AlarmThreadBody() {
alarm_mu.lock();
alarm_mu.Await(Condition(&alarm_activated));
alarm_mu.unlock();
Sleep(60000);
signal(SIGABRT, SIG_DFL);
abort();
}
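// Formats ptr as "0x<hex>\n" into buf without heap allocation or
// locale-aware CRT formatting, keeping it usable inside a signal handler.
// Returns false if buf is too small to hold the formatted pointer.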
static bool PtrToString(uintptr_t ptr, char* buf, size_t size) {
static constexpr char kHexCharacters[] = "0123456789abcdef";
static constexpr int kHexBase = 16;
size_t num_hex_chars = 2 * sizeof(uintptr_t);
if (size < (num_hex_chars + 4)) {
return false;
}
buf[0] = '0';
buf[1] = 'x';
int start_index = 2;
for (int i = num_hex_chars - 1 + start_index; i >= start_index; --i) {
buf[i] = kHexCharacters[ptr % kHexBase];
ptr /= kHexBase;
}
int current_index = start_index + num_hex_chars;
buf[current_index] = '\n';
buf[current_index + 1] = '\0';
return true;
}
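// Prints raw stack frame addresses with unbuffered _write so that some
// trace survives even if the symbolized CurrentStackTrace() call in the
// signal handler fails.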
static inline void SafePrintStackTracePointers() {
static constexpr char begin_msg[] = "*** BEGIN STACK TRACE POINTERS ***\n";
(void)_write(_fileno(stderr), begin_msg, strlen(begin_msg));
static constexpr int kMaxStackFrames = 64;
void* trace[kMaxStackFrames];
int num_frames = CaptureStackBackTrace(0, kMaxStackFrames, trace, NULL);
for (int i = 0; i < num_frames; ++i) {
char buffer[32] = "unsuccessful ptr conversion";
PtrToString(reinterpret_cast<uintptr_t>(trace[i]), buffer, sizeof(buffer));
(void)_write(_fileno(stderr), buffer, strlen(buffer));
}
static constexpr char end_msg[] = "*** END STACK TRACE POINTERS ***\n\n";
(void)_write(_fileno(stderr), end_msg, strlen(end_msg));
}
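// Signal handler: wakes the watchdog thread, prints the signal number, raw
// frame pointers, and a symbolized stack trace, then aborts with the
// default SIGABRT disposition restored.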
static void StacktraceHandler(int sig) {
alarm_mu.lock();
alarm_activated = true;
alarm_mu.unlock();
char buf[128];
snprintf(buf, sizeof(buf), "*** Received signal %d ***\n", sig);
  (void)_write(_fileno(stderr), buf, strlen(buf));
SafePrintStackTracePointers();
std::string stacktrace = CurrentStackTrace();
  (void)_write(_fileno(stderr), stacktrace.c_str(), stacktrace.length());
signal(SIGABRT, SIG_DFL);
abort();
}
namespace testing {
void InstallStacktraceHandler() {
int handled_signals[] = {SIGSEGV, SIGABRT, SIGILL, SIGFPE};
std::thread alarm_thread(AlarmThreadBody);
alarm_thread.detach();
typedef void (*SignalHandlerPointer)(int);
for (int sig : handled_signals) {
SignalHandlerPointer previousHandler = signal(sig, StacktraceHandler);
if (previousHandler == SIG_ERR) {
char buf[128];
snprintf(buf, sizeof(buf),
"tensorflow::InstallStackTraceHandler: Warning, can't install "
"backtrace signal handler for signal %d, errno:%d \n",
sig, errno);
(void)write(_fileno(stderr), buf, strlen(buf));
} else if (previousHandler != SIG_DFL) {
char buf[128];
snprintf(buf, sizeof(buf),
"tensorflow::InstallStackTraceHandler: Warning, backtrace "
"signal handler for signal %d overwrote previous handler.\n",
sig);
(void)write(_fileno(stderr), buf, strlen(buf));
}
}
}
}
} | #include <csignal>
#include "tsl/platform/logging.h"
#include "tsl/platform/test.h"
namespace tsl {
namespace {
TEST(StacktraceHandlerTest, GeneratesStacktrace) {
EXPECT_DEATH(raise(SIGABRT), "testing::internal::UnitTestImpl::RunAllTests");
}
}
} | https://github.com/google/tsl/blob/6d708fdcdd4f40537b7fa273371215a6fa3d4423/tsl/platform/windows/stacktrace_handler.cc | https://github.com/google/tsl/blob/6d708fdcdd4f40537b7fa273371215a6fa3d4423/tsl/platform/stacktrace_handler_test.cc | 6d708fdcdd4f40537b7fa273371215a6fa3d4423 |
e436db0a-f077-4f69-8a2a-f6758b0a0ae3 | cpp | tensorflow/tensorflow | sharding_util_ops | tensorflow/compiler/tf2xla/kernels/sharding_util_ops.cc | tensorflow/core/tpu/kernels/sharding_util_ops_test.cc | #include <vector>
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "tensorflow/compiler/tf2xla/type_util.h"
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "xla/hlo/builder/lib/constants.h"
#include "xla/hlo/builder/xla_builder.h"
#include "xla/xla_data.pb.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/op_requires.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/types.h"
#include "tsl/platform/errors.h"
namespace tensorflow {
namespace {
constexpr absl::string_view kNumSplitsAttrName = "num_splits";
constexpr absl::string_view kNumConcatsAttrName = "num_concats";
template <bool Split>
Status GetAndValidateAttributes(OpKernelConstruction* ctx,
std::vector<int64_t>& num_partitions,
int& num_slices, std::vector<int64_t>& paddings,
bool& has_paddings) {
absl::string_view num_partitions_attr_name =
Split ? kNumSplitsAttrName : kNumConcatsAttrName;
TF_RETURN_IF_ERROR(ctx->GetAttr(num_partitions_attr_name, &num_partitions));
int num_dims_to_split = 0;
for (int i = 0, e = num_partitions.size(); i < e; ++i) {
const auto& split = num_partitions[i];
if (split <= 0) {
return errors::InvalidArgument("'", num_partitions_attr_name,
"' at index ", i,
" must be positive, but got ", split, ".");
}
if (split > 1) {
++num_dims_to_split;
}
num_slices *= split;
}
int n;
TF_RETURN_IF_ERROR(ctx->GetAttr("N", &n));
if (n != num_slices) {
return errors::InvalidArgument(
"'N' must match number of slices ", num_slices, " from '",
num_partitions_attr_name, "', but got ", n, ".");
}
TF_RETURN_IF_ERROR(ctx->GetAttr("paddings", &paddings));
const int expected_rank = num_partitions.size();
if (!paddings.empty()) {
if (paddings.size() != expected_rank) {
return errors::InvalidArgument(
"'paddings' length must match '", num_partitions_attr_name,
"' length ", expected_rank, ", but got ", paddings.size(), ".");
}
for (int dim = 0; dim < expected_rank; ++dim) {
if (paddings[dim] < 0) {
return errors::InvalidArgument(
"'padding' must be all non-negative, but got ", paddings[dim],
" at index ", dim, ".");
}
if (paddings[dim] > 0) {
has_paddings = true;
}
}
} else {
paddings.assign(expected_rank, 0);
}
return absl::OkStatus();
}
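// Maps a flat slice index to per-dimension start offsets, treating
// num_partitions as a row-major grid (last dimension varies fastest) and
// scaling each grid coordinate by the slice extent in that dimension.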
std::vector<int64_t> GetSliceIndices(absl::Span<const int64> num_partitions,
absl::Span<const int64> slice_shape,
const int index) {
DCHECK_EQ(num_partitions.size(), slice_shape.size());
std::vector<int64_t> slice_indices(num_partitions.size());
if (num_partitions.empty()) {
return slice_indices;
}
auto divisor = [&](const int dim) {
int divisor = 1;
for (int i = num_partitions.size() - 1; i > dim; --i) {
divisor *= num_partitions[i];
}
return divisor;
};
for (int dim = num_partitions.size() - 1; dim > 0; --dim) {
slice_indices[dim] =
((index / divisor(dim)) % num_partitions[dim]) * slice_shape[dim];
}
slice_indices[0] = (index / divisor(0)) * slice_shape[0];
return slice_indices;
}
constexpr absl::string_view kTensorName = "'input' tensor";
constexpr absl::string_view kResourceName = "'resource' variable tensor";
template <bool Resource>
class XlaSplitNDBaseOp : public XlaOpKernel {
public:
explicit XlaSplitNDBaseOp(OpKernelConstruction* ctx) : XlaOpKernel(ctx) {
OP_REQUIRES_OK(ctx,
GetAndValidateAttributes<true>(ctx, num_splits_, num_slices_,
paddings_, has_paddings_));
}
protected:
Status CompileInternal(XlaOpKernelContext* ctx, const xla::XlaOp input,
const TensorShape& input_shape,
const DataType input_dtype) {
xla::PrimitiveType type;
TF_RETURN_IF_ERROR(DataTypeToPrimitiveType(input_dtype, &type));
absl::string_view input_name = Resource ? kResourceName : kTensorName;
const int rank = input_shape.dims();
if (rank != num_splits_.size()) {
return errors::InvalidArgument(
input_name, " rank must be the same as 'num_splits' length ",
num_splits_.size(), ", but got rank ", rank, ".");
}
for (int dim = 0; dim < rank; ++dim) {
if ((input_shape.dim_size(dim) + paddings_[dim]) % num_splits_[dim] !=
0) {
return errors::InvalidArgument(
input_name, " shape dimension ", dim, " (",
input_shape.dim_size(dim), ") with padding ", paddings_[dim],
" must be evenly divisible by 'num_splits' ", num_splits_[dim],
".");
}
}
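    // With a single slice the output is the whole input, optionally padded
    // on the high side of each dimension.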
if (num_slices_ == 1 && has_paddings_) {
xla::PaddingConfig padding_config;
for (int dim = 0; dim < rank; ++dim) {
auto* padding_dim = padding_config.add_dimensions();
padding_dim->set_edge_padding_low(0);
padding_dim->set_edge_padding_high(paddings_[dim]);
padding_dim->set_interior_padding(0);
}
ctx->SetOutput(
0,
xla::Pad(input,
xla::ConstantR0WithType(ctx->builder(), type, 0),
padding_config));
return absl::OkStatus();
} else if (num_slices_ == 1) {
ctx->SetOutput(0, input);
return absl::OkStatus();
}
std::vector<int64_t> slice_shape(rank);
for (int dim = 0; dim < rank; ++dim) {
slice_shape[dim] =
(input_shape.dim_size(dim) + paddings_[dim]) / num_splits_[dim];
}
const std::vector<int64_t> slice_strides(rank, 1);
for (int i = 0; i < num_slices_; ++i) {
int num_complete_pad_dims = 0;
int num_partial_pad_dims = 0;
std::vector<int64_t> slice_start_indices =
GetSliceIndices(num_splits_, slice_shape, i);
std::vector<int64_t> slice_limit_indices(slice_shape.size());
xla::PaddingConfig slice_padding_config;
for (int dim = 0; dim < rank; ++dim) {
auto* padding_dim = slice_padding_config.add_dimensions();
padding_dim->set_edge_padding_low(0);
padding_dim->set_edge_padding_high(0);
padding_dim->set_interior_padding(0);
}
for (int dim = 0; dim < rank; ++dim) {
        const int64_t dim_size = input_shape.dim_size(dim);
if (slice_start_indices[dim] >= dim_size) {
slice_start_indices[dim] = dim_size;
slice_limit_indices[dim] = dim_size;
slice_padding_config.mutable_dimensions(dim)->set_edge_padding_high(
slice_shape[dim]);
++num_complete_pad_dims;
} else if (slice_start_indices[dim] + slice_shape[dim] > dim_size) {
slice_limit_indices[dim] = dim_size;
slice_padding_config.mutable_dimensions(dim)->set_edge_padding_high(
slice_start_indices[dim] + slice_shape[dim] - dim_size);
++num_partial_pad_dims;
} else {
slice_limit_indices[dim] =
slice_start_indices[dim] + slice_shape[dim];
}
}
if (num_complete_pad_dims == rank) {
ctx->SetOutput(i, xla::Broadcast(xla::ConstantR0WithType(
ctx->builder(), type, 0),
slice_shape));
} else if (num_complete_pad_dims > 0 || num_partial_pad_dims > 0) {
ctx->SetOutput(
i,
xla::Pad(xla::Slice(input, slice_start_indices, slice_limit_indices,
slice_strides),
xla::ConstantR0WithType(ctx->builder(), type, 0),
slice_padding_config));
} else {
ctx->SetOutput(i, xla::Slice(input, slice_start_indices,
slice_limit_indices, slice_strides));
}
}
return absl::OkStatus();
}
private:
std::vector<int64_t> num_splits_;
int num_slices_ = 1;
std::vector<int64_t> paddings_;
bool has_paddings_ = false;
};
class XlaSplitNDOp : public XlaSplitNDBaseOp<false> {
public:
explicit XlaSplitNDOp(OpKernelConstruction* ctx)
: XlaSplitNDBaseOp<false>(ctx) {}
void Compile(XlaOpKernelContext* ctx) override {
OP_REQUIRES_OK(ctx,
this->CompileInternal(ctx, ctx->Input(0), ctx->InputShape(0),
ctx->input_type(0)));
}
};
REGISTER_XLA_OP(Name("XlaSplitND"), XlaSplitNDOp);
class ReadVariableXlaSplitNDOp : public XlaSplitNDBaseOp<true> {
public:
explicit ReadVariableXlaSplitNDOp(OpKernelConstruction* ctx)
: XlaSplitNDBaseOp<true>(ctx) {
OP_REQUIRES_OK(ctx, ctx->GetAttr("T", &dtype_));
}
void Compile(XlaOpKernelContext* ctx) override {
DataType variable_input_dtype;
TensorShape variable_input_shape;
OP_REQUIRES_OK(
ctx, ctx->GetVariableTypeAndShape(0, &variable_input_dtype,
&variable_input_shape));
OP_REQUIRES(
ctx, variable_input_dtype == dtype_,
errors::InvalidArgument("'T' must match 'resource' variable dtype ",
DataTypeString(variable_input_dtype),
", but got ", dtype_));
xla::XlaOp handle;
OP_REQUIRES_OK(ctx, ctx->ReadVariableInput(0, dtype_,
nullptr, &handle));
OP_REQUIRES_OK(
ctx, this->CompileInternal(ctx, handle, variable_input_shape, dtype_));
}
private:
DataType dtype_;
};
REGISTER_XLA_OP(Name("ReadVariableXlaSplitND"), ReadVariableXlaSplitNDOp);
class XlaConcatNDBaseOp : public XlaOpKernel {
public:
explicit XlaConcatNDBaseOp(OpKernelConstruction* ctx) : XlaOpKernel(ctx) {
OP_REQUIRES_OK(
ctx, GetAndValidateAttributes<false>(ctx, num_concats_, num_slices_,
paddings_, has_paddings_));
OP_REQUIRES_OK(ctx, ctx->GetAttr("T", &dtype_));
}
protected:
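  // Assembles the concatenated (and optionally un-padded) output from the
  // input slices using DynamicUpdateSlice into a zero-initialized buffer.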
absl::StatusOr<xla::XlaOp> CompileInternal(XlaOpKernelContext* ctx) {
xla::PrimitiveType type;
TF_RETURN_IF_ERROR(DataTypeToPrimitiveType(dtype_, &type));
std::vector<xla::XlaOp> input_handles;
std::vector<TensorShape> input_shapes;
std::vector<int64_t> output_shape;
TF_RETURN_IF_ERROR(GetInputsAndOutputShape(ctx, input_handles, input_shapes,
output_shape));
const int rank = output_shape.size();
if (num_slices_ == 1 && has_paddings_) {
return xla::Slice(input_handles[0],
std::vector<int64_t>(rank, 0),
output_shape,
std::vector<int64_t>(rank, 1));
} else if (num_slices_ == 1) {
return input_handles[0];
}
auto slice_shape = input_shapes[0].dim_sizes();
xla::XlaOp output = xla::Broadcast(
xla::ConstantR0WithType(ctx->builder(), type, 0),
output_shape);
const std::vector<int64_t> input_slice_start_indices(rank, 0);
const std::vector<int64_t> slice_strides(rank, 1);
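    // Copy each slice into place; slices lying entirely inside the padded
    // region are skipped, and partially overlapping ones are trimmed first.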
for (int i = 0; i < num_slices_; ++i) {
std::vector<int64_t> slice_start_indices =
GetSliceIndices(num_concats_, slice_shape, i);
int num_complete_pad_dims = 0;
int num_partial_pad_dims = 0;
std::vector<int64_t> slice_limit_indices(rank);
for (int dim = 0; dim < rank; ++dim) {
const int64_t dim_size = output_shape[dim];
if (slice_start_indices[dim] >= dim_size) {
slice_start_indices[dim] = dim_size;
slice_limit_indices[dim] = dim_size;
++num_complete_pad_dims;
} else if (slice_start_indices[dim] + slice_shape[dim] > dim_size) {
slice_limit_indices[dim] = dim_size;
++num_partial_pad_dims;
} else {
slice_limit_indices[dim] =
slice_start_indices[dim] + slice_shape[dim];
}
}
if (num_complete_pad_dims == rank) {
continue;
}
xla::XlaOp input_slice = input_handles[i];
if (num_complete_pad_dims > 0 || num_partial_pad_dims > 0) {
std::vector<int64_t> input_slice_limit_indices(rank);
for (int dim = 0; dim < rank; ++dim) {
input_slice_limit_indices[dim] =
slice_limit_indices[dim] - slice_start_indices[dim];
}
input_slice = xla::Slice(input_slice, input_slice_start_indices,
input_slice_limit_indices, slice_strides);
}
std::vector<xla::XlaOp> update_slice_start_indices;
update_slice_start_indices.reserve(rank);
      for (int64_t start_index : slice_start_indices) {
        update_slice_start_indices.push_back(
            xla::ConstantR0<int32_t>(ctx->builder(), start_index));
}
output = xla::DynamicUpdateSlice(output, input_slice,
update_slice_start_indices);
}
return output;
}
DataType dtype_;
private:
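  // Checks that all inputs share a single shape and derives the output shape
  // from the per-dimension concat counts minus the requested paddings.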
Status GetInputsAndOutputShape(XlaOpKernelContext* ctx,
std::vector<xla::XlaOp>& input_handles,
std::vector<TensorShape>& input_shapes,
std::vector<int64_t>& output_shape) {
TF_RETURN_IF_ERROR(ctx->InputList("inputs", &input_handles, &input_shapes));
const TensorShape& slice_shape = input_shapes[0];
if (slice_shape.dims() != num_concats_.size()) {
return errors::InvalidArgument(
"'inputs' rank must be the same as 'num_concats' length ",
num_concats_.size(), ", but got rank ", slice_shape.dims(), ".");
}
for (int i = 1; i < num_slices_; ++i) {
const TensorShape& slice_shape_i = input_shapes[i];
if (slice_shape != slice_shape_i) {
return errors::InvalidArgument(
"'inputs' must all have the same expected shape ", slice_shape,
", but got ", slice_shape_i, " at index ", i, ".");
}
}
const int rank = input_shapes[0].dims();
for (int dim = 0; dim < rank; ++dim) {
const int max_dim_size = slice_shape.dim_size(dim) * num_concats_[dim];
if (paddings_[dim] > max_dim_size) {
return errors::InvalidArgument(
"'paddings' must not exceed expected output shape dimension ",
max_dim_size, " at index ", dim, ", but got ", paddings_[dim], ".");
}
output_shape.push_back(max_dim_size - paddings_[dim]);
}
return absl::OkStatus();
}
std::vector<int64_t> num_concats_;
int num_slices_ = 1;
std::vector<int64_t> paddings_;
bool has_paddings_ = false;
};
class XlaConcatNDOp : public XlaConcatNDBaseOp {
public:
explicit XlaConcatNDOp(OpKernelConstruction* ctx) : XlaConcatNDBaseOp(ctx) {}
void Compile(XlaOpKernelContext* ctx) override {
auto output_or = this->CompileInternal(ctx);
OP_REQUIRES_OK(ctx, output_or.status());
ctx->SetOutput(0, output_or.value());
}
};
REGISTER_XLA_OP(Name("XlaConcatND"), XlaConcatNDOp);
class AssignVariableXlaConcatNDOp : public XlaConcatNDBaseOp {
public:
explicit AssignVariableXlaConcatNDOp(OpKernelConstruction* ctx)
: XlaConcatNDBaseOp(ctx) {}
void Compile(XlaOpKernelContext* ctx) override {
auto output_or = this->CompileInternal(ctx);
OP_REQUIRES_OK(ctx, output_or.status());
OP_REQUIRES_OK(ctx,
ctx->AssignVariable("resource", dtype_, output_or.value()));
}
};
REGISTER_XLA_OP(Name("AssignVariableXlaConcatND"), AssignVariableXlaConcatNDOp);
}
} | #include <cstdint>
#include <functional>
#include <memory>
#include <numeric>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "absl/types/span.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/graph/testlib.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/protobuf/config.pb.h"
#include "tensorflow/core/public/session.h"
#include "tensorflow/core/public/session_options.h"
#include "tsl/platform/errors.h"
#include "tsl/protobuf/error_codes.pb.h"
namespace tensorflow {
namespace {
MATCHER_P2(IsStatus, error_code, error_message, "") {
return arg.code() == error_code &&
absl::StrContains(arg.message(), error_message);
}
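// Builds a GraphDef from `graph`, runs it in a fresh session, and fetches the
// requested output tensors.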
Status RunGraph(const Graph& graph,
const std::vector<std::string>& output_tensor_names,
const std::vector<std::string>& target_tensor_names,
std::vector<Tensor>* output_tensors) {
GraphDef graph_def;
graph.ToGraphDef(&graph_def);
SessionOptions session_options;
std::unique_ptr<Session> session(NewSession(session_options));
TF_RETURN_IF_ERROR(session->Create(graph_def));
RunOptions run_options;
return session->Run(run_options, {}, output_tensor_names,
target_tensor_names, output_tensors,
nullptr);
}
TEST(ReadVariableXlaSplitNDOpTest, VariableMissing) {
Graph graph(OpRegistry::Global());
Node* var_handle = nullptr;
DataType data_type = DataTypeToEnum<int32_t>::value;
const TensorShape input_shape({4, 4});
TF_ASSERT_OK(NodeBuilder(graph.NewName("var_handle"), "VarHandleOp")
.Attr("dtype", data_type)
.Attr("shape", input_shape)
.Finalize(&graph, &var_handle));
Node* xla_op = nullptr;
const std::vector<int32_t> num_splits = {2, 2};
const int num_outputs = 4;
TF_ASSERT_OK(NodeBuilder(graph.NewName("xla_op"), "ReadVariableXlaSplitND")
.Input(var_handle)
.Attr("num_splits", num_splits)
.Attr("T", data_type)
.Attr("N", num_outputs)
.Finalize(&graph, &xla_op));
std::vector<Tensor> output_tensors;
EXPECT_THAT(RunGraph(graph, {xla_op->name()},
{}, &output_tensors),
IsStatus(error::INVALID_ARGUMENT, "cannot be found"));
}
TEST(ReadVariableXlaSplitNDOpTest, DTypeInvalid) {
Graph graph(OpRegistry::Global());
Node* var_handle = nullptr;
DataType data_type = DataTypeToEnum<int32_t>::value;
const TensorShape input_shape({4, 4});
TF_ASSERT_OK(NodeBuilder(graph.NewName("var_handle"), "VarHandleOp")
.Attr("dtype", data_type)
.Attr("shape", input_shape)
.Finalize(&graph, &var_handle));
Tensor input_tensor(data_type, input_shape);
test::FillIota<int32_t>(&input_tensor, 0);
Node* input = test::graph::Constant(&graph, input_tensor);
Node* assign_var = nullptr;
TF_ASSERT_OK(NodeBuilder(graph.NewName("assign_var"), "AssignVariableOp")
.Input(var_handle)
.Input(input)
.Attr("dtype", data_type)
.Finalize(&graph, &assign_var));
Node* xla_op = nullptr;
const std::vector<int32_t> num_splits = {2, 2};
const int num_outputs = 4;
TF_ASSERT_OK(NodeBuilder(graph.NewName("xla_op"), "ReadVariableXlaSplitND")
.Input(var_handle)
.ControlInput(assign_var)
.Attr("num_splits", num_splits)
.Attr("T", DataTypeToEnum<float>::value)
.Attr("N", num_outputs)
.Finalize(&graph, &xla_op));
std::vector<Tensor> output_tensors;
EXPECT_THAT(RunGraph(graph, {xla_op->name()},
{}, &output_tensors),
IsStatus(error::INVALID_ARGUMENT, "'T' must match 'resource'"));
}
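// Builds a graph feeding an iota-filled constant into an XlaSplitND op and
// records one output tensor name per slice.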
Status CreateSplitTensorGraph(const TensorShape& input_shape,
absl::Span<const int32_t> num_splits,
absl::Span<const int32_t> paddings,
const int num_outputs, Graph* graph,
std::vector<std::string>* output_tensor_names) {
DataType data_type = DataTypeToEnum<int32_t>::value;
Tensor input_tensor(data_type, input_shape);
test::FillIota<int32_t>(&input_tensor, 0);
Node* input = test::graph::Constant(graph, input_tensor);
Node* xla_op = nullptr;
TF_RETURN_IF_ERROR(NodeBuilder(graph->NewName("xla_op"), "XlaSplitND")
.Input(input)
.Attr("num_splits", num_splits)
.Attr("paddings", paddings)
.Attr("T", data_type)
.Attr("N", num_outputs)
.Finalize(graph, &xla_op));
output_tensor_names->reserve(num_outputs);
for (int i = 0; i < num_outputs; ++i) {
output_tensor_names->push_back(absl::StrCat(xla_op->name(), ":", i));
}
return absl::OkStatus();
}
Status CreateSplitResourceGraph(const TensorShape& input_shape,
absl::Span<const int32_t> num_splits,
absl::Span<const int32_t> paddings,
const int num_outputs, Graph* graph,
std::vector<std::string>* output_tensor_names) {
Node* var_handle = nullptr;
DataType data_type = DataTypeToEnum<int32_t>::value;
TF_RETURN_IF_ERROR(NodeBuilder(graph->NewName("var_handle"), "VarHandleOp")
.Attr("dtype", data_type)
.Attr("shape", input_shape)
.Finalize(graph, &var_handle));
Tensor input_tensor(data_type, input_shape);
test::FillIota<int32_t>(&input_tensor, 0);
Node* input = test::graph::Constant(graph, input_tensor);
Node* assign_var = nullptr;
TF_RETURN_IF_ERROR(
NodeBuilder(graph->NewName("assign_var"), "AssignVariableOp")
.Input(var_handle)
.Input(input)
.Attr("dtype", data_type)
.Finalize(graph, &assign_var));
Node* xla_op = nullptr;
TF_RETURN_IF_ERROR(
NodeBuilder(graph->NewName("xla_op"), "ReadVariableXlaSplitND")
.Input(var_handle)
.ControlInput(assign_var)
.Attr("num_splits", num_splits)
.Attr("paddings", paddings)
.Attr("T", data_type)
.Attr("N", num_outputs)
.Finalize(graph, &xla_op));
output_tensor_names->reserve(num_outputs);
for (int i = 0; i < num_outputs; ++i) {
output_tensor_names->push_back(absl::StrCat(xla_op->name(), ":", i));
}
return absl::OkStatus();
}
struct XlaSplitNDTestParam {
std::string name;
std::function<Status(const TensorShape&, absl::Span<const int32_t>,
absl::Span<const int32_t>, const int num_outputs, Graph*,
std::vector<std::string>*)>
graph_creator;
};
using XlaSplitNDOpTest = ::testing::TestWithParam<XlaSplitNDTestParam>;
TEST_P(XlaSplitNDOpTest, SplitDimensionZero) {
Graph graph(OpRegistry::Global());
const TensorShape input_shape({1, 1, 1});
const std::vector<int32_t> num_splits = {1, 1, 0};
const std::vector<int32_t> paddings;
const int num_outputs = 1;
std::vector<std::string> output_tensor_names;
TF_ASSERT_OK(GetParam().graph_creator(input_shape, num_splits, paddings,
num_outputs, &graph,
&output_tensor_names));
std::vector<Tensor> output_tensors;
EXPECT_THAT(
RunGraph(graph, output_tensor_names, {},
&output_tensors),
IsStatus(error::INVALID_ARGUMENT, "index 2 must be positive, but got 0"));
}
TEST_P(XlaSplitNDOpTest, SplitDimensionNegative) {
Graph graph(OpRegistry::Global());
const TensorShape input_shape({1, 1, 1});
const std::vector<int32_t> num_splits = {1, -1, 1};
const std::vector<int32_t> paddings;
const int num_outputs = 1;
std::vector<std::string> output_tensor_names;
TF_ASSERT_OK(GetParam().graph_creator(input_shape, num_splits, paddings,
num_outputs, &graph,
&output_tensor_names));
std::vector<Tensor> output_tensors;
EXPECT_THAT(RunGraph(graph, output_tensor_names, {},
&output_tensors),
IsStatus(error::INVALID_ARGUMENT,
"index 1 must be positive, but got -1"));
}
TEST_P(XlaSplitNDOpTest, NumOutputsMismatch) {
Graph graph(OpRegistry::Global());
const TensorShape input_shape({2});
const std::vector<int32_t> num_splits = {2};
  const std::vector<int32_t> paddings;
const int num_outputs = 1;
std::vector<std::string> output_tensor_names;
TF_ASSERT_OK(GetParam().graph_creator(input_shape, num_splits, paddings,
num_outputs, &graph,
&output_tensor_names));
std::vector<Tensor> output_tensors;
EXPECT_THAT(
RunGraph(graph, output_tensor_names, {},
&output_tensors),
IsStatus(error::INVALID_ARGUMENT, "'N' must match number of slices 2"));
}
TEST_P(XlaSplitNDOpTest, PaddingsLengthMismatch) {
Graph graph(OpRegistry::Global());
const TensorShape input_shape({2, 2});
const std::vector<int32_t> num_splits = {2, 2};
const std::vector<int32_t> paddings = {0};
const int num_outputs = 4;
std::vector<std::string> output_tensor_names;
TF_ASSERT_OK(GetParam().graph_creator(input_shape, num_splits, paddings,
num_outputs, &graph,
&output_tensor_names));
std::vector<Tensor> output_tensors;
EXPECT_THAT(RunGraph(graph, output_tensor_names, {},
&output_tensors),
IsStatus(error::INVALID_ARGUMENT, "length 2, but got 1"));
}
TEST_P(XlaSplitNDOpTest, PaddingsNegative) {
Graph graph(OpRegistry::Global());
const TensorShape input_shape({2, 2});
const std::vector<int32_t> num_splits = {2, 2};
const std::vector<int32_t> paddings = {0, -1};
const int num_outputs = 4;
std::vector<std::string> output_tensor_names;
TF_ASSERT_OK(GetParam().graph_creator(input_shape, num_splits, paddings,
num_outputs, &graph,
&output_tensor_names));
std::vector<Tensor> output_tensors;
EXPECT_THAT(
RunGraph(graph, output_tensor_names, {},
&output_tensors),
IsStatus(error::INVALID_ARGUMENT, "non-negative, but got -1 at index 1"));
}
TEST_P(XlaSplitNDOpTest, InputRank0) {
Graph graph(OpRegistry::Global());
const TensorShape input_shape({});
const std::vector<int32_t> num_splits = {2};
const std::vector<int32_t> paddings;
const int num_outputs = 2;
std::vector<std::string> output_tensor_names;
TF_ASSERT_OK(GetParam().graph_creator(input_shape, num_splits, paddings,
num_outputs, &graph,
&output_tensor_names));
std::vector<Tensor> output_tensors;
EXPECT_THAT(RunGraph(graph, output_tensor_names, {},
&output_tensors),
IsStatus(error::INVALID_ARGUMENT, "range (0, 8], but got 0"));
}
TEST_P(XlaSplitNDOpTest, InputRank9) {
Graph graph(OpRegistry::Global());
const TensorShape input_shape({2, 2, 2, 2, 2, 2, 2, 2, 2});
const std::vector<int32_t> num_splits(9, 2);
const std::vector<int32_t> paddings;
const int num_outputs = 512;
std::vector<std::string> output_tensor_names;
TF_ASSERT_OK(GetParam().graph_creator(input_shape, num_splits, paddings,
num_outputs, &graph,
&output_tensor_names));
std::vector<Tensor> output_tensors;
EXPECT_THAT(RunGraph(graph, output_tensor_names, {},
&output_tensors),
IsStatus(error::INVALID_ARGUMENT, "range (0, 8], but got 9"));
}
TEST_P(XlaSplitNDOpTest, InputRankSplitMismatch) {
Graph graph(OpRegistry::Global());
const TensorShape input_shape({2, 2});
const std::vector<int32_t> num_splits = {2, 2, 2};
const std::vector<int32_t> paddings;
const int num_outputs = 8;
std::vector<std::string> output_tensor_names;
TF_ASSERT_OK(GetParam().graph_creator(input_shape, num_splits, paddings,
num_outputs, &graph,
&output_tensor_names));
std::vector<Tensor> output_tensors;
EXPECT_THAT(RunGraph(graph, output_tensor_names, {},
&output_tensors),
IsStatus(error::INVALID_ARGUMENT,
"'num_splits' length 3, but got rank 2"));
}
TEST_P(XlaSplitNDOpTest, DimNotEvenlySplit) {
Graph graph(OpRegistry::Global());
const TensorShape input_shape({4, 2});
const std::vector<int32_t> num_splits = {3, 2};
const std::vector<int32_t> paddings;
const int num_outputs = 6;
std::vector<std::string> output_tensor_names;
TF_ASSERT_OK(GetParam().graph_creator(input_shape, num_splits, paddings,
num_outputs, &graph,
&output_tensor_names));
std::vector<Tensor> output_tensors;
EXPECT_THAT(RunGraph(graph, output_tensor_names, {},
&output_tensors),
IsStatus(error::INVALID_ARGUMENT, "divisible by 'num_splits' 3"));
}
TEST_P(XlaSplitNDOpTest, DimWithPaddingNotEvenlySplit) {
Graph graph(OpRegistry::Global());
const TensorShape input_shape({4, 2});
const std::vector<int32_t> num_splits = {2, 2};
const std::vector<int32_t> paddings = {0, 1};
const int num_outputs = 4;
std::vector<std::string> output_tensor_names;
TF_ASSERT_OK(GetParam().graph_creator(input_shape, num_splits, paddings,
num_outputs, &graph,
&output_tensor_names));
std::vector<Tensor> output_tensors;
EXPECT_THAT(RunGraph(graph, output_tensor_names, {},
&output_tensors),
IsStatus(error::INVALID_ARGUMENT, "divisible by 'num_splits' 2"));
}
TEST_P(XlaSplitNDOpTest, NoSplits) {
Graph graph(OpRegistry::Global());
const TensorShape input_shape({2, 2, 2});
const std::vector<int32_t> num_splits = {1, 1, 1};
  const std::vector<int32_t> paddings;
const int num_outputs = 1;
std::vector<std::string> output_tensor_names;
TF_ASSERT_OK(GetParam().graph_creator(input_shape, num_splits, paddings,
num_outputs, &graph,
&output_tensor_names));
std::vector<Tensor> output_tensors;
TF_ASSERT_OK(RunGraph(graph, output_tensor_names, {},
&output_tensors));
ASSERT_EQ(output_tensors.size(), 1);
test::ExpectTensorEqual<int32_t>(
output_tensors[0], test::AsTensor<int32_t>({0, 1, 2, 3, 4, 5, 6, 7},
TensorShape({2, 2, 2})));
}
TEST_P(XlaSplitNDOpTest, NoSplitsWithPadding) {
Graph graph(OpRegistry::Global());
const TensorShape input_shape({2, 1, 1});
const std::vector<int32_t> num_splits = {1, 1, 1};
  const std::vector<int32_t> paddings = {0, 1, 1};
const int num_outputs = 1;
std::vector<std::string> output_tensor_names;
TF_ASSERT_OK(GetParam().graph_creator(input_shape, num_splits, paddings,
num_outputs, &graph,
&output_tensor_names));
std::vector<Tensor> output_tensors;
TF_ASSERT_OK(RunGraph(graph, output_tensor_names, {},
&output_tensors));
ASSERT_EQ(output_tensors.size(), 1);
test::ExpectTensorEqual<int32_t>(
output_tensors[0], test::AsTensor<int32_t>({0, 0, 0, 0, 1, 0, 0, 0},
TensorShape({2, 2, 2})));
}
TEST_P(XlaSplitNDOpTest, SplitNoPadding) {
Graph graph(OpRegistry::Global());
const TensorShape input_shape({4, 4});
const std::vector<int32_t> num_splits = {2, 2};
const std::vector<int32_t> paddings;
const int num_outputs = 4;
std::vector<std::string> output_tensor_names;
TF_ASSERT_OK(GetParam().graph_creator(input_shape, num_splits, paddings,
num_outputs, &graph,
&output_tensor_names));
std::vector<Tensor> output_tensors;
TF_ASSERT_OK(RunGraph(graph, output_tensor_names, {},
&output_tensors));
ASSERT_EQ(output_tensors.size(), num_outputs);
test::ExpectTensorEqual<int32_t>(
output_tensors[0],
test::AsTensor<int32_t>({0, 1, 4, 5}, TensorShape({2, 2})));
test::ExpectTensorEqual<int32_t>(
output_tensors[1],
test::AsTensor<int32_t>({2, 3, 6, 7}, TensorShape({2, 2})));
test::ExpectTensorEqual<int32_t>(
output_tensors[2],
test::AsTensor<int32_t>({8, 9, 12, 13}, TensorShape({2, 2})));
test::ExpectTensorEqual<int32_t>(
output_tensors[3],
test::AsTensor<int32_t>({10, 11, 14, 15}, TensorShape({2, 2})));
}
TEST_P(XlaSplitNDOpTest, SplitPartialPadding) {
Graph graph(OpRegistry::Global());
const TensorShape input_shape({3, 3});
const std::vector<int32_t> num_splits = {2, 2};
const std::vector<int32_t> paddings = {1, 1};
const int num_outputs = 4;
std::vector<std::string> output_tensor_names;
TF_ASSERT_OK(GetParam().graph_creator(input_shape, num_splits, paddings,
num_outputs, &graph,
&output_tensor_names));
std::vector<Tensor> output_tensors;
TF_ASSERT_OK(RunGraph(graph, output_tensor_names, {},
&output_tensors));
ASSERT_EQ(output_tensors.size(), num_outputs);
test::ExpectTensorEqual<int32_t>(
output_tensors[0],
test::AsTensor<int32_t>({0, 1, 3, 4}, TensorShape({2, 2})));
test::ExpectTensorEqual<int32_t>(
output_tensors[1],
test::AsTensor<int32_t>({2, 0, 5, 0}, TensorShape({2, 2})));
test::ExpectTensorEqual<int32_t>(
output_tensors[2],
test::AsTensor<int32_t>({6, 7, 0, 0}, TensorShape({2, 2})));
test::ExpectTensorEqual<int32_t>(
output_tensors[3],
test::AsTensor<int32_t>({8, 0, 0, 0}, TensorShape({2, 2})));
}
TEST_P(XlaSplitNDOpTest, SplitCompletePadding) {
Graph graph(OpRegistry::Global());
const TensorShape input_shape({2, 1});
const std::vector<int32_t> num_splits = {2, 2};
const std::vector<int32_t> paddings = {2, 3};
const int num_outputs = 4;
std::vector<std::string> output_tensor_names;
TF_ASSERT_OK(GetParam().graph_creator(input_shape, num_splits, paddings,
num_outputs, &graph,
&output_tensor_names));
std::vector<Tensor> output_tensors;
TF_ASSERT_OK(RunGraph(graph, output_tensor_names, {},
&output_tensors));
ASSERT_EQ(output_tensors.size(), num_outputs);
test::ExpectTensorEqual<int32_t>(
output_tensors[0],
test::AsTensor<int32_t>({0, 0, 1, 0}, TensorShape({2, 2})));
test::ExpectTensorEqual<int32_t>(
output_tensors[1],
test::AsTensor<int32_t>({0, 0, 0, 0}, TensorShape({2, 2})));
test::ExpectTensorEqual<int32_t>(
output_tensors[2],
test::AsTensor<int32_t>({0, 0, 0, 0}, TensorShape({2, 2})));
test::ExpectTensorEqual<int32_t>(
output_tensors[3],
test::AsTensor<int32_t>({0, 0, 0, 0}, TensorShape({2, 2})));
}
INSTANTIATE_TEST_SUITE_P(
XlaSplitNDOpTest, XlaSplitNDOpTest,
::testing::ValuesIn<XlaSplitNDTestParam>(
{{"Tensor", CreateSplitTensorGraph},
{"Resource", CreateSplitResourceGraph}}),
[](const ::testing::TestParamInfo<XlaSplitNDOpTest::ParamType>& info) {
return info.param.name;
});
struct RankedXlaSplitNDTestParam {
std::string name;
int rank = 0;
std::function<Status(const TensorShape&, absl::Span<const int32_t>,
absl::Span<const int32_t>, const int num_outputs, Graph*,
std::vector<std::string>*)>
graph_creator;
};
class RankedXlaSplitNDOpTest
: public ::testing::TestWithParam<RankedXlaSplitNDTestParam> {};
TEST_P(RankedXlaSplitNDOpTest, TestSubscriptRank) {
const int rank = GetParam().rank;
const std::vector<int32_t> num_splits(rank, 2);
Graph graph(OpRegistry::Global());
const TensorShape input_shape(std::vector<int64_t>(rank, 2));
const std::vector<int32_t> paddings;
const int num_outputs = 2 << (rank - 1);
std::vector<std::string> output_tensor_names;
TF_ASSERT_OK(GetParam().graph_creator(input_shape, num_splits, paddings,
num_outputs, &graph,
&output_tensor_names));
std::vector<Tensor> output_tensors;
TF_ASSERT_OK(RunGraph(graph, output_tensor_names, {},
&output_tensors));
ASSERT_EQ(output_tensors.size(), num_outputs);
TensorShape output_shape(std::vector<int64_t>(rank, 1));
for (int i = 0; i < num_outputs; ++i) {
test::ExpectTensorEqual<int32_t>(
output_tensors[i], test::AsTensor<int32_t>({i}, output_shape));
}
}
INSTANTIATE_TEST_SUITE_P(
RankedXlaSplitNDOpTest, RankedXlaSplitNDOpTest,
::testing::ValuesIn<RankedXlaSplitNDTestParam>(
{{"TensorRanked1", 1, CreateSplitTensorGraph},
{"TensorRanked2", 2, CreateSplitTensorGraph},
{"TensorRanked3", 3, CreateSplitTensorGraph},
{"TensorRanked4", 4, CreateSplitTensorGraph},
{"TensorRanked5", 5, CreateSplitTensorGraph},
{"TensorRanked6", 6, CreateSplitTensorGraph},
{"TensorRanked7", 7, CreateSplitTensorGraph},
{"TensorRanked8", 8, CreateSplitTensorGraph},
{"ResourceRanked1", 1, CreateSplitResourceGraph},
{"ResourceRanked2", 2, CreateSplitResourceGraph},
{"ResourceRanked3", 3, CreateSplitResourceGraph},
{"ResourceRanked4", 4, CreateSplitResourceGraph},
{"ResourceRanked5", 5, CreateSplitResourceGraph},
{"ResourceRanked6", 6, CreateSplitResourceGraph},
{"ResourceRanked7", 7, CreateSplitResourceGraph},
{"ResourceRanked8", 8, CreateSplitResourceGraph}}),
[](const ::testing::TestParamInfo<RankedXlaSplitNDOpTest::ParamType>&
info) { return info.param.name; });
TEST(AssignVariableXlaConcatNDOpTest, HandleDTypeInvalid) {
Graph graph(OpRegistry::Global());
Node* var_handle = nullptr;
DataType handle_dtype = DataTypeToEnum<int32_t>::value;
PartialTensorShape handle_shape;
TF_ASSERT_OK(NodeBuilder(graph.NewName("var_handle"), "VarHandleOp")
.Attr("dtype", handle_dtype)
.Attr("shape", handle_shape)
.Finalize(&graph, &var_handle));
DataType update_data_type = DataTypeToEnum<float>::value;
const TensorShape update_input_shape({4, 4});
Tensor update_input_tensor(update_data_type, update_input_shape);
test::FillIota<float>(&update_input_tensor, 0.f);
Node* update_input = test::graph::Constant(&graph, update_input_tensor);
Node* xla_op = nullptr;
const std::vector<int32_t> num_concats = {1, 1};
const int num_inputs = 1;
TF_ASSERT_OK(NodeBuilder(graph.NewName("xla_op"), "AssignVariableXlaConcatND")
.Input(var_handle)
.Input(std::vector<NodeBuilder::NodeOut>{update_input})
.Attr("num_concats", num_concats)
.Attr("T", update_data_type)
.Attr("N", num_inputs)
.Finalize(&graph, &xla_op));
std::vector<Tensor> output_tensors;
EXPECT_THAT(
RunGraph(graph, {},
{xla_op->name()}, &output_tensors),
IsStatus(error::INVALID_ARGUMENT, "dtype int32, but got float"));
}
TEST(AssignVariableXlaConcatNDOpTest, TensorDTypeInvalid) {
Graph graph(OpRegistry::Global());
Node* var_handle = nullptr;
DataType handle_dtype = DataTypeToEnum<float>::value;
PartialTensorShape handle_shape;
TF_ASSERT_OK(NodeBuilder(graph.NewName("var_handle"), "VarHandleOp")
.Attr("dtype", handle_dtype)
.Attr("shape", handle_shape)
.Finalize(&graph, &var_handle));
DataType init_data_type = DataTypeToEnum<int32_t>::value;
const TensorShape init_input_shape({4, 4});
Tensor init_input_tensor(init_data_type, init_input_shape);
test::FillIota<int32_t>(&init_input_tensor, 0);
Node* input = test::graph::Constant(&graph, init_input_tensor);
Node* assign_var = nullptr;
TF_ASSERT_OK(NodeBuilder(graph.NewName("assign_var"), "AssignVariableOp")
.Input(var_handle)
.Input(input)
.Attr("dtype", init_data_type)
.Finalize(&graph, &assign_var));
DataType update_data_type = DataTypeToEnum<float>::value;
const TensorShape update_input_shape({4, 4});
Tensor update_input_tensor(update_data_type, update_input_shape);
test::FillIota<float>(&update_input_tensor, 0.f);
Node* update_input = test::graph::Constant(&graph, update_input_tensor);
Node* xla_op = nullptr;
const std::vector<int32_t> num_concats = {1, 1};
const int num_inputs = 1;
TF_ASSERT_OK(NodeBuilder(graph.NewName("xla_op"), "AssignVariableXlaConcatND")
.Input(var_handle)
.Input(std::vector<NodeBuilder::NodeOut>{update_input})
.ControlInput(assign_var)
.Attr("num_concats", num_concats)
.Attr("T", update_data_type)
.Attr("N", num_inputs)
.Finalize(&graph, &xla_op));
std::vector<Tensor> output_tensors;
EXPECT_THAT(
RunGraph(graph, {},
{xla_op->name()}, &output_tensors),
IsStatus(error::INVALID_ARGUMENT, "dtype int32, but got float"));
}
TEST(AssignVariableXlaConcatNDOpTest, HandleShapeIncompatible) {
Graph graph(OpRegistry::Global());
Node* var_handle = nullptr;
DataType handle_dtype = DataTypeToEnum<float>::value;
PartialTensorShape handle_shape({});
TF_ASSERT_OK(NodeBuilder(graph.NewName("var_handle"), "VarHandleOp")
.Attr("dtype", handle_dtype)
.Attr("shape", handle_shape)
.Finalize(&graph, &var_handle));
DataType update_data_type = DataTypeToEnum<float>::value;
const TensorShape update_input_shape({4, 4});
Tensor update_input_tensor(update_data_type, update_input_shape);
test::FillIota<float>(&update_input_tensor, 0.f);
Node* update_input = test::graph::Constant(&graph, update_input_tensor);
Node* xla_op = nullptr;
const std::vector<int32_t> num_concats = {1, 1};
const int num_inputs = 1;
TF_ASSERT_OK(NodeBuilder(graph.NewName("xla_op"), "AssignVariableXlaConcatND")
.Input(var_handle)
.Input(std::vector<NodeBuilder::NodeOut>{update_input})
.Attr("num_concats", num_concats)
.Attr("T", update_data_type)
.Attr("N", num_inputs)
.Finalize(&graph, &xla_op));
std::vector<Tensor> output_tensors;
EXPECT_THAT(
RunGraph(graph, {},
{xla_op->name()}, &output_tensors),
IsStatus(error::INVALID_ARGUMENT, "expected shape [4,4], but got []"));
}
TEST(AssignVariableXlaConcatNDOpTest, HandleShapeWithPaddingIncompatible) {
Graph graph(OpRegistry::Global());
Node* var_handle = nullptr;
DataType handle_dtype = DataTypeToEnum<float>::value;
PartialTensorShape handle_shape({4, 4});
TF_ASSERT_OK(NodeBuilder(graph.NewName("var_handle"), "VarHandleOp")
.Attr("dtype", handle_dtype)
.Attr("shape", handle_shape)
.Finalize(&graph, &var_handle));
DataType update_data_type = DataTypeToEnum<float>::value;
const TensorShape update_input_shape({4, 4});
Tensor update_input_tensor(update_data_type, update_input_shape);
test::FillIota<float>(&update_input_tensor, 0.f);
Node* update_input = test::graph::Constant(&graph, update_input_tensor);
Node* xla_op = nullptr;
const std::vector<int32_t> num_concats = {1, 1};
const std::vector<int32_t> paddings = {1, 1};
const int num_inputs = 1;
TF_ASSERT_OK(NodeBuilder(graph.NewName("xla_op"), "AssignVariableXlaConcatND")
.Input(var_handle)
.Input(std::vector<NodeBuilder::NodeOut>{update_input})
.Attr("num_concats", num_concats)
.Attr("paddings", paddings)
.Attr("T", update_data_type)
.Attr("N", num_inputs)
.Finalize(&graph, &xla_op));
std::vector<Tensor> output_tensors;
EXPECT_THAT(
RunGraph(graph, {},
{xla_op->name()}, &output_tensors),
IsStatus(error::INVALID_ARGUMENT, "expected shape [3,3], but got [4,4]"));
}
TEST(AssignVariableXlaConcatNDOpTest, AssignDifferentShape) {
Graph graph(OpRegistry::Global());
Node* var_handle = nullptr;
DataType data_type = DataTypeToEnum<float>::value;
TF_ASSERT_OK(NodeBuilder(graph.NewName("var_handle"), "VarHandleOp")
.Attr("dtype", data_type)
.Attr("shape", PartialTensorShape({4, -1}))
.Finalize(&graph, &var_handle));
const TensorShape init_input_shape({4, 2});
Tensor init_input_tensor(data_type, init_input_shape);
test::FillFn<float>(&init_input_tensor, [](int unused) { return -1.f; });
Node* init_input = test::graph::Constant(&graph, init_input_tensor);
Node* assign_var = nullptr;
TF_ASSERT_OK(NodeBuilder(graph.NewName("assign_var"), "AssignVariableOp")
.Input(var_handle)
.Input(init_input)
.Attr("dtype", data_type)
.Finalize(&graph, &assign_var));
const TensorShape update_input_shape({4, 4});
Tensor update_input_tensor(data_type, update_input_shape);
test::FillIota<float>(&update_input_tensor, 0.f);
Node* update_input = test::graph::Constant(&graph, update_input_tensor);
Node* xla_op = nullptr;
const std::vector<int32_t> num_concats = {1, 1};
const int num_inputs = 1;
TF_ASSERT_OK(NodeBuilder(graph.NewName("xla_op"), "AssignVariableXlaConcatND")
.Input(var_handle)
.Input(std::vector<NodeBuilder::NodeOut>{update_input})
.ControlInput(assign_var)
.Attr("num_concats", num_concats)
.Attr("T", data_type)
.Attr("N", num_inputs)
.Finalize(&graph, &xla_op));
Node* read_var = nullptr;
TF_ASSERT_OK(NodeBuilder(graph.NewName("read_var"), "ReadVariableOp")
.Input(var_handle)
.ControlInput(xla_op)
.Attr("dtype", data_type)
.Finalize(&graph, &read_var));
std::vector<Tensor> output_tensors;
TF_ASSERT_OK(RunGraph(
graph, {absl::StrCat(read_var->name(), ":", 0)},
{}, &output_tensors));
ASSERT_EQ(output_tensors.size(), 1);
test::ExpectTensorNear<float>(output_tensors[0], update_input_tensor,
1e-6);
}
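// Builds a graph feeding iota-filled constant slices into an XlaConcatND op.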
Status CreateConcatTensorGraph(absl::Span<const TensorShape> input_shapes,
absl::Span<const int32_t> num_concats,
absl::Span<const int32_t> paddings, Graph* graph,
std::vector<std::string>* output_tensor_names) {
int32_t val = 0;
DataType data_type = DataTypeToEnum<int32_t>::value;
std::vector<NodeBuilder::NodeOut> inputs;
inputs.reserve(input_shapes.size());
for (const TensorShape& input_shape : input_shapes) {
Tensor input_tensor(data_type, input_shape);
test::FillIota<int32_t>(&input_tensor, val);
val += input_tensor.NumElements();
inputs.push_back(test::graph::Constant(graph, input_tensor));
}
Node* xla_op = nullptr;
TF_RETURN_IF_ERROR(NodeBuilder(graph->NewName("xla_op"), "XlaConcatND")
.Input(inputs)
.Attr("num_concats", num_concats)
.Attr("paddings", paddings)
.Attr("T", data_type)
.Attr("N", static_cast<int64_t>(input_shapes.size()))
.Finalize(graph, &xla_op));
output_tensor_names->push_back(absl::StrCat(xla_op->name(), ":", 0));
return absl::OkStatus();
}
template <bool Init>
Status CreateConcatResourceGraph(
absl::Span<const TensorShape> input_shapes,
absl::Span<const int32_t> num_concats, absl::Span<const int32_t> paddings,
Graph* graph, std::vector<std::string>* output_tensor_names) {
Node* var_handle = nullptr;
DataType data_type = DataTypeToEnum<int32_t>::value;
TF_RETURN_IF_ERROR(NodeBuilder(graph->NewName("var_handle"), "VarHandleOp")
.Attr("dtype", data_type)
.Attr("shape", PartialTensorShape())
.Finalize(graph, &var_handle));
Node* assign_var = nullptr;
if (Init) {
Tensor init_input_tensor(data_type, input_shapes.front());
test::FillFn<int32_t>(&init_input_tensor, [](int unused) { return -1; });
Node* init_input = test::graph::Constant(graph, init_input_tensor);
TF_RETURN_IF_ERROR(
NodeBuilder(graph->NewName("assign_var"), "AssignVariableOp")
.Input(var_handle)
.Input(init_input)
.Attr("dtype", data_type)
.Finalize(graph, &assign_var));
}
int32_t val = 0;
std::vector<NodeBuilder::NodeOut> inputs;
inputs.reserve(input_shapes.size());
for (const TensorShape& input_shape : input_shapes) {
Tensor input_tensor(data_type, input_shape);
test::FillIota<int32_t>(&input_tensor, val);
val += input_tensor.NumElements();
inputs.push_back(test::graph::Constant(graph, input_tensor));
}
Node* xla_op = nullptr;
NodeBuilder builder(graph->NewName("xla_op"), "AssignVariableXlaConcatND");
builder.Input(var_handle);
builder.Input(inputs);
if (assign_var != nullptr) {
builder.ControlInput(assign_var);
}
TF_RETURN_IF_ERROR(builder.Attr("num_concats", num_concats)
.Attr("paddings", paddings)
.Attr("T", data_type)
.Attr("N", static_cast<int64_t>(input_shapes.size()))
.Finalize(graph, &xla_op));
Node* read_var = nullptr;
TF_RETURN_IF_ERROR(NodeBuilder(graph->NewName("read_var"), "ReadVariableOp")
.Input(var_handle)
.ControlInput(xla_op)
.Attr("dtype", data_type)
.Finalize(graph, &read_var));
output_tensor_names->push_back(absl::StrCat(read_var->name(), ":", 0));
return absl::OkStatus();
}
struct XlaConcatNDTestParam {
std::string name;
std::function<Status(absl::Span<const TensorShape>, absl::Span<const int32_t>,
absl::Span<const int32_t>, Graph*,
std::vector<std::string>*)>
graph_creator;
};
using XlaConcatNDOpTest = ::testing::TestWithParam<XlaConcatNDTestParam>;
TEST_P(XlaConcatNDOpTest, ConcatDimensionZero) {
Graph graph(OpRegistry::Global());
const TensorShape input_shape({1, 1, 1});
const std::vector<int32_t> num_concats = {1, 1, 0};
const std::vector<int32_t> paddings;
std::vector<std::string> output_tensor_names;
TF_ASSERT_OK(GetParam().graph_creator({input_shape}, num_concats, paddings,
&graph, &output_tensor_names));
std::vector<Tensor> output_tensors;
EXPECT_THAT(
RunGraph(graph, output_tensor_names,
{}, &output_tensors),
IsStatus(error::INVALID_ARGUMENT, "index 2 must be positive, but got 0"));
}
TEST_P(XlaConcatNDOpTest, ConcatDimensionNegative) {
Graph graph(OpRegistry::Global());
const TensorShape input_shape({1, 1, 1});
  const std::vector<int32_t> num_concats = {1, -1, 1};
const std::vector<int32_t> paddings;
std::vector<std::string> output_tensor_names;
  TF_ASSERT_OK(GetParam().graph_creator({input_shape}, num_concats, paddings,
&graph, &output_tensor_names));
std::vector<Tensor> output_tensors;
EXPECT_THAT(RunGraph(graph, output_tensor_names,
{}, &output_tensors),
IsStatus(error::INVALID_ARGUMENT,
"index 1 must be positive, but got -1"));
}
TEST_P(XlaConcatNDOpTest, NumInputsMismatch) {
Graph graph(OpRegistry::Global());
const TensorShape input_shape({2});
const std::vector<int32_t> num_concats = {2};
  const std::vector<int32_t> paddings;
std::vector<std::string> output_tensor_names;
TF_ASSERT_OK(GetParam().graph_creator({input_shape}, num_concats, paddings,
&graph, &output_tensor_names));
std::vector<Tensor> output_tensors;
EXPECT_THAT(
RunGraph(graph, output_tensor_names, {},
&output_tensors),
IsStatus(error::INVALID_ARGUMENT, "'N' must match number of slices 2"));
}
TEST_P(XlaConcatNDOpTest, PaddingsLengthMismatch) {
Graph graph(OpRegistry::Global());
const TensorShape input_shape({2, 2});
const std::vector<int32_t> num_concats = {1, 1};
const std::vector<int32_t> paddings = {0};
std::vector<std::string> output_tensor_names;
TF_ASSERT_OK(GetParam().graph_creator({input_shape}, num_concats, paddings,
&graph, &output_tensor_names));
std::vector<Tensor> output_tensors;
EXPECT_THAT(RunGraph(graph, output_tensor_names,
{}, &output_tensors),
IsStatus(error::INVALID_ARGUMENT, "length 2, but got 1"));
}
TEST_P(XlaConcatNDOpTest, PaddingsNegative) {
Graph graph(OpRegistry::Global());
const TensorShape input_shape({2, 2});
const std::vector<int32_t> num_concats = {1, 1};
const std::vector<int32_t> paddings = {0, -1};
std::vector<std::string> output_tensor_names;
TF_ASSERT_OK(GetParam().graph_creator({input_shape}, num_concats, paddings,
&graph, &output_tensor_names));
std::vector<Tensor> output_tensors;
EXPECT_THAT(
RunGraph(graph, output_tensor_names, {},
&output_tensors),
IsStatus(error::INVALID_ARGUMENT, "non-negative, but got -1 at index 1"));
}
TEST_P(XlaConcatNDOpTest, InputRank0) {
Graph graph(OpRegistry::Global());
const TensorShape input_shape({});
const std::vector<int32_t> num_concats;
const std::vector<int32_t> paddings;
std::vector<std::string> output_tensor_names;
TF_ASSERT_OK(GetParam().graph_creator({input_shape}, num_concats, paddings,
&graph, &output_tensor_names));
std::vector<Tensor> output_tensors;
EXPECT_THAT(RunGraph(graph, output_tensor_names,
{}, &output_tensors),
IsStatus(error::INVALID_ARGUMENT, "range (0, 8], but got 0"));
}
TEST_P(XlaConcatNDOpTest, InputRank9) {
Graph graph(OpRegistry::Global());
const TensorShape input_shape({1, 1, 1, 1, 1, 1, 1, 1, 1});
const std::vector<int32_t> num_concats(9, 1);
const std::vector<int32_t> paddings;
std::vector<std::string> output_tensor_names;
TF_ASSERT_OK(GetParam().graph_creator({input_shape}, num_concats, paddings,
&graph, &output_tensor_names));
std::vector<Tensor> output_tensors;
EXPECT_THAT(RunGraph(graph, output_tensor_names,
{}, &output_tensors),
IsStatus(error::INVALID_ARGUMENT, "range (0, 8], but got 9"));
}
TEST_P(XlaConcatNDOpTest, InputRankConcatMismatch) {
Graph graph(OpRegistry::Global());
const TensorShape input_shape({1});
const std::vector<int32_t> num_concats = {1, 1};
const std::vector<int32_t> paddings;
std::vector<std::string> output_tensor_names;
TF_ASSERT_OK(GetParam().graph_creator({input_shape}, num_concats, paddings,
&graph, &output_tensor_names));
std::vector<Tensor> output_tensors;
EXPECT_THAT(RunGraph(graph, output_tensor_names,
{}, &output_tensors),
IsStatus(error::INVALID_ARGUMENT,
"'num_concats' length 2, but got rank 1"));
}
TEST_P(XlaConcatNDOpTest, DifferentShapedInputs) {
Graph graph(OpRegistry::Global());
const std::vector<TensorShape> input_shapes{{1}, {2}};
const std::vector<int32_t> num_concats = {2};
const std::vector<int32_t> paddings;
std::vector<std::string> output_tensor_names;
TF_ASSERT_OK(GetParam().graph_creator(input_shapes, num_concats, paddings,
&graph, &output_tensor_names));
std::vector<Tensor> output_tensors;
EXPECT_THAT(RunGraph(graph, output_tensor_names,
{}, &output_tensors),
IsStatus(error::INVALID_ARGUMENT,
"same expected shape [1], but got [2] at index 1"));
}
TEST_P(XlaConcatNDOpTest, PaddingExceedsOutputDimSize) {
Graph graph(OpRegistry::Global());
const TensorShape input_shape({1});
const std::vector<int32_t> num_concats = {1};
const std::vector<int32_t> paddings = {2};
std::vector<std::string> output_tensor_names;
TF_ASSERT_OK(GetParam().graph_creator({input_shape}, num_concats, paddings,
&graph, &output_tensor_names));
std::vector<Tensor> output_tensors;
EXPECT_THAT(
RunGraph(graph, output_tensor_names,
{}, &output_tensors),
IsStatus(
error::INVALID_ARGUMENT,
"exceed expected output shape dimension 1 at index 0, but got 2"));
}
TEST_P(XlaConcatNDOpTest, NoConcats) {
Graph graph(OpRegistry::Global());
const TensorShape input_shape({2, 2, 2});
const std::vector<int32_t> num_concats = {1, 1, 1};
  const std::vector<int32_t> paddings;
std::vector<std::string> output_tensor_names;
TF_ASSERT_OK(GetParam().graph_creator({input_shape}, num_concats, paddings,
&graph, &output_tensor_names));
std::vector<Tensor> output_tensors;
TF_ASSERT_OK(RunGraph(graph, output_tensor_names, {},
&output_tensors));
ASSERT_EQ(output_tensors.size(), 1);
test::ExpectTensorEqual<int32_t>(
output_tensors[0], test::AsTensor<int32_t>({0, 1, 2, 3, 4, 5, 6, 7},
TensorShape({2, 2, 2})));
}
TEST_P(XlaConcatNDOpTest, NoConcatsWithPadding) {
Graph graph(OpRegistry::Global());
const TensorShape input_shape({2, 2, 2});
const std::vector<int32_t> num_concats = {1, 1, 1};
  const std::vector<int32_t> paddings = {1, 1, 1};
std::vector<std::string> output_tensor_names;
TF_ASSERT_OK(GetParam().graph_creator({input_shape}, num_concats, paddings,
&graph, &output_tensor_names));
std::vector<Tensor> output_tensors;
TF_ASSERT_OK(RunGraph(graph, output_tensor_names,
{}, &output_tensors));
ASSERT_EQ(output_tensors.size(), 1);
test::ExpectTensorEqual<int32_t>(
output_tensors[0], test::AsTensor<int32_t>({0}, TensorShape({1, 1, 1})));
}
TEST_P(XlaConcatNDOpTest, ConcatNoPadding) {
Graph graph(OpRegistry::Global());
const std::vector<TensorShape> input_shapes{{2, 2}, {2, 2}, {2, 2}, {2, 2}};
const std::vector<int32_t> num_concats = {2, 2};
const std::vector<int32_t> paddings;
std::vector<std::string> output_tensor_names;
TF_ASSERT_OK(GetParam().graph_creator(input_shapes, num_concats, paddings,
&graph, &output_tensor_names));
std::vector<Tensor> output_tensors;
TF_ASSERT_OK(RunGraph(graph, output_tensor_names,
{}, &output_tensors));
ASSERT_EQ(output_tensors.size(), 1);
test::ExpectTensorEqual<int32_t>(
output_tensors[0], test::AsTensor<int32_t>({0, 1, 4, 5, 2, 3, 6, 7, 8, 9,
12, 13, 10, 11, 14, 15},
TensorShape({4, 4})));
}
TEST_P(XlaConcatNDOpTest, ConcatPartialPadding) {
Graph graph(OpRegistry::Global());
const std::vector<TensorShape> input_shapes{{2, 2}, {2, 2}, {2, 2}, {2, 2}};
const std::vector<int32_t> num_concats = {2, 2};
const std::vector<int32_t> paddings = {1, 1};
std::vector<std::string> output_tensor_names;
TF_ASSERT_OK(GetParam().graph_creator(input_shapes, num_concats, paddings,
&graph, &output_tensor_names));
std::vector<Tensor> output_tensors;
TF_ASSERT_OK(RunGraph(graph, output_tensor_names,
{}, &output_tensors));
ASSERT_EQ(output_tensors.size(), 1);
test::ExpectTensorEqual<int32_t>(
output_tensors[0], test::AsTensor<int32_t>({0, 1, 4, 2, 3, 6, 8, 9, 12},
TensorShape({3, 3})));
}
TEST_P(XlaConcatNDOpTest, ConcatCompletePadding) {
Graph graph(OpRegistry::Global());
const std::vector<TensorShape> input_shapes{{2, 2}, {2, 2}, {2, 2}, {2, 2}};
const std::vector<int32_t> num_concats = {2, 2};
const std::vector<int32_t> paddings = {2, 2};
std::vector<std::string> output_tensor_names;
TF_ASSERT_OK(GetParam().graph_creator(input_shapes, num_concats, paddings,
&graph, &output_tensor_names));
std::vector<Tensor> output_tensors;
TF_ASSERT_OK(RunGraph(graph, output_tensor_names,
{}, &output_tensors));
ASSERT_EQ(output_tensors.size(), 1);
test::ExpectTensorEqual<int32_t>(
output_tensors[0],
test::AsTensor<int32_t>({0, 1, 2, 3}, TensorShape({2, 2})));
}
INSTANTIATE_TEST_SUITE_P(
XlaConcatNDOpTest, XlaConcatNDOpTest,
::testing::ValuesIn<XlaConcatNDTestParam>(
{{"Tensor", CreateConcatTensorGraph},
{"InitializedResource", CreateConcatResourceGraph<true>},
{"UninitializedResource", CreateConcatResourceGraph<false>}}),
[](const ::testing::TestParamInfo<XlaConcatNDOpTest::ParamType>& info) {
return info.param.name;
});
struct RankedXlaConcatNDTestParam {
std::string name;
int rank = 0;
std::function<Status(absl::Span<const TensorShape>, absl::Span<const int32_t>,
absl::Span<const int32_t>, Graph*,
std::vector<std::string>*)>
graph_creator;
};
class RankedXlaConcatNDOpTest
: public ::testing::TestWithParam<RankedXlaConcatNDTestParam> {};
TEST_P(RankedXlaConcatNDOpTest, TestSubscriptRank) {
const int rank = GetParam().rank;
const std::vector<int32_t> num_concats(rank, 2);
Graph graph(OpRegistry::Global());
const int num_inputs = 2 << (rank - 1);
const TensorShape base_input_shape(std::vector<int64_t>(rank, 1));
const std::vector<TensorShape> input_shapes(num_inputs, base_input_shape);
const std::vector<int32_t> paddings;
std::vector<std::string> output_tensor_names;
TF_ASSERT_OK(GetParam().graph_creator(input_shapes, num_concats, paddings,
&graph, &output_tensor_names));
std::vector<Tensor> output_tensors;
TF_ASSERT_OK(RunGraph(graph, output_tensor_names, {},
&output_tensors));
ASSERT_EQ(output_tensors.size(), 1);
std::vector<int32_t> expected_values(num_inputs);
std::iota(expected_values.begin(), expected_values.end(), 0);
test::ExpectTensorEqual<int32_t>(
output_tensors[0],
test::AsTensor<int32_t>(expected_values,
TensorShape(std::vector<int64_t>(rank, 2))));
}
INSTANTIATE_TEST_SUITE_P(
RankedXlaConcatNDOpTest, RankedXlaConcatNDOpTest,
::testing::ValuesIn<RankedXlaConcatNDTestParam>(
{{"TensorRanked1", 1, CreateConcatTensorGraph},
{"TensorRanked2", 2, CreateConcatTensorGraph},
{"TensorRanked3", 3, CreateConcatTensorGraph},
{"TensorRanked4", 4, CreateConcatTensorGraph},
{"TensorRanked5", 5, CreateConcatTensorGraph},
{"TensorRanked6", 6, CreateConcatTensorGraph},
{"TensorRanked7", 7, CreateConcatTensorGraph},
{"TensorRanked8", 8, CreateConcatTensorGraph},
{"InitializedResourceRanked1", 1, CreateConcatResourceGraph<true>},
{"InitializedResourceRanked2", 2, CreateConcatResourceGraph<true>},
{"InitializedResourceRanked3", 3, CreateConcatResourceGraph<true>},
{"InitializedResourceRanked4", 4, CreateConcatResourceGraph<true>},
{"InitializedResourceRanked5", 5, CreateConcatResourceGraph<true>},
{"InitializedResourceRanked6", 6, CreateConcatResourceGraph<true>},
{"InitializedResourceRanked7", 7, CreateConcatResourceGraph<true>},
{"InitializedResourceRanked8", 8, CreateConcatResourceGraph<true>},
{"UninitializedResourceRanked1", 1, CreateConcatResourceGraph<false>},
{"UninitializedResourceRanked2", 2, CreateConcatResourceGraph<false>},
{"UninitializedResourceRanked3", 3, CreateConcatResourceGraph<false>},
{"UninitializedResourceRanked4", 4, CreateConcatResourceGraph<false>},
{"UninitializedResourceRanked5", 5, CreateConcatResourceGraph<false>},
{"UninitializedResourceRanked6", 6, CreateConcatResourceGraph<false>},
{"UninitializedResourceRanked7", 7, CreateConcatResourceGraph<false>},
{"UninitializedResourceRanked8", 8,
CreateConcatResourceGraph<false>}}),
[](const ::testing::TestParamInfo<RankedXlaConcatNDOpTest::ParamType>&
info) { return info.param.name; });
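// Splits an iota-filled input, concatenates the slices back, and compares the
// roundtrip result element-wise against the original tensor.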
Status CreateRoundtripTensorGraph(
const TensorShape& input_shape, absl::Span<const int32_t> num_partitions,
absl::Span<const int32_t> paddings, Graph* graph,
std::vector<std::string>* output_tensor_names) {
const int32_t num_partitions_size =
std::accumulate(num_partitions.begin(), num_partitions.end(), 1,
std::multiplies<int32_t>());
DataType data_type = DataTypeToEnum<int32_t>::value;
Tensor input_tensor(data_type, input_shape);
test::FillIota<int32_t>(&input_tensor, 0);
Node* input = test::graph::Constant(graph, input_tensor);
Node* xla_split_op = nullptr;
TF_RETURN_IF_ERROR(NodeBuilder(graph->NewName("xla_split_op"), "XlaSplitND")
.Input(input)
.Attr("num_splits", num_partitions)
.Attr("paddings", paddings)
.Attr("T", data_type)
.Attr("N", num_partitions_size)
.Finalize(graph, &xla_split_op));
std::vector<NodeBuilder::NodeOut> concat_inputs;
concat_inputs.reserve(num_partitions_size);
for (int i = 0; i < num_partitions_size; ++i) {
concat_inputs.push_back({xla_split_op, i});
}
Node* xla_concat_op = nullptr;
TF_RETURN_IF_ERROR(NodeBuilder(graph->NewName("xla_concat_op"), "XlaConcatND")
.Input(concat_inputs)
.Attr("num_concats", num_partitions)
.Attr("paddings", paddings)
.Attr("T", data_type)
.Attr("N", num_partitions_size)
.Finalize(graph, &xla_concat_op));
Node* equal = nullptr;
TF_RETURN_IF_ERROR(NodeBuilder(graph->NewName("equal"), "Equal")
.Input(input)
.Input(xla_concat_op)
.Attr("T", data_type)
.Finalize(graph, &equal));
output_tensor_names->push_back(absl::StrCat(equal->name(), ":", 0));
return absl::OkStatus();
}
Status CreateRoundtripResourceGraph(
const TensorShape& input_shape, absl::Span<const int32_t> num_partitions,
absl::Span<const int32_t> paddings, Graph* graph,
std::vector<std::string>* output_tensor_names) {
const int32_t num_partitions_size =
std::accumulate(num_partitions.begin(), num_partitions.end(), 1,
std::multiplies<int32_t>());
Node* var_handle = nullptr;
DataType data_type = DataTypeToEnum<int32_t>::value;
TF_RETURN_IF_ERROR(NodeBuilder(graph->NewName("var_handle"), "VarHandleOp")
.Attr("dtype", data_type)
.Attr("shape", PartialTensorShape())
.Finalize(graph, &var_handle));
Tensor input_tensor(data_type, input_shape);
test::FillIota<int32_t>(&input_tensor, 0);
Node* input = test::graph::Constant(graph, input_tensor);
Node* assign_var = nullptr;
TF_RETURN_IF_ERROR(
NodeBuilder(graph->NewName("assign_var"), "AssignVariableOp")
.Input(var_handle)
.Input(input)
.Attr("dtype", data_type)
.Finalize(graph, &assign_var));
Node* xla_split_op = nullptr;
TF_RETURN_IF_ERROR(
NodeBuilder(graph->NewName("xla_split_op"), "ReadVariableXlaSplitND")
.Input(var_handle)
.ControlInput(assign_var)
.Attr("num_splits", num_partitions)
.Attr("paddings", paddings)
.Attr("T", data_type)
.Attr("N", num_partitions_size)
.Finalize(graph, &xla_split_op));
std::vector<NodeBuilder::NodeOut> concat_inputs;
concat_inputs.reserve(num_partitions_size);
for (int i = 0; i < num_partitions_size; ++i) {
concat_inputs.push_back({xla_split_op, i});
}
Node* xla_concat_op = nullptr;
TF_RETURN_IF_ERROR(
NodeBuilder(graph->NewName("xla_op"), "AssignVariableXlaConcatND")
.Input(var_handle)
.Input(concat_inputs)
.Attr("num_concats", num_partitions)
.Attr("paddings", paddings)
.Attr("T", data_type)
.Attr("N", num_partitions_size)
.Finalize(graph, &xla_concat_op));
Node* read_var = nullptr;
TF_RETURN_IF_ERROR(NodeBuilder(graph->NewName("read_var"), "ReadVariableOp")
.Input(var_handle)
.ControlInput(xla_concat_op)
.Attr("dtype", data_type)
.Finalize(graph, &read_var));
Node* equal = nullptr;
TF_RETURN_IF_ERROR(NodeBuilder(graph->NewName("equal"), "Equal")
.Input(input)
.Input(read_var)
.Attr("T", data_type)
.Finalize(graph, &equal));
output_tensor_names->push_back(absl::StrCat(equal->name(), ":", 0));
return absl::OkStatus();
}
struct RoundtripXlaSplitConcatNDTestParam {
std::string name;
int rank = 0;
std::function<Status(const TensorShape&, absl::Span<const int32_t>,
absl::Span<const int32_t>, Graph*,
std::vector<std::string>*)>
graph_creator;
};
class RoundtripXlaSplitConcatNDTest
: public ::testing::TestWithParam<RoundtripXlaSplitConcatNDTestParam> {};
template <typename T>
Tensor Constant(T v, TensorShape shape) {
Tensor ret(DataTypeToEnum<T>::value, shape);
ret.flat<T>().setConstant(v);
return ret;
}
TEST_P(RoundtripXlaSplitConcatNDTest, NoPadding) {
const int rank = GetParam().rank;
const std::vector<int32_t> num_partitions(rank, 2);
Graph graph(OpRegistry::Global());
const TensorShape input_shape(std::vector<int64_t>(rank, 4));
const std::vector<int32_t> paddings;
std::vector<std::string> output_tensor_names;
TF_ASSERT_OK(GetParam().graph_creator(input_shape, num_partitions, paddings,
&graph, &output_tensor_names));
std::vector<Tensor> output_tensors;
TF_ASSERT_OK(RunGraph(graph, output_tensor_names, {},
&output_tensors));
ASSERT_EQ(output_tensors.size(), 1);
test::ExpectTensorEqual<bool>(
output_tensors[0],
Constant<bool>(true, TensorShape(std::vector<int64_t>(rank, 4))));
}
TEST_P(RoundtripXlaSplitConcatNDTest, PartialPadding) {
const int rank = GetParam().rank;
const std::vector<int32_t> num_partitions(rank, 2);
Graph graph(OpRegistry::Global());
const TensorShape input_shape(std::vector<int64_t>(rank, 4));
const std::vector<int32_t> paddings(rank, 2);
std::vector<std::string> output_tensor_names;
TF_ASSERT_OK(GetParam().graph_creator(input_shape, num_partitions, paddings,
&graph, &output_tensor_names));
std::vector<Tensor> output_tensors;
TF_ASSERT_OK(RunGraph(graph, output_tensor_names, {},
&output_tensors));
ASSERT_EQ(output_tensors.size(), 1);
test::ExpectTensorEqual<bool>(
output_tensors[0],
Constant<bool>(true, TensorShape(std::vector<int64_t>(rank, 4))));
}
TEST_P(RoundtripXlaSplitConcatNDTest, CompletePadding) {
const int rank = GetParam().rank;
const std::vector<int32_t> num_partitions(rank, 2);
Graph graph(OpRegistry::Global());
const TensorShape input_shape(std::vector<int64_t>(rank, 4));
const std::vector<int32_t> paddings(rank, 4);
std::vector<std::string> output_tensor_names;
TF_ASSERT_OK(GetParam().graph_creator(input_shape, num_partitions, paddings,
&graph, &output_tensor_names));
std::vector<Tensor> output_tensors;
TF_ASSERT_OK(RunGraph(graph, output_tensor_names, {},
&output_tensors));
ASSERT_EQ(output_tensors.size(), 1);
test::ExpectTensorEqual<bool>(
output_tensors[0],
Constant<bool>(true, TensorShape(std::vector<int64_t>(rank, 4))));
}
INSTANTIATE_TEST_SUITE_P(
RoundtripXlaSplitConcatNDTest, RoundtripXlaSplitConcatNDTest,
::testing::ValuesIn<RoundtripXlaSplitConcatNDTestParam>(
{{"TensorRanked1", 1, CreateRoundtripTensorGraph},
{"TensorRanked2", 2, CreateRoundtripTensorGraph},
{"TensorRanked3", 3, CreateRoundtripTensorGraph},
{"TensorRanked4", 4, CreateRoundtripTensorGraph},
{"TensorRanked5", 5, CreateRoundtripTensorGraph},
{"TensorRanked6", 6, CreateRoundtripTensorGraph},
{"TensorRanked7", 7, CreateRoundtripTensorGraph},
{"TensorRanked8", 8, CreateRoundtripTensorGraph},
{"ResourceRanked1", 1, CreateRoundtripResourceGraph},
{"ResourceRanked2", 2, CreateRoundtripResourceGraph},
{"ResourceRanked3", 3, CreateRoundtripResourceGraph},
{"ResourceRanked4", 4, CreateRoundtripResourceGraph},
{"ResourceRanked5", 5, CreateRoundtripResourceGraph},
{"ResourceRanked6", 6, CreateRoundtripResourceGraph},
{"ResourceRanked7", 7, CreateRoundtripResourceGraph},
{"ResourceRanked8", 8, CreateRoundtripResourceGraph}}),
[](const ::testing::TestParamInfo<RoundtripXlaSplitConcatNDTest::ParamType>&
info) { return info.param.name; });
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/kernels/sharding_util_ops.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tpu/kernels/sharding_util_ops_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
2f0272bc-020c-4677-9fbd-22418cfd65da | cpp | tensorflow/tensorflow | atrace_profiler | tensorflow/lite/profiling/atrace_profiler.cc | tensorflow/lite/profiling/atrace_profiler_test.cc | #include "tensorflow/lite/profiling/atrace_profiler.h"
#include <dlfcn.h>
#include "tensorflow/lite/core/api/profiler.h"
#if defined(__ANDROID__)
#include <sys/system_properties.h>
#endif
#include <string>
#include <type_traits>
namespace tflite {
namespace profiling {
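// Profiler that forwards TFLite profiling events to Android ATrace. The
// ATrace_isEnabled/beginSection/endSection symbols are resolved at runtime
// from libandroid.so via dlopen/dlsym, so there is no link-time dependency
// on the NDK tracing API.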
class ATraceProfiler : public tflite::Profiler {
public:
using FpIsEnabled = std::add_pointer<bool()>::type;
using FpBeginSection = std::add_pointer<void(const char*)>::type;
using FpEndSection = std::add_pointer<void()>::type;
ATraceProfiler() {
handle_ = dlopen("libandroid.so", RTLD_NOW | RTLD_LOCAL);
if (handle_) {
atrace_is_enabled_ =
reinterpret_cast<FpIsEnabled>(dlsym(handle_, "ATrace_isEnabled"));
atrace_begin_section_ = reinterpret_cast<FpBeginSection>(
dlsym(handle_, "ATrace_beginSection"));
atrace_end_section_ =
reinterpret_cast<FpEndSection>(dlsym(handle_, "ATrace_endSection"));
if (!atrace_is_enabled_ || !atrace_begin_section_ ||
!atrace_end_section_) {
dlclose(handle_);
handle_ = nullptr;
}
}
}
~ATraceProfiler() override {
if (handle_) {
dlclose(handle_);
}
}
uint32_t BeginEvent(const char* tag, EventType event_type,
int64_t event_metadata1,
int64_t event_metadata2) override {
if (handle_ && atrace_is_enabled_()) {
std::string trace_event_tag = tag;
trace_event_tag += "@";
trace_event_tag += std::to_string(event_metadata1) + "/" +
std::to_string(event_metadata2);
atrace_begin_section_(trace_event_tag.c_str());
}
return 0;
}
void EndEvent(uint32_t event_handle) override {
if (handle_) {
atrace_end_section_();
}
}
private:
void* handle_;
FpIsEnabled atrace_is_enabled_;
FpBeginSection atrace_begin_section_;
FpEndSection atrace_end_section_;
};
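// Returns an ATrace profiler unconditionally when
// TFLITE_ENABLE_DEFAULT_PROFILER is defined. Otherwise, on Android, returns
// one only if the system property debug.tflite.trace is set to "1"; in all
// remaining cases returns nullptr.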
std::unique_ptr<tflite::Profiler> MaybeCreateATraceProfiler() {
#if defined(TFLITE_ENABLE_DEFAULT_PROFILER)
return std::unique_ptr<tflite::Profiler>(new ATraceProfiler());
#else
#if defined(__ANDROID__)
constexpr char kTraceProp[] = "debug.tflite.trace";
char trace_enabled[PROP_VALUE_MAX] = "";
int length = __system_property_get(kTraceProp, trace_enabled);
if (length == 1 && trace_enabled[0] == '1') {
return std::unique_ptr<tflite::Profiler>(new ATraceProfiler());
}
#endif
return nullptr;
#endif
}
}
} | #include "tensorflow/lite/profiling/atrace_profiler.h"
#if defined(__ANDROID__)
#include <sys/system_properties.h>
#endif
#include <gtest/gtest.h>
namespace tflite {
namespace profiling {
namespace {
TEST(ATraceProfilerTest, MaybeCreateATraceProfiler) {
auto initial_state_profiler = MaybeCreateATraceProfiler();
#if !defined(TFLITE_ENABLE_DEFAULT_PROFILER)
EXPECT_EQ(nullptr, initial_state_profiler.get());
#else
EXPECT_NE(nullptr, initial_state_profiler.get());
#endif
#if defined(__ANDROID__)
if (__system_property_set("debug.tflite.trace", "1") == 0) {
auto on_state_profiler = MaybeCreateATraceProfiler();
EXPECT_NE(nullptr, on_state_profiler.get());
}
if (__system_property_set("debug.tflite.trace", "0") == 0) {
auto off_state_profiler = MaybeCreateATraceProfiler();
#if !defined(TFLITE_ENABLE_DEFAULT_PROFILER)
EXPECT_EQ(nullptr, off_state_profiler.get());
#else
EXPECT_NE(nullptr, off_state_profiler.get());
#endif
}
#endif
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/profiling/atrace_profiler.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/profiling/atrace_profiler_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
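Note on usage: a minimal caller-side sketch of the factory in the record above, for orientation only. RunTracedOp, the "MyOp" tag, and the metadata values are hypothetical; MaybeCreateATraceProfiler, BeginEvent, and EndEvent are the APIs shown above, and EventType::DEFAULT is assumed from the tflite::Profiler base class.
#include <cstdint>
#include <memory>
#include "tensorflow/lite/core/api/profiler.h"
#include "tensorflow/lite/profiling/atrace_profiler.h"
void RunTracedOp() {
  // Null unless the default profiler is compiled in, or the Android system
  // property debug.tflite.trace is set to "1".
  std::unique_ptr<tflite::Profiler> profiler =
      tflite::profiling::MaybeCreateATraceProfiler();
  if (profiler == nullptr) return;
  // The metadata values are folded into the trace tag, here "MyOp@7/0".
  const uint32_t handle = profiler->BeginEvent(
      "MyOp", tflite::Profiler::EventType::DEFAULT,
      /*event_metadata1=*/7, /*event_metadata2=*/0);
  // ... run the traced work here ...
  profiler->EndEvent(handle);
}
With this wiring the event appears in an Android systrace capture whenever tracing is enabled, tagged with the op name and the two metadata values.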
bd82b62c-d379-4d92-a458-68403e5ddeac | cpp | tensorflow/tensorflow | eager_executor | tensorflow/core/common_runtime/eager/eager_executor.cc | tensorflow/core/common_runtime/eager/eager_executor_test.cc | #include "tensorflow/core/common_runtime/eager/eager_executor.h"
#include <forward_list>
#include <functional>
#include <memory>
#include <utility>
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/gtl/cleanup.h"
#include "tensorflow/core/util/env_var.h"
namespace tensorflow {
namespace {
bool IsAsyncWaitForRemoteFunctionEnabled() {
bool enabled = true;
TF_CHECK_OK(ReadBoolFromEnvVar("TF_ENABLE_ASYNC_WAIT_FOR_REMOTE_FUNCTION",
true, &enabled));
return enabled;
}
}
EagerExecutor::EagerExecutor(bool async, bool enable_streaming_enqueue,
int in_flight_nodes_limit)
: next_node_id_(0),
ok_(true),
thread_(async ? tensorflow::Env::Default()->StartThread(
tensorflow::ThreadOptions(), "eager_async_executor",
std::bind(&EagerExecutor::Run, this))
: nullptr),
last_eager_client_(nullptr),
enable_async_wait_for_remote_function_(
IsAsyncWaitForRemoteFunctionEnabled()),
enable_streaming_enqueue_(enable_streaming_enqueue),
in_flight_nodes_limit_(in_flight_nodes_limit) {
if (async && in_flight_nodes_limit_ > 0) {
VLOG(4) << "EagerExecutor InFlightNodes limit is set to "
<< in_flight_nodes_limit_;
}
}
EagerExecutor::~EagerExecutor() {
tensorflow::mutex_lock l(node_queue_mutex_);
state_ = ExecutorState::kShutDown;
nodes_pending_.notify_all();
for (const auto& cleanups_for_key : cleanups_) {
for (const std::function<void()>& cleanup : cleanups_for_key.second) {
cleanup();
}
}
}
Status EagerExecutor::ShutDown() {
{
bool has_thread;
Status status;
{
tensorflow::mutex_lock l(node_queue_mutex_);
if (state_ != ExecutorState::kShutDown) {
state_ = ExecutorState::kShuttingDown;
}
WaitForAllPendingNodesLocked(&l).IgnoreError();
state_ = ExecutorState::kShutDown;
has_thread = thread_ != nullptr;
status = status_;
if (has_thread) {
nodes_pending_.notify_all();
}
}
if (!has_thread) {
return status;
}
}
thread_exited_notification_.WaitForNotification();
return status();
}
const char* EagerExecutor::StateStringLocked() {
switch (state_) {
case ExecutorState::kActive:
return "Active";
case ExecutorState::kShuttingDown:
return "ShuttingDown";
case ExecutorState::kShutDown:
return "ShutDown";
}
}
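// Runs `node` inline on the calling thread. Returns an internal error for
// async executors or async nodes; on completion, waiters blocked on this
// node's id are notified.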
Status EagerExecutor::SyncExecute(EagerNode* node) {
if (Async()) {
return errors::Internal("SyncExecute does not support async execution.");
}
if (node->AsAsync() != nullptr) {
return errors::Internal("Executor does not support executing async nodes");
}
uint64 id = next_node_id_++;
Status s = node->Prepare();
if (!s.ok()) {
return s;
}
s = node->Run();
tensorflow::mutex_lock l(node_queue_mutex_);
NotifyWaiters(id);
return s;
}
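// Schedules `node`. In sync mode it runs immediately on this thread; in
// async mode it is queued for the background thread, and when
// in_flight_nodes_limit_ is positive the call blocks until the count of
// queued plus unfinished nodes falls below the limit.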
Status EagerExecutor::AddOrExecute(std::unique_ptr<EagerNode> node) {
Status status;
core::RefCountPtr<NodeItem> item(new NodeItem);
item->id = next_node_id_++;
item->node = std::move(node);
item->state = NodeState::kPENDING;
status = item->node->Prepare();
if (!status.ok()) {
item->node->Abort(status);
return status;
}
if (!Async()) {
return RunItem(std::move(item), false);
} else {
tensorflow::mutex_lock l(node_queue_mutex_);
DVLOG(3) << "Add node [id " << item->id << "]" << item->node->DebugString()
<< " with status: " << status_;
if (state_ != ExecutorState::kActive) {
status = errors::FailedPrecondition(
"EagerExecutor accepts new EagerNodes to run only in Active state. "
"Current state is '",
StateStringLocked(), "'");
} else {
status = status_;
if (status.ok()) {
node_queue_.push(std::move(item));
if (node_queue_.size() == 1) {
nodes_pending_.notify_all();
}
if (in_flight_nodes_limit_ == 0) {
return absl::OkStatus();
}
while (true) {
int64_t in_flight_nodes_count =
node_queue_.size() + unfinished_nodes_.size();
if (in_flight_nodes_count < in_flight_nodes_limit_) {
break;
}
VLOG(4) << "Hitting in-flight node limit node_queue_.size() = "
<< node_queue_.size()
<< " unfinished_nodes_.size() = " << unfinished_nodes_.size()
<< ".";
nodes_done_.wait(l);
}
return absl::OkStatus();
}
}
}
item->node->Abort(status);
return status;
}
tensorflow::Status EagerExecutor::WaitForAllPendingNodes() {
tensorflow::mutex_lock l(node_queue_mutex_);
return WaitForAllPendingNodesLocked(&l);
}
tensorflow::Status EagerExecutor::WaitForAllPendingNodesLocked(
mutex_lock* lock) {
tensorflow::condition_variable cond;
if (!status_.ok()) return status_;
if (node_queue_.empty() && unfinished_nodes_.empty()) return absl::OkStatus();
DCHECK(Async() || node_queue_.empty());
auto last_id = next_node_id_ - 1;
DVLOG(3) << "Wait for Node: [id " << last_id << "] ";
node_done_notifications_.insert(std::make_pair(last_id, &cond));
cond.wait(*lock);
return status_;
}
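// Resets a latched error status so the executor can accept new work again.
// Callers must have already drained or aborted all pending nodes.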
void EagerExecutor::ClearError() {
if (ok()) return;
tensorflow::mutex_lock l(node_queue_mutex_);
DCHECK(node_done_notifications_.empty());
DCHECK(node_queue_.empty());
status_ = absl::OkStatus();
ok_ = true;
last_eager_client_ = nullptr;
nodes_pending_.notify_all();
}
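// Marks `item` complete and removes it from the queue (when `from_queue`) or
// from the unfinished map (for async nodes). If a fatal error occurred, the
// error is latched into status_, every remaining pending and unfinished node
// is aborted with it, and waiters are woken.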
void EagerExecutor::NodeDone(const core::RefCountPtr<NodeItem>& item,
const Status& status, bool from_queue) {
DVLOG(3) << "Node Done: [id " << item->id << "] " << item->node->DebugString()
<< " with status: " << status;
DCHECK(item->state != NodeState::kDONE);
item->state = NodeState::kDONE;
bool async = item->node->AsAsync() != nullptr;
if (status.ok() && !from_queue && !async) {
return;
}
std::forward_list<core::RefCountPtr<NodeItem>> items_to_destroy;
{
mutex_lock l(node_queue_mutex_);
if (!status_.ok()) return;
bool need_notification = from_queue;
if (from_queue) {
DCHECK(!node_queue_.empty() && item.get() == node_queue_.front().get());
node_queue_.pop();
} else if (async) {
need_notification = item->id == unfinished_nodes_.begin()->first;
auto result = unfinished_nodes_.erase(item->id);
if (result == 0) return;
}
if (!status.ok() && item->node->Fatal()) {
need_notification = true;
status_ = status;
ok_ = false;
if (Async()) {
errors::AppendToMessage(&status_,
"Encountered when executing an operation using "
"EagerExecutor. This error cancels all future "
"operations and poisons their output tensors.");
}
while (!node_queue_.empty()) {
items_to_destroy.push_front(std::move(node_queue_.front()));
node_queue_.pop();
}
for (auto& it : unfinished_nodes_) {
items_to_destroy.push_front(std::move(it.second));
}
unfinished_nodes_.clear();
}
if (need_notification) {
NotifyWaiters(item->id);
}
nodes_done_.notify_all();
}
for (auto& item : items_to_destroy) {
item->node->Abort(status);
}
}
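// Wakes threads blocked in WaitForAllPendingNodes. Waiters are keyed by node
// id; all waiters up to the id of the last contiguously completed node are
// notified, or every waiter when the executor is in an error state.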
void EagerExecutor::NotifyWaiters(uint64 id) {
if (!node_done_notifications_.empty()) {
uint64 upperbound_id = 0;
if (!unfinished_nodes_.empty()) {
upperbound_id = unfinished_nodes_.begin()->first - 1;
} else if (!node_queue_.empty()) {
upperbound_id = node_queue_.front()->id - 1;
} else {
upperbound_id = next_node_id_ - 1;
}
if (upperbound_id < id) {
return;
}
DVLOG(3) << "Notify node done: [id " << id << " to " << upperbound_id
<< "] ";
const auto range =
status_.ok() ? std::make_pair(
node_done_notifications_.lower_bound(id),
node_done_notifications_.upper_bound(upperbound_id))
: std::make_pair(node_done_notifications_.begin(),
node_done_notifications_.end());
for (auto it = range.first; it != range.second; ++it) {
it->second->notify_all();
}
node_done_notifications_.erase(range.first, range.second);
}
}
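// Body of the background thread used in async mode: parks while the queue is
// empty or the executor holds an error, otherwise dequeues and executes
// nodes in FIFO order until shutdown.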
void EagerExecutor::Run() {
auto thread_exited_notifier =
gtl::MakeCleanup([this] { thread_exited_notification_.Notify(); });
while (true) {
core::RefCountPtr<NodeItem> curr_item;
{
tensorflow::mutex_lock l(node_queue_mutex_);
while (node_queue_.empty() || !status_.ok()) {
if (state_ == ExecutorState::kShutDown) return;
nodes_pending_.wait(l);
}
curr_item.reset(node_queue_.front().get());
curr_item->Ref();
}
Status status = RunItem(std::move(curr_item), true);
if (!status.ok()) {
VLOG(1) << "Failed to run item: " << status;
}
}
}
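// Executes one node. Synchronous nodes run inline and complete here; async
// nodes are moved to the unfinished map and complete later via the RunAsync
// callback. When async waiting is enabled, switching between remote eager
// clients first synchronizes the previous client's executors.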
Status EagerExecutor::RunItem(core::RefCountPtr<NodeItem> item,
bool from_queue) {
DVLOG(3) << "Running Node: [id " << item->id << "] "
<< item->node->DebugString();
AsyncRemoteExecuteNode* async_remote_node =
item->node->AsAsyncRemoteExecuteNode();
if (enable_async_wait_for_remote_function_) {
if (async_remote_node != nullptr) {
if (last_eager_client_ != nullptr &&
async_remote_node->eager_client() != nullptr &&
last_eager_client_ != async_remote_node->eager_client()) {
DVLOG(3) << "Executing Sync Executor for node" << item->id;
tensorflow::Status status = async_remote_node->SyncExecutors();
if (!status.ok()) {
NodeDone(item, status, from_queue);
return status;
}
last_eager_client_ = nullptr;
}
if (async_remote_node->eager_client() != nullptr &&
async_remote_node->needs_remote_inputs() &&
async_remote_node->allow_multiple_pending_requests()) {
last_eager_client_ = async_remote_node->eager_client();
}
}
}
AsyncEagerNode* async_node = item->node->AsAsync();
if (async_node == nullptr) {
tensorflow::Status status = item->node->Run();
NodeDone(item, status, from_queue);
return status;
}
item->state = NodeState::kSCHEDULED;
auto async_ref = item.get();
async_ref->Ref();
TF_RETURN_IF_ERROR(MoveToUnfinished(std::move(item), from_queue));
async_node->RunAsync([this, async_ref](const Status& status) {
core::RefCountPtr<NodeItem> async_item(async_ref);
NodeDone(async_item, status, false);
});
return status();
}
Status EagerExecutor::MoveToUnfinished(core::RefCountPtr<NodeItem> item,
bool from_queue) {
tensorflow::mutex_lock l(node_queue_mutex_);
if (!status_.ok()) {
return status_;
}
if (from_queue) {
DCHECK(!node_queue_.empty() && item.get() == node_queue_.front().get());
node_queue_.pop();
}
DVLOG(3) << "Add Node: [id " << item->id << "] to unfinished map.";
unfinished_nodes_.emplace_hint(unfinished_nodes_.end(), item->id,
std::move(item));
return absl::OkStatus();
}
void EagerExecutor::AddCleanup(intptr_t key, std::function<void()> callback) {
cleanups_[key].push_back(callback);
}
void EagerExecutor::RemoveCleanups(intptr_t key) { cleanups_.erase(key); }
} | #include "tensorflow/core/common_runtime/eager/eager_executor.h"
#include <memory>
#include <utility>
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/status_matchers.h"
#include "tensorflow/core/platform/test.h"
#include "tsl/platform/status.h"
#include "tsl/protobuf/error_codes.pb.h"
namespace tensorflow {
namespace {
class TestState {
public:
enum State { kSuccess, kNotRun, kFailure };
TestState() : state_(kNotRun) {}
TestState(const TestState&) = delete;
TestState& operator=(const TestState&) = delete;
State read_state() { return state_; }
void update_success_state() { state_ = kSuccess; }
void update_run_error_state() { state_ = kFailure; }
private:
State state_;
};
class TestEagerNode : public EagerNode {
public:
explicit TestEagerNode(TestState* state,
Status prepare_return_status = absl::OkStatus(),
Status run_return_status = absl::OkStatus())
: state_(state),
prepare_return_status_(prepare_return_status),
run_return_status_(run_return_status) {}
TestEagerNode(const TestEagerNode&) = delete;
TestEagerNode& operator=(const TestEagerNode&) = delete;
Status Prepare() override { return prepare_return_status_; }
Status Run() override {
if (run_return_status_.ok()) {
state_->update_success_state();
} else {
state_->update_run_error_state();
}
return run_return_status_;
};
void Abort(Status status) override {}
string DebugString() const override { return "testEagerNode"; }
private:
TestState* state_;
Status prepare_return_status_;
Status run_return_status_;
};
class TestAsyncEagerNode : public AsyncEagerNode {
public:
explicit TestAsyncEagerNode(TestState* state,
Status prepare_return_status = absl::OkStatus(),
Status run_return_status = absl::OkStatus())
: state_(state),
prepare_return_status_(prepare_return_status),
run_return_status_(run_return_status) {}
TestAsyncEagerNode(const TestAsyncEagerNode&) = delete;
TestAsyncEagerNode& operator=(const TestAsyncEagerNode&) = delete;
Status Prepare() override { return prepare_return_status_; }
void RunAsync(StatusCallback done) override {
if (run_return_status_.ok()) {
state_->update_success_state();
} else {
state_->update_run_error_state();
}
done(run_return_status_);
};
void Abort(Status status) override {}
string DebugString() const override { return "testAsyncEagerNode"; }
private:
TestState* state_;
Status prepare_return_status_;
Status run_return_status_;
};
TEST(EagerExecutorTest, TestSyncExecutorWithEagerNode) {
  auto sync_executor = std::make_unique<EagerExecutor>(
      /*async=*/false, /*enable_streaming_enqueue=*/true);
auto state = std::make_unique<TestState>();
auto node = std::make_unique<TestEagerNode>(state.get());
TF_ASSERT_OK(sync_executor->AddOrExecute(std::move(node)));
ASSERT_EQ(state->read_state(), TestState::State::kSuccess);
}
TEST(EagerExecutorTest, TestSyncExecuteMethodFailureCases) {
  auto async_executor = std::make_unique<EagerExecutor>(
      /*async=*/true, /*enable_streaming_enqueue=*/true);
auto state = std::make_unique<TestState>();
auto sync_node = std::make_unique<TestEagerNode>(state.get());
EXPECT_THAT(async_executor->SyncExecute(sync_node.get()),
tensorflow::testing::StatusIs(tensorflow::error::INTERNAL));
ASSERT_EQ(state->read_state(), TestState::kNotRun);
  auto sync_executor = std::make_unique<EagerExecutor>(
      /*async=*/false, /*enable_streaming_enqueue=*/true);
state = std::make_unique<TestState>();
auto async_node = std::make_unique<TestAsyncEagerNode>(state.get());
EXPECT_THAT(sync_executor->SyncExecute(async_node.get()),
tensorflow::testing::StatusIs(tensorflow::error::INTERNAL));
ASSERT_EQ(state->read_state(), TestState::State::kNotRun);
}
TEST(EagerExecutorTest, TestSyncExecuteMethodSuccessCase) {
  auto sync_executor = std::make_unique<EagerExecutor>(
      /*async=*/false, /*enable_streaming_enqueue=*/true);
auto state = std::make_unique<TestState>();
auto node = std::make_unique<TestEagerNode>(state.get());
TF_ASSERT_OK(sync_executor->SyncExecute(node.get()));
ASSERT_EQ(state->read_state(), TestState::State::kSuccess);
}
TEST(EagerExecutorTest, TestSyncExecutorFailPrepare) {
  auto sync_executor = std::make_unique<EagerExecutor>(
      /*async=*/false, /*enable_streaming_enqueue=*/true);
auto state = std::make_unique<TestState>();
auto node = std::make_unique<TestEagerNode>(state.get(),
errors::InvalidArgument("test"));
auto status = sync_executor->AddOrExecute(std::move(node));
ASSERT_EQ(status.code(), absl::StatusCode::kInvalidArgument);
ASSERT_EQ(state->read_state(), TestState::State::kNotRun);
}
TEST(EagerExecutorTest, TestSyncExecutorFailRun) {
  auto sync_executor = std::make_unique<EagerExecutor>(
      /*async=*/false, /*enable_streaming_enqueue=*/true);
auto state = std::make_unique<TestState>();
auto node = std::make_unique<TestEagerNode>(state.get(), absl::OkStatus(),
errors::Internal("test"));
auto status = sync_executor->AddOrExecute(std::move(node));
ASSERT_EQ(status.code(), tensorflow::error::INTERNAL);
ASSERT_EQ(state->read_state(), TestState::State::kFailure);
}
TEST(EagerExecutorTest, TestAsyncExecutorWithAsyncEagerNode) {
  auto async_executor = std::make_unique<EagerExecutor>(
      /*async=*/true, /*enable_streaming_enqueue=*/true);
auto state = std::make_unique<TestState>();
auto node = std::make_unique<TestAsyncEagerNode>(state.get());
TF_ASSERT_OK(async_executor->AddOrExecute(std::move(node)));
TF_ASSERT_OK(async_executor->WaitForAllPendingNodes());
ASSERT_EQ(state->read_state(), TestState::State::kSuccess);
}
TEST(EagerExecutorTest, TestAsyncExecutorWithInFlightRequestLimit) {
  auto async_executor = std::make_unique<EagerExecutor>(
      /*async=*/true, /*enable_streaming_enqueue=*/true,
      /*in_flight_nodes_limit=*/1);
auto state = std::make_unique<TestState>();
auto node = std::make_unique<TestAsyncEagerNode>(state.get());
TF_ASSERT_OK(async_executor->AddOrExecute(std::move(node)));
auto node1 = std::make_unique<TestAsyncEagerNode>(state.get());
TF_ASSERT_OK(async_executor->AddOrExecute(std::move(node1)));
TF_ASSERT_OK(async_executor->WaitForAllPendingNodes());
ASSERT_EQ(state->read_state(), TestState::State::kSuccess);
}
TEST(EagerExecutorTest, TestAsyncExecutorWithEagerNode) {
  auto async_executor = std::make_unique<EagerExecutor>(
      /*async=*/true, /*enable_streaming_enqueue=*/true);
auto state = std::make_unique<TestState>();
auto node = std::make_unique<TestEagerNode>(state.get());
TF_ASSERT_OK(async_executor->AddOrExecute(std::move(node)));
TF_ASSERT_OK(async_executor->WaitForAllPendingNodes());
ASSERT_EQ(state->read_state(), TestState::State::kSuccess);
}
TEST(EagerExecutorTest, TestAsyncExecutorFailPrepare) {
  auto async_executor = std::make_unique<EagerExecutor>(
      /*async=*/true, /*enable_streaming_enqueue=*/true);
auto state = std::make_unique<TestState>();
auto node = std::make_unique<TestEagerNode>(state.get(),
errors::InvalidArgument("test"));
auto status = async_executor->AddOrExecute(std::move(node));
ASSERT_EQ(status.code(), absl::StatusCode::kInvalidArgument);
ASSERT_EQ(state->read_state(), TestState::State::kNotRun);
}
TEST(EagerExecutorTest, TestAsyncExecutorFailRun) {
  auto async_executor = std::make_unique<EagerExecutor>(
      /*async=*/true, /*enable_streaming_enqueue=*/true);
auto state = std::make_unique<TestState>();
auto node = std::make_unique<TestEagerNode>(state.get(), absl::OkStatus(),
errors::Internal("test"));
TF_ASSERT_OK(async_executor->AddOrExecute(std::move(node)));
auto status = async_executor->WaitForAllPendingNodes();
ASSERT_EQ(status.code(), tensorflow::error::INTERNAL);
ASSERT_EQ(state->read_state(), TestState::State::kFailure);
}
TEST(EagerExecutorTest, TestAsyncExecutorFailPrepareWithAsyncNode) {
  auto async_executor = std::make_unique<EagerExecutor>(
      /*async=*/true, /*enable_streaming_enqueue=*/true);
auto state = std::make_unique<TestState>();
auto node = std::make_unique<TestAsyncEagerNode>(
state.get(), errors::InvalidArgument("test"));
auto status = async_executor->AddOrExecute(std::move(node));
ASSERT_EQ(status.code(), absl::StatusCode::kInvalidArgument);
ASSERT_EQ(state->read_state(), TestState::State::kNotRun);
}
TEST(EagerExecutorTest, TestAsyncExecutorFailRunWithAsyncNode) {
  auto async_executor = std::make_unique<EagerExecutor>(
      /*async=*/true, /*enable_streaming_enqueue=*/true);
auto state = std::make_unique<TestState>();
auto node = std::make_unique<TestAsyncEagerNode>(
state.get(), absl::OkStatus(), errors::Internal("test"));
TF_ASSERT_OK(async_executor->AddOrExecute(std::move(node)));
auto status = async_executor->WaitForAllPendingNodes();
ASSERT_EQ(status.code(), tensorflow::error::INTERNAL);
ASSERT_EQ(state->read_state(), TestState::State::kFailure);
}
TEST(EagerExecutorTest, TestAsyncExecutorAddNodesAfterShutdown) {
  auto async_executor = std::make_unique<EagerExecutor>(
      /*async=*/true, /*enable_streaming_enqueue=*/true);
auto state = std::make_unique<TestState>();
auto node = std::make_unique<TestAsyncEagerNode>(state.get());
TF_ASSERT_OK(async_executor->ShutDown());
EXPECT_THAT(
async_executor->AddOrExecute(std::move(node)),
tensorflow::testing::StatusIs(tensorflow::error::FAILED_PRECONDITION));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/eager/eager_executor.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/eager/eager_executor_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b4905ce5-1e2f-4847-a006-430bc3261142 | cpp | tensorflow/tensorflow | max_pool_with_argmax | tensorflow/lite/kernels/perception/max_pool_with_argmax.cc | tensorflow/lite/kernels/perception/max_pool_with_argmax_test.cc | #include <algorithm>
#include <string>
#include "flatbuffers/flexbuffers.h"
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/common.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/kernels/internal/runtime_shape.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/internal/types.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/kernels/padding.h"
namespace tflite {
namespace ops {
namespace custom {
namespace max_pool_with_argmax {
namespace {
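// Reference max pooling that also records, for each output element, the
// position of the maximum flattened within its batch as
// (y * input_width + x) * depth + channel.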
template <typename T>
inline void MaxPool(const PoolParams& params, const RuntimeShape& input_shape,
const RuntimeShape& output_shape, const T* input_data,
T* output_data, int32_t* indices_data) {
TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4);
TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4);
const int32_t batches = MatchingDim(input_shape, 0, output_shape, 0);
const int32_t depth = MatchingDim(input_shape, 3, output_shape, 3);
const int32_t input_height = input_shape.Dims(1);
const int32_t input_width = input_shape.Dims(2);
const int32_t output_height = output_shape.Dims(1);
const int32_t output_width = output_shape.Dims(2);
const int32_t stride_height = params.stride_height;
const int32_t stride_width = params.stride_width;
for (int32_t batch = 0; batch < batches; ++batch) {
for (int32_t out_y = 0; out_y < output_height; ++out_y) {
for (int32_t out_x = 0; out_x < output_width; ++out_x) {
for (int32_t channel = 0; channel < depth; ++channel) {
const int32_t in_x_origin =
(out_x * stride_width) - params.padding_values.width;
const int32_t in_y_origin =
(out_y * stride_height) - params.padding_values.height;
const int32_t filter_x_start = std::max(0, -in_x_origin);
const int32_t filter_x_end =
std::min(params.filter_width, input_width - in_x_origin);
const int32_t filter_y_start = std::max(0, -in_y_origin);
const int32_t filter_y_end =
std::min(params.filter_height, input_height - in_y_origin);
float max = std::numeric_limits<float>::lowest();
int32_t max_x = 0;
int32_t max_y = 0;
for (int32_t filter_y = filter_y_start; filter_y < filter_y_end;
++filter_y) {
for (int32_t filter_x = filter_x_start; filter_x < filter_x_end;
++filter_x) {
const int32_t in_x = in_x_origin + filter_x;
const int32_t in_y = in_y_origin + filter_y;
float cur =
input_data[Offset(input_shape, batch, in_y, in_x, channel)];
if (cur > max) {
max = cur;
max_x = in_x;
max_y = in_y;
}
}
}
int32_t output_idx =
Offset(output_shape, batch, out_y, out_x, channel);
output_data[output_idx] = ActivationFunctionWithMinMax(
max, params.float_activation_min, params.float_activation_max);
indices_data[output_idx] =
(max_y * input_width + max_x) * depth + channel;
}
}
}
}
}
}
constexpr int kDataInputTensor = 0;
constexpr int kDataOutputTensor = 0;
constexpr int kIndicesOutputTensor = 1;
constexpr const char kIncludeBatchStr[] = "include_batch_in_index";
constexpr const char kPoolSizeStr[] = "ksize";
constexpr const char kStridesStr[] = "strides";
constexpr const char kPaddingStr[] = "padding";
constexpr const char kPaddingSameStr[] = "SAME";
constexpr const char kPaddingValidStr[] = "VALID";
struct OpData {
TfLitePoolParams params;
bool include_batch_in_index;
};
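// Parses the flexbuffer custom options: "padding" ("SAME" or "VALID"),
// the 4-element "ksize" and "strides" vectors (whose batch and channel
// entries must be 1), and the "include_batch_in_index" flag.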
void* Init(TfLiteContext* context, const char* buffer, size_t length) {
const flexbuffers::Map& m =
flexbuffers::GetRoot(reinterpret_cast<const uint8_t*>(buffer), length)
.AsMap();
OpData* op_data = new OpData;
op_data->params.computed.padding = TfLitePaddingValues{0, 0, 0, 0};
op_data->include_batch_in_index = m[kIncludeBatchStr].AsBool();
op_data->params.activation = kTfLiteActNone;
const std::string padding = m[kPaddingStr].AsString().str();
if (padding == kPaddingValidStr) {
op_data->params.padding = kTfLitePaddingValid;
} else if (padding == kPaddingSameStr) {
op_data->params.padding = kTfLitePaddingSame;
} else {
op_data->params.padding = kTfLitePaddingUnknown;
}
const auto pool_size = m[kPoolSizeStr].AsTypedVector();
TFLITE_CHECK_EQ(pool_size.size(), 4);
TFLITE_CHECK_EQ(pool_size[0].AsInt32(), 1);
TFLITE_CHECK_EQ(pool_size[3].AsInt32(), 1);
op_data->params.filter_height = pool_size[1].AsInt32();
op_data->params.filter_width = pool_size[2].AsInt32();
const auto strides = m[kStridesStr].AsTypedVector();
TFLITE_CHECK_EQ(strides.size(), 4);
TFLITE_CHECK_EQ(strides[0].AsInt32(), 1);
TFLITE_CHECK_EQ(strides[3].AsInt32(), 1);
op_data->params.stride_height = strides[1].AsInt32();
op_data->params.stride_width = strides[2].AsInt32();
return op_data;
}
void Free(TfLiteContext* context, void* buffer) {
delete reinterpret_cast<OpData*>(buffer);
}
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
OpData* op_data = reinterpret_cast<OpData*>(node->user_data);
TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 2);
TfLiteTensor *output, *indices;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kDataOutputTensor, &output));
TF_LITE_ENSURE_OK(
context, GetOutputSafe(context, node, kIndicesOutputTensor, &indices));
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kDataInputTensor, &input));
TF_LITE_ENSURE_EQ(context, NumDimensions(input), 4);
TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type);
TF_LITE_ENSURE_TYPES_EQ(context, input->type, kTfLiteFloat32);
TF_LITE_ENSURE(context, indices->type == kTfLiteInt32);
TF_LITE_ENSURE(context, op_data->params.padding != kTfLitePaddingUnknown);
TF_LITE_ENSURE_MSG(
context, !op_data->include_batch_in_index,
"Include batch dimension in flattened index is not yet supported.");
int batches = input->dims->data[0];
int height = input->dims->data[1];
int width = input->dims->data[2];
int channels_out = input->dims->data[3];
int out_width, out_height;
op_data->params.computed.padding = ComputePaddingHeightWidth(
op_data->params.stride_height, op_data->params.stride_width, 1, 1, height,
width, op_data->params.filter_height, op_data->params.filter_width,
op_data->params.padding, &out_height, &out_width);
TfLiteIntArray* output_size = TfLiteIntArrayCreate(4);
output_size->data[0] = batches;
output_size->data[1] = out_height;
output_size->data[2] = out_width;
output_size->data[3] = channels_out;
TfLiteIntArray* indices_size = TfLiteIntArrayCopy(output_size);
TF_LITE_ENSURE_STATUS(context->ResizeTensor(context, indices, indices_size));
return context->ResizeTensor(context, output, output_size);
}
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
OpData* op_data = reinterpret_cast<OpData*>(node->user_data);
float activation_min, activation_max;
CalculateActivationRange(op_data->params.activation, &activation_min,
&activation_max);
tflite::PoolParams op_params;
op_params.stride_height = op_data->params.stride_height;
op_params.stride_width = op_data->params.stride_width;
op_params.filter_height = op_data->params.filter_height;
op_params.filter_width = op_data->params.filter_width;
op_params.padding_values.height = op_data->params.computed.padding.height;
op_params.padding_values.width = op_data->params.computed.padding.width;
op_params.float_activation_min = activation_min;
op_params.float_activation_max = activation_max;
TfLiteTensor *output, *indices;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kDataOutputTensor, &output));
TF_LITE_ENSURE_OK(
context, GetOutputSafe(context, node, kIndicesOutputTensor, &indices));
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kDataInputTensor, &input));
switch (input->type) {
case kTfLiteFloat32:
MaxPool<float>(op_params, GetTensorShape(input), GetTensorShape(output),
GetTensorData<float>(input), GetTensorData<float>(output),
GetTensorData<int32_t>(indices));
break;
default:
TF_LITE_KERNEL_LOG(context, "Type %s not currently supported.",
TfLiteTypeGetName(input->type));
return kTfLiteError;
}
return kTfLiteOk;
}
}
TfLiteRegistration* RegisterMaxPoolWithArgmax() {
static TfLiteRegistration r = {
max_pool_with_argmax::Init, max_pool_with_argmax::Free,
max_pool_with_argmax::Prepare, max_pool_with_argmax::Eval};
return &r;
}
TfLiteRegistration* Register_MAX_POOL_WITH_ARGMAX() {
return RegisterMaxPoolWithArgmax();
}
}
}
} | #include <cstdint>
#include <memory>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "flatbuffers/flexbuffers.h"
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/interpreter.h"
#include "tensorflow/lite/kernels/perception/perception_ops.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace ops {
namespace custom {
namespace {
using testing::ElementsAreArray;
class MaxpoolingWithArgMaxOpModel : public SingleOpModel {
public:
MaxpoolingWithArgMaxOpModel(const TensorData& input, int stride_height,
int stride_width, int filter_height,
int filter_width, TfLitePadding padding,
const TensorData& output,
const TensorData& indices) {
input_ = AddInput(input);
output_ = AddOutput(output);
indices_ = AddOutput(indices);
std::vector<uint8_t> custom_option = CreateCustomOptions(
stride_height, stride_width, filter_height, filter_width, padding);
SetCustomOp("MaxPoolWithArgmax", custom_option, RegisterMaxPoolWithArgmax);
BuildInterpreter({GetShape(input_)});
}
void SetInput(const std::vector<float>& data) {
PopulateTensor(input_, data);
}
std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
std::vector<int> GetOutputShape() { return GetTensorShape(output_); }
std::vector<int32_t> GetIndices() { return ExtractVector<int32_t>(indices_); }
std::vector<int> GetIndicesShape() { return GetTensorShape(indices_); }
protected:
int input_;
int output_;
int indices_;
private:
std::vector<uint8_t> CreateCustomOptions(int stride_height, int stride_width,
int filter_height, int filter_width,
TfLitePadding padding) {
auto flex_builder = std::make_unique<flexbuffers::Builder>();
size_t map_start = flex_builder->StartMap();
flex_builder->Bool("include_batch_in_index", false);
if (padding == kTfLitePaddingValid) {
flex_builder->String("padding", "VALID");
} else {
flex_builder->String("padding", "SAME");
}
auto start = flex_builder->StartVector("ksize");
flex_builder->Add(1);
flex_builder->Add(filter_height);
flex_builder->Add(filter_width);
flex_builder->Add(1);
flex_builder->EndVector(start, true, false);
auto strides_start = flex_builder->StartVector("strides");
flex_builder->Add(1);
flex_builder->Add(stride_height);
flex_builder->Add(stride_width);
flex_builder->Add(1);
flex_builder->EndVector(strides_start, true, false);
flex_builder->EndMap(map_start);
flex_builder->Finish();
return flex_builder->GetBuffer();
}
};
TEST(MaxpoolWithArgMaxTest, UnsupportedInt64Test) {
EXPECT_DEATH_IF_SUPPORTED(MaxpoolingWithArgMaxOpModel model(
                                /*input=*/{TensorType_FLOAT32, {1, 2, 4, 1}},
                                /*stride_height=*/2, /*stride_width=*/2,
                                /*filter_height=*/2, /*filter_width=*/2,
                                /*padding=*/kTfLitePaddingSame,
                                /*output=*/{TensorType_FLOAT32, {}},
                                /*indices=*/{TensorType_INT64, {}});
, "indices->type == kTfLiteInt32 was not true.");
}
TEST(MaxpoolWithArgMaxTest, SimpleTest) {
MaxpoolingWithArgMaxOpModel model(
      /*input=*/{TensorType_FLOAT32, {1, 2, 4, 1}},
      /*stride_height=*/2, /*stride_width=*/2,
      /*filter_height=*/2, /*filter_width=*/2,
      /*padding=*/kTfLitePaddingSame,
      /*output=*/{TensorType_FLOAT32, {}},
      /*indices=*/{TensorType_INT32, {}});
model.SetInput({0, 13, 2, 0, 0, 1, 4, 0});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({1, 1, 2, 1}));
EXPECT_THAT(model.GetOutput(), ElementsAreArray({13, 4}));
EXPECT_THAT(model.GetIndicesShape(), ElementsAreArray({1, 1, 2, 1}));
EXPECT_THAT(model.GetIndices(), ElementsAreArray({1, 6}));
}
TEST(MaxpoolWithArgMaxTest, Strides2x1Test) {
MaxpoolingWithArgMaxOpModel model(
      /*input=*/{TensorType_FLOAT32, {1, 4, 2, 2}},
      /*stride_height=*/2, /*stride_width=*/1,
      /*filter_height=*/2, /*filter_width=*/2,
      /*padding=*/kTfLitePaddingSame,
      /*output=*/{TensorType_FLOAT32, {}},
      /*indices=*/{TensorType_INT32, {}});
model.SetInput({1, 0, 0, 2, 3, 0, 0, 4, 5, 0, 0, 6, 7, 0, 0, 8});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({1, 2, 2, 2}));
EXPECT_THAT(model.GetOutput(), ElementsAreArray({3, 4, 0, 4, 7, 8, 0, 8}));
EXPECT_THAT(model.GetIndicesShape(), ElementsAreArray({1, 2, 2, 2}));
EXPECT_THAT(model.GetIndices(),
ElementsAreArray({4, 7, 2, 7, 12, 15, 10, 15}));
}
TEST(MaxpoolWithArgMaxTest, Strides2x2Test) {
MaxpoolingWithArgMaxOpModel model(
      /*input=*/{TensorType_FLOAT32, {1, 4, 8, 1}},
      /*stride_height=*/2, /*stride_width=*/2,
      /*filter_height=*/2, /*filter_width=*/2,
      /*padding=*/kTfLitePaddingSame,
      /*output=*/{TensorType_FLOAT32, {}},
      /*indices=*/{TensorType_INT32, {}});
model.SetInput({1, 0, 0, 0, 0, 2, 0, 0, 0, 0, 3, 0, 0, 4, 0, 0,
0, 0, 0, 5, 6, 0, 0, 0, 0, 0, 0, 7, 0, 0, 0, 8});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({1, 2, 4, 1}));
EXPECT_THAT(model.GetOutput(), ElementsAreArray({1, 3, 4, 0, 0, 7, 6, 8}));
EXPECT_THAT(model.GetIndicesShape(), ElementsAreArray({1, 2, 4, 1}));
EXPECT_THAT(model.GetIndices(),
ElementsAreArray({0, 10, 13, 6, 16, 27, 20, 31}));
}
TEST(MaxpoolWithArgMaxTest, Strides2x2UnfitTest) {
MaxpoolingWithArgMaxOpModel model(
      /*input=*/{TensorType_FLOAT32, {1, 4, 7, 1}},
      /*stride_height=*/2, /*stride_width=*/2,
      /*filter_height=*/2, /*filter_width=*/2,
      /*padding=*/kTfLitePaddingSame,
      /*output=*/{TensorType_FLOAT32, {}},
      /*indices=*/{TensorType_INT32, {}});
model.SetInput({1, 0, 0, 0, 0, 2, 0, 0, 0, 0, 3, 0, 0, 4,
0, 0, 0, 0, 0, 5, 6, 0, 0, 0, 0, 0, 0, 7});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({1, 2, 4, 1}));
EXPECT_THAT(model.GetOutput(), ElementsAreArray({1, 3, 2, 4, 0, 0, 5, 7}));
EXPECT_THAT(model.GetIndicesShape(), ElementsAreArray({1, 2, 4, 1}));
EXPECT_THAT(model.GetIndices(),
ElementsAreArray({0, 10, 5, 13, 14, 16, 19, 27}));
}
TEST(MaxpoolWithArgMaxTest, PaddingValidTest) {
MaxpoolingWithArgMaxOpModel model(
      /*input=*/{TensorType_FLOAT32, {1, 4, 5, 1}},
      /*stride_height=*/2, /*stride_width=*/2,
      /*filter_height=*/2, /*filter_width=*/3,
      /*padding=*/kTfLitePaddingValid,
      /*output=*/{TensorType_FLOAT32, {}},
      /*indices=*/{TensorType_INT32, {}});
model.SetInput(
{0, 0, 0, 0, 0, 0, 7, 0, 0, 10, 0, 0, 0, 0, 0, 0, 20, 0, 0, 19});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({1, 2, 2, 1}));
EXPECT_THAT(model.GetOutput(), ElementsAreArray({7, 10, 20, 19}));
EXPECT_THAT(model.GetIndicesShape(), ElementsAreArray({1, 2, 2, 1}));
EXPECT_THAT(model.GetIndices(), ElementsAreArray({6, 9, 16, 19}));
}
TEST(MaxpoolWithArgMaxTest, PaddingValidUnfitTest) {
MaxpoolingWithArgMaxOpModel model(
      /*input=*/{TensorType_FLOAT32, {1, 4, 6, 1}},
      /*stride_height=*/2, /*stride_width=*/2,
      /*filter_height=*/2, /*filter_width=*/3,
      /*padding=*/kTfLitePaddingValid,
      /*output=*/{TensorType_FLOAT32, {}},
      /*indices=*/{TensorType_INT32, {}});
model.SetInput({0, 0, 0, 0, 0, 0, 7, 0, 0, 10, 0, 0,
0, 0, 0, 0, 20, 0, 0, 19, 24, 1, 2, 44});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({1, 2, 2, 1}));
EXPECT_THAT(model.GetOutput(), ElementsAreArray({7, 10, 24, 24}));
EXPECT_THAT(model.GetIndicesShape(), ElementsAreArray({1, 2, 2, 1}));
EXPECT_THAT(model.GetIndices(), ElementsAreArray({6, 9, 20, 20}));
}
TEST(MaxpoolWithArgMaxTest, InputWithBatchTest) {
MaxpoolingWithArgMaxOpModel model(
      /*input=*/{TensorType_FLOAT32, {2, 4, 12, 2}},
      /*stride_height=*/2, /*stride_width=*/3,
      /*filter_height=*/2, /*filter_width=*/2,
      /*padding=*/kTfLitePaddingSame,
      /*output=*/{TensorType_FLOAT32, {}},
      /*indices=*/{TensorType_INT32, {}});
model.SetInput({0, 0, 1, 0, 0, 0, 0, 0, 3, 4, 0, 0, 5, 0, 0, 6,
0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 7, 0, 0, 8, 9, 0, 0, 10,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 15, 0,
0, 16, 0, 0, 0, 0, 0, 0, 11, 0, 0, 12, 0, 0, 0, 14,
13, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
17, 18, 0, 0, 0, 30, 0, 20, 0, 0, 0, 0, 0, 0, 21, 0,
0, 0, 0, 0, 0, 24, 0, 0, 0, 0, 0, 0, 0, 0, 19, 0,
0, 0, 0, 22, 0, 0, 0, 0, 0, 0, 23, 0, 0, 0, 0, 0,
0, 0, 27, 28, 0, 0, 0, 0, 29, 0, 0, 0, 0, 0, 0, 32,
0, 0, 0, 0, 25, 26, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 31, 0, 0, 0, 0, 0, 0, 0, 0, 0});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({2, 2, 4, 2}));
EXPECT_THAT(model.GetOutput(),
ElementsAreArray({1, 0, 3, 4, 5, 6, 9, 8, 11, 12, 13,
14, 15, 0, 0, 0, 17, 18, 19, 20, 21, 0,
23, 24, 27, 28, 29, 0, 31, 32, 25, 26}));
EXPECT_THAT(model.GetIndicesShape(), ElementsAreArray({2, 2, 4, 2}));
EXPECT_THAT(model.GetIndices(),
ElementsAreArray({2, 1, 8, 9, 12, 15, 44, 43, 72, 75, 80,
79, 62, 61, 66, 67, 0, 1, 30, 7, 14, 13,
42, 21, 50, 51, 56, 55, 86, 63, 68, 69}));
}
TEST(MaxpoolWithArgMaxTest, InputWithBatchAndPaddingValidTest) {
MaxpoolingWithArgMaxOpModel model(
      /*input=*/{TensorType_FLOAT32, {2, 4, 11, 2}},
      /*stride_height=*/2, /*stride_width=*/3,
      /*filter_height=*/2, /*filter_width=*/2,
      /*padding=*/kTfLitePaddingValid,
      /*output=*/{TensorType_FLOAT32, {}},
      /*indices=*/{TensorType_INT32, {}});
model.SetInput({0, 0, 1, 0, 0, 0, 0, 0, 3, 4, 0, 0, 5, 0, 0, 6,
0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 7, 0, 0, 8, 9, 0, 0, 10,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 15, 0,
0, 16, 0, 0, 0, 0, 0, 0, 11, 0, 0, 12, 0, 0, 0, 14,
13, 0, 0, 0, 0, 0, 0, 0, 17, 18, 0, 0, 0, 30, 0, 20,
0, 0, 0, 0, 0, 0, 21, 0, 0, 0, 0, 0, 0, 24, 0, 0,
0, 0, 0, 0, 0, 0, 19, 0, 0, 0, 0, 22, 0, 0, 0, 0,
0, 0, 23, 0, 0, 0, 0, 0, 0, 0, 27, 28, 0, 0, 0, 0,
29, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 25, 26, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 31, 0});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({2, 2, 4, 2}));
EXPECT_THAT(model.GetOutput(),
ElementsAreArray({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22,
23, 24, 25, 26, 27, 28, 29, 0, 31, 32}));
EXPECT_THAT(model.GetIndicesShape(), ElementsAreArray({2, 2, 4, 2}));
EXPECT_THAT(model.GetIndices(),
ElementsAreArray({2, 23, 8, 9, 12, 15, 40, 43, 44, 47, 72,
75, 80, 79, 62, 65, 0, 1, 30, 7, 14, 35,
42, 21, 68, 69, 50, 51, 56, 57, 86, 63}));
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/perception/max_pool_with_argmax.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/perception/max_pool_with_argmax_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
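As a hand check of the index formula in MaxPool above, take SimpleTest: the input shape is {1, 2, 4, 1}, so input_width = 4 and depth = 1. The 2x2 window over columns 0-1 peaks at 13 at (y=0, x=1), giving (0 * 4 + 1) * 1 + 0 = 1; the window over columns 2-3 peaks at 4 at (y=1, x=2), giving (1 * 4 + 2) * 1 + 0 = 6, which matches the expected indices {1, 6}.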
cc3ff74c-5d7d-4122-8c0c-2ae4c13a0a4d | cpp | google/tsl | tstring | tsl/platform/tstring.h | tsl/platform/tstring_test.cc | #ifndef TENSORFLOW_TSL_PLATFORM_TSTRING_H_
#define TENSORFLOW_TSL_PLATFORM_TSTRING_H_
#include <assert.h>
#include <ostream>
#include <string>
#include "tsl/platform/cord.h"
#include "tsl/platform/ctstring.h"
#include "tsl/platform/platform.h"
#include "tsl/platform/stringpiece.h"
namespace tsl {
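// tstring is the string type stored in TensorFlow tensors. It wraps the C
// type TF_TString, which keeps its bytes in one of four representations:
// SMALL (inline, up to TF_TString_SmallCapacity bytes), LARGE (heap-owned),
// OFFSET, or VIEW (a non-owning alias of external memory).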
class tstring {
TF_TString tstr_;
public:
enum Type {
SMALL = TF_TSTR_SMALL,
LARGE = TF_TSTR_LARGE,
OFFSET = TF_TSTR_OFFSET,
VIEW = TF_TSTR_VIEW,
};
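  // Non-owning reference to a character buffer. Assigning a tstring::view to
  // a tstring produces a VIEW-type tstring that aliases the buffer instead of
  // copying it, so the buffer must outlive the tstring.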
class view {
const char* data_;
size_t size_;
public:
explicit view(const char* data, size_t size) : data_(data), size_(size) {}
explicit view(const char* data) : data_(data), size_(::strlen(data)) {}
const char* data() const { return data_; }
size_t size() const { return size_; }
view() = delete;
view(const view&) = delete;
view& operator=(const view&) = delete;
};
typedef const char* const_iterator;
tstring();
tstring(const std::string& str);
tstring(const char* str, size_t len);
tstring(const char* str);
tstring(size_t n, char c);
explicit tstring(const absl::string_view str);
#ifdef PLATFORM_GOOGLE
explicit tstring(const absl::Cord& cord);
#endif
tstring(const tstring& str);
tstring(tstring&& str) noexcept;
~tstring();
tstring& operator=(const tstring& str);
tstring& operator=(const std::string& str);
tstring& operator=(const char* str);
tstring& operator=(char ch);
tstring& operator=(const absl::string_view str);
#ifdef PLATFORM_GOOGLE
tstring& operator=(const absl::Cord& cord);
#endif
tstring& operator=(const view& tsv);
tstring& operator=(tstring&& str) noexcept;
int compare(const char* str, size_t len) const;
bool operator<(const tstring& o) const;
bool operator>(const tstring& o) const;
bool operator==(const char* str) const;
bool operator==(const tstring& o) const;
bool operator!=(const char* str) const;
bool operator!=(const tstring& o) const;
operator std::string() const;
operator absl::string_view() const;
#ifdef PLATFORM_GOOGLE
template <typename T,
typename std::enable_if<std::is_same<T, absl::AlphaNum>::value,
T>::type* = nullptr>
operator T() const;
#endif
size_t size() const;
size_t length() const;
size_t capacity() const;
bool empty() const;
Type type() const;
void resize(size_t new_size, char c = 0);
void resize_uninitialized(size_t new_size);
void clear() noexcept;
void reserve(size_t n);
const_iterator begin() const;
const_iterator end() const;
const char* c_str() const;
const char* data() const;
const char& operator[](size_t i) const;
const char& back() const;
char* mdata();
char* data();
char& operator[](size_t i);
tstring& assign(const char* str, size_t len);
tstring& assign(const char* str);
tstring& assign_as_view(const tstring& str);
tstring& assign_as_view(const std::string& str);
tstring& assign_as_view(const absl::string_view str);
tstring& assign_as_view(const char* str, size_t len);
tstring& assign_as_view(const char* str);
tstring& append(const tstring& str);
tstring& append(const char* str, size_t len);
tstring& append(const char* str);
tstring& append(size_t n, char c);
tstring& erase(size_t pos, size_t len);
tstring& insert(size_t pos, const tstring& str, size_t subpos, size_t sublen);
tstring& insert(size_t pos, size_t n, char c);
void swap(tstring& str) noexcept;
void push_back(char ch);
friend bool operator==(const char* a, const tstring& b);
friend bool operator==(const std::string& a, const tstring& b);
friend tstring operator+(const tstring& a, const tstring& b);
friend std::ostream& operator<<(std::ostream& o, const tstring& str);
friend std::hash<tstring>;
};
bool operator==(const char* a, const tstring& b);
bool operator==(const std::string& a, const tstring& b);
tstring operator+(const tstring& a, const tstring& b);
std::ostream& operator<<(std::ostream& o, const tstring& str);
inline tstring::tstring() { TF_TString_Init(&tstr_); }
inline tstring::tstring(const char* str, size_t len) {
TF_TString_Init(&tstr_);
TF_TString_Copy(&tstr_, str, len);
}
inline tstring::tstring(const char* str) : tstring(str, ::strlen(str)) {}
inline tstring::tstring(size_t n, char c) {
TF_TString_Init(&tstr_);
TF_TString_Resize(&tstr_, n, c);
}
inline tstring::tstring(const std::string& str)
: tstring(str.data(), str.size()) {}
inline tstring::tstring(const absl::string_view str)
: tstring(str.data(), str.size()) {}
#ifdef PLATFORM_GOOGLE
inline tstring::tstring(const absl::Cord& cord) {
TF_TString_Init(&tstr_);
TF_TString_ResizeUninitialized(&tstr_, cord.size());
cord.CopyToArray(data());
}
#endif
inline tstring::tstring(const tstring& str) {
TF_TString_Init(&tstr_);
TF_TString_Assign(&tstr_, &str.tstr_);
}
inline tstring::tstring(tstring&& str) noexcept {
TF_TString_Init(&tstr_);
TF_TString_Move(&tstr_, &str.tstr_);
}
inline tstring::~tstring() { TF_TString_Dealloc(&tstr_); }
inline tstring& tstring::operator=(const tstring& str) {
TF_TString_Assign(&tstr_, &str.tstr_);
return *this;
}
inline tstring& tstring::operator=(const std::string& str) {
TF_TString_Copy(&tstr_, str.data(), str.size());
return *this;
}
inline tstring& tstring::operator=(const char* str) {
TF_TString_Copy(&tstr_, str, ::strlen(str));
return *this;
}
inline tstring& tstring::operator=(char c) {
resize_uninitialized(1);
(*this)[0] = c;
return *this;
}
inline tstring& tstring::operator=(const absl::string_view str) {
TF_TString_Copy(&tstr_, str.data(), str.size());
return *this;
}
#ifdef PLATFORM_GOOGLE
inline tstring& tstring::operator=(const absl::Cord& cord) {
TF_TString_ResizeUninitialized(&tstr_, cord.size());
cord.CopyToArray(data());
return *this;
}
#endif
inline tstring& tstring::operator=(const tstring::view& tsv) {
assign_as_view(tsv.data(), tsv.size());
return *this;
}
inline tstring& tstring::operator=(tstring&& str) noexcept {
TF_TString_Move(&tstr_, &str.tstr_);
return *this;
}
inline int tstring::compare(const char* str, size_t len) const {
int ret = ::memcmp(data(), str, std::min(len, size()));
if (ret < 0) return -1;
if (ret > 0) return +1;
if (size() < len) return -1;
if (size() > len) return +1;
return 0;
}
inline bool tstring::operator<(const tstring& o) const {
return compare(o.data(), o.size()) < 0;
}
inline bool tstring::operator>(const tstring& o) const {
return compare(o.data(), o.size()) > 0;
}
inline bool tstring::operator==(const char* str) const {
return ::strlen(str) == size() && ::memcmp(data(), str, size()) == 0;
}
inline bool tstring::operator==(const tstring& o) const {
return o.size() == size() && ::memcmp(data(), o.data(), size()) == 0;
}
inline bool tstring::operator!=(const char* str) const {
return !(*this == str);
}
inline bool tstring::operator!=(const tstring& o) const {
return !(*this == o);
}
inline tstring::operator std::string() const {
return std::string(data(), size());
}
inline tstring::operator absl::string_view() const {
return absl::string_view(data(), size());
}
#ifdef PLATFORM_GOOGLE
template <typename T, typename std::enable_if<
std::is_same<T, absl::AlphaNum>::value, T>::type*>
inline tstring::operator T() const {
return T(absl::string_view(*this));
}
#endif
inline size_t tstring::size() const { return TF_TString_GetSize(&tstr_); }
inline size_t tstring::length() const { return size(); }
inline size_t tstring::capacity() const {
return TF_TString_GetCapacity(&tstr_);
}
inline bool tstring::empty() const { return size() == 0; }
inline tstring::Type tstring::type() const {
return static_cast<tstring::Type>(TF_TString_GetType(&tstr_));
}
inline void tstring::resize(size_t new_size, char c) {
TF_TString_Resize(&tstr_, new_size, c);
}
inline void tstring::resize_uninitialized(size_t new_size) {
TF_TString_ResizeUninitialized(&tstr_, new_size);
}
inline void tstring::clear() noexcept {
TF_TString_ResizeUninitialized(&tstr_, 0);
}
inline void tstring::reserve(size_t n) { TF_TString_Reserve(&tstr_, n); }
inline tstring::const_iterator tstring::begin() const { return &(*this)[0]; }
inline tstring::const_iterator tstring::end() const { return &(*this)[size()]; }
inline const char* tstring::c_str() const { return data(); }
inline const char* tstring::data() const {
return TF_TString_GetDataPointer(&tstr_);
}
inline const char& tstring::operator[](size_t i) const { return data()[i]; }
inline const char& tstring::back() const { return (*this)[size() - 1]; }
inline char* tstring::mdata() {
return TF_TString_GetMutableDataPointer(&tstr_);
}
inline char* tstring::data() {
return mdata();
}
inline char& tstring::operator[](size_t i) { return mdata()[i]; }
inline tstring& tstring::assign(const char* str, size_t len) {
TF_TString_Copy(&tstr_, str, len);
return *this;
}
inline tstring& tstring::assign(const char* str) {
assign(str, ::strlen(str));
return *this;
}
inline tstring& tstring::assign_as_view(const tstring& str) {
assign_as_view(str.data(), str.size());
return *this;
}
inline tstring& tstring::assign_as_view(const std::string& str) {
assign_as_view(str.data(), str.size());
return *this;
}
inline tstring& tstring::assign_as_view(const absl::string_view str) {
assign_as_view(str.data(), str.size());
return *this;
}
inline tstring& tstring::assign_as_view(const char* str, size_t len) {
TF_TString_AssignView(&tstr_, str, len);
return *this;
}
inline tstring& tstring::assign_as_view(const char* str) {
assign_as_view(str, ::strlen(str));
return *this;
}
inline tstring& tstring::append(const tstring& str) {
TF_TString_Append(&tstr_, &str.tstr_);
return *this;
}
inline tstring& tstring::append(const char* str, size_t len) {
TF_TString_AppendN(&tstr_, str, len);
return *this;
}
inline tstring& tstring::append(const char* str) {
append(str, ::strlen(str));
return *this;
}
inline tstring& tstring::append(size_t n, char c) {
const size_t new_size = size() + n;
TF_TString_ReserveAmortized(&tstr_, new_size);
resize(new_size, c);
return *this;
}
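// Removes `len` characters starting at `pos`. No bounds checking is done;
// the caller must guarantee pos + len <= size().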
inline tstring& tstring::erase(size_t pos, size_t len) {
memmove(mdata() + pos, data() + pos + len, size() - len - pos);
resize(size() - len);
return *this;
}
inline tstring& tstring::insert(size_t pos, const tstring& str, size_t subpos,
size_t sublen) {
size_t orig_size = size();
TF_TString_ResizeUninitialized(&tstr_, orig_size + sublen);
memmove(mdata() + pos + sublen, data() + pos, orig_size - pos);
memmove(mdata() + pos, str.data() + subpos, sublen);
return *this;
}
inline tstring& tstring::insert(size_t pos, size_t n, char c) {
size_t size_ = size();
TF_TString_ResizeUninitialized(&tstr_, size_ + n);
memmove(mdata() + pos + n, data() + pos, size_ - pos);
memset(mdata() + pos, c, n);
return *this;
}
inline void tstring::swap(tstring& str) noexcept {
std::swap(tstr_, str.tstr_);
}
inline void tstring::push_back(char ch) { append(1, ch); }
inline bool operator==(const char* a, const tstring& b) {
return ::strlen(a) == b.size() && ::memcmp(a, b.data(), b.size()) == 0;
}
inline bool operator==(const std::string& a, const tstring& b) {
return a.size() == b.size() && ::memcmp(a.data(), b.data(), b.size()) == 0;
}
inline tstring operator+(const tstring& a, const tstring& b) {
tstring r;
r.reserve(a.size() + b.size());
r.append(a);
r.append(b);
return r;
}
inline std::ostream& operator<<(std::ostream& o, const tstring& str) {
return o.write(str.data(), str.size());
}
}
#endif | #include "tsl/platform/tstring.h"
#include <memory>
#include <string>
#include "tsl/platform/cord.h"
#include "tsl/platform/platform.h"
#include "tsl/platform/stringpiece.h"
#include "tsl/platform/test.h"
using ::tsl::tstring;
static const char kLongString[] =
"abcdefghij"
"klmnopqrst"
"uvwxyz0123"
"456789ABCD"
"EFGHIKLMNO";
const size_t kLongStringLen = sizeof(kLongString) / sizeof(char) - sizeof(char);
TEST(TF_TStringTest, Construction) {
tstring s10;
tstring s11("a\0a", 3);
tstring s12(kLongString);
tstring s13(3, 'b');
tstring s14(absl::string_view("hi"));
tstring s15(std::string("bye"));
EXPECT_EQ("", s10);
EXPECT_TRUE(s10.empty());
EXPECT_EQ(tstring::Type::SMALL, s10.type());
EXPECT_EQ(0, s10.size());
EXPECT_EQ(0, s10.length());
EXPECT_EQ(TF_TString_SmallCapacity, s10.capacity());
EXPECT_EQ(std::string("a\0a", 3), s11);
EXPECT_FALSE(s11.empty());
EXPECT_EQ(3, s11.size());
EXPECT_EQ(3, s11.length());
EXPECT_EQ(kLongString, s12);
EXPECT_EQ(kLongStringLen, s12.size());
EXPECT_EQ(tstring::Type::LARGE, s12.type());
EXPECT_LT(TF_TString_SmallCapacity, s12.capacity());
EXPECT_EQ("bbb", s13);
EXPECT_EQ("hi", s14);
EXPECT_EQ(tstring::Type::SMALL, s14.type());
EXPECT_EQ("bye", s15);
}
TEST(TF_TStringTest, CopyMove) {
tstring s20(kLongString);
tstring s21(s20);
tstring s22;
EXPECT_EQ(s20, s21);
s22 = std::move(s21);
EXPECT_EQ(s20, s22);
EXPECT_EQ("", s21);
EXPECT_EQ(tstring::Type::SMALL, s21.type());
}
TEST(TF_TStringTest, Assignment) {
tstring s30("123456789012345678901234567890");
tstring s31;
tstring s32;
s31 = s30;
EXPECT_EQ(s30, s31);
EXPECT_EQ(tstring::Type::LARGE, s31.type());
EXPECT_EQ(s30.size(), s31.size());
s32 = std::move(s30);
EXPECT_EQ(s31, s32);
EXPECT_EQ("", s30);
EXPECT_EQ(tstring::Type::SMALL, s30.type());
EXPECT_EQ(tstring::Type::LARGE, s32.type());
s32 = tstring::view(kLongString);
EXPECT_EQ(kLongString, s32);
EXPECT_EQ(tstring::Type::VIEW, s32.type());
EXPECT_EQ(kLongStringLen, s32.size());
EXPECT_EQ(0, s32.capacity());
tstring s33(std::move(s32));
EXPECT_EQ(kLongString, s33);
EXPECT_EQ(tstring::Type::VIEW, s33.type());
EXPECT_EQ(kLongStringLen, s33.size());
s32 = std::string(kLongString);
EXPECT_EQ(kLongString, s32);
EXPECT_EQ(tstring::Type::LARGE, s32.type());
EXPECT_EQ(kLongStringLen, s32.size());
s32 = "hello";
EXPECT_EQ("hello", s32);
EXPECT_EQ(tstring::Type::SMALL, s32.type());
EXPECT_EQ(5, s32.size());
s33 = 'a';
EXPECT_EQ("a", s33);
EXPECT_EQ(tstring::Type::SMALL, s33.type());
EXPECT_EQ(1, s33.size());
s32 = absl::string_view(kLongString);
EXPECT_EQ(kLongString, s32);
EXPECT_EQ(tstring::Type::LARGE, s32.type());
EXPECT_EQ(kLongStringLen, s32.size());
s32.resize(TF_TString_SmallCapacity * 2);
EXPECT_EQ(absl::string_view(kLongString, TF_TString_SmallCapacity * 2), s32);
EXPECT_EQ(tstring::Type::LARGE, s32.type());
EXPECT_EQ(TF_TString_SmallCapacity * 2, s32.size());
s32 = tstring::view(kLongString, kLongStringLen);
EXPECT_EQ(kLongString, s32);
EXPECT_EQ(tstring::Type::VIEW, s32.type());
EXPECT_EQ(kLongStringLen, s32.size());
s32.assign("hello1");
EXPECT_EQ("hello1", s32);
s32.assign("hello2", 5);
EXPECT_EQ("hello", s32);
s30.assign_as_view(kLongString);
EXPECT_EQ(tstring::Type::VIEW, s30.type());
s31.assign_as_view(s30);
EXPECT_EQ(tstring::Type::VIEW, s31.type());
EXPECT_EQ(kLongString, s30.c_str());
EXPECT_EQ(kLongString, s31.c_str());
std::string tmp(kLongString);
s32.assign_as_view(tmp);
EXPECT_EQ(tstring::Type::VIEW, s32.type());
EXPECT_STREQ(kLongString, s32.c_str());
s33.assign_as_view(kLongString, 2);
EXPECT_EQ(2, s33.size());
s32.assign_as_view(absl::string_view(kLongString));
EXPECT_EQ(tstring::Type::VIEW, s32.type());
EXPECT_EQ(kLongString, s32.c_str());
#ifdef PLATFORM_GOOGLE
s33 = absl::Cord(kLongString);
EXPECT_EQ(kLongString, s33);
EXPECT_EQ(tstring::Type::LARGE, s33.type());
EXPECT_EQ(kLongStringLen, s33.size());
tstring s34((absl::Cord(kLongString)));
EXPECT_EQ(kLongString, s34);
EXPECT_EQ(tstring::Type::LARGE, s34.type());
EXPECT_EQ(kLongStringLen, s34.size());
#endif
}
TEST(TF_TStringTest, Comparison) {
tstring empty("");
tstring a("a");
tstring aa("aa");
tstring a_("a");
tstring b("b");
const char c[] = "c";
tstring nulla("\0a", 2);
tstring nullb("\0b", 2);
tstring nullaa("\0aa", 3);
EXPECT_TRUE(a < b);
EXPECT_TRUE(a != b);
EXPECT_FALSE(a > b);
EXPECT_FALSE(a == b);
EXPECT_TRUE(a < aa);
EXPECT_TRUE(a != aa);
EXPECT_FALSE(a > aa);
EXPECT_FALSE(a == aa);
EXPECT_TRUE(b > a);
EXPECT_TRUE(b != a);
EXPECT_FALSE(b < a);
EXPECT_FALSE(b == a);
EXPECT_FALSE(a == b);
EXPECT_FALSE(b == c);
EXPECT_TRUE(b != c);
EXPECT_TRUE(empty < a);
EXPECT_TRUE(empty != a);
EXPECT_FALSE(empty > a);
EXPECT_FALSE(empty == a);
EXPECT_TRUE(a > empty);
EXPECT_TRUE(a != empty);
EXPECT_FALSE(a < empty);
EXPECT_FALSE(a == empty);
EXPECT_FALSE(a < a_);
EXPECT_FALSE(a != a_);
EXPECT_FALSE(a > a_);
EXPECT_TRUE(a == a_);
EXPECT_TRUE(nulla < nullaa);
EXPECT_TRUE(nulla != nullaa);
EXPECT_FALSE(nulla > nullaa);
EXPECT_FALSE(nulla == nullaa);
EXPECT_TRUE(nulla < nullb);
EXPECT_TRUE(nullaa > nulla);
EXPECT_TRUE(nullaa != nulla);
EXPECT_FALSE(nullaa < nulla);
EXPECT_FALSE(nullaa == nulla);
}
TEST(TF_TStringTest, Conversion) {
tstring s50(kLongString);
std::string s51(s50);
absl::string_view s52(s50);
EXPECT_EQ(kLongString, s51);
EXPECT_EQ(kLongStringLen, s51.size());
EXPECT_EQ(kLongString, s52);
EXPECT_EQ(kLongStringLen, s52.size());
#ifdef PLATFORM_GOOGLE
absl::AlphaNum s53(s50);
EXPECT_STREQ(kLongString, s53.data());
EXPECT_EQ(kLongStringLen, s53.size());
#endif
}
TEST(TF_TStringTest, Allocation) {
tstring s60;
s60.resize(2);
EXPECT_EQ(std::string("\0\0", 2), s60);
EXPECT_EQ(2, s60.size());
EXPECT_EQ(2, s60.length());
s60.resize(6, 'a');
EXPECT_EQ(std::string("\0\0aaaa", 6), s60);
EXPECT_EQ(6, s60.size());
EXPECT_EQ(6, s60.length());
s60.resize(3, 'b');
EXPECT_EQ(std::string("\0\0a", 3), s60);
EXPECT_EQ(3, s60.size());
EXPECT_EQ(3, s60.length());
s60.clear();
EXPECT_EQ("", s60);
EXPECT_TRUE(s60.empty());
EXPECT_EQ(0, s60.size());
EXPECT_EQ(0, s60.length());
s60.reserve(100);
EXPECT_EQ(111, s60.capacity());
s60.reserve(100);
}
TEST(TF_TStringTest, ElementAccess) {
tstring s70(kLongString);
EXPECT_STREQ(kLongString, s70.data());
EXPECT_EQ(s70.data(), s70.c_str());
for (size_t i = 0; i < s70.size(); i++) {
EXPECT_EQ(kLongString[i], s70.data()[i]);
}
tstring::const_iterator i = s70.begin();
const char* j = kLongString;
for (; *j != '\0'; i++, j++) {
EXPECT_EQ(*j, *i);
}
EXPECT_EQ('\0', *s70.end());
EXPECT_EQ(*i, *s70.end());
EXPECT_EQ(*(i - 1), s70.back());
}
TEST(TF_TStringTest, Modifiers) {
tstring s80("ba");
tstring s81;
tstring s82(kLongString);
s81.append(s80);
EXPECT_EQ("ba", s81);
s81.append(s80);
EXPECT_EQ("baba", s81);
s81.append("\0c", 2);
EXPECT_EQ(std::string("baba\0c", 6), s81);
s81.append("dd");
EXPECT_EQ(std::string("baba\0cdd", 8), s81);
s81.append(3, 'z');
EXPECT_EQ(tstring("baba\0cddzzz", 11), s81);
s81.append(0, 'z');
s81.append("dd", 0);
s81.append("");
s81.append(tstring());
EXPECT_EQ(std::string("baba\0cddzzz", 11), s81);
s81.erase(0, 1);
EXPECT_EQ(std::string("aba\0cddzzz", 10), s81);
s81.erase(4, 6);
EXPECT_EQ(std::string("aba\0", 4), s81);
s81.insert(1, tstring("\0moo\0", 5), 1, 4);
EXPECT_EQ(std::string("amoo\0ba\0", 8), s81);
s81.insert(0, 2, '\0');
s81.insert(s81.size() - 1, 1, 'q');
EXPECT_EQ(std::string("\0\0amoo\0baq\0", 11), s81);
s81.erase(0, s81.size());
EXPECT_EQ(tstring(), s81);
s80.swap(s82);
EXPECT_EQ(kLongString, s80);
EXPECT_EQ("ba", s82);
s82.push_back('\0');
s82.push_back('q');
EXPECT_EQ(std::string("ba\0q", 4), s82);
}
TEST(TF_TStringTest, Friends) {
tstring s90("b");
tstring s91("\0a\0", 3);
tstring s92;
EXPECT_EQ("b", s90 + s92);
EXPECT_EQ("b", s92 + s90);
EXPECT_EQ(std::string("\0a\0", 3), s92 + s91);
EXPECT_EQ(std::string("\0a\0", 3), s91 + s92);
EXPECT_EQ(std::string("b\0a\0", 4), s90 + s91);
EXPECT_EQ(std::string("\0a\0b", 4), s91 + s90);
std::stringstream ss;
ss << s91;
EXPECT_EQ(std::string("\0a\0", 3), ss.str());
} | https://github.com/google/tsl/blob/6d708fdcdd4f40537b7fa273371215a6fa3d4423/tsl/platform/tstring.h | https://github.com/google/tsl/blob/6d708fdcdd4f40537b7fa273371215a6fa3d4423/tsl/platform/tstring_test.cc | 6d708fdcdd4f40537b7fa273371215a6fa3d4423 |
2332d521-390d-487b-a49c-f328427eabe2 | cpp | abseil/abseil-cpp | bounded_utf8_length_sequence | absl/debugging/internal/bounded_utf8_length_sequence.h | absl/debugging/internal/bounded_utf8_length_sequence_test.cc | #ifndef ABSL_DEBUGGING_INTERNAL_BOUNDED_UTF8_LENGTH_SEQUENCE_H_
#define ABSL_DEBUGGING_INTERNAL_BOUNDED_UTF8_LENGTH_SEQUENCE_H_
#include <cstdint>
#include "absl/base/config.h"
#include "absl/numeric/bits.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace debugging_internal {
template <uint32_t max_elements>
class BoundedUtf8LengthSequence {
public:
BoundedUtf8LengthSequence() = default;
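  // Records that the element at `index` is encoded with `utf8_length` bytes
  // (1..4) and returns the total UTF-8 length of all elements that now
  // precede it. Out-of-range arguments are clamped rather than rejected.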
uint32_t InsertAndReturnSumOfPredecessors(
uint32_t index, uint32_t utf8_length) {
if (index >= max_elements) index = max_elements - 1;
if (utf8_length == 0 || utf8_length > 4) utf8_length = 1;
const uint32_t word_index = index/32;
const uint32_t bit_index = 2 * (index % 32);
const uint64_t ones_bit = uint64_t{1} << bit_index;
const uint64_t odd_bits_mask = 0xaaaaaaaaaaaaaaaa;
const uint64_t lower_seminibbles_mask = ones_bit - 1;
const uint64_t higher_seminibbles_mask = ~lower_seminibbles_mask;
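    // Each entry stores utf8_length - 1 in a 2-bit seminibble (low bit worth
    // 1, high bit worth 2), so the sum of the stored values below the
    // insertion point is popcount(all stored bits) + popcount(high bits);
    // adding the element count (`index`) converts it back to a sum of lengths.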
const uint64_t same_word_bits_below_insertion =
rep_[word_index] & lower_seminibbles_mask;
int full_popcount = absl::popcount(same_word_bits_below_insertion);
int odd_popcount =
absl::popcount(same_word_bits_below_insertion & odd_bits_mask);
for (uint32_t j = word_index; j > 0; --j) {
const uint64_t word_below_insertion = rep_[j - 1];
full_popcount += absl::popcount(word_below_insertion);
odd_popcount += absl::popcount(word_below_insertion & odd_bits_mask);
}
const uint32_t sum_of_predecessors =
index + static_cast<uint32_t>(full_popcount + odd_popcount);
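    // Shift every higher seminibble up by one slot to make room for the new
    // entry; the entry at the highest index falls off the end.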
for (uint32_t j = max_elements/32 - 1; j > word_index; --j) {
rep_[j] = (rep_[j] << 2) | (rep_[j - 1] >> 62);
}
rep_[word_index] =
(rep_[word_index] & lower_seminibbles_mask) |
(uint64_t{utf8_length - 1} << bit_index) |
((rep_[word_index] & higher_seminibbles_mask) << 2);
return sum_of_predecessors;
}
private:
static_assert(max_elements > 0 && max_elements % 32 == 0,
"max_elements must be a positive multiple of 32");
uint64_t rep_[max_elements/32] = {};
};
}
ABSL_NAMESPACE_END
}
#endif | #include "absl/debugging/internal/bounded_utf8_length_sequence.h"
#include <cstdint>
#include "gtest/gtest.h"
#include "absl/base/config.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace debugging_internal {
namespace {
TEST(BoundedUtf8LengthSequenceTest, RemembersAValueOfOneCorrectly) {
BoundedUtf8LengthSequence<32> seq;
ASSERT_EQ(seq.InsertAndReturnSumOfPredecessors(0, 1), 0);
EXPECT_EQ(seq.InsertAndReturnSumOfPredecessors(1, 1), 1);
}
TEST(BoundedUtf8LengthSequenceTest, RemembersAValueOfTwoCorrectly) {
BoundedUtf8LengthSequence<32> seq;
ASSERT_EQ(seq.InsertAndReturnSumOfPredecessors(0, 2), 0);
EXPECT_EQ(seq.InsertAndReturnSumOfPredecessors(1, 1), 2);
}
TEST(BoundedUtf8LengthSequenceTest, RemembersAValueOfThreeCorrectly) {
BoundedUtf8LengthSequence<32> seq;
ASSERT_EQ(seq.InsertAndReturnSumOfPredecessors(0, 3), 0);
EXPECT_EQ(seq.InsertAndReturnSumOfPredecessors(1, 1), 3);
}
TEST(BoundedUtf8LengthSequenceTest, RemembersAValueOfFourCorrectly) {
BoundedUtf8LengthSequence<32> seq;
ASSERT_EQ(seq.InsertAndReturnSumOfPredecessors(0, 4), 0);
EXPECT_EQ(seq.InsertAndReturnSumOfPredecessors(1, 1), 4);
}
TEST(BoundedUtf8LengthSequenceTest, RemembersSeveralAppendedValues) {
BoundedUtf8LengthSequence<32> seq;
ASSERT_EQ(seq.InsertAndReturnSumOfPredecessors(0, 1), 0);
ASSERT_EQ(seq.InsertAndReturnSumOfPredecessors(1, 4), 1);
ASSERT_EQ(seq.InsertAndReturnSumOfPredecessors(2, 2), 5);
ASSERT_EQ(seq.InsertAndReturnSumOfPredecessors(3, 3), 7);
ASSERT_EQ(seq.InsertAndReturnSumOfPredecessors(4, 1), 10);
}
TEST(BoundedUtf8LengthSequenceTest, RemembersSeveralPrependedValues) {
BoundedUtf8LengthSequence<32> seq;
ASSERT_EQ(seq.InsertAndReturnSumOfPredecessors(0, 4), 0);
ASSERT_EQ(seq.InsertAndReturnSumOfPredecessors(0, 3), 0);
ASSERT_EQ(seq.InsertAndReturnSumOfPredecessors(0, 2), 0);
ASSERT_EQ(seq.InsertAndReturnSumOfPredecessors(0, 1), 0);
ASSERT_EQ(seq.InsertAndReturnSumOfPredecessors(4, 1), 10);
ASSERT_EQ(seq.InsertAndReturnSumOfPredecessors(3, 1), 6);
ASSERT_EQ(seq.InsertAndReturnSumOfPredecessors(2, 1), 3);
ASSERT_EQ(seq.InsertAndReturnSumOfPredecessors(1, 1), 1);
}
TEST(BoundedUtf8LengthSequenceTest, RepeatedInsertsShiftValuesOutTheRightEnd) {
BoundedUtf8LengthSequence<32> seq;
ASSERT_EQ(seq.InsertAndReturnSumOfPredecessors(0, 2), 0);
for (uint32_t i = 1; i < 31; ++i) {
ASSERT_EQ(seq.InsertAndReturnSumOfPredecessors(0, 1), 0)
<< "while moving the 2 into position " << i;
ASSERT_EQ(seq.InsertAndReturnSumOfPredecessors(31, 1), 32)
<< "after moving the 2 into position " << i;
}
ASSERT_EQ(seq.InsertAndReturnSumOfPredecessors(0, 1), 0)
<< "while moving the 2 into position 31";
EXPECT_EQ(seq.InsertAndReturnSumOfPredecessors(31, 1), 31)
<< "after moving the 2 into position 31";
}
TEST(BoundedUtf8LengthSequenceTest, InsertsIntoWord1LeavesWord0Untouched) {
BoundedUtf8LengthSequence<64> seq;
for (uint32_t i = 0; i < 32; ++i) {
ASSERT_EQ(seq.InsertAndReturnSumOfPredecessors(i, 2), 2 * i)
<< "at index " << i;
}
EXPECT_EQ(seq.InsertAndReturnSumOfPredecessors(32, 1), 64);
EXPECT_EQ(seq.InsertAndReturnSumOfPredecessors(32, 1), 64);
}
TEST(BoundedUtf8LengthSequenceTest, InsertsIntoWord0ShiftsValuesIntoWord1) {
BoundedUtf8LengthSequence<64> seq;
ASSERT_EQ(seq.InsertAndReturnSumOfPredecessors(29, 2), 29);
ASSERT_EQ(seq.InsertAndReturnSumOfPredecessors(30, 3), 31);
ASSERT_EQ(seq.InsertAndReturnSumOfPredecessors(31, 4), 34);
ASSERT_EQ(seq.InsertAndReturnSumOfPredecessors(0, 1), 0);
ASSERT_EQ(seq.InsertAndReturnSumOfPredecessors(0, 1), 0);
ASSERT_EQ(seq.InsertAndReturnSumOfPredecessors(34, 1), 31 + 2 + 3 + 4);
ASSERT_EQ(seq.InsertAndReturnSumOfPredecessors(32, 1), 31 + 2);
}
TEST(BoundedUtf8LengthSequenceTest, ValuesAreShiftedCorrectlyAmongThreeWords) {
BoundedUtf8LengthSequence<96> seq;
ASSERT_EQ(seq.InsertAndReturnSumOfPredecessors(31, 3), 31);
ASSERT_EQ(seq.InsertAndReturnSumOfPredecessors(63, 4), 62 + 3);
ASSERT_EQ(seq.InsertAndReturnSumOfPredecessors(0, 1), 0);
ASSERT_EQ(seq.InsertAndReturnSumOfPredecessors(65, 1), 63 + 3 + 4);
ASSERT_EQ(seq.InsertAndReturnSumOfPredecessors(64, 1), 63 + 3);
ASSERT_EQ(seq.InsertAndReturnSumOfPredecessors(33, 1), 32 + 3);
ASSERT_EQ(seq.InsertAndReturnSumOfPredecessors(32, 1), 32);
}
}
}
ABSL_NAMESPACE_END
} | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/debugging/internal/bounded_utf8_length_sequence.h | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/debugging/internal/bounded_utf8_length_sequence_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4 |
4b03c7db-37a2-41d3-bf1b-d4b8f63cfab0 | cpp | google/tensorstore | any_receiver | tensorstore/util/execution/any_receiver.h | tensorstore/util/execution/any_receiver_test.cc | #ifndef TENSORSTORE_UTIL_EXECUTION_ANY_RECEIVER_H_
#define TENSORSTORE_UTIL_EXECUTION_ANY_RECEIVER_H_
#include <utility>
#include "absl/base/attributes.h"
#include "tensorstore/internal/poly/poly.h"
#include "tensorstore/util/execution/execution.h"
#include "tensorstore/util/execution/sender.h"
namespace tensorstore {
using AnyCancelReceiver = poly::Poly<0, false, void()>;
namespace internal_sender {
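// Type-erased receiver vtables built on poly::Poly with inline storage for
// two pointers; receivers larger than that are stored out of line.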
template <typename E, typename... V>
using ReceiverPoly = poly::Poly<sizeof(void*) * 2, false,
void(internal_execution::set_value_t, V...),
void(internal_execution::set_error_t, E),
void(internal_execution::set_cancel_t)>;
template <typename E, typename... V>
using FlowReceiverPoly =
poly::Poly<sizeof(void*) * 2, false,
void(internal_execution::set_starting_t, AnyCancelReceiver up),
void(internal_execution::set_value_t, V...),
void(internal_execution::set_done_t),
void(internal_execution::set_error_t, E),
void(internal_execution::set_stopping_t)>;
}
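// Type-erased container for any single-shot receiver with error type `E` and
// value types `V...`. Default-constructs to a NullReceiver that ignores all
// signals.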
template <typename E, typename... V>
class AnyReceiver : public internal_sender::ReceiverPoly<E, V...> {
using Base = internal_sender::ReceiverPoly<E, V...>;
public:
using Base::Base;
AnyReceiver() : Base(NullReceiver{}) {}
ABSL_ATTRIBUTE_ALWAYS_INLINE void set_value(V... v) {
(*this)(internal_execution::set_value_t{}, std::forward<V>(v)...);
}
ABSL_ATTRIBUTE_ALWAYS_INLINE void set_error(E e) {
(*this)(internal_execution::set_error_t{}, std::forward<E>(e));
}
ABSL_ATTRIBUTE_ALWAYS_INLINE void set_cancel() {
(*this)(internal_execution::set_cancel_t{});
}
};
template <typename E, typename... V>
class AnyFlowReceiver : public internal_sender::FlowReceiverPoly<E, V...> {
using Base = internal_sender::FlowReceiverPoly<E, V...>;
public:
using Base::Base;
AnyFlowReceiver() : Base(NullReceiver{}) {}
ABSL_ATTRIBUTE_ALWAYS_INLINE void set_starting(AnyCancelReceiver cancel) {
(*this)(internal_execution::set_starting_t{}, std::move(cancel));
}
ABSL_ATTRIBUTE_ALWAYS_INLINE void set_value(V... v) {
(*this)(internal_execution::set_value_t{}, std::forward<V>(v)...);
}
ABSL_ATTRIBUTE_ALWAYS_INLINE void set_done() {
(*this)(internal_execution::set_done_t{});
}
ABSL_ATTRIBUTE_ALWAYS_INLINE void set_error(E e) {
(*this)(internal_execution::set_error_t{}, std::forward<E>(e));
}
ABSL_ATTRIBUTE_ALWAYS_INLINE void set_stopping() {
(*this)(internal_execution::set_stopping_t{});
}
};
}
#endif | #include "tensorstore/util/execution/any_receiver.h"
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/util/execution/execution.h"
#include "tensorstore/util/execution/sender.h"
#include "tensorstore/util/execution/sender_testutil.h"
namespace {
TEST(AnyReceiverTest, Construct) {
tensorstore::AnyReceiver<int, std::string> receiver(
tensorstore::NullReceiver{});
}
TEST(AnyReceiverTest, Assignment) {
tensorstore::AnyReceiver<int, std::string> receiver;
receiver = tensorstore::NullReceiver{};
{
tensorstore::NullReceiver tmp{};
receiver = tmp;
}
}
TEST(AnyReceiverTest, NullSetValue) {
tensorstore::AnyReceiver<int, std::string> receiver;
tensorstore::execution::set_value(receiver, "message");
}
TEST(AnyReceiverTest, NullSetError) {
tensorstore::AnyReceiver<int, std::string> receiver;
tensorstore::execution::set_error(receiver, 3);
}
TEST(AnyReceiverTest, NullSetCancel) {
tensorstore::AnyReceiver<int> receiver;
tensorstore::execution::set_cancel(receiver);
}
TEST(AnyReceiverTest, LoggingSetValue) {
std::vector<std::string> log;
tensorstore::AnyReceiver<int, std::string> receiver(
tensorstore::LoggingReceiver{&log});
tensorstore::execution::set_value(receiver, "ok");
EXPECT_THAT(log, ::testing::ElementsAre("set_value: ok"));
}
TEST(AnyReceiverTest, SetErrorInt) {
std::vector<std::string> log;
tensorstore::AnyReceiver<int, std::string> receiver(
tensorstore::LoggingReceiver{&log});
tensorstore::execution::set_error(receiver, 5);
EXPECT_THAT(log, ::testing::ElementsAre("set_error: 5"));
}
TEST(AnyReceiverTest, SetCancel) {
std::vector<std::string> log;
tensorstore::AnyReceiver<int, std::string> receiver(
tensorstore::LoggingReceiver{&log});
tensorstore::execution::set_cancel(receiver);
EXPECT_THAT(log, ::testing::ElementsAre("set_cancel"));
}
TEST(AnyFlowReceiver, Construct) {
tensorstore::AnyFlowReceiver<int, std::string> receiver(
tensorstore::NullReceiver{});
}
TEST(AnyFlowReceiver, Assignment) {
tensorstore::AnyFlowReceiver<int, std::string> receiver;
receiver = tensorstore::NullReceiver{};
{
tensorstore::NullReceiver tmp{};
receiver = tmp;
}
}
TEST(AnyFlowReceiver, NullSetStarting) {
tensorstore::AnyFlowReceiver<int> receiver;
tensorstore::execution::set_starting(receiver, []() {});
}
TEST(AnyFlowReceiver, NullSetValue) {
tensorstore::AnyFlowReceiver<int, std::string> receiver;
  tensorstore::execution::set_value(receiver, "message");
}
TEST(AnyFlowReceiver, NullSetError) {
tensorstore::AnyFlowReceiver<int, std::string> receiver;
tensorstore::execution::set_error(receiver, 3);
}
TEST(AnyFlowReceiver, NullSetDone) {
tensorstore::AnyFlowReceiver<int> receiver;
tensorstore::execution::set_done(receiver);
}
TEST(AnyFlowReceiver, NullSetStopping) {
tensorstore::AnyFlowReceiver<int> receiver;
tensorstore::execution::set_stopping(receiver);
}
TEST(AnyFlowReceiver, LoggingSetValue) {
std::vector<std::string> log;
tensorstore::AnyFlowReceiver<int, std::string> receiver(
tensorstore::LoggingReceiver{&log});
tensorstore::execution::set_starting(receiver, []() {});
tensorstore::execution::set_value(receiver, "A");
tensorstore::execution::set_value(receiver, "B");
tensorstore::execution::set_done(receiver);
tensorstore::execution::set_stopping(receiver);
EXPECT_THAT(
log, ::testing::ElementsAre("set_starting", "set_value: A",
"set_value: B", "set_done", "set_stopping"));
}
TEST(AnyFlowReceiver, LoggingSetError) {
std::vector<std::string> log;
tensorstore::AnyFlowReceiver<int, std::string> receiver(
tensorstore::LoggingReceiver{&log});
tensorstore::execution::set_starting(receiver, []() {});
tensorstore::execution::set_value(receiver, "A");
tensorstore::execution::set_error(receiver, 5);
tensorstore::execution::set_done(receiver);
tensorstore::execution::set_stopping(receiver);
EXPECT_THAT(
log, ::testing::ElementsAre("set_starting", "set_value: A",
"set_error: 5", "set_done", "set_stopping"));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/execution/any_receiver.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/execution/any_receiver_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
9ec08230-232b-4d04-9482-3ba951dfa44e | cpp | google/arolla | inplace_expr_compiler | arolla/serving/inplace_expr_compiler.cc | arolla/serving/inplace_expr_compiler_test.cc | #include "arolla/serving/inplace_expr_compiler.h"
#include <cstddef>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "arolla/naming/table.h"
#include "arolla/qexpr/evaluation_engine.h"
#include "arolla/qtype/named_field_qtype.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/typed_slot.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla::inplace_expr_compiler_impl {
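// Walks the named fields of `root_slot` (depth-first, via the registered
// field names of each QType) and returns a map from paths such as
// "/side_outputs/x_plus_y" to the corresponding TypedSlot.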
TypedSlotMap CollectInternalSlots(TypedSlot root_slot) {
TypedSlotMap result;
if (GetFieldNames(root_slot.GetType()).empty()) {
return result;
}
std::vector<std::pair<TypedSlot, naming::TablePath>> stack{{root_slot, {}}};
while (!stack.empty()) {
auto [slot, table] = stack.back();
stack.pop_back();
auto field_names = GetFieldNames(slot.GetType());
for (size_t i = 0; i < field_names.size(); ++i) {
const auto& field_name = field_names[i];
const TypedSlot& field_slot = slot.SubSlot(i);
result.emplace(table.Column(naming::FieldAccess(field_name)).FullName(),
field_slot);
if (!GetFieldNames(field_slot.GetType()).empty()) {
stack.emplace_back(field_slot,
table.Child(naming::FieldAccess(field_name)));
}
}
}
return result;
}
namespace {
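// Verifies that the struct QType exposes a field named `field_name` whose
// slot type matches `field_qtype`, returning FailedPrecondition otherwise.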
absl::Status CheckField(QTypePtr qtype, const TypedSlotMap& slot_map,
QTypePtr field_qtype, absl::string_view field_name) {
if (GetFieldNames(qtype).empty()) {
return absl::FailedPreconditionError(
absl::StrCat("no registered field names for ", qtype->name(),
" in Compile.*ExprOnStructInput"));
}
if (!slot_map.contains(field_name)) {
return absl::FailedPreconditionError(
absl::StrCat("input `", field_name, "` not found in ", qtype->name(),
" in Compile.*ExprOnStructInput"));
}
QTypePtr result_type = slot_map.at(field_name).GetType();
if (result_type != field_qtype) {
return absl::FailedPreconditionError(absl::StrCat(
"input `", field_name, "` type mismatch for ", qtype->name(),
" in Compile.*ExprOnStructInput, expected in struct: ",
result_type->name(), ", found in expr: ", field_qtype->name()));
}
return absl::OkStatus();
}
absl::StatusOr<TypedSlotMap> CollectInputSlots(
QTypePtr qtype, const TypedSlotMap& struct_slot_map,
const CompiledExpr& compiled_expr) {
TypedSlotMap input_slots;
input_slots.reserve(compiled_expr.input_types().size());
for (const auto& [name, field_qtype] : compiled_expr.input_types()) {
RETURN_IF_ERROR(CheckField(qtype, struct_slot_map, field_qtype, name));
input_slots.emplace(name, struct_slot_map.at(name));
}
return input_slots;
}
}
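// Resolves the compiled expression's inputs, final output, and named outputs
// against the struct layout, rejecting any name that plays more than one
// role (input, final output, or named output).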
absl::StatusOr<IoSlots> CollectIoSlots(QTypePtr qtype,
const CompiledExpr& compiled_expr,
absl::string_view final_output_name) {
TypedSlotMap struct_slot_map =
CollectInternalSlots(TypedSlot::UnsafeFromOffset(qtype, 0));
ASSIGN_OR_RETURN(TypedSlotMap input_slots,
CollectInputSlots(qtype, struct_slot_map, compiled_expr));
RETURN_IF_ERROR(CheckField(qtype, struct_slot_map,
compiled_expr.output_type(), final_output_name));
if (compiled_expr.input_types().contains(final_output_name)) {
return absl::FailedPreconditionError(absl::StrCat(
final_output_name, " present both as an input and as final output"));
}
if (compiled_expr.named_output_types().contains(final_output_name)) {
return absl::FailedPreconditionError(
absl::StrCat(final_output_name,
" present both as final output and as named output"));
}
for (const auto& [name, field_qtype] : compiled_expr.input_types()) {
if (compiled_expr.named_output_types().contains(name)) {
return absl::FailedPreconditionError(
absl::StrCat(name, " present both as an input and as named output"));
}
}
for (const auto& [name, field_qtype] : compiled_expr.named_output_types()) {
RETURN_IF_ERROR(CheckField(qtype, struct_slot_map, field_qtype, name));
}
absl::flat_hash_map<std::string, TypedSlot> named_output_slots;
named_output_slots.reserve(compiled_expr.named_output_types().size());
for (const auto& [name, _] : compiled_expr.named_output_types()) {
named_output_slots.emplace(name, struct_slot_map.at(name));
}
return IoSlots{.input_slots = input_slots,
.output_slot = struct_slot_map.at(final_output_name),
.named_output_slots = named_output_slots};
}
} | #include "arolla/serving/inplace_expr_compiler.h"
#include <array>
#include <cstdint>
#include <functional>
#include <memory>
#include <optional>
#include <string>
#include <tuple>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "absl/status/status_matchers.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "arolla/expr/expr.h"
#include "arolla/expr/expr_node.h"
#include "arolla/memory/frame.h"
#include "arolla/memory/optional_value.h"
#include "arolla/qexpr/eval_context.h"
#include "arolla/qexpr/evaluation_engine.h"
#include "arolla/qtype/base_types.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/qtype/simple_qtype.h"
#include "arolla/qtype/typed_slot.h"
#include "arolla/serving/expr_compiler.h"
#include "arolla/util/bytes.h"
#include "arolla/util/fingerprint.h"
#include "arolla/util/struct_field.h"
#include "arolla/util/text.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla {
namespace {
using ::absl_testing::IsOkAndHolds;
using ::absl_testing::StatusIs;
using ::testing::HasSubstr;
using ::testing::MatchesRegex;
struct UnsupportedType {};
struct TestOutputStruct {
double x_plus_y;
double x_times_y;
UnsupportedType unsupported_type_field;
double unused;
static auto ArollaStructFields() {
using CppType = TestOutputStruct;
return std::tuple{
AROLLA_DECLARE_STRUCT_FIELD(x_plus_y),
AROLLA_DECLARE_STRUCT_FIELD(x_times_y),
AROLLA_SKIP_STRUCT_FIELD(unsupported_type_field),
AROLLA_DECLARE_STRUCT_FIELD(unused),
};
}
void ArollaFingerprint(FingerprintHasher* hasher) const {
CombineStructFields(hasher, *this);
}
};
struct TestStruct {
float x;
double y;
void* unsupported_field;
TestOutputStruct side_outputs;
static auto ArollaStructFields() {
using CppType = TestStruct;
return std::tuple{
AROLLA_DECLARE_STRUCT_FIELD(x),
AROLLA_DECLARE_STRUCT_FIELD(y),
AROLLA_SKIP_STRUCT_FIELD(unsupported_field),
AROLLA_DECLARE_STRUCT_FIELD(side_outputs),
};
}
void ArollaFingerprint(FingerprintHasher* hasher) const {
CombineStructFields(hasher, *this);
}
};
struct TestStructWithOptional {
OptionalValue<float> x;
OptionalValue<double> y;
std::array<int, 6> skip_me;
OptionalValue<double> x_plus_y;
constexpr static auto ArollaStructFields() {
using CppType = TestStructWithOptional;
return std::tuple{
AROLLA_DECLARE_STRUCT_FIELD(x),
AROLLA_DECLARE_STRUCT_FIELD(y),
AROLLA_SKIP_STRUCT_FIELD(skip_me),
AROLLA_DECLARE_STRUCT_FIELD(x_plus_y),
};
}
void ArollaFingerprint(FingerprintHasher* hasher) const {
CombineStructFields(hasher, *this);
}
};
struct TestStructWithString {
std::string title;
UnsupportedType it_is_not_supported;
OptionalValue<::arolla::Bytes> name;
UnsupportedType not_supported_sorry;
std::string full_name;
static auto ArollaStructFields() {
using CppType = TestStructWithString;
return std::tuple{
AROLLA_DECLARE_STRUCT_FIELD(title),
AROLLA_SKIP_STRUCT_FIELD(it_is_not_supported),
AROLLA_DECLARE_STRUCT_FIELD(name),
AROLLA_SKIP_STRUCT_FIELD(not_supported_sorry),
AROLLA_DECLARE_STRUCT_FIELD(full_name),
};
}
void ArollaFingerprint(FingerprintHasher* hasher) const {
CombineStructFields(hasher, *this);
}
};
}
AROLLA_DECLARE_SIMPLE_QTYPE(TEST_OUTPUT_STRUCT, TestOutputStruct);
AROLLA_DEFINE_SIMPLE_QTYPE(TEST_OUTPUT_STRUCT, TestOutputStruct);
AROLLA_DECLARE_SIMPLE_QTYPE(TEST_STRUCT, TestStruct);
AROLLA_DEFINE_SIMPLE_QTYPE(TEST_STRUCT, TestStruct);
AROLLA_DECLARE_SIMPLE_QTYPE(TEST_STRUCT_WITH_OPTIONAL, TestStructWithOptional);
AROLLA_DEFINE_SIMPLE_QTYPE(TEST_STRUCT_WITH_OPTIONAL, TestStructWithOptional);
AROLLA_DECLARE_SIMPLE_QTYPE(TEST_STRUCT_WITH_STRING, TestStructWithString);
AROLLA_DEFINE_SIMPLE_QTYPE(TEST_STRUCT_WITH_STRING, TestStructWithString);
namespace {
class FailingCompiledExpr : public InplaceCompiledExpr {
public:
using InplaceCompiledExpr::InplaceCompiledExpr;
absl::StatusOr<std::unique_ptr<BoundExpr>> InplaceBind(
const absl::flat_hash_map<std::string, TypedSlot>& slots,
TypedSlot output_slot,
const absl::flat_hash_map<std::string, TypedSlot>& )
const final {
return absl::InternalError("Fake:(");
}
};
TEST(CompileInplaceExprOnStruct, NoFieldNames) {
FailingCompiledExpr compiled_expr({}, GetQType<double>(), {});
EXPECT_THAT(
CompileInplaceExprOnStruct<int32_t>(compiled_expr, "/final_output"),
StatusIs(absl::StatusCode::kFailedPrecondition,
MatchesRegex(".*registered field.*INT32.*")));
}
TEST(CompileInplaceExprOnStruct, NoFinalOutputName) {
FailingCompiledExpr compiled_expr({}, GetQType<double>(), {});
EXPECT_THAT(
CompileInplaceExprOnStruct<TestStruct>(compiled_expr, "/final_output"),
StatusIs(absl::StatusCode::kFailedPrecondition,
MatchesRegex(".*input.*/final_output.*TEST_STRUCT.*")));
}
TEST(CompileInplaceExprOnStruct, InputTypeMismatch) {
FailingCompiledExpr compiled_expr({}, GetQType<double>(), {});
EXPECT_THAT(
CompileInplaceExprOnStruct<TestStruct>(compiled_expr, "/x"),
StatusIs(absl::StatusCode::kFailedPrecondition,
MatchesRegex(
".*/x.*TEST_STRUCT.*expected.*FLOAT32.*found.*FLOAT64")));
}
TEST(CompileInplaceExprOnStruct, InputTypeUnknown) {
FailingCompiledExpr compiled_expr({}, GetQType<double>(), {});
EXPECT_THAT(CompileInplaceExprOnStruct<TestStruct>(compiled_expr, "/qq"),
StatusIs(absl::StatusCode::kFailedPrecondition,
MatchesRegex(".*input.*/qq.*TEST_STRUCT.*")));
}
TEST(CompileInplaceExprOnStruct, FinalOutputTypeMismatch) {
FailingCompiledExpr compiled_expr({{"/x", GetQType<double>()}},
GetQType<double>(), {});
EXPECT_THAT(
CompileInplaceExprOnStruct<TestStruct>(compiled_expr,
"/side_outputs/x_plus_y"),
StatusIs(absl::StatusCode::kFailedPrecondition,
MatchesRegex(
".*/x.*TEST_STRUCT.*expected.*FLOAT32.*found.*FLOAT64")));
}
TEST(CompileInplaceExprOnStruct, SideOutputTypeMismatch) {
FailingCompiledExpr compiled_expr(
{{"/x", GetQType<float>()}}, GetQType<double>(),
{{"/side_outputs/x_times_y", GetQType<float>()}});
EXPECT_THAT(
CompileInplaceExprOnStruct<TestStruct>(compiled_expr,
"/side_outputs/x_plus_y"),
StatusIs(
absl::StatusCode::kFailedPrecondition,
MatchesRegex(
".*/side_outputs/"
"x_times_y.*TEST_STRUCT.*expected.*FLOAT64.*found.*FLOAT32")));
}
TEST(CompileInplaceExprOnStruct, SideOutputUnknown) {
FailingCompiledExpr compiled_expr(
{{"/x", GetQType<float>()}}, GetQType<double>(),
{{"/side_outputs/x_power_y", GetQType<double>()}});
EXPECT_THAT(
CompileInplaceExprOnStruct<TestStruct>(compiled_expr,
"/side_outputs/x_plus_y"),
StatusIs(
absl::StatusCode::kFailedPrecondition,
MatchesRegex(".*/side_outputs/x_power_y.*not found.*TEST_STRUCT.*")));
}
TEST(CompileInplaceExprOnStruct, CompiledExprBindingFailure) {
FailingCompiledExpr compiled_expr({{"/x", GetQType<float>()}},
GetQType<double>(), {});
EXPECT_THAT(CompileInplaceExprOnStruct<TestStruct>(compiled_expr,
"/side_outputs/x_plus_y"),
StatusIs(absl::StatusCode::kInternal, "Fake:("));
}
TEST(CompileInplaceExprOnStruct, InputSideOutputCollision) {
FailingCompiledExpr compiled_expr({{"/y", GetQType<double>()}},
GetQType<double>(),
{{"/y", GetQType<double>()}});
EXPECT_THAT(CompileInplaceExprOnStruct<TestStruct>(compiled_expr,
"/side_outputs/x_plus_y"),
StatusIs(absl::StatusCode::kFailedPrecondition,
MatchesRegex(".*/y.*input.*named output.*")));
}
TEST(CompileInplaceExprOnStruct, InputFinalOutputCollision) {
FailingCompiledExpr compiled_expr(
{{"/y", GetQType<double>()}}, GetQType<double>(),
{{"/side_outputs/x_plus_y", GetQType<double>()}});
EXPECT_THAT(CompileInplaceExprOnStruct<TestStruct>(compiled_expr, "/y"),
StatusIs(absl::StatusCode::kFailedPrecondition,
MatchesRegex(".*/y.*input.*final output.*")));
}
TEST(CompileInplaceExprOnStruct, SideOutputFinalOutputCollision) {
FailingCompiledExpr compiled_expr(
{{"/y", GetQType<double>()}}, GetQType<double>(),
{{"/side_outputs/x_plus_y", GetQType<double>()}});
EXPECT_THAT(
CompileInplaceExprOnStruct<TestStruct>(compiled_expr,
"/side_outputs/x_plus_y"),
StatusIs(absl::StatusCode::kFailedPrecondition,
MatchesRegex(
".*/side_outputs/x_plus_y.*final output.*named output.*")));
}
class TestBoundExpr final : public BoundExpr {
public:
TestBoundExpr(FrameLayout::Slot<float> x, FrameLayout::Slot<double> y,
FrameLayout::Slot<double> x_plus_y,
FrameLayout::Slot<double> x_times_y)
: BoundExpr(
{{"/x", TypedSlot::FromSlot(x)}, {"/y", TypedSlot::FromSlot(y)}},
TypedSlot::FromSlot(x_plus_y),
{{"/side_outputs/x_times_y", TypedSlot::FromSlot(x_times_y)}}),
x_(x),
y_(y),
x_plus_y_(x_plus_y),
x_times_y_(x_times_y) {}
void InitializeLiterals(EvaluationContext*, FramePtr) const final {}
void Execute(EvaluationContext*, FramePtr frame) const final {
frame.Set(x_plus_y_, frame.Get(x_) + frame.Get(y_));
frame.Set(x_times_y_, frame.Get(x_) * frame.Get(y_));
}
private:
FrameLayout::Slot<float> x_;
FrameLayout::Slot<double> y_;
FrameLayout::Slot<double> x_plus_y_;
FrameLayout::Slot<double> x_times_y_;
};
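// Hand-written stand-in for a codegen model: computes x + y as the final
// output and x * y as the "/side_outputs/x_times_y" named output.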
class TestCompiledExpr : public InplaceCompiledExpr {
public:
TestCompiledExpr()
: InplaceCompiledExpr(
{{"/x", GetQType<float>()}, {"/y", GetQType<double>()}},
GetQType<double>(),
{{"/side_outputs/x_times_y", GetQType<double>()}}) {}
absl::StatusOr<std::unique_ptr<BoundExpr>> InplaceBind(
const absl::flat_hash_map<std::string, TypedSlot>& slots,
TypedSlot output_slot,
const absl::flat_hash_map<std::string, TypedSlot>& named_output_slots)
const final {
RETURN_IF_ERROR(VerifySlotTypes(input_types(), slots));
return std::make_unique<TestBoundExpr>(
slots.at("/x").ToSlot<float>().value(),
slots.at("/y").ToSlot<double>().value(),
output_slot.ToSlot<double>().value(),
named_output_slots.at("/side_outputs/x_times_y")
.ToSlot<double>()
.value());
}
};
TEST(CompileInplaceExprOnStructTest, SuccessXPlusY) {
TestCompiledExpr compiled_expr;
ASSERT_OK_AND_ASSIGN(std::function<absl::Status(TestStruct&)> eval_fn,
CompileInplaceExprOnStruct<TestStruct>(
compiled_expr, "/side_outputs/x_plus_y"));
TestStruct input{
.x = 5.f,
.y = 7.,
.side_outputs = {.x_plus_y = -1, .x_times_y = -1, .unused = -1}};
ASSERT_OK(eval_fn(input));
EXPECT_EQ(input.side_outputs.x_plus_y, 12);
EXPECT_EQ(input.side_outputs.x_times_y, 35.);
EXPECT_EQ(input.x, 5);
EXPECT_EQ(input.y, 7);
EXPECT_EQ(input.side_outputs.unused, -1.);
}
class TestBoundExprWithOptionals final : public BoundExpr {
public:
TestBoundExprWithOptionals(FrameLayout::Slot<OptionalValue<float>> x,
FrameLayout::Slot<OptionalValue<double>> y,
FrameLayout::Slot<OptionalValue<double>> x_plus_y)
: BoundExpr(
{{"/x", TypedSlot::FromSlot(x)}, {"/y", TypedSlot::FromSlot(y)}},
TypedSlot::FromSlot(x_plus_y), {}),
x_(x),
y_(y),
x_plus_y_(x_plus_y) {}
void InitializeLiterals(EvaluationContext*, FramePtr) const final {}
void Execute(EvaluationContext*, FramePtr frame) const final {
if (frame.Get(x_).present && frame.Get(y_).present) {
frame.Set(x_plus_y_, frame.Get(x_).value + frame.Get(y_).value);
} else {
frame.Set(x_plus_y_, std::nullopt);
}
}
private:
FrameLayout::Slot<OptionalValue<float>> x_;
FrameLayout::Slot<OptionalValue<double>> y_;
FrameLayout::Slot<OptionalValue<double>> x_plus_y_;
};
class TestCompiledExprWithOptionals : public InplaceCompiledExpr {
public:
TestCompiledExprWithOptionals()
: InplaceCompiledExpr({{"/x", GetQType<OptionalValue<float>>()},
{"/y", GetQType<OptionalValue<double>>()}},
GetQType<OptionalValue<double>>(), {}) {}
absl::StatusOr<std::unique_ptr<BoundExpr>> InplaceBind(
const absl::flat_hash_map<std::string, TypedSlot>& slots,
TypedSlot output_slot,
const absl::flat_hash_map<std::string, TypedSlot>& )
const final {
RETURN_IF_ERROR(VerifySlotTypes(input_types(), slots));
return std::make_unique<TestBoundExprWithOptionals>(
slots.at("/x").ToSlot<OptionalValue<float>>().value(),
slots.at("/y").ToSlot<OptionalValue<double>>().value(),
output_slot.ToSlot<OptionalValue<double>>().value());
}
};
TEST(CompileInplaceExprOnStructTest, SuccessXPlusYWithOptionals) {
TestCompiledExprWithOptionals compiled_expr;
ASSERT_OK_AND_ASSIGN(
std::function<absl::Status(TestStructWithOptional&)> eval_fn,
CompileInplaceExprOnStruct<TestStructWithOptional>(compiled_expr,
"/x_plus_y"));
TestStructWithOptional input{.x = 5.f, .y = 7., .x_plus_y = -1};
ASSERT_OK(eval_fn(input));
EXPECT_EQ(input.x_plus_y, 12.);
EXPECT_EQ(input.x, 5.f);
EXPECT_EQ(input.y, 7.);
}
class TestBoundExprWithStrings final : public BoundExpr {
public:
TestBoundExprWithStrings(FrameLayout::Slot<arolla::Bytes> title,
FrameLayout::Slot<OptionalValue<arolla::Bytes>> name,
FrameLayout::Slot<arolla::Bytes> output)
: BoundExpr({{"/title", TypedSlot::FromSlot(title)},
{"/name", TypedSlot::FromSlot(name)}},
TypedSlot::FromSlot(output), {}),
title_(title),
name_(name),
output_(output) {}
void InitializeLiterals(EvaluationContext*, FramePtr) const final {}
void Execute(EvaluationContext*, FramePtr frame) const final {
if (!frame.Get(name_).present) {
frame.Set(output_, "UNKNOWN");
return;
}
frame.Set(output_,
absl::StrCat(frame.Get(title_), " ", frame.Get(name_).value));
}
private:
FrameLayout::Slot<arolla::Bytes> title_;
FrameLayout::Slot<OptionalValue<arolla::Bytes>> name_;
FrameLayout::Slot<arolla::Bytes> output_;
};
class TestCompiledExprWithStrings : public InplaceCompiledExpr {
public:
TestCompiledExprWithStrings()
: InplaceCompiledExpr(
{{"/title", GetQType<arolla::Bytes>()},
{"/name", GetQType<OptionalValue<arolla::Bytes>>()}},
GetQType<arolla::Bytes>(), {}) {}
absl::StatusOr<std::unique_ptr<BoundExpr>> InplaceBind(
const absl::flat_hash_map<std::string, TypedSlot>& slots,
TypedSlot output_slot,
const absl::flat_hash_map<std::string, TypedSlot>& )
const final {
RETURN_IF_ERROR(VerifySlotTypes(input_types(), slots));
return std::make_unique<TestBoundExprWithStrings>(
slots.at("/title").ToSlot<arolla::Bytes>().value(),
slots.at("/name").ToSlot<OptionalValue<arolla::Bytes>>().value(),
output_slot.ToSlot<arolla::Bytes>().value());
}
};
TEST(CompileInplaceExprOnStructTest, SuccessStringsIO) {
TestCompiledExprWithStrings compiled_expr;
ASSERT_OK_AND_ASSIGN(
std::function<absl::Status(TestStructWithString&)> eval_fn,
CompileInplaceExprOnStruct<TestStructWithString>(compiled_expr,
"/full_name"));
TestStructWithString input{
.title = "Mr.", .name = arolla::Bytes("Abc"), .full_name = "????"};
ASSERT_OK(eval_fn(input));
EXPECT_EQ(input.full_name, "Mr. Abc");
input.name = std::nullopt;
ASSERT_OK(eval_fn(input));
EXPECT_EQ(input.full_name, "UNKNOWN");
}
TEST(CompileDynamicExprOnStructInputTest, TypeError) {
ASSERT_OK_AND_ASSIGN(
expr::ExprNodePtr expr,
expr::CallOp("annotation.qtype",
{expr::Leaf("/x"), expr::Literal(GetQType<int>())}));
EXPECT_THAT((ExprCompiler<TestStruct, std::optional<double>>())
.SetInputLoader(CreateStructInputLoader<TestStruct>())
.Compile(expr)
.status(),
StatusIs(absl::StatusCode::kFailedPrecondition,
MatchesRegex(".*inconsistent.*qtype.*INT32.*")));
}
TEST(CompileDynamicExprOnStructInputTest, UnknownLeaf) {
expr::ExprNodePtr expr = expr::Leaf("/unknown");
EXPECT_THAT((ExprCompiler<TestStruct, std::optional<double>>())
.SetInputLoader(CreateStructInputLoader<TestStruct>())
.Compile(expr)
.status(),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("unknown inputs: /unknown")));
}
TEST(CompileDynamicExprOnStructInputTest, TypeErrorOnCodegenModel) {
TestCompiledExprWithOptionals compiled_expr;
EXPECT_THAT((ExprCompiler<TestStruct, std::optional<double>>())
.SetInputLoader(CreateStructInputLoader<TestStruct>())
.Compile(compiled_expr)
.status(),
StatusIs(absl::StatusCode::kFailedPrecondition,
MatchesRegex(".*slot types mismatch.*")));
}
TEST(CompileDynamicExprOnStructInputTest, Nested) {
ASSERT_OK_AND_ASSIGN(
expr::ExprNodePtr expr,
expr::CallOp("math.add",
{expr::Leaf("/x"), expr::Leaf("/side_outputs/x_plus_y")}));
ASSERT_OK_AND_ASSIGN(
std::function<absl::StatusOr<double>(const TestStruct&)> eval_fn,
(ExprCompiler<TestStruct, double>())
.SetInputLoader(CreateStructInputLoader<TestStruct>())
.Compile(expr));
TestStruct input{
.x = 5.f,
.y = -1.,
.side_outputs = {.x_plus_y = 7., .x_times_y = -1, .unused = -1}};
EXPECT_THAT(eval_fn(input), IsOkAndHolds(12.));
}
TEST(CompileDynamicExprOnStructInputTest, SuccessXPlusYWithOptionals) {
ASSERT_OK_AND_ASSIGN(
expr::ExprNodePtr expr,
expr::CallOp("math.add", {expr::Leaf("/x"), expr::Leaf("/y")}));
ASSERT_OK_AND_ASSIGN(
std::function<absl::StatusOr<std::optional<double>>(
const TestStructWithOptional&)>
eval_fn,
(ExprCompiler<TestStructWithOptional, std::optional<double>>())
.SetInputLoader(CreateStructInputLoader<TestStructWithOptional>())
.Compile(expr));
TestStructWithOptional input{.x = 5.f, .y = 7., .x_plus_y = -1};
EXPECT_THAT(eval_fn(input), IsOkAndHolds(12.));
input.x = std::nullopt;
EXPECT_THAT(eval_fn(input), IsOkAndHolds(std::nullopt));
}
TEST(CompileDynamicExprOnStructInputTest, ErrorStatus) {
absl::StatusOr<expr::ExprNodePtr> status_or_expr =
absl::InternalError("input error");
auto result =
ExprCompiler<TestStructWithOptional, std::optional<double>>()
.SetInputLoader(CreateStructInputLoader<TestStructWithOptional>())
.Compile(status_or_expr);
EXPECT_THAT(result, StatusIs(absl::StatusCode::kInternal,
MatchesRegex("input error")));
}
TEST(CompileDynamicExprOnStructInputTest, SuccessXPlusYOnCodegenModel) {
TestCompiledExpr compiled_expr;
ASSERT_OK_AND_ASSIGN(
std::function<absl::StatusOr<double>(const TestStruct&)> eval_fn,
(ExprCompiler<TestStruct, double>())
.SetInputLoader(CreateStructInputLoader<TestStruct>())
.Compile(compiled_expr));
TestStruct input{.x = 5.f, .y = 7.};
EXPECT_THAT(eval_fn(input), IsOkAndHolds(12.));
}
TEST(CompileDynamicExprOnStructInputTest, SuccessSideOutputOnCodegenModel) {
TestCompiledExpr compiled_expr;
ASSERT_OK_AND_ASSIGN(
std::function<absl::StatusOr<double>(const TestStruct&, TestStruct*)>
eval_fn,
(ExprCompiler<TestStruct, double, TestStruct>())
.SetInputLoader(CreateStructInputLoader<TestStruct>())
.SetSlotListener(CreateStructSlotListener<TestStruct>())
.Compile(compiled_expr));
TestStruct input{.x = 5.f, .y = 7.};
EXPECT_THAT(eval_fn(input, nullptr), IsOkAndHolds(12.));
EXPECT_THAT(eval_fn(input, &input), IsOkAndHolds(12.));
EXPECT_EQ(input.side_outputs.x_times_y, 35);
}
TEST(CompileDynamicExprOnStructWithBytesInputTest, SuccessUpper) {
ASSERT_OK_AND_ASSIGN(expr::ExprNodePtr title,
expr::CallOp("strings.decode", {expr::Leaf("/title")}));
ASSERT_OK_AND_ASSIGN(
expr::ExprNodePtr name,
expr::CallOp("strings.upper",
{expr::CallOp("strings.decode", {expr::Leaf("/name")})}));
ASSERT_OK_AND_ASSIGN(
expr::ExprNodePtr expr,
expr::CallOp("strings.join", {title, expr::Literal(Text(" ")), name}));
ASSERT_OK_AND_ASSIGN(expr,
expr::CallOp("core.get_optional_value",
{expr::CallOp("strings.encode", {expr})}));
ASSERT_OK_AND_ASSIGN(
std::function<absl::StatusOr<arolla::Bytes>(const TestStructWithString&)>
eval_fn,
(ExprCompiler<TestStructWithString, arolla::Bytes>())
.SetInputLoader(CreateStructInputLoader<TestStructWithString>())
.Compile(expr));
TestStructWithString input{.title = "Mr.", .name = Bytes("abc")};
EXPECT_THAT(eval_fn(input), IsOkAndHolds(Bytes("Mr. ABC")));
input.name = std::nullopt;
EXPECT_THAT(eval_fn(input), StatusIs(absl::StatusCode::kFailedPrecondition,
HasSubstr("expects present value")));
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/serving/inplace_expr_compiler.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/serving/inplace_expr_compiler_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
90cb4a88-68ac-45e0-b6bf-8f90d5dc4cb1 | cpp | google/quiche | quic_crypto_client_stream | quiche/quic/core/quic_crypto_client_stream.cc | quiche/quic/core/quic_crypto_client_stream_test.cc | #include "quiche/quic/core/quic_crypto_client_stream.h"
#include <memory>
#include <string>
#include <utility>
#include "quiche/quic/core/crypto/crypto_protocol.h"
#include "quiche/quic/core/crypto/crypto_utils.h"
#include "quiche/quic/core/crypto/null_encrypter.h"
#include "quiche/quic/core/crypto/quic_crypto_client_config.h"
#include "quiche/quic/core/quic_crypto_client_handshaker.h"
#include "quiche/quic/core/quic_packets.h"
#include "quiche/quic/core/quic_session.h"
#include "quiche/quic/core/quic_utils.h"
#include "quiche/quic/core/tls_client_handshaker.h"
#include "quiche/quic/platform/api/quic_flags.h"
#include "quiche/quic/platform/api/quic_logging.h"
namespace quic {
const int QuicCryptoClientStream::kMaxClientHellos;
QuicCryptoClientStreamBase::QuicCryptoClientStreamBase(QuicSession* session)
: QuicCryptoStream(session) {}
QuicCryptoClientStream::QuicCryptoClientStream(
const QuicServerId& server_id, QuicSession* session,
std::unique_ptr<ProofVerifyContext> verify_context,
QuicCryptoClientConfig* crypto_config, ProofHandler* proof_handler,
bool has_application_state)
: QuicCryptoClientStreamBase(session) {
QUICHE_DCHECK_EQ(Perspective::IS_CLIENT,
session->connection()->perspective());
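  // Select the handshaker implementation that matches the connection's
  // handshake protocol: QuicCryptoClientHandshaker for QUIC Crypto,
  // TlsClientHandshaker for TLS 1.3.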
switch (session->connection()->version().handshake_protocol) {
case PROTOCOL_QUIC_CRYPTO:
handshaker_ = std::make_unique<QuicCryptoClientHandshaker>(
server_id, this, session, std::move(verify_context), crypto_config,
proof_handler);
break;
case PROTOCOL_TLS1_3: {
auto handshaker = std::make_unique<TlsClientHandshaker>(
server_id, this, session, std::move(verify_context), crypto_config,
proof_handler, has_application_state);
tls_handshaker_ = handshaker.get();
handshaker_ = std::move(handshaker);
break;
}
case PROTOCOL_UNSUPPORTED:
QUIC_BUG(quic_bug_10296_1)
<< "Attempting to create QuicCryptoClientStream for unknown "
"handshake protocol";
}
}
QuicCryptoClientStream::~QuicCryptoClientStream() {}
bool QuicCryptoClientStream::CryptoConnect() {
return handshaker_->CryptoConnect();
}
int QuicCryptoClientStream::num_sent_client_hellos() const {
return handshaker_->num_sent_client_hellos();
}
bool QuicCryptoClientStream::ResumptionAttempted() const {
return handshaker_->ResumptionAttempted();
}
bool QuicCryptoClientStream::IsResumption() const {
return handshaker_->IsResumption();
}
bool QuicCryptoClientStream::EarlyDataAccepted() const {
return handshaker_->EarlyDataAccepted();
}
ssl_early_data_reason_t QuicCryptoClientStream::EarlyDataReason() const {
return handshaker_->EarlyDataReason();
}
bool QuicCryptoClientStream::ReceivedInchoateReject() const {
return handshaker_->ReceivedInchoateReject();
}
int QuicCryptoClientStream::num_scup_messages_received() const {
return handshaker_->num_scup_messages_received();
}
bool QuicCryptoClientStream::encryption_established() const {
return handshaker_->encryption_established();
}
bool QuicCryptoClientStream::one_rtt_keys_available() const {
return handshaker_->one_rtt_keys_available();
}
const QuicCryptoNegotiatedParameters&
QuicCryptoClientStream::crypto_negotiated_params() const {
return handshaker_->crypto_negotiated_params();
}
CryptoMessageParser* QuicCryptoClientStream::crypto_message_parser() {
return handshaker_->crypto_message_parser();
}
HandshakeState QuicCryptoClientStream::GetHandshakeState() const {
return handshaker_->GetHandshakeState();
}
size_t QuicCryptoClientStream::BufferSizeLimitForLevel(
EncryptionLevel level) const {
return handshaker_->BufferSizeLimitForLevel(level);
}
std::unique_ptr<QuicDecrypter>
QuicCryptoClientStream::AdvanceKeysAndCreateCurrentOneRttDecrypter() {
return handshaker_->AdvanceKeysAndCreateCurrentOneRttDecrypter();
}
std::unique_ptr<QuicEncrypter>
QuicCryptoClientStream::CreateCurrentOneRttEncrypter() {
return handshaker_->CreateCurrentOneRttEncrypter();
}
bool QuicCryptoClientStream::ExportKeyingMaterial(absl::string_view label,
absl::string_view context,
size_t result_len,
std::string* result) {
return handshaker_->ExportKeyingMaterial(label, context, result_len, result);
}
std::string QuicCryptoClientStream::chlo_hash() const {
return handshaker_->chlo_hash();
}
void QuicCryptoClientStream::OnOneRttPacketAcknowledged() {
handshaker_->OnOneRttPacketAcknowledged();
}
void QuicCryptoClientStream::OnHandshakePacketSent() {
handshaker_->OnHandshakePacketSent();
}
void QuicCryptoClientStream::OnConnectionClosed(
const QuicConnectionCloseFrame& frame, ConnectionCloseSource source) {
handshaker_->OnConnectionClosed(frame.quic_error_code, source);
}
void QuicCryptoClientStream::OnHandshakeDoneReceived() {
handshaker_->OnHandshakeDoneReceived();
}
void QuicCryptoClientStream::OnNewTokenReceived(absl::string_view token) {
handshaker_->OnNewTokenReceived(token);
}
void QuicCryptoClientStream::SetServerApplicationStateForResumption(
std::unique_ptr<ApplicationState> application_state) {
handshaker_->SetServerApplicationStateForResumption(
std::move(application_state));
}
SSL* QuicCryptoClientStream::GetSsl() const {
return tls_handshaker_ == nullptr ? nullptr : tls_handshaker_->ssl();
}
bool QuicCryptoClientStream::IsCryptoFrameExpectedForEncryptionLevel(
EncryptionLevel level) const {
return handshaker_->IsCryptoFrameExpectedForEncryptionLevel(level);
}
EncryptionLevel
QuicCryptoClientStream::GetEncryptionLevelToSendCryptoDataOfSpace(
PacketNumberSpace space) const {
return handshaker_->GetEncryptionLevelToSendCryptoDataOfSpace(space);
}
} | #include "quiche/quic/core/quic_crypto_client_stream.h"
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/base/macros.h"
#include "quiche/quic/core/crypto/aes_128_gcm_12_encrypter.h"
#include "quiche/quic/core/crypto/quic_decrypter.h"
#include "quiche/quic/core/crypto/quic_encrypter.h"
#include "quiche/quic/core/quic_packets.h"
#include "quiche/quic/core/quic_server_id.h"
#include "quiche/quic/core/quic_utils.h"
#include "quiche/quic/platform/api/quic_flags.h"
#include "quiche/quic/platform/api/quic_test.h"
#include "quiche/quic/test_tools/crypto_test_utils.h"
#include "quiche/quic/test_tools/quic_stream_peer.h"
#include "quiche/quic/test_tools/quic_stream_sequencer_peer.h"
#include "quiche/quic/test_tools/quic_test_utils.h"
#include "quiche/quic/test_tools/simple_quic_framer.h"
#include "quiche/quic/test_tools/simple_session_cache.h"
#include "quiche/common/test_tools/quiche_test_utils.h"
using testing::_;
namespace quic {
namespace test {
namespace {
const char kServerHostname[] = "test.example.com";
const uint16_t kServerPort = 443;
class QuicCryptoClientStreamTest : public QuicTest {
public:
QuicCryptoClientStreamTest()
: supported_versions_(AllSupportedVersionsWithQuicCrypto()),
server_id_(kServerHostname, kServerPort),
crypto_config_(crypto_test_utils::ProofVerifierForTesting(),
std::make_unique<test::SimpleSessionCache>()),
server_crypto_config_(
crypto_test_utils::CryptoServerConfigForTesting()) {
CreateConnection();
}
void CreateSession() {
session_ = std::make_unique<TestQuicSpdyClientSession>(
connection_, DefaultQuicConfig(), supported_versions_, server_id_,
&crypto_config_);
EXPECT_CALL(*session_, GetAlpnsToOffer())
.WillRepeatedly(testing::Return(std::vector<std::string>(
{AlpnForVersion(connection_->version())})));
}
void CreateConnection() {
connection_ =
new PacketSavingConnection(&client_helper_, &alarm_factory_,
Perspective::IS_CLIENT, supported_versions_);
connection_->AdvanceTime(QuicTime::Delta::FromSeconds(1));
CreateSession();
}
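  // Drives a full handshake against an in-process fake server, expecting the
  // proof-verification callbacks on the session to fire along the way.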
void CompleteCryptoHandshake() {
int proof_verify_details_calls = 1;
if (stream()->handshake_protocol() != PROTOCOL_TLS1_3) {
EXPECT_CALL(*session_, OnProofValid(testing::_))
.Times(testing::AtLeast(1));
proof_verify_details_calls = 0;
}
EXPECT_CALL(*session_, OnProofVerifyDetailsAvailable(testing::_))
.Times(testing::AtLeast(proof_verify_details_calls));
stream()->CryptoConnect();
QuicConfig config;
crypto_test_utils::HandshakeWithFakeServer(
&config, server_crypto_config_.get(), &server_helper_, &alarm_factory_,
connection_, stream(), AlpnForVersion(connection_->version()));
}
QuicCryptoClientStream* stream() {
return session_->GetMutableCryptoStream();
}
MockQuicConnectionHelper server_helper_;
MockQuicConnectionHelper client_helper_;
MockAlarmFactory alarm_factory_;
PacketSavingConnection* connection_;
ParsedQuicVersionVector supported_versions_;
std::unique_ptr<TestQuicSpdyClientSession> session_;
QuicServerId server_id_;
CryptoHandshakeMessage message_;
QuicCryptoClientConfig crypto_config_;
std::unique_ptr<QuicCryptoServerConfig> server_crypto_config_;
};
TEST_F(QuicCryptoClientStreamTest, NotInitiallyConnected) {
EXPECT_FALSE(stream()->encryption_established());
EXPECT_FALSE(stream()->one_rtt_keys_available());
}
TEST_F(QuicCryptoClientStreamTest, ConnectedAfterSHLO) {
CompleteCryptoHandshake();
EXPECT_TRUE(stream()->encryption_established());
EXPECT_TRUE(stream()->one_rtt_keys_available());
EXPECT_FALSE(stream()->IsResumption());
EXPECT_EQ(stream()->EarlyDataReason(), ssl_early_data_no_session_offered);
}
TEST_F(QuicCryptoClientStreamTest, MessageAfterHandshake) {
CompleteCryptoHandshake();
EXPECT_CALL(
*connection_,
CloseConnection(QUIC_CRYPTO_MESSAGE_AFTER_HANDSHAKE_COMPLETE, _, _));
message_.set_tag(kCHLO);
crypto_test_utils::SendHandshakeMessageToStream(stream(), message_,
Perspective::IS_CLIENT);
}
TEST_F(QuicCryptoClientStreamTest, BadMessageType) {
stream()->CryptoConnect();
message_.set_tag(kCHLO);
EXPECT_CALL(*connection_, CloseConnection(QUIC_INVALID_CRYPTO_MESSAGE_TYPE,
"Expected REJ", _));
crypto_test_utils::SendHandshakeMessageToStream(stream(), message_,
Perspective::IS_CLIENT);
}
TEST_F(QuicCryptoClientStreamTest, NegotiatedParameters) {
CompleteCryptoHandshake();
const QuicConfig* config = session_->config();
EXPECT_EQ(kMaximumIdleTimeoutSecs, config->IdleNetworkTimeout().ToSeconds());
const QuicCryptoNegotiatedParameters& crypto_params(
stream()->crypto_negotiated_params());
EXPECT_EQ(crypto_config_.aead[0], crypto_params.aead);
EXPECT_EQ(crypto_config_.kexs[0], crypto_params.key_exchange);
}
TEST_F(QuicCryptoClientStreamTest, ExpiredServerConfig) {
CompleteCryptoHandshake();
CreateConnection();
connection_->AdvanceTime(
QuicTime::Delta::FromSeconds(60 * 60 * 24 * 365 * 5));
EXPECT_CALL(*session_, OnProofValid(testing::_));
stream()->CryptoConnect();
ASSERT_EQ(1u, connection_->encrypted_packets_.size());
EXPECT_EQ(ENCRYPTION_INITIAL, connection_->encryption_level());
}
TEST_F(QuicCryptoClientStreamTest, ClientTurnedOffZeroRtt) {
CompleteCryptoHandshake();
CreateConnection();
QuicTagVector options;
options.push_back(kQNZ2);
session_->config()->SetClientConnectionOptions(options);
CompleteCryptoHandshake();
EXPECT_EQ(2, stream()->num_sent_client_hellos());
EXPECT_FALSE(stream()->EarlyDataAccepted());
EXPECT_EQ(stream()->EarlyDataReason(), ssl_early_data_disabled);
}
TEST_F(QuicCryptoClientStreamTest, ClockSkew) {
connection_->AdvanceTime(
QuicTime::Delta::FromSeconds(60 * 60 * 24 * 365 * 5));
CompleteCryptoHandshake();
}
TEST_F(QuicCryptoClientStreamTest, InvalidCachedServerConfig) {
CompleteCryptoHandshake();
CreateConnection();
QuicCryptoClientConfig::CachedState* state =
crypto_config_.LookupOrCreate(server_id_);
std::vector<std::string> certs = state->certs();
std::string cert_sct = state->cert_sct();
std::string signature = state->signature();
std::string chlo_hash = state->chlo_hash();
state->SetProof(certs, cert_sct, chlo_hash, signature + signature);
EXPECT_CALL(*session_, OnProofVerifyDetailsAvailable(testing::_))
.Times(testing::AnyNumber());
stream()->CryptoConnect();
ASSERT_EQ(1u, connection_->encrypted_packets_.size());
}
TEST_F(QuicCryptoClientStreamTest, ServerConfigUpdate) {
CompleteCryptoHandshake();
QuicCryptoClientConfig::CachedState* state =
crypto_config_.LookupOrCreate(server_id_);
EXPECT_NE("xstk", state->source_address_token());
unsigned char stk[] = {'x', 's', 't', 'k'};
unsigned char scfg[] = {
    // kSCFG tag.
    0x53, 0x43, 0x46, 0x47,
    // One entry.
    0x01, 0x00,
    // Padding.
    0x00, 0x00,
    // kEXPY tag.
    0x45, 0x58, 0x50, 0x59,
    // End offset of the kEXPY value.
    0x08, 0x00, 0x00, 0x00,
    // Eight-byte kEXPY value.
    '1', '2', '3', '4', '5', '6', '7', '8'};
CryptoHandshakeMessage server_config_update;
server_config_update.set_tag(kSCUP);
server_config_update.SetValue(kSourceAddressTokenTag, stk);
server_config_update.SetValue(kSCFG, scfg);
const uint64_t expiry_seconds = 60 * 60 * 24 * 2;
server_config_update.SetValue(kSTTL, expiry_seconds);
crypto_test_utils::SendHandshakeMessageToStream(
stream(), server_config_update, Perspective::IS_SERVER);
EXPECT_EQ("xstk", state->source_address_token());
const std::string& cached_scfg = state->server_config();
quiche::test::CompareCharArraysWithHexError(
"scfg", cached_scfg.data(), cached_scfg.length(),
reinterpret_cast<char*>(scfg), ABSL_ARRAYSIZE(scfg));
QuicStreamSequencer* sequencer = QuicStreamPeer::sequencer(stream());
EXPECT_FALSE(QuicStreamSequencerPeer::IsUnderlyingBufferAllocated(sequencer));
}
TEST_F(QuicCryptoClientStreamTest, ServerConfigUpdateWithCert) {
CompleteCryptoHandshake();
QuicCryptoServerConfig crypto_config(
QuicCryptoServerConfig::TESTING, QuicRandom::GetInstance(),
crypto_test_utils::ProofSourceForTesting(), KeyExchangeSource::Default());
crypto_test_utils::SetupCryptoServerConfigForTest(
connection_->clock(), QuicRandom::GetInstance(), &crypto_config);
SourceAddressTokens tokens;
QuicCompressedCertsCache cache(1);
CachedNetworkParameters network_params;
CryptoHandshakeMessage server_config_update;
class Callback : public BuildServerConfigUpdateMessageResultCallback {
public:
Callback(bool* ok, CryptoHandshakeMessage* message)
: ok_(ok), message_(message) {}
void Run(bool ok, const CryptoHandshakeMessage& message) override {
*ok_ = ok;
*message_ = message;
}
private:
bool* ok_;
CryptoHandshakeMessage* message_;
};
bool ok = false;
crypto_config.BuildServerConfigUpdateMessage(
session_->transport_version(), stream()->chlo_hash(), tokens,
QuicSocketAddress(QuicIpAddress::Loopback6(), 1234),
QuicSocketAddress(QuicIpAddress::Loopback6(), 4321), connection_->clock(),
QuicRandom::GetInstance(), &cache, stream()->crypto_negotiated_params(),
&network_params,
std::unique_ptr<BuildServerConfigUpdateMessageResultCallback>(
new Callback(&ok, &server_config_update)));
EXPECT_TRUE(ok);
EXPECT_CALL(*session_, OnProofValid(testing::_));
crypto_test_utils::SendHandshakeMessageToStream(
stream(), server_config_update, Perspective::IS_SERVER);
CreateConnection();
EXPECT_CALL(*session_, OnProofValid(testing::_));
EXPECT_CALL(*session_, OnProofVerifyDetailsAvailable(testing::_))
.Times(testing::AnyNumber());
stream()->CryptoConnect();
EXPECT_TRUE(session_->IsEncryptionEstablished());
}
TEST_F(QuicCryptoClientStreamTest, ServerConfigUpdateBeforeHandshake) {
EXPECT_CALL(
*connection_,
CloseConnection(QUIC_CRYPTO_UPDATE_BEFORE_HANDSHAKE_COMPLETE, _, _));
CryptoHandshakeMessage server_config_update;
server_config_update.set_tag(kSCUP);
crypto_test_utils::SendHandshakeMessageToStream(
stream(), server_config_update, Perspective::IS_SERVER);
}
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/quic_crypto_client_stream.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/quic_crypto_client_stream_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
d91e02de-81f7-406d-b4a6-caa95336163b | cpp | tensorflow/tensorflow | overload | tensorflow/lite/experimental/shlo/overload.h | tensorflow/lite/experimental/shlo/overload_test.cc | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_SHLO_OVERLOAD_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_SHLO_OVERLOAD_H_
namespace shlo_ref {
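// Aggregates an arbitrary set of callables into a single functor by
// inheriting from each one and importing every operator(); useful as a
// visitor for std::visit.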
template <class... Ts>
class Overload : public Ts... {
public:
explicit Overload(Ts&&... ts) : Ts(static_cast<Ts&&>(ts))... {}
using Ts::operator()...;
};
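// Deduction guide so that Overload(lambda1, lambda2, ...) deduces Ts.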
template <class... Ts>
Overload(Ts&&...) -> Overload<Ts...>;
}
#endif | #include "tensorflow/lite/experimental/shlo/overload.h"
#include <cstdint>
#include <string>
#include <type_traits>
#include <variant>
#include <gtest/gtest.h>
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
namespace {
TEST(OverloadTest, DispatchConsidersTypeWithAutoFallback) {
auto overloaded = shlo_ref::Overload(
[](int v) -> std::string { return absl::StrCat("int ", v); },
[](double v) -> std::string { return absl::StrCat("double ", v); },
[](const char* v) -> std::string {
return absl::StrCat("const char* ", v);
},
[](auto v) -> std::string { return absl::StrCat("auto ", v); }
);
EXPECT_EQ("int 1", overloaded(1));
EXPECT_EQ("double 2.5", overloaded(2.5));
EXPECT_EQ("const char* hello", overloaded("hello"));
EXPECT_EQ("auto 1.5", overloaded(1.5f));
}
TEST(OverloadTest, DispatchConsidersNumberOfArguments) {
auto overloaded = shlo_ref::Overload(
[](int a) { return a + 1; },
[](int a, int b) { return a * b; },
[]() -> absl::string_view { return "none"; }
);
EXPECT_EQ(3, overloaded(2));
EXPECT_EQ(21, overloaded(3, 7));
EXPECT_EQ("none", overloaded());
}
TEST(OverloadTest, SupportsConstantEvaluation) {
auto overloaded = shlo_ref::Overload(
[](int a) { return a + 1; },
[](int a, int b) { return a * b; },
[]() -> absl::string_view { return "none"; }
);
static_assert(overloaded() == "none");
static_assert(overloaded(2) == 3);
static_assert(overloaded(3, 7) == 21);
}
TEST(OverloadTest, PropogatesDefaults) {
auto overloaded = shlo_ref::Overload(
[](int a, int b = 5) { return a * b; },
[](double c) { return c; }
);
EXPECT_EQ(21, overloaded(3, 7));
EXPECT_EQ(35, overloaded(7));
EXPECT_EQ(2.5, overloaded(2.5));
}
TEST(OverloadTest, AmbiguousWithDefaultsNotInvocable) {
auto overloaded = shlo_ref::Overload(
[](int a, int b = 5) { return a * b; },
[](int c) { return c; }
);
static_assert(!std::is_invocable_v<decltype(overloaded), int>);
static_assert(std::is_invocable_v<decltype(overloaded), int, int>);
}
TEST(OverloadTest, AmbiguousDuplicatesNotInvocable) {
auto overloaded = shlo_ref::Overload(
[](int a) { return a; },
[](int c) { return c; }
);
static_assert(!std::is_invocable_v<decltype(overloaded), int>);
}
TEST(OverloadTest, AmbiguousConversionNotInvocable) {
auto overloaded = shlo_ref::Overload(
[](uint16_t a) { return a; },
[](uint64_t c) { return c; }
);
static_assert(!std::is_invocable_v<decltype(overloaded), int>);
}
TEST(OverloadTest, AmbiguousConversionWithAutoNotInvocable) {
auto overloaded = shlo_ref::Overload(
[](auto a) { return a; },
[](auto c) { return c; }
);
static_assert(!std::is_invocable_v<decltype(overloaded), int>);
}
TEST(OverloadTest, DispatchConsidersSfinae) {
auto overloaded = shlo_ref::Overload(
[](auto a) -> decltype(a + 1) { return a + 1; }
);
static_assert(std::is_invocable_v<decltype(overloaded), int>);
static_assert(!std::is_invocable_v<decltype(overloaded), std::string>);
}
TEST(OverloadTest, VariantVisitDispatchesCorrectly) {
std::variant<int, double, std::string> v(1);
auto overloaded = shlo_ref::Overload(
[](int) -> absl::string_view { return "int"; },
[](double) -> absl::string_view { return "double"; },
[](const std::string&) -> absl::string_view { return "string"; }
);
EXPECT_EQ("int", std::visit(overloaded, v));
v = 1.1;
EXPECT_EQ("double", std::visit(overloaded, v));
v = "hello";
EXPECT_EQ("string", std::visit(overloaded, v));
}
TEST(OverloadTest, VariantVisitWithAutoFallbackDispatchesCorrectly) {
std::variant<std::string, int32_t, int64_t> v(int32_t{1});
auto overloaded =
shlo_ref::Overload([](const std::string& s) { return s.size(); },
[](const auto& s) { return sizeof(s); }
);
EXPECT_EQ(4, std::visit(overloaded, v));
v = int64_t{1};
EXPECT_EQ(8, std::visit(overloaded, v));
v = std::string("hello");
EXPECT_EQ(5, std::visit(overloaded, v));
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/overload.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/overload_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
44a027b4-1a91-443c-bdaf-d979b02d5afa | cpp | tensorflow/tensorflow | tooling_util | tensorflow/lite/toco/tooling_util.cc | tensorflow/lite/toco/tooling_util_test.cc | #include "tensorflow/lite/toco/tooling_util.h"
#include <algorithm>
#include <functional>
#include <iterator>
#include <set>
#include <string>
#include <unordered_map>
#include <unordered_set>
#include <utility>
#include "absl/strings/ascii.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/strings/str_replace.h"
#include "absl/strings/str_split.h"
#include "re2/re2.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/lite/toco/dump_graphviz.h"
#include "tensorflow/lite/toco/model_flags.pb.h"
#include "tensorflow/lite/toco/toco_graphviz_dump_options.h"
namespace toco {
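// Returns the longest prefix shared by `a` and `b`, as a view into `a`'s data.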
absl::string_view FindLongestCommonPrefix(absl::string_view a,
absl::string_view b) {
if (a.empty() || b.empty()) return absl::string_view();
const char* pa = a.data();
const char* pb = b.data();
size_t count = 0;
const size_t limit = std::min(a.size(), b.size());
while (count < limit && *pa == *pb) {
++pa;
++pb;
++count;
}
return absl::string_view(a.data(), count);
}
std::string LogName(const Operator& op) {
const std::string& opname = HelpfulOperatorTypeName(op);
if (op.outputs.empty()) {
return toco::port::StringF("{%s operator}", opname);
} else {
return toco::port::StringF("{%s operator with output %s}", opname,
op.outputs[0]);
}
}
std::string ArrayDataTypeName(ArrayDataType data_type) {
switch (data_type) {
case ArrayDataType::kFloat:
return "float";
case ArrayDataType::kInt8:
return "int8";
case ArrayDataType::kUint8:
return "uint8";
case ArrayDataType::kInt16:
return "int16";
case ArrayDataType::kUint16:
return "uint16";
case ArrayDataType::kInt32:
return "int32";
case ArrayDataType::kUint32:
return "uint32";
case ArrayDataType::kInt64:
return "int64";
case ArrayDataType::kUint64:
return "uint64";
case ArrayDataType::kString:
return "string";
case ArrayDataType::kBool:
return "bool";
case ArrayDataType::kComplex64:
return "complex64";
case ArrayDataType::kNone:
return "None";
default:
LOG(FATAL) << "Unhandled array data type " << static_cast<int>(data_type);
}
}
bool IsInputArray(const Model& model, const std::string& array_name) {
for (const auto& input_array : model.flags.input_arrays()) {
if (array_name == input_array.name()) {
return true;
}
}
return false;
}
bool IsOutputArray(const Model& model, const std::string& array_name) {
for (const auto& output_array : model.flags.output_arrays()) {
if (array_name == output_array) {
return true;
}
}
return false;
}
bool IsArrayConsumed(const Model& model, const std::string& name) {
if (GetOpWithInput(model, name)) {
return true;
}
if (IsOutputArray(model, name)) {
return true;
}
for (const auto& rnn_state : model.flags.rnn_states()) {
if (rnn_state.back_edge_source_array() == name) {
return true;
}
}
return false;
}
int CountTrueOutputs(const Model& model, const Operator& op) {
int count = 0;
for (const std::string& output : op.outputs) {
if (IsArrayConsumed(model, output)) {
++count;
}
}
return count;
}
int CountOpsWithInput(const Model& model, const std::string& array_name) {
int count = 0;
for (const auto& op : model.operators) {
for (auto& input : op->inputs) {
if (input == array_name) {
count++;
break;
}
}
}
return count;
}
bool DeleteArrayIfUnused(const std::string& array_name, Model* model) {
if (IsDiscardableArray(*model, array_name) &&
CountOpsWithInput(*model, array_name) == 0 &&
GetOpWithOutput(*model, array_name) == nullptr) {
model->EraseArray(array_name);
return true;
}
return false;
}
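// Deletes `array_name` only when it is discardable and its only producer
// and/or consumer is `op` itself, i.e. the array would be orphaned once `op`
// is removed.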
bool DeleteArrayIfUnusedOutsideOfOp(const std::string& array_name,
const Operator* op, Model* model) {
if (!IsDiscardableArray(*model, array_name)) {
return false;
}
if (CountOpsWithInput(*model, array_name) > 1) {
return false;
}
const Operator* op_having_this_as_input = GetOpWithInput(*model, array_name);
if (op_having_this_as_input && op_having_this_as_input != op) {
return false;
}
const Operator* op_having_this_as_output =
GetOpWithOutput(*model, array_name);
if (op_having_this_as_output && op_having_this_as_output != op) {
return false;
}
model->EraseArray(array_name);
return true;
}
void DeleteOpAndArrays(Model* model, const Operator* op) {
for (const std::string& array_name : op->inputs) {
DeleteArrayIfUnusedOutsideOfOp(array_name, op, model);
}
for (const std::string& array_name : op->outputs) {
DeleteArrayIfUnusedOutsideOfOp(array_name, op, model);
}
auto op_it = FindOp(*model, op);
CHECK(op_it != model->operators.end());
model->operators.erase(op_it);
}
std::vector<std::unique_ptr<Operator>>::const_iterator FindOpWithOutput(
const Model& model, const std::string& array_name) {
for (auto it = model.operators.begin(); it != model.operators.end(); ++it) {
for (auto& output : it->get()->outputs) {
if (output == array_name) {
return it;
}
}
}
return model.operators.end();
}
std::vector<std::unique_ptr<Operator>>::iterator FindOpWithOutput(
Model& model, const std::string& array_name) {
for (auto it = model.operators.begin(); it != model.operators.end(); ++it) {
for (auto& output : it->get()->outputs) {
if (output == array_name) {
return it;
}
}
}
return model.operators.end();
}
Operator* GetOpWithOutput(const Model& model, const std::string& array_name) {
auto it = FindOpWithOutput(model, array_name);
return it == model.operators.end() ? nullptr : it->get();
}
std::vector<std::unique_ptr<Operator>>::const_iterator FindOpWithInput(
const Model& model, const std::string& array_name) {
for (auto it = model.operators.begin(); it != model.operators.end(); ++it) {
for (auto& input : it->get()->inputs) {
if (input == array_name) {
return it;
}
}
}
return model.operators.end();
}
std::vector<std::unique_ptr<Operator>>::iterator FindOpWithInput(
Model& model, const std::string& array_name) {
for (auto it = model.operators.begin(); it != model.operators.end(); ++it) {
for (auto& input : it->get()->inputs) {
if (input == array_name) {
return it;
}
}
}
return model.operators.end();
}
std::vector<std::unique_ptr<Operator>>::const_iterator FindOp(
const Model& model, const Operator* op) {
for (auto it = model.operators.begin(); it != model.operators.end(); ++it) {
if (it->get() == op) {
return it;
}
}
return model.operators.end();
}
std::vector<std::unique_ptr<Operator>>::iterator FindOp(Model& model,
const Operator* op) {
for (auto it = model.operators.begin(); it != model.operators.end(); ++it) {
if (it->get() == op) {
return it;
}
}
return model.operators.end();
}
Operator* GetOpWithInput(const Model& model, const std::string& array_name) {
auto it = FindOpWithInput(model, array_name);
return it == model.operators.end() ? nullptr : it->get();
}
Operator* GetFirstOpWithInput(const Model& model,
const std::string& array_name) {
auto it = FindOpWithInput(model, array_name);
return it == model.operators.end() ? nullptr : it->get();
}
void ReplaceArrayUsage(Model* model, const std::string& old_array_name,
const std::string& new_array_name) {
for (auto& op_it : model->operators) {
Operator* op = op_it.get();
for (size_t i = 0; i < op->inputs.size(); ++i) {
if (op->inputs[i] == old_array_name) {
op->inputs[i] = new_array_name;
}
}
for (size_t i = 0; i < op->outputs.size(); ++i) {
if (op->outputs[i] == old_array_name) {
op->outputs[i] = new_array_name;
}
}
}
}
std::string FormatArraysList(const Model& model,
const std::vector<std::string>& list) {
if (list.empty()) {
return "[]";
}
std::string result = "";
if (list.size() > 1) {
result += "[ ";
}
for (std::size_t i = 0; i < list.size(); i++) {
if (i > 0) {
result += ", ";
}
result += list[i];
}
if (list.size() > 1) {
result += " ]";
}
return result;
}
const char* OperatorTypeName(OperatorType type) {
switch (type) {
#define HANDLE_OPERATORTYPENAME_CASE(c) \
case OperatorType::k##c: \
return #c;
HANDLE_OPERATORTYPENAME_CASE(Abs)
HANDLE_OPERATORTYPENAME_CASE(Add)
HANDLE_OPERATORTYPENAME_CASE(AddN)
HANDLE_OPERATORTYPENAME_CASE(AveragePool)
HANDLE_OPERATORTYPENAME_CASE(BatchMatMul)
HANDLE_OPERATORTYPENAME_CASE(BatchNormalization)
HANDLE_OPERATORTYPENAME_CASE(Conv)
HANDLE_OPERATORTYPENAME_CASE(Concatenation)
HANDLE_OPERATORTYPENAME_CASE(DepthwiseConv)
HANDLE_OPERATORTYPENAME_CASE(DepthToSpace)
HANDLE_OPERATORTYPENAME_CASE(SpaceToDepth)
HANDLE_OPERATORTYPENAME_CASE(FullyConnected)
HANDLE_OPERATORTYPENAME_CASE(HardSwish)
HANDLE_OPERATORTYPENAME_CASE(Dequantize)
HANDLE_OPERATORTYPENAME_CASE(L2Normalization)
HANDLE_OPERATORTYPENAME_CASE(LocalResponseNormalization)
HANDLE_OPERATORTYPENAME_CASE(Log)
HANDLE_OPERATORTYPENAME_CASE(Logistic)
HANDLE_OPERATORTYPENAME_CASE(LstmCell)
HANDLE_OPERATORTYPENAME_CASE(MaxPool)
HANDLE_OPERATORTYPENAME_CASE(L2Pool)
HANDLE_OPERATORTYPENAME_CASE(FakeQuant)
HANDLE_OPERATORTYPENAME_CASE(Mul)
HANDLE_OPERATORTYPENAME_CASE(RandomUniform)
HANDLE_OPERATORTYPENAME_CASE(Elu)
HANDLE_OPERATORTYPENAME_CASE(Relu)
HANDLE_OPERATORTYPENAME_CASE(Relu1)
HANDLE_OPERATORTYPENAME_CASE(Relu6)
HANDLE_OPERATORTYPENAME_CASE(PRelu)
HANDLE_OPERATORTYPENAME_CASE(ReorderAxes)
HANDLE_OPERATORTYPENAME_CASE(Softmax)
HANDLE_OPERATORTYPENAME_CASE(LogSoftmax)
HANDLE_OPERATORTYPENAME_CASE(Div)
HANDLE_OPERATORTYPENAME_CASE(Tanh)
HANDLE_OPERATORTYPENAME_CASE(Sin)
HANDLE_OPERATORTYPENAME_CASE(All)
HANDLE_OPERATORTYPENAME_CASE(Assert)
HANDLE_OPERATORTYPENAME_CASE(ExpandDims)
HANDLE_OPERATORTYPENAME_CASE(Fill)
HANDLE_OPERATORTYPENAME_CASE(FloorMod)
HANDLE_OPERATORTYPENAME_CASE(FloorDiv)
HANDLE_OPERATORTYPENAME_CASE(Greater)
HANDLE_OPERATORTYPENAME_CASE(GreaterEqual)
HANDLE_OPERATORTYPENAME_CASE(Identity)
HANDLE_OPERATORTYPENAME_CASE(Less)
HANDLE_OPERATORTYPENAME_CASE(LessEqual)
HANDLE_OPERATORTYPENAME_CASE(MatMul)
HANDLE_OPERATORTYPENAME_CASE(ReduceMax)
HANDLE_OPERATORTYPENAME_CASE(Maximum)
HANDLE_OPERATORTYPENAME_CASE(Merge)
HANDLE_OPERATORTYPENAME_CASE(ReduceMin)
HANDLE_OPERATORTYPENAME_CASE(Minimum)
HANDLE_OPERATORTYPENAME_CASE(Neg)
HANDLE_OPERATORTYPENAME_CASE(OneHot)
HANDLE_OPERATORTYPENAME_CASE(Pack)
HANDLE_OPERATORTYPENAME_CASE(Pad)
HANDLE_OPERATORTYPENAME_CASE(PadV2)
HANDLE_OPERATORTYPENAME_CASE(StridedSlice)
HANDLE_OPERATORTYPENAME_CASE(Range)
HANDLE_OPERATORTYPENAME_CASE(Rank)
HANDLE_OPERATORTYPENAME_CASE(Reshape)
HANDLE_OPERATORTYPENAME_CASE(Squeeze)
HANDLE_OPERATORTYPENAME_CASE(Rsqrt)
HANDLE_OPERATORTYPENAME_CASE(SegmentSum)
HANDLE_OPERATORTYPENAME_CASE(Shape)
HANDLE_OPERATORTYPENAME_CASE(Slice)
HANDLE_OPERATORTYPENAME_CASE(Split)
HANDLE_OPERATORTYPENAME_CASE(SplitV)
HANDLE_OPERATORTYPENAME_CASE(Sqrt)
HANDLE_OPERATORTYPENAME_CASE(Square)
HANDLE_OPERATORTYPENAME_CASE(Switch)
HANDLE_OPERATORTYPENAME_CASE(Sub)
HANDLE_OPERATORTYPENAME_CASE(Sum)
HANDLE_OPERATORTYPENAME_CASE(Tile)
HANDLE_OPERATORTYPENAME_CASE(Transpose)
HANDLE_OPERATORTYPENAME_CASE(TransposeConv)
HANDLE_OPERATORTYPENAME_CASE(Concat)
HANDLE_OPERATORTYPENAME_CASE(ConcatV2)
HANDLE_OPERATORTYPENAME_CASE(Cast)
HANDLE_OPERATORTYPENAME_CASE(Floor)
HANDLE_OPERATORTYPENAME_CASE(Ceil)
HANDLE_OPERATORTYPENAME_CASE(Round)
HANDLE_OPERATORTYPENAME_CASE(Gather)
HANDLE_OPERATORTYPENAME_CASE(GatherNd)
HANDLE_OPERATORTYPENAME_CASE(ResizeBilinear)
HANDLE_OPERATORTYPENAME_CASE(SpaceToBatchND)
HANDLE_OPERATORTYPENAME_CASE(BatchToSpaceND)
HANDLE_OPERATORTYPENAME_CASE(Mean)
HANDLE_OPERATORTYPENAME_CASE(ReduceProd)
HANDLE_OPERATORTYPENAME_CASE(Svdf)
HANDLE_OPERATORTYPENAME_CASE(ArgMax)
HANDLE_OPERATORTYPENAME_CASE(ArgMin)
HANDLE_OPERATORTYPENAME_CASE(TopK_V2)
HANDLE_OPERATORTYPENAME_CASE(Unsupported)
HANDLE_OPERATORTYPENAME_CASE(Exp)
HANDLE_OPERATORTYPENAME_CASE(DynamicPartition)
HANDLE_OPERATORTYPENAME_CASE(DynamicStitch)
HANDLE_OPERATORTYPENAME_CASE(Select)
HANDLE_OPERATORTYPENAME_CASE(SparseToDense)
HANDLE_OPERATORTYPENAME_CASE(Equal)
HANDLE_OPERATORTYPENAME_CASE(NotEqual)
HANDLE_OPERATORTYPENAME_CASE(Pow)
HANDLE_OPERATORTYPENAME_CASE(Any)
HANDLE_OPERATORTYPENAME_CASE(LogicalAnd)
HANDLE_OPERATORTYPENAME_CASE(LogicalNot)
HANDLE_OPERATORTYPENAME_CASE(LogicalOr)
HANDLE_OPERATORTYPENAME_CASE(CTCBeamSearchDecoder)
HANDLE_OPERATORTYPENAME_CASE(Unpack)
HANDLE_OPERATORTYPENAME_CASE(ZerosLike)
HANDLE_OPERATORTYPENAME_CASE(UnidirectionalSequenceLstm)
HANDLE_OPERATORTYPENAME_CASE(BidirectionalSequenceLstm)
HANDLE_OPERATORTYPENAME_CASE(BidirectionalSequenceRnn)
HANDLE_OPERATORTYPENAME_CASE(ResizeNearestNeighbor)
HANDLE_OPERATORTYPENAME_CASE(LeakyRelu)
HANDLE_OPERATORTYPENAME_CASE(SquaredDifference)
HANDLE_OPERATORTYPENAME_CASE(MirrorPad)
HANDLE_OPERATORTYPENAME_CASE(Unique)
HANDLE_OPERATORTYPENAME_CASE(UnidirectionalSequenceRnn)
HANDLE_OPERATORTYPENAME_CASE(ReverseV2)
HANDLE_OPERATORTYPENAME_CASE(Cos)
HANDLE_OPERATORTYPENAME_CASE(Where)
HANDLE_OPERATORTYPENAME_CASE(ReverseSequence)
HANDLE_OPERATORTYPENAME_CASE(MatrixDiag)
HANDLE_OPERATORTYPENAME_CASE(MatrixSetDiag)
HANDLE_OPERATORTYPENAME_CASE(MatrixDiagV2)
HANDLE_OPERATORTYPENAME_CASE(MatrixSetDiagV2)
HANDLE_OPERATORTYPENAME_CASE(MatrixDiagV3)
HANDLE_OPERATORTYPENAME_CASE(MatrixSetDiagV3)
HANDLE_OPERATORTYPENAME_CASE(ScatterNd)
default:
LOG(FATAL) << "Unhandled op type";
#undef HANDLE_OPERATORTYPENAME_CASE
}
}
std::string HelpfulOperatorTypeName(const Operator& op) {
if (op.type == OperatorType::kUnsupported) {
return toco::port::StringF(
"(Unsupported TensorFlow op: %s)",
static_cast<const TensorFlowUnsupportedOperator&>(op).tensorflow_op);
}
return OperatorTypeName(op.type);
}
bool OperatorSupportsFusedActivation(OperatorType type) {
switch (type) {
case OperatorType::kAdd:
case OperatorType::kAveragePool:
case OperatorType::kBatchNormalization:
case OperatorType::kConv:
case OperatorType::kDepthwiseConv:
case OperatorType::kDiv:
case OperatorType::kFullyConnected:
case OperatorType::kL2Pool:
case OperatorType::kMaxPool:
case OperatorType::kMul:
case OperatorType::kSub:
case OperatorType::kSquaredDifference:
return true;
default:
return false;
}
}
void LogSummary(int log_level, const Model& model) {
VLOG(log_level) << "Operators summary (" << model.operators.size()
<< " operators):";
std::unordered_multiset<OperatorType> ops_by_type;
for (const auto& op : model.operators) {
ops_by_type.insert(op->type);
}
auto it = ops_by_type.begin();
while (it != ops_by_type.end()) {
int count = ops_by_type.count(*it);
VLOG(log_level) << " " << OperatorTypeName(*it) << ": " << count;
std::advance(it, count);
}
}
void LogArray(int log_level, const Model& model, const std::string& name) {
VLOG(log_level) << "Array: " << name;
if (!model.HasArray(name)) {
VLOG(log_level) << " DOES NOT EXIST";
return;
}
const auto& array = model.GetArray(name);
VLOG(log_level) << " Data type: " << ArrayDataTypeName(array.data_type);
VLOG(log_level) << " Final type: "
<< ArrayDataTypeName(array.final_data_type);
if (array.buffer) {
VLOG(log_level) << " Constant Buffer";
}
if (array.alloc) {
VLOG(log_level) << " Transient Alloc";
}
if (array.has_shape()) {
const Shape& array_shape = array.shape();
if (array_shape.dimensions_count() == 0) {
VLOG(log_level) << " (Zero dimensions)";
} else {
std::string message = " Dims: ";
bool first = true;
for (const int dim : array_shape.dims()) {
if (!first) {
message += ", ";
}
first = false;
toco::port::AppendF(&message, "%d", dim);
}
VLOG(log_level) << message;
}
}
if (array.minmax) {
VLOG(log_level) << " MinMax: " << array.minmax->min << " .. "
<< array.minmax->max;
}
if (array.quantization_params) {
VLOG(log_level) << " QuantizationParams: zero_point="
<< static_cast<int>(array.quantization_params->zero_point)
<< ", scale=" << array.quantization_params->scale;
}
}
void DumpGraphvizVideoFrame(const Model& model) {
namespace port = toco::port;
const auto& dump_options = *GraphVizDumpOptions::singleton();
if (!dump_options.dump_graphviz_video) {
return;
}
CHECK(!dump_options.dump_graphviz.empty());
static int dump_id = 0;
static std::unordered_set<std::size_t> dump_hashes;
std::string graphviz_dump;
DumpGraphviz(model, &graphviz_dump,
toco::port::StringF("VIDEO frame:%05d", dump_id));
std::size_t hash = std::hash<std::string>{}(graphviz_dump);
if (!dump_hashes.count(hash)) {
LOG(INFO) << "DUMPING GRAPHVIZ VIDEO FRAME: " << dump_id;
dump_hashes.insert(hash);
const auto result = port::file::SetContents(
port::file::JoinPath(
dump_options.dump_graphviz,
toco::port::StringF("toco_video_%05d.dot", dump_id)),
graphviz_dump, port::file::Defaults());
QCHECK(result.ok()) << result.message();
dump_id++;
}
}
void LogDump(int log_level, const std::string& message, const Model& model) {
namespace port = toco::port;
const auto& dump_options = *GraphVizDumpOptions::singleton();
DumpGraphvizVideoFrame(model);
if (!dump_options.dump_graphviz.empty()) {
std::string graphviz_dump;
DumpGraphviz(model, &graphviz_dump, message);
const auto result = port::file::SetContents(
port::file::JoinPath(
dump_options.dump_graphviz,
absl::StrCat("toco_", absl::StrReplaceAll(message, {{" ", "_"}}),
".dot")),
graphviz_dump, port::file::Defaults());
QCHECK(result.ok()) << result.message();
}
if (!VLOG_IS_ON(log_level)) {
return;
}
VLOG(log_level) << "BEGIN DUMP OF TOCO MODEL (" << message << ")";
LogSummary(log_level, model);
std::unordered_set<std::string> already_printed_arrays;
for (const auto& op : model.operators) {
for (const auto& input : op->inputs) {
if (!already_printed_arrays.count(input)) {
already_printed_arrays.insert(input);
LogArray(log_level, model, input);
}
}
VLOG(log_level) << HelpfulOperatorTypeName(*op) << " :";
VLOG(log_level) << " " << FormatArraysList(model, op->inputs) << " -> "
<< FormatArraysList(model, op->outputs);
if (op->fused_activation_function != FusedActivationFunctionType::kNone) {
VLOG(log_level) << " (with fused activation function)";
}
for (const auto& output : op->outputs) {
if (!already_printed_arrays.count(output)) {
already_printed_arrays.insert(output);
LogArray(log_level, model, output);
}
}
}
VLOG(log_level) << "END DUMP OF TOCO MODEL (" << message << ")";
}
void ExtendShape(Shape* shape, int new_shape_size) {
CHECK_GE(new_shape_size, shape->dimensions_count());
const int size_increase = new_shape_size - shape->dimensions_count();
auto* shape_dims = shape->mutable_dims();
shape_dims->insert(shape_dims->begin(), size_increase, 1);
}
void UnextendShape(Shape* shape, int new_shape_size) {
CHECK_LE(new_shape_size, shape->dimensions_count());
const int size_reduction = shape->dimensions_count() - new_shape_size;
for (int i = 0; i < size_reduction; i++) {
CHECK_EQ(shape->dims(i), 1);
}
std::vector<int>& shape_dims = *shape->mutable_dims();
shape_dims.erase(shape_dims.begin(), shape_dims.begin() + size_reduction);
}
template <typename Dims>
void CheckValidShapeDimensions(const Dims& dims) {
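  // A shape of exactly [0] is tolerated as a special empty-array case; every
  // other dimension must be at least 1.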
if (dims.size() == 1 && dims[0] == 0) {
return;
}
for (const auto& dim : dims) {
CHECK_GE(dim, 1);
}
}
void CheckValidShape(const Shape& shape) {
CheckValidShapeDimensions(shape.dims());
}
bool IsNonEmpty(const Shape& shape) {
for (int i = 0; i < shape.dimensions_count(); ++i) {
if (shape.dims(i) < 1) return false;
}
return true;
}
void CheckNonEmptyShapeDimensions(const Shape& shape) {
for (int i = 0; i < shape.dimensions_count(); ++i) {
    CHECK_GE(shape.dims()[i], 1) << "shape has dimension 0 at index " << i
                                 << ". shape = " << ShapeToString(shape);
}
}
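// Checks compatibility under numpy-style broadcasting: trailing dimensions
// are compared pairwise and must either match or be 1.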
bool ShapesAgreeUpToBroadcasting(const Shape& shape0, const Shape& shape1) {
CheckNonEmptyShapeDimensions(shape0);
CheckNonEmptyShapeDimensions(shape1);
const Shape* longer = &shape0;
const Shape* shorter = &shape1;
if (shape1.dimensions_count() > shape0.dimensions_count()) {
longer = &shape1;
shorter = &shape0;
}
int longer_index = longer->dimensions_count() - 1;
int shorter_index = shorter->dimensions_count() - 1;
while (shorter_index >= 0) {
const int d_long = longer->dims(longer_index);
const int d_short = shorter->dims(shorter_index);
if ((d_long != d_short) && (d_long != 1) && (d_short != 1)) {
return false;
}
longer_index--;
shorter_index--;
}
return true;
}
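// Stricter than broadcasting: trailing dimensions must match exactly, and any
// extra leading dimensions of the longer shape must all be 1.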
bool ShapesAgreeUpToExtending(const Shape& shape0, const Shape& shape1) {
CheckNonEmptyShapeDimensions(shape0);
CheckNonEmptyShapeDimensions(shape1);
const Shape* longer = &shape0;
const Shape* shorter = &shape1;
if (shape1.dimensions_count() > shape0.dimensions_count()) {
longer = &shape1;
shorter = &shape0;
}
int longer_index = longer->dimensions_count() - 1;
int shorter_index = shorter->dimensions_count() - 1;
while (shorter_index >= 0) {
const int d_long = longer->dims(longer_index);
const int d_short = shorter->dims(shorter_index);
if (d_long != d_short) {
return false;
}
longer_index--;
shorter_index--;
}
while (longer_index >= 0) {
const int d_long = longer->dims(longer_index);
if (d_long != 1) {
return false;
}
longer_index--;
}
return true;
}
int RequiredBufferSizeForShape(const Shape& shape) {
CheckValidShape(shape);
int max_offset = 1;
for (const auto& dim : shape.dims()) {
max_offset *= dim;
}
return max_offset;
}
bool IsConstantParameterArray(const Model& model, const std::string& name) {
if (!model.HasArray(name)) {
return false;
}
return !!model.GetArray(name).buffer;
}
namespace {
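// Element-wise equality of two constant arrays' buffers; both arrays must be
// constant and hold the same data type and element count.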
template <ArrayDataType A>
bool CompareArrayBuffers(const Array& lhs_array, const Array& rhs_array) {
CHECK(lhs_array.data_type == rhs_array.data_type) << "Data types must match";
CHECK(lhs_array.buffer) << "LHS must be constant";
CHECK(rhs_array.buffer) << "RHS must be constant";
const auto& lhs_data = lhs_array.GetBuffer<A>().data;
const auto& rhs_data = rhs_array.GetBuffer<A>().data;
CHECK_EQ(lhs_data.size(), rhs_data.size())
<< "Buffer sizes must match in element count";
for (int i = 0; i < lhs_data.size(); ++i) {
if (lhs_data[i] != rhs_data[i]) {
return false;
}
}
return true;
}
bool HaveSameMinMax(const Array& lhs_array, const Array& rhs_array) {
if (lhs_array.minmax || rhs_array.minmax) {
if (!lhs_array.minmax || !rhs_array.minmax) {
return false;
}
if (!(*lhs_array.minmax == *rhs_array.minmax)) {
return false;
}
}
return true;
}
bool HaveSameQuantizationParams(const Array& lhs_array,
const Array& rhs_array) {
if (lhs_array.quantization_params || rhs_array.quantization_params) {
if (!lhs_array.quantization_params || !rhs_array.quantization_params) {
return false;
}
if (!(*lhs_array.quantization_params == *rhs_array.quantization_params)) {
return false;
}
}
return true;
}
}
bool CompareConstantArrays(const Array& lhs_array, const Array& rhs_array) {
bool attrs_equal = lhs_array.shape() == rhs_array.shape() &&
lhs_array.data_type == rhs_array.data_type &&
lhs_array.final_data_type == rhs_array.final_data_type &&
HaveSameMinMax(lhs_array, rhs_array) &&
HaveSameQuantizationParams(lhs_array, rhs_array) &&
lhs_array.narrow_range == rhs_array.narrow_range;
if (!attrs_equal) {
return false;
}
switch (lhs_array.data_type) {
case ArrayDataType::kBool:
return CompareArrayBuffers<ArrayDataType::kBool>(lhs_array, rhs_array);
case ArrayDataType::kFloat:
return CompareArrayBuffers<ArrayDataType::kFloat>(lhs_array, rhs_array);
case ArrayDataType::kInt8:
return CompareArrayBuffers<ArrayDataType::kInt8>(lhs_array, rhs_array);
case ArrayDataType::kUint8:
return CompareArrayBuffers<ArrayDataType::kUint8>(lhs_array, rhs_array);
case ArrayDataType::kInt16:
return CompareArrayBuffers<ArrayDataType::kInt16>(lhs_array, rhs_array);
case ArrayDataType::kUint16:
return CompareArrayBuffers<ArrayDataType::kUint16>(lhs_array, rhs_array);
case ArrayDataType::kInt32:
return CompareArrayBuffers<ArrayDataType::kInt32>(lhs_array, rhs_array);
case ArrayDataType::kUint32:
return CompareArrayBuffers<ArrayDataType::kUint32>(lhs_array, rhs_array);
case ArrayDataType::kInt64:
return CompareArrayBuffers<ArrayDataType::kInt64>(lhs_array, rhs_array);
case ArrayDataType::kUint64:
return CompareArrayBuffers<ArrayDataType::kUint64>(lhs_array, rhs_array);
case ArrayDataType::kString:
return CompareArrayBuffers<ArrayDataType::kString>(lhs_array, rhs_array);
case ArrayDataType::kComplex64:
return CompareArrayBuffers<ArrayDataType::kComplex64>(lhs_array,
rhs_array);
default:
LOG(FATAL) << "Unsupported data type: "
<< ArrayDataTypeName(lhs_array.data_type);
return false;
}
}
namespace {
std::string SanitizeNameForTFNode(const std::string& array_name) {
auto node_name = array_name;
std::replace(node_name.begin(), node_name.end(), ':', '_');
return node_name;
}
void CheckInputArraysAreNotOutputArrays(const ModelFlags& model_flags) {
for (const auto& input_array : model_flags.input_arrays()) {
for (const std::string& output_array : model_flags.output_arrays()) {
QCHECK_NE(input_array.name(), output_array)
<< "The array " << output_array
<< " is listed in both --input_arrays and --output_arrays.";
}
}
}
bool IsAsciiPrintable(const std::string& name) {
for (char c : name) {
if (!absl::ascii_isprint(c)) {
return false;
}
}
return true;
}
std::string DumpAscii(const std::string& name) {
std::string result;
port::AppendF(&result, "ASCII | Hex\n");
port::AppendF(&result, "------+----\n");
for (char c : name) {
if (absl::ascii_isprint(c)) {
port::AppendF(&result, "%c | %x\n", c, c);
} else {
port::AppendF(&result, " | %x Not ASCII printable!\n", c);
}
}
return result;
}
void CheckNonAsciiIOArrays(const ModelFlags& model_flags) {
if (model_flags.allow_nonascii_arrays()) {
return;
}
for (const auto& input_array : model_flags.input_arrays()) {
QCHECK(IsAsciiPrintable(input_array.name()))
<< "Non-ASCII-printable character found in --input_arrays: "
<< input_array.name()
<< ". Pass --allow_nonascii_arrays to allow that. "
<< "Here is a dump of the string:\n\n"
<< DumpAscii(input_array.name());
}
for (const std::string& output_array : model_flags.output_arrays()) {
QCHECK(IsAsciiPrintable(output_array))
<< "Non-ASCII-printable character found in --output_arrays: "
<< output_array << ". Pass --allow_nonascii_arrays to allow that. "
<< "Here is a dump of the string:\n\n"
<< DumpAscii(output_array);
}
}
void CheckNonExistentIOArrays(const Model& model) {
if (model.flags.allow_nonexistent_arrays()) {
return;
}
static constexpr char general_comment[] =
"Is it a typo? This should not happen. If you trigger this error "
"please send a bug report (with code to reproduce this error), to the "
"TensorFlow Lite team.";
for (const std::string& output_array : model.flags.output_arrays()) {
if (IsConstantParameterArray(model, output_array)) {
continue;
}
QCHECK(GetOpWithOutput(model, output_array))
<< "Specified output array \"" << output_array
<< "\" is not produced by any op in this graph. " << general_comment;
}
for (const auto& rnn_state : model.flags.rnn_states()) {
if (!rnn_state.discardable()) {
QCHECK(GetOpWithInput(model, rnn_state.state_array()))
<< "Specified RNN state \"" << rnn_state.state_array()
<< "\" is not consumed by any op in this graph. " << general_comment;
QCHECK(GetOpWithOutput(model, rnn_state.back_edge_source_array()))
<< "Specified RNN back-edge source array \""
<< rnn_state.back_edge_source_array()
<< "\" is not produced by any op in this graph. " << general_comment;
}
}
}
}
void CheckNoMissingArray(const Model& model) {
for (const auto& op : model.operators) {
for (const auto& input : op->inputs) {
CHECK(model.HasArray(input) || model.optional_arrays.count(input))
<< "Input: " << input << " missing for op: " << op->outputs[0] << ".";
}
for (const auto& output : op->outputs) {
CHECK(model.HasArray(output)) << "Output: " << output << " missing.";
}
}
CheckNonExistentIOArrays(model);
}
void FixNoMissingArray(Model* model) {
for (const auto& op : model->operators) {
for (const auto& input : op->inputs) {
if (!model->HasArray(input) && !model->IsOptionalArray(input)) {
model->GetOrCreateArray(input);
}
}
for (const auto& output : op->outputs) {
if (!model->HasArray(output) && !model->IsOptionalArray(output)) {
model->GetOrCreateArray(output);
}
}
}
if (model->flags.allow_nonexistent_arrays()) {
for (const std::string& output_array : model->flags.output_arrays()) {
model->GetOrCreateArray(output_array);
}
for (const auto& rnn_state : model->flags.rnn_states()) {
model->GetOrCreateArray(rnn_state.state_array());
model->GetOrCreateArray(rnn_state.back_edge_source_array());
}
}
}
void CheckNoOrphanedArray(const Model& model) {
std::unordered_set<std::string> arrays_without_known_use;
for (const auto& array : model.GetArrayMap()) {
if (IsDiscardableArray(model, array.first)) {
arrays_without_known_use.insert(array.first);
}
}
for (const auto& op : model.operators) {
for (const auto& input : op->inputs) {
arrays_without_known_use.erase(input);
}
for (const auto& output : op->outputs) {
arrays_without_known_use.erase(output);
}
}
for (const auto& rnn_state : model.flags.rnn_states()) {
arrays_without_known_use.erase(rnn_state.state_array());
arrays_without_known_use.erase(rnn_state.back_edge_source_array());
}
if (!arrays_without_known_use.empty()) {
for (const auto& array : arrays_without_known_use) {
LOG(INFO) << "Error: Orphaned array: " << array;
}
}
CHECK(arrays_without_known_use.empty());
}
void FixNoOrphanedArray(Model* model) {
std::unordered_set<std::string> arrays_without_known_use;
for (const auto& array : model->GetArrayMap()) {
arrays_without_known_use.insert(array.first);
}
for (const auto& op : model->operators) {
for (const auto& input : op->inputs) {
arrays_without_known_use.erase(input);
}
for (const auto& output : op->outputs) {
arrays_without_known_use.erase(output);
}
}
for (const auto& rnn_state : model->flags.rnn_states()) {
arrays_without_known_use.erase(rnn_state.state_array());
arrays_without_known_use.erase(rnn_state.back_edge_source_array());
}
for (const auto& array : arrays_without_known_use) {
if (IsDiscardableArray(*model, array)) {
model->EraseArray(array);
}
}
}
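// Validates per-array invariants: a constant buffer excludes a transient
// allocation, buffer type and size must match the declared shape, and names
// must follow the TensorFlow "name" or "name:N" convention (only digits after
// a colon, no leading colon).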
void CheckEachArray(const Model& model) {
for (const auto& array_entry : model.GetArrayMap()) {
const auto& array = array_entry.second;
CHECK(!array->buffer || !array->alloc) << "Tensor: " << array_entry.first;
if (array->buffer) {
CHECK(array->buffer->type == array->data_type)
<< "Tensor: " << array_entry.first;
CHECK(array->has_shape()) << array_entry.first;
CheckValidShape(array->shape());
CHECK_EQ(array->buffer->Length(),
RequiredBufferSizeForShape(array->shape()))
<< "Tensor: " << array_entry.first;
}
const std::string& name = array_entry.first;
auto colon_pos = name.find_first_of(':');
if (colon_pos != std::string::npos) {
CHECK_EQ(name.substr(colon_pos + 1).find_first_not_of("0123456789"),
std::string::npos)
<< "Array '" << name << "' has non-digit characters after colon.";
}
CHECK_GT(colon_pos, 0) << "Array '" << name
<< "' must not start with a colon.";
}
}
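// Verifies that the operator list is topologically ordered: every
// non-constant input must already be produced (or be a graph input) before
// the op that consumes it, and no array may be produced twice.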
void CheckOperatorOrdering(const Model& model) {
std::unordered_set<std::string> arrays_behind_us;
for (const auto& array_entry : model.GetArrayMap()) {
if (!GetOpWithOutput(model, array_entry.first)) {
arrays_behind_us.insert(array_entry.first);
}
}
arrays_behind_us.insert(model.optional_arrays.begin(),
model.optional_arrays.end());
for (const auto& op : model.operators) {
for (const auto& input : op->inputs) {
if (!IsConstantParameterArray(model, input)) {
CHECK(arrays_behind_us.count(input));
}
}
for (const auto& output : op->outputs) {
CHECK(!arrays_behind_us.count(output));
arrays_behind_us.insert(output);
}
}
for (const std::string& output_array : model.flags.output_arrays()) {
CHECK(arrays_behind_us.count(output_array));
}
}
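// Greedily re-sorts the operators into a valid topological order by
// repeatedly appending any op whose inputs are all available. If no complete
// ordering exists, walks back along producers of the offending input to log a
// trace identifying the root cause (a missing producer, or a cycle) and dies.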
void FixOperatorOrdering(Model* model) {
std::unordered_set<std::string> arrays_behind_us;
for (const auto& array_entry : model->GetArrayMap()) {
if (!GetOpWithOutput(*model, array_entry.first)) {
arrays_behind_us.insert(array_entry.first);
}
}
arrays_behind_us.insert(model->optional_arrays.begin(),
model->optional_arrays.end());
std::vector<std::unique_ptr<Operator>> old_operators;
std::swap(old_operators, model->operators);
std::set<std::size_t> remaining;
for (std::size_t i = 0; i < old_operators.size(); i++) {
remaining.insert(i);
}
std::unordered_map<std::string, std::string> reason_why_leftover;
while (true) {
bool inserted_something = false;
for (const auto& i : remaining) {
bool can_insert = true;
auto& op = old_operators[i];
CHECK(op);
for (const auto& input : op->inputs) {
if (!IsConstantParameterArray(*model, input) &&
!arrays_behind_us.count(input)) {
for (const std::string& output : op->outputs) {
reason_why_leftover[output] = input;
}
can_insert = false;
break;
}
}
if (can_insert) {
model->operators.emplace_back(nullptr);
for (const auto& output : op->outputs) {
arrays_behind_us.insert(output);
}
std::swap(op, model->operators.back());
remaining.erase(i);
inserted_something = true;
break;
}
}
if (!inserted_something) {
break;
}
}
if (!remaining.empty()) {
LOG(ERROR)
<< "No viable ordering of operators was found. "
<< "Here is a 'backtrace' of at least one part of the graph that is "
<< "problematic. It starts with the first operator that has as "
<< "problematic input array, and then walks back the graph to "
<< "the operator that produced that input array, etc., until we find "
<< "the root cause:";
LOG(ERROR) << "BEGIN TRACE OF OPERATOR WITH BAD INPUT";
LOG(ERROR) << "Here is the first-encountered operator with a bad input: ";
const Operator* bad_op = old_operators[*remaining.begin()].get();
std::unordered_set<std::string> bad_inputs_already_traced;
while (true) {
LOG(ERROR) << HelpfulOperatorTypeName(*bad_op) << " : "
<< FormatArraysList(*model, bad_op->inputs) << " -> "
<< FormatArraysList(*model, bad_op->outputs);
bool found_bad_output = false;
std::string bad_output;
for (const std::string& output : bad_op->outputs) {
if (reason_why_leftover.count(output)) {
found_bad_output = true;
bad_output = output;
break;
}
}
CHECK(found_bad_output);
const std::string& bad_input = reason_why_leftover[bad_output];
LOG(ERROR) << "The bad input here is: " << bad_input;
if (bad_inputs_already_traced.count(bad_input)) {
LOG(FATAL)
<< "Cycle found! We already encountered that "
<< "input array, " << bad_input << ", earlier in the "
<< "above trace! We expect graphs to be acyclic, even "
<< "RNNs. Let us know if some graph actually needs to have "
<< "cycles, but first, please check if it really is "
<< "an *inference* graph. *Training* graphs are out-of-scope "
<< "for toco.";
}
bad_inputs_already_traced.insert(bad_input);
bad_op = nullptr;
for (const auto& i : remaining) {
const Operator* op = old_operators[i].get();
for (const std::string& output : op->outputs) {
if (bad_input == output) {
bad_op = op;
break;
}
}
if (bad_op) {
break;
}
}
if (!bad_op) {
LOG(ERROR) << "And that's the root cause: "
<< "that array, " << bad_input << ", isn't produced by any "
<< "operator, or provided in any other way.";
LOG(ERROR) << "END TRACE OF OPERATOR WITH BAD INPUT";
LOG(FATAL) << "(The above was a multi-line fatal error)";
}
LOG(ERROR) << "And that array is the output of the following operator:";
}
}
CHECK(remaining.empty())
<< "Should never get here! In case of bad graph, "
<< "the above code should have generated a FATAL error already!";
}
void CheckInvariants(const Model& model) {
CheckInputArraysAreNotOutputArrays(model.flags);
CheckNonAsciiIOArrays(model.flags);
CheckNoMissingArray(model);
CheckNoOrphanedArray(model);
CheckEachArray(model);
CheckOperatorOrdering(model);
}
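// Enforces count_min as a lower bound when non-negative and count_max as an
// upper bound only when it exceeds count_min; when the two coincide, the
// message reports count_min as the exact expected value.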
void CheckCountInRange(const ::toco::ModelFlags::ModelCheck& model_check,
const int count, const std::string& count_description) {
if (model_check.count_min() >= 0) {
CHECK_GE(count, model_check.count_min())
<< "Mismatch in " << count_description << ": count was " << count
<< ", but the specified "
<< (model_check.count_max() > model_check.count_min() ? "minimum"
: "value")
<< " was " << model_check.count_min() << ".";
}
if (model_check.count_max() > model_check.count_min()) {
CHECK_LE(count, model_check.count_max())
<< "Mismatch in " << count_description << ": count was " << count
<< ", but the specified maximum was " << model_check.count_max() << ".";
}
}
void CheckModelCounts(const Model& model) {
std::unordered_multiset<OperatorType> ops_by_type;
std::unordered_map<std::string, OperatorType> op_type_by_name;
if (model.flags.model_checks_size() == 0) {
return;
}
for (const auto& op : model.operators) {
ops_by_type.insert(op->type);
op_type_by_name[OperatorTypeName(op->type)] = op->type;
}
for (const auto& model_check : model.flags.model_checks()) {
std::string count_type = model_check.count_type();
if (count_type == "None") {
continue;
} else if (count_type == "Arrays") {
CheckCountInRange(model_check, model.GetArrayMap().size(),
"count of arrays");
} else if (count_type == "Total") {
CheckCountInRange(model_check, model.operators.size(),
"count of all operator instances");
} else {
const int found_count =
op_type_by_name.count(count_type) > 0
? ops_by_type.count(op_type_by_name[count_type])
: 0;
CheckCountInRange(model_check, found_count,
"count of instances of " + count_type + " operator");
}
}
}
void FixEdgeArrays(Model* model) {
for (const std::string& output_array_name : model->flags.output_arrays()) {
if (!GetOpWithOutput(*model, output_array_name)) {
LOG(WARNING) << "Fixing constant output array " << output_array_name
<< " by inserting a copy. This is not optimal.";
std::string intermediate_array_name =
AvailableArrayName(*model, output_array_name + "_copy");
CloneArray(model, output_array_name, intermediate_array_name);
InsertCopyOperator(model, intermediate_array_name, output_array_name);
}
}
}
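// Folds duplicate constant arrays: for any two identical constants at least
// `min_size` bytes large (strings are always compared), usages of the
// discardable duplicate are rewritten to point at the first copy and the
// duplicate is erased. The size threshold keeps the pairwise scan affordable.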
void DedupeConstantArrays(Model* model, size_t min_size) {
const auto& array_map = model->GetArrayMap();
for (auto lhs_array_it = array_map.begin(); lhs_array_it != array_map.end();
++lhs_array_it) {
const auto& lhs_array_name = lhs_array_it->first;
const auto& lhs_array = *lhs_array_it->second;
if (!IsConstantParameterArray(*model, lhs_array_name)) {
continue;
}
ArrayDataType final_data_type =
lhs_array.final_data_type != ArrayDataType::kNone
? lhs_array.final_data_type
: lhs_array.data_type;
if (final_data_type != ArrayDataType::kString) {
size_t array_byte_size =
lhs_array.buffer->Length() * ElementSize(final_data_type);
if (array_byte_size < min_size) {
continue;
}
}
auto next_lhs_array_it = lhs_array_it;
++next_lhs_array_it;
for (auto rhs_array_it = next_lhs_array_it;
rhs_array_it != array_map.end();) {
const auto& rhs_array_name = rhs_array_it->first;
const auto& rhs_array = *rhs_array_it->second;
++rhs_array_it;
if (!IsConstantParameterArray(*model, rhs_array_name)) {
continue;
}
if (!IsDiscardableArray(*model, rhs_array_name)) {
continue;
}
if (!CompareConstantArrays(lhs_array, rhs_array)) {
continue;
}
VLOG(1) << "Deduplicating arrays; using " << lhs_array_name
<< " in place of " << rhs_array_name;
ReplaceArrayUsage(model, rhs_array_name, lhs_array_name);
model->EraseArray(rhs_array_name);
}
}
}
namespace {
void CopyArrayAttribs(const Array& source_array, Array* target_array) {
target_array->data_type = source_array.data_type;
target_array->final_data_type = source_array.final_data_type;
if (source_array.has_shape()) {
target_array->copy_shape(source_array.shape());
}
if (source_array.minmax) {
target_array->GetOrCreateMinMax() = source_array.GetMinMax();
} else {
target_array->minmax.reset();
}
if (source_array.quantization_params) {
target_array->GetOrCreateQuantizationParams() =
source_array.GetQuantizationParams();
} else {
target_array->quantization_params.reset();
}
}
}
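// Emits a copy from source to target, implemented as a Reshape whose target
// shape equals the source shape, i.e. an identity copy.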
void InsertCopyOperator(Model* model, const std::string& source_array_name,
const std::string& target_array_name) {
const Array& source_array = model->GetArray(source_array_name);
std::vector<int> shape = source_array.shape().dims();
Array& target_array = model->GetOrCreateArray(target_array_name);
target_array.buffer.reset();
CopyArrayAttribs(source_array, &target_array);
auto* copy_op = new TensorFlowReshapeOperator;
copy_op->inputs = {
source_array_name,
CreateInt32Array(
model, AvailableArrayName(*model, target_array_name + "_copy_shape"),
shape)};
copy_op->outputs = {target_array_name};
if (target_array.has_shape()) {
copy_op->shape = target_array.shape().dims();
}
model->operators.emplace_back(copy_op);
}
void CloneArray(Model* model, const std::string& source_array_name,
const std::string& target_array_name) {
CHECK(!model->HasArray(target_array_name));
const Array& source_array = model->GetArray(source_array_name);
Array& target_array = model->GetOrCreateArray(target_array_name);
CopyArrayAttribs(source_array, &target_array);
if (!source_array.buffer) {
return;
}
switch (source_array.data_type) {
case ArrayDataType::kBool:
CopyArrayBuffer<ArrayDataType::kBool>(source_array, &target_array);
break;
case ArrayDataType::kFloat:
CopyArrayBuffer<ArrayDataType::kFloat>(source_array, &target_array);
break;
case ArrayDataType::kInt8:
CopyArrayBuffer<ArrayDataType::kInt8>(source_array, &target_array);
break;
case ArrayDataType::kUint8:
CopyArrayBuffer<ArrayDataType::kUint8>(source_array, &target_array);
break;
case ArrayDataType::kInt16:
CopyArrayBuffer<ArrayDataType::kInt16>(source_array, &target_array);
break;
case ArrayDataType::kUint16:
CopyArrayBuffer<ArrayDataType::kUint16>(source_array, &target_array);
break;
case ArrayDataType::kInt32:
CopyArrayBuffer<ArrayDataType::kInt32>(source_array, &target_array);
break;
case ArrayDataType::kUint32:
CopyArrayBuffer<ArrayDataType::kUint32>(source_array, &target_array);
break;
case ArrayDataType::kInt64:
CopyArrayBuffer<ArrayDataType::kInt64>(source_array, &target_array);
break;
case ArrayDataType::kUint64:
CopyArrayBuffer<ArrayDataType::kUint64>(source_array, &target_array);
break;
case ArrayDataType::kString:
CopyArrayBuffer<ArrayDataType::kString>(source_array, &target_array);
break;
case ArrayDataType::kComplex64:
CopyArrayBuffer<ArrayDataType::kComplex64>(source_array, &target_array);
break;
default:
LOG(FATAL) << "Unsupported data type: "
<< ArrayDataTypeName(source_array.data_type);
return;
}
}
void MakeArrayDims(int num_dims, int batch, int height, int width, int depth,
std::vector<int>* out_dims) {
CHECK(out_dims->empty());
if (num_dims == 0) {
return;
} else if (num_dims == 1) {
CHECK_EQ(batch, 1);
*out_dims = {depth};
} else if (num_dims == 2) {
*out_dims = {batch, depth};
} else if (num_dims == 3) {
CHECK_EQ(batch, 1);
*out_dims = {height, width, depth};
} else if (num_dims == 4) {
*out_dims = {batch, height, width, depth};
} else {
LOG(FATAL) << "Should not get here: " << num_dims;
}
}
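// Ensures the named RNN state array exists; if it has no shape yet, derives
// one with `state_num_dims` dimensions (or the rank of the model's input
// arrays) and depth `size`, with the remaining dimensions defaulting to 1.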
void CreateOrCheckRnnStateArray(const std::string& name, int size,
int state_num_dims, Model* model) {
int batch = 1;
int num_dims = -1;
if (state_num_dims > 0) {
num_dims = state_num_dims;
} else {
for (const auto& input_array : model->flags.input_arrays()) {
if (input_array.name() == name || num_dims == -1) {
num_dims = input_array.shape().dims_size();
if (num_dims > 0) {
batch = input_array.shape().dims(0);
}
}
}
}
Array& array = model->GetOrCreateArray(name);
if (array.has_shape()) {
num_dims = array.shape().dimensions_count();
}
if (!array.has_shape() && num_dims >= 0) {
Shape* shape = array.mutable_shape();
std::vector<int> dims;
MakeArrayDims(num_dims, batch, 1, 1, size, &dims);
*shape->mutable_dims() = dims;
}
}
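// Merges command-line ModelFlags into the flags already stored on the model,
// QCHECK-failing on any disagreement, then materializes input arrays (data
// type, shape, and a MinMax derived from mean/std) and RNN state arrays.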
void ResolveModelFlags(const ModelFlags& model_flags, Model* model) {
for (const auto& specified_input_array : model_flags.input_arrays()) {
toco::InputArray* dst_input_array = nullptr;
for (int i = 0; i < model->flags.input_arrays_size(); i++) {
toco::InputArray* candidate_dst_input_array =
model->flags.mutable_input_arrays(i);
if (candidate_dst_input_array->name() == specified_input_array.name()) {
dst_input_array = candidate_dst_input_array;
break;
}
}
if (!dst_input_array) {
if (model->flags.input_arrays_size() == 1 &&
model_flags.input_arrays_size() == 1 &&
!specified_input_array.has_name()) {
dst_input_array = model->flags.mutable_input_arrays(0);
}
}
if (!dst_input_array) {
dst_input_array = model->flags.add_input_arrays();
dst_input_array->set_name(specified_input_array.name());
}
#define RESOLVE_MODEL_FLAG(field_name) \
if (specified_input_array.has_##field_name()) { \
if (dst_input_array->has_##field_name()) { \
QCHECK_EQ(dst_input_array->field_name(), \
specified_input_array.field_name()) \
<< "For input array '" << dst_input_array->name() << "', " \
<< "specified " #field_name " flag with value: " \
<< specified_input_array.field_name() \
<< " does not agree with already defined " #field_name \
" of this model, with value: " \
<< specified_input_array.field_name(); \
} else { \
dst_input_array->set_##field_name(specified_input_array.field_name()); \
} \
}
RESOLVE_MODEL_FLAG(std_value);
RESOLVE_MODEL_FLAG(mean_value);
#undef RESOLVE_MODEL_FLAG
if (specified_input_array.has_shape()) {
if (dst_input_array->has_shape()) {
QCHECK_EQ(specified_input_array.shape().dims_size(),
dst_input_array->shape().dims_size())
<< "For input array '" << specified_input_array.name() << "', "
<< "size of specified input shape flag with size: "
<< specified_input_array.shape().dims_size()
<< " does not agree with already defined input shape"
" of this model, with size: "
<< dst_input_array->shape().dims_size();
for (int i = 1; i < specified_input_array.shape().dims_size(); i++) {
QCHECK_EQ(specified_input_array.shape().dims(i),
dst_input_array->shape().dims(i))
<< "At dimension number " << i << " of input array "
<< specified_input_array.name() << ", the specified shape's "
<< "dimension flag with dimension: "
<< specified_input_array.shape().dims(i)
<< " does not agree with already defined shape"
<< " of this model, with dimension: "
<< dst_input_array->shape().dims(i);
}
} else {
*dst_input_array->mutable_shape() = specified_input_array.shape();
}
}
if (specified_input_array.has_data_type()) {
QCHECK(!dst_input_array->has_data_type());
dst_input_array->set_data_type(specified_input_array.data_type());
}
}
if (model_flags.output_arrays_size() > 0) {
model->flags.mutable_output_arrays()->CopyFrom(model_flags.output_arrays());
}
#define RESOLVE_MODEL_FLAG(name) \
if (model_flags.has_##name()) { \
if (model->flags.has_##name()) { \
QCHECK_EQ(model_flags.name(), model->flags.name()) \
<< "Specified " #name " flag with value: " << model_flags.name() \
<< " does not agree with already defined " #name \
" of this model, with value: " \
<< model->flags.name(); \
} else { \
model->flags.set_##name(model_flags.name()); \
} \
}
RESOLVE_MODEL_FLAG(variable_batch)
#undef RESOLVE_MODEL_FLAG
if (!model_flags.rnn_states().empty()) {
model->flags.mutable_rnn_states()->CopyFrom(model_flags.rnn_states());
}
if (model->flags.model_checks_size() == 0) {
model->flags.mutable_model_checks()->CopyFrom(model_flags.model_checks());
}
QCHECK_GT(model->flags.output_arrays_size(), 0)
<< "This model does not define output arrays, so a "
"--output_arrays flag must be given on the command-line.";
for (auto& input_array_proto : *model->flags.mutable_input_arrays()) {
auto& input_array = model->GetOrCreateArray(input_array_proto.name());
if (input_array_proto.has_data_type()) {
const ArrayDataType specified_type =
ConvertIODataTypeToArrayDataType(input_array_proto.data_type());
QCHECK(specified_type != ArrayDataType::kNone);
if (input_array.data_type != ArrayDataType::kNone) {
QCHECK(specified_type == input_array.data_type)
<< "For input array " << input_array_proto.name()
<< " the specified input data type "
<< IODataType_Name(input_array_proto.data_type())
<< " conflicts with the existing type.";
}
input_array.data_type = specified_type;
}
if (input_array.data_type == ArrayDataType::kNone) {
input_array.data_type = ArrayDataType::kFloat;
}
if (!input_array.has_shape()) {
if (input_array_proto.has_shape()) {
auto& input_array_dims = *input_array.mutable_shape()->mutable_dims();
CheckValidShapeDimensions(input_array_proto.shape().dims());
for (const auto& dim : input_array_proto.shape().dims()) {
input_array_dims.push_back(dim);
}
}
} else {
if (input_array_proto.has_shape()) {
const auto& input_array_dims =
*input_array.mutable_shape()->mutable_dims();
CHECK_EQ(input_array_dims.size(),
input_array_proto.shape().dims_size());
for (int i = 0; i < input_array_dims.size(); i++) {
CHECK_EQ(input_array_dims[i], input_array_proto.shape().dims(i));
}
} else {
for (int i = 0; i < input_array.shape().dimensions_count(); i++) {
input_array_proto.mutable_shape()->add_dims(
input_array.shape().dims(i));
}
}
}
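    // The flags define real = (quantized - mean_value) / std_value, so the
    // representable real range is the image of [qmin, qmax] under that map.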
const float mean_value = input_array_proto.mean_value();
const float std_value = input_array_proto.std_value();
MinMax input_minmax;
float qmin = 0, qmax = 255;
if (input_array.data_type == ArrayDataType::kInt16) {
qmin = -32768;
qmax = 32767;
}
input_minmax.min = (qmin - mean_value) / std_value;
input_minmax.max = (qmax - mean_value) / std_value;
if (!input_array.minmax) {
input_array.GetOrCreateMinMax() = input_minmax;
}
}
for (const auto& rnn_state : model->flags.rnn_states()) {
CreateOrCheckRnnStateArray(rnn_state.state_array(), rnn_state.size(),
rnn_state.num_dims(), model);
}
model->flags.set_change_concat_input_ranges(
model_flags.change_concat_input_ranges());
model->flags.set_allow_nonascii_arrays(model_flags.allow_nonascii_arrays());
model->flags.set_allow_nonexistent_arrays(
model_flags.allow_nonexistent_arrays());
CHECK(!model->flags.has_arrays_extra_info());
*model->flags.mutable_arrays_extra_info() = model_flags.arrays_extra_info();
}
void CheckIsReadyForQuantization(const Model& model) {
for (const auto& op : model.operators) {
for (const auto& input : op->inputs) {
const auto& input_array = model.GetArray(input);
if (input_array.data_type != ArrayDataType::kFloat) {
continue;
}
if (input_array.minmax) {
continue;
}
if (input_array.buffer) {
continue;
}
LOG(FATAL)
<< "Array " << input << ", which is an input to the "
<< HelpfulOperatorTypeName(*op) << " operator producing the output "
<< "array " << op->outputs[0] << ", is lacking min/max data, "
<< "which is necessary for quantization. If accuracy matters, either "
<< "target a non-quantized output format, or run quantized training "
<< "with your model from a floating point checkpoint to change the "
<< "input graph to contain min/max information. If you don't care "
<< "about accuracy, you can pass --default_ranges_min= and "
<< "--default_ranges_max= for easy experimentation.";
}
}
}
int ElementSize(ArrayDataType data_type) {
switch (data_type) {
case ArrayDataType::kBool:
return sizeof(bool);
case ArrayDataType::kFloat:
return 4;
case ArrayDataType::kInt8:
return 1;
case ArrayDataType::kUint8:
return 1;
case ArrayDataType::kInt16:
return 2;
case ArrayDataType::kUint16:
return 2;
case ArrayDataType::kInt32:
return 4;
case ArrayDataType::kUint32:
return 4;
case ArrayDataType::kInt64:
return 8;
case ArrayDataType::kUint64:
return 8;
case ArrayDataType::kComplex64:
return 8;
case ArrayDataType::kComplex128:
return 16;
case ArrayDataType::kFloat64:
return 8;
case ArrayDataType::kString:
LOG(FATAL) << "Transient arrays with strings are not supported yet";
return 0;
default:
LOG(FATAL) << "Unknown data_type = " << static_cast<int>(data_type);
return 0;
}
}
void DropMinMax(Model* model, const std::string& array_name) {
auto& array = model->GetArray(array_name);
if (!!array.minmax) {
LOG(WARNING) << "Dropping MinMax information in array " << array_name
<< ". Expect inaccuracy in quantized inference.";
array.minmax = nullptr;
}
}
bool IsAllocatableTransientArray(const Model& model,
const std::string& array_name) {
if (model.IsOptionalArray(array_name)) return false;
if (IsInputArray(model, array_name) || IsOutputArray(model, array_name)) {
return false;
}
const auto& array = &model.GetArray(array_name);
if (!!array->buffer) {
return false;
}
if (!array->has_shape()) {
return false;
}
if (array->final_data_type == ArrayDataType::kString ||
array->data_type == ArrayDataType::kString) {
return false;
}
return true;
}
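// Returns a sanitized array name not already in use: the name itself if free,
// otherwise the first available "<name>_i" for i in [0, 1000).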
std::string AvailableArrayName(const Model& model, const std::string& name) {
std::string sanitized_name = SanitizeNameForTFNode(name);
if (!model.HasArray(sanitized_name) &&
!model.IsOptionalArray(sanitized_name)) {
return sanitized_name;
}
const int kNumSuffixesToTry = 1000;
for (int i = 0; i < kNumSuffixesToTry; i++) {
const std::string& name_with_suffix =
toco::port::StringF("%s_%d", sanitized_name, i);
if (!model.HasArray(name_with_suffix) &&
!model.IsOptionalArray(name_with_suffix)) {
return name_with_suffix;
}
}
LOG(FATAL) << "Could not find an available array name starting with "
<< sanitized_name << ". Tried " << kNumSuffixesToTry
<< " suffixes, all were taken!";
return "";
}
std::string ShapeToString(const Shape& shape) {
if (shape.dimensions_count() == 0) {
return "[]";
}
return absl::StrCat("[ ", absl::StrJoin(shape.dims(), ", "), " ]");
}
void PrintArrayShape(Model* model, const std::string& name) {
if (!model->GetArray(name).has_shape()) {
LOG(INFO) << name << " has no shape";
return;
}
LOG(INFO) << name
<< " has shape: " << ShapeToString(model->GetArray(name).shape());
}
bool IsArrayFullyConnectedWeights(const Model& model, const std::string& name) {
bool is_fc_weights = false;
bool is_something_else = false;
for (const auto& op : model.operators) {
for (int input_index = 0; input_index < op->inputs.size(); input_index++) {
if (op->inputs[input_index] == name) {
if (op->type == OperatorType::kFullyConnected && input_index == 1) {
is_fc_weights = true;
} else {
is_something_else = true;
}
}
}
}
CHECK(!(is_fc_weights && is_something_else));
return is_fc_weights;
}
std::string CreateInt32Array(Model* model, const std::string& param_name,
const std::vector<int>& value) {
auto param_array_name = AvailableArrayName(*model, param_name);
auto& param_array = model->GetOrCreateArray(param_array_name);
param_array.mutable_shape()->ReplaceDims({static_cast<int>(value.size())});
param_array.data_type = ArrayDataType::kInt32;
auto& param_array_data =
param_array.GetMutableBuffer<ArrayDataType::kInt32>().data;
param_array_data.resize(RequiredBufferSizeForShape(param_array.shape()));
for (int i = 0; i < value.size(); ++i) {
param_array_data[i] = value[i];
}
return param_array_name;
}
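// Rough per-operator arithmetic cost model: a multiply-accumulate counts as
// two ops, transcendental activations are billed a flat 64 ops per output
// element, and unrecognized op types are treated as free. Returns false when
// the shapes needed for the estimate are not yet known.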
bool EstimateArithmeticOpsCount(const Model& model, const Operator& op,
int64_t* result) {
switch (op.type) {
case OperatorType::kFullyConnected:
case OperatorType::kConv:
case OperatorType::kDepthwiseConv: {
const auto& output_array = model.GetArray(op.outputs[0]);
const auto& weights_array = model.GetArray(op.inputs[1]);
if (!output_array.has_shape() || !weights_array.has_shape()) {
return false;
}
int64_t cols = 1;
for (int i = 0; i < output_array.shape().dimensions_count() - 1; i++) {
cols *= output_array.shape().dims(i);
}
const int64_t cost_per_col =
2 * RequiredBufferSizeForShape(weights_array.shape());
*result = cost_per_col * cols;
if (op.inputs.size() > 2) {
*result += RequiredBufferSizeForShape(output_array.shape());
}
break;
}
case OperatorType::kTransposeConv: {
const auto& input_array = model.GetArray(op.inputs[2]);
const auto& weights_array = model.GetArray(op.inputs[1]);
if (!input_array.has_shape() || !weights_array.has_shape()) {
return false;
}
const Shape& input = input_array.shape();
const Shape& weights = weights_array.shape();
*result = 2 * input.dims(0) * input.dims(1) * input.dims(2) *
input.dims(3) * weights.dims(1) * weights.dims(2) *
weights.dims(0);
break;
}
case OperatorType::kAdd:
case OperatorType::kSub:
case OperatorType::kMul: {
const auto& output_array = model.GetArray(op.outputs[0]);
if (!output_array.has_shape()) {
return false;
}
*result = RequiredBufferSizeForShape(output_array.shape());
break;
}
case OperatorType::kAddN: {
const auto& output_array = model.GetArray(op.outputs[0]);
if (!output_array.has_shape()) {
return false;
}
const int64_t num_adds = op.inputs.size() - 1;
*result = num_adds * RequiredBufferSizeForShape(output_array.shape());
break;
}
case OperatorType::kLogistic:
case OperatorType::kSoftmax:
case OperatorType::kLogSoftmax:
case OperatorType::kTanh: {
const auto& output_array = model.GetArray(op.outputs[0]);
if (!output_array.has_shape()) {
return false;
}
*result = 64 * RequiredBufferSizeForShape(output_array.shape());
break;
}
case OperatorType::kMaxPool: {
const auto& maxpool = *static_cast<const MaxPoolOperator*>(&op);
const auto& output_array = model.GetArray(op.outputs[0]);
if (!output_array.has_shape()) {
return false;
}
*result = RequiredBufferSizeForShape(output_array.shape()) *
maxpool.kheight * maxpool.kwidth;
break;
}
case OperatorType::kAveragePool: {
const auto& avgpool = *static_cast<const AveragePoolOperator*>(&op);
const auto& output_array = model.GetArray(op.outputs[0]);
if (!output_array.has_shape()) {
return false;
}
*result = RequiredBufferSizeForShape(output_array.shape()) *
avgpool.kheight * avgpool.kwidth;
break;
}
case OperatorType::kL2Pool: {
const auto* maxpool = static_cast<const MaxPoolOperator*>(&op);
const auto& output_array = model.GetArray(op.outputs[0]);
if (!output_array.has_shape()) {
return false;
}
const int64_t cost_per_val = 2 * maxpool->kheight * maxpool->kwidth + 32;
*result = RequiredBufferSizeForShape(output_array.shape()) * cost_per_val;
break;
}
case OperatorType::kL2Normalization: {
const auto& output_array = model.GetArray(op.outputs[0]);
if (!output_array.has_shape()) {
return false;
}
*result = 3 * RequiredBufferSizeForShape(output_array.shape());
break;
}
default:
*result = 0;
break;
}
return true;
}
bool EstimateArithmeticOpsCount(const Model& model, int64_t* result) {
int64_t total = 0;
for (const auto& op : model.operators) {
int64_t num_ops;
if (!EstimateArithmeticOpsCount(model, *op, &num_ops)) {
return false;
}
total += num_ops;
}
*result = total;
return true;
}
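// Pretty-prints an op count: raw below 10k, otherwise scaled to "M" (1e6) or
// "G" (1e9) with three decimals.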
std::string FormattedNumber(int64_t x) {
const int64_t million = 1000000;
const int64_t billion = 1000000000;
if (x < 10000) {
return toco::port::StringF("%d ", x);
} else if (x < billion) {
return toco::port::StringF("%.3f M", static_cast<double>(x) / million);
} else {
return toco::port::StringF("%.3f G", static_cast<double>(x) / billion);
}
}
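// Computes the permutation such that output axis i reads input axis
// shuffle[i], for the axes-order conversions supported here; any other
// conversion is fatal.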
void GetShuffleShape(AxesOrder input_axes_order, AxesOrder output_axes_order,
std::vector<int>* shuffle) {
CHECK_EQ(AxesCount(input_axes_order), AxesCount(output_axes_order));
shuffle->resize(4);
for (int i = 0; i < 4; i++) {
(*shuffle)[i] = i;
}
if (input_axes_order == output_axes_order) {
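    // Identity permutation: leave the default shuffle untouched.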
} else if (AxesCount(input_axes_order) == 2) {
shuffle->resize(2);
(*shuffle)[0] = 1;
(*shuffle)[1] = 0;
} else if (input_axes_order == AxesOrder::kOHWI &&
output_axes_order == AxesOrder::kHWIO) {
*shuffle = {1, 2, 3, 0};
} else if (input_axes_order == AxesOrder::kHWIO &&
output_axes_order == AxesOrder::kOHWI) {
*shuffle = {3, 0, 1, 2};
} else if (input_axes_order == AxesOrder::kOHWI &&
output_axes_order == AxesOrder::kHWOI) {
*shuffle = {1, 2, 0, 3};
} else {
LOG(FATAL) << "Bad shuffle";
}
}
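// Extends a permutation to 'newdim' axes: the new leading axes map to
// themselves, and the original entries are shifted up by the pad amount.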
void ExtendShuffle(const std::vector<int>& input_shuffle, int newdim,
std::vector<int>* extended_shuffle) {
*extended_shuffle = input_shuffle;
CHECK(newdim >= input_shuffle.size());
const int pad_size = newdim - input_shuffle.size();
extended_shuffle->resize(newdim);
for (int i = 0; i < pad_size; i++) {
(*extended_shuffle)[i] = i;
}
for (int i = pad_size; i < newdim; i++) {
(*extended_shuffle)[i] = input_shuffle[i - pad_size] + pad_size;
}
}
void ShuffleDims(const Shape& input_shape, AxesOrder input_axes_order,
AxesOrder output_axes_order, Shape* output_shape) {
if (input_axes_order == AxesOrder::kHWIM &&
output_axes_order == AxesOrder::k1HWO) {
*output_shape = Shape({1, input_shape.dims(0), input_shape.dims(1),
input_shape.dims(3) * input_shape.dims(2)});
} else {
std::vector<int> shuffle;
GetShuffleShape(input_axes_order, output_axes_order, &shuffle);
std::vector<int>* output_dims = output_shape->mutable_dims();
output_dims->resize(input_shape.dimensions_count());
for (int i = 0; i < input_shape.dimensions_count(); i++) {
(*output_dims)[i] = input_shape.dims(shuffle[i]);
}
}
}
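// Copies 'input_data' into 'output_data' while permuting axes per the
// axes-order change; shapes are padded to rank 4 internally. The
// kHWIM -> k1HWO case is layout-preserving, so it degenerates to a memcpy.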
template <typename T>
void ShuffleArrayTemplate(const Shape& input_shape, AxesOrder input_axes_order,
AxesOrder output_axes_order,
const Shape& output_shape, const T* input_data,
T* output_data) {
if (input_axes_order == AxesOrder::kHWIM &&
output_axes_order == AxesOrder::k1HWO) {
memcpy(output_data, input_data,
RequiredBufferSizeForShape(input_shape) * sizeof(output_data[0]));
return;
}
CHECK(input_shape.dimensions_count() == output_shape.dimensions_count());
const int dim = input_shape.dimensions_count();
CHECK_LE(dim, 4);
std::vector<int> shuffle;
GetShuffleShape(input_axes_order, output_axes_order, &shuffle);
CHECK(shuffle.size() >= dim);
for (int i = 0; i < dim; i++) {
CHECK(shuffle[i] >= 0 && shuffle[i] < dim);
CHECK(input_shape.dims(shuffle[i]) == output_shape.dims(i));
}
Shape extended_input_shape = input_shape;
ExtendShape(&extended_input_shape, 4);
Shape extended_output_shape = output_shape;
ExtendShape(&extended_output_shape, 4);
std::vector<int> extended_shuffle;
ExtendShuffle(shuffle, 4, &extended_shuffle);
const std::vector<int>& extended_input_dims = extended_input_shape.dims();
const std::vector<int>& extended_output_dims = extended_output_shape.dims();
int input_strides[4];
input_strides[3] = 1;
input_strides[2] = extended_input_dims[3];
input_strides[1] = input_strides[2] * extended_input_dims[2];
input_strides[0] = input_strides[1] * extended_input_dims[1];
const int input_stride_0 = input_strides[extended_shuffle[3]];
const int input_stride_1 = input_strides[extended_shuffle[2]];
const int input_stride_2 = input_strides[extended_shuffle[1]];
const int input_stride_3 = input_strides[extended_shuffle[0]];
const int output_size_0 = extended_output_dims[3];
const int output_size_1 = extended_output_dims[2];
const int output_size_2 = extended_output_dims[1];
const int output_size_3 = extended_output_dims[0];
const int output_stride_0 = 1;
const int output_stride_1 = output_size_0;
const int output_stride_2 = output_stride_1 * output_size_1;
const int output_stride_3 = output_stride_2 * output_size_2;
for (int i3 = 0; i3 < output_size_3; i3++) {
const T* const input_ptr_3 = input_data + i3 * input_stride_3;
T* const output_ptr_3 = output_data + i3 * output_stride_3;
for (int i2 = 0; i2 < output_size_2; i2++) {
const T* const input_ptr_2 = input_ptr_3 + i2 * input_stride_2;
T* const output_ptr_2 = output_ptr_3 + i2 * output_stride_2;
for (int i1 = 0; i1 < output_size_1; i1++) {
const T* input_ptr = input_ptr_2 + i1 * input_stride_1;
T* output_ptr = output_ptr_2 + i1 * output_stride_1;
T* const output_ptr_end = output_ptr + output_size_0 * output_stride_0;
while (output_ptr != output_ptr_end) {
*output_ptr = *input_ptr;
input_ptr += input_stride_0;
output_ptr += output_stride_0;
}
}
}
}
}
void ShuffleArray(const Shape& input_shape, AxesOrder input_axes_order,
AxesOrder output_axes_order, const Shape& output_shape,
const uint8* input_data, uint8* output_data) {
ShuffleArrayTemplate<uint8>(input_shape, input_axes_order, output_axes_order,
output_shape, input_data, output_data);
}
void ShuffleArray(const Shape& input_shape, AxesOrder input_axes_order,
AxesOrder output_axes_order, const Shape& output_shape,
const float* input_data, float* output_data) {
ShuffleArrayTemplate<float>(input_shape, input_axes_order, output_axes_order,
output_shape, input_data, output_data);
}
int AxesCount(AxesOrder axes_order) {
switch (axes_order) {
case AxesOrder::kOneAxis:
return 1;
case AxesOrder::kRC:
return 2;
case AxesOrder::kCR:
return 2;
case AxesOrder::kHWIO:
return 4;
case AxesOrder::kOHWI:
return 4;
case AxesOrder::kHWIM:
return 4;
case AxesOrder::k1HWO:
return 4;
case AxesOrder::kNHWC:
return 4;
case AxesOrder::kHWOI:
return 4;
default:
LOG(FATAL) << "Bad AxesOrder";
return 0;
}
}
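// An array is discardable unless it is a model input/output or it backs a
// non-discardable RNN state (either the state array or the back-edge source).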
bool IsDiscardableArray(const Model& model, const std::string& array_name) {
if (IsInputArray(model, array_name) || IsOutputArray(model, array_name)) {
return false;
}
for (const auto& rnn_state : model.flags.rnn_states()) {
if (!rnn_state.discardable()) {
if (array_name == rnn_state.state_array()) {
return false;
}
if (array_name == rnn_state.back_edge_source_array()) {
return false;
}
}
}
return true;
}
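// A Reshape behaves like a Transpose when the input and output dim lists are
// identical after dropping size-1 dimensions; extra unary dims are tolerated
// only when allow_extra_unary_dims is set.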
bool ReshapeIsEquivalentToTranspose(const Model& model,
const TensorFlowReshapeOperator* op,
bool allow_extra_unary_dims) {
CHECK(!op->shape.empty());
CHECK(model.HasArray(op->inputs[0]));
CHECK(model.HasArray(op->outputs[0]));
const auto& input_array = model.GetArray(op->inputs[0]);
const auto& output_array = model.GetArray(op->outputs[0]);
CHECK(input_array.has_shape());
CHECK(output_array.has_shape());
std::vector<int> in_shape = input_array.shape().dims();
std::vector<int> out_shape = output_array.shape().dims();
if (!allow_extra_unary_dims && in_shape.size() != out_shape.size()) {
return false;
}
in_shape.erase(std::remove(in_shape.begin(), in_shape.end(), 1),
in_shape.end());
out_shape.erase(std::remove(out_shape.begin(), out_shape.end(), 1),
out_shape.end());
return in_shape == out_shape;
}
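// Verifies that every array reached its requested final_data_type. Bool
// arrays are exempt, as are arrays whose final type is unset or kInt16
// (kInt16 appears to be special-cased downstream).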
void CheckFinalDataTypesSatisfied(const Model& model) {
for (const auto& array_entry : model.GetArrayMap()) {
const auto& array = *array_entry.second;
if (array.data_type == ArrayDataType::kBool) {
continue;
}
if (array.final_data_type != ArrayDataType::kNone &&
array.final_data_type != ArrayDataType::kInt16) {
CHECK(array.data_type == array.final_data_type)
<< "Array \"" << array_entry.first
<< "\" has mis-matching actual and final data types (data_type="
<< ArrayDataTypeName(array.data_type)
<< ", final_data_type=" << ArrayDataTypeName(array.final_data_type)
<< ").";
}
}
}
ArrayDataType ConvertIODataTypeToArrayDataType(IODataType type) {
switch (type) {
case FLOAT:
return ArrayDataType::kFloat;
case UINT8:
case QUANTIZED_UINT8:
return ArrayDataType::kUint8;
case INT8:
case QUANTIZED_INT8:
return ArrayDataType::kInt8;
case INT16:
case QUANTIZED_INT16:
return ArrayDataType::kInt16;
case UINT16:
return ArrayDataType::kUint16;
case INT32:
return ArrayDataType::kInt32;
case UINT32:
return ArrayDataType::kUint32;
case INT64:
return ArrayDataType::kInt64;
case UINT64:
return ArrayDataType::kUint64;
case BOOL:
return ArrayDataType::kBool;
case STRING:
return ArrayDataType::kString;
case COMPLEX64:
return ArrayDataType::kComplex64;
case COMPLEX128:
return ArrayDataType::kComplex128;
case FLOAT16:
return ArrayDataType::kFloat16;
case FLOAT64:
return ArrayDataType::kFloat64;
case RESOURCE:
case VARIANT:
default:
return ArrayDataType::kNone;
}
}
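// For each declared RNN state, defaults both endpoint arrays to float when
// neither has a data type yet; a missing state or back-edge array trips the
// CHECKs and is fatal.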
void FinishBuildingRNNStates(Model* model) {
for (const auto& rnn_state : model->flags.rnn_states()) {
if (!model->HasArray(rnn_state.back_edge_source_array()) ||
!model->HasArray(rnn_state.state_array())) {
CHECK(model->HasArray(rnn_state.back_edge_source_array()));
CHECK(model->HasArray(rnn_state.state_array()));
continue;
}
const auto& src_array = model->GetArray(rnn_state.back_edge_source_array());
auto& dst_array = model->GetArray(rnn_state.state_array());
if (src_array.data_type == ArrayDataType::kNone &&
dst_array.data_type == ArrayDataType::kNone) {
dst_array.data_type = ArrayDataType::kFloat;
}
}
}
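// Collects the names of arrays matched by an ArraysExtraInfo entry, either by
// exact name or by RE2 full match against name_regexp.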
std::unordered_set<std::string> ScanArrayNames(
const Model& model, const toco::ArraysExtraInfo_Entry& entry) {
std::unordered_set<std::string> matches;
if (model.HasArray(entry.name())) {
matches.insert(entry.name());
}
if (!entry.name_regexp().empty()) {
const auto& arrays = model.GetArrayMap();
const RE2 name_regexp = {entry.name_regexp()};
for (auto it = arrays.begin(); it != arrays.end(); ++it) {
if (RE2::FullMatch(it->first, name_regexp)) {
matches.insert(it->first);
}
}
}
return matches;
}
void UseArraysExtraInfo(Model* model, bool quantize_output) {
for (const auto& entry : model->flags.arrays_extra_info().entries()) {
const auto matches = ScanArrayNames(*model, entry);
if (matches.empty()) {
LOG(ERROR) << "arrays_extra_info_file: No matching arrays found for "
<< (entry.has_name() ? entry.name() : "")
<< (entry.has_name_regexp() ? entry.name_regexp() : "");
continue;
}
for (const auto& matched_name : matches) {
auto& array = model->GetArray(matched_name);
if (entry.has_min() || entry.has_max()) {
CHECK_EQ(entry.has_min(), entry.has_max());
auto& minmax = array.GetOrCreateMinMax();
minmax.min = entry.min();
minmax.max = entry.max();
}
if (entry.has_data_type() && quantize_output) {
array.final_data_type =
ConvertIODataTypeToArrayDataType(entry.data_type());
}
if (entry.has_shape()) {
array.clear_shape();
array.mutable_shape();
for (const auto& dim : entry.shape().dims()) {
array.mutable_shape()->mutable_dims()->push_back(dim);
}
}
if (entry.has_constant_float_value()) {
CHECK(array.has_shape());
if (array.data_type == ArrayDataType::kFloat) {
auto& data = array.GetMutableBuffer<ArrayDataType::kFloat>().data;
data.resize(RequiredBufferSizeForShape(array.shape()));
for (float& f : data) {
f = entry.constant_float_value();
}
}
}
}
}
}
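// Undoes the block shuffling applied to uint8 FullyConnected weights in the
// non-default (shuffled 4x16) weights format: re-linearizes each 4x16 block
// and flips the sign bit (XOR 0x80) of every value.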
void UndoWeightsShuffling(Model* model) {
for (const auto& op : model->operators) {
if (op->type != toco::OperatorType::kFullyConnected) {
continue;
}
const auto& fc_op = static_cast<toco::FullyConnectedOperator&>(*op);
if (fc_op.weights_format == FullyConnectedWeightsFormat::kDefault) {
continue;
}
const std::string& weights_name = fc_op.inputs[1];
QCHECK_EQ(CountOpsWithInput(*model, weights_name), 1);
auto& weights_array = model->GetArray(weights_name);
QCHECK(weights_array.data_type == ArrayDataType::kUint8);
auto& weights_data =
weights_array.GetMutableBuffer<toco::ArrayDataType::kUint8>().data;
const auto& weights_shape = weights_array.shape();
QCHECK_EQ(weights_shape.dimensions_count(), 2);
const int rows = weights_shape.dims(0);
const int cols = weights_shape.dims(1);
QCHECK_EQ(rows % 4, 0);
QCHECK_EQ(cols % 16, 0);
CHECK_EQ(rows * cols, weights_data.size());
std::vector<uint8> deshuffled_data(weights_data.size());
uint8* shuffled_data_ptr = weights_data.data();
for (int r = 0; r < rows; r += 4) {
for (int c = 0; c < cols; c += 16) {
for (int i = 0; i < 4; i++) {
uint8* deshuffled_data_ptr =
deshuffled_data.data() + (r + i) * cols + c;
for (int j = 0; j < 16; j++) {
uint8 shuffled_val = *shuffled_data_ptr++;
uint8 deshuffled_val = shuffled_val ^ 0x80;
*deshuffled_data_ptr++ = deshuffled_val;
}
}
}
}
CHECK_EQ(shuffled_data_ptr, weights_data.data() + rows * cols);
weights_data = std::move(deshuffled_data);
}
}
void CopyMinMaxAndQuantizationRelatedFields(const Array& src, Array* dst) {
if (src.minmax) {
dst->GetOrCreateMinMax() = src.GetMinMax();
}
if (src.quantization_params) {
dst->GetOrCreateQuantizationParams() = src.GetQuantizationParams();
}
dst->narrow_range = src.narrow_range;
}
} | #include <tuple>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/lite/testing/util.h"
#include "tensorflow/lite/toco/model.h"
#include "tensorflow/lite/toco/toco_port.h"
#include "tensorflow/lite/toco/tooling_util.h"
namespace toco {
enum class Agreement { kBroadcast, kExtend, kBroadcastNotExtend, kNeither };
struct ShapePair {
Shape left;
Shape right;
Agreement agreement;
};
std::vector<ShapePair> CreateShapePairs() {
return std::vector<ShapePair>(
{
{Shape({3}), Shape({3}), Agreement::kBroadcast},
{Shape({256, 256, 3}), Shape({256, 256, 3}), Agreement::kBroadcast},
{Shape({256, 256, 3}), Shape({3}), Agreement::kBroadcast},
{Shape({8, 1, 6, 1}), Shape({7, 1, 5}), Agreement::kBroadcast},
{Shape({}), Shape({3}), Agreement::kBroadcast},
{Shape({}), Shape({3, 1}), Agreement::kBroadcast},
{Shape({3}), Shape({3}), Agreement::kExtend},
{Shape({256, 256, 3}), Shape({256, 256, 3}), Agreement::kExtend},
{Shape({1, 1, 3}), Shape({1, 1, 3}), Agreement::kExtend},
{Shape({1, 1, 3}), Shape({3}), Agreement::kExtend},
{Shape({1, 1, 3}), Shape({1, 3}), Agreement::kExtend},
{Shape({256, 256, 3}), Shape({3}), Agreement::kBroadcastNotExtend},
{Shape({5, 4}), Shape({1}), Agreement::kBroadcastNotExtend},
{Shape({5, 4}), Shape({4}), Agreement::kBroadcastNotExtend},
{Shape({15, 3, 5}), Shape({15, 1, 5}), Agreement::kBroadcastNotExtend},
{Shape({15, 3, 5}), Shape({3, 5}), Agreement::kBroadcastNotExtend},
{Shape({15, 3, 5}), Shape({3, 1}), Agreement::kBroadcastNotExtend},
{Shape({3, 1}), Shape({}), Agreement::kBroadcastNotExtend},
{Shape({3}), Shape({4}), Agreement::kNeither},
{Shape({2, 1}), Shape({8, 4, 3}), Agreement::kNeither}});
}
class ShapeTest : public ::testing::TestWithParam<ShapePair> {};
TEST_P(ShapeTest, Agrees) {
const ShapePair& param = GetParam();
switch (param.agreement) {
case Agreement::kBroadcast: {
EXPECT_TRUE(ShapesAgreeUpToBroadcasting(param.left, param.right));
break;
}
case Agreement::kExtend: {
EXPECT_TRUE(ShapesAgreeUpToExtending(param.left, param.right));
EXPECT_TRUE(ShapesAgreeUpToBroadcasting(param.left, param.right));
break;
}
case Agreement::kBroadcastNotExtend: {
EXPECT_TRUE(ShapesAgreeUpToBroadcasting(param.left, param.right));
EXPECT_FALSE(ShapesAgreeUpToExtending(param.left, param.right));
break;
}
case Agreement::kNeither: {
EXPECT_FALSE(ShapesAgreeUpToExtending(param.left, param.right));
EXPECT_FALSE(ShapesAgreeUpToBroadcasting(param.left, param.right));
break;
}
}
}
INSTANTIATE_TEST_SUITE_P(AgreeBroadcast, ShapeTest,
::testing::ValuesIn(CreateShapePairs()));
static const char kNegativeValuesMessage[] =
"Tensor shape should not include negative values";
static const char kLargeTensorMessage[] = "Tensor shape is too large";
TEST(NumElementsTest, Int) {
int count;
tensorflow::Status status = absl::OkStatus();
status = NumElements(std::vector<int>{1024, 1024, 2047}, &count);
EXPECT_TRUE(status.ok());
EXPECT_EQ(count, 2146435072);
status = NumElements(std::vector<int>{1024, 0, 2048}, &count);
EXPECT_TRUE(status.ok());
EXPECT_EQ(count, 0);
status = NumElements(std::vector<int>{1, 2, -3}, &count);
EXPECT_EQ(status.message(), kNegativeValuesMessage);
status = NumElements(std::vector<int>{1024, 1024, 2048}, &count);
EXPECT_EQ(status.message(), kLargeTensorMessage);
}
TEST(NumElementsTest, Int32) {
int32_t count;
tensorflow::Status status = absl::OkStatus();
status = NumElements(std::vector<int32_t>{1024, 1024, 2047}, &count);
EXPECT_TRUE(status.ok());
EXPECT_EQ(count, 2146435072);
status = NumElements(std::vector<int32_t>{1, 2, -3}, &count);
EXPECT_EQ(status.message(), kNegativeValuesMessage);
status = NumElements(std::vector<int32_t>{1024, 1024, 2048}, &count);
EXPECT_EQ(status.message(), kLargeTensorMessage);
}
TEST(NumElementsTest, Int64) {
int64_t count;
tensorflow::Status status = absl::OkStatus();
status = NumElements(std::vector<int64_t>{16777216, 16777216, 32767}, &count);
EXPECT_TRUE(status.ok());
EXPECT_EQ(count, 9223090561878065152LL);
status = NumElements(std::vector<int64_t>{1, 2, -3}, &count);
EXPECT_EQ(status.message(), kNegativeValuesMessage);
status = NumElements(std::vector<int64_t>{16777216, 16777216, 32768}, &count);
EXPECT_EQ(status.message(), kLargeTensorMessage);
}
TEST(NumElementsTest, UnsignedInt32) {
uint32_t count;
tensorflow::Status status = absl::OkStatus();
status = NumElements(std::vector<uint32_t>{1024, 2048, 2047}, &count);
EXPECT_TRUE(status.ok());
EXPECT_EQ(count, 4292870144);
status = NumElements(std::vector<int>{1, 2, -3}, &count);
EXPECT_EQ(status.message(), kNegativeValuesMessage);
status = NumElements(std::vector<uint32_t>{1024, 2048, 2048}, &count);
EXPECT_EQ(status.message(), kLargeTensorMessage);
}
TEST(NumElementsTest, UnsignedInt64) {
uint64_t count;
tensorflow::Status status = absl::OkStatus();
status =
NumElements(std::vector<uint64_t>{16777216, 16777216, 65535}, &count);
EXPECT_TRUE(status.ok());
EXPECT_EQ(count, 18446462598732840960ULL);
status = NumElements(std::vector<int>{1, 2, -3}, &count);
EXPECT_EQ(status.message(), kNegativeValuesMessage);
status =
NumElements(std::vector<uint64_t>{16777216, 16777216, 65536}, &count);
EXPECT_EQ(status.message(), kLargeTensorMessage);
}
TEST(NumElementsTest, Scalar) {
tensorflow::Status status = absl::OkStatus();
int32_t count;
status = NumElements(std::vector<int32_t>{}, &count);
EXPECT_TRUE(status.ok());
EXPECT_EQ(count, 1);
uint64_t countu64;
status = NumElements(std::vector<uint64_t>{}, &countu64);
EXPECT_TRUE(status.ok());
EXPECT_EQ(countu64, 1ULL);
}
TEST(FusedActivationTest, DefaultsToUnfused) {
EXPECT_TRUE(OperatorSupportsFusedActivation(OperatorType::kAdd));
EXPECT_FALSE(OperatorSupportsFusedActivation(OperatorType::kNone));
EXPECT_FALSE(OperatorSupportsFusedActivation(static_cast<OperatorType>(255)));
}
}
int main(int argc, char** argv) {
::tflite::LogToStderr();
::testing::InitGoogleTest(&argc, argv);
::toco::port::InitGoogleWasDoneElsewhere();
return RUN_ALL_TESTS();
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/toco/tooling_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/toco/tooling_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
03b95d9e-0506-4114-a488-66d1d3a06da7 | cpp | tensorflow/tensorflow | iota | tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/iota.cc | tensorflow/lite/experimental/shlo/legacy/test/iota_test.cc | #include "tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/iota.h"
#include <cstdint>
#include <tuple>
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Casting.h"
#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinTypeInterfaces.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#include "mlir/Transforms/DialectConversion.h"
#include "tensorflow/compiler/mlir/lite/ir/tfl_ops.h"
#include "tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/op_util_common.h"
#include "xla/mlir_hlo/mhlo/IR/hlo_ops.h"
namespace mlir::odml {
namespace {
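// Lowers mhlo.iota on i32/i64/f32 to a tfl.range, plus a reshape and
// broadcast_to when the result rank is greater than 1.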
class LegalizeIota : public OpConversionPattern<mhlo::IotaOp> {
public:
using OpConversionPattern::OpConversionPattern;
LogicalResult matchAndRewrite(
mhlo::IotaOp op, OpAdaptor adaptor,
ConversionPatternRewriter& rewriter) const final;
};
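// Note the inverted sense: "legal" means the op stays in MHLO. Only
// i32/i64/f32 iotas are converted.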
bool IsIotaLegal(mhlo::IotaOp op) {
auto e_type = llvm::cast<ShapedType>(op.getType()).getElementType();
return !(e_type.isF32() || e_type.isSignlessInteger(32) ||
e_type.isSignlessInteger(64));
}
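// Builds scalar dense attributes (start=0, limit=iota_dim_size, delta=1) in
// the requested element type, for use as tfl.range operands.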
std::tuple<DenseElementsAttr, DenseElementsAttr, DenseElementsAttr>
BuildRangeParams(Type e_type, int64_t iota_dim_size, OpBuilder& b) {
if (e_type.isInteger(32)) {
return std::tuple(BuildScalarDense<int>(e_type, 0),
BuildScalarDense<int>(e_type, iota_dim_size),
BuildScalarDense<int>(e_type, 1));
} else if (e_type.isInteger(64)) {
return std::tuple(BuildScalarDense<int64_t>(e_type, 0),
BuildScalarDense<int64_t>(e_type, iota_dim_size),
BuildScalarDense<int64_t>(e_type, 1));
}
return std::tuple(BuildScalarDense<float>(e_type, 0.0),
BuildScalarDense<float>(e_type, iota_dim_size),
BuildScalarDense<float>(e_type, 1.0));
}
LogicalResult LegalizeIota::matchAndRewrite(
mhlo::IotaOp op, OpAdaptor adaptor,
ConversionPatternRewriter& rewriter) const {
if (IsIotaLegal(op)) {
return rewriter.notifyMatchFailure(op, "Must be i32, i64 or f32");
}
auto type = llvm::cast<ShapedType>(op.getType());
auto e_type = type.getElementType();
const int64_t iota_dim_size = type.getDimSize(op.getIotaDimension());
auto [start, limit, delta] =
BuildRangeParams(e_type, iota_dim_size, rewriter);
auto start_op = rewriter.create<arith::ConstantOp>(op->getLoc(), start);
auto limit_op = rewriter.create<arith::ConstantOp>(op->getLoc(), limit);
auto delta_op = rewriter.create<arith::ConstantOp>(op->getLoc(), delta);
auto range_type = RankedTensorType::get({iota_dim_size}, e_type);
auto range_op = rewriter.create<TFL::RangeOp>(op->getLoc(), range_type,
start_op, limit_op, delta_op);
if (type.getRank() == 1) {
rewriter.replaceOp(op, range_op);
return success();
}
llvm::SmallVector<int64_t> reshape_shape(type.getRank(), 1);
reshape_shape[op.getIotaDimension()] = iota_dim_size;
Value reshape_shape_cst = rewriter.create<arith::ConstantOp>(
op->getLoc(), rewriter.getI64TensorAttr(reshape_shape));
reshape_shape_cst = rewriter.create<TFL::CastOp>(
op->getLoc(),
llvm::cast<ShapedType>(reshape_shape_cst.getType())
.clone(rewriter.getI32Type()),
reshape_shape_cst);
auto reshape_type = RankedTensorType::get(reshape_shape, e_type);
auto reshape_op = rewriter.create<TFL::ReshapeOp>(
op->getLoc(), reshape_type, range_op, reshape_shape_cst);
auto broad_cast_shape_cst = rewriter.create<arith::ConstantOp>(
op->getLoc(), rewriter.getI64TensorAttr(type.getShape()));
rewriter.replaceOpWithNewOp<TFL::BroadcastToOp>(op, type, reshape_op,
broad_cast_shape_cst);
return success();
}
class LegalizeDynamicIotaOp : public OpConversionPattern<mhlo::DynamicIotaOp> {
public:
using OpConversionPattern::OpConversionPattern;
LogicalResult matchAndRewrite(
mhlo::DynamicIotaOp op, OpAdaptor adaptor,
ConversionPatternRewriter& rewriter) const final;
};
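// mhlo.dynamic_iota is converted only when it is rank-1 over dimension 0 with
// an i32/i64/f32 element type; everything else remains legal (kept as-is).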
bool IsDynamicIotaLegal(mhlo::DynamicIotaOp op) {
auto type = llvm::cast<ShapedType>(op.getType());
auto element_type = type.getElementType();
return (!element_type.isF32() && !element_type.isSignlessInteger(32) &&
!element_type.isSignlessInteger(64)) ||
type.getRank() > 1 || op.getIotaDimension() != 0;
}
LogicalResult LegalizeDynamicIotaOp::matchAndRewrite(
mhlo::DynamicIotaOp op, OpAdaptor adaptor,
ConversionPatternRewriter& rewriter) const {
if (IsDynamicIotaLegal(op)) {
return failure();
}
auto type = llvm::cast<ShapedType>(op.getType());
Type element_type = type.getElementType();
auto [start, unused_limit, delta] =
BuildRangeParams(element_type, 0, rewriter);
auto start_op = rewriter.create<arith::ConstantOp>(op.getLoc(), start);
auto delta_op = rewriter.create<arith::ConstantOp>(op.getLoc(), delta);
auto output_shape = op.getOperand();
if (mlir::isa<FloatType>(element_type)) {
auto cast_type =
mlir::cast<ShapedType>(output_shape.getType()).clone(element_type);
output_shape =
rewriter.create<TFL::CastOp>(op.getLoc(), cast_type, output_shape);
}
DenseIntElementsAttr scalar_attr = DenseIntElementsAttr::get(
RankedTensorType::get({0}, rewriter.getI32Type()),
llvm::ArrayRef<int32_t>({}));
auto scalar_shape =
rewriter.create<arith::ConstantOp>(op.getLoc(), scalar_attr);
auto limit_scalar = rewriter.create<TFL::ReshapeOp>(
op.getLoc(), RankedTensorType::get({}, element_type), output_shape,
scalar_shape);
const uint64_t dimension = op.getIotaDimension();
auto range_type =
RankedTensorType::get({type.getShape()[dimension]}, element_type);
rewriter.replaceOpWithNewOp<TFL::RangeOp>(op, range_type, start_op,
limit_scalar, delta_op);
return success();
}
}
void PopulateIotaPatterns(MLIRContext* ctx, RewritePatternSet& patterns,
ConversionTarget& target) {
patterns.add<LegalizeIota, LegalizeDynamicIotaOp>(ctx);
target.addDynamicallyLegalOp<mhlo::IotaOp>(IsIotaLegal);
target.addDynamicallyLegalOp<mhlo::DynamicIotaOp>(IsDynamicIotaLegal);
}
} | #include <initializer_list>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/experimental/shlo/legacy/include/shlo.h"
#include "tensorflow/lite/experimental/shlo/legacy/src/debug.h"
#include "tensorflow/lite/experimental/shlo/legacy/src/storage.h"
#include "tensorflow/lite/experimental/shlo/legacy/test/util.h"
namespace stablehlo {
namespace testing {
template <ElementType element_type>
void test(std::initializer_list<DimensionSize>&& shape,
DimensionSize iota_dimension,
std::vector<typename Storage<element_type>::Type>&& expected_values) {
Tensor expected(TensorType(Shape(shape), element_type),
expected_values.data());
std::vector<typename Storage<element_type>::Type> result_values(
expected_values.size());
Tensor result(TensorType(Shape(shape), element_type), result_values.data());
ASSERT_OK(Iota(iota_dimension, result));
EXPECT_EQ(result, expected) << "\niota_dimension: " << iota_dimension;
}
template <ElementType storage_type, ElementType expressed_type>
void test(
QuantizedParameter&& quantized_parameter,
std::initializer_list<DimensionSize>&& shape, DimensionSize iota_dimension,
std::vector<typename Storage<expressed_type>::Type>&& expected_values) {
auto expected_quant_values = QuantizeVector<storage_type, expressed_type>(
expected_values, quantized_parameter);
decltype(expected_quant_values) result_quant_values(
expected_quant_values.size());
QuantizedTensorElementType element_type(storage_type, expressed_type,
std::move(quantized_parameter));
QuantizedTensor expected(
QuantizedTensorType(Shape(shape),
QuantizedTensorElementType(element_type)),
expected_quant_values.data());
QuantizedTensor result(
QuantizedTensorType(Shape(shape),
QuantizedTensorElementType(element_type)),
result_quant_values.data());
ASSERT_OK(Iota(iota_dimension, result));
EXPECT_EQ(result, expected) << "\niota_dimension: " << iota_dimension;
}
TEST(Iota, Unquantized) {
test<ElementType::kSI8>(
{4, 5}, 0, {0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3});
test<ElementType::kSI8>(
{4, 5}, 1, {0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4});
test<ElementType::kSI16>(
{4, 5}, 0, {0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3});
test<ElementType::kSI16>(
{4, 5}, 1, {0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4});
test<ElementType::kSI32>(
{4, 5}, 0, {0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3});
test<ElementType::kSI32>(
{4, 5}, 1, {0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4});
test<ElementType::kBF16>(
{4, 5}, 0, {0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3});
test<ElementType::kBF16>(
{4, 5}, 1, {0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4});
test<ElementType::kF16>(
{4, 5}, 0, {0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3});
test<ElementType::kF16>(
{4, 5}, 1, {0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4});
test<ElementType::kF32>(
{4, 5}, 0, {0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3});
test<ElementType::kF32>(
{4, 5}, 1, {0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4});
}
TEST(Iota, Quantized) {
test<ElementType::kSI8, ElementType::kBF16>(
{.scale = 0.1, .zero_point = 0}, {4, 5}, 0,
{0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3});
test<ElementType::kSI8, ElementType::kBF16>(
{.scale = 0.1, .zero_point = 0}, {4, 5}, 1,
{0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4});
test<ElementType::kSI8, ElementType::kF16>(
{.scale = 0.1, .zero_point = 0}, {4, 5}, 0,
{0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3});
test<ElementType::kSI8, ElementType::kF16>(
{.scale = 0.1, .zero_point = 0}, {4, 5}, 1,
{0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4});
test<ElementType::kSI8, ElementType::kF32>(
{.scale = 0.1, .zero_point = 0}, {4, 5}, 0,
{0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3});
test<ElementType::kSI8, ElementType::kF32>(
{.scale = 0.1, .zero_point = 0}, {4, 5}, 1,
{0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4});
test<ElementType::kSI16, ElementType::kBF16>(
{.scale = 1e-2, .zero_point = 0}, {4, 5}, 0,
{0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3});
test<ElementType::kSI16, ElementType::kBF16>(
{.scale = 1e-2, .zero_point = 0}, {4, 5}, 1,
{0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4});
test<ElementType::kSI16, ElementType::kF16>(
{.scale = 1e-2, .zero_point = 0}, {4, 5}, 0,
{0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3});
test<ElementType::kSI16, ElementType::kF16>(
{.scale = 1e-2, .zero_point = 0}, {4, 5}, 1,
{0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4});
test<ElementType::kSI16, ElementType::kF32>(
{.scale = 1e-3, .zero_point = 0}, {4, 5}, 0,
{0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3});
test<ElementType::kSI16, ElementType::kF32>(
{.scale = 1e-3, .zero_point = 0}, {4, 5}, 1,
{0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4});
test<ElementType::kSI32, ElementType::kBF16>(
{.scale = 1e-2, .zero_point = 0}, {4, 5}, 0,
{0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3});
test<ElementType::kSI32, ElementType::kBF16>(
{.scale = 1e-2, .zero_point = 0}, {4, 5}, 1,
{0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4});
test<ElementType::kSI32, ElementType::kF16>(
{.scale = 1e-2, .zero_point = 0}, {4, 5}, 0,
{0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3});
test<ElementType::kSI32, ElementType::kF16>(
{.scale = 1e-2, .zero_point = 0}, {4, 5}, 1,
{0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4});
test<ElementType::kSI32, ElementType::kF32>(
{.scale = 1e-3, .zero_point = 0}, {4, 5}, 0,
{0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3});
test<ElementType::kSI32, ElementType::kF32>(
{.scale = 1e-3, .zero_point = 0}, {4, 5}, 1,
{0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4});
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/iota.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/legacy/test/iota_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
3b452d23-eecd-4019-aa11-415a1c1afaff | cpp | tensorflow/tensorflow | yuv_to_rgb | tensorflow/lite/experimental/ml_adjacent/algo/yuv_to_rgb.cc | tensorflow/lite/experimental/ml_adjacent/algo/yuv_to_rgb_test.cc | #include "tensorflow/lite/experimental/ml_adjacent/algo/image_utils.h"
#include "tensorflow/lite/experimental/ml_adjacent/lib.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
namespace ml_adj {
namespace yuv_to_rgb {
namespace {
using ::ml_adj::algo::Algo;
using ::ml_adj::algo::InputPack;
using ::ml_adj::algo::OutputPack;
using ::ml_adj::data::DataRef;
using ::ml_adj::data::MutableDataRef;
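// Row-major 3x3 YUV -> RGB conversion matrix; the coefficients appear to
// match the classic BT.601 "analog" YUV definition.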
constexpr float kYuv2RgbKernel[] = {1.0f, 0.0f,          1.13988303f,
                                    1.0f, -0.394642334f, -0.58062185f,
                                    1.0f, 2.03206185f,   0.0f};
constexpr int kYuv2RgbKernelDim =
sizeof(kYuv2RgbKernel) / sizeof(kYuv2RgbKernel[0]);
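// Applies the 3x3 kernel to every pixel of a float NHWC image, converting
// YUV planes to RGB; the output buffer is resized to the input shape.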
void ComputeYuvToRgb(const InputPack& inputs, const OutputPack& outputs) {
TFLITE_DCHECK(inputs.size() == 1);
TFLITE_DCHECK(outputs.size() == 1);
const DataRef* img = inputs[0];
const float* input_data = reinterpret_cast<const float*>(img->Data());
const dim_t batches = img->Dims()[0];
const dim_t height = img->Dims()[1];
const dim_t width = img->Dims()[2];
const dim_t channels = img->Dims()[3];
MutableDataRef* output = outputs[0];
output->Resize({batches, height, width, channels});
float* output_data = reinterpret_cast<float*>(output->Data());
ConvertColorSpace(batches, height, width, input_data, output_data,
&kYuv2RgbKernel[0], kYuv2RgbKernelDim);
}
}
const Algo* Impl_YuvToRgb() {
static const Algo yuv_to_rgb = {&ComputeYuvToRgb, nullptr};
return &yuv_to_rgb;
}
}
} | #include "tensorflow/lite/experimental/ml_adjacent/algo/yuv_to_rgb.h"
#include <cstring>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/lite/experimental/ml_adjacent/data/owning_vector_ref.h"
#include "tensorflow/lite/experimental/ml_adjacent/lib.h"
using ::ml_adj::algo::Algo;
using ::ml_adj::data::OwningVectorRef;
namespace ml_adj {
namespace yuv_to_rgb {
namespace {
struct YuvToRgbTestParams {
const std::vector<dim_t> img_dims;
const std::vector<float> img_data;
const std::vector<float> expected_data;
const std::vector<dim_t> expected_shape;
};
class YuvToRgbTest : public ::testing::TestWithParam<YuvToRgbTestParams> {};
TEST_P(YuvToRgbTest, FloatPixelType) {
constexpr float kAbsError = 0.1f;
const YuvToRgbTestParams& params = GetParam();
OwningVectorRef img(etype_t::f32);
img.Resize(dims_t(params.img_dims));
ASSERT_EQ(img.Bytes(), params.img_data.size() * sizeof(float));
std::memcpy(img.Data(), params.img_data.data(), img.Bytes());
OwningVectorRef output(etype_t::f32);
const Algo* yuv_to_rgb = Impl_YuvToRgb();
yuv_to_rgb->process({&img}, {&output});
ASSERT_EQ(output.Bytes(), params.expected_data.size() * sizeof(float));
ASSERT_EQ(output.Dims(), params.expected_shape);
const float* out_data = reinterpret_cast<float*>(output.Data());
for (int i = 0; i < output.NumElements(); ++i) {
EXPECT_NEAR(out_data[i], params.expected_data[i], kAbsError)
<< "out_data[" << i << "] = " << out_data[i] << ", expected_data[" << i
<< "] = " << params.expected_data[i];
}
}
INSTANTIATE_TEST_SUITE_P(
YuvToRgbTests, YuvToRgbTest,
testing::ValuesIn({
YuvToRgbTestParams{{1, 3, 2, 3},
{
92.5f,
58.3f,
-71.5f,
93.5f,
58.3f,
-71.5f,
102.5f,
58.3f,
-71.5f,
103.5f,
58.3f,
-71.5f,
112.5f,
58.3f,
-71.5f,
113.5f,
58.3f,
-71.5f,
},
{11, 111, 211, 12, 112, 212,
21, 121, 221, 22, 122, 222,
31, 131, 231, 32, 132, 232},
{1, 3, 2, 3}},
YuvToRgbTestParams{{2, 3, 2, 3},
{92.5f, 58.3f, -71.5f, 93.5f, 58.3f, -71.5f,
102.5f, 58.3f, -71.5f, 103.5f, 58.3f, -71.5f,
112.5f, 58.3f, -71.5f, 113.5f, 58.3f, -71.5f,
92.5f, 58.3f, -71.5f, 93.5f, 58.3f, -71.5f,
102.5f, 58.3f, -71.5f, 103.5f, 58.3f, -71.5f,
112.5f, 58.3f, -71.5f, 113.5f, 58.3f, -71.5f},
{11, 111, 211, 12, 112, 212,
21, 121, 221, 22, 122, 222,
31, 131, 231, 32, 132, 232,
11, 111, 211, 12, 112, 212,
21, 121, 221, 22, 122, 222,
31, 131, 231, 32, 132, 232},
{2, 3, 2, 3}},
}));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/ml_adjacent/algo/yuv_to_rgb.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/ml_adjacent/algo/yuv_to_rgb_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |