ID | Language | Repository Name | File Name | File Path in Repository | File Path for Unit Test | Code | Unit Test - (Ground Truth) | Code Url | Test Code Url | Commit Hash |
---|---|---|---|---|---|---|---|---|---|---|
6faa1ee0-6de7-43eb-8611-02cba56c2d4f | cpp | tensorflow/tensorflow | tensor_slice_util | tensorflow/lite/kernels/tensor_slice_util.cc | tensorflow/lite/kernels/tensor_slice_util_test.cc | #include "tensorflow/lite/kernels/tensor_slice_util.h"
#include <cstdint>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/runtime_shape.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
namespace tflite {
namespace ops {
namespace builtin {
template <typename IndexType>
Index<IndexType> ReadIndexVector(const TfLiteTensor* indices_tensor,
const RuntimeShape& tensor_shape,
const Index<IndexType>& other_indices,
int64_t dim_to_read) {
Index<IndexType> index;
index.reserve(tensor_shape.DimensionsCount());
int shift = 0;
for (int64_t dim = 0; dim < tensor_shape.DimensionsCount(); ++dim) {
if (dim == dim_to_read) {
index.push_back(0);
shift = 1;
} else {
index.push_back(other_indices[dim - shift]);
}
}
int64_t index_vector_size = tensor_shape.Dims(dim_to_read);
Index<IndexType> result;
result.reserve(index_vector_size);
for (IndexType index_vector_idx = 0; index_vector_idx < index_vector_size;
++index_vector_idx) {
index[dim_to_read] = index_vector_idx;
IndexType flat_index = TensorIndexToFlat(
index.data(), tensor_shape.DimensionsCount(), tensor_shape);
const IndexType* tensor_data = GetTensorData<IndexType>(indices_tensor);
result.push_back(tensor_data[flat_index]);
}
return result;
}
template Index<int32_t> ReadIndexVector(const TfLiteTensor* indices_tensor,
const RuntimeShape& tensor_shape,
const Index<int32_t>& other_indices,
int64_t dim_to_read);
template Index<int64_t> ReadIndexVector(const TfLiteTensor* indices_tensor,
const RuntimeShape& tensor_shape,
const Index<int64_t>& other_indices,
int64_t dim_to_read);
}
}
} | #include "tensorflow/lite/kernels/tensor_slice_util.h"
#include <cstdint>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/runtime_shape.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace {
using ::testing::ElementsAreArray;
TEST(TensorSliceUtil, ArrayContains) {
std::vector<int64_t> array = {1, 2, 3};
EXPECT_TRUE(ArrayContains(array.data(), array.size(), 2));
EXPECT_FALSE(ArrayContains(array.data(), array.size(), 0));
}
TEST(TensorSliceUtil, ArrayContainsWorkOnEmptyArray) {
std::vector<int64_t> array = {};
EXPECT_FALSE(ArrayContains(array.data(), 0, 2));
}
TEST(TensorSliceUtil, ScatterIndexHandlesNullPtr) {
Index<int64_t> index = {3, 5};
std::vector<int64_t> scatter_dims = {1, 0};
Index<int64_t>* result = nullptr;
TfLiteStatus status =
ScatterIndex(index, scatter_dims.data(), scatter_dims.size(), 3, result);
EXPECT_THAT(status, kTfLiteError);
}
TEST(TensorSliceUtil, ScatterIndexHandlesOutOfBoundIndices) {
Index<int64_t> index = {3, 5};
std::vector<int64_t> scatter_dims = {4, 0};
Index<int64_t> result;
TfLiteStatus status =
ScatterIndex(index, scatter_dims.data(), scatter_dims.size(), 3, &result);
EXPECT_THAT(status, kTfLiteError);
}
TEST(TensorSliceUtil, ScatterIndex) {
Index<int64_t> index = {3, 5};
std::vector<int64_t> scatter_dims = {1, 0};
Index<int64_t> result;
ScatterIndex(index, scatter_dims.data(), scatter_dims.size(), 3, &result);
EXPECT_THAT(result, ElementsAreArray({5, 3, 0}));
}
TEST(TensorSliceUtil, TensorIndexToFlatWorksForScalars) {
Index<int64_t> index = {0};
RuntimeShape shape(0);
EXPECT_EQ(TensorIndexToFlat(index.data(), index.size(), shape), 0);
}
TEST(TensorSliceUtil, TensorIndexToFlat) {
Index<int64_t> index = {2, 4};
RuntimeShape shape({3, 5});
EXPECT_EQ(TensorIndexToFlat(index.data(), index.size(), shape), 14);
}
TEST(TensorSliceUtil, AddIndices) {
Index<int64_t> index1 = {1, 2, 3};
Index<int64_t> index2 = {2, 7, 5};
EXPECT_THAT(AddIndices(index1, index2), ElementsAreArray({3, 9, 8}));
}
TEST(TensorSliceUtil, ExpandDimsHandlesEmptyIndex) {
Index<int64_t> index = {};
std::vector<int64_t> avoided_dims = {0, 1};
Index<int64_t> result;
ExpandDims(index, avoided_dims.data(), avoided_dims.size(), &result);
EXPECT_THAT(result, ElementsAreArray({0, 0}));
}
TEST(TensorSliceUtil, ExpandDims) {
Index<int64_t> index = {2, 4};
std::vector<int64_t> avoided_dims = {0, 2};
Index<int64_t> result;
ExpandDims(index, avoided_dims.data(), avoided_dims.size(), &result);
EXPECT_THAT(result, ElementsAreArray({0, 2, 0, 4}));
}
TEST(TensorSliceUtil, ReadIndexVector) {
TfLiteTensor tensor;
tensor.type = kTfLiteInt64;
std::vector<int64_t> tensor_data = {0, 2, 1, 0, 2, 1, 0, 1, 1, 0, 0, 9};
TfLitePtrUnion ptr_union;
ptr_union.i64 = tensor_data.data();
tensor.data = ptr_union;
RuntimeShape shape = {2, 3, 2};
Index<int64_t> other_indices = {1, 1};
int64_t dim_to_read = 1;
EXPECT_THAT(ReadIndexVector(&tensor, shape, other_indices, dim_to_read),
ElementsAreArray({1, 0, 9}));
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/tensor_slice_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/tensor_slice_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
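The row above pairs `ReadIndexVector` with its unit test. The core of the kernel is row-major index arithmetic; below is a minimal standalone sketch of that scheme, with plain `std::vector`s standing in for the TFLite `Index` and `RuntimeShape` types (a simplification for illustration, not the real types). On the test's data (shape `{2, 3, 2}`, `other_indices = {1, 1}`, `dim_to_read = 1`) it returns `{1, 0, 9}`, matching the expectation above.

```cpp
#include <cstdint>
#include <vector>

// Row-major flat offset for a multi-dimensional index, mirroring
// TensorIndexToFlat: fold each dimension as flat = flat * dim + coord.
int64_t FlatIndex(const std::vector<int64_t>& index,
                  const std::vector<int64_t>& dims) {
  int64_t flat = 0;
  for (size_t d = 0; d < dims.size(); ++d) {
    flat = flat * dims[d] + index[d];
  }
  return flat;
}

// Reads the vector lying along dim_to_read while the other coordinates stay
// fixed at other_indices (which skips dim_to_read), as ReadIndexVector does.
std::vector<int64_t> ReadVectorAlongDim(
    const std::vector<int64_t>& data, const std::vector<int64_t>& dims,
    const std::vector<int64_t>& other_indices, int64_t dim_to_read) {
  std::vector<int64_t> index;
  int shift = 0;
  for (int64_t d = 0; d < static_cast<int64_t>(dims.size()); ++d) {
    if (d == dim_to_read) {
      index.push_back(0);  // placeholder, overwritten in the loop below
      shift = 1;
    } else {
      index.push_back(other_indices[d - shift]);
    }
  }
  std::vector<int64_t> result;
  for (int64_t i = 0; i < dims[dim_to_read]; ++i) {
    index[dim_to_read] = i;
    result.push_back(data[FlatIndex(index, dims)]);
  }
  return result;
}
```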
49a0ec33-fc7c-48d1-bb5c-a573c76e4ae8 | cpp | tensorflow/tensorflow | run_handler_concurrent_work_queue | tensorflow/core/tfrt/run_handler_thread_pool/run_handler_concurrent_work_queue.cc | tensorflow/core/tfrt/run_handler_thread_pool/run_handler_concurrent_work_queue_test.cc | #include "tensorflow/core/tfrt/run_handler_thread_pool/run_handler_concurrent_work_queue.h"
#include <memory>
#include <optional>
#include <ostream>
#include <utility>
#include "absl/strings/str_join.h"
#include "tensorflow/core/tfrt/run_handler_thread_pool/run_handler.h"
#include "tfrt/host_context/async_dispatch.h"
#include "tfrt/host_context/async_value.h"
#include "tfrt/host_context/execution_context.h"
namespace tfrt {
namespace tf {
RunHandlerThreadWorkQueue::RunHandlerThreadWorkQueue(const Options& options)
: options_(options),
quiescing_state_(std::make_unique<::tfrt::internal::QuiescingState>()),
non_blocking_work_queue_(quiescing_state_.get(),
1),
blocking_work_queue_(quiescing_state_.get(),
1) {
CHECK(options.num_threads_in_sub_thread_pool.size() ==
options.num_sub_thread_pool);
CHECK(options.sub_thread_request_percentage.size() ==
options.num_sub_thread_pool);
RunHandlerPool::Options pool_options;
pool_options.num_inter_op_threads = options.num_main_threads;
pool_options.num_intra_op_threads = options.num_complementary_threads;
pool_options.max_concurrent_handler = options.max_concurrent_handler;
pool_options.blocking_threads_max_sleep_time_micro_sec =
options.blocking_threads_max_sleep_time_micro_sec;
pool_options.non_blocking_threads_sleep_time_micro_sec =
options.non_blocking_threads_sleep_time_micro_sec;
pool_options.num_sub_thread_pool = options.num_sub_thread_pool;
pool_options.num_threads_in_sub_thread_pool =
options.num_threads_in_sub_thread_pool;
pool_options.sub_thread_request_percentage =
options.sub_thread_request_percentage;
pool_options.enable_wake_up = options.enable_wake_up;
pool_options.wait_if_no_active_request = options.wait_if_no_active_request;
pool_options.use_adaptive_waiting_time = options.use_adaptive_waiting_time;
handler_pool_ = std::make_unique<RunHandlerPool>(pool_options);
}
absl::StatusOr<std::unique_ptr<tensorflow::tfrt_stub::WorkQueueInterface>>
RunHandlerThreadWorkQueue::InitializeRequest(int64_t request_id) const {
RunHandlerOptions options;
std::unique_ptr<RunHandler> handler =
handler_pool_->Get(request_id, options_.init_timeout_ms, options);
if (!handler) {
return tensorflow::errors::Internal(absl::StrCat(
"Could not obtain RunHandler for request after waiting for ",
options_.init_timeout_ms, " ms."));
}
return {std::make_unique<RunHandlerWorkQueue>(std::move(handler))};
}
void RunHandlerThreadWorkQueue::AddTask(TaskFunction work) {
non_blocking_work_queue_.AddTask(std::move(work));
}
std::optional<TaskFunction> RunHandlerThreadWorkQueue::AddBlockingTask(
TaskFunction work, bool allow_queuing) {
if (allow_queuing) {
return blocking_work_queue_.EnqueueBlockingTask(std::move(work));
} else {
return blocking_work_queue_.RunBlockingTask(std::move(work));
}
}
void RunHandlerThreadWorkQueue::Quiesce() {
handler_pool_->Quiesce();
non_blocking_work_queue_.Quiesce();
blocking_work_queue_.Quiesce();
}
void RunHandlerThreadWorkQueue::Await(
ArrayRef<RCReference<AsyncValue>> values) {
tfrt::Await(values);
}
bool RunHandlerThreadWorkQueue::IsInWorkerThread() const {
return true;
}
std::ostream& operator<<(std::ostream& strm,
const RunHandlerThreadWorkQueue::Options& options) {
return strm << "{"
<< "num_main_threads = " << options.num_main_threads
<< ", num_complementary_threads = "
<< options.num_complementary_threads
<< ", init_timeout_ms = " << options.init_timeout_ms
<< ", max_concurrent_handler = " << options.max_concurrent_handler
<< ", num_sub_thread_pool = " << options.num_sub_thread_pool
<< ", num_threads_in_sub_thread_pool = ["
<< absl::StrJoin(options.num_threads_in_sub_thread_pool, ",")
<< "]"
<< ", sub_thread_request_percentage = ["
<< absl::StrJoin(options.sub_thread_request_percentage, ",")
<< "]"
<< ", non_blocking_threads_sleep_time_micro_sec = "
<< options.non_blocking_threads_sleep_time_micro_sec
<< ", blocking_threads_max_sleep_time_micro_sec = "
<< options.blocking_threads_max_sleep_time_micro_sec
<< ", use_adaptive_waiting_time = "
<< options.use_adaptive_waiting_time
<< ", wait_if_no_active_request = "
<< options.wait_if_no_active_request
<< ", enable_wake_up = " << options.enable_wake_up << "}";
}
}
} | #include "tensorflow/core/tfrt/run_handler_thread_pool/run_handler_concurrent_work_queue.h"
#include <cstdio>
#include <memory>
#include <utility>
#include <gtest/gtest.h>
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "absl/time/time.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/status_matchers.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
#include "tfrt/host_context/concurrent_work_queue.h"
#include "tfrt/host_context/diagnostic.h"
#include "tfrt/host_context/execution_context.h"
#include "tfrt/host_context/host_allocator.h"
#include "tfrt/host_context/host_context.h"
#include "tfrt/host_context/task_function.h"
#include "tfrt/support/mutex.h"
namespace tfrt {
namespace tf {
namespace {
const int kNumMainThreads = 1;
const int kNumComplementaryThreads = 1;
class RunHandlerThreadWorkQueueTest : public ::testing::Test {
protected:
void SetUp() override {
RunHandlerThreadWorkQueue::Options options;
options.num_complementary_threads = kNumComplementaryThreads;
options.num_main_threads = kNumMainThreads;
options.init_timeout_ms = 100;
pool_ = std::make_unique<RunHandlerThreadWorkQueue>(options);
auto decoded_diagnostic_handler = [&](const DecodedDiagnostic& diag) {};
std::unique_ptr<ConcurrentWorkQueue> work_queue =
CreateSingleThreadedWorkQueue();
std::unique_ptr<HostAllocator> host_allocator = CreateMallocAllocator();
host_ = std::make_unique<HostContext>(decoded_diagnostic_handler,
std::move(host_allocator),
std::move(work_queue));
RequestContextBuilder req_ctx_builder{host_.get(),
nullptr};
auto queue = pool_->InitializeRequest(100);
TF_CHECK_OK(queue.status());
queue_ = std::move(*queue);
auto req_ctx = std::move(req_ctx_builder).build();
ASSERT_TRUE(static_cast<bool>(req_ctx));
exec_ctx_ = std::make_unique<ExecutionContext>(std::move(*req_ctx));
}
std::unique_ptr<RunHandlerThreadWorkQueue> pool_;
std::unique_ptr<tensorflow::tfrt_stub::WorkQueueInterface> queue_;
std::unique_ptr<HostContext> host_;
std::unique_ptr<ExecutionContext> exec_ctx_;
};
TEST_F(RunHandlerThreadWorkQueueTest, RunningBlockingTask) {
int n = 0;
tensorflow::mutex m;
for (int i = 0; i < 10; ++i) {
ASSERT_FALSE(pool_->AddBlockingTask(TaskFunction([&n, &m] {
tensorflow::mutex_lock lock(m);
++n;
}),
true));
}
pool_->Quiesce();
EXPECT_EQ(n, 10);
}
TEST_F(RunHandlerThreadWorkQueueTest, RunningBlockingTaskNoExecCtx) {
int n = 0;
tensorflow::mutex m;
for (int i = 0; i < 10; ++i) {
pool_->AddBlockingTask(TaskFunction([&n, &m] {
tensorflow::mutex_lock lock(m);
++n;
}),
true);
}
pool_->Quiesce();
EXPECT_EQ(n, 10);
}
TEST_F(RunHandlerThreadWorkQueueTest, RunningBlockingTaskNoQueueing) {
int n = 0;
tensorflow::mutex m;
for (int i = 0; i < 10; ++i) {
ASSERT_FALSE(pool_->AddBlockingTask(TaskFunction([&n, &m] {
tensorflow::mutex_lock lock(m);
++n;
}),
false));
}
pool_->Quiesce();
EXPECT_EQ(n, 10);
}
TEST_F(RunHandlerThreadWorkQueueTest, RunningNonBlockingTask) {
int n = 0;
tensorflow::mutex m;
for (int i = 0; i < 10; ++i) {
queue_->AddTask(TaskFunction([&n, &m] {
tensorflow::mutex_lock lock(m);
++n;
}));
}
pool_->Quiesce();
EXPECT_EQ(n, 10);
}
TEST_F(RunHandlerThreadWorkQueueTest, RunningNonBlockingTaskWithNoExecCtx) {
int n = 0;
tensorflow::mutex m;
for (int i = 0; i < 10; ++i) {
pool_->AddTask(TaskFunction([&n, &m] {
tensorflow::mutex_lock lock(m);
++n;
}));
}
pool_->Quiesce();
EXPECT_EQ(n, 10);
}
TEST_F(RunHandlerThreadWorkQueueTest, RunningMixedTask) {
int n = 0;
tensorflow::mutex m;
for (int i = 0; i < 10; ++i) {
queue_->AddTask(TaskFunction([&n, &m] {
tensorflow::mutex_lock lock(m);
++n;
}));
ASSERT_FALSE(pool_->AddBlockingTask(TaskFunction([&n, &m] {
tensorflow::mutex_lock lock(m);
++n;
}),
true));
}
pool_->Quiesce();
EXPECT_EQ(n, 20);
}
TEST_F(RunHandlerThreadWorkQueueTest, NameReturnsValidString) {
EXPECT_TRUE(absl::StrContains(pool_->name(), "RunHandlerThreadWorkQueue"));
}
TEST_F(RunHandlerThreadWorkQueueTest, GetParallelismLevelOk) {
EXPECT_EQ(pool_->GetParallelismLevel(),
kNumComplementaryThreads + kNumMainThreads);
}
TEST_F(RunHandlerThreadWorkQueueTest, IsWorkerThreadOk) {
EXPECT_TRUE(pool_->IsInWorkerThread());
}
TEST_F(RunHandlerThreadWorkQueueTest, NoHandlerReturnsError) {
RunHandlerThreadWorkQueue::Options options;
options.num_complementary_threads = 0;
options.num_main_threads = 0;
options.init_timeout_ms = 1;
options.max_concurrent_handler = 0;
auto queue = std::make_unique<RunHandlerThreadWorkQueue>(options);
tfrt::RequestContextBuilder ctx_builder(nullptr, nullptr);
EXPECT_THAT(
queue->InitializeRequest(100),
tensorflow::testing::StatusIs(
tensorflow::error::INTERNAL,
"Could not obtain RunHandler for request after waiting for 1 ms."));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/run_handler_thread_pool/run_handler_concurrent_work_queue.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/run_handler_thread_pool/run_handler_concurrent_work_queue_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
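The tests above lean on one contract: every task added before `Quiesce()` returns must have executed. A minimal sketch of that contract follows, using `std::thread` and `std::condition_variable` rather than the RunHandler thread pool, so it illustrates the semantics, not the implementation; tasks are assumed to be added before `Quiesce()` is called.

```cpp
#include <condition_variable>
#include <functional>
#include <mutex>
#include <queue>
#include <thread>
#include <vector>

// Simplified stand-in for a work queue with a draining Quiesce().
class TinyWorkQueue {
 public:
  explicit TinyWorkQueue(int num_threads) {
    for (int i = 0; i < num_threads; ++i) {
      workers_.emplace_back([this] { WorkerLoop(); });
    }
  }
  ~TinyWorkQueue() { Quiesce(); }

  void AddTask(std::function<void()> task) {
    {
      std::lock_guard<std::mutex> lock(mu_);
      queue_.push(std::move(task));
    }
    cv_.notify_one();
  }

  // Drains all pending work, then joins the workers.
  void Quiesce() {
    {
      std::lock_guard<std::mutex> lock(mu_);
      done_ = true;
    }
    cv_.notify_all();
    for (auto& t : workers_) {
      if (t.joinable()) t.join();
    }
  }

 private:
  void WorkerLoop() {
    while (true) {
      std::function<void()> task;
      {
        std::unique_lock<std::mutex> lock(mu_);
        cv_.wait(lock, [this] { return done_ || !queue_.empty(); });
        if (queue_.empty()) return;  // done_ is set and the queue is drained
        task = std::move(queue_.front());
        queue_.pop();
      }
      task();
    }
  }

  std::mutex mu_;
  std::condition_variable cv_;
  std::queue<std::function<void()>> queue_;
  bool done_ = false;
  std::vector<std::thread> workers_;
};
```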
16f36227-4093-4bab-99de-c06d8b586284 | cpp | tensorflow/tensorflow | compression_utils | tensorflow/core/data/compression_utils.cc | tensorflow/core/data/compression_utils_test.cc | #include "tensorflow/core/data/compression_utils.h"
#include <limits>
#include <string>
#include <vector>
#include "tensorflow/core/common_runtime/dma_helper.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/framework/variant_op_registry.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/snappy.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace data {
namespace {
constexpr int kCompressedElementVersion = 0;
}
class Iov {
public:
explicit Iov(size_t size) : iov_(size), idx_(0), num_bytes_(0) {}
void Add(void* base, size_t len) {
iov_[idx_].iov_base = base;
iov_[idx_].iov_len = len;
num_bytes_ += len;
++idx_;
}
iovec* Data() { return iov_.data(); }
size_t NumBytes() const { return num_bytes_; }
size_t NumPieces() const { return iov_.size(); }
private:
std::vector<struct iovec> iov_;
size_t idx_;
size_t num_bytes_;
};
Status CompressElement(const std::vector<Tensor>& element,
CompressedElement* out) {
size_t num_string_tensors = 0;
size_t num_string_tensor_strings = 0;
std::vector<TensorProto> nonmemcpyable_components;
size_t total_nonmemcpyable_size = 0;
for (const auto& component : element) {
if (component.dtype() == DT_STRING) {
++num_string_tensors;
num_string_tensor_strings += component.NumElements();
} else if (!DataTypeCanUseMemcpy(component.dtype())) {
nonmemcpyable_components.emplace_back();
component.AsProtoTensorContent(&nonmemcpyable_components.back());
total_nonmemcpyable_size +=
nonmemcpyable_components.back().ByteSizeLong();
}
}
Iov iov{element.size() + num_string_tensor_strings - num_string_tensors};
tstring nonmemcpyable;
nonmemcpyable.resize_uninitialized(total_nonmemcpyable_size);
char* nonmemcpyable_pos = nonmemcpyable.mdata();
int nonmemcpyable_component_index = 0;
for (int i = 0; i < element.size(); ++i) {
const auto& component = element[i];
CompressedComponentMetadata* metadata =
out->mutable_component_metadata()->Add();
metadata->set_dtype(component.dtype());
component.shape().AsProto(metadata->mutable_tensor_shape());
if (DataTypeCanUseMemcpy(component.dtype())) {
const TensorBuffer* buffer = DMAHelper::buffer(&component);
if (buffer) {
iov.Add(buffer->data(), buffer->size());
metadata->add_uncompressed_bytes(buffer->size());
}
} else if (component.dtype() == DT_STRING) {
const auto& flats = component.unaligned_flat<tstring>();
for (int i = 0; i < flats.size(); ++i) {
iov.Add(const_cast<char*>(flats.data()[i].data()),
flats.data()[i].size());
metadata->add_uncompressed_bytes(flats.data()[i].size());
}
} else {
TensorProto& proto =
nonmemcpyable_components[nonmemcpyable_component_index++];
proto.SerializeToArray(nonmemcpyable_pos, proto.ByteSizeLong());
iov.Add(nonmemcpyable_pos, proto.ByteSizeLong());
nonmemcpyable_pos += proto.ByteSizeLong();
metadata->add_uncompressed_bytes(proto.ByteSizeLong());
}
}
if (iov.NumBytes() > kuint32max) {
return errors::OutOfRange("Encountered dataset element of size ",
iov.NumBytes(),
", exceeding the 4GB Snappy limit.");
}
if (!port::Snappy_CompressFromIOVec(iov.Data(), iov.NumBytes(),
out->mutable_data())) {
return errors::Internal("Failed to compress using snappy.");
}
out->set_version(kCompressedElementVersion);
VLOG(3) << "Compressed element from " << iov.NumBytes() << " bytes to "
<< out->data().size() << " bytes";
return absl::OkStatus();
}
Status UncompressElement(const CompressedElement& compressed,
std::vector<Tensor>* out) {
if (compressed.version() != kCompressedElementVersion) {
return errors::Internal("Unsupported compressed element version: ",
compressed.version());
}
int num_components = compressed.component_metadata_size();
out->clear();
out->reserve(num_components);
size_t num_string_tensors = 0;
size_t num_string_tensor_strings = 0;
size_t total_nonmemcpyable_size = 0;
for (const auto& metadata : compressed.component_metadata()) {
if (metadata.dtype() == DT_STRING) {
++num_string_tensors;
num_string_tensor_strings += metadata.uncompressed_bytes_size();
} else if (!DataTypeCanUseMemcpy(metadata.dtype())) {
total_nonmemcpyable_size += metadata.uncompressed_bytes(0);
}
}
Iov iov{num_components + num_string_tensor_strings - num_string_tensors};
tstring nonmemcpyable;
nonmemcpyable.resize_uninitialized(total_nonmemcpyable_size);
char* nonmemcpyable_pos = nonmemcpyable.mdata();
for (const auto& metadata : compressed.component_metadata()) {
if (DataTypeCanUseMemcpy(metadata.dtype())) {
out->emplace_back(metadata.dtype(), metadata.tensor_shape());
TensorBuffer* buffer = DMAHelper::buffer(&out->back());
if (buffer) {
iov.Add(buffer->data(), metadata.uncompressed_bytes(0));
}
} else if (metadata.dtype() == DT_STRING) {
out->emplace_back(metadata.dtype(), metadata.tensor_shape());
const auto& flats = out->back().unaligned_flat<tstring>();
for (int i = 0; i < metadata.uncompressed_bytes_size(); ++i) {
flats.data()[i].resize(metadata.uncompressed_bytes(i));
iov.Add(flats.data()[i].mdata(), metadata.uncompressed_bytes(i));
}
} else {
out->emplace_back();
iov.Add(nonmemcpyable_pos, metadata.uncompressed_bytes(0));
nonmemcpyable_pos += metadata.uncompressed_bytes(0);
}
}
const std::string& compressed_data = compressed.data();
size_t uncompressed_size;
if (!port::Snappy_GetUncompressedLength(
compressed_data.data(), compressed_data.size(), &uncompressed_size)) {
return errors::Internal(
"Could not get snappy uncompressed length. Compressed data size: ",
compressed_data.size());
}
if (uncompressed_size != static_cast<size_t>(iov.NumBytes())) {
return errors::Internal(
"Uncompressed size mismatch. Snappy expects ", uncompressed_size,
" whereas the tensor metadata suggests ", iov.NumBytes());
}
if (!port::Snappy_UncompressToIOVec(compressed_data.data(),
compressed_data.size(), iov.Data(),
iov.NumPieces())) {
return errors::Internal("Failed to perform snappy decompression.");
}
nonmemcpyable_pos = nonmemcpyable.mdata();
for (int i = 0; i < num_components; ++i) {
const CompressedComponentMetadata& metadata =
compressed.component_metadata(i);
if (!DataTypeCanUseMemcpy(metadata.dtype()) &&
metadata.dtype() != DT_STRING) {
TensorProto tp;
if (!tp.ParseFromString(
{nonmemcpyable_pos,
static_cast<size_t>(metadata.uncompressed_bytes(0))})) {
return errors::Internal("Could not parse TensorProto");
}
if (!out->at(i).FromProto(tp)) {
return errors::Internal("Could not parse Tensor");
}
nonmemcpyable_pos += metadata.uncompressed_bytes(0);
}
}
return absl::OkStatus();
}
REGISTER_UNARY_VARIANT_DECODE_FUNCTION(CompressedElement,
"tensorflow.data.CompressedElement");
}
} | #include "tensorflow/core/data/compression_utils.h"
#include <string>
#include <vector>
#include "tensorflow/core/data/dataset_test_base.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
#include "tsl/platform/status_matchers.h"
namespace tensorflow {
namespace data {
namespace {
using ::testing::HasSubstr;
using ::tsl::testing::StatusIs;
TEST(CompressionUtilsTest, Exceeds4GB) {
std::vector<Tensor> element = {
CreateTensor<int64_t>(TensorShape{1024, 1024, 513})};
CompressedElement compressed;
EXPECT_THAT(CompressElement(element, &compressed),
StatusIs(error::OUT_OF_RANGE,
HasSubstr("exceeding the 4GB Snappy limit")));
}
std::vector<std::vector<Tensor>> TestCases() {
return {
CreateTensors<int64_t>(TensorShape{1}, {{1}}),
CreateTensors<int64_t>(TensorShape{1}, {{1}, {2}}),
CreateTensors<tstring>(TensorShape{1}, {{"a"}, {"b"}}),
{CreateTensor<tstring>(TensorShape{1, 2}, {"abc", "xyz"}),
CreateTensor<tstring>(TensorShape{2, 1}, {"ijk", "mnk"})},
{CreateTensor<tstring>(TensorShape{1}, {"a"}),
CreateTensor<int64_t>(TensorShape{1}, {1})},
{},
{CreateTensor<int64_t>(TensorShape{1, 0})},
{CreateTensor<int64_t>(TensorShape{128, 128}),
CreateTensor<int64_t>(TensorShape{64, 2})},
{
DatasetOpsTestBase::CreateTestVariantTensor(
{CreateTensor<int64_t>(TensorShape{3, 1}, {1, 2, 3}),
CreateTensor<tstring>(TensorShape{}, {"abc"})}),
DatasetOpsTestBase::CreateTestVariantTensor(
{CreateTensor<int64_t>(TensorShape{3, 1}, {10, 11, 12}),
CreateTensor<tstring>(TensorShape{}, {"xyz"})}),
},
};
}
class ParameterizedCompressionUtilsTest
: public DatasetOpsTestBase,
public ::testing::WithParamInterface<std::vector<Tensor>> {};
TEST_P(ParameterizedCompressionUtilsTest, RoundTrip) {
std::vector<Tensor> element = GetParam();
CompressedElement compressed;
TF_ASSERT_OK(CompressElement(element, &compressed));
std::vector<Tensor> round_trip_element;
TF_ASSERT_OK(UncompressElement(compressed, &round_trip_element));
TF_EXPECT_OK(
ExpectEqual(element, round_trip_element, true));
}
TEST_P(ParameterizedCompressionUtilsTest, CompressedElementVersion) {
std::vector<Tensor> element = GetParam();
CompressedElement compressed;
TF_ASSERT_OK(CompressElement(element, &compressed));
EXPECT_EQ(0, compressed.version());
}
TEST_P(ParameterizedCompressionUtilsTest, VersionMismatch) {
std::vector<Tensor> element = GetParam();
CompressedElement compressed;
TF_ASSERT_OK(CompressElement(element, &compressed));
compressed.set_version(1);
std::vector<Tensor> round_trip_element;
EXPECT_THAT(UncompressElement(compressed, &round_trip_element),
StatusIs(error::INTERNAL));
}
INSTANTIATE_TEST_SUITE_P(Instantiation, ParameterizedCompressionUtilsTest,
::testing::ValuesIn(TestCases()));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/compression_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/compression_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
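`CompressElement` gathers scattered tensor buffers through an iovec so Snappy can compress them without an intermediate copy. The sketch below shows the same gather step with an explicit copy instead (the `Piece` struct is a hypothetical stand-in for `struct iovec`); the per-piece lengths are exactly what `uncompressed_bytes` records per component, which is what lets `UncompressElement` scatter the bytes straight back into freshly allocated tensors.

```cpp
#include <cstddef>
#include <string>
#include <vector>

// Plain stand-in for struct iovec, so the sketch is self-contained.
struct Piece {
  const void* base;
  size_t len;
};

// Gathers the scattered pieces into one contiguous blob. The real code
// skips this copy by handing the iovec directly to
// port::Snappy_CompressFromIOVec; recording each piece's length alongside
// is what makes the reverse scatter possible after decompression.
std::string GatherPieces(const std::vector<Piece>& pieces) {
  size_t total = 0;
  for (const Piece& p : pieces) total += p.len;
  std::string blob;
  blob.reserve(total);
  for (const Piece& p : pieces) {
    blob.append(static_cast<const char*>(p.base), p.len);
  }
  return blob;
}
```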
524d7df9-40ce-40ac-b402-546c47ddbe12 | cpp | tensorflow/tensorflow | ragged_range_op | tensorflow/core/kernels/ragged_range_op.cc | tensorflow/core/kernels/ragged_range_op_test.cc | #include <cstdint>
#include <limits>
#include <memory>
#include <string>
#include <type_traits>
#include <vector>
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tsl/platform/errors.h"
namespace tensorflow {
using errors::InvalidArgument;
template <typename T, typename SPLITS_TYPE>
class RaggedRangeOp : public OpKernel {
public:
using OpKernel::OpKernel;
void Compute(OpKernelContext* context) override {
const Tensor& starts_in = context->input(0);
const Tensor& limits_in = context->input(1);
const Tensor& deltas_in = context->input(2);
OP_REQUIRES(context, starts_in.shape().dims() <= 1,
InvalidArgument("starts must be a scalar or vector"));
OP_REQUIRES(context, limits_in.shape().dims() <= 1,
InvalidArgument("limits must be a scalar or vector"));
OP_REQUIRES(context, deltas_in.shape().dims() <= 1,
InvalidArgument("deltas must be a scalar or vector"));
bool broadcast_starts = starts_in.shape().dims() == 0;
bool broadcast_limits = limits_in.shape().dims() == 0;
bool broadcast_deltas = deltas_in.shape().dims() == 0;
std::vector<int> in_sizes;
if (!broadcast_starts) in_sizes.push_back(starts_in.shape().dim_size(0));
if (!broadcast_limits) in_sizes.push_back(limits_in.shape().dim_size(0));
if (!broadcast_deltas) in_sizes.push_back(deltas_in.shape().dim_size(0));
for (int i = 1; i < in_sizes.size(); ++i) {
OP_REQUIRES(context, in_sizes[i] == in_sizes[i - 1],
InvalidArgument("starts, limits, and deltas must have the "
"same shape"));
}
SPLITS_TYPE nrows = in_sizes.empty() ? 1 : in_sizes[0];
const auto& starts = starts_in.flat<T>();
const auto& limits = limits_in.flat<T>();
const auto& deltas = deltas_in.flat<T>();
Tensor* rt_nested_splits_out = nullptr;
OP_REQUIRES_OK(context,
context->allocate_output(0, TensorShape({nrows + 1}),
&rt_nested_splits_out));
auto rt_nested_splits = rt_nested_splits_out->flat<SPLITS_TYPE>();
rt_nested_splits(0) = 0;
for (int row = 0; row < nrows; ++row) {
T start = broadcast_starts ? starts(0) : starts(row);
T limit = broadcast_limits ? limits(0) : limits(row);
T delta = broadcast_deltas ? deltas(0) : deltas(row);
OP_REQUIRES(context, delta != 0, InvalidArgument("Requires delta != 0"));
SPLITS_TYPE size;
if (((delta > 0) && (limit < start)) ||
((delta < 0) && (limit > start))) {
size = 0;
} else if constexpr (std::is_integral<T>::value) {
size = Eigen::divup(Eigen::numext::abs(limit - start),
Eigen::numext::abs(delta));
} else {
auto size_auto =
Eigen::numext::ceil(Eigen::numext::abs((limit - start) / delta));
OP_REQUIRES(
context, size_auto <= std::numeric_limits<int64_t>::max(),
errors::InvalidArgument("Requires ((limit - start) / delta) <= ",
std::numeric_limits<int64_t>::max()));
size = static_cast<SPLITS_TYPE>(size_auto);
}
OP_REQUIRES(context, size >= 0, InvalidArgument("Requires size >= 0"));
OP_REQUIRES(
context,
size <=
std::numeric_limits<SPLITS_TYPE>::max() - rt_nested_splits(row),
InvalidArgument("The total range size overflowed. Consider using "
"int64 instead of int32 for row_splits_dtype."));
rt_nested_splits(row + 1) = rt_nested_splits(row) + size;
}
SPLITS_TYPE nvals = rt_nested_splits(nrows);
Tensor* rt_dense_values_out = nullptr;
OP_REQUIRES_OK(context, context->allocate_output(1, TensorShape({nvals}),
&rt_dense_values_out));
auto rt_dense_values = rt_dense_values_out->flat<T>();
int value_index = 0;
for (int row = 0; row < nrows; ++row) {
SPLITS_TYPE row_size = rt_nested_splits(row + 1) - rt_nested_splits(row);
T value = broadcast_starts ? starts(0) : starts(row);
T delta = broadcast_deltas ? deltas(0) : deltas(row);
for (SPLITS_TYPE i = 0; i < row_size; ++i) {
rt_dense_values(value_index++) = T(value);
value += delta;
}
}
}
};
#define REGISTER_CPU_KERNEL(TYPE) \
REGISTER_KERNEL_BUILDER(Name("RaggedRange") \
.Device(DEVICE_CPU) \
.TypeConstraint<TYPE>("T") \
.TypeConstraint<int32>("Tsplits"), \
RaggedRangeOp<TYPE, int32>); \
REGISTER_KERNEL_BUILDER(Name("RaggedRange") \
.Device(DEVICE_CPU) \
.TypeConstraint<TYPE>("T") \
.TypeConstraint<int64_t>("Tsplits"), \
RaggedRangeOp<TYPE, int64>);
TF_CALL_float(REGISTER_CPU_KERNEL);
TF_CALL_double(REGISTER_CPU_KERNEL);
TF_CALL_int32(REGISTER_CPU_KERNEL);
TF_CALL_int64(REGISTER_CPU_KERNEL);
#undef REGISTER_CPU_KERNEL
} | #include <gtest/gtest.h>
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/shape_inference.h"
#include "tensorflow/core/framework/shape_inference_testutil.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
class RaggedRangeOpTest : public ::tensorflow::OpsTestBase {
protected:
static constexpr int kSplitsOutput = 0;
static constexpr int kValuesOutput = 1;
template <typename T>
void BuildRaggedRangeGraph() {
const auto& dtype = DataTypeToEnum<T>::v();
TF_ASSERT_OK(NodeDefBuilder("tested_op", "RaggedRange")
.Input(FakeInput(dtype))
.Input(FakeInput(dtype))
.Input(FakeInput(dtype))
.Attr("T", dtype)
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
}
};
TEST_F(RaggedRangeOpTest, IntValues) {
BuildRaggedRangeGraph<int>();
AddInputFromArray<int>(TensorShape({4}), {0, 5, 8, 5});
AddInputFromArray<int>(TensorShape({4}), {8, 7, 8, 1});
AddInputFromArray<int>(TensorShape({4}), {2, 1, 1, -1});
TF_ASSERT_OK(RunOpKernel());
test::ExpectTensorEqual<int64_t>(*GetOutput(kSplitsOutput),
test::AsTensor<int64_t>({0, 4, 6, 6, 10}));
test::ExpectTensorEqual<int>(
*GetOutput(kValuesOutput),
test::AsTensor<int>({0, 2, 4, 6, 5, 6, 5, 4, 3, 2}));
}
TEST_F(RaggedRangeOpTest, FloatValues) {
BuildRaggedRangeGraph<float>();
AddInputFromArray<float>(TensorShape({4}), {0, 5, 8, 5});
AddInputFromArray<float>(TensorShape({4}), {8, 7, 8, 1});
AddInputFromArray<float>(TensorShape({4}), {2, 1, 1, -1});
TF_ASSERT_OK(RunOpKernel());
test::ExpectTensorEqual<int64_t>(*GetOutput(kSplitsOutput),
test::AsTensor<int64_t>({0, 4, 6, 6, 10}));
test::ExpectTensorNear<float>(
*GetOutput(kValuesOutput),
test::AsTensor<float>({0, 2, 4, 6, 5, 6, 5, 4, 3, 2}), 0.1);
}
TEST_F(RaggedRangeOpTest, RangeSizeOverflow) {
BuildRaggedRangeGraph<float>();
AddInputFromArray<float>(TensorShape({2}), {1.1, 0.1});
AddInputFromArray<float>(TensorShape({2}), {10.0, 1e10});
AddInputFromArray<float>(TensorShape({2}), {1, 1e-10});
EXPECT_EQ(absl::StrCat("Requires ((limit - start) / delta) <= ",
std::numeric_limits<int64_t>::max()),
RunOpKernel().message());
}
TEST_F(RaggedRangeOpTest, BroadcastDeltas) {
BuildRaggedRangeGraph<int>();
AddInputFromArray<int>(TensorShape({3}), {0, 5, 8});
AddInputFromArray<int>(TensorShape({3}), {8, 7, 8});
AddInputFromArray<int>(TensorShape({}), {1});
TF_ASSERT_OK(RunOpKernel());
test::ExpectTensorEqual<int64_t>(*GetOutput(kSplitsOutput),
test::AsTensor<int64_t>({0, 8, 10, 10}));
test::ExpectTensorEqual<int>(
*GetOutput(kValuesOutput),
test::AsTensor<int>({0, 1, 2, 3, 4, 5, 6, 7, 5, 6}));
}
TEST_F(RaggedRangeOpTest, BroadcastLimitsAndDeltas) {
BuildRaggedRangeGraph<int>();
AddInputFromArray<int>(TensorShape({}), {0});
AddInputFromArray<int>(TensorShape({3}), {3, 0, 2});
AddInputFromArray<int>(TensorShape({}), {1});
TF_ASSERT_OK(RunOpKernel());
test::ExpectTensorEqual<int64_t>(*GetOutput(kSplitsOutput),
test::AsTensor<int64_t>({0, 3, 3, 5}));
test::ExpectTensorEqual<int>(*GetOutput(kValuesOutput),
test::AsTensor<int>({0, 1, 2, 0, 1}));
}
TEST_F(RaggedRangeOpTest, BroadcastStartsAndLimits) {
BuildRaggedRangeGraph<int>();
AddInputFromArray<int>(TensorShape({}), {0});
AddInputFromArray<int>(TensorShape({}), {12});
AddInputFromArray<int>(TensorShape({3}), {3, 4, 5});
TF_ASSERT_OK(RunOpKernel());
test::ExpectTensorEqual<int64_t>(*GetOutput(kSplitsOutput),
test::AsTensor<int64_t>({0, 4, 7, 10}));
test::ExpectTensorEqual<int>(
*GetOutput(kValuesOutput),
test::AsTensor<int>({0, 3, 6, 9, 0, 4, 8, 0, 5, 10}));
}
TEST_F(RaggedRangeOpTest, AllScalarInputs) {
BuildRaggedRangeGraph<int>();
AddInputFromArray<int>(TensorShape({}), {0});
AddInputFromArray<int>(TensorShape({}), {5});
AddInputFromArray<int>(TensorShape({}), {1});
TF_ASSERT_OK(RunOpKernel());
test::ExpectTensorEqual<int64_t>(*GetOutput(kSplitsOutput),
test::AsTensor<int64_t>({0, 5}));
test::ExpectTensorEqual<int>(*GetOutput(kValuesOutput),
test::AsTensor<int>({0, 1, 2, 3, 4}));
}
TEST_F(RaggedRangeOpTest, InvalidArgsStarts) {
BuildRaggedRangeGraph<int>();
AddInputFromArray<int>(TensorShape({4, 1}), {0, 5, 8, 5});
AddInputFromArray<int>(TensorShape({4}), {8, 7, 8, 1});
AddInputFromArray<int>(TensorShape({4}), {2, 1, 1, -1});
EXPECT_EQ("starts must be a scalar or vector", RunOpKernel().message());
}
TEST_F(RaggedRangeOpTest, InvalidArgsLimits) {
BuildRaggedRangeGraph<int>();
AddInputFromArray<int>(TensorShape({4}), {0, 5, 8, 5});
AddInputFromArray<int>(TensorShape({4, 1}), {8, 7, 8, 1});
AddInputFromArray<int>(TensorShape({4}), {2, 1, 1, -1});
EXPECT_EQ("limits must be a scalar or vector", RunOpKernel().message());
}
TEST_F(RaggedRangeOpTest, InvalidArgsDeltas) {
BuildRaggedRangeGraph<int>();
AddInputFromArray<int>(TensorShape({4}), {0, 5, 8, 5});
AddInputFromArray<int>(TensorShape({4}), {8, 7, 8, 1});
AddInputFromArray<int>(TensorShape({4, 1}), {2, 1, 1, -1});
EXPECT_EQ("deltas must be a scalar or vector", RunOpKernel().message());
}
TEST_F(RaggedRangeOpTest, InvalidArgsShapeMismatch) {
BuildRaggedRangeGraph<int>();
AddInputFromArray<int>(TensorShape({4}), {0, 5, 8, 5});
AddInputFromArray<int>(TensorShape({3}), {7, 8, 1});
AddInputFromArray<int>(TensorShape({4}), {2, 1, 1, -1});
EXPECT_EQ("starts, limits, and deltas must have the same shape",
RunOpKernel().message());
}
TEST_F(RaggedRangeOpTest, InvalidArgsZeroDelta) {
BuildRaggedRangeGraph<int>();
AddInputFromArray<int>(TensorShape({4}), {0, 5, 8, 5});
AddInputFromArray<int>(TensorShape({4}), {7, 8, 8, 1});
AddInputFromArray<int>(TensorShape({4}), {2, 1, 0, -1});
EXPECT_EQ("Requires delta != 0", RunOpKernel().message());
}
TEST_F(RaggedRangeOpTest, EmptyRangePositiveDelta) {
BuildRaggedRangeGraph<int>();
AddInputFromArray<int>(TensorShape({2}), {0, 5});
AddInputFromArray<int>(TensorShape({2}), {5, 0});
AddInputFromArray<int>(TensorShape({}), {2});
TF_ASSERT_OK(RunOpKernel());
test::ExpectTensorEqual<int64_t>(*GetOutput(kSplitsOutput),
test::AsTensor<int64_t>({0, 3, 3}));
test::ExpectTensorEqual<int>(*GetOutput(kValuesOutput),
test::AsTensor<int>({0, 2, 4}));
}
TEST_F(RaggedRangeOpTest, EmptyRangeNegativeDelta) {
BuildRaggedRangeGraph<int>();
AddInputFromArray<int>(TensorShape({2}), {0, 5});
AddInputFromArray<int>(TensorShape({2}), {5, 0});
AddInputFromArray<int>(TensorShape({}), {-2});
TF_ASSERT_OK(RunOpKernel());
test::ExpectTensorEqual<int64_t>(*GetOutput(kSplitsOutput),
test::AsTensor<int64_t>({0, 0, 3}));
test::ExpectTensorEqual<int>(*GetOutput(kValuesOutput),
test::AsTensor<int>({5, 3, 1}));
}
TEST_F(RaggedRangeOpTest, ShapeFn) {
ShapeInferenceTestOp op("RaggedRange");
INFER_OK(op, "?;?;?", "[?];[?]");
INFER_OK(op, "[3];[3];[3]", "[4];[?]");
INFER_OK(op, "[3];[3];[]", "[4];[?]");
INFER_OK(op, "[3];[];[3]", "[4];[?]");
INFER_OK(op, "[];[3];[3]", "[4];[?]");
INFER_OK(op, "[];[];[]", "[2];[?]");
INFER_ERROR("Shape must be at most rank 1 but is rank 2", op,
"[5,5];[5];[5]");
INFER_ERROR("Shape must be at most rank 1 but is rank 2", op,
"[5];[5,5];[5]");
INFER_ERROR("Shape must be at most rank 1 but is rank 2", op,
"[5];[5];[5,5]");
INFER_ERROR("Dimensions must be equal, but are 4 and 3", op, "[3];[4];[3]");
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/ragged_range_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/ragged_range_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
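The kernel above makes two passes: row sizes are prefix-summed into the splits tensor, then the values are filled. Here is a dependency-free, integer-only sketch of that scheme, assuming `delta != 0` has already been validated; on the test's inputs (`starts = {0, 5, 8, 5}`, `limits = {8, 7, 8, 1}`, `deltas = {2, 1, 1, -1}`) it produces splits `{0, 4, 6, 6, 10}` and values `{0, 2, 4, 6, 5, 6, 5, 4, 3, 2}`, matching `IntValues`.

```cpp
#include <cstdint>
#include <cstdlib>
#include <vector>

// Two-pass ragged range: pass 1 builds the row splits, pass 2 the values.
void RaggedRange(const std::vector<int64_t>& starts,
                 const std::vector<int64_t>& limits,
                 const std::vector<int64_t>& deltas,
                 std::vector<int64_t>* splits,
                 std::vector<int64_t>* values) {
  const size_t nrows = starts.size();
  splits->assign(1, 0);
  values->clear();
  for (size_t row = 0; row < nrows; ++row) {
    const int64_t start = starts[row], limit = limits[row],
                  delta = deltas[row];
    int64_t size = 0;
    // The range is empty when delta points away from the limit.
    if (!((delta > 0 && limit < start) || (delta < 0 && limit > start))) {
      // ceil(|limit - start| / |delta|), matching Eigen::divup above.
      const int64_t span = std::abs(limit - start);
      const int64_t step = std::abs(delta);
      size = (span + step - 1) / step;
    }
    splits->push_back(splits->back() + size);
  }
  for (size_t row = 0; row < nrows; ++row) {
    int64_t value = starts[row];
    for (int64_t i = (*splits)[row]; i < (*splits)[row + 1]; ++i) {
      values->push_back(value);
      value += deltas[row];
    }
  }
}
```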
e651f8c4-71e9-4cab-a12a-ea760481aaca | cpp | tensorflow/tensorflow | mkl_cpu_allocator | tensorflow/core/common_runtime/mkl_cpu_allocator.cc | tensorflow/core/common_runtime/mkl_cpu_allocator_test.cc | #ifdef INTEL_MKL
#include "tensorflow/core/common_runtime/mkl_cpu_allocator.h"
namespace tensorflow {
constexpr const char* MklCPUAllocator::kMaxLimitStr;
constexpr const size_t MklCPUAllocator::kDefaultMaxLimit;
}
#endif | #if defined(INTEL_MKL)
#include "tensorflow/core/common_runtime/mkl_cpu_allocator.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
TEST(MKLBFCAllocatorTest, TestMaxLimit) {
setenv(MklCPUAllocator::kMaxLimitStr, "1000", 1);
MklCPUAllocator a;
TF_EXPECT_OK(a.Initialize());
auto stats = a.GetStats();
EXPECT_EQ(stats->bytes_limit, 1000);
unsetenv(MklCPUAllocator::kMaxLimitStr);
TF_EXPECT_OK(a.Initialize());
stats = a.GetStats();
uint64 max_mem_bytes = MklCPUAllocator::kDefaultMaxLimit;
#if defined(_SC_PHYS_PAGES) && defined(_SC_PAGESIZE)
max_mem_bytes =
(uint64)sysconf(_SC_PHYS_PAGES) * (uint64)sysconf(_SC_PAGESIZE);
#endif
EXPECT_EQ(stats->bytes_limit, max_mem_bytes);
setenv(MklCPUAllocator::kMaxLimitStr, "wrong-input", 1);
EXPECT_TRUE(errors::IsInvalidArgument(a.Initialize()));
setenv(MklCPUAllocator::kMaxLimitStr, "-20", 1);
EXPECT_TRUE(errors::IsInvalidArgument(a.Initialize()));
}
}
#endif | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/mkl_cpu_allocator.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/mkl_cpu_allocator_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
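The test above exercises limit parsing from an environment variable: a numeric value is honored, while `wrong-input` and `-20` must fail initialization. A rough standalone sketch of that validation follows; the function name and the unset-versus-invalid handling are simplifications for illustration, not the MKL allocator's actual logic.

```cpp
#include <cstdint>
#include <cstdlib>

// Parses a positive byte limit from an environment variable. Returns false
// when the variable is unset or malformed; the caller decides whether that
// means "use the default" or "report an invalid-argument error".
bool ParseByteLimit(const char* env_var, int64_t* limit_bytes) {
  const char* raw = std::getenv(env_var);
  if (raw == nullptr) return false;
  char* end = nullptr;
  const long long parsed = std::strtoll(raw, &end, 10);
  // Rejects "wrong-input" (no digits / trailing junk) and "-20" (<= 0).
  if (end == raw || *end != '\0' || parsed <= 0) return false;
  *limit_bytes = parsed;
  return true;
}
```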
c15ad434-bb08-4201-adc4-f39173e7f27c | cpp | google/cel-cpp | double_type | common/types/double_type.h | common/types/double_type_test.cc | #ifndef THIRD_PARTY_CEL_CPP_COMMON_TYPES_DOUBLE_TYPE_H_
#define THIRD_PARTY_CEL_CPP_COMMON_TYPES_DOUBLE_TYPE_H_
#include <ostream>
#include <string>
#include <utility>
#include "absl/strings/string_view.h"
#include "common/type_kind.h"
namespace cel {
class Type;
class TypeParameters;
class DoubleType final {
public:
static constexpr TypeKind kKind = TypeKind::kDouble;
static constexpr absl::string_view kName = "double";
DoubleType() = default;
DoubleType(const DoubleType&) = default;
DoubleType(DoubleType&&) = default;
DoubleType& operator=(const DoubleType&) = default;
DoubleType& operator=(DoubleType&&) = default;
static TypeKind kind() { return kKind; }
static absl::string_view name() { return kName; }
static TypeParameters GetParameters();
static std::string DebugString() { return std::string(name()); }
constexpr void swap(DoubleType&) noexcept {}
};
inline constexpr void swap(DoubleType& lhs, DoubleType& rhs) noexcept {
lhs.swap(rhs);
}
inline constexpr bool operator==(DoubleType, DoubleType) { return true; }
inline constexpr bool operator!=(DoubleType lhs, DoubleType rhs) {
return !operator==(lhs, rhs);
}
template <typename H>
H AbslHashValue(H state, DoubleType) {
return std::move(state);
}
inline std::ostream& operator<<(std::ostream& out, const DoubleType& type) {
return out << type.DebugString();
}
}
#endif | #include <sstream>
#include "absl/hash/hash.h"
#include "common/type.h"
#include "internal/testing.h"
namespace cel {
namespace {
TEST(DoubleType, Kind) {
EXPECT_EQ(DoubleType().kind(), DoubleType::kKind);
EXPECT_EQ(Type(DoubleType()).kind(), DoubleType::kKind);
}
TEST(DoubleType, Name) {
EXPECT_EQ(DoubleType().name(), DoubleType::kName);
EXPECT_EQ(Type(DoubleType()).name(), DoubleType::kName);
}
TEST(DoubleType, DebugString) {
{
std::ostringstream out;
out << DoubleType();
EXPECT_EQ(out.str(), DoubleType::kName);
}
{
std::ostringstream out;
out << Type(DoubleType());
EXPECT_EQ(out.str(), DoubleType::kName);
}
}
TEST(DoubleType, Hash) {
EXPECT_EQ(absl::HashOf(DoubleType()), absl::HashOf(DoubleType()));
}
TEST(DoubleType, Equal) {
EXPECT_EQ(DoubleType(), DoubleType());
EXPECT_EQ(Type(DoubleType()), DoubleType());
EXPECT_EQ(DoubleType(), Type(DoubleType()));
EXPECT_EQ(Type(DoubleType()), Type(DoubleType()));
}
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/common/types/double_type.h | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/common/types/double_type_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
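`DoubleType` is a stateless tag: every instance compares equal, hashes identically, and prints its fixed name `double`. A short usage sketch, assuming the cel headers shown above are on the include path:

```cpp
#include <iostream>

#include "absl/hash/hash.h"
#include "common/type.h"

int main() {
  cel::DoubleType a, b;
  std::cout << a << "\n";         // prints "double" via operator<<
  std::cout << (a == b) << "\n";  // prints 1; all instances are equal
  // Hashing folds in no state, so every instance hashes the same.
  std::cout << (absl::HashOf(a) == absl::HashOf(b)) << "\n";  // prints 1
  return 0;
}
```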
52ccbbd8-23d0-473e-a425-5f4b87f19ae3 | cpp | tensorflow/tensorflow | rename_fusions | third_party/xla/xla/service/gpu/transforms/rename_fusions.cc | third_party/xla/xla/service/gpu/transforms/rename_fusions_test.cc | #include "xla/service/gpu/transforms/rename_fusions.h"
#include <memory>
#include <string>
#include "absl/container/btree_set.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/strings/str_replace.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/gpu/hlo_traversal.h"
#include "xla/service/gpu/ir_emission_utils.h"
namespace xla {
namespace gpu {
namespace {
constexpr absl::string_view FusionKindToString(
HloInstruction::FusionKind kind) {
switch (kind) {
case HloInstruction::FusionKind::kCustom:
return "custom";
case HloInstruction::FusionKind::kLoop:
return "loop";
case HloInstruction::FusionKind::kInput:
return "input";
case HloInstruction::FusionKind::kOutput:
return "output";
}
}
std::string MakeFusionHeroNames(const HloInstruction* instruction) {
std::unique_ptr<HloFusionAdaptor> fusion_adaptor =
HloFusionAdaptor::ForInstruction(instruction);
absl::btree_set<absl::string_view> heroes;
for (auto root : fusion_adaptor->GetRoots()) {
heroes.insert(HloOpcodeString(FindNonTrivialHero(root).opcode()));
}
return absl::StrReplaceAll(absl::StrJoin(heroes, "_"), {{"-", "_"}});
}
void RenameFusion(HloModule* module, HloInstruction* instruction) {
std::string hero_names = MakeFusionHeroNames(instruction);
module->SetAndUniquifyInstrName(
instruction, absl::StrCat(FusionKindToString(instruction->fusion_kind()),
"_", hero_names, "_fusion"));
module->SetAndUniquifyComputationName(
instruction->fused_instructions_computation(),
absl::StrCat("fused_", hero_names));
}
}
absl::StatusOr<bool> RenameFusions::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
for (HloComputation* computation : module->MakeNonfusionComputations()) {
for (HloInstruction* instruction : computation->instructions()) {
if (instruction->opcode() != HloOpcode::kFusion ||
instruction->fusion_kind() == HloInstruction::FusionKind::kCustom) {
continue;
}
RenameFusion(module, instruction);
}
}
return true;
}
}
} | #include "xla/service/gpu/transforms/rename_fusions.h"
#include <utility>
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
#include "xla/tests/hlo_test_base.h"
namespace xla {
namespace gpu {
class RenameFusionsTest : public HloTestBase {
protected:
RenameFusions rename_fusions_;
};
TEST_F(RenameFusionsTest, FusionInstructionNames) {
absl::string_view kHlo = R"(
HloModule test_module
square {
p = f32[16384] parameter(0)
ROOT m = f32[16384] multiply(p, p)
}
exp {
p = f32[16384] parameter(0)
ROOT e = f32[16384] exponential(p)
}
log {
p = f32[16384] parameter(0)
ROOT l = f32[16384] log(p)
}
add {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT add = f32[] add(p0, p1)
}
ENTRY main {
p0 = bf16[1024,8192] parameter(0)
p1 = f32[8192] parameter(1)
p2 = f32[16384] parameter(2)
convert = f32[1024,8192] convert(p0)
broadcast = f32[1024,8192] broadcast(p1), dimensions={1}
c0 = f32[] constant(0)
multiply = f32[1024,8192] multiply(broadcast, convert)
reduce = f32[1024] reduce(multiply, c0), dimensions={1}, to_apply=add
convert.1 = bf16[1024] convert(reduce)
s = f32[16384] fusion(p2), kind=kLoop, calls=square
e = f32[16384] fusion(s), kind=kLoop, calls=exp
l = f32[16384] fusion(s), kind=kInput, calls=log
ROOT result = (bf16[1024]{0}, f32[16384]{0}, f32[16384]{0}) tuple(convert.1, l, e)
})";
RunAndFilecheckHloRewrite(kHlo, std::move(rename_fusions_), R"(
CHECK: ENTRY %main
CHECK: %loop_multiply_fusion{{.*}} calls=%fused_multiply
CHECK: %input_log_fusion{{.*}} calls=%fused_log
CHECK: %loop_exponential_fusion{{.*}} calls=%fused_exponential
CHECK: ROOT %result
)");
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/rename_fusions.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/rename_fusions_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
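`MakeFusionHeroNames` derives a fusion's name from a sorted, deduplicated set of hero opcode names joined with `_`, with `-` rewritten to `_` (as in `all-reduce`) so the result is a valid HLO identifier. A dependency-free sketch, with hand-rolled stand-ins for `absl::StrJoin` and `absl::StrReplaceAll`:

```cpp
#include <iostream>
#include <set>
#include <string>

// std::set keeps the hero names deduplicated and sorted, as the
// absl::btree_set does in the pass above.
std::string MakeHeroNames(const std::set<std::string>& heroes) {
  std::string joined;
  for (const std::string& h : heroes) {
    if (!joined.empty()) joined += "_";
    joined += h;
  }
  for (char& c : joined) {
    if (c == '-') c = '_';  // sanitize opcode names like "all-reduce"
  }
  return joined;
}

int main() {
  // Mirrors the test: a loop fusion whose hero is "multiply" becomes
  // "loop_multiply_fusion", and its computation "fused_multiply".
  const std::string heroes = MakeHeroNames({"multiply"});
  std::cout << "loop_" + heroes + "_fusion" << "\n";
  std::cout << "fused_" + heroes << "\n";
  return 0;
}
```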
b1dcf762-9ec7-475c-a7a3-29543231774f | cpp | tensorflow/tensorflow | minimal_logging | tensorflow/lite/minimal_logging.cc | tensorflow/lite/minimal_logging_test.cc | #include "tensorflow/lite/minimal_logging.h"
#include <cstdarg>
#include "tensorflow/lite/logger.h"
namespace tflite {
namespace logging_internal {
void MinimalLogger::Log(LogSeverity severity, const char* format, ...) {
va_list args;
va_start(args, format);
LogFormatted(severity, format, args);
va_end(args);
}
const char* MinimalLogger::GetSeverityName(LogSeverity severity) {
switch (severity) {
case TFLITE_LOG_VERBOSE:
return "VERBOSE";
case TFLITE_LOG_INFO:
return "INFO";
case TFLITE_LOG_WARNING:
return "WARNING";
case TFLITE_LOG_ERROR:
return "ERROR";
case TFLITE_LOG_SILENT:
return "SILENT";
}
return "<Unknown severity>";
}
LogSeverity MinimalLogger::GetMinimumLogSeverity() {
return MinimalLogger::minimum_log_severity_;
}
LogSeverity MinimalLogger::SetMinimumLogSeverity(LogSeverity new_severity) {
LogSeverity old_severity = MinimalLogger::minimum_log_severity_;
MinimalLogger::minimum_log_severity_ = new_severity;
return old_severity;
}
}
} | #include "tensorflow/lite/minimal_logging.h"
#include <gtest/gtest.h>
#include "tensorflow/lite/logger.h"
namespace tflite {
TEST(MinimalLogging, Basic) {
testing::internal::CaptureStderr();
TFLITE_LOG_PROD(TFLITE_LOG_INFO, "Foo");
EXPECT_EQ("INFO: Foo\n", testing::internal::GetCapturedStderr());
}
TEST(MinimalLogging, BasicFormatted) {
testing::internal::CaptureStderr();
TFLITE_LOG_PROD(TFLITE_LOG_INFO, "Foo %s %s", "Bar", "Baz");
EXPECT_EQ("INFO: Foo Bar Baz\n", testing::internal::GetCapturedStderr());
}
TEST(MinimalLogging, Warn) {
testing::internal::CaptureStderr();
TFLITE_LOG_PROD(TFLITE_LOG_WARNING, "One", "");
EXPECT_EQ("WARNING: One\n", testing::internal::GetCapturedStderr());
}
TEST(MinimalLogging, Error) {
testing::internal::CaptureStderr();
TFLITE_LOG_PROD(TFLITE_LOG_ERROR, "Two");
EXPECT_EQ("ERROR: Two\n", testing::internal::GetCapturedStderr());
}
TEST(MinimalLogging, UnknownSeverity) {
testing::internal::CaptureStderr();
LogSeverity default_log_severity = TFLITE_LOG_INFO;
#if defined(__ANDROID__) || !defined(NDEBUG)
default_log_severity = TFLITE_LOG_VERBOSE;
#endif
EXPECT_EQ(tflite::logging_internal::MinimalLogger::SetMinimumLogSeverity(
static_cast<LogSeverity>(-1)),
default_log_severity);
TFLITE_LOG_PROD(static_cast<LogSeverity>(-1), "Three");
EXPECT_EQ("<Unknown severity>: Three\n",
testing::internal::GetCapturedStderr());
tflite::logging_internal::MinimalLogger::SetMinimumLogSeverity(
default_log_severity);
}
TEST(MinimalLogging, MinimumSeverity) {
testing::internal::CaptureStderr();
LogSeverity default_log_severity = TFLITE_LOG_INFO;
#if defined(__ANDROID__) || !defined(NDEBUG)
default_log_severity = TFLITE_LOG_VERBOSE;
#endif
EXPECT_EQ(tflite::logging_internal::MinimalLogger::SetMinimumLogSeverity(
TFLITE_LOG_WARNING),
default_log_severity);
TFLITE_LOG_PROD(TFLITE_LOG_WARNING, "Foo");
TFLITE_LOG_PROD(default_log_severity, "Bar");
EXPECT_EQ("WARNING: Foo\n", testing::internal::GetCapturedStderr());
tflite::logging_internal::MinimalLogger::SetMinimumLogSeverity(
default_log_severity);
}
TEST(MinimalLogging, Once) {
testing::internal::CaptureStderr();
for (int i = 0; i < 10; ++i) {
TFLITE_LOG_PROD_ONCE(TFLITE_LOG_INFO, "Count: %d", i);
}
EXPECT_EQ("INFO: Count: 0\n", testing::internal::GetCapturedStderr());
}
TEST(MinimalLogging, Debug) {
testing::internal::CaptureStderr();
TFLITE_LOG(TFLITE_LOG_INFO, "Foo");
TFLITE_LOG(TFLITE_LOG_WARNING, "Bar");
TFLITE_LOG(TFLITE_LOG_ERROR, "Baz");
#ifndef NDEBUG
EXPECT_EQ("INFO: Foo\nWARNING: Bar\nERROR: Baz\n",
testing::internal::GetCapturedStderr());
#else
EXPECT_TRUE(testing::internal::GetCapturedStderr().empty());
#endif
}
TEST(MinimalLogging, DebugOnce) {
testing::internal::CaptureStderr();
for (int i = 0; i < 10; ++i) {
TFLITE_LOG_ONCE(TFLITE_LOG_INFO, "Count: %d", i);
}
#ifndef NDEBUG
EXPECT_EQ("INFO: Count: 0\n", testing::internal::GetCapturedStderr());
#else
EXPECT_TRUE(testing::internal::GetCapturedStderr().empty());
#endif
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/minimal_logging.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/minimal_logging_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
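The logger above follows the classic variadic-forwarding pattern: the `...` entry point packages its arguments into a `va_list` and hands off to a single formatted sink, which is also where the minimum-severity gate lives. A self-contained sketch of that pattern; the severity names and default threshold are simplified stand-ins for the TFLite ones.

```cpp
#include <cstdarg>
#include <cstdio>

enum Severity { kVerbose, kInfo, kWarning, kError };

// Single sink: applies the severity gate, then formats to stderr.
void LogFormatted(Severity s, const char* format, va_list args) {
  static const Severity min_severity = kInfo;  // stand-in for the member
  if (s < min_severity) return;                // messages below are dropped
  static const char* kNames[] = {"VERBOSE", "INFO", "WARNING", "ERROR"};
  std::fprintf(stderr, "%s: ", kNames[s]);
  std::vfprintf(stderr, format, args);
  std::fprintf(stderr, "\n");
}

// Variadic entry point: packages the arguments and forwards, so printf-style
// callers and callers already holding a va_list share one implementation.
void Log(Severity s, const char* format, ...) {
  va_list args;
  va_start(args, format);
  LogFormatted(s, format, args);
  va_end(args);
}

int main() {
  Log(kInfo, "Foo %s %s", "Bar", "Baz");  // prints "INFO: Foo Bar Baz"
  Log(kVerbose, "dropped");               // below the minimum severity
  return 0;
}
```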
3d22ad6c-a254-4b61-adf3-de1bf2716283 | cpp | google/tsl | time_util | tsl/platform/cloud/time_util.cc | tsl/platform/cloud/time_util_test.cc | #include "tsl/platform/cloud/time_util.h"
#include <time.h>
#include <cmath>
#include <cstdio>
#include <ctime>
#ifdef _WIN32
#define timegm _mkgmtime
#endif
#include "tsl/platform/errors.h"
namespace tsl {
namespace {
constexpr int64_t kNanosecondsPerSecond = 1000 * 1000 * 1000;
}
absl::Status ParseRfc3339Time(const string& time, int64_t* mtime_nsec) {
tm parsed{0};
float seconds;
if (sscanf(time.c_str(), "%4d-%2d-%2dT%2d:%2d:%fZ", &(parsed.tm_year),
&(parsed.tm_mon), &(parsed.tm_mday), &(parsed.tm_hour),
&(parsed.tm_min), &seconds) != 6) {
return errors::Internal(
strings::StrCat("Unrecognized RFC 3339 time format: ", time));
}
const int int_seconds = std::floor(seconds);
parsed.tm_year -= 1900;
parsed.tm_mon -= 1;
parsed.tm_sec = int_seconds;
*mtime_nsec = timegm(&parsed) * kNanosecondsPerSecond +
static_cast<int64_t>(std::floor((seconds - int_seconds) *
kNanosecondsPerSecond));
return absl::OkStatus();
}
} | #include "tsl/platform/cloud/time_util.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tsl/platform/test.h"
namespace tsl {
TEST(TimeUtil, ParseRfc3339Time) {
int64_t mtime_nsec;
TF_EXPECT_OK(ParseRfc3339Time("2016-04-29T23:15:24.896Z", &mtime_nsec));
EXPECT_NEAR(1461971724896, mtime_nsec / 1000 / 1000, 1);
}
TEST(TimeUtil, ParseRfc3339Time_ParseError) {
int64_t mtime_nsec;
EXPECT_EQ("Unrecognized RFC 3339 time format: 2016-04-29",
ParseRfc3339Time("2016-04-29", &mtime_nsec).message());
}
} | https://github.com/google/tsl/blob/6d708fdcdd4f40537b7fa273371215a6fa3d4423/tsl/platform/cloud/time_util.cc | https://github.com/google/tsl/blob/6d708fdcdd4f40537b7fa273371215a6fa3d4423/tsl/platform/cloud/time_util_test.cc | 6d708fdcdd4f40537b7fa273371215a6fa3d4423 |
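The parser splits the RFC 3339 string with `sscanf`, converts the integral seconds through `timegm` (POSIX/glibc; the source above maps it to `_mkgmtime` on Windows), and scales the fractional part to nanoseconds separately so float precision only touches the sub-second component. A self-contained version with a worked check against the test's timestamp:

```cpp
#include <cmath>
#include <cstdint>
#include <cstdio>
#include <ctime>

// Standalone copy of the parsing scheme above. Assumes a POSIX timegm().
bool ParseRfc3339(const char* time, int64_t* nsec) {
  std::tm parsed{};
  float seconds = 0.0f;
  if (std::sscanf(time, "%4d-%2d-%2dT%2d:%2d:%fZ", &parsed.tm_year,
                  &parsed.tm_mon, &parsed.tm_mday, &parsed.tm_hour,
                  &parsed.tm_min, &seconds) != 6) {
    return false;
  }
  const int int_seconds = static_cast<int>(std::floor(seconds));
  parsed.tm_year -= 1900;  // struct tm counts years since 1900
  parsed.tm_mon -= 1;      // and months from 0
  parsed.tm_sec = int_seconds;
  *nsec = static_cast<int64_t>(timegm(&parsed)) * 1000000000LL +
          static_cast<int64_t>((seconds - int_seconds) * 1e9);
  return true;
}

int main() {
  int64_t nsec = 0;
  if (ParseRfc3339("2016-04-29T23:15:24.896Z", &nsec)) {
    // Prints 1461971724896, the millisecond value the unit test checks.
    std::printf("%lld ms\n", static_cast<long long>(nsec / 1000000));
  }
  return 0;
}
```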
8971d4cb-bee3-478d-b5af-bb826df19571 | cpp | tensorflow/tensorflow | gpu_executor | third_party/xla/xla/stream_executor/gpu/gpu_executor.h | third_party/xla/xla/stream_executor/gpu/gpu_executor_test.cc | #ifndef XLA_STREAM_EXECUTOR_GPU_GPU_EXECUTOR_H_
#define XLA_STREAM_EXECUTOR_GPU_GPU_EXECUTOR_H_
#include <cstdint>
#include <memory>
#include <utility>
#include <variant>
#include <vector>
#include "absl/base/thread_annotations.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/synchronization/mutex.h"
#include "xla/stream_executor/device_description.h"
#include "xla/stream_executor/event_based_timer.h"
#include "xla/stream_executor/gpu/context.h"
#include "xla/stream_executor/host_memory_allocation.h"
#include "xla/stream_executor/kernel.h"
#include "xla/stream_executor/kernel_spec.h"
#include "xla/stream_executor/platform.h"
#include "xla/stream_executor/stream_executor.h"
#include "xla/stream_executor/stream_executor_common.h"
namespace stream_executor {
namespace gpu {
class GpuStream;
class GpuExecutor : public StreamExecutorCommon {
public:
GpuExecutor(Platform* platform, int device_ordinal)
: StreamExecutorCommon(platform),
context_(nullptr),
device_ordinal_(device_ordinal) {}
int device_ordinal() const override { return device_ordinal_; };
virtual void UnloadKernel(const Kernel* kernel) = 0;
virtual absl::StatusOr<std::unique_ptr<EventBasedTimer>>
CreateEventBasedTimer(GpuStream* stream, bool use_delay_kernel) = 0;
virtual absl::Status TrimGraphMemory() = 0;
Context* gpu_context() const { return context_; }
absl::StatusOr<std::vector<ApiTrace>> ExtractApiTrace() override {
absl::MutexLock lock(&logger_mu_);
return std::move(argument_logs_);
}
absl::Status RecordApiTrace(ApiTrace call) override {
absl::MutexLock lock(&logger_mu_);
if (std::holds_alternative<GemmCallTrace>(call) &&
(argument_logging_mode_ & kLogGemm)) {
argument_logs_.push_back(call);
}
return absl::OkStatus();
}
bool SetArgumentLoggingMode(uint64_t mode) override {
absl::MutexLock lock(&logger_mu_);
argument_logging_mode_ = mode;
return true;
}
uint64_t GetArgumentLoggingMode() const { return argument_logging_mode_; }
protected:
void set_context(Context* context) { context_ = context; }
private:
Context* context_;
int device_ordinal_;
absl::Mutex logger_mu_;
mutable std::vector<ApiTrace> argument_logs_ ABSL_GUARDED_BY(logger_mu_);
uint64_t argument_logging_mode_ = 0;
GpuExecutor(const GpuExecutor&) = delete;
void operator=(const GpuExecutor&) = delete;
};
inline GpuExecutor* ExtractGpuExecutor(StreamExecutor* stream_exec) {
return static_cast<GpuExecutor*>(stream_exec);
}
}
}
#endif | #include <gtest/gtest.h>
#include "absl/status/statusor.h"
#include "absl/strings/ascii.h"
#include "xla/service/platform_util.h"
#include "xla/stream_executor/platform.h"
#include "xla/stream_executor/platform_manager.h"
#include "xla/stream_executor/stream_executor.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace stream_executor {
class GpuExecutorTest : public testing::Test {
public:
Platform* GetPlatform() {
auto name = absl::AsciiStrToLower(
xla::PlatformUtil::CanonicalPlatformName("gpu").value());
return PlatformManager::PlatformWithName(name).value();
}
};
using GetPointerMemorySpaceTest = GpuExecutorTest;
TEST_F(GetPointerMemorySpaceTest, Host) {
StreamExecutor* executor = GetPlatform()->ExecutorForDevice(0).value();
TF_ASSERT_OK_AND_ASSIGN(auto host_ptr, executor->HostMemoryAllocate(64));
TF_ASSERT_OK_AND_ASSIGN(auto memory_space,
executor->GetPointerMemorySpace(host_ptr->opaque()))
EXPECT_EQ(memory_space, MemoryType::kHost);
}
TEST_F(GetPointerMemorySpaceTest, Device) {
StreamExecutor* executor = GetPlatform()->ExecutorForDevice(0).value();
auto mem = executor->Allocate(64);
ASSERT_NE(mem, nullptr);
TF_ASSERT_OK_AND_ASSIGN(auto memory_space,
executor->GetPointerMemorySpace(mem.opaque()))
EXPECT_EQ(memory_space, MemoryType::kDevice);
executor->Deallocate(&mem);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/stream_executor/gpu/gpu_executor.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/stream_executor/gpu/gpu_executor_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
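`GpuExecutor`'s trace logging above is a small mutex-guarded buffer: writes are filtered by a mode bitmask, and extraction moves the whole vector out so the buffer drains atomically. A sketch of that pattern, with `std::mutex` standing in for `absl::Mutex` and plain strings standing in for the `ApiTrace` variant:

```cpp
#include <cstdint>
#include <mutex>
#include <string>
#include <utility>
#include <vector>

class ApiTraceLog {
 public:
  static constexpr uint64_t kLogGemm = 1 << 0;  // illustrative mode bit

  void SetMode(uint64_t mode) {
    std::lock_guard<std::mutex> lock(mu_);
    mode_ = mode;
  }

  // Records the entry only if its kind is enabled in the current mode,
  // mirroring the (argument_logging_mode_ & kLogGemm) check above.
  void Record(uint64_t kind_bit, std::string entry) {
    std::lock_guard<std::mutex> lock(mu_);
    if (mode_ & kind_bit) entries_.push_back(std::move(entry));
  }

  // Returns and clears the accumulated trace in one locked step, as
  // ExtractApiTrace does by moving the guarded vector.
  std::vector<std::string> Extract() {
    std::lock_guard<std::mutex> lock(mu_);
    return std::move(entries_);
  }

 private:
  std::mutex mu_;
  uint64_t mode_ = 0;
  std::vector<std::string> entries_;
};
```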
bc60f86c-e41b-41b2-8fdf-d424fb530694 | cpp | google/quiche | quic_blocked_writer_list | quiche/quic/core/quic_blocked_writer_list.cc | quiche/quic/core/quic_blocked_writer_list_test.cc | #include "quiche/quic/core/quic_blocked_writer_list.h"
#include <utility>
#include "quiche/quic/platform/api/quic_bug_tracker.h"
#include "quiche/quic/platform/api/quic_flag_utils.h"
namespace quic {
void QuicBlockedWriterList::Add(QuicBlockedWriterInterface& blocked_writer) {
if (!blocked_writer.IsWriterBlocked()) {
QUIC_BUG(quic_bug_12724_4)
<< "Tried to add writer into blocked list when it shouldn't be added";
return;
}
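  // The map doubles as an insertion-ordered set; the bool value carries no
  // meaning.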
write_blocked_list_.insert(std::make_pair(&blocked_writer, true));
}
bool QuicBlockedWriterList::Empty() const {
return write_blocked_list_.empty();
}
bool QuicBlockedWriterList::Remove(QuicBlockedWriterInterface& blocked_writer) {
return write_blocked_list_.erase(&blocked_writer) != 0;
}
void QuicBlockedWriterList::OnWriterUnblocked() {
const size_t num_blocked_writers_before = write_blocked_list_.size();
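  // Drain a swapped-out copy: a writer that blocks again inside
  // OnBlockedWriterCanWrite() re-adds itself to the (now empty) member list
  // instead of the list currently being drained.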
WriteBlockedList temp_list;
temp_list.swap(write_blocked_list_);
QUICHE_DCHECK(write_blocked_list_.empty());
while (!temp_list.empty()) {
QuicBlockedWriterInterface* blocked_writer = temp_list.begin()->first;
temp_list.erase(temp_list.begin());
blocked_writer->OnBlockedWriterCanWrite();
}
const size_t num_blocked_writers_after = write_blocked_list_.size();
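  // Metrics only: distinguish "no writer made progress" from "some writers
  // blocked again".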
if (num_blocked_writers_after != 0) {
if (num_blocked_writers_before == num_blocked_writers_after) {
QUIC_CODE_COUNT(quic_zero_progress_on_can_write);
} else {
QUIC_CODE_COUNT(quic_blocked_again_on_can_write);
}
}
}
} | #include "quiche/quic/core/quic_blocked_writer_list.h"
#include "quiche/quic/core/quic_blocked_writer_interface.h"
#include "quiche/quic/platform/api/quic_test.h"
namespace quic {
using testing::Invoke;
using testing::Return;
namespace {
class TestWriter : public QuicBlockedWriterInterface {
public:
~TestWriter() override = default;
MOCK_METHOD(void, OnBlockedWriterCanWrite, ());
MOCK_METHOD(bool, IsWriterBlocked, (), (const));
};
}
TEST(QuicBlockedWriterList, Empty) {
QuicBlockedWriterList list;
EXPECT_TRUE(list.Empty());
}
TEST(QuicBlockedWriterList, NotEmpty) {
QuicBlockedWriterList list;
testing::StrictMock<TestWriter> writer1;
EXPECT_CALL(writer1, IsWriterBlocked()).WillOnce(Return(true));
list.Add(writer1);
EXPECT_FALSE(list.Empty());
list.Remove(writer1);
EXPECT_TRUE(list.Empty());
}
TEST(QuicBlockedWriterList, OnWriterUnblocked) {
QuicBlockedWriterList list;
testing::StrictMock<TestWriter> writer1;
EXPECT_CALL(writer1, IsWriterBlocked()).WillOnce(Return(true));
list.Add(writer1);
EXPECT_CALL(writer1, OnBlockedWriterCanWrite());
list.OnWriterUnblocked();
EXPECT_TRUE(list.Empty());
}
TEST(QuicBlockedWriterList, OnWriterUnblockedInOrder) {
QuicBlockedWriterList list;
testing::StrictMock<TestWriter> writer1;
testing::StrictMock<TestWriter> writer2;
testing::StrictMock<TestWriter> writer3;
EXPECT_CALL(writer1, IsWriterBlocked()).WillOnce(Return(true));
EXPECT_CALL(writer2, IsWriterBlocked()).WillOnce(Return(true));
EXPECT_CALL(writer3, IsWriterBlocked()).WillOnce(Return(true));
list.Add(writer1);
list.Add(writer2);
list.Add(writer3);
testing::InSequence s;
EXPECT_CALL(writer1, OnBlockedWriterCanWrite());
EXPECT_CALL(writer2, OnBlockedWriterCanWrite());
EXPECT_CALL(writer3, OnBlockedWriterCanWrite());
list.OnWriterUnblocked();
EXPECT_TRUE(list.Empty());
}
TEST(QuicBlockedWriterList, OnWriterUnblockedInOrderAfterReinsertion) {
QuicBlockedWriterList list;
testing::StrictMock<TestWriter> writer1;
testing::StrictMock<TestWriter> writer2;
testing::StrictMock<TestWriter> writer3;
EXPECT_CALL(writer1, IsWriterBlocked()).WillOnce(Return(true));
EXPECT_CALL(writer2, IsWriterBlocked()).WillOnce(Return(true));
EXPECT_CALL(writer3, IsWriterBlocked()).WillOnce(Return(true));
list.Add(writer1);
list.Add(writer2);
list.Add(writer3);
EXPECT_CALL(writer1, IsWriterBlocked()).WillOnce(Return(true));
list.Add(writer1);
testing::InSequence s;
EXPECT_CALL(writer1, OnBlockedWriterCanWrite());
EXPECT_CALL(writer2, OnBlockedWriterCanWrite());
EXPECT_CALL(writer3, OnBlockedWriterCanWrite());
list.OnWriterUnblocked();
EXPECT_TRUE(list.Empty());
}
TEST(QuicBlockedWriterList, OnWriterUnblockedThenBlocked) {
QuicBlockedWriterList list;
testing::StrictMock<TestWriter> writer1;
testing::StrictMock<TestWriter> writer2;
testing::StrictMock<TestWriter> writer3;
EXPECT_CALL(writer1, IsWriterBlocked()).WillOnce(Return(true));
EXPECT_CALL(writer2, IsWriterBlocked()).WillOnce(Return(true));
EXPECT_CALL(writer3, IsWriterBlocked()).WillOnce(Return(true));
list.Add(writer1);
list.Add(writer2);
list.Add(writer3);
EXPECT_CALL(writer1, OnBlockedWriterCanWrite());
EXPECT_CALL(writer2, IsWriterBlocked()).WillOnce(Return(true));
EXPECT_CALL(writer2, OnBlockedWriterCanWrite()).WillOnce(Invoke([&]() {
list.Add(writer2);
}));
EXPECT_CALL(writer3, OnBlockedWriterCanWrite());
list.OnWriterUnblocked();
EXPECT_FALSE(list.Empty());
EXPECT_CALL(writer2, OnBlockedWriterCanWrite());
list.OnWriterUnblocked();
EXPECT_TRUE(list.Empty());
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/quic_blocked_writer_list.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/quic_blocked_writer_list_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
e623c8f3-9336-4f63-a160-ff9b2d0ae2db | cpp | google/tensorstore | label_op | tensorstore/index_space/internal/label_op.cc | tensorstore/index_space/label_op_test.cc | #include "tensorstore/index_space/internal/label_op.h"
#include <stddef.h>
#include <string>
#include <string_view>
#include <utility>
#include "absl/status/status.h"
#include "tensorstore/container_kind.h"
#include "tensorstore/index.h"
#include "tensorstore/index_space/dimension_index_buffer.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/index_space/internal/transform_rep.h"
#include "tensorstore/internal/dimension_labels.h"
#include "tensorstore/internal/string_like.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal_index_space {
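// Assigns new labels to the selected input dimensions, then re-validates that
// all dimension labels in the transform remain unique.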
Result<IndexTransform<>> ApplyLabel(IndexTransform<> transform,
DimensionIndexBuffer* dimensions,
internal::StringLikeSpan labels,
bool domain_only) {
if (dimensions->size() != static_cast<size_t>(labels.size())) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Number of dimensions (", dimensions->size(),
") does not match number of labels (", labels.size(), ")."));
}
auto rep = MutableRep(
TransformAccess::rep_ptr<container>(std::move(transform)), domain_only);
const DimensionIndex input_rank = rep->input_rank;
span<std::string> input_labels = rep->input_labels().first(input_rank);
for (DimensionIndex i = 0;
i < static_cast<DimensionIndex>(dimensions->size()); ++i) {
const DimensionIndex input_dim = (*dimensions)[i];
std::string_view label = labels[i];
input_labels[input_dim].assign(label.begin(), label.end());
}
TENSORSTORE_RETURN_IF_ERROR(
internal::ValidateDimensionLabelsAreUnique(input_labels));
internal_index_space::DebugCheckInvariants(rep.get());
return TransformAccess::Make<IndexTransform<>>(std::move(rep));
}
}
} | #include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/index_space/dim_expression.h"
#include "tensorstore/index_space/index_transform_builder.h"
#include "tensorstore/index_space/internal/dim_expression_testutil.h"
#include "tensorstore/util/status.h"
namespace {
using ::tensorstore::DimensionIndex;
using ::tensorstore::Dims;
using ::tensorstore::IdentityTransform;
using ::tensorstore::IndexTransformBuilder;
using ::tensorstore::span;
using ::tensorstore::internal_index_space::TestDimExpression;
using ::tensorstore::internal_index_space::TestDimExpressionError;
TEST(LabelTest, Example) {
const auto original_transform = IndexTransformBuilder<3, 3>()
.input_origin({1, 2, 3})
.input_shape({3, 4, 2})
.input_labels({"x", "y", "z"})
.output_identity_transform()
.Finalize()
.value();
const auto expected_new_transform = IndexTransformBuilder<3, 3>()
.input_origin({1, 2, 3})
.input_shape({3, 4, 2})
.input_labels({"a", "y", "b"})
.output_identity_transform()
.Finalize()
.value();
TestDimExpression(original_transform,
Dims(0, 2).Label("a", "b"),
{0, 2},
expected_new_transform,
expected_new_transform,
{});
TestDimExpression(original_transform,
Dims("x", "z").Label("a", "b"),
{0, 2},
expected_new_transform,
expected_new_transform,
{});
}
TEST(LabelTest, MultipleArguments) {
TestDimExpression(
IndexTransformBuilder<3, 1>()
.output_constant(0, 1)
.Finalize()
.value(),
Dims(1, 0).Label("x", "y"),
{1, 0},
IndexTransformBuilder<3, 3>()
.input_labels({"y", "x", ""})
.output_identity_transform()
.Finalize()
.value(),
IndexTransformBuilder<3, 1>()
.input_labels({"y", "x", ""})
.output_constant(0, 1)
.Finalize()
.value(),
{});
}
TEST(LabelTest, ErrorHandling) {
TestDimExpressionError(
IdentityTransform(1),
Dims(span<const DimensionIndex>({0})).Label("x", "y"),
absl::StatusCode::kInvalidArgument,
"Number of dimensions \\(1\\) does not match number of "
"labels \\(2\\)\\.");
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/index_space/internal/label_op.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/index_space/label_op_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
910cd118-d757-4424-beac-fac04be8dd3a | cpp | tensorflow/tensorflow | pjrt_client | third_party/xla/xla/python/pjrt_ifrt/pjrt_client.cc | third_party/xla/xla/pjrt/pjrt_client_test.cc | #include "xla/python/pjrt_ifrt/pjrt_client.h"
#include <atomic>
#include <cstdint>
#include <functional>
#include <memory>
#include <optional>
#include <string>
#include <string_view>
#include <utility>
#include <variant>
#include <vector>
#include "absl/base/thread_annotations.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/inlined_vector.h"
#include "absl/functional/any_invocable.h"
#include "absl/log/check.h"
#include "absl/memory/memory.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
#include "absl/types/span.h"
#include "llvm/Support/Casting.h"
#include "xla/layout.h"
#include "xla/literal.h"
#include "xla/pjrt/distributed/protocol.pb.h"
#include "xla/pjrt/distributed/topology_util.h"
#include "xla/pjrt/pjrt_client.h"
#include "xla/pjrt/pjrt_common.h"
#include "xla/pjrt/pjrt_compiler.h"
#include "xla/pjrt/pjrt_device_description.h"
#include "xla/pjrt/pjrt_future.h"
#include "xla/pjrt/pjrt_layout.h"
#include "xla/python/ifrt/array.h"
#include "xla/python/ifrt/attribute_map.h"
#include "xla/python/ifrt/client.h"
#include "xla/python/ifrt/device.h"
#include "xla/python/ifrt/device_list.h"
#include "xla/python/ifrt/dtype.h"
#include "xla/python/ifrt/future.h"
#include "xla/python/ifrt/memory.h"
#include "xla/python/ifrt/remap_plan.h"
#include "xla/python/ifrt/shape.h"
#include "xla/python/ifrt/sharding.h"
#include "xla/python/ifrt/topology.h"
#include "xla/python/ifrt/tuple.h"
#include "xla/python/ifrt/value.h"
#include "xla/python/pjrt_ifrt/basic_string_array.h"
#include "xla/python/pjrt_ifrt/pjrt_array.h"
#include "xla/python/pjrt_ifrt/pjrt_attribute_map_util.h"
#include "xla/python/pjrt_ifrt/pjrt_device.h"
#include "xla/python/pjrt_ifrt/pjrt_dtype.h"
#include "xla/python/pjrt_ifrt/pjrt_memory.h"
#include "xla/python/pjrt_ifrt/pjrt_remap.h"
#include "xla/python/pjrt_ifrt/pjrt_topology.h"
#include "xla/python/pjrt_ifrt/pjrt_tuple.h"
#include "xla/python/pjrt_ifrt/xla_sharding.h"
#include "xla/status_macros.h"
#include "xla/tsl/concurrency/ref_count.h"
#include "xla/util.h"
#include "tsl/platform/casts.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace ifrt {
namespace {
absl::AnyInvocable<void() &&> FromStdFunction(std::function<void()>&& f) {
return f ? std::move(f) : absl::AnyInvocable<void() &&>();
}
AttributeMap MakeAttributeMap(xla::PjRtClient* pjrt_client) {
absl::flat_hash_map<std::string, PjRtValueType> attributes;
attributes.insert({"supports_executable_serialization", true});
if (std::optional<PjRtPluginAttributes> plugin_attributes =
pjrt_client->plugin_attributes();
plugin_attributes.has_value()) {
attributes.insert(
{"pjrt_c_api_major_version",
PjRtValueType(plugin_attributes->pjrt_c_api_major_version)});
attributes.insert(
{"pjrt_c_api_minor_version",
PjRtValueType(plugin_attributes->pjrt_c_api_minor_version)});
for (const auto& [key, value] : plugin_attributes->attributes) {
attributes.insert({key, value});
}
}
return FromPjRtAttributeMap(std::move(attributes));
}
void SerializePjRtDeviceAttributes(
const absl::flat_hash_map<std::string, PjRtDeviceAttribute>& attributes,
DeviceProto& device_proto) {
for (const auto& [key, value] : attributes) {
DeviceAttributeProto& attribute = (*device_proto.mutable_attributes())[key];
if (std::holds_alternative<std::string>(value)) {
attribute.set_string_value(std::get<std::string>(value));
} else if (std::holds_alternative<int64_t>(value)) {
attribute.set_int_value(std::get<int64_t>(value));
} else if (std::holds_alternative<std::vector<int64_t>>(value)) {
auto values = std::get<std::vector<int64_t>>(value);
attribute.mutable_int_values()->mutable_values()->Assign(values.begin(),
values.end());
} else if (std::holds_alternative<bool>(value)) {
attribute.set_bool_value(std::get<bool>(value));
} else if (std::holds_alternative<float>(value)) {
attribute.set_float_value(std::get<float>(value));
}
}
}
absl::Status DeserializePjRtDeviceAttributes(
const DeviceProto& device_proto,
absl::flat_hash_map<std::string, PjRtDeviceAttribute>& attributes) {
for (const auto& [key, value] : device_proto.attributes()) {
if (value.has_string_value()) {
attributes[key] = value.string_value();
} else if (value.has_int_value()) {
attributes[key] = value.int_value();
} else if (value.has_int_values()) {
attributes[key] =
std::vector<int64_t>(value.int_values().values().begin(),
value.int_values().values().end());
} else if (value.has_bool_value()) {
attributes[key] = value.bool_value();
} else if (value.has_float_value()) {
attributes[key] = value.float_value();
}
}
return absl::OkStatus();
}
absl::StatusOr<tsl::RCReference<Array>> MakeStringArrayFromHostBuffer(
Client* client, const void* data, DType dtype, Shape shape,
std::optional<absl::Span<const int64_t>> byte_strides,
std::shared_ptr<const Sharding> sharding,
Client::HostBufferSemantics semantics,
std::function<void()> on_done_with_host_buffer) {
auto param_validation = [&]() -> absl::Status {
if (byte_strides.has_value()) {
return absl::InvalidArgumentError(
"byte_strides is not currently supported for making "
"BasicStringArrays.");
}
if (semantics != Client::HostBufferSemantics::kImmutableOnlyDuringCall) {
return absl::InvalidArgumentError(
"HostBufferSemantics other than kImmutableOnlyDuringCall are not "
"currently supported for making BasicStringArrays.");
}
if (!llvm::isa<const SingleDeviceSharding>(sharding.get())) {
return absl::InvalidArgumentError(
absl::StrCat("Only SingleDeviceSharding is supported for making "
"BasicStringArrays: got: ",
sharding->DebugString()));
}
return absl::OkStatus();
}();
TF_RETURN_IF_ERROR(param_validation);
auto num_elements = shape.num_elements();
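  // Reserve up front so the vectors never reallocate; the string_views
  // created below point into these strings and must stay valid.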
auto strings = std::make_shared<std::vector<std::string>>();
strings->reserve(num_elements);
auto string_views = std::make_shared<std::vector<absl::string_view>>();
string_views->reserve(num_elements);
auto element = static_cast<const absl::string_view*>(data);
for (int i = 0; i < num_elements; ++i, ++element) {
strings->push_back(std::string(*element));
string_views->push_back(absl::string_view(strings->back()));
}
std::move(on_done_with_host_buffer)();
BasicStringArray::Buffers buffers;
buffers.push_back(*string_views);
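  // The releaser does no work of its own; its captures keep the backing
  // strings and views alive until the array is done with them.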
auto buffer_releaser = [strings = std::move(strings),
string_views = std::move(string_views)]() {};
return BasicStringArray::Create(
client, std::move(shape), std::move(sharding),
Future<BasicStringArray::Buffers>(std::move(buffers)),
std::move(buffer_releaser));
}
absl::StatusOr<tsl::RCReference<Array>>
AssembleStringArrayFromSingleDeviceStringArrays(
Shape shape, std::shared_ptr<const Sharding> sharding,
absl::Span<tsl::RCReference<Array>> arrays, ArrayCopySemantics semantics) {
struct BufferBackingStore {
explicit BufferBackingStore(int num_shards)
: per_shard_strings(num_shards), per_shard_string_views(num_shards) {}
void clear() {
per_shard_strings.clear();
per_shard_string_views.clear();
}
void CopyBuffer(absl::Span<const absl::string_view> strbuf, int shard_index,
BasicStringArray::Buffers* buffers) {
auto& strings = per_shard_strings[shard_index];
strings.reserve(strbuf.size());
auto& views = per_shard_string_views[shard_index];
views.reserve(strbuf.size());
for (int i = 0; i < strbuf.size(); ++i) {
strings.push_back(std::string(strbuf[i].data(), strbuf[i].size()));
}
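      // Views are taken in a second pass, once every string is in place.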
for (const auto& str : strings) {
views.push_back(str);
}
(*buffers)[shard_index] = absl::MakeConstSpan(views);
}
std::vector<std::vector<std::string>> per_shard_strings;
std::vector<std::vector<absl::string_view>> per_shard_string_views;
};
auto buffer_backing_store =
std::make_shared<BufferBackingStore>(sharding->devices()->size());
auto on_done_with_buffer = [buffer_holder = buffer_backing_store]() {};
struct BufferCopyingState {
BufferCopyingState(int num_buffers_to_copy,
std::shared_ptr<BufferBackingStore> buffer_backing_store)
: num_buffers_to_copy(num_buffers_to_copy),
buffer_backing_store(std::move(buffer_backing_store)),
buffers(num_buffers_to_copy) {}
absl::Mutex mu;
int num_buffers_to_copy ABSL_GUARDED_BY(mu);
std::shared_ptr<BufferBackingStore> buffer_backing_store
ABSL_GUARDED_BY(mu);
BasicStringArray::Buffers buffers ABSL_GUARDED_BY(mu);
};
auto buffer_copying_state = std::make_shared<BufferCopyingState>(
arrays.size(), std::move(buffer_backing_store));
auto buffers_promise = Future<BasicStringArray::Buffers>::CreatePromise();
auto buffers_future = Future<BasicStringArray::Buffers>(buffers_promise);
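  // Called once per shard, possibly from different threads. The last copier
  // to finish fulfills the promise; the first error short-circuits the rest.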
auto buffer_copier = [state = buffer_copying_state,
promise = buffers_promise](
absl::StatusOr<BasicStringArray::Buffers> strbuf,
int shard_index) mutable {
absl::MutexLock lock(&state->mu);
if (state->num_buffers_to_copy == 0) {
return;
}
if (!strbuf.ok()) {
promise.Set(strbuf.status());
state->num_buffers_to_copy = 0;
state->buffer_backing_store->clear();
state->buffer_backing_store = nullptr;
return;
}
state->buffer_backing_store->CopyBuffer(strbuf->front(), shard_index,
&state->buffers);
if (--state->num_buffers_to_copy > 0) {
return;
}
promise.Set(std::move(state->buffers));
};
for (int i = 0; i < arrays.size(); ++i) {
auto basic_string_array = llvm::dyn_cast<BasicStringArray>(arrays[i].get());
if (!basic_string_array) {
return absl::InvalidArgumentError(
"All single device arrays must be BasicStringArrays");
}
if (!llvm::isa<SingleDeviceSharding>(basic_string_array->sharding())) {
return absl::InvalidArgumentError(absl::StrFormat(
"All single device arrays must have single device sharding. got: %s "
"for shard index: %d",
basic_string_array->sharding().DebugString(), i));
}
basic_string_array->buffers().OnReady(
[shard_index = i, buffer_copier = buffer_copier](
absl::StatusOr<BasicStringArray::Buffers> strbuf) mutable {
buffer_copier(std::move(strbuf), shard_index);
});
}
return BasicStringArray::Create(arrays[0]->client(), std::move(shape),
std::move(sharding), buffers_future,
std::move(on_done_with_buffer));
}
}
char PjRtCompatibleClient::ID = 0;
char PjRtClient::ID = 0;
absl::StatusOr<std::unique_ptr<PjRtClient>> PjRtClient::Create(
PjRtClient::CreateOptions options) {
auto client =
absl::WrapUnique(new PjRtClient(std::move(options.pjrt_client)));
xla::PjRtClient* pjrt_client = client->pjrt_client();
std::vector<std::unique_ptr<PjRtDevice>> devices;
if (!options.kv_store) {
devices.reserve(pjrt_client->devices().size());
for (xla::PjRtDevice* device : pjrt_client->devices()) {
auto ifrt_device = std::make_unique<PjRtDevice>(
client.get(), DeviceId(device->global_device_id().value()),
std::string(device->device_kind()), std::string(device->ToString()),
std::string(device->DebugString()), device->process_index(),
device->Attributes(), device->IsAddressable() ? device : nullptr);
devices.push_back(std::move(ifrt_device));
}
} else {
LocalTopologyProto local_topology;
local_topology.set_node_id(options.process_id);
std::string boot_id_str;
auto boot_id_str_or_status = GetBootIdString();
if (!boot_id_str_or_status.ok()) {
LOG(INFO) << boot_id_str_or_status.status();
} else {
boot_id_str = boot_id_str_or_status.value();
}
local_topology.set_boot_id(boot_id_str);
absl::flat_hash_map<PjRtLocalDeviceId, xla::PjRtDevice*> pjrt_devices;
for (xla::PjRtDevice* device : pjrt_client->addressable_devices()) {
pjrt_devices[device->local_device_id()] = device;
DeviceProto& device_proto = *local_topology.add_devices();
device_proto.set_global_device_id(device->global_device_id().value());
device_proto.set_local_device_ordinal(device->local_device_id().value());
device_proto.set_device_kind(
std::string(device->description().device_kind()));
device_proto.set_to_string(std::string(device->ToString()));
device_proto.set_debug_string(std::string(device->DebugString()));
SerializePjRtDeviceAttributes(device->Attributes(), device_proto);
}
GlobalTopologyProto global_topology;
TF_RETURN_IF_ERROR(ExchangeTopologies(
pjrt_client->platform_name(), options.process_id, options.num_processes,
options.get_local_topology_timeout, options.get_global_topology_timeout,
options.kv_store.get(), local_topology, &global_topology,
        /*assign_global_device_ids=*/false));
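    // Devices reporting the same boot_id live on the same host ("slice");
    // slice indices are assigned in the order boot_ids are first seen.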
int next_slice_index = 0;
absl::flat_hash_map<std::string, int> boot_id_to_slice_index;
for (const LocalTopologyProto& node : global_topology.nodes()) {
int64_t slice_index = -1;
if (!node.boot_id().empty()) {
std::string_view boot_id = node.boot_id();
auto [it, inserted] =
boot_id_to_slice_index.try_emplace(boot_id, next_slice_index);
slice_index = it->second;
if (inserted) {
++next_slice_index;
}
}
bool node_is_me = (node.node_id() == options.process_id);
for (const DeviceProto& device_proto : node.devices()) {
absl::flat_hash_map<std::string, PjRtDeviceAttribute> attributes;
TF_RETURN_IF_ERROR(
DeserializePjRtDeviceAttributes(device_proto, attributes));
if (!attributes.contains("slice_index")) {
attributes["slice_index"] = slice_index;
}
xla::PjRtDevice* pjrt_device = nullptr;
if (node_is_me) {
auto it = pjrt_devices.find(
PjRtLocalDeviceId(device_proto.local_device_ordinal()));
TF_RET_CHECK(it != pjrt_devices.end());
pjrt_device = it->second;
}
auto ifrt_device = std::make_unique<PjRtDevice>(
client.get(), DeviceId(device_proto.global_device_id()),
device_proto.device_kind(), device_proto.to_string(),
device_proto.debug_string(), node.node_id(), std::move(attributes),
pjrt_device);
devices.push_back(std::move(ifrt_device));
}
}
}
client->devices_.reserve(devices.size());
client->device_map_.reserve(pjrt_client->addressable_device_count());
for (auto& ifrt_device : devices) {
client->devices_.push_back(ifrt_device.get());
TF_RET_CHECK(
client->device_id_map_.emplace(ifrt_device->Id(), ifrt_device.get())
.second);
xla::PjRtDevice* pjrt_device = ifrt_device->pjrt_device();
if (pjrt_device) {
TF_RET_CHECK(
client->device_map_.emplace(pjrt_device, ifrt_device.get()).second);
}
client->owned_devices_.push_back(std::move(ifrt_device));
}
client->addressable_devices_.reserve(
pjrt_client->addressable_devices().size());
for (xla::PjRtDevice* device : pjrt_client->addressable_devices()) {
auto it = client->device_map_.find(device);
CHECK(it != client->device_map_.end());
client->addressable_devices_.push_back(it->second);
}
client->memory_map_.reserve(pjrt_client->memory_spaces().size());
for (xla::PjRtMemorySpace* memory_space : pjrt_client->memory_spaces()) {
auto ifrt_memory = std::make_unique<PjRtMemory>(client.get(), memory_space);
client->memory_map_[memory_space] = ifrt_memory.get();
client->owned_memories_.push_back(std::move(ifrt_memory));
}
for (Device* ifrt_device : client->addressable_devices_) {
auto* device = tensorflow::down_cast<PjRtDevice*>(ifrt_device);
auto* pjrt_device = device->pjrt_device();
device->memories_.reserve(pjrt_device->memory_spaces().size());
for (xla::PjRtMemorySpace* pjrt_memory_space :
pjrt_device->memory_spaces()) {
device->memories_.push_back(*client->LookupPjRtMemory(pjrt_memory_space));
}
absl::StatusOr<PjRtMemorySpace*> memory =
pjrt_device->default_memory_space();
if (memory.ok()) {
device->default_memory_ = *client->LookupPjRtMemory(*memory);
} else {
device->default_memory_ = memory.status();
}
}
return client;
}
std::unique_ptr<PjRtClient> PjRtClient::Create(
std::shared_ptr<xla::PjRtClient> pjrt_client) {
PjRtClient::CreateOptions options;
options.pjrt_client = std::move(pjrt_client);
return *Create(std::move(options));
}
PjRtClient::PjRtClient(std::shared_ptr<xla::PjRtClient> pjrt_client)
: pjrt_client_(std::move(pjrt_client)),
default_compiler_(this),
attributes_(MakeAttributeMap(pjrt_client_.get())) {}
PjRtClient::~PjRtClient() = default;
absl::StatusOr<PjRtCompatibleDevice*> PjRtClient::LookupPjRtDevice(
xla::PjRtDevice* pjrt_device) const {
auto it = device_map_.find(pjrt_device);
if (it == device_map_.end()) {
return InvalidArgument("PjRtDevice not found: %s",
pjrt_device->DebugString());
}
return it->second;
}
absl::StatusOr<PjRtCompatibleMemory*> PjRtClient::LookupPjRtMemory(
xla::PjRtMemorySpace* pjrt_memory) const {
auto it = memory_map_.find(pjrt_memory);
if (it == memory_map_.end()) {
return InvalidArgument("PjRtMemorySpace not found: %s",
pjrt_memory->DebugString());
}
return it->second;
}
absl::StatusOr<Device*> PjRtClient::LookupDevice(DeviceId device_id) const {
DCHECK(this);
auto it = device_id_map_.find(device_id);
if (it != device_id_map_.end()) {
return it->second;
}
return InvalidArgument("No matching device found for device_id %d",
device_id.value());
}
absl::StatusOr<Device*> PjRtClient::LookupAddressableDevice(
int local_hardware_id) const {
DCHECK(this);
TF_ASSIGN_OR_RETURN(xla::PjRtDevice * pjrt_device,
pjrt_client_->LookupAddressableDevice(
xla::PjRtLocalDeviceId(local_hardware_id)));
return LookupPjRtDevice(pjrt_device);
}
const AttributeMap& PjRtClient::Attributes() const { return attributes_; }
absl::StatusOr<tsl::RCReference<PjRtCompatibleArray>>
PjRtClient::CreatePjRtArray(std::shared_ptr<PjRtBuffer> pjrt_buffer) {
TF_ASSIGN_OR_RETURN(auto array,
PjRtArray::Create(this, std::move(pjrt_buffer)));
return tsl::RCReference<PjRtCompatibleArray>(std::move(array));
}
absl::StatusOr<tsl::RCReference<PjRtCompatibleArray>>
PjRtClient::CreatePjRtArray(Shape shape, PjRtBuffers pjrt_buffers) {
TF_ASSIGN_OR_RETURN(auto array, PjRtArray::Create(this, std::move(shape),
std::move(pjrt_buffers)));
return tsl::RCReference<PjRtCompatibleArray>(std::move(array));
}
absl::StatusOr<tsl::RCReference<Array>> PjRtClient::MakeArrayFromHostBuffer(
const void* data, DType dtype, Shape shape,
std::optional<absl::Span<const int64_t>> byte_strides,
std::shared_ptr<const Sharding> sharding,
Client::HostBufferSemantics semantics,
std::function<void()> on_done_with_host_buffer) {
DCHECK(this);
if (dtype.kind() == DType::kString) {
return MakeStringArrayFromHostBuffer(this, data, dtype, shape, byte_strides,
sharding, semantics,
on_done_with_host_buffer);
}
if (!llvm::isa<const SingleDeviceSharding>(sharding.get()) &&
!sharding->IsFullyReplicated()) {
return InvalidArgument(
"Only SingleDeviceSharding or fully-replicated sharding is supported: "
"sharding=%s",
sharding->DebugString());
}
TF_ASSIGN_OR_RETURN(auto primitive_type, ToPrimitiveType(dtype));
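  // The caller's done-callback may fire only after every per-device copy has
  // finished, so it is wrapped behind an atomic countdown.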
auto count = std::make_shared<std::atomic<int>>(sharding->devices()->size());
std::function<void()> on_done_with_host_buffer_per_device;
if (on_done_with_host_buffer) {
on_done_with_host_buffer_per_device =
[on_done_with_host_buffer = std::move(on_done_with_host_buffer),
count]() {
if (count->fetch_sub(1, std::memory_order_relaxed) == 1) {
on_done_with_host_buffer();
}
};
} else {
on_done_with_host_buffer_per_device = []() {};
}
PjRtArray::PjRtBuffers buffers;
buffers.reserve(sharding->devices()->size());
for (xla::ifrt::Device* const device : sharding->devices()->devices()) {
std::unique_ptr<PjRtBuffer> buffer;
if (sharding->memory_kind().memory_kind().has_value()) {
Memory* memory = nullptr;
for (Memory* ms : device->Memories()) {
if (ms->Kind() == sharding->memory_kind()) {
memory = ms;
break;
}
}
if (memory == nullptr) {
return InvalidArgument(
"Invalid memory kind: %s; available memory kinds: %s",
*sharding->memory_kind().memory_kind(),
absl::StrJoin(sharding->devices()->devices().front()->Memories(),
", ", [](std::string* out, Memory* ms) {
absl::StrAppend(out, *ms->Kind().memory_kind());
}));
}
TF_ASSIGN_OR_RETURN(
buffer, pjrt_client_->BufferFromHostBuffer(
data, primitive_type, shape.dims(), byte_strides,
semantics, on_done_with_host_buffer_per_device,
tensorflow::down_cast<PjRtMemory*>(memory)->pjrt_memory(),
nullptr));
} else {
if (!device->IsAddressable()) {
return InvalidArgument("Cannot copy array to non-addressable device %s",
device->DebugString());
}
TF_ASSIGN_OR_RETURN(
buffer,
pjrt_client_->BufferFromHostBuffer(
data, primitive_type, shape.dims(), byte_strides, semantics,
on_done_with_host_buffer_per_device,
tensorflow::down_cast<PjRtDevice*>(device)->pjrt_device()));
}
buffers.push_back(std::move(buffer));
}
return PjRtArray::Create(this, dtype, std::move(shape), std::move(sharding),
std::move(buffers));
}
absl::StatusOr<tsl::RCReference<Array>>
PjRtClient::AssembleArrayFromSingleDeviceArrays(
Shape shape, std::shared_ptr<const Sharding> sharding,
absl::Span<tsl::RCReference<Array>> arrays, ArrayCopySemantics semantics) {
DCHECK(this);
if (llvm::isa<const SingleDeviceSharding>(sharding.get())) {
if (arrays.size() != 1) {
return InvalidArgument(
"When the sharding is SingleDeviceSharding, the input arrays size "
"must be one, but the actual size is %d",
arrays.size());
}
return arrays[0];
} else if (!llvm::isa<const OpaqueSharding, const ConcreteSharding,
const ConcreteEvenSharding, const ShardingParamSharding,
const HloSharding>(sharding.get())) {
return InvalidArgument(
"Only SingleDeviceSharding, OpaqueSharding, ConcreteSharding, "
"ConcreteEvenSharding, ShardingParamSharding, HloSharding are "
"supported: sharding=%s",
sharding->DebugString());
}
if (sharding->devices()->size() != arrays.size()) {
return InvalidArgument(
"Number of output shards must match the number of single-shard "
"arrays: %d vs. %d",
sharding->devices()->size(), arrays.size());
}
if (arrays[0]->dtype().kind() == DType::kString) {
return AssembleStringArrayFromSingleDeviceStringArrays(shape, sharding,
arrays, semantics);
}
PjRtArray::PjRtBuffers buffers;
buffers.reserve(arrays.size());
DType dtype = arrays[0]->dtype();
for (int i = 0; i < arrays.size(); ++i) {
if (!llvm::isa<PjRtCompatibleArray>(arrays[i].get())) {
return InvalidArgument(
"Only PjRtCompatibleArray is supported: arrays[%d]=%s", i,
arrays[i]->DebugString());
}
auto* array = static_cast<PjRtCompatibleArray*>(arrays[i].get());
if (array->dtype() != dtype) {
return InvalidArgument(
"Every input must have the same dtype: %s (shard 0) vs. %s (shard "
"%d)",
dtype.DebugString(), array->dtype().DebugString(), i);
}
if (array->sharding().devices()->size() != 1) {
return InvalidArgument(
"Every input must use a single device sharding, but input %d has "
"sharding=%s",
i, array->sharding().DebugString());
}
switch (semantics) {
case ArrayCopySemantics::kAlwaysCopy:
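        // Note: as written, kAlwaysCopy shares the underlying PjRtBuffer
        // exactly like kReuseInput; no device copy is made at assembly time.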
buffers.push_back(array->pjrt_buffers().front());
break;
case ArrayCopySemantics::kReuseInput:
buffers.push_back(array->pjrt_buffers().front());
break;
case ArrayCopySemantics::kDonateInput:
buffers.push_back(std::move(array->pjrt_buffers().front()));
break;
}
}
return PjRtArray::Create(this, dtype, std::move(shape), std::move(sharding),
std::move(buffers));
}
absl::StatusOr<std::vector<tsl::RCReference<Array>>> PjRtClient::CopyArrays(
absl::Span<tsl::RCReference<Array>> arrays,
std::optional<tsl::RCReference<DeviceList>> devices,
std::optional<MemoryKind> memory_kind, ArrayCopySemantics semantics) {
if (arrays.empty()) {
return std::vector<tsl::RCReference<Array>>();
}
for (int i = 1; i < arrays.size(); ++i) {
const auto& sharding = arrays[i]->sharding();
if (*sharding.devices() != *arrays[0]->sharding().devices() ||
sharding.memory_kind() != arrays[0]->sharding().memory_kind()) {
return absl::InvalidArgumentError(
"CopyArrays only supports arrays with the same device list and "
"memory kind");
}
}
std::vector<tsl::RCReference<Array>> new_arrays;
new_arrays.reserve(arrays.size());
for (const auto& array : arrays) {
if (auto* const pjrt_array = llvm::dyn_cast<PjRtArray>(array.get())) {
TF_ASSIGN_OR_RETURN(new_arrays.emplace_back(),
pjrt_array->Copy(devices, memory_kind, semantics));
} else if (auto* const string_array =
llvm::dyn_cast<BasicStringArray>(array.get())) {
TF_ASSIGN_OR_RETURN(new_arrays.emplace_back(),
string_array->Copy(devices, memory_kind, semantics));
} else {
return absl::InvalidArgumentError(
"Unsupported array type for PjRtClient::CopyArrays");
}
}
return new_arrays;
}
absl::StatusOr<std::vector<tsl::RCReference<xla::ifrt::Array>>>
PjRtClient::RemapArrays(const RemapPlan& plan,
absl::Span<tsl::RCReference<xla::ifrt::Array>> arrays,
ArrayCopySemantics semantics) {
return PjRtCompatibleClientRemapArrays(this, plan, arrays, semantics);
}
Future<> PjRtClient::GetReadyFuture(
absl::Span<const tsl::RCReference<Value>> values) {
absl::InlinedVector<Future<>, 1> futures;
futures.reserve(values.size());
for (const auto& value : values) {
futures.push_back(value->GetReadyFuture());
}
return JoinFutures(futures);
}
absl::StatusOr<tsl::RCReference<Tuple>> PjRtClient::MakeTuple(
absl::Span<tsl::RCReference<Value>> values) {
return PjRtTuple::Create(this, values);
}
absl::StatusOr<std::shared_ptr<Topology>> PjRtClient::GetTopologyForDevices(
const tsl::RCReference<xla::ifrt::DeviceList>& devices) const {
TF_ASSIGN_OR_RETURN(auto topology, pjrt_client_->GetTopologyDescription());
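  // Aliasing shared_ptr constructor: the topology description keeps the
  // PjRt client alive for as long as it is referenced.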
return std::make_shared<PjRtTopology>(
std::shared_ptr<const xla::PjRtTopologyDescription>(pjrt_client_,
topology));
}
absl::StatusOr<std::unique_ptr<PjRtLayout>>
PjRtClient::GetDefaultLayoutForDevice(DType dtype,
absl::Span<const int64_t> dims,
Device* device) const {
TF_ASSIGN_OR_RETURN(PrimitiveType element_type, ToPrimitiveType(dtype));
TF_ASSIGN_OR_RETURN(xla::Layout layout,
pjrt_client_->GetDefaultLayout(element_type, dims));
return std::make_unique<PjRtXlaLayout>(std::move(layout));
}
absl::Status PjRtClient::TransferToInfeed(PjRtDevice* device,
const LiteralSlice& literal) {
if (!device->IsAddressable()) {
return InvalidArgument(
"Infeed is only supported on addressable devices "
"but device %s is not addressable",
device->DebugString());
}
return device->pjrt_device()->TransferToInfeed(literal);
}
absl::Status PjRtClient::TransferFromOutfeed(PjRtDevice* device,
MutableBorrowingLiteral literal) {
if (!device->IsAddressable()) {
return InvalidArgument(
"Outfeed is only supported on addressable devices "
"but device %s is not addressable",
device->DebugString());
}
return device->pjrt_device()->TransferFromOutfeed(literal);
}
}
} | #include "xla/pjrt/pjrt_client_test.h"
#include <cstdint>
#include <functional>
#include <memory>
#include <optional>
#include <utility>
#include <vector>
#include "absl/status/statusor.h"
#include "absl/synchronization/blocking_counter.h"
#include "absl/types/span.h"
#include "xla/client/xla_builder.h"
#include "xla/client/xla_computation.h"
#include "xla/pjrt/pjrt_client.h"
#include "xla/service/hlo_parser.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/tests/literal_test_util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
class TestClientFactory {
public:
void Register(
std::function<absl::StatusOr<std::unique_ptr<PjRtClient>>()> factory) {
absl::MutexLock lock(&mu_);
CHECK(!factory_);
factory_ = std::move(factory);
}
std::function<absl::StatusOr<std::unique_ptr<PjRtClient>>()> Get() const {
absl::MutexLock lock(&mu_);
return factory_;
}
private:
mutable absl::Mutex mu_;
std::function<absl::StatusOr<std::unique_ptr<PjRtClient>>()> factory_
ABSL_GUARDED_BY(mu_);
};
TestClientFactory& GetGlobalTestClientFactory() {
static auto* const factory = new TestClientFactory;
return *factory;
}
absl::StatusOr<std::unique_ptr<PjRtClient>> GetClient() {
return GetGlobalTestClientFactory().Get()();
}
}
void RegisterTestClientFactory(
std::function<absl::StatusOr<std::unique_ptr<PjRtClient>>()> factory) {
GetGlobalTestClientFactory().Register(std::move(factory));
}
namespace {
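// Builds a single-device executable computing inp + 1. With `alias` set, the
// output is aliased to parameter 0, i.e. the input buffer is donated.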
std::unique_ptr<PjRtLoadedExecutable> MakeIncrementProgram(
PjRtClient* client, bool alias, int device, bool tuplize_arg = false) {
Shape shape = ShapeUtil::MakeShape(S32, {4});
XlaBuilder builder("inc");
if (tuplize_arg) {
shape = ShapeUtil::MakeTupleShape({shape});
}
auto inp = Parameter(&builder, 0, shape, "inp");
if (tuplize_arg) {
inp = GetTupleElement(inp, 0);
}
auto one = ConstantR0<int32_t>(&builder, 1);
auto inc = Add(inp, one);
if (alias) {
    builder.SetUpAlias(/*output_index=*/{}, /*param_number=*/0,
                       /*param_index=*/{});
}
XlaComputation computation = builder.Build(inc).value();
DeviceAssignment assignment(1, 1);
assignment(0, 0) = device;
CompileOptions options;
options.parameter_is_tupled_arguments = tuplize_arg;
options.executable_build_options.set_device_assignment(assignment);
return client->Compile(computation, options).value();
}
class PjRtClientTest
: public ::testing::TestWithParam<ExecuteOptions::ExecutionMode> {};
TEST_P(PjRtClientTest, Execute) {
TF_ASSERT_OK_AND_ASSIGN(auto client, GetClient());
auto executable =
      MakeIncrementProgram(client.get(), /*alias=*/false, /*device=*/0);
std::vector<int32_t> data(4, 0);
Shape shape = ShapeUtil::MakeShape(S32, {4});
TF_ASSERT_OK_AND_ASSIGN(
auto buffer,
client->BufferFromHostBuffer(
data.data(), shape.element_type(), shape.dimensions(),
std::nullopt,
PjRtClient::HostBufferSemantics::kImmutableOnlyDuringCall, nullptr,
client->addressable_devices()[0]));
ExecuteOptions options;
options.execution_mode = GetParam();
TF_ASSERT_OK_AND_ASSIGN(auto results,
executable->Execute({{buffer.get()}}, options));
ASSERT_EQ(results.size(), 1);
ASSERT_EQ(results[0].size(), 1);
TF_ASSERT_OK_AND_ASSIGN(auto literal, results[0][0]->ToLiteralSync());
std::vector<int32_t> expected(4, 1);
EXPECT_TRUE(LiteralTestUtil::Equal(LiteralUtil::CreateR1<int32_t>(expected),
*literal));
}
TEST_P(PjRtClientTest, ExecuteWithImmutableUntilTransferCompletes) {
TF_ASSERT_OK_AND_ASSIGN(auto client, GetClient());
auto executable =
      MakeIncrementProgram(client.get(), /*alias=*/false, /*device=*/0);
std::vector<int32_t> data(4, 0);
Shape shape = ShapeUtil::MakeShape(S32, {4});
TF_ASSERT_OK_AND_ASSIGN(
auto buffer,
client->BufferFromHostBuffer(
data.data(), shape.element_type(), shape.dimensions(),
std::nullopt,
PjRtClient::HostBufferSemantics::kImmutableUntilTransferCompletes,
nullptr, client->addressable_devices()[0]));
ExecuteOptions options;
options.execution_mode = GetParam();
TF_ASSERT_OK_AND_ASSIGN(auto results,
executable->Execute({{buffer.get()}}, options));
ASSERT_EQ(results.size(), 1);
ASSERT_EQ(results[0].size(), 1);
TF_ASSERT_OK_AND_ASSIGN(auto literal, results[0][0]->ToLiteralSync());
std::vector<int32_t> expected(4, 1);
EXPECT_TRUE(LiteralTestUtil::Equal(LiteralUtil::CreateR1<int32_t>(expected),
*literal));
}
TEST_P(PjRtClientTest, ExecuteWithTupleZeroCopy) {
TF_ASSERT_OK_AND_ASSIGN(auto client, GetClient());
  auto executable = MakeIncrementProgram(client.get(), /*alias=*/false,
                                         /*device=*/0, /*tuplize_arg=*/true);
std::vector<int32_t> data(4, 0);
Shape shape = ShapeUtil::MakeShape(S32, {4});
TF_ASSERT_OK_AND_ASSIGN(
auto buffer, client->BufferFromHostBuffer(
data.data(), shape.element_type(), shape.dimensions(),
std::nullopt,
PjRtClient::HostBufferSemantics::kImmutableZeroCopy,
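                       // The on-done callback poisons the host buffer; if the
                       // runtime still read it after signaling completion, the
                       // result would be 2 rather than 1.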
[&data]() {
std::fill(data.begin(), data.end(), 1);
},
client->addressable_devices()[0]));
ExecuteOptions options;
options.execution_mode = GetParam();
TF_ASSERT_OK_AND_ASSIGN(auto results,
executable->Execute({{buffer.get()}}, options));
buffer.reset();
ASSERT_EQ(results.size(), 1);
ASSERT_EQ(results[0].size(), 1);
TF_ASSERT_OK_AND_ASSIGN(auto literal, results[0][0]->ToLiteralSync());
std::vector<int32_t> expected(4, 1);
EXPECT_TRUE(LiteralTestUtil::Equal(LiteralUtil::CreateR1<int32_t>(expected),
*literal));
}
TEST_P(PjRtClientTest, ExecuteWithDonation) {
TF_ASSERT_OK_AND_ASSIGN(auto client, GetClient());
auto executable =
      MakeIncrementProgram(client.get(), /*alias=*/true, /*device=*/0);
std::vector<int32_t> data(4, 0);
Shape shape = ShapeUtil::MakeShape(S32, {4});
TF_ASSERT_OK_AND_ASSIGN(
auto buffer, client->BufferFromHostBuffer(
data.data(), shape.element_type(), shape.dimensions(),
std::nullopt,
PjRtClient::HostBufferSemantics::kImmutableZeroCopy,
nullptr, client->addressable_devices()[0]));
ExecuteOptions options;
options.execution_mode = GetParam();
TF_ASSERT_OK_AND_ASSIGN(auto results,
executable->Execute({{buffer.get()}}, options));
ASSERT_EQ(results.size(), 1);
ASSERT_EQ(results[0].size(), 1);
TF_ASSERT_OK_AND_ASSIGN(auto literal, results[0][0]->ToLiteralSync());
std::vector<int32_t> expected(4, 1);
EXPECT_TRUE(LiteralTestUtil::Equal(LiteralUtil::CreateR1<int32_t>(expected),
*literal));
}
TEST_P(PjRtClientTest, ExecuteWithDonationAbort) {
TF_ASSERT_OK_AND_ASSIGN(auto client, GetClient());
if (client->platform_id() == CpuId()) {
return;
}
auto executable =
      MakeIncrementProgram(client.get(), /*alias=*/true, /*device=*/0);
std::vector<int32_t> data(4, 0);
Shape shape = ShapeUtil::MakeShape(S32, {4});
TF_ASSERT_OK_AND_ASSIGN(
auto buffer, client->BufferFromHostBuffer(
data.data(), shape.element_type(), shape.dimensions(),
std::nullopt,
PjRtClient::HostBufferSemantics::kImmutableZeroCopy,
nullptr, client->addressable_devices()[0]));
auto external_reference = buffer->AcquireExternalReference();
ExecuteOptions options;
options.execution_mode = GetParam();
auto resultsor = executable->Execute({{buffer.get()}}, options);
ASSERT_FALSE(resultsor.ok());
EXPECT_THAT(resultsor.status().message(),
::testing::HasSubstr(
"Donation requested for buffer with external reference"));
}
TEST_P(PjRtClientTest, ExecuteWithConcurrentUsage) {
TF_ASSERT_OK_AND_ASSIGN(auto client, GetClient());
auto executable =
      MakeIncrementProgram(client.get(), /*alias=*/false, /*device=*/0);
std::vector<int32_t> data(4, 0);
Shape shape = ShapeUtil::MakeShape(S32, {4});
TF_ASSERT_OK_AND_ASSIGN(
auto buffer,
client->BufferFromHostBuffer(
data.data(), shape.element_type(), shape.dimensions(),
std::nullopt,
PjRtClient::HostBufferSemantics::kImmutableOnlyDuringCall, nullptr,
client->addressable_devices()[0]));
ExecuteOptions options;
options.execution_mode = GetParam();
constexpr int kNumThreads = 4;
tsl::thread::ThreadPool thread_pool(
tsl::Env::Default(), "ExecuteWithConcurrentUsage", kNumThreads);
constexpr int kConcurrency = 16;
absl::BlockingCounter blocking_counter(kConcurrency);
std::vector<std::unique_ptr<PjRtBuffer>> results(kConcurrency);
for (int i = 0; i < kConcurrency; ++i) {
thread_pool.Schedule([&, &result = results[i]]() {
auto results = executable->Execute({{buffer.get()}}, options).value();
CHECK_EQ(results.size(), 1);
CHECK_EQ(results[0].size(), 1);
result = std::move(results[0][0]);
blocking_counter.DecrementCount();
});
}
blocking_counter.Wait();
std::vector<int32_t> expected(4, 1);
for (const auto& result : results) {
TF_ASSERT_OK_AND_ASSIGN(auto literal, result->ToLiteralSync());
EXPECT_TRUE(LiteralTestUtil::Equal(LiteralUtil::CreateR1<int32_t>(expected),
*literal));
}
}
TEST_P(PjRtClientTest, ExecuteWithConcurrentUsageAndDonation) {
TF_ASSERT_OK_AND_ASSIGN(auto client, GetClient());
auto executable =
      MakeIncrementProgram(client.get(), /*alias=*/false, /*device=*/0);
auto executable_with_donation =
      MakeIncrementProgram(client.get(), /*alias=*/true, /*device=*/0);
std::vector<int32_t> data(4, 0);
std::vector<int32_t> expected(4, 1);
Shape shape = ShapeUtil::MakeShape(S32, {4});
TF_ASSERT_OK_AND_ASSIGN(
auto buffer, client->BufferFromHostBuffer(
data.data(), shape.element_type(), shape.dimensions(),
std::nullopt,
PjRtClient::HostBufferSemantics::kImmutableZeroCopy,
nullptr, client->addressable_devices()[0]));
ExecuteOptions options;
options.execution_mode = GetParam();
constexpr int kNumThreads = 4;
tsl::thread::ThreadPool thread_pool(tsl::Env::Default(),
"ExecuteWithConcurrentUsageAndDonation",
kNumThreads);
constexpr int kConcurrentUsage = 16;
absl::BlockingCounter blocking_counter(kConcurrentUsage + 1);
for (int i = 0; i < kConcurrentUsage; ++i) {
thread_pool.Schedule([&]() {
auto results_or = executable->Execute({{buffer.get()}}, options);
if (results_or.ok()) {
auto& results = *results_or;
CHECK_EQ(results.size(), 1);
CHECK_EQ(results[0].size(), 1);
auto literal = results[0][0]->ToLiteralSync().value();
CHECK(LiteralTestUtil::Equal(LiteralUtil::CreateR1<int32_t>(expected),
*literal));
}
blocking_counter.DecrementCount();
});
}
std::unique_ptr<PjRtBuffer> result;
thread_pool.Schedule([&]() {
auto results =
executable_with_donation->Execute({{buffer.get()}}, options).value();
CHECK_EQ(results.size(), 1);
CHECK_EQ(results[0].size(), 1);
result = std::move(results[0][0]);
blocking_counter.DecrementCount();
});
blocking_counter.Wait();
TF_ASSERT_OK_AND_ASSIGN(auto literal, result->ToLiteralSync());
EXPECT_TRUE(LiteralTestUtil::Equal(LiteralUtil::CreateR1<int32_t>(expected),
*literal));
}
INSTANTIATE_TEST_SUITE_P(
PjRtClientTestSuite, PjRtClientTest,
::testing::Values(ExecuteOptions::ExecutionMode::kSynchronous,
ExecuteOptions::ExecutionMode::kAsynchronous));
TEST(PjRtClientTest, CopyToDevice) {
TF_ASSERT_OK_AND_ASSIGN(auto client, GetClient());
ASSERT_GT(client->addressable_devices().size(), 1);
std::vector<int32_t> data(4, 0);
Shape shape = ShapeUtil::MakeShape(S32, {4});
TF_ASSERT_OK_AND_ASSIGN(
auto buffer,
client->BufferFromHostBuffer(
data.data(), shape.element_type(), shape.dimensions(),
std::nullopt,
PjRtClient::HostBufferSemantics::kImmutableOnlyDuringCall, nullptr,
client->addressable_devices()[0]));
auto* device_1 = client->addressable_devices()[1];
TF_ASSERT_OK_AND_ASSIGN(auto result, buffer->CopyToDevice(device_1));
TF_ASSERT_OK_AND_ASSIGN(auto literal, result->ToLiteralSync());
std::vector<int32_t> expected(4, 0);
EXPECT_TRUE(LiteralTestUtil::Equal(LiteralUtil::CreateR1<int32_t>(expected),
*literal));
}
TEST(PjRtClientTest, CopyToDeviceAsync) {
TF_ASSERT_OK_AND_ASSIGN(auto client, GetClient());
ASSERT_GT(client->addressable_devices().size(), 1);
std::vector<int32_t> data(4, 0);
Shape shape = ShapeUtil::MakeShape(S32, {4});
TF_ASSERT_OK_AND_ASSIGN(
auto buffer,
client->BufferFromHostBuffer(
data.data(), shape.element_type(), shape.dimensions(),
std::nullopt,
PjRtClient::HostBufferSemantics::kImmutableOnlyDuringCall, nullptr,
client->addressable_devices()[0]));
auto* device_1 = client->addressable_devices()[1];
constexpr int kNumThreads = 4;
tsl::thread::ThreadPool thread_pool(tsl::Env::Default(), "CopyToDeviceAsync",
kNumThreads);
constexpr int kConcurrentCopy = 16;
std::vector<std::unique_ptr<PjRtBuffer>> results(kConcurrentCopy);
for (int i = 0; i < kConcurrentCopy; ++i) {
TF_ASSERT_OK_AND_ASSIGN(results[i], buffer->CopyToDevice(device_1));
}
buffer.reset();
for (const auto& result : results) {
ASSERT_TRUE(result);
TF_ASSERT_OK_AND_ASSIGN(auto literal, result->ToLiteralSync());
std::vector<int32_t> expected(4, 0);
EXPECT_TRUE(LiteralTestUtil::Equal(LiteralUtil::CreateR1<int32_t>(expected),
*literal));
}
}
TEST(PjRtClientTest, CopyToDeviceAsyncExternalCpuOnly) {
TF_ASSERT_OK_AND_ASSIGN(auto client, GetClient());
ASSERT_GT(client->addressable_devices().size(), 1);
if (client->platform_id() != CpuId()) return;
std::vector<int32_t> data(4, 0);
auto* data_ptr = data.data();
Shape shape = ShapeUtil::MakeShape(S32, {4});
TF_ASSERT_OK_AND_ASSIGN(
auto buffer,
client->CreateViewOfDeviceBuffer(
data_ptr, shape, client->addressable_devices()[0],
[data = std::move(data)]() mutable {
data.clear();
data.shrink_to_fit();
}));
auto* device_1 = client->addressable_devices()[1];
constexpr int kNumThreads = 4;
tsl::thread::ThreadPool thread_pool(tsl::Env::Default(),
"CopyToDeviceAsyncExternal", kNumThreads);
constexpr int kConcurrentCopy = 16;
std::vector<std::unique_ptr<PjRtBuffer>> results(kConcurrentCopy);
for (int i = 0; i < kConcurrentCopy; ++i) {
TF_ASSERT_OK_AND_ASSIGN(results[i], buffer->CopyToDevice(device_1));
}
buffer.reset();
for (const auto& result : results) {
ASSERT_TRUE(result);
TF_ASSERT_OK_AND_ASSIGN(auto literal, result->ToLiteralSync());
std::vector<int32_t> expected(4, 0);
EXPECT_TRUE(LiteralTestUtil::Equal(LiteralUtil::CreateR1<int32_t>(expected),
*literal));
}
}
absl::StatusOr<std::unique_ptr<PjRtBuffer>> MakeFloatBuffer(
PjRtClient* client, const std::vector<float>& data,
absl::Span<const int64_t> dimensions) {
  Shape shape = ShapeUtil::MakeShape(F32, dimensions);
return client->BufferFromHostBuffer(
data.data(), shape.element_type(), shape.dimensions(),
std::nullopt,
PjRtClient::HostBufferSemantics::kImmutableOnlyDuringCall, nullptr,
client->addressable_devices()[0]);
}
TEST(PjRtClientTest, DuplicateDonationError) {
TF_ASSERT_OK_AND_ASSIGN(auto client, GetClient());
constexpr char kProgram[] =
R"(HloModule DuplicateDonationError, input_output_alias={ {0}: (1, {}, must-alias), {1}: (2, {}, must-alias) }
ENTRY DuplicateDonationError() -> (f32[2, 2], f32[2, 2]) {
%input0 = f32[2, 2] parameter(0)
%input1 = f32[2, 2] parameter(1)
%input2 = f32[2, 2] parameter(2)
%input3 = f32[2, 2] parameter(3)
%tmp1 = f32[2, 2] add(%input0, %input1)
%tmp2 = f32[2, 2] add(%input2, %input3)
ROOT %result = (f32[2, 2], f32[2, 2]) tuple(%tmp1, %tmp2)
})";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module,
ParseAndReturnUnverifiedModule(kProgram, {}));
XlaComputation xla_computation(hlo_module->ToProto());
TF_ASSERT_OK_AND_ASSIGN(auto pjrt_executable,
client->Compile(xla_computation, {}));
std::vector<float> data(4, 0);
TF_ASSERT_OK_AND_ASSIGN(auto buffer0,
MakeFloatBuffer(client.get(), data, {2, 2}));
TF_ASSERT_OK_AND_ASSIGN(auto buffer1,
MakeFloatBuffer(client.get(), data, {2, 2}));
TF_ASSERT_OK_AND_ASSIGN(auto buffer2,
MakeFloatBuffer(client.get(), data, {2, 2}));
{
auto result = pjrt_executable->Execute({{
buffer0.get(),
buffer1.get(),
buffer1.get(),
buffer0.get(),
}},
{});
ASSERT_FALSE(result.ok());
EXPECT_THAT(result.status().message(),
::testing::HasSubstr("f(donate(a), donate(a))"));
}
{
auto result = pjrt_executable->Execute({{
buffer1.get(),
buffer1.get(),
buffer2.get(),
buffer0.get(),
}},
{});
ASSERT_FALSE(result.ok());
EXPECT_THAT(result.status().message(),
::testing::HasSubstr("f(a, donate(a))"));
}
{
auto result = pjrt_executable->Execute({{
buffer0.get(),
buffer1.get(),
buffer2.get(),
buffer2.get(),
}},
{});
ASSERT_FALSE(result.ok());
EXPECT_THAT(result.status().message(),
::testing::HasSubstr("f(donate(a), a)"));
}
}
TEST(PjRtClientTest, GetDefaultLayout) {}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/python/pjrt_ifrt/pjrt_client.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/pjrt/pjrt_client_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b6d6e20f-9b2b-4830-9059-b6284133322f | cpp | tensorflow/tensorflow | mlir_to_bytecode | tensorflow/compiler/mlir/tfrt/translate/mlrt/mlir_to_bytecode.cc | tensorflow/compiler/mlir/tfrt/translate/mlrt/mlir_to_bytecode_test.cc | #include "tensorflow/compiler/mlir/tfrt/translate/mlrt/mlir_to_bytecode.h"
#include <cstdint>
#include <cstring>
#include <iterator>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/TypeSwitch.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/raw_ostream.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/Region.h"
#include "mlir/IR/Types.h"
#include "mlir/IR/Value.h"
#include "mlir/Support/LLVM.h"
#include "tensorflow/core/tfrt/mlrt/bytecode/bytecode.h"
#include "tensorflow/core/tfrt/mlrt/bytecode/executable.h"
#include "tensorflow/core/tfrt/mlrt/bytecode/function.h"
#include "tensorflow/core/tfrt/mlrt/bytecode/kernel.h"
namespace mlrt {
namespace {
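// Attributes whose encoding fits in sizeof(uint32_t) (small scalars and
// symbol refs) are stored inline in the kernel's attribute word rather than
// through the attribute table.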
bool CanBeInlined(mlir::Attribute attr, absl::string_view data) {
return mlir::isa<mlir::IntegerAttr, mlir::FloatAttr, mlir::FlatSymbolRefAttr>(
attr) &&
data.size() <= sizeof(uint32_t);
}
template <typename T>
std::string EncodeIntegerOrFloat(T attr) {
std::string data(sizeof(attr), '\0');
std::memcpy(data.data(), &attr, sizeof(attr));
return data;
}
template <typename T>
std::optional<std::string> EncodeListOfInteger(mlir::ArrayAttr array) {
bc::Buffer buffer;
bc::Allocator allocator(&buffer);
auto ctor = bc::New<bc::Vector<T>>(&allocator, array.size());
mlir::Type type;
for (int i = 0; i < array.size(); ++i) {
if (auto integer_attr = mlir::dyn_cast<mlir::IntegerAttr>(array[i])) {
if (type && integer_attr.getType() != type) return std::nullopt;
type = integer_attr.getType();
llvm::APInt value = integer_attr.getValue();
if (value.getBitWidth() != sizeof(T) * 8) return std::nullopt;
ctor.ConstructAt(i, value.getZExtValue());
} else {
return std::nullopt;
}
}
return std::string(buffer.data(), buffer.size());
}
std::optional<std::string> EncodeListOfSymbolRef(
const ModuleEmitterContext& module_context, mlir::ArrayAttr array) {
bc::Buffer buffer;
bc::Allocator allocator(&buffer);
auto ctor = bc::New<bc::Vector<uint32_t>>(&allocator, array.size());
for (int i = 0; i < array.size(); ++i) {
if (auto symbol_ref = mlir::dyn_cast<mlir::FlatSymbolRefAttr>(array[i])) {
ctor.ConstructAt(i, module_context.GetFunctionId(symbol_ref.getValue()));
} else {
return std::nullopt;
}
}
return std::string(buffer.data(), buffer.size());
}
template <typename T>
std::optional<std::string> EncodeDenseArray(llvm::ArrayRef<T> array) {
bc::Buffer buffer;
bc::Allocator allocator(&buffer);
auto ctor = bc::New<bc::Vector<T>>(&allocator, array.size());
if (!array.empty()) {
ctor.Place(reinterpret_cast<const char*>(array.data()),
array.size() * sizeof(T));
}
return std::string(buffer.data(), buffer.size());
}
std::optional<std::string> EncodeDenseBoolArray(llvm::ArrayRef<bool> array) {
bc::Buffer buffer;
bc::Allocator allocator(&buffer);
auto ctor = bc::New<bc::Vector<uint8_t>>(&allocator, array.size());
if (!array.empty()) {
std::vector<uint8_t> data(array.size());
int i = 0;
for (auto v : array) {
data[i++] = static_cast<uint8_t>(v);
}
ctor.Place(reinterpret_cast<const char*>(data.data()), data.size());
}
return std::string(buffer.data(), buffer.size());
}
std::optional<std::string> EncodeListOfString(mlir::ArrayAttr array) {
bc::Buffer buffer;
bc::Allocator allocator(&buffer);
auto ctor = bc::New<bc::Vector<bc::String>>(&allocator, array.size());
for (int i = 0; i < array.size(); ++i) {
if (auto string_attr = mlir::dyn_cast<mlir::StringAttr>(array[i])) {
ctor.ConstructAt(i, string_attr.getValue().str());
} else {
return std::nullopt;
}
}
return std::string(buffer.data(), buffer.size());
}
struct FunctionEmitterContext {
explicit FunctionEmitterContext(const ModuleEmitterContext* module_context)
: module_context(*module_context) {}
const ModuleEmitterContext& module_context;
struct RegInfo {
int num_uses = 0;
int id = -1;
};
int next_reg_id = 0;
llvm::DenseMap<mlir::Value, RegInfo> register_table;
std::vector<int> free_regs;
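  // Register ids are recycled: take one from the free list before growing
  // the register file.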
int AssignRegId() {
if (free_regs.empty()) {
return next_reg_id++;
}
int id = free_regs.back();
free_regs.pop_back();
return id;
}
void FreeRegId(int id) { free_regs.push_back(id); }
};
void EmitKernel(FunctionEmitterContext& function_context,
bc::Kernel::Constructor& constructor, mlir::Operation& op,
std::vector<uint32_t>& function_output_regs,
std::vector<uint8_t>& function_output_last_uses) {
std::vector<uint32_t> results;
results.reserve(op.getNumResults());
for (auto result : op.getResults()) {
auto iter = function_context.register_table.find(result);
CHECK(iter != function_context.register_table.end());
CHECK_EQ(iter->second.id, -1);
iter->second.id = function_context.AssignRegId();
results.push_back(iter->second.id);
}
constructor.construct_results(results.size())
.Assign(results.begin(), results.end());
std::vector<uint32_t> arguments;
std::vector<uint8_t> last_uses;
arguments.reserve(op.getNumOperands());
last_uses.reserve(op.getNumOperands());
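  // An operand's register is released at its final use; last_uses records
  // those positions so the emitted kernel can drop references eagerly.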
for (auto operand : op.getOperands()) {
auto iter = function_context.register_table.find(operand);
CHECK(iter != function_context.register_table.end());
int id = iter->second.id;
CHECK_NE(id, -1);
last_uses.push_back(0);
if (--iter->second.num_uses == 0) {
function_context.FreeRegId(id);
last_uses.back() = 1;
}
arguments.push_back(id);
}
constructor.construct_arguments(arguments.size())
.Assign(arguments.begin(), arguments.end());
constructor.construct_last_uses(last_uses.size())
.Assign(last_uses.begin(), last_uses.end());
std::vector<uint32_t> attributes;
attributes.reserve(op.getAttrs().size());
for (auto attr : op.getAttrs()) {
int attr_id =
function_context.module_context.GetAttributeId(attr.getValue());
absl::string_view attr_data =
function_context.module_context.attributes().at(attr_id);
if (CanBeInlined(attr.getValue(), attr_data)) {
uint32_t data = 0;
std::memcpy(&data, attr_data.data(), attr_data.size());
attributes.push_back(data);
} else {
attributes.push_back(attr_id);
}
}
constructor.construct_attributes(attributes.size())
.Assign(attributes.begin(), attributes.end());
if (llvm::isa<mlir::func::ReturnOp>(&op)) {
constructor.set_code(function_context.module_context.GetKernelId("return"));
function_output_regs = std::move(arguments);
function_output_last_uses = std::move(last_uses);
} else if (llvm::isa<mlir::func::CallOp>(&op)) {
constructor.set_code(function_context.module_context.GetKernelId("call"));
} else {
llvm::StringRef op_name = op.getName().getStringRef();
constructor.set_code(function_context.module_context.GetKernelId(op_name));
}
}
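// Emits the bytecode for a single-block function: assigns input registers,
// precomputes use counts for all op results, then emits each op in order,
// capturing the function's output registers and last-use flags from the
// terminator.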
void EmitFunction(const ModuleEmitterContext& module_context,
bc::Function::Constructor& constructor, llvm::StringRef name,
mlir::Region& region) {
FunctionEmitterContext function_context(&module_context);
constructor.construct_name(name.str());
DCHECK(llvm::hasSingleElement(region)) << "should have a single block";
auto& block = region.front();
auto& register_table = function_context.register_table;
std::vector<uint32_t> input_regs;
input_regs.reserve(block.getNumArguments());
for (auto arg : block.getArguments()) {
int id = function_context.AssignRegId();
input_regs.push_back(id);
register_table[arg] = {static_cast<int>(std::distance(arg.getUses().begin(),
arg.getUses().end())),
id};
}
constructor.construct_input_regs(input_regs);
for (auto& op : block) {
for (auto result : op.getResults()) {
register_table[result] = {static_cast<int>(
std::distance(result.getUses().begin(), result.getUses().end()))};
}
}
auto kernels_constructor =
constructor.construct_kernels(block.getOperations().size());
std::vector<uint32_t> output_regs;
std::vector<uint8_t> output_last_uses;
for (const auto& iter : llvm::enumerate(block.getOperations())) {
int i = iter.index();
mlir::Operation& op = iter.value();
auto kernel_ctor = kernels_constructor.ConstructAt(i);
EmitKernel(function_context, kernel_ctor, op, output_regs,
output_last_uses);
}
constructor.set_num_regs(function_context.next_reg_id);
constructor.construct_output_regs(output_regs);
constructor.construct_output_last_uses(output_last_uses);
}
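// Emits the whole executable: registers every function in the module, collects
// kernel names and attribute data in a first pass, then emits each function
// body. A func.return carries its values as operands, so it is required to
// have zero results.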
absl::Status EmitExecutable(ModuleEmitterContext& module_context,
bc::Executable::Constructor& constructor,
mlir::ModuleOp module) {
module.walk(
[&](mlir::func::FuncOp func) { module_context.AddFunction(func); });
auto functions = module_context.functions();
for (auto func : functions) {
if (!llvm::hasSingleElement(func.getRegion())) {
return absl::InvalidArgumentError("function should have a single block.");
}
auto& block = func.getRegion().front();
for (auto& op : block) {
if (llvm::isa<mlir::func::CallOp>(&op)) {
module_context.AddKernelName("call");
} else if (llvm::isa<mlir::func::ReturnOp>(&op)) {
if (op.getNumResults() != 0) {
          return absl::InvalidArgumentError(
              "A return op must not produce any results.");
}
module_context.AddKernelName("return");
} else {
module_context.AddKernelName(op.getName().getStringRef().str());
}
for (auto attr : op.getAttrs()) {
if (auto status = module_context.AddAttribute(&op, attr.getValue());
!status.ok()) {
return status;
}
}
}
}
constructor.construct_kernel_names(module_context.kernels().size())
.Assign(module_context.kernels().begin(), module_context.kernels().end());
auto functions_constructor =
constructor.construct_functions(functions.size());
for (int i = 0; i < functions.size(); ++i) {
auto func = functions[i];
auto function_ctor = functions_constructor.ConstructAt(i);
EmitFunction(module_context, function_ctor, func.getSymName(),
func.getRegion());
}
constructor.construct_attributes(module_context.attributes().size())
.Assign(module_context.attributes().begin(),
module_context.attributes().end());
return absl::OkStatus();
}
}
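// Encodes `attr` with the dialect-specific encoder registered for the op's
// dialect, falling back to the default encoding, and records the id of the
// (de-duplicated) encoded data for later lookup.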
absl::Status ModuleEmitterContext::AddAttribute(mlir::Operation* op,
mlir::Attribute attr) {
absl::StatusOr<std::string> attr_data;
if (auto* encoder = attribute_encoder_registry_.Get(
op->getName().getDialectNamespace())) {
attr_data = (*encoder)(*this, attr);
} else {
attr_data = DefaultEncodeAttribute(attr);
}
if (!attr_data.ok()) return std::move(attr_data).status();
int id = AddData(std::move(*attr_data), attributes_, attribute_data_id_map_);
attribute_id_map_[attr] = id;
return absl::OkStatus();
}
int ModuleEmitterContext::AddFunction(mlir::func::FuncOp func) {
int id = functions_.size();
functions_.push_back(func);
DCHECK(!function_name_id_map_.contains(func.getSymName()));
function_name_id_map_[func.getSymName()] = id;
return id;
}
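// Default encoding for attribute kinds the bytecode understands natively:
// strings, 1/32/64-bit integers, f32 floats, arrays of i32/i64/string/symbol
// refs, dense i32/i64/bool arrays, and flat symbol refs (encoded as function
// ids). Returns std::nullopt for anything else.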
std::optional<std::string> EncodeSimpleAttribute(
const ModuleEmitterContext& module_context, mlir::Attribute attr) {
return llvm::TypeSwitch<mlir::Attribute, std::optional<std::string>>(attr)
.Case<mlir::StringAttr>(
[](const auto& str_attr) { return str_attr.str(); })
.Case<mlir::IntegerAttr>(
[](const auto& integer_attr) -> std::optional<std::string> {
switch (llvm::APInt value = integer_attr.getValue();
value.getBitWidth()) {
case 1:
return EncodeIntegerOrFloat<uint8_t>(value.getZExtValue());
case 32:
return EncodeIntegerOrFloat<uint32_t>(value.getZExtValue());
case 64:
return EncodeIntegerOrFloat<uint64_t>(value.getZExtValue());
default:
return std::nullopt;
}
})
.Case<mlir::FloatAttr>(
[](const auto& float_attr) -> std::optional<std::string> {
llvm::APFloat value = float_attr.getValue();
if (float_attr.getType().isF32()) {
return EncodeIntegerOrFloat<float>(value.convertToFloat());
}
return std::nullopt;
})
.Case<mlir::ArrayAttr>([&](const auto& array_attr)
-> std::optional<std::string> {
if (auto encoded_list_i32 = EncodeListOfInteger<uint32_t>(array_attr)) {
return std::move(*encoded_list_i32);
} else if (auto encoded_list_i64 =
EncodeListOfInteger<uint64_t>(array_attr)) {
return std::move(*encoded_list_i64);
} else if (auto encoded_list_string = EncodeListOfString(array_attr)) {
return std::move(*encoded_list_string);
} else if (auto encoded_list_symbol_ref =
EncodeListOfSymbolRef(module_context, array_attr)) {
return std::move(*encoded_list_symbol_ref);
} else {
return std::nullopt;
}
})
.Case<mlir::DenseI32ArrayAttr>(
[](const auto& dense_array_i32) -> std::optional<std::string> {
return EncodeDenseArray<int32_t>(dense_array_i32);
})
.Case<mlir::DenseI64ArrayAttr>(
[](const auto& dense_array_i64) -> std::optional<std::string> {
return EncodeDenseArray<int64_t>(dense_array_i64);
})
.Case<mlir::DenseBoolArrayAttr>(
[](const auto& dense_array_bool) -> std::optional<std::string> {
return EncodeDenseBoolArray(dense_array_bool.asArrayRef());
})
.Case<mlir::FlatSymbolRefAttr>([&](const auto& symbol_ref) {
return EncodeIntegerOrFloat<uint32_t>(
module_context.GetFunctionId(symbol_ref.getValue()));
})
.Default([](const auto& attr) { return std::nullopt; });
}
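// Tries EncodeSimpleAttribute first; attributes that cannot be encoded are
// reported as an InvalidArgument error with the printed attribute text.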
absl::StatusOr<std::string> ModuleEmitterContext::DefaultEncodeAttribute(
mlir::Attribute attr) {
if (auto result = EncodeSimpleAttribute(*this, attr)) {
return std::move(*result);
}
  std::string attr_str;
llvm::raw_string_ostream os(attr_str);
attr.print(os);
return absl::InvalidArgumentError(
absl::StrCat("Try to encode unsupported attribute: ", attr_str));
}
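// Public entry point: lowers an MLIR module to an MLRT bytecode buffer using
// the given per-dialect attribute encoder registry.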
absl::StatusOr<bc::Buffer> EmitExecutable(
const AttributeEncoderRegistry& attribute_encoder_registry,
mlir::ModuleOp module) {
bc::Buffer buffer;
bc::Allocator allocator(&buffer);
ModuleEmitterContext module_context(&attribute_encoder_registry);
auto executable_ctor = bc::New<bc::Executable>(&allocator);
if (auto status = EmitExecutable(module_context, executable_ctor, module);
!status.ok()) {
return status;
}
buffer.shrink_to_fit();
return buffer;
}
} | #include "tensorflow/compiler/mlir/tfrt/translate/mlrt/mlir_to_bytecode.h"
#include <cstring>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/DialectRegistry.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/Parser/Parser.h"
#include "mlir/Support/LLVM.h"
#include "tensorflow/core/tfrt/mlrt/bytecode/bytecode.h"
#include "tensorflow/core/tfrt/mlrt/bytecode/executable.h"
#include "tensorflow/core/tfrt/mlrt/interpreter/attribute_span.h"
#include "tsl/platform/resource_loader.h"
#include "tsl/platform/status_matchers.h"
namespace mlrt {
namespace {
using ::testing::ElementsAreArray;
using ::testing::FloatEq;
using ::testing::IsEmpty;
using ::tsl::testing::IsOkAndHolds;
using ::tsl::testing::StatusIs;
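// Verifies end-to-end emission of basic.mlir: the kernel name table, register
// assignment, and per-kernel arguments/results/last-use flags.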
TEST(MlirToByteCodeTest, Basic) {
constexpr char kBasicMlir[] =
"tensorflow/compiler/mlir/tfrt/translate/mlrt/testdata/basic.mlir";
mlir::DialectRegistry registry;
registry.insert<mlir::func::FuncDialect>();
mlir::MLIRContext mlir_context(registry);
mlir_context.allowUnregisteredDialects();
auto mlir_module = mlir::parseSourceFile<mlir::ModuleOp>(
tsl::GetDataDependencyFilepath(kBasicMlir), &mlir_context);
AttributeEncoderRegistry attribute_encoder_registry;
bc::Buffer buffer =
EmitExecutable(attribute_encoder_registry, mlir_module.get()).value();
bc::Executable executable(buffer.data());
auto kernel_names = executable.kernel_names();
EXPECT_THAT(kernel_names,
ElementsAreArray({"test_mlbc.add.i32", "test_mlbc.sub.i32",
"call", "return"}));
auto functions = executable.functions();
ASSERT_GE(functions.size(), 1);
auto function = functions[0];
EXPECT_EQ(function.name().str(), "add_i32_10");
EXPECT_EQ(function.num_regs(), 5);
EXPECT_THAT(function.input_regs(), ElementsAreArray({0}));
EXPECT_THAT(function.output_regs(), ElementsAreArray({0, 2, 2}));
EXPECT_THAT(function.output_last_uses(),
ElementsAreArray({true, false, true}));
auto kernels = function.kernels();
ASSERT_EQ(kernels.size(), 11);
EXPECT_EQ(kernels[0].code(), 0);
EXPECT_THAT(kernels[0].arguments(), ElementsAreArray({0, 0}));
EXPECT_THAT(kernels[0].results(), ElementsAreArray({1}));
EXPECT_THAT(kernels[0].last_uses(), ElementsAreArray({0, 0}));
for (int i = 1; i < 9; i++) {
EXPECT_EQ(kernels[i].code(), i % 2);
EXPECT_THAT(kernels[i].arguments(), ElementsAreArray({(i - 1) % 2 + 1, 0}));
EXPECT_THAT(kernels[i].results(), ElementsAreArray({i % 2 + 1}));
EXPECT_THAT(kernels[i].last_uses(), ElementsAreArray({1, 0}));
}
EXPECT_EQ(kernels[9].code(), 2);
EXPECT_THAT(kernels[9].arguments(), ElementsAreArray({1}));
EXPECT_THAT(kernels[9].last_uses(), ElementsAreArray({true}));
EXPECT_THAT(kernels[9].results(), ElementsAreArray({2, 3, 4}));
EXPECT_EQ(kernels[10].code(), 3);
EXPECT_THAT(kernels[10].arguments(), ElementsAreArray({0, 2, 2}));
EXPECT_THAT(kernels[10].last_uses(), ElementsAreArray({true, false, true}));
EXPECT_TRUE(kernels[10].results().empty());
}
template <typename T>
absl::StatusOr<T> DecodeAttribute(absl::string_view data) {
if (data.size() < sizeof(T))
return absl::InvalidArgumentError("Invalid data size for attribute.");
T value;
std::memcpy(&value, data.data(), sizeof(T));
return value;
}
TEST(MlirToByteCodeTest, BasicAttributes) {
constexpr char kBasicAttributesMlir[] =
"tensorflow/compiler/mlir/tfrt/translate/mlrt/testdata/"
"basic_attributes.mlir";
mlir::DialectRegistry registry;
registry.insert<mlir::func::FuncDialect>();
mlir::MLIRContext mlir_context(registry);
mlir_context.allowUnregisteredDialects();
auto mlir_module = mlir::parseSourceFile<mlir::ModuleOp>(
tsl::GetDataDependencyFilepath(kBasicAttributesMlir), &mlir_context);
AttributeEncoderRegistry attribute_encoder_registry;
bc::Buffer buffer =
EmitExecutable(attribute_encoder_registry, mlir_module.get()).value();
bc::Executable executable(buffer.data());
auto attributes = executable.attributes();
ASSERT_EQ(attributes.size(), 15);
auto attr_iter = attributes.begin();
EXPECT_EQ(*attr_iter, "test string");
++attr_iter;
EXPECT_EQ(*attr_iter, "ts");
++attr_iter;
EXPECT_THAT(DecodeAttribute<int32_t>(*attr_iter), IsOkAndHolds(100));
++attr_iter;
EXPECT_THAT(DecodeAttribute<int64_t>(*attr_iter), IsOkAndHolds(200));
++attr_iter;
EXPECT_THAT(DecodeAttribute<float>(*attr_iter), IsOkAndHolds(FloatEq(3.0)));
++attr_iter;
EXPECT_THAT(DecodeAttribute<uint8_t>(*attr_iter), IsOkAndHolds(0));
++attr_iter;
bc::Vector<int64_t> list_of_i64((*attr_iter).data());
EXPECT_THAT(list_of_i64, ElementsAreArray({0, 1, 2, 3, 4}));
++attr_iter;
bc::Vector<int32_t> list_of_i32((*attr_iter).data());
EXPECT_THAT(list_of_i32, ElementsAreArray({0, 1, 2, 3}));
++attr_iter;
bc::Vector<bc::String> list_of_str((*attr_iter).data());
EXPECT_THAT(list_of_str, ElementsAreArray({"string 0", "string 1"}));
++attr_iter;
EXPECT_THAT(DecodeAttribute<uint32_t>(*attr_iter), IsOkAndHolds(1));
EXPECT_EQ(executable.functions()[1].name().Get(), "callee");
++attr_iter;
bc::Vector<int32_t> list_of_symbol_ref((*attr_iter).data());
EXPECT_EQ(executable.functions()[2].name().Get(), "callee0");
EXPECT_EQ(executable.functions()[3].name().Get(), "callee1");
EXPECT_THAT(list_of_symbol_ref, ElementsAreArray({2, 3}));
++attr_iter;
bc::Vector<int32_t> dense_array_of_i32((*attr_iter).data());
EXPECT_THAT(dense_array_of_i32, ElementsAreArray({0, 1, 2}));
++attr_iter;
bc::Vector<int64_t> dense_array_of_i64((*attr_iter).data());
EXPECT_THAT(dense_array_of_i64, ElementsAreArray({0, 1, 2}));
++attr_iter;
bc::Vector<int32_t> empty_dense_array((*attr_iter).data());
EXPECT_TRUE(empty_dense_array.empty());
++attr_iter;
bc::Vector<uint8_t> dense_array_of_bool((*attr_iter).data());
EXPECT_THAT(dense_array_of_bool, ElementsAreArray({true, false}));
auto kernels = executable.functions()[0].kernels();
ASSERT_EQ(kernels.size(), 16);
auto kernel_iter = kernels.begin();
auto attribute_span = [&](auto kernel_iter) {
return mlrt::AttributeSpan((*kernel_iter).attributes(), attributes);
};
EXPECT_EQ(attribute_span(kernel_iter).GetAs<bc::String>(0).Get(),
"test string");
++kernel_iter;
EXPECT_EQ(attribute_span(kernel_iter).GetAs<bc::String>(0).Get(), "ts");
++kernel_iter;
EXPECT_EQ(attribute_span(kernel_iter).GetAs<int32_t>(0), 100);
++kernel_iter;
EXPECT_EQ(attribute_span(kernel_iter).GetAs<int64_t>(0), 200);
++kernel_iter;
EXPECT_THAT(attribute_span(kernel_iter).GetAs<float>(0), FloatEq(3.0));
++kernel_iter;
EXPECT_EQ(attribute_span(kernel_iter).GetAs<uint8_t>(0), false);
++kernel_iter;
EXPECT_THAT(attribute_span(kernel_iter).GetAs<bc::Vector<int64_t>>(0),
ElementsAreArray({0, 1, 2, 3, 4}));
++kernel_iter;
EXPECT_THAT(attribute_span(kernel_iter).GetAs<bc::Vector<int32_t>>(0),
ElementsAreArray({0, 1, 2, 3}));
++kernel_iter;
EXPECT_THAT(attribute_span(kernel_iter).GetAs<bc::Vector<bc::String>>(0),
ElementsAreArray({"string 0", "string 1"}));
++kernel_iter;
EXPECT_EQ(attribute_span(kernel_iter).GetAs<uint32_t>(0), 1);
++kernel_iter;
EXPECT_THAT(attribute_span(kernel_iter).GetAs<bc::Vector<int32_t>>(0),
ElementsAreArray({2, 3}));
++kernel_iter;
EXPECT_THAT(attribute_span(kernel_iter).GetAs<bc::Vector<int32_t>>(0),
ElementsAreArray({0, 1, 2}));
++kernel_iter;
EXPECT_THAT(attribute_span(kernel_iter).GetAs<bc::Vector<int64_t>>(0),
ElementsAreArray({0, 1, 2}));
++kernel_iter;
EXPECT_THAT(attribute_span(kernel_iter).GetAs<bc::Vector<int32_t>>(0),
IsEmpty());
++kernel_iter;
EXPECT_THAT(attribute_span(kernel_iter).GetAs<bc::Vector<bool>>(0),
ElementsAreArray({true, false}));
}
TEST(MlirToByteCodeTest, UnsupportedAttributes) {
constexpr char kUnsupportedAttributesMlir[] =
"tensorflow/compiler/mlir/tfrt/translate/mlrt/testdata/"
"unsupported_attributes.mlir";
mlir::DialectRegistry registry;
registry.insert<mlir::func::FuncDialect>();
mlir::MLIRContext mlir_context(registry);
mlir_context.allowUnregisteredDialects();
auto mlir_module = mlir::parseSourceFile<mlir::ModuleOp>(
tsl::GetDataDependencyFilepath(kUnsupportedAttributesMlir),
&mlir_context);
AttributeEncoderRegistry attribute_encoder_registry;
EXPECT_THAT(EmitExecutable(attribute_encoder_registry, mlir_module.get()),
StatusIs(absl::StatusCode::kInvalidArgument,
"Try to encode unsupported attribute: unit"));
}
class CustomDense {
public:
struct StorageType {
using Self = StorageType;
DEFINE_BYTECODE_FIELD(bc::Vector<int64_t>, shape);
DEFINE_BYTECODE_FIELD(bc::Vector<uint32_t>, data);
};
class Constructor {
public:
Constructor(bc::Allocator* allocator, bc::BcAddr_t address)
: allocator_(allocator), address_(address) {}
template <typename... Args>
auto construct_shape(Args&&... args) {
return StorageType::construct_shape(allocator_, address_,
std::forward<Args>(args)...);
}
template <typename... Args>
auto construct_data(Args&&... args) {
return StorageType::construct_data(allocator_, address_,
std::forward<Args>(args)...);
}
bc::BcAddr_t address() const { return address_; }
private:
bc::Allocator* allocator_;
bc::BcAddr_t address_;
};
using NonTrivialConstructorType = Constructor;
explicit CustomDense(const char* p) : p_(p) {}
bc::Vector<int64_t> shape() const { return StorageType::read_shape(p_); }
bc::Vector<uint32_t> data() const { return StorageType::read_data(p_); }
private:
const char* p_ = nullptr;
};
absl::StatusOr<std::string> EncodeCustomDense(const ModuleEmitterContext&,
mlir::Attribute attr) {
auto dense_int_attr = mlir::dyn_cast<mlir::DenseIntElementsAttr>(attr);
if (!dense_int_attr)
return absl::InvalidArgumentError(
"The element of the custom dense attribute must be an integer.");
if (mlir::cast<mlir::IntegerType>(dense_int_attr.getElementType())
.getWidth() != 32) {
return absl::InvalidArgumentError(
"The element of the custom dense attribute must be an i32 integer.");
}
bc::Buffer buffer;
bc::Allocator allocator(&buffer);
auto custom_dense_ctor = bc::New<CustomDense>(&allocator);
auto shaped_type = dense_int_attr.getType();
std::vector<int64_t> shape(shaped_type.getShape().begin(),
shaped_type.getShape().end());
custom_dense_ctor.construct_shape(shape);
custom_dense_ctor.construct_data(shaped_type.getNumElements())
.Place(dense_int_attr.getRawData().data(),
dense_int_attr.getRawData().size());
return std::string(buffer.data(), buffer.size());
}
TEST(MlirToByteCodeTest, CustomDense) {
constexpr char kCustomAttributesMlir[] =
"tensorflow/compiler/mlir/tfrt/translate/mlrt/testdata/"
"custom_attributes.mlir";
mlir::DialectRegistry registry;
registry.insert<mlir::func::FuncDialect>();
mlir::MLIRContext mlir_context(registry);
mlir_context.allowUnregisteredDialects();
auto mlir_module = mlir::parseSourceFile<mlir::ModuleOp>(
tsl::GetDataDependencyFilepath(kCustomAttributesMlir), &mlir_context);
AttributeEncoderRegistry attribute_encoder_registry;
attribute_encoder_registry.Register("test_custom", &EncodeCustomDense);
bc::Buffer buffer =
EmitExecutable(attribute_encoder_registry, mlir_module.get()).value();
bc::Executable executable(buffer.data());
auto attributes = executable.attributes();
ASSERT_EQ(attributes.size(), 10);
for (int i = 0; i < 10; ++i) {
bc::String attr_data = attributes[i];
CustomDense custom_dense(attr_data.data());
EXPECT_THAT(custom_dense.shape(), ElementsAreArray({1}));
EXPECT_THAT(custom_dense.data(), ElementsAreArray({i}));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tfrt/translate/mlrt/mlir_to_bytecode.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tfrt/translate/mlrt/mlir_to_bytecode_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
9f7eb9d2-3b42-4f7c-8958-4e6e67b9b0a1 | cpp | tensorflow/tensorflow | mkl_qmatmul_op | tensorflow/core/kernels/mkl/mkl_qmatmul_op.cc | tensorflow/core/kernels/mkl/mkl_qmatmul_op_test.cc | #if defined(INTEL_MKL)
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/kernels/fill_functor.h"
#include "tensorflow/core/kernels/mkl/mkl_matmul_ops_common.h"
#include "tensorflow/core/kernels/mkl/mkl_quantized_conv_ops.h"
#include "tensorflow/core/kernels/no_op.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/util/mkl_util.h"
#include "tensorflow/core/util/work_sharder.h"
namespace {
enum {
QUANTIZE_MODE_MIN_FIRST,
QUANTIZE_MODE_SCALED,
};
}
namespace tensorflow {
#ifndef ENABLE_ONEDNN_V3
#define TSCALED_BIAS Tbias
#else
#define TSCALED_BIAS float
#endif
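// oneDNN-based quantized MatMul with bias: multiplies a quint8 input by qint8
// weights and adds a bias, producing qint32, requantized quint8/qint8, or
// dequantized float output. The input may be quantized in MIN_FIRST mode
// (asymmetric; the zero-point shift is compensated through the bias) or in
// SCALED mode (symmetric).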
template <typename Device, typename Tinput, typename Tweight, typename Tbias,
typename Toutput, bool native_format = false>
class MklDnnQuantizedMatMulOp
: public MklDnnMatMulOpBase<Tweight, Tbias, Toutput> {
public:
virtual ~MklDnnQuantizedMatMulOp() {
if (this->input_bias_ != nullptr) {
delete this->input_bias_;
input_bias_ = nullptr;
}
if (this->scaled_bias_ != nullptr) {
delete this->scaled_bias_;
scaled_bias_ = nullptr;
}
if (this->comp_bias_ != nullptr) {
      // comp_bias_ is allocated with new[], so it must be freed with delete[].
      delete[] this->comp_bias_;
comp_bias_ = nullptr;
}
}
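  // Lazily allocates the buffer that holds the MIN_FIRST-compensated bias;
  // sized on first use and reused afterwards.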
float* GetCompBiasBuffer(int size) {
if (comp_bias_ == nullptr) {
comp_bias_ = new float[size];
}
return comp_bias_;
}
explicit MklDnnQuantizedMatMulOp(OpKernelConstruction* context)
: MklDnnMatMulOpBase<Tweight, Tbias, Toutput>(context) {
string mode_string;
OP_REQUIRES_OK(context, context->GetAttr("input_quant_mode", &mode_string));
if (mode_string == "MIN_FIRST") {
mode_ = QUANTIZE_MODE_MIN_FIRST;
} else if (mode_string == "SCALED") {
mode_ = QUANTIZE_MODE_SCALED;
} else {
context->CtxFailure(absl::InvalidArgumentError(
absl::StrCat("Quantization mode must be either MIN_FIRST or "
"SCALED, but received ",
mode_string)));
}
this->is_weight_const_ = false;
if (context->HasAttr("is_weight_const")) {
OP_REQUIRES_OK(context, context->GetAttr("is_weight_const",
&(this->is_weight_const_)));
}
this->is_bias_const_ = false;
if (context->HasAttr("is_bias_const")) {
OP_REQUIRES_OK(
context, context->GetAttr("is_bias_const", &(this->is_bias_const_)));
}
}
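  // Builds (or fetches a cached) oneDNN inner-product primitive, reorders the
  // input and weights into the primitive's preferred layouts when necessary
  // (optionally caching constant weights), scales the bias, executes the
  // matmul, and finally emits the output min/max range tensors.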
void Compute(OpKernelContext* context) override {
try {
const Tensor& src_tensor = MklGetInput(context, this->kInputIndexSrc);
const Tensor& weight_tensor =
MklGetInput(context, this->kInputIndexWeight);
const Tensor& bias_tensor = MklGetInput(context, this->kInputIndexBias);
MklDnnShape src_mkl_shape, weight_mkl_shape;
GetMklShape(context, this->kInputIndexSrc, &src_mkl_shape, native_format);
GetMklShape(context, this->kInputIndexWeight, &weight_mkl_shape,
native_format);
OP_REQUIRES(
context, !weight_mkl_shape.IsMklTensor(),
absl::InvalidArgumentError("Weight should not be in MKL Layout"));
MklDnnData<Tinput> src(&(this->cpu_engine_));
MklDnnData<Tweight> weight(&(this->cpu_engine_));
memory::dims src_dims, weight_dims;
memory::dims dst_dims_tf_order, dst_dims_mkl_order;
auto src_tf_shape = src_mkl_shape.IsMklTensor()
? src_mkl_shape.GetTfShape()
: src_tensor.shape();
auto weight_tf_shape = weight_mkl_shape.IsMklTensor()
? weight_mkl_shape.GetTfShape()
: weight_tensor.shape();
src_dims = TFShapeToMklDnnDims(src_tf_shape);
weight_dims = TFShapeToMklDnnDims(weight_tf_shape);
dst_dims_mkl_order = {static_cast<int>(src_tf_shape.dim_size(0)),
static_cast<int>(weight_tf_shape.dim_size(1))};
weight_dims = {static_cast<int>(weight_tf_shape.dim_size(1)),
static_cast<int>(weight_tf_shape.dim_size(0))};
Tensor* dst_tensor = nullptr;
auto input_output_fmt = memory::format_tag::nc;
auto input_output_fmt_mkldnn = MklTensorFormat::FORMAT_NC;
auto src_md =
src_mkl_shape.IsMklTensor()
? src_mkl_shape.GetMklLayout()
: memory::desc(src_dims, MklDnnType<Tinput>(), input_output_fmt);
src.SetUsrMem(src_md, &src_tensor);
auto weight_md = weight_mkl_shape.IsMklTensor()
? weight_mkl_shape.GetMklLayout()
: memory::desc(weight_dims, MklDnnType<Tweight>(),
memory::format_tag::io);
weight.SetUsrMem(weight_md, &weight_tensor);
MklDnnMatMulFwdPrimitive<float, Tinput, Tweight, Tbias, Toutput>*
matmul_fwd = nullptr;
memory::dims bias_dims = {static_cast<int>(bias_tensor.dim_size(0))};
MklDnnMatMulFwdParams matmul_fwd_dims(src_dims, weight_dims, bias_dims,
dst_dims_mkl_order);
this->ExtendMklDnnMatMulFwdParams(context, matmul_fwd_dims);
Eigen::ThreadPoolInterface* eigen_interface =
EigenThreadPoolFromTfContext(context);
tsl::OneDnnThreadPool eigen_tp(eigen_interface,
ThreadPoolUseCallerThread());
matmul_fwd =
MklDnnMatMulFwdPrimitiveFactory<float, Tinput, Tweight, Tbias,
Toutput>::Get(matmul_fwd_dims, 0);
std::shared_ptr<dnnl::inner_product_forward::primitive_desc>
matmul_fwd_pd = matmul_fwd->GetPrimitiveDesc();
this->AllocateOutputTensor(context, *matmul_fwd_pd, dst_dims_mkl_order,
input_output_fmt_mkldnn, &dst_tensor,
native_format);
Toutput* dst_data =
reinterpret_cast<Toutput*>(dst_tensor->flat<Toutput>().data());
Tinput* src_data = nullptr;
if (!native_format && src_md != matmul_fwd_pd->src_desc()) {
src.SetUsrMem(src_md, &src_tensor);
src.CheckReorderToOpMem(matmul_fwd_pd.get()->src_desc(),
this->cpu_engine_, context);
src_data = static_cast<Tinput*>(src.GetOpMem().get_data_handle());
} else {
src_data = static_cast<Tinput*>(
const_cast<Tinput*>(src_tensor.flat<Tinput>().data()));
}
Tweight* weight_data = nullptr;
if (weight_md != matmul_fwd_pd->weights_desc()) {
bool is_weight_cached = false;
if (this->is_weight_const_) {
if (this->IsWeightCacheEmpty(context)) {
this->CacheWeight(context, matmul_fwd_pd, weight_data,
weight_tensor, weight, weight_md);
}
weight_data =
this->GetCachedWeight(context, matmul_fwd_pd->weights_desc());
is_weight_cached = (weight_data != nullptr);
}
if (!is_weight_cached) {
weight.SetUsrMem(weight_md, &weight_tensor);
weight.CheckReorderToOpMem(matmul_fwd_pd.get()->weights_desc(),
this->cpu_engine_, context);
weight_data =
static_cast<Tweight*>(weight.GetOpMem().get_data_handle());
}
} else {
weight_data = static_cast<Tweight*>(
const_cast<Tweight*>(weight_tensor.flat<Tweight>().data()));
}
std::shared_ptr<stream> cpu_stream;
cpu_stream.reset(CreateStream(&eigen_tp, matmul_fwd->GetEngine()));
UserScratchPad<unsigned char> scratch_pad;
scratch_pad.AllocateSPTensor(matmul_fwd, context);
#ifndef ENABLE_ONEDNN_V3
Tbias* bias_data = this->GetBiasHandle(
context, matmul_fwd_pd, bias_tensor, weight_tensor, cpu_stream);
#else
void* bias_data = static_cast<void*>(
const_cast<Tbias*>(bias_tensor.flat<Tbias>().data()));
Tensor temp_scaled_bias_tensor;
this->GetBiasHandle(context, matmul_fwd_pd, bias_tensor, weight_tensor,
cpu_stream, &temp_scaled_bias_tensor, &bias_data);
#endif
matmul_fwd->Execute(src_data, weight_data, bias_data, dst_data,
matmul_fwd_dims, scratch_pad.Get(), cpu_stream);
} catch (dnnl::error& e) {
string error_msg = tensorflow::strings::StrCat(
"Status: ", e.status, ", message: ", string(e.message), ", in file ",
__FILE__, ":", __LINE__);
OP_REQUIRES_OK(context,
absl::AbortedError(absl::StrCat(
"Operation received an exception:", error_msg)));
}
float min_output_value;
float max_output_value;
if (std::is_same<Toutput, quint8>::value ||
std::is_same<Toutput, qint8>::value) {
const Tensor& min_freezed_tensor = context->input(7);
const Tensor& max_freezed_tensor = context->input(8);
OP_REQUIRES(context,
TensorShapeUtils::IsScalar(min_freezed_tensor.shape()),
absl::InvalidArgumentError(absl::StrCat(
"`min_freezed_output` must be rank 0 but is rank ",
min_freezed_tensor.dims())));
OP_REQUIRES(context,
TensorShapeUtils::IsScalar(max_freezed_tensor.shape()),
absl::InvalidArgumentError(absl::StrCat(
"`max_freezed_output` must be rank 0 but is rank ",
max_freezed_tensor.dims())));
min_output_value = min_freezed_tensor.scalar<float>()();
max_output_value = max_freezed_tensor.scalar<float>()();
} else {
ComputeOutputRangeForInt32(context, &min_output_value, &max_output_value);
}
if (std::is_same<Toutput, quint8>::value ||
std::is_same<Toutput, qint8>::value ||
std::is_same<Toutput, qint32>::value) {
Tensor* output_min = nullptr;
Tensor* output_max = nullptr;
MklDnnShape output_min_mkl_shape, output_max_mkl_shape;
output_min_mkl_shape.SetMklTensor(false);
output_max_mkl_shape.SetMklTensor(false);
AllocateOutputSetMklShape(context, 1, &output_min, {},
output_min_mkl_shape, native_format);
AllocateOutputSetMklShape(context, 2, &output_max, {},
output_max_mkl_shape, native_format);
output_min->flat<float>()(0) = min_output_value;
output_max->flat<float>()(0) = max_output_value;
}
}
protected:
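  // Computes the qint32 output range implied by the input and weight ranges,
  // i.e. the representable range of a quint8 x qint8 product accumulated in
  // 32 bits.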
void ComputeOutputRangeForInt32(OpKernelContext* context,
float* min_output_value,
float* max_output_value) {
const float min_input = context->input(3).scalar<float>()();
const float max_input = context->input(4).scalar<float>()();
const float min_weight = context->input(5).scalar<float>()();
const float max_weight = context->input(6).scalar<float>()();
MklQuantizationRangeForMultiplication<quint8, qint8, qint32>(
min_input, max_input, min_weight, max_weight, min_output_value,
max_output_value);
}
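  // Appends the quantization parameters to the primitive key: the element
  // types plus either a single fused output_scale post-op (oneDNN v2) or
  // separate src/wei/dst scales (oneDNN v3), derived from the input, weight,
  // and frozen output ranges.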
virtual void ExtendMklDnnMatMulFwdParams(OpKernelContext* context,
MklDnnMatMulFwdParams& params) {
params.dtypes.append(typeid(Tinput).name());
params.dtypes.append(typeid(Tweight).name());
params.dtypes.append(typeid(Tbias).name());
params.dtypes.append(typeid(Toutput).name());
const Tensor& min_input_tensor = context->input(3);
const Tensor& max_input_tensor = context->input(4);
const Tensor& min_weight_tensor = context->input(5);
const Tensor& max_weight_tensor = context->input(6);
OP_REQUIRES(
context, TensorShapeUtils::IsScalar(min_input_tensor.shape()),
absl::InvalidArgumentError(absl::StrCat(
"`min_a` must be rank 0 but is rank ", min_input_tensor.dims())));
OP_REQUIRES(
context, TensorShapeUtils::IsScalar(max_input_tensor.shape()),
absl::InvalidArgumentError(absl::StrCat(
"`max_a` must be rank 0 but is rank ", max_input_tensor.dims())));
OP_REQUIRES(
context, TensorShapeUtils::IsScalar(min_weight_tensor.shape()),
absl::InvalidArgumentError(absl::StrCat(
"`min_b` must be rank 0 but is rank ", min_weight_tensor.dims())));
OP_REQUIRES(
context, TensorShapeUtils::IsScalar(max_weight_tensor.shape()),
absl::InvalidArgumentError(absl::StrCat(
"`max_b` must be rank 0 but is rank ", max_weight_tensor.dims())));
#ifdef ENABLE_ONEDNN_V3
const float min_input = min_input_tensor.scalar<float>()();
const float max_input = max_input_tensor.scalar<float>()();
const float min_weight = min_weight_tensor.scalar<float>()();
const float max_weight = max_weight_tensor.scalar<float>()();
float src_scale =
mode_ == QUANTIZE_MODE_MIN_FIRST
? (max_input - min_input) / 255.0
: std::max(std::abs(min_input), std::abs(max_input)) / 255.0;
float wei_scale =
std::max(std::abs(min_weight), std::abs(max_weight)) / 127.0;
float dst_scale = 1.0;
#endif
if (std::is_same<Toutput, quint8>::value ||
std::is_same<Toutput, qint8>::value ||
std::is_same<Toutput, float>::value) {
const Tensor& min_freezed_tensor = context->input(7);
const Tensor& max_freezed_tensor = context->input(8);
OP_REQUIRES(context,
TensorShapeUtils::IsScalar(min_freezed_tensor.shape()),
absl::InvalidArgumentError(absl::StrCat(
"`min_freezed_output` must be rank 0 but is rank ",
min_freezed_tensor.dims())));
OP_REQUIRES(context,
TensorShapeUtils::IsScalar(max_freezed_tensor.shape()),
absl::InvalidArgumentError(absl::StrCat(
"`max_freezed_output` must be rank 0 but is rank ",
max_freezed_tensor.dims())));
const float min_freezed_output = min_freezed_tensor.scalar<float>()();
const float max_freezed_output = max_freezed_tensor.scalar<float>()();
float scale_eightbit =
std::max(std::abs(min_freezed_output), std::abs(max_freezed_output));
#ifndef ENABLE_ONEDNN_V3
float min_output_value;
float max_output_value;
ComputeOutputRangeForInt32(context, &min_output_value, &max_output_value);
float scale_int32 =
std::max(std::abs(min_output_value), std::abs(max_output_value));
float scale = 1.0;
if (std::is_same<Toutput, quint8>::value) {
scale = scale_int32 / scale_eightbit / static_cast<float>(1u << 23);
} else if (std::is_same<Toutput, qint8>::value) {
scale = scale_int32 / scale_eightbit / static_cast<float>(1u << 24);
} else if (std::is_same<Toutput, float>::value) {
scale = scale_int32 / static_cast<float>(1u << 31);
} else {
scale = scale_int32 / scale_eightbit / static_cast<float>(1u << 24);
}
std::vector<float> output_scale;
output_scale.push_back(scale);
params.post_op_params.push_back({"output_scale", output_scale});
}
#else
if (std::is_same<Toutput, quint8>::value) {
dst_scale = scale_eightbit / 255.0;
} else if (std::is_same<Toutput, qint8>::value) {
dst_scale = scale_eightbit / 127.0;
} else {
dst_scale = 1.0;
}
} else {
if (!std::is_same<Toutput, qint32>::value)
TF_CHECK_OK(Status(absl::StatusCode::kFailedPrecondition,
"Output datatype is expected to be qint32."));
dst_scale = src_scale * wei_scale;
}
params.post_op_params.push_back({"src_scale", {src_scale}});
params.post_op_params.push_back({"wei_scale", {wei_scale}});
params.post_op_params.push_back({"dst_scale", {dst_scale}});
#endif
}
#ifndef ENABLE_ONEDNN_V3
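  // oneDNN v2 path: returns a bias buffer in the qint32 accumulator domain.
  // A qint32 bias is passed through unchanged; a float bias is scaled by the
  // product of the input and weight quantization scales, and in MIN_FIRST
  // mode additionally compensated for the input's zero-point shift.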
Tbias* GetBiasHandle(
OpKernelContext* context,
std::shared_ptr<dnnl::inner_product_forward::primitive_desc>&
mkldnn_matmul_fwd_pd,
const Tensor& bias_tensor, const Tensor& weight_tensor,
std::shared_ptr<stream> reorder_stream) {
if (std::is_same<Tbias, qint32>::value) {
return static_cast<Tbias*>(
const_cast<Tbias*>(bias_tensor.flat<Tbias>().data()));
} else {
const float min_input = context->input(3).flat<float>()(0);
const float max_input = context->input(4).flat<float>()(0);
const float min_weight = context->input(5).flat<float>()(0);
const float max_weight = context->input(6).flat<float>()(0);
std::vector<dnnl::primitive> net;
float out_scale;
if (mode_ == QUANTIZE_MODE_MIN_FIRST) {
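        // MIN_FIRST shifts the quantized input by qa_amin (the quantized
        // value of the real minimum). Each output column j then picks up an
        // extra qa_amin * sum_i(W[i][j]) in the accumulator, which is folded
        // into the bias here:
        //   comp_bias[j] = bias[j] * out_scale + qa_amin * sum_i(W[i][j])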
int64_t k = weight_tensor.dim_size(0);
int64_t n = weight_tensor.dim_size(1);
float* comp_bias = GetCompBiasBuffer(n);
qint8* wt_buf = static_cast<qint8*>(
const_cast<qint8*>(weight_tensor.flat<qint8>().data()));
const float* bias_buf = static_cast<float*>(
const_cast<float*>(bias_tensor.flat<float>().data()));
float qa_amin = 255 * min_input / (max_input - min_input);
out_scale = (255.0 * 127.0) /
((max_input - min_input) *
std::max(std::abs(max_weight), std::abs(min_weight)));
#ifndef ENABLE_ONEDNN_OPENMP
auto parallel_func = [&](int64_t start, int64_t end) {
for (int64_t j = start; j < end; j++) {
int64_t x = 0;
for (int64_t i = 0; i < k; ++i) {
x += wt_buf[i * n + j];
}
comp_bias[j] =
((bias_buf[j] * out_scale) + static_cast<float>(x * qa_amin));
}
};
const float kArithCost = 2.5f;
const float kMovCost = 1.0f;
float shard_cost = 4 * kArithCost + kMovCost;
const DeviceBase::CpuWorkerThreads& worker_threads =
*(context->device()->tensorflow_cpu_worker_threads());
Shard(worker_threads.num_threads, worker_threads.workers, n, shard_cost,
parallel_func);
#else
#pragma omp parallel for schedule(static)
for (int64_t j = 0; j < n; ++j) {
int64_t x = 0;
for (int64_t i = 0; i < k; ++i) {
x += wt_buf[i * n + j];
}
comp_bias[j] =
((bias_buf[j] * out_scale) + static_cast<float>(x * qa_amin));
}
#endif
return reinterpret_cast<Tbias*>(comp_bias_);
} else if (mode_ == QUANTIZE_MODE_SCALED) {
        // Scale a float bias into the qint32 accumulator domain by the
        // product of the input and weight quantization scales (mirroring the
        // oneDNN v3 path below).
        out_scale = 255.0 * 127.0 /
                    (max_input *
                     std::max(std::abs(max_weight), std::abs(min_weight)));
std::vector<float> scales;
scales.push_back(out_scale);
dnnl::primitive_attr bias_attr;
bias_attr.set_output_scales(0, scales);
void* bias_buf = static_cast<void*>(
const_cast<Tbias*>(bias_tensor.flat<Tbias>().data()));
input_bias_ = new memory(mkldnn_matmul_fwd_pd->bias_desc(),
this->cpu_engine_, bias_buf);
scaled_bias_ =
new memory(mkldnn_matmul_fwd_pd->bias_desc(), this->cpu_engine_);
auto reorder_desc = dnnl::reorder::primitive_desc(
*input_bias_, *scaled_bias_, bias_attr);
net.push_back(dnnl::reorder(reorder_desc));
std::unordered_map<int, memory> reorder_net_args = {
{DNNL_ARG_FROM, *input_bias_}, {DNNL_ARG_TO, *scaled_bias_}};
net.at(0).execute(*reorder_stream, reorder_net_args);
return reinterpret_cast<Tbias*>(scaled_bias_->get_data_handle());
} else {
context->CtxFailure(absl::InvalidArgumentError(
"Quantization mode must be either MIN_FIRST or SCALED."));
return nullptr;
}
}
}
#else
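  // oneDNN v3 path: produces a float bias, since oneDNN v3 applies the
  // src/wei scales before the bias is added. A float bias in SCALED mode
  // needs no work; otherwise the bias is converted into a temporary tensor,
  // and the result is cached for reuse when both bias and weights are
  // constant.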
void GetBiasHandle(
OpKernelContext* context,
std::shared_ptr<dnnl::inner_product_forward::primitive_desc>&
mkldnn_matmul_fwd_pd,
const Tensor& bias_tensor, const Tensor& weight_tensor,
std::shared_ptr<stream> reorder_stream, Tensor* temp_scaled_bias_tensor,
void** bias_data) {
if (std::is_same<Tbias, float>::value && mode_ == QUANTIZE_MODE_SCALED) {
return;
} else {
const float min_input = context->input(3).flat<float>()(0);
const float max_input = context->input(4).flat<float>()(0);
const float min_weight = context->input(5).flat<float>()(0);
const float max_weight = context->input(6).flat<float>()(0);
std::vector<dnnl::primitive> net;
float out_scale;
bool is_cached_bias_valid = false;
bool is_bias_cache_empty = this->IsBiasCacheEmpty();
if (!is_bias_cache_empty) {
this->GetCachedBias(min_input, max_input, bias_data);
is_cached_bias_valid = (*bias_data != nullptr);
}
if (!is_cached_bias_valid) {
auto scaled_bias_md = mkldnn_matmul_fwd_pd->bias_desc();
TensorShape scaled_bias_shape;
scaled_bias_shape.AddDim(
(scaled_bias_md.get_size() / sizeof(TSCALED_BIAS)));
OP_REQUIRES_OK(
context,
context->allocate_temp(DataTypeToEnum<TSCALED_BIAS>::v(),
scaled_bias_shape, temp_scaled_bias_tensor));
void* scaled_bias_buf = static_cast<void*>(
temp_scaled_bias_tensor->flat<TSCALED_BIAS>().data());
if (mode_ == QUANTIZE_MODE_MIN_FIRST) {
int k = weight_tensor.dim_size(0);
int n = weight_tensor.dim_size(1);
          TSCALED_BIAS* comp_bias =
              static_cast<TSCALED_BIAS*>(scaled_bias_buf);
qint8* wt_buf = static_cast<qint8*>(
const_cast<qint8*>(weight_tensor.flat<qint8>().data()));
const Tbias* bias_buf = static_cast<Tbias*>(
const_cast<Tbias*>(bias_tensor.flat<Tbias>().data()));
float qa_amin = 255 * min_input / (max_input - min_input);
out_scale = (255.0 * 127.0) /
((max_input - min_input) *
std::max(std::abs(max_weight), std::abs(min_weight)));
for (int j = 0; j < n; ++j) {
int x = 0;
for (int i = 0; i < k; ++i) {
x += wt_buf[i * n + j];
}
if (std::is_same<Tbias, qint32>::value) {
comp_bias[j] = static_cast<float>(bias_buf[j]) / out_scale;
} else {
comp_bias[j] = static_cast<float>(bias_buf[j]) + (x * qa_amin);
}
}
} else if (mode_ == QUANTIZE_MODE_SCALED) {
out_scale = 255.0 * 127.0 /
(max_input *
std::max(std::abs(max_weight), std::abs(min_weight)));
std::vector<float> scales;
scales.push_back(out_scale);
dnnl::primitive_attr bias_attr;
void* bias_buf = static_cast<void*>(
const_cast<Tbias*>(bias_tensor.flat<Tbias>().data()));
bias_attr.set_scales_mask(DNNL_ARG_DST, 0);
auto input_bias_mem =
memory({{static_cast<int>(bias_tensor.NumElements())},
MklDnnType<Tbias>(),
memory::format_tag::x},
this->cpu_engine_, bias_buf);
auto scaled_bias_mem = memory(mkldnn_matmul_fwd_pd->bias_desc(),
this->cpu_engine_, scaled_bias_buf);
auto reorder_desc = dnnl::reorder::primitive_desc(
input_bias_mem, scaled_bias_mem, bias_attr);
net.push_back(dnnl::reorder(reorder_desc));
std::unordered_map<int, memory> reorder_net_args = {
{DNNL_ARG_FROM, input_bias_mem}, {DNNL_ARG_TO, scaled_bias_mem}};
auto scale_mem =
memory({{1}, MklDnnType<float>(), memory::format_tag::x},
this->cpu_engine_, scales.data());
reorder_net_args.insert(
{DNNL_ARG_ATTR_SCALES | DNNL_ARG_DST, scale_mem});
std::vector<MemoryArgsMap> net_args{reorder_net_args};
ExecutePrimitive(net, &net_args, this->cpu_engine_, context);
} else {
context->CtxFailure(
absl::InvalidArgumentError("Quantization mode must be"
"either MIN_FIRST or SCALED."));
}
*bias_data = static_cast<void*>(
temp_scaled_bias_tensor->flat<TSCALED_BIAS>().data());
if (is_bias_cache_empty) {
this->CacheBias(context, *temp_scaled_bias_tensor, min_input,
max_input);
}
}
}
}
bool IsCachedBiasValid(float current_min_input,
float current_max_input) override {
if (this->is_bias_const_ && this->is_weight_const_ &&
std::abs(current_min_input - this->saved_min_input_) < 1e-5 &&
std::abs(current_max_input - this->saved_max_input_) < 1e-5) {
return true;
}
return false;
}
#endif
private:
memory* input_bias_ = nullptr;
memory* scaled_bias_ = nullptr;
float* comp_bias_ = nullptr;
int mode_;
};
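// Same as MklDnnQuantizedMatMulOp, but with a ReLU post-op fused into the
// oneDNN primitive.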
template <typename Device, typename Tinput, typename Tweight, typename Tbias,
typename Toutput, bool native_format = false>
class MklDnnQuantizedMatMulReluOp
: public MklDnnQuantizedMatMulOp<Device, Tinput, Tweight, Tbias, Toutput,
native_format> {
public:
virtual ~MklDnnQuantizedMatMulReluOp() {}
explicit MklDnnQuantizedMatMulReluOp(OpKernelConstruction* context)
: MklDnnQuantizedMatMulOp<Device, Tinput, Tweight, Tbias, Toutput,
native_format>(context) {}
protected:
void ExtendMklDnnMatMulFwdParams(OpKernelContext* context,
MklDnnMatMulFwdParams& params) override {
MklDnnQuantizedMatMulOp<Device, quint8, qint8, Tbias, Toutput,
native_format>::ExtendMklDnnMatMulFwdParams(context,
params);
params.post_op_params.push_back({"Relu", {1.0, 0.0, 0.0}});
}
};
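// Kernel registration. The plain op names are registered as NoOp placeholders
// (they are expected to be rewritten to their _Mkl* counterparts during graph
// optimization), while the _Mkl* names carry the real oneDNN kernels under
// the kMklQuantizedOpLabel label.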
#define REGISTER_MKL_KERNEL(op, kernel, bias_type, output_type, is_native) \
REGISTER_KERNEL_BUILDER( \
Name(op) \
.Device(DEVICE_CPU) \
.TypeConstraint<quint8>("T1") \
.TypeConstraint<qint8>("T2") BIAS_TYPE_CONSTRAINT(bias_type) \
.TypeConstraint<output_type>("Toutput") LABEL, \
kernel TEMPLATE_ARGS(CPUDevice, quint8, qint8, bias_type, output_type, \
is_native));
#define REGISTER_MKL_KERNEL_ALL_BIAS_TYPES(op, kernel, output_type, is_native) \
REGISTER_MKL_KERNEL(op, kernel, float, output_type, is_native) \
REGISTER_MKL_KERNEL(op, kernel, qint32, output_type, is_native);
#define LABEL
#define TEMPLATE_ARGS(CPUDevice, quint8, qint8, bias_type, output_type, \
is_native)
#define BIAS_TYPE_CONSTRAINT(bias_type)
REGISTER_MKL_KERNEL("QuantizedMatMulWithBiasAndRelu", NoOp, float, qint32,
false);
#undef BIAS_TYPE_CONSTRAINT
#define BIAS_TYPE_CONSTRAINT(bias_type) .TypeConstraint<bias_type>("Tbias")
REGISTER_MKL_KERNEL_ALL_BIAS_TYPES("QuantizedMatMulWithBias", NoOp, qint32,
false);
REGISTER_MKL_KERNEL_ALL_BIAS_TYPES(
"QuantizedMatMulWithBiasAndReluAndRequantize", NoOp, quint8, false);
REGISTER_MKL_KERNEL_ALL_BIAS_TYPES("QuantizedMatMulWithBiasAndRequantize", NoOp,
quint8, false);
REGISTER_MKL_KERNEL_ALL_BIAS_TYPES("QuantizedMatMulWithBiasAndDequantize", NoOp,
float, false);
#undef BIAS_TYPE_CONSTRAINT
#undef TEMPLATE_ARGS
#undef LABEL
#define LABEL .Label(mkl_op_registry::kMklQuantizedOpLabel)
#define TEMPLATE_ARGS(CPUDevice, quint8, qint8, bias_type, output_type, \
is_native) \
<CPUDevice, quint8, qint8, bias_type, output_type, is_native>
#define BIAS_TYPE_CONSTRAINT(bias_type)
REGISTER_MKL_KERNEL("_MklQuantizedMatMulWithBiasAndRelu",
MklDnnQuantizedMatMulReluOp, float, qint32, true);
#undef BIAS_TYPE_CONSTRAINT
#define BIAS_TYPE_CONSTRAINT(bias_type) .TypeConstraint<bias_type>("Tbias")
REGISTER_MKL_KERNEL_ALL_BIAS_TYPES("_MklQuantizedMatMulWithBias",
MklDnnQuantizedMatMulOp, qint32, true);
REGISTER_MKL_KERNEL_ALL_BIAS_TYPES(
"_MklQuantizedMatMulWithBiasAndReluAndRequantize",
MklDnnQuantizedMatMulReluOp, quint8, true);
REGISTER_MKL_KERNEL_ALL_BIAS_TYPES("_MklQuantizedMatMulWithBiasAndRequantize",
MklDnnQuantizedMatMulOp, quint8, true);
REGISTER_MKL_KERNEL_ALL_BIAS_TYPES("_MklQuantizedMatMulWithBiasAndDequantize",
MklDnnQuantizedMatMulOp, float, true);
#undef BIAS_TYPE_CONSTRAINT
#undef TEMPLATE_ARGS
#undef LABEL
#undef TSCALED_BIAS
}
#endif |
#if defined(INTEL_MKL)
#define EIGEN_USE_THREADS
#include <functional>
#include <memory>
#include <vector>
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/kernels/quantization_utils.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
class QuantizedMatMulTest : public OpsTestBase,
public ::testing::WithParamInterface<bool> {};
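// Each test runs twice via the bool parameter: true exercises the legacy
// _MklQuantizedMatMulWithBias* kernels, false the unified _QuantizedMatMul op.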
TEST_P(QuantizedMatMulTest, Small_withBias) {
const bool is_old_api = GetParam();
if (is_old_api) {
TF_ASSERT_OK(
NodeDefBuilder("quantized_mat_mul_op", "_MklQuantizedMatMulWithBias")
.Input(FakeInput(DT_QUINT8))
.Input(FakeInput(DT_QINT8))
.Input(FakeInput(DT_QINT32))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Attr("Toutput", DataTypeToEnum<qint32>::v())
.Attr("_kernel", "QuantizedMklOp")
.Finalize(node_def()));
} else {
TF_ASSERT_OK(
NodeDefBuilder("quantized_mat_mul_op", "_QuantizedMatMul")
.Attr("Thost_inputs", {DT_QUINT8, DT_QINT8, DT_QINT32, DT_FLOAT,
DT_FLOAT, DT_FLOAT, DT_FLOAT})
.Attr("Thost_outputs", {DT_QINT32, DT_FLOAT, DT_FLOAT})
.Attr("Tdevice_inputs", std::vector<DataType>())
.Attr("Tdevice_outputs", std::vector<DataType>())
.Attr("T1", DT_QUINT8)
.Attr("T2", DT_QINT8)
.Attr("Tbias", DT_QINT32)
.Attr("Tout", DT_QINT32)
.Attr("fused_ops", {"BiasAdd"})
.Input(FakeInput())
.Input(FakeInput())
.Finalize(node_def()));
}
TF_ASSERT_OK(InitOp());
AddInputFromArray<quint8>(TensorShape({2, 3}), {1, 2, 3, 4, 5, 6});
AddInputFromArray<qint8>(TensorShape({3, 4}),
{7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18});
AddInputFromArray<qint32>(TensorShape({4}), {1, 2, 3, 4});
AddInputFromArray<float>(TensorShape({}), {0});
AddInputFromArray<float>(TensorShape({}), {255.0f});
AddInputFromArray<float>(TensorShape({}), {-127.0f});
AddInputFromArray<float>(TensorShape({}), {127.0f});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_QINT32, TensorShape({2, 4}));
test::FillValues<qint32>(&expected, {75, 82, 89, 96, 174, 190, 206, 222});
const Tensor& output = *GetOutput(0);
test::ExpectTensorEqual<qint32>(expected, output);
}
TEST_P(QuantizedMatMulTest, Small_withNegBias) {
const bool is_old_api = GetParam();
if (is_old_api) {
TF_ASSERT_OK(
NodeDefBuilder("quantized_mat_mul_op", "_MklQuantizedMatMulWithBias")
.Input(FakeInput(DT_QUINT8))
.Input(FakeInput(DT_QINT8))
.Input(FakeInput(DT_QINT32))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Attr("Toutput", DataTypeToEnum<qint32>::v())
.Attr("_kernel", "QuantizedMklOp")
.Finalize(node_def()));
} else {
TF_ASSERT_OK(
NodeDefBuilder("quantized_mat_mul_op", "_QuantizedMatMul")
.Attr("Thost_inputs", {DT_QUINT8, DT_QINT8, DT_QINT32, DT_FLOAT,
DT_FLOAT, DT_FLOAT, DT_FLOAT})
.Attr("Thost_outputs", {DT_QINT32, DT_FLOAT, DT_FLOAT})
.Attr("Tdevice_inputs", std::vector<DataType>())
.Attr("Tdevice_outputs", std::vector<DataType>())
.Attr("T1", DT_QUINT8)
.Attr("T2", DT_QINT8)
.Attr("Tbias", DT_QINT32)
.Attr("Tout", DT_QINT32)
.Attr("fused_ops", {"BiasAdd"})
.Input(FakeInput())
.Input(FakeInput())
.Finalize(node_def()));
}
TF_ASSERT_OK(InitOp());
AddInputFromArray<quint8>(TensorShape({2, 3}), {1, 2, 3, 4, 5, 6});
AddInputFromArray<qint8>(TensorShape({3, 4}),
{7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18});
AddInputFromArray<qint32>(TensorShape({4}), {100, -200, 300, -400});
AddInputFromArray<float>(TensorShape({}), {0});
AddInputFromArray<float>(TensorShape({}), {255.0f});
AddInputFromArray<float>(TensorShape({}), {-127.0f});
AddInputFromArray<float>(TensorShape({}), {127.0f});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_QINT32, TensorShape({2, 4}));
test::FillValues<qint32>(&expected,
{174, -120, 386, -308, 273, -12, 503, -182});
const Tensor& output = *GetOutput(0);
test::ExpectTensorEqual<qint32>(expected, output);
}
TEST_P(QuantizedMatMulTest, Small_WithNegInp) {
const bool is_old_api = GetParam();
if (is_old_api) {
TF_ASSERT_OK(
NodeDefBuilder("quantized_mat_mul_op", "_MklQuantizedMatMulWithBias")
.Input(FakeInput(DT_QUINT8))
.Input(FakeInput(DT_QINT8))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Attr("Toutput", DataTypeToEnum<qint32>::v())
.Attr("input_quant_mode", "MIN_FIRST")
.Attr("_kernel", "QuantizedMklOp")
.Finalize(node_def()));
} else {
TF_ASSERT_OK(
NodeDefBuilder("quantized_mat_mul_op", "_QuantizedMatMul")
.Attr("Thost_inputs", {DT_QUINT8, DT_QINT8, DT_FLOAT, DT_FLOAT,
DT_FLOAT, DT_FLOAT, DT_FLOAT})
.Attr("Thost_outputs", {DT_QINT32, DT_FLOAT, DT_FLOAT})
.Attr("Tdevice_inputs", std::vector<DataType>())
.Attr("Tdevice_outputs", std::vector<DataType>())
.Attr("T1", DT_QUINT8)
.Attr("T2", DT_QINT8)
.Attr("Tbias", DT_FLOAT)
.Attr("Tout", DT_QINT32)
.Attr("fused_ops", {"BiasAdd"})
.Attr("input_quant_mode", "MIN_FIRST")
.Input(FakeInput())
.Input(FakeInput())
.Finalize(node_def()));
}
TF_ASSERT_OK(InitOp());
AddInputFromArray<quint8>(TensorShape({4, 3}),
{11, 7, 3, 10, 6, 2, 9, 5, 1, 8, 4, 0});
AddInputFromArray<qint8>(TensorShape({3, 2}), {1, 4, 2, 5, 3, 6});
AddInputFromArray<float>(TensorShape({2}), {10.0f, 20.0f});
AddInputFromArray<float>(TensorShape({}), {-12.0f});
AddInputFromArray<float>(TensorShape({}), {243.0f});
AddInputFromArray<float>(TensorShape({}), {-127.0f});
AddInputFromArray<float>(TensorShape({}), {127.0f});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_QINT32, TensorShape({4, 2}));
test::FillValues<qint32>(&expected,
{-28, -63, -34, -78, -40, -93, -46, -108});
const Tensor& output = *GetOutput(0);
test::ExpectTensorEqual<qint32>(expected, output);
}
TEST_P(QuantizedMatMulTest, Small_withBiasAndReq) {
const bool is_old_api = GetParam();
if (is_old_api) {
TF_ASSERT_OK(NodeDefBuilder("quantized_mat_mul_op",
"_MklQuantizedMatMulWithBiasAndRequantize")
.Input(FakeInput(DT_QUINT8))
.Input(FakeInput(DT_QINT8))
.Input(FakeInput(DT_QINT32))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Attr("Toutput", DataTypeToEnum<quint8>::v())
.Attr("_kernel", "QuantizedMklOp")
.Finalize(node_def()));
} else {
TF_ASSERT_OK(NodeDefBuilder("quantized_mat_mul_op", "_QuantizedMatMul")
.Attr("Thost_inputs",
{DT_QUINT8, DT_QINT8, DT_QINT32, DT_FLOAT, DT_FLOAT,
DT_FLOAT, DT_FLOAT, DT_FLOAT, DT_FLOAT})
.Attr("Thost_outputs", {DT_QUINT8, DT_FLOAT, DT_FLOAT})
.Attr("Tdevice_inputs", std::vector<DataType>())
.Attr("Tdevice_outputs", std::vector<DataType>())
.Attr("T1", DT_QUINT8)
.Attr("T2", DT_QINT8)
.Attr("Tbias", DT_QINT32)
.Attr("Tout", DT_QUINT8)
.Attr("fused_ops", {"BiasAdd", "Requantize"})
.Input(FakeInput())
.Input(FakeInput())
.Finalize(node_def()));
}
TF_ASSERT_OK(InitOp());
AddInputFromArray<quint8>(TensorShape({2, 3}), {1, 2, 3, 4, 5, 6});
AddInputFromArray<qint8>(TensorShape({3, 4}),
{7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18});
AddInputFromArray<qint32>(TensorShape({4}), {10, -20, 30, -40});
AddInputFromArray<float>(TensorShape({}), {0});
AddInputFromArray<float>(TensorShape({}), {255.0f});
AddInputFromArray<float>(TensorShape({}), {-127.0f});
AddInputFromArray<float>(TensorShape({}), {127.0f});
AddInputFromArray<float>(TensorShape({}), {0});
AddInputFromArray<float>(TensorShape({}), {255.0f});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_QUINT8, TensorShape({2, 4}));
if (is_old_api) {
#ifdef ENABLE_ONEDNN_V3
test::FillValues<quint8>(&expected, {84, 60, 116, 52, 183, 168, 233, 178});
#else
test::FillValues<quint8>(&expected, {84, 60, 116, 52, 184, 169, 234, 179});
#endif
} else {
test::FillValues<quint8>(&expected, {84, 60, 116, 52, 183, 168, 233, 178});
}
const Tensor& output = *GetOutput(0);
test::ExpectTensorEqual<quint8>(expected, output);
}
TEST_P(QuantizedMatMulTest, Small_withBiasAndDeq) {
const bool is_old_api = GetParam();
if (is_old_api) {
TF_ASSERT_OK(NodeDefBuilder("quantized_mat_mul_op",
"_MklQuantizedMatMulWithBiasAndDequantize")
.Input(FakeInput(DT_QUINT8))
.Input(FakeInput(DT_QINT8))
.Input(FakeInput(DT_QINT32))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Attr("Toutput", DataTypeToEnum<float>::v())
.Attr("_kernel", "QuantizedMklOp")
.Finalize(node_def()));
} else {
TF_ASSERT_OK(
NodeDefBuilder("quantized_mat_mul_op", "_QuantizedMatMul")
.Attr("Thost_inputs", {DT_QUINT8, DT_QINT8, DT_QINT32, DT_FLOAT,
DT_FLOAT, DT_FLOAT, DT_FLOAT})
.Attr("Thost_outputs", {DT_FLOAT})
.Attr("Tdevice_inputs", std::vector<DataType>())
.Attr("Tdevice_outputs", std::vector<DataType>())
.Attr("T1", DT_QUINT8)
.Attr("T2", DT_QINT8)
.Attr("Tbias", DT_QINT32)
.Attr("Tout", DT_FLOAT)
.Attr("fused_ops", {"BiasAdd", "Dequantize"})
.Input(FakeInput())
.Input(FakeInput())
.Finalize(node_def()));
}
TF_ASSERT_OK(InitOp());
AddInputFromArray<quint8>(TensorShape({2, 3}), {1, 2, 3, 4, 5, 6});
AddInputFromArray<qint8>(TensorShape({3, 4}),
{7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18});
AddInputFromArray<qint32>(TensorShape({4}), {10, -20, 30, -40});
AddInputFromArray<float>(TensorShape({}), {0});
AddInputFromArray<float>(TensorShape({}), {255.0f});
AddInputFromArray<float>(TensorShape({}), {-127.0f});
AddInputFromArray<float>(TensorShape({}), {127.0f});
if (is_old_api) {
AddInputFromArray<float>(TensorShape({}), {0});
AddInputFromArray<float>(TensorShape({}), {255.0f});
}
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({2, 4}));
test::FillValues<float>(&expected, {84, 60, 116, 52, 183, 168, 233, 178});
const Tensor& output = *GetOutput(0);
test::ExpectTensorEqual<float>(expected, output);
}
TEST_P(QuantizedMatMulTest, Small_withBiasAndRelu) {
const bool is_old_api = GetParam();
if (is_old_api) {
TF_ASSERT_OK(NodeDefBuilder("quantized_mat_mul_op",
"_MklQuantizedMatMulWithBiasAndRelu")
.Input(FakeInput(DT_QUINT8))
.Input(FakeInput(DT_QINT8))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Attr("Toutput", DataTypeToEnum<qint32>::v())
.Attr("_kernel", "QuantizedMklOp")
.Finalize(node_def()));
} else {
TF_ASSERT_OK(
NodeDefBuilder("quantized_mat_mul_op", "_QuantizedMatMul")
.Attr("Thost_inputs", {DT_QUINT8, DT_QINT8, DT_FLOAT, DT_FLOAT,
DT_FLOAT, DT_FLOAT, DT_FLOAT})
.Attr("Thost_outputs", {DT_QINT32, DT_FLOAT, DT_FLOAT})
.Attr("Tdevice_inputs", std::vector<DataType>())
.Attr("Tdevice_outputs", std::vector<DataType>())
.Attr("T1", DT_QUINT8)
.Attr("T2", DT_QINT8)
.Attr("Tbias", DT_FLOAT)
.Attr("Tout", DT_QINT32)
.Attr("fused_ops", {"BiasAdd", "Relu"})
.Input(FakeInput())
.Input(FakeInput())
.Finalize(node_def()));
}
TF_ASSERT_OK(InitOp());
AddInputFromArray<quint8>(TensorShape({2, 3}), {1, 2, 3, 4, 5, 6});
AddInputFromArray<qint8>(TensorShape({3, 4}),
{7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18});
AddInputFromArray<float>(TensorShape({4}),
{100.0f, -200.0f, 300.0f, -400.0f});
AddInputFromArray<float>(TensorShape({}), {0});
AddInputFromArray<float>(TensorShape({}), {255.0f});
AddInputFromArray<float>(TensorShape({}), {-127.0f});
AddInputFromArray<float>(TensorShape({}), {127.0f});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_QINT32, TensorShape({2, 4}));
test::FillValues<qint32>(&expected, {174, 0, 386, 0, 273, 0, 503, 0});
const Tensor& output = *GetOutput(0);
test::ExpectTensorEqual<qint32>(expected, output);
}
TEST_P(QuantizedMatMulTest, Small_withBiasAndReluAndReq) {
const bool is_old_api = GetParam();
if (is_old_api) {
TF_ASSERT_OK(
NodeDefBuilder("quantized_mat_mul_op",
"_MklQuantizedMatMulWithBiasAndReluAndRequantize")
.Input(FakeInput(DT_QUINT8))
.Input(FakeInput(DT_QINT8))
.Input(FakeInput(DT_QINT32))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Attr("Toutput", DataTypeToEnum<quint8>::v())
.Attr("_kernel", "QuantizedMklOp")
.Finalize(node_def()));
} else {
TF_ASSERT_OK(NodeDefBuilder("quantized_mat_mul_op", "_QuantizedMatMul")
.Attr("Thost_inputs",
{DT_QUINT8, DT_QINT8, DT_QINT32, DT_FLOAT, DT_FLOAT,
DT_FLOAT, DT_FLOAT, DT_FLOAT, DT_FLOAT})
.Attr("Thost_outputs", {DT_QUINT8, DT_FLOAT, DT_FLOAT})
.Attr("Tdevice_inputs", std::vector<DataType>())
.Attr("Tdevice_outputs", std::vector<DataType>())
.Attr("T1", DT_QUINT8)
.Attr("T2", DT_QINT8)
.Attr("Tbias", DT_QINT32)
.Attr("Tout", DT_QUINT8)
.Attr("fused_ops", {"BiasAdd", "Relu", "Requantize"})
.Input(FakeInput())
.Input(FakeInput())
.Finalize(node_def()));
}
TF_ASSERT_OK(InitOp());
AddInputFromArray<quint8>(TensorShape({2, 3}), {1, 2, 3, 4, 5, 6});
AddInputFromArray<qint8>(TensorShape({3, 4}),
{7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18});
AddInputFromArray<qint32>(TensorShape({4}), {10, -20, 30, -40});
AddInputFromArray<float>(TensorShape({}), {0});
AddInputFromArray<float>(TensorShape({}), {255.0f});
AddInputFromArray<float>(TensorShape({}), {-127.0f});
AddInputFromArray<float>(TensorShape({}), {127.0f});
AddInputFromArray<float>(TensorShape({}), {0});
AddInputFromArray<float>(TensorShape({}), {255.0f});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_QUINT8, TensorShape({2, 4}));
if (is_old_api) {
#ifdef ENABLE_ONEDNN_V3
test::FillValues<quint8>(&expected, {84, 60, 116, 52, 183, 168, 233, 178});
#else
test::FillValues<quint8>(&expected, {84, 60, 116, 52, 184, 169, 234, 179});
#endif
} else {
test::FillValues<quint8>(&expected, {84, 60, 116, 52, 183, 168, 233, 178});
}
const Tensor& output = *GetOutput(0);
test::ExpectTensorEqual<quint8>(expected, output);
}
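// Runs the kernel twice and expects the second run to be at least 20% faster
// once the reordered weights are cached. Being timing-based, this assertion
// can be sensitive to machine load.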
TEST_P(QuantizedMatMulTest, Small_withWeightCached) {
const bool is_old_api = GetParam();
if (is_old_api) {
TF_ASSERT_OK(
NodeDefBuilder("quantized_mat_mul_op", "_MklQuantizedMatMulWithBias")
.Input(FakeInput(DT_QUINT8))
.Input(FakeInput(DT_QINT8))
.Input(FakeInput(DT_QINT32))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Attr("Toutput", DataTypeToEnum<qint32>::v())
.Attr("_kernel", "QuantizedMklOp")
.Finalize(node_def()));
} else {
TF_ASSERT_OK(
NodeDefBuilder("quantized_mat_mul_op", "_QuantizedMatMul")
.Attr("Thost_inputs", {DT_QUINT8, DT_QINT8, DT_QINT32, DT_FLOAT,
DT_FLOAT, DT_FLOAT, DT_FLOAT})
.Attr("Thost_outputs", {DT_QINT32, DT_FLOAT, DT_FLOAT})
.Attr("Tdevice_inputs", std::vector<DataType>())
.Attr("Tdevice_outputs", std::vector<DataType>())
.Attr("T1", DT_QUINT8)
.Attr("T2", DT_QINT8)
.Attr("Tbias", DT_QINT32)
.Attr("Tout", DT_QINT32)
.Attr("fused_ops", {"BiasAdd"})
.Input(FakeInput())
.Input(FakeInput())
.Finalize(node_def()));
}
TF_ASSERT_OK(InitOp());
AddInputFromArray<quint8>(TensorShape({1, 3}), {1, 2, 3});
AddInputFromArray<qint8>(TensorShape({3, 4}),
{7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18});
AddInputFromArray<qint32>(TensorShape({4}), {1, 2, 3, 4});
AddInputFromArray<float>(TensorShape({}), {0});
AddInputFromArray<float>(TensorShape({}), {255.0f});
AddInputFromArray<float>(TensorShape({}), {-127.0f});
AddInputFromArray<float>(TensorShape({}), {127.0f});
int64 start_time = Env::Default()->NowMicros();
TF_ASSERT_OK(RunOpKernel());
int64 end_time = Env::Default()->NowMicros();
int64 total_duration_unopt = end_time - start_time;
Tensor expected(allocator(), DT_QINT32, TensorShape({1, 4}));
test::FillValues<qint32>(&expected, {75, 82, 89, 96});
const Tensor& output = *GetOutput(0);
test::ExpectTensorEqual<qint32>(expected, output);
start_time = Env::Default()->NowMicros();
TF_ASSERT_OK(RunOpKernel());
end_time = Env::Default()->NowMicros();
int64 total_duration_opt = end_time - start_time;
LOG(INFO) << " Time taken by first call : " << total_duration_unopt
<< ", Time taken after Caching : " << total_duration_opt;
EXPECT_LT(total_duration_opt, total_duration_unopt * 0.8);
const Tensor& output_new = *GetOutput(0);
test::ExpectTensorEqual<qint32>(expected, output_new);
}
INSTANTIATE_TEST_SUITE_P(All, QuantizedMatMulTest,
::testing::Values(true, false));
}
#endif | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/mkl/mkl_qmatmul_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/mkl/mkl_qmatmul_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
93f94341-4b0e-4bc2-912e-7f6500b6da69 | cpp | tensorflow/tensorflow | math_ops | tensorflow/c/experimental/ops/math_ops.cc | tensorflow/core/ops/math_ops_test.cc | #include "tensorflow/c/experimental/ops/math_ops.h"
#include "absl/types/span.h"
#include "tensorflow/c/eager/abstract_context.h"
#include "tensorflow/c/eager/abstract_operation.h"
#include "tensorflow/c/eager/abstract_tensor_handle.h"
#include "tensorflow/c/eager/tracing_utils.h"
#include "tensorflow/core/platform/status.h"
#include "tsl/platform/errors.h"
using tensorflow::tracing::MaybeSetOpName;
namespace tensorflow {
namespace ops {
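// Each wrapper below follows the same pattern: create an operation, attach
// inputs and attributes, then execute it expecting a single return value.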
Status Mul(AbstractContext* ctx, AbstractTensorHandle* const x,
AbstractTensorHandle* const y, AbstractTensorHandle** z,
const char* name, const char* raw_device_name) {
AbstractOperationPtr op_ptr(ctx->CreateOperation());
TF_RETURN_IF_ERROR(op_ptr->Reset("Mul", raw_device_name));
TF_RETURN_IF_ERROR(MaybeSetOpName(op_ptr.get(), name));
TF_RETURN_IF_ERROR(op_ptr->AddInput(x));
TF_RETURN_IF_ERROR(op_ptr->AddInput(y));
int num_retvals = 1;
return op_ptr->Execute(absl::MakeSpan(z, 1), &num_retvals);
}
Status Conj(AbstractContext* ctx, AbstractTensorHandle* const input,
AbstractTensorHandle** output, const char* name,
const char* raw_device_name) {
AbstractOperationPtr op_ptr(ctx->CreateOperation());
TF_RETURN_IF_ERROR(op_ptr->Reset("Conj", raw_device_name));
TF_RETURN_IF_ERROR(MaybeSetOpName(op_ptr.get(), name));
TF_RETURN_IF_ERROR(op_ptr->AddInput(input));
int num_retvals = 1;
return op_ptr->Execute(absl::MakeSpan(output, 1), &num_retvals);
}
Status AddV2(AbstractContext* ctx, AbstractTensorHandle* const x,
AbstractTensorHandle* const y, AbstractTensorHandle** z,
const char* name, const char* raw_device_name) {
AbstractOperationPtr op_ptr(ctx->CreateOperation());
TF_RETURN_IF_ERROR(op_ptr->Reset("AddV2", raw_device_name));
TF_RETURN_IF_ERROR(MaybeSetOpName(op_ptr.get(), name));
TF_RETURN_IF_ERROR(op_ptr->AddInput(x));
TF_RETURN_IF_ERROR(op_ptr->AddInput(y));
int num_retvals = 1;
return op_ptr->Execute(absl::MakeSpan(z, 1), &num_retvals);
}
Status MatMul(AbstractContext* ctx, AbstractTensorHandle* const a,
AbstractTensorHandle* const b, AbstractTensorHandle** product,
bool transpose_a, bool transpose_b, const char* name,
const char* raw_device_name) {
AbstractOperationPtr op_ptr(ctx->CreateOperation());
TF_RETURN_IF_ERROR(op_ptr->Reset("MatMul", raw_device_name));
TF_RETURN_IF_ERROR(MaybeSetOpName(op_ptr.get(), name));
TF_RETURN_IF_ERROR(op_ptr->AddInput(a));
TF_RETURN_IF_ERROR(op_ptr->AddInput(b));
TF_RETURN_IF_ERROR(op_ptr->SetAttrBool("transpose_a", transpose_a));
TF_RETURN_IF_ERROR(op_ptr->SetAttrBool("transpose_b", transpose_b));
int num_retvals = 1;
return op_ptr->Execute(absl::MakeSpan(product, 1), &num_retvals);
}
Status Neg(AbstractContext* ctx, AbstractTensorHandle* const x,
AbstractTensorHandle** y, const char* name,
const char* raw_device_name) {
AbstractOperationPtr op_ptr(ctx->CreateOperation());
TF_RETURN_IF_ERROR(op_ptr->Reset("Neg", raw_device_name));
TF_RETURN_IF_ERROR(MaybeSetOpName(op_ptr.get(), name));
TF_RETURN_IF_ERROR(op_ptr->AddInput(x));
int num_retvals = 1;
return op_ptr->Execute(absl::MakeSpan(y, 1), &num_retvals);
}
Status Sum(AbstractContext* ctx, AbstractTensorHandle* const input,
AbstractTensorHandle* const reduction_indices,
AbstractTensorHandle** output, bool keep_dims, const char* name,
const char* raw_device_name) {
AbstractOperationPtr op_ptr(ctx->CreateOperation());
TF_RETURN_IF_ERROR(op_ptr->Reset("Sum", raw_device_name));
TF_RETURN_IF_ERROR(MaybeSetOpName(op_ptr.get(), name));
TF_RETURN_IF_ERROR(op_ptr->AddInput(input));
TF_RETURN_IF_ERROR(op_ptr->AddInput(reduction_indices));
TF_RETURN_IF_ERROR(op_ptr->SetAttrBool("keep_dims", keep_dims));
int num_retvals = 1;
return op_ptr->Execute(absl::MakeSpan(output, 1), &num_retvals);
}
Status Sub(AbstractContext* ctx, AbstractTensorHandle* const x,
AbstractTensorHandle* const y, AbstractTensorHandle** z,
const char* name, const char* raw_device_name) {
AbstractOperationPtr op_ptr(ctx->CreateOperation());
TF_RETURN_IF_ERROR(op_ptr->Reset("Sub", raw_device_name));
TF_RETURN_IF_ERROR(MaybeSetOpName(op_ptr.get(), name));
TF_RETURN_IF_ERROR(op_ptr->AddInput(x));
TF_RETURN_IF_ERROR(op_ptr->AddInput(y));
int num_retvals = 1;
return op_ptr->Execute(absl::MakeSpan(z, 1), &num_retvals);
}
Status Div(AbstractContext* ctx, AbstractTensorHandle* const x,
AbstractTensorHandle* const y, AbstractTensorHandle** z,
const char* name, const char* raw_device_name) {
AbstractOperationPtr op_ptr(ctx->CreateOperation());
TF_RETURN_IF_ERROR(op_ptr->Reset("Div", raw_device_name));
TF_RETURN_IF_ERROR(MaybeSetOpName(op_ptr.get(), name));
TF_RETURN_IF_ERROR(op_ptr->AddInput(x));
TF_RETURN_IF_ERROR(op_ptr->AddInput(y));
int num_retvals = 1;
return op_ptr->Execute(absl::MakeSpan(z, 1), &num_retvals);
}
Status DivNoNan(AbstractContext* ctx, AbstractTensorHandle* const x,
AbstractTensorHandle* const y, AbstractTensorHandle** z,
const char* name, const char* raw_device_name) {
AbstractOperationPtr op_ptr(ctx->CreateOperation());
TF_RETURN_IF_ERROR(op_ptr->Reset("DivNoNan", raw_device_name));
TF_RETURN_IF_ERROR(MaybeSetOpName(op_ptr.get(), name));
TF_RETURN_IF_ERROR(op_ptr->AddInput(x));
TF_RETURN_IF_ERROR(op_ptr->AddInput(y));
int num_retvals = 1;
return op_ptr->Execute(absl::MakeSpan(z, 1), &num_retvals);
}
Status Exp(AbstractContext* ctx, AbstractTensorHandle* const x,
AbstractTensorHandle** y, const char* name,
const char* raw_device_name) {
AbstractOperationPtr op_ptr(ctx->CreateOperation());
TF_RETURN_IF_ERROR(op_ptr->Reset("Exp", raw_device_name));
TF_RETURN_IF_ERROR(MaybeSetOpName(op_ptr.get(), name));
TF_RETURN_IF_ERROR(op_ptr->AddInput(x));
int num_retvals = 1;
return op_ptr->Execute(absl::MakeSpan(y, 1), &num_retvals);
}
Status Sqrt(AbstractContext* ctx, AbstractTensorHandle* const x,
AbstractTensorHandle** y, const char* name,
const char* raw_device_name) {
AbstractOperationPtr op_ptr(ctx->CreateOperation());
TF_RETURN_IF_ERROR(op_ptr->Reset("Sqrt", raw_device_name));
TF_RETURN_IF_ERROR(MaybeSetOpName(op_ptr.get(), name));
TF_RETURN_IF_ERROR(op_ptr->AddInput(x));
int num_retvals = 1;
return op_ptr->Execute(absl::MakeSpan(y, 1), &num_retvals);
}
Status SqrtGrad(AbstractContext* ctx, AbstractTensorHandle* const y,
AbstractTensorHandle* const dy, AbstractTensorHandle** z,
const char* name, const char* raw_device_name) {
AbstractOperationPtr op_ptr(ctx->CreateOperation());
TF_RETURN_IF_ERROR(op_ptr->Reset("SqrtGrad", raw_device_name));
TF_RETURN_IF_ERROR(MaybeSetOpName(op_ptr.get(), name));
TF_RETURN_IF_ERROR(op_ptr->AddInput(y));
TF_RETURN_IF_ERROR(op_ptr->AddInput(dy));
int num_retvals = 1;
return op_ptr->Execute(absl::MakeSpan(z, 1), &num_retvals);
}
Status Log1p(AbstractContext* ctx, AbstractTensorHandle* const x,
AbstractTensorHandle** y, const char* name,
const char* raw_device_name) {
AbstractOperationPtr op_ptr(ctx->CreateOperation());
TF_RETURN_IF_ERROR(op_ptr->Reset("Log1p", raw_device_name));
TF_RETURN_IF_ERROR(MaybeSetOpName(op_ptr.get(), name));
TF_RETURN_IF_ERROR(op_ptr->AddInput(x));
int num_retvals = 1;
return op_ptr->Execute(absl::MakeSpan(y, 1), &num_retvals);
}
}
} | #include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/shape_inference_testutil.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
TEST(MathOpsTest, AddN_ShapeFn) {
ShapeInferenceTestOp op("AddN");
auto set_n = [&op](int n) {
std::vector<NodeDefBuilder::NodeOut> src_list;
src_list.reserve(n);
for (int i = 0; i < n; ++i) src_list.emplace_back("a", 0, DT_FLOAT);
TF_ASSERT_OK(NodeDefBuilder("test", "AddN")
.Input(src_list)
.Attr("N", n)
.Finalize(&op.node_def));
};
set_n(2);
INFER_OK(op, "?;?", "in0|in1");
INFER_OK(op, "[1];[?]", "in0");
INFER_OK(op, "[1];?", "in0");
INFER_OK(op, "[?];[1]", "in1");
INFER_OK(op, "?;[1]", "in1");
set_n(2);
INFER_OK(op, "[1,2];[?,2]", "in0");
INFER_OK(op, "[1,2];[1,2]", "in0|in1");
INFER_OK(op, "[?,2];[1,2]", "in1");
set_n(3);
INFER_OK(op, "[1,?];[?,2];[1,2]", "in2");
INFER_OK(op, "[1,2];[?,2];[1,?]", "in0");
INFER_OK(op, "?;?;[1,2]", "in2");
set_n(2);
INFER_OK(op, "?;[1,2]", "in1");
INFER_OK(op, "[1,?];[?,2]", "[d0_0,d1_1]");
INFER_OK(op, "[?,2,?];[?,?,3]", "[d0_0|d1_0,d0_1,d1_2]");
INFER_OK(op, "[?,2];[1,?]", "[d1_0,d0_1]");
set_n(3);
INFER_ERROR("Dimension 1 in both shapes must be equal, but are 2 and 4", op,
"[1,2];?;[1,4]");
INFER_ERROR("From merging shape 0 with other shapes.", op, "[1,2];?;[1,4]");
set_n(4);
INFER_ERROR("Shapes must be equal rank, but are 2 and 3", op,
"?;[1,2];?;[1,2,3]");
INFER_ERROR("From merging shape 1 with other shapes.", op,
"?;[1,2];?;[1,2,3]");
}
TEST(MathOpsTest, UnchangedShape_ShapeFn) {
ShapeInferenceTestOp op("Cast");
INFER_OK(op, "?", "in0");
INFER_OK(op, "[?]", "in0");
INFER_OK(op, "[1,?,3,4]", "in0");
}
TEST(MathOpsTest, Segment_ShapeFn) {
for (const auto* op_name : {"SegmentMax", "SegmentMean", "SegmentMin",
"SegmentProd", "SegmentSum"}) {
ShapeInferenceTestOp op(op_name);
INFER_OK(op, "?;?", "?");
INFER_OK(op, "?;[100]", "?");
INFER_OK(op, "[?];?", "[?]");
INFER_OK(op, "[?];[100]", "[?]");
INFER_OK(op, "[1];?", "[?]");
INFER_OK(op, "[1];[100]", "[?]");
INFER_OK(op, "[?,?];?", "[?,d0_1]");
INFER_OK(op, "[?,2];[100]", "[?,d0_1]");
INFER_OK(op, "[?,2,?,4];[100]", "[?,d0_1,d0_2,d0_3]");
INFER_OK(op, "[1,?];?", "[?,d0_1]");
INFER_OK(op, "[1,2];[100]", "[?,d0_1]");
INFER_OK(op, "[1,2,?,4];[100]", "[?,d0_1,d0_2,d0_3]");
INFER_ERROR("Shape must be rank 1 but is rank 2", op, "?;[1,2]");
INFER_ERROR("Shape must be at least rank 1 but is rank 0", op, "[];[1]");
}
}
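// Shared broadcast-shape checks for the binary cwise ops. Equal and NotEqual
// are additionally exercised with incompatible_shape_error=false, where shape
// mismatches no longer error out.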
TEST(MathOpsTest, BroadcastBinaryOps_ShapeFn) {
auto test_shapes = [&](ShapeInferenceTestOp& op,
bool incompatible_shape_error) {
INFER_OK(op, "?;?", "?");
INFER_OK(op, "[1,2];?", "?");
INFER_OK(op, "?;[1,2]", "?");
INFER_OK(op, "[?];[1]", "[d0_0]");
INFER_OK(op, "[1];[?]", "[d1_0]");
INFER_OK(op, "[?];[2]", incompatible_shape_error ? "[d1_0]" : "?");
INFER_OK(op, "[2];[?]", incompatible_shape_error ? "[d0_0]" : "?");
INFER_OK(op, "[?];[?]", "[?]");
INFER_OK(op, "[];[?]", "[d1_0]");
INFER_OK(op, "[?];[]", "[d0_0]");
INFER_OK(op, "[1];[1]", "[d0_0|d1_0]");
INFER_OK(op, "[];[1]", "[d1_0]");
INFER_OK(op, "[1];[]", "[d0_0]");
INFER_OK(op, "[2];[2]", "[d0_0|d1_0]");
INFER_OK(op, "[];[2]", "[d1_0]");
INFER_OK(op, "[1];[2]", "[d1_0]");
INFER_OK(op, "[2];[1]", "[d0_0]");
INFER_OK(op, "[2];[]", "[d0_0]");
INFER_OK(op, "[2];[?]", incompatible_shape_error ? "[d0_0]" : "?");
INFER_OK(op, "[0];[0]", "[d0_0|d1_0]");
INFER_OK(op, "[];[0]", "[d1_0]");
INFER_OK(op, "[1];[0]", "[d1_0]");
INFER_OK(op, "[0];[1]", "[d0_0]");
INFER_OK(op, "[0];[]", "[d0_0]");
INFER_OK(op, "[2];[?,?]", incompatible_shape_error ? "[d1_0,d0_0]" : "?");
INFER_OK(op, "[2,2];[?,?,?]",
incompatible_shape_error ? "[d1_0,d0_0,d0_1]" : "?");
INFER_OK(op, "[?,1,2,3,4,5];[3,1,?]",
incompatible_shape_error ? "[d0_0,d0_1,d0_2,d0_3|d1_0,d0_4,d0_5]"
: "?");
INFER_OK(op, "[3,1,?];[?,1,2,3,4,5]",
incompatible_shape_error ? "[d1_0,d1_1,d1_2,d1_3|d0_0,d1_4,d1_5]"
: "?");
if (incompatible_shape_error) {
INFER_ERROR("Dimensions must be equal", op, "[2];[3]");
} else {
INFER_OK(op, "[2];[3]", "[]");
}
};
for (string op_name : {"Add", "Complex",
"Div", "Equal",
"Greater", "GreaterEqual",
"Igamma", "Igammac",
"Zeta", "Polygamma",
"Less", "LessEqual",
"LogicalAnd", "LogicalOr",
"Maximum", "Minimum",
"Mod", "Mul",
"NotEqual", "Pow",
"Sub", "SquaredDifference",
"DivNoNan"}) {
ShapeInferenceTestOp op(op_name);
AddNodeAttr("incompatible_shape_error", true, &op.node_def);
test_shapes(op, true);
if ((op_name == "Equal") || (op_name == "NotEqual")) {
ShapeInferenceTestOp op(op_name);
AddNodeAttr("incompatible_shape_error", false, &op.node_def);
test_shapes(op, false);
}
}
}
TEST(MathOpsTest, Select_ShapeFn) {
ShapeInferenceTestOp op("Select");
INFER_OK(op, "?;?;?", "in1|in2");
INFER_OK(op, "[];[1];?", "in1");
INFER_OK(op, "[];?;?", "in1|in2");
INFER_OK(op, "[1];?;?",
"in1|in2");
INFER_OK(op, "[1,2];?;?", "in1|in2?");
INFER_OK(op, "?;[];?", "in1");
INFER_OK(op, "?;?;[]", "in2");
INFER_OK(op, "?;[1];?", "in1");
INFER_OK(op, "?;?;[1]", "in2");
INFER_OK(op, "?;[1,2];?", "in1");
INFER_OK(op, "?;?;[1,2]", "in2");
INFER_ERROR("Shapes must be equal rank, but are 0 and 1", op, "[1];[];?");
INFER_ERROR("Shapes must be equal rank, but are 1 and 2", op, "[];[1];[1,2]");
INFER_ERROR("Shapes must be equal rank, but are 1 and 2", op, "[1,2];[1];?");
INFER_OK(op, "[2];[?];[?]", "in1|in2");
INFER_OK(op, "[?];[?,?,3];[1,2,?]", "[d2_0,d2_1,d1_2]");
INFER_OK(op, "[2];[?,?,3];[?,2,?]", "[d1_0|d2_0,d2_1,d1_2]");
INFER_ERROR("must be equal", op, "[1];[2,?,3];[?,2,?]");
INFER_ERROR("Shapes must be equal rank, but are 3 and 2", op,
"[2,?];[?,?,3];[?,2,?]");
INFER_OK(op, "[2,?,?];[?,?,3];[?,2,?]", "[d0_0,d2_1,d1_2]");
INFER_ERROR("Dimension 2 in both shapes must be equal, but are 3 and 5", op,
"[2,?,5];[?,?,3];[?,2,?]");
const OpRegistrationData* op_reg_data;
TF_ASSERT_OK(OpRegistry::Global()->LookUp(op.name, &op_reg_data));
typedef std::vector<std::pair<PartialTensorShape, DataType>> ShapeDtypeV;
std::vector<std::unique_ptr<ShapeDtypeV>> handle_data;
std::unique_ptr<shape_inference::InferenceContext> c;
auto run_inference_for_handles = [&]() -> Status {
CHECK(op_reg_data->shape_inference_fn != nullptr);
c.reset(new shape_inference::InferenceContext(
TF_GRAPH_DEF_VERSION, op.node_def, op_reg_data->op_def,
{PartialTensorShape(), PartialTensorShape(), PartialTensorShape()}, {},
{}, handle_data));
TF_CHECK_OK(c->construction_status());
Status s = c->Run(op_reg_data->shape_inference_fn);
LOG(INFO) << "Inference got " << s;
return s;
};
auto shape_proto = [](std::initializer_list<int64_t> dim_sizes) {
TensorShapeProto p;
for (auto i : dim_sizes) p.add_dim()->set_size(i);
return p;
};
auto i0 = PartialTensorShape({1, -1});
auto i1 = PartialTensorShape({-1, 2});
PartialTensorShape unknown_shape;
auto scalar = PartialTensorShape({});
handle_data.emplace_back(
new ShapeDtypeV{{scalar, DT_FLOAT}, {unknown_shape, DT_INT32}});
handle_data.emplace_back(new ShapeDtypeV{{i0, DT_FLOAT}, {i1, DT_INT32}});
handle_data.emplace_back(
new ShapeDtypeV{{i1, DT_FLOAT}, {unknown_shape, DT_INT32}});
TF_ASSERT_OK(run_inference_for_handles());
auto* out = c->output_handle_shapes_and_types(0);
ASSERT_EQ(2, out->size());
EXPECT_EQ("[1,2]", c->DebugString(out->at(0).shape));
EXPECT_EQ(DT_FLOAT, out->at(0).dtype);
EXPECT_EQ("[?,2]", c->DebugString(out->at(1).shape));
EXPECT_EQ(DT_INT32, out->at(1).dtype);
handle_data[2]->at(0).first = shape_proto({2, 2});
EXPECT_TRUE(absl::StrContains(run_inference_for_handles().message(),
"must be equal, but are 1 and 2"));
handle_data[2]->at(0).first = i1;
handle_data[2]->at(1).second = DT_INT64;
EXPECT_TRUE(absl::StrContains(run_inference_for_handles().message(),
"pointing to different dtypes"));
handle_data[2]->at(1).second = DT_INT32;
handle_data[2]->push_back({i1, DT_FLOAT});
EXPECT_TRUE(absl::StrContains(run_inference_for_handles().message(),
"pointing to different numbers of tensors"));
handle_data[2]->pop_back();
}
TEST(MathOpsTest, Range_ShapeFn) {
ShapeInferenceTestOp op("Range");
TF_ASSERT_OK(NodeDefBuilder("test", "Range")
.Input({"start", {}, DT_INT32})
.Input({"limit", {}, DT_INT32})
.Input({"delta", {}, DT_INT32})
.Attr("Tidx", DT_INT32)
.Finalize(&op.node_def));
op.input_tensors.resize(3);
INFER_OK(op, "?;?;?", "[?]");
INFER_ERROR("Shape must be rank 0 but is rank 2", op, "[1,2];?;?");
INFER_ERROR("for 'start'", op, "[1,2];?;?");
INFER_ERROR("Shape must be rank 0 but is rank 2", op, "?;[1,2];?");
INFER_ERROR("for 'limit'", op, "?;[1,2];?");
INFER_ERROR("Shape must be rank 0 but is rank 2", op, "?;?;[1,2]");
INFER_ERROR("for 'delta'", op, "?;?;[1,2]");
Tensor start_t = test::AsScalar(1);
op.input_tensors[0] = &start_t;
INFER_OK(op, "?;?;?", "[?]");
Tensor limit_t = test::AsScalar(1);
op.input_tensors[1] = &limit_t;
INFER_OK(op, "?;?;?", "[?]");
Tensor delta_t = test::AsScalar(1);
op.input_tensors[2] = &delta_t;
INFER_OK(op, "?;?;?", "[0]");
delta_t = test::AsScalar(0);
INFER_ERROR("Requires delta != 0", op, "?;?;?");
delta_t = test::AsScalar(3);
limit_t = test::AsScalar(-1);
INFER_ERROR("Requires start <= limit when delta > 0: 1/-1", op, "?;?;?");
delta_t = test::AsScalar(-1);
INFER_OK(op, "?;?;?", "[2]");
limit_t = test::AsScalar(4);
INFER_ERROR("Requires start >= limit when delta < 0: 1/4", op, "?;?;?");
limit_t = test::AsScalar(100);
start_t = test::AsScalar(2);
delta_t = test::AsScalar(3);
INFER_OK(op, "?;?;?", "[33]");
}
TEST(MathOpsTest, LinSpace_ShapeFn) {
ShapeInferenceTestOp op("LinSpace");
op.input_tensors.resize(3);
INFER_OK(op, "?;?;?", "[?]");
INFER_ERROR("Shape must be rank 0 but is rank 2", op, "[1,2];?;?");
INFER_ERROR("for 'start'", op, "[1,2];?;?");
INFER_ERROR("Shape must be rank 0 but is rank 2", op, "?;[1,2];?");
INFER_ERROR("for 'stop'", op, "?;[1,2];?");
INFER_ERROR("Shape must be rank 0 but is rank 2", op, "?;?;[1,2]");
INFER_ERROR("for 'num'", op, "?;?;[1,2]");
Tensor num_t = test::AsScalar(1);
op.input_tensors[2] = &num_t;
INFER_OK(op, "?;?;?", "[1]");
num_t = test::AsScalar(2);
INFER_OK(op, "?;?;?", "[2]");
num_t = test::AsScalar(-1);
INFER_ERROR("Requires num > 0: -1", op, "?;?;?");
}
TEST(MathOpsTest, UnsortedSegmentSum_ShapeFn) {
ShapeInferenceTestOp op("UnsortedSegmentSum");
op.input_tensors.resize(3);
INFER_OK(op, "?;?;?", "?");
INFER_OK(op, "?;[?];?", "?");
INFER_ERROR("Shape must be rank 0 but is rank 2", op, "?;?;[1,2]");
INFER_ERROR("Dimensions must be equal, but are 2 and 3", op,
"[1,?,2];[1,?,3];?");
INFER_OK(op, "?;[3];?", "?");
INFER_ERROR("Shape must be at least rank 3 but is rank 2", op,
"[1,2];[1,2,3];?");
Tensor num_segments_t = test::AsScalar(100);
op.input_tensors[2] = &num_segments_t;
INFER_OK(op, "[?,2,3,?,5];[1,2,?];[]", "[100,d0_3,d0_4]");
num_segments_t = test::AsScalar(-1);
INFER_ERROR(("Dimension size, given by scalar input 2, must be "
"non-negative but is -1"),
op, "[3];[3];?");
}
TEST(MathOpsTest, SparseSegment_ShapeFn) {
ShapeInferenceTestOp op("SparseSegmentSum");
op.input_tensors.resize(3);
INFER_OK(op, "?;?;?", "?");
INFER_OK(op, "[2,4,3];[3];[3]", "[?,d0_1,d0_2]");
INFER_ERROR("Shape must be rank 1 but is rank 0", op, "[2,4,3];[];[3]");
INFER_ERROR("Shape must be rank 1 but is rank 2", op, "[2,4,3];[3];[3,4]");
INFER_ERROR("Dimension 0 in both shapes must be equal, but are 3 and 4", op,
"[2,4,3];[3];[4]");
}
TEST(MathOpsTest, SparseSegmentGrad_ShapeFn) {
ShapeInferenceTestOp op("SparseSegmentMeanGrad");
op.input_tensors.resize(4);
INFER_OK(op, "?;?;?;?", "?");
INFER_OK(op, "[2,4,3];[3];[3];[]", "[?,d0_1,d0_2]");
Tensor num_segments_t = test::AsScalar(100);
op.input_tensors[3] = &num_segments_t;
INFER_OK(op, "[2,4,3];[3];[3];[]", "[100,d0_1,d0_2]");
INFER_ERROR("Shape must be rank 0 but is rank 2", op,
"[2,4,3];[3];[3];[1,1]");
num_segments_t = test::AsScalar(-100);
op.input_tensors[3] = &num_segments_t;
INFER_ERROR("Cannot specify a negative value", op, "[2,4,3];[3];[3];[]");
}
TEST(MathOpsTest, BatchMatMul_ShapeFn) {
ShapeInferenceTestOp op("BatchMatMul");
auto set_adj = [&op](bool adj_x, bool adj_y) {
TF_ASSERT_OK(NodeDefBuilder("test", "BatchMatMul")
.Input({"a", 0, DT_FLOAT})
.Input({"b", 0, DT_FLOAT})
.Attr("adj_x", adj_x)
.Attr("adj_y", adj_y)
.Finalize(&op.node_def));
};
set_adj(false, false);
INFER_ERROR("at least rank 2", op, "[1];?");
INFER_ERROR("at least rank 2", op, "?;[2]");
INFER_OK(op, "?;?", "?");
INFER_OK(op, "[?,?];[?,?]", "[d0_0,d1_1]");
INFER_OK(op, "[?,?,?,?];?", "[d0_0,d0_1,d0_2,?]");
set_adj(false, false);
INFER_OK(op, "[1,2,3,4];[1,2,?,?]", "[d0_0,d0_1,d0_2,d1_3]");
INFER_ERROR("are 2 and 3", op, "[?,1,2];[?,3,1]");
set_adj(true, false);
INFER_OK(op, "[1,2,3,4];[1,2,?,?]", "[d0_0,d0_1,d0_3,d1_3]");
INFER_ERROR("are 2 and 3", op, "[?,2,1];[?,3,1]");
set_adj(false, true);
INFER_OK(op, "[1,2,?,?];[1,2,3,4]", "[d0_0,d0_1,d0_2,d1_2]");
INFER_ERROR("are 2 and 3", op, "[?,1,2];[?,1,3]");
set_adj(true, true);
INFER_OK(op, "[1,2,?,?];[1,2,3,4]", "[d0_0,d0_1,d0_3,d1_2]");
INFER_ERROR("are 2 and 3", op, "[?,2,1];[?,1,3]");
}
TEST(MathOpsTest, ArgOps_ShapeFn) {
ShapeInferenceTestOp op("ArgMax");
op.input_tensors.resize(2);
INFER_OK(op, "?;?", "?");
INFER_OK(op, "[2];?", "[]");
INFER_OK(op, "[];?", "[]");
INFER_ERROR("must be rank 0", op, "[2];[1]");
INFER_OK(op, "[2,3,4];?", "[?,?]");
INFER_OK(op, "[2,3,4,5,6];?", "[?,?,?,?]");
Tensor dimension = test::AsScalar(0);
op.input_tensors[1] = &dimension;
INFER_OK(op, "[2,3,4];[]", "[d0_1,d0_2]");
dimension = test::AsScalar(1);
op.input_tensors[1] = &dimension;
INFER_OK(op, "[2,3,4];[]", "[d0_0,d0_2]");
dimension = test::AsScalar(2);
op.input_tensors[1] = &dimension;
INFER_OK(op, "[2,3,4];[]", "[d0_0,d0_1]");
dimension = test::AsScalar(10);
op.input_tensors[1] = &dimension;
INFER_ERROR("must be in the range [-3, 3)", op, "[2,3,4];[]");
dimension = test::AsScalar(-10);
op.input_tensors[1] = &dimension;
INFER_ERROR("must be in the range [-3, 3)", op, "[2,3,4];[]");
dimension = test::AsScalar(-1);
op.input_tensors[1] = &dimension;
INFER_OK(op, "[2,3,4];[]", "[d0_0,d0_1]");
}
TEST(MathOpsTest, Betainc_ShapeFn) {
ShapeInferenceTestOp op("Betainc");
INFER_OK(op, "?;?;?", "?");
INFER_OK(op, "[?,?];?;?", "in0");
INFER_OK(op, "[?,2];?;[1,?]", "[d2_0,d0_1]");
INFER_OK(op, "[?,2,?];[1,?,?];[?,?,3]", "[d1_0,d0_1,d2_2]");
INFER_OK(op, "[?,2,?];[];[?,?,3]", "[d0_0|d2_0,d0_1,d2_2]");
INFER_OK(op, "[];[];[?,?,3]", "in2");
INFER_OK(op, "[];[];?", "in2");
INFER_OK(op, "[];[];[1,2,3,4]", "in2");
INFER_OK(op, "[];[];[]", "in0");
INFER_ERROR("must be equal", op, "[1,2];[];[1,4]");
INFER_ERROR("must be equal", op, "[1,2];[];[1,2,3]");
}
TEST(MathOpsTest, Requantize_ShapeFn) {
ShapeInferenceTestOp op("Requantize");
INFER_OK(op, "?;?;?;?;?", "in0;[];[]");
INFER_OK(op, "?;[];[];[];[]", "in0;[];[]");
INFER_ERROR("must be rank 0", op, "?;[1];?;?;?");
INFER_ERROR("must be rank 0", op, "?;?;[2];?;?");
INFER_ERROR("must be rank 0", op, "?;?;?;[3];?");
INFER_ERROR("must be rank 0", op, "?;?;?;?;[4]");
}
TEST(MathOpsTest, RequantizationRange_ShapeFn) {
ShapeInferenceTestOp op("RequantizationRange");
INFER_OK(op, "?;?;?", "[];[]");
INFER_OK(op, "?;[];[]", "[];[]");
INFER_ERROR("must be rank 0", op, "?;[1];?");
INFER_ERROR("must be rank 0", op, "?;?;[2]");
}
TEST(MathOpsTest, Cross_ShapeFn) {
ShapeInferenceTestOp op("Cross");
INFER_ERROR("Shape must be at least rank 1 but is rank 0", op, "[];[]");
INFER_ERROR("Dimension 0 in both shapes must be equal, but", op, "[3];[5]");
INFER_ERROR("Dimension must be 3 but", op, "[3,5];[3,5]");
INFER_OK(op, "?;?", "in0");
INFER_OK(op, "[?];[?]", "in0");
INFER_OK(op, "[1,?,3];[?,?,?]", "in0");
}
TEST(MathOpsTest, HistogramFixedWidth_ShapeFn) {
ShapeInferenceTestOp op("HistogramFixedWidth");
INFER_ERROR("Shape must be rank 1 but is rank 0", op, "[];[];[]");
INFER_ERROR("Dimension must be 2 but is 3", op, "[];[3];[]");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "[];[2];[2]");
INFER_OK(op, "?;?;?", "[?]");
INFER_OK(op, "[?];[2];[]", "[?]");
INFER_OK(op, "[?];[2];?", "[?]");
}
TEST(MathOpsTest, QuantizedAdd_ShapeFn) {
ShapeInferenceTestOp op("QuantizedAdd");
INFER_OK(op, "?;?;?;?;?;?", "?;[];[]");
INFER_OK(op, "?;?;[];[];[];[]", "?;[];[]");
INFER_OK(op, "[1,2];?;[];[];[];[]", "?;[];[]");
INFER_OK(op, "[];[2];[];[];[];[]", "[d1_0];[];[]");
INFER_ERROR("must be rank 0", op, "?;?;[1];?;?;?");
INFER_ERROR("must be rank 0", op, "?;?;?;[2];?;?");
INFER_ERROR("must be rank 0", op, "?;?;?;?;[3];?");
INFER_ERROR("must be rank 0", op, "?;?;?;?;?;[4]");
}
TEST(MathOpsTest, Bincount_ShapeFn) {
ShapeInferenceTestOp op("Bincount");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "?;[1];?");
INFER_OK(op, "?;?;?", "[?]");
INFER_OK(op, "?;[];?", "[?]");
INFER_OK(op, "[?];[];?", "[?]");
INFER_OK(op, "[?];[];[?]", "[?]");
}
TEST(MathOpsTest, SobolSample) {
ShapeInferenceTestOp op("SobolSample");
INFER_ERROR("must be rank 0", op, "[1];?;?");
INFER_ERROR("must be rank 0", op, "?;[1];?");
INFER_ERROR("must be rank 0", op, "?;?;[1]");
INFER_OK(op, "[];[];[]", "[?,?]");
}
TEST(MathOpsTest, EqualOp) {
ShapeInferenceTestOp op("Equal");
AddNodeAttr("incompatible_shape_error", true, &op.node_def);
INFER_OK(op, "?;?", "?");
INFER_OK(op, "[1,2];?", "?");
INFER_OK(op, "?;[1,2]", "?");
INFER_OK(op, "[1,2,3];[1]", "[d0_0,d0_1,d0_2]");
INFER_OK(op, "[?,2,1];[1,3]", "[d0_0,d0_1,d1_1]");
INFER_OK(op, "[1,?,3];[3,1]", "[d0_0,d1_0,d0_2]");
INFER_OK(op, "[1,2,3];[2,1,3]", "[d1_0,d0_1,d0_2]");
INFER_OK(op, "[?,10,1];[?,1,4]", "[?,d0_1,d1_2]");
INFER_OK(op, "[10,?,1];[1,?,4]", "[d0_0,?,d1_2]");
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/experimental/ops/math_ops.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/ops/math_ops_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
81c66f85-a1d8-463d-9347-1c571f71ce56 | cpp | tensorflow/tensorflow | uniform_quantized_dot_ops | tensorflow/core/kernels/uniform_quant_ops/uniform_quantized_dot_ops.cc | tensorflow/core/kernels/uniform_quant_ops/uniform_quantized_dot_ops_test.cc | #include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/kernels/uniform_quant_ops/math_utils.h"
#include "tensorflow/core/kernels/uniform_quant_ops/tensor_utils.h"
namespace tensorflow {
namespace {
using tensorflow::errors::InvalidArgument;
Status DotInputShapeValid(const TensorShape& lhs_shape,
const TensorShape& rhs_shape) {
if (lhs_shape.dims() != 2) {
return InvalidArgument("lhs rank must be 2, but given lhs shape ",
lhs_shape.DebugString());
}
if (rhs_shape.dims() != 2) {
return InvalidArgument("rhs rank must be 2, but given rhs shape ",
rhs_shape.DebugString());
}
if (lhs_shape.dim_size(1) != rhs_shape.dim_size(0)) {
return InvalidArgument(
"lhs.dim_size(1) and rhs.dim_size(0) must be equal, but given lhs "
"shape ",
lhs_shape.DebugString(), " and rhs shape ", rhs_shape.DebugString());
}
return absl::OkStatus();
}
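// Generic (batches x accum_depth) x (accum_depth x output_depth) dot.
// acc_f produces each int32 partial product (typically after subtracting
// zero points) and output_f converts the finished accumulator into an output
// element (requantization or float rescaling).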
template <typename Tlhs, typename Trhs, typename Tout, typename AccF,
typename OutputF>
void DotWithAccFunctionAndOutputFunction(const Tensor& lhs, const Tensor& rhs,
Tensor& output, const AccF& acc_f,
const OutputF& output_f) {
const int64_t batches = output.dim_size(0);
const int64_t output_depth = output.dim_size(1);
const int64_t accum_depth = rhs.dim_size(0);
const Tlhs* lhs_data = lhs.flat<Tlhs>().data();
const Trhs* rhs_data = rhs.flat<Trhs>().data();
Tout* output_data = output.flat<Tout>().data();
for (int64_t b = 0; b < batches; ++b) {
for (int64_t out_c = 0; out_c < output_depth; ++out_c) {
int32_t acc = 0;
for (int64_t d = 0; d < accum_depth; ++d) {
acc += acc_f(lhs_data[b * accum_depth + d],
rhs_data[d * output_depth + out_c], b, out_c);
}
output_data[b * output_depth + out_c] = output_f(acc, b, out_c);
}
}
}
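// Per-tensor lhs x per-tensor rhs: the single effective rescale factor
// lhs_scale * rhs_scale / output_scale is folded into one quantized
// multiplier + shift pair before the dot loop runs.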
template <typename Tin, typename Tout>
Status EvalLhsPerTensorAndRhsPerTensorQuantizedDot(
const Tensor& lhs, const Tensor& rhs, float lhs_scale,
int32_t lhs_zero_point, float rhs_scale, int32_t rhs_zero_point,
float output_scale, int32_t output_zero_point,
int output_quantization_min_val, int output_quantization_max_val,
Tensor& output) {
const double effective_multiplier =
static_cast<double>(lhs_scale) * rhs_scale / output_scale;
int32_t effective_quantized_multiplier;
int effective_shift;
TF_RETURN_IF_ERROR(QuantizeMultiplier(
effective_multiplier, effective_quantized_multiplier, effective_shift));
DotWithAccFunctionAndOutputFunction<Tin, Tin, Tout>(
lhs, rhs, output,
[lhs_zero_point, rhs_zero_point](Tin lhs_val, Tin rhs_val, int64_t b,
int64_t out_c) {
return static_cast<Tout>(
(static_cast<int32_t>(lhs_val) - lhs_zero_point) *
(static_cast<int32_t>(rhs_val) - rhs_zero_point));
},
[effective_quantized_multiplier, effective_shift, output_zero_point,
output_quantization_min_val,
output_quantization_max_val](int32_t acc, int64_t b, int64_t out_c) {
return AffineRequantizeWithQuantizedMultiplierAndShift<int32_t, Tout>(
acc, effective_quantized_multiplier, effective_shift,
            0, output_zero_point,  // acc is already zero-point-free
output_quantization_min_val, output_quantization_max_val);
});
return absl::OkStatus();
}
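// Per-tensor lhs x per-channel rhs: one quantized multiplier + shift pair is
// precomputed per output channel; the output itself may be quantized either
// per-tensor (scalar scales) or per-channel.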
template <typename Tin, typename Tout>
Status EvalLhsPerTensorAndRhsPerChannelQuantizedDot(
OpKernelContext* context, const Tensor& lhs, const Tensor& rhs,
float lhs_scale, int32_t lhs_zero_point, const Tensor& rhs_scales,
const Tensor& rhs_zero_points, const Tensor& output_scales,
const Tensor& output_zero_points, int output_quantization_min_val,
int output_quantization_max_val, Tensor& output) {
const int output_depth = output.dim_size(1);
const float* rhs_scales_data = rhs_scales.flat<float>().data();
const int32_t* rhs_zero_points_data = rhs_zero_points.flat<int32_t>().data();
Tensor effective_quantized_multipliers;
TF_RETURN_IF_ERROR(context->allocate_temp(DT_INT32, rhs_scales.shape(),
&effective_quantized_multipliers));
Tensor effective_shifts;
TF_RETURN_IF_ERROR(
context->allocate_temp(DT_INT32, rhs_scales.shape(), &effective_shifts));
int32_t* effective_quantized_multipliers_data =
effective_quantized_multipliers.flat<int32_t>().data();
int32_t* effective_shifts_data = effective_shifts.flat<int32_t>().data();
const bool is_output_scales_scalar = output_scales.dims() == 0;
if (!is_output_scales_scalar) {
const float* output_scales_data = output_scales.flat<float>().data();
for (int64_t out_c = 0; out_c < output_depth; ++out_c) {
const double effective_multiplier = static_cast<double>(lhs_scale) *
rhs_scales_data[out_c] /
output_scales_data[out_c];
TF_RETURN_IF_ERROR(QuantizeMultiplier(
effective_multiplier, effective_quantized_multipliers_data[out_c],
effective_shifts_data[out_c]));
}
} else {
const float output_scale = output_scales.scalar<float>()();
for (int64_t out_c = 0; out_c < output_depth; ++out_c) {
const double effective_multiplier = static_cast<double>(lhs_scale) *
rhs_scales_data[out_c] / output_scale;
TF_RETURN_IF_ERROR(QuantizeMultiplier(
effective_multiplier, effective_quantized_multipliers_data[out_c],
effective_shifts_data[out_c]));
}
}
const int32_t* output_zero_points_data =
output_zero_points.flat<int32_t>().data();
DotWithAccFunctionAndOutputFunction<Tin, Tin, Tout>(
lhs, rhs, output,
[lhs_zero_point, rhs_zero_points_data](Tin lhs_val, Tin rhs_val,
int64_t b, int64_t out_c) {
return (static_cast<int32_t>(lhs_val) - lhs_zero_point) *
(static_cast<int32_t>(rhs_val) - rhs_zero_points_data[out_c]);
},
[effective_quantized_multipliers_data, effective_shifts_data,
output_zero_points_data, output_quantization_min_val,
output_quantization_max_val,
is_output_scales_scalar](int32_t acc, int64_t b, int64_t out_c) {
return AffineRequantizeWithQuantizedMultiplierAndShift<int32_t, Tout>(
acc, effective_quantized_multipliers_data[out_c],
effective_shifts_data[out_c],
            0,  // acc is already zero-point-free
output_zero_points_data[is_output_scales_scalar ? 0 : out_c],
output_quantization_min_val, output_quantization_max_val);
});
return absl::OkStatus();
}
template <typename Tlhs, typename Trhs>
void EvalLhsPerBatchAndRhsPerTensorQuantizedDot(
OpKernelContext* context, const Tensor& lhs, const Tensor& rhs,
const Tensor& lhs_scales, const Tensor& lhs_zero_points, float rhs_scale,
int32_t rhs_zero_point, Tensor& output) {
const float* lhs_scales_data = lhs_scales.flat<float>().data();
const int32_t* lhs_zero_points_data = lhs_zero_points.flat<int32_t>().data();
DotWithAccFunctionAndOutputFunction<Tlhs, Trhs, float>(
lhs, rhs, output,
[lhs_zero_points_data, rhs_zero_point](Tlhs lhs_val, Trhs rhs_val,
int64_t b, int64_t out_c) {
return (static_cast<int32_t>(lhs_val) - lhs_zero_points_data[b]) *
(static_cast<int32_t>(rhs_val) - rhs_zero_point);
},
[lhs_scales_data, rhs_scale](int32_t acc, int64_t b, int64_t out_c) {
return acc * lhs_scales_data[b] * rhs_scale;
});
}
template <typename Tlhs, typename Trhs>
void EvalLhsPerBatchAndRhsPerChannelQuantizedDot(
const Tensor& lhs, const Tensor& rhs, const Tensor& lhs_scales,
const Tensor& lhs_zero_points, const Tensor& rhs_scales,
const Tensor& rhs_zero_points, Tensor& output) {
const float* lhs_scales_data = lhs_scales.flat<float>().data();
const int32_t* lhs_zero_points_data = lhs_zero_points.flat<int32_t>().data();
const float* rhs_scales_data = rhs_scales.flat<float>().data();
const int32_t* rhs_zero_points_data = rhs_zero_points.flat<int32_t>().data();
DotWithAccFunctionAndOutputFunction<Tlhs, Trhs, float>(
lhs, rhs, output,
[lhs_zero_points_data, rhs_zero_points_data](Tlhs lhs_val, Trhs rhs_val,
int64_t b, int64_t out_c) {
return (static_cast<int32_t>(lhs_val) - lhs_zero_points_data[b]) *
(static_cast<int32_t>(rhs_val) - rhs_zero_points_data[out_c]);
},
[lhs_scales_data, rhs_scales_data](int32_t acc, int64_t b,
int64_t out_c) {
return acc * lhs_scales_data[b] * rhs_scales_data[out_c];
});
}
template <typename Tin, typename Tout>
Status EvalQuantizedDot(OpKernelContext* context, const Tensor& lhs,
const Tensor& rhs, const Tensor& lhs_scales,
const Tensor& lhs_zero_points, const Tensor& rhs_scales,
const Tensor& rhs_zero_points,
const Tensor& output_scales,
const Tensor& output_zero_points,
int output_quantization_min_val,
int output_quantization_max_val, Tensor& output) {
const float lhs_scale = lhs_scales.scalar<float>()();
const int32_t lhs_zero_point = lhs_zero_points.scalar<int32_t>()();
if (rhs_scales.dims() != 0) {
return EvalLhsPerTensorAndRhsPerChannelQuantizedDot<Tin, Tout>(
context, lhs, rhs, lhs_scale, lhs_zero_point, rhs_scales,
rhs_zero_points, output_scales, output_zero_points,
output_quantization_min_val, output_quantization_max_val, output);
} else {
const float rhs_scale = rhs_scales.scalar<float>()();
const int32_t rhs_zero_point = rhs_zero_points.scalar<int32_t>()();
const float output_scale = output_scales.scalar<float>()();
const int32_t output_zero_point = output_zero_points.scalar<int32_t>()();
return EvalLhsPerTensorAndRhsPerTensorQuantizedDot<Tin, Tout>(
lhs, rhs, lhs_scale, lhs_zero_point, rhs_scale, rhs_zero_point,
output_scale, output_zero_point, output_quantization_min_val,
output_quantization_max_val, output);
}
}
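// Hybrid path: the float lhs is dynamically quantized to qint8 one batch row
// at a time, multiplied against the statically quantized rhs, and the int32
// accumulator is rescaled back to float.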
template <typename Trhs>
Status EvalHybridDot(OpKernelContext* context, const Tensor& lhs,
const Tensor& rhs, const Tensor& rhs_scales,
const Tensor& rhs_zero_points, Tensor& output) {
const int64_t batches = lhs.dim_size(0);
Tensor lhs_quantized;
TF_RETURN_IF_ERROR(
context->allocate_temp(DT_QINT8, lhs.shape(), &lhs_quantized));
Tensor lhs_scales;
TF_RETURN_IF_ERROR(context->allocate_temp(DT_FLOAT, {batches}, &lhs_scales));
Tensor lhs_zero_points;
TF_RETURN_IF_ERROR(
context->allocate_temp(DT_INT32, {batches}, &lhs_zero_points));
float* lhs_scales_data = lhs_scales.flat<float>().data();
int32_t* lhs_zero_points_data = lhs_zero_points.flat<int32_t>().data();
auto lhs_tensor = lhs.template tensor<float, 2>();
auto lhs_quantized_tensor = lhs_quantized.template tensor<qint8, 2>();
for (int64_t b = 0; b < batches; ++b) {
TF_RETURN_IF_ERROR(AsymmetricQuantize(
lhs_tensor.template chip<0>(b),
        -128, 127,  // quantize into the full qint8 range
        lhs_scales_data[b],
lhs_zero_points_data[b], lhs_quantized_tensor.template chip<0>(b)));
}
if (rhs_scales.dims() != 0) {
EvalLhsPerBatchAndRhsPerChannelQuantizedDot<qint8, Trhs>(
lhs_quantized, rhs, lhs_scales, lhs_zero_points, rhs_scales,
rhs_zero_points, output);
} else {
EvalLhsPerBatchAndRhsPerTensorQuantizedDot<qint8, Trhs>(
context, lhs_quantized, rhs, lhs_scales, lhs_zero_points,
rhs_scales.scalar<float>()(), rhs_zero_points.scalar<int32_t>()(),
output);
}
return absl::OkStatus();
}
}
template <typename Tin, typename Tout>
class UniformQuantizedDotOp : public OpKernel {
public:
explicit UniformQuantizedDotOp(OpKernelConstruction* context)
: OpKernel(context) {
OP_REQUIRES(context, (std::is_same<Tin, qint8>()),
InvalidArgument("Unsupported lhs/rhs type."));
OP_REQUIRES(context, (std::is_same<Tout, qint32>()),
InvalidArgument("Unsupported output type."));
OP_REQUIRES_OK(context, context->GetAttr("output_quantization_min_val",
&output_quantization_min_val_));
OP_REQUIRES_OK(context, context->GetAttr("output_quantization_max_val",
&output_quantization_max_val_));
int lhs_quantization_axis;
OP_REQUIRES_OK(context, context->GetAttr("lhs_quantization_axis",
&lhs_quantization_axis));
OP_REQUIRES(
context, (lhs_quantization_axis == -1),
InvalidArgument("lhs_quantization_axis Attr must be -1 (per-tensor)."));
int rhs_quantization_axis;
OP_REQUIRES_OK(context, context->GetAttr("rhs_quantization_axis",
&rhs_quantization_axis));
OP_REQUIRES(context,
(rhs_quantization_axis == 1 || rhs_quantization_axis == -1),
InvalidArgument("rhs_quantization_axis Attr must be 1 "
"(per-channel) or -1 (per-tensor)."));
int output_quantization_axis;
OP_REQUIRES_OK(context, context->GetAttr("output_quantization_axis",
&output_quantization_axis));
OP_REQUIRES(
context,
(output_quantization_axis == 1 || output_quantization_axis == -1),
InvalidArgument("output_quantization_axis Attr must be 1 "
"(per-channel) or -1 (per-tensor)."));
}
void Compute(OpKernelContext* context) override {
const Tensor& lhs = context->input(0);
const Tensor& rhs = context->input(1);
const Tensor& lhs_scales = context->input(2);
const Tensor& lhs_zero_points = context->input(3);
const Tensor& rhs_scales = context->input(4);
const Tensor& rhs_zero_points = context->input(5);
const Tensor& output_scales = context->input(6);
const Tensor& output_zero_points = context->input(7);
OP_REQUIRES(context, (AllElementsPositive<float>(lhs_scales)),
InvalidArgument("lhs scales elements must be all positive."));
OP_REQUIRES(context, (AllElementsPositive<float>(rhs_scales)),
InvalidArgument("rhs scales elements must be all positive."));
OP_REQUIRES(
context, (AllElementsPositive<float>(output_scales)),
InvalidArgument("output scales elements must be all positive."));
OP_REQUIRES_OK(context, DotInputShapeValid(lhs.shape(), rhs.shape()));
OP_REQUIRES(
context,
(lhs_scales.IsSameSize(lhs_zero_points) && lhs_scales.dims() == 0),
InvalidArgument(
"lhs scales/zero_points must be all scalar tensors. Given: ",
lhs_scales.shape().DebugString(),
lhs_zero_points.shape().DebugString()));
OP_REQUIRES_OK(context,
QuantizationAxisAndShapeValid(
rhs.shape(), rhs_scales.shape(), rhs_zero_points.shape(),
rhs_scales.dims() == 0 ? -1 : 1));
TensorShape output_shape({lhs.dim_size(0), rhs.dim_size(1)});
OP_REQUIRES_OK(
context,
QuantizationAxisAndShapeValid(
output_shape, output_scales.shape(), output_zero_points.shape(),
output_scales.dims() == 0 ? -1 : 1));
OP_REQUIRES(
context, (rhs_scales.dims() > 0 || output_scales.dims() == 0),
InvalidArgument(
"If rhs is per-tensor quantized, output must be also per-tensor "
"quantized. Given rhs scales and zero_points of shape ",
rhs_scales.shape().DebugString(),
" but given output scales and zero_points of shape ",
output_scales.shape().DebugString()));
Tensor* output = nullptr;
OP_REQUIRES_OK(
context,
context->allocate_output(
0, TensorShape({lhs.dim_size(0), rhs.dim_size(1)}), &output));
OP_REQUIRES_OK(
context, EvalQuantizedDot<Tin, Tout>(
context, lhs, rhs, lhs_scales, lhs_zero_points, rhs_scales,
rhs_zero_points, output_scales, output_zero_points,
output_quantization_min_val_, output_quantization_max_val_,
*output));
}
private:
int output_quantization_min_val_;
int output_quantization_max_val_;
};
template <typename Tlhs, typename Trhs, typename Tout>
class UniformQuantizedDotHybridOp : public OpKernel {
public:
explicit UniformQuantizedDotHybridOp(OpKernelConstruction* context)
: OpKernel(context) {
OP_REQUIRES(context, (std::is_same<Tlhs, float>()),
InvalidArgument("Unsupported lhs type."));
OP_REQUIRES(context, (std::is_same<Trhs, qint8>()),
InvalidArgument("Unsupported rhs type."));
OP_REQUIRES(context, (std::is_same<Tout, float>()),
InvalidArgument("Unsupported output type."));
int rhs_quantization_axis;
OP_REQUIRES_OK(context, context->GetAttr("rhs_quantization_axis",
&rhs_quantization_axis));
OP_REQUIRES(context,
(rhs_quantization_axis == 1 || rhs_quantization_axis == -1),
InvalidArgument("rhs_quantization_axis Attr must be 1 "
"(per-channel) or -1 (per-tensor)."));
}
void Compute(OpKernelContext* context) override {
const Tensor& lhs = context->input(0);
const Tensor& rhs = context->input(1);
const Tensor& rhs_scales = context->input(2);
const Tensor& rhs_zero_points = context->input(3);
OP_REQUIRES_OK(context, DotInputShapeValid(lhs.shape(), rhs.shape()));
OP_REQUIRES_OK(context,
QuantizationAxisAndShapeValid(
rhs.shape(), rhs_scales.shape(), rhs_zero_points.shape(),
rhs_scales.dims() == 0 ? -1 : 1));
OP_REQUIRES(context, AllElementsPositive<float>(rhs_scales),
InvalidArgument("rhs scales elements must be all positive."));
Tensor* output = nullptr;
OP_REQUIRES_OK(
context,
context->allocate_output(
0, TensorShape({lhs.dim_size(0), rhs.dim_size(1)}), &output));
OP_REQUIRES_OK(context, EvalHybridDot<Trhs>(context, lhs, rhs, rhs_scales,
rhs_zero_points, *output));
}
};
REGISTER_KERNEL_BUILDER(Name("UniformQuantizedDot")
.Device(DEVICE_CPU)
.TypeConstraint<qint8>("Tin")
.TypeConstraint<qint32>("Tout"),
UniformQuantizedDotOp<qint8, qint32>);
REGISTER_KERNEL_BUILDER(Name("UniformQuantizedDotHybrid")
.Device(DEVICE_CPU)
.TypeConstraint<float>("Tlhs")
.TypeConstraint<qint8>("Trhs")
.TypeConstraint<float>("Tout"),
UniformQuantizedDotHybridOp<float, qint8, float>);
} | #include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
namespace tensorflow {
class UniformQuantizedDotTest : public OpsTestBase {
protected:
};
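// lhs uses scale 0.5 / zero point 1 and rhs uses scale 2.0 / zero point 2, so
// the effective rescale into the output (scale 0.25, zero point -20) is
// (0.5 * 2.0) / 0.25 = 4; e.g. the first output element is 4 * 2 + (-20) = -12.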
TEST_F(UniformQuantizedDotTest, PerTensorQuantized) {
TF_ASSERT_OK(
NodeDefBuilder("test", "UniformQuantizedDot")
.Input(FakeInput(DT_QINT8))
.Input(FakeInput(DT_QINT8))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Attr("Tin", DT_QINT8)
.Attr("Tout", DT_QINT32)
.Attr("lhs_quantization_min_val", -128)
.Attr("lhs_quantization_max_val", 127)
.Attr("rhs_quantization_min_val", -128)
.Attr("rhs_quantization_max_val", 127)
.Attr("output_quantization_min_val",
static_cast<int32_t>(-2147483648))
.Attr("output_quantization_max_val", static_cast<int32_t>(2147483647))
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddInputFromArray<qint8>(TensorShape({2, 2}), {1, 2, 3, 4});
AddInputFromArray<qint8>(TensorShape({2, 3}), {1, 2, 3, 4, 5, 6});
AddInputFromArray<float>(TensorShape({}), {0.5});
AddInputFromArray<int32>(TensorShape({}), {1});
AddInputFromArray<float>(TensorShape({}), {2.0});
AddInputFromArray<int32>(TensorShape({}), {2});
AddInputFromArray<float>(TensorShape({}), {0.25});
AddInputFromArray<int32>(TensorShape({}), {-20});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_QINT32, TensorShape({2, 3}));
test::FillValues<qint32>(&expected, {-12, -8, -4, -4, 16, 36});
test::ExpectTensorEqual<qint32>(expected, *GetOutput(0));
}
TEST_F(UniformQuantizedDotTest, PerChannelQuantized) {
TF_ASSERT_OK(
NodeDefBuilder("test", "UniformQuantizedDot")
.Input(FakeInput(DT_QINT8))
.Input(FakeInput(DT_QINT8))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Attr("Tin", DT_QINT8)
.Attr("Tout", DT_QINT32)
.Attr("lhs_quantization_min_val", -128)
.Attr("lhs_quantization_max_val", 127)
.Attr("rhs_quantization_min_val", -128)
.Attr("rhs_quantization_max_val", 127)
.Attr("rhs_quantization_axis", 1)
.Attr("output_quantization_min_val",
static_cast<int32_t>(-2147483648))
.Attr("output_quantization_max_val", static_cast<int32_t>(2147483647))
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddInputFromArray<qint8>(TensorShape({2, 2}), {1, 2, 3, 4});
AddInputFromArray<qint8>(TensorShape({2, 3}), {1, 4, 3, 4, 7, 6});
AddInputFromArray<float>(TensorShape({}), {0.5});
AddInputFromArray<int32>(TensorShape({}), {1});
AddInputFromArray<float>(TensorShape({3}), {2.0, 4.0, 2.0});
AddInputFromArray<int32>(TensorShape({3}), {2, 4, 2});
AddInputFromArray<float>(TensorShape({}), {0.25});
AddInputFromArray<int32>(TensorShape({}), {-20});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_QINT32, TensorShape({2, 3}));
test::FillValues<qint32>(&expected, {-12, 4, -4, -4, 52, 36});
test::ExpectTensorEqual<qint32>(expected, *GetOutput(0));
}
TEST_F(UniformQuantizedDotTest, PerTensorQuantizedEffectiveMultiplierOne) {
TF_ASSERT_OK(
NodeDefBuilder("test", "UniformQuantizedDot")
.Input(FakeInput(DT_QINT8))
.Input(FakeInput(DT_QINT8))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Attr("Tin", DT_QINT8)
.Attr("Tout", DT_QINT32)
.Attr("lhs_quantization_min_val", -128)
.Attr("lhs_quantization_max_val", 127)
.Attr("rhs_quantization_min_val", -128)
.Attr("rhs_quantization_max_val", 127)
.Attr("output_quantization_min_val",
static_cast<int32_t>(-2147483648))
.Attr("output_quantization_max_val", static_cast<int32_t>(2147483647))
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddInputFromArray<qint8>(TensorShape({2, 2}), {1, 2, 3, 4});
AddInputFromArray<qint8>(TensorShape({2, 3}), {1, 2, 3, 4, 5, 6});
AddInputFromArray<float>(TensorShape({}), {0.5});
AddInputFromArray<int32>(TensorShape({}), {1});
AddInputFromArray<float>(TensorShape({}), {0.5});
AddInputFromArray<int32>(TensorShape({}), {2});
AddInputFromArray<float>(TensorShape({}), {0.25});
AddInputFromArray<int32>(TensorShape({}), {-4});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_QINT32, TensorShape({2, 3}));
test::FillValues<qint32>(&expected, {-2, -1, 0, 0, 5, 10});
test::ExpectTensorEqual<qint32>(expected, *GetOutput(0));
}
TEST_F(UniformQuantizedDotTest, PerChannelQuantizedEffectiveMultiplierOne) {
TF_ASSERT_OK(
NodeDefBuilder("test", "UniformQuantizedDot")
.Input(FakeInput(DT_QINT8))
.Input(FakeInput(DT_QINT8))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Attr("Tin", DT_QINT8)
.Attr("Tout", DT_QINT32)
.Attr("lhs_quantization_min_val", -128)
.Attr("lhs_quantization_max_val", 127)
.Attr("rhs_quantization_min_val", -128)
.Attr("rhs_quantization_max_val", 127)
.Attr("rhs_quantization_axis", 1)
.Attr("output_quantization_min_val",
static_cast<int32_t>(-2147483648))
.Attr("output_quantization_max_val", static_cast<int32_t>(2147483647))
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddInputFromArray<qint8>(TensorShape({2, 2}), {1, 2, 3, 4});
AddInputFromArray<qint8>(TensorShape({2, 3}), {1, 2, 3, 4, 5, 6});
AddInputFromArray<float>(TensorShape({}), {0.5});
AddInputFromArray<int32>(TensorShape({}), {1});
AddInputFromArray<float>(TensorShape({3}), {0.5, 1.0, 0.5});
AddInputFromArray<int32>(TensorShape({3}), {2, 4, 2});
AddInputFromArray<float>(TensorShape({3}), {0.25, 0.5, 0.25});
AddInputFromArray<int32>(TensorShape({3}), {4, 8, 4});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_QINT32, TensorShape({2, 3}));
test::FillValues<qint32>(&expected, {6, 9, 8, 8, 7, 18});
test::ExpectTensorEqual<qint32>(expected, *GetOutput(0));
}
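// Hybrid test: the float lhs is quantized inside the kernel, so results are
// compared with ExpectClose rather than exact equality.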
TEST_F(UniformQuantizedDotTest, HybridPerTensorQuantized) {
TF_ASSERT_OK(NodeDefBuilder("test", "UniformQuantizedDotHybrid")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_QINT8))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Attr("Tlhs", DT_FLOAT)
.Attr("Trhs", DT_QINT8)
.Attr("Tout", DT_FLOAT)
.Attr("rhs_quantization_min_val", -128)
.Attr("rhs_quantization_max_val", 127)
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddInputFromArray<float>(TensorShape({2, 2}), {-32.2, -12.1, 10.7, 11.6});
AddInputFromArray<qint8>(TensorShape({2, 3}), {1, 2, 3, 4, 5, 6});
AddInputFromArray<float>(TensorShape({}), {2.0});
AddInputFromArray<int32>(TensorShape({}), {2});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({2, 3}));
test::FillValues<float>(&expected, {16.0, -72.6, -161.2, 25.0, 69.6, 114.2});
test::ExpectClose(expected, *GetOutput(0), 0.1, 0.01);
}
TEST_F(UniformQuantizedDotTest, HybridPerChannelQuantized) {
TF_ASSERT_OK(NodeDefBuilder("test", "UniformQuantizedDotHybrid")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_QINT8))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Attr("Tlhs", DT_FLOAT)
.Attr("Trhs", DT_QINT8)
.Attr("Tout", DT_FLOAT)
.Attr("rhs_quantization_min_val", -128)
.Attr("rhs_quantization_max_val", 127)
.Attr("rhs_quantization_axis", 1)
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddInputFromArray<float>(TensorShape({2, 2}), {-32.2, -12.1, 10.7, 11.6});
AddInputFromArray<qint8>(TensorShape({2, 3}), {1, 2, 3, 4, 5, 6});
AddInputFromArray<float>(TensorShape({3}), {2.0, 4.0, 2.0});
AddInputFromArray<int32>(TensorShape({3}), {2, 4, 2});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({2, 3}));
test::FillValues<float>(&expected, {16.0, 209.2, -161.2, 25.0, -39.2, 114.2});
test::ExpectClose(expected, *GetOutput(0), 0.1, 0.01);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/uniform_quant_ops/uniform_quantized_dot_ops.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/uniform_quant_ops/uniform_quantized_dot_ops_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
7d875bf1-7d72-49ea-bcaf-436820c6b061 | cpp | tensorflow/tensorflow | gemm_algorithm_picker | third_party/xla/xla/service/gpu/autotuning/gemm_algorithm_picker.cc | third_party/xla/xla/service/gpu/autotuning/gemm_algorithm_picker_test.cc | #include "xla/service/gpu/autotuning/gemm_algorithm_picker.h"
#include <cstddef>
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <variant>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
#include "absl/types/span.h"
#include "xla/autotuning.pb.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/gpu/autotuning/autotuner_compile_util.h"
#include "xla/service/gpu/autotuning/autotuner_util.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/buffer_comparator.h"
#include "xla/service/gpu/cublas_cudnn.h"
#include "xla/service/gpu/matmul_utils.h"
#include "xla/service/gpu/stream_executor_util.h"
#include "xla/service/gpu/variant_visitor.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/blas.h"
#include "xla/stream_executor/device_description.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/device_memory_allocator.h"
#include "xla/stream_executor/gpu/redzone_allocator.h"
#include "xla/tsl/util/proto/proto_utils.h"
#include "xla/util.h"
#include "xla/xla.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
#include "tsl/profiler/lib/scoped_annotation.h"
namespace xla {
namespace gpu {
namespace {
using se::gpu::BlasLt;
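// Maps the GemmBackendConfig epilogue enum onto the corresponding BlasLt
// epilogue; unsupported values are rejected.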
absl::StatusOr<BlasLt::Epilogue> AsBlasLtEpilogue(
GemmBackendConfig_Epilogue epilogue) {
switch (epilogue) {
case GemmBackendConfig::DEFAULT:
return BlasLt::Epilogue::kDefault;
case GemmBackendConfig::RELU:
return BlasLt::Epilogue::kReLU;
case GemmBackendConfig::GELU:
return BlasLt::Epilogue::kGELU;
case GemmBackendConfig::GELU_AUX:
return BlasLt::Epilogue::kGELUWithAux;
case GemmBackendConfig::BIAS:
return BlasLt::Epilogue::kBias;
case GemmBackendConfig::BIAS_RELU:
return BlasLt::Epilogue::kBiasThenReLU;
case GemmBackendConfig::BIAS_GELU:
return BlasLt::Epilogue::kBiasThenGELU;
case GemmBackendConfig::BIAS_GELU_AUX:
return BlasLt::Epilogue::kBiasThenGELUWithAux;
default:
return Internal("Unsupported Epilogue.");
}
}
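// Picks the fastest algorithm for a GEMM instruction by benchmarking the
// candidates on the device. BlasLt matmuls take the TuneGpuBlasLt path; plain
// BLAS GEMMs take TuneGpuBlas. Deviceless configs short-circuit to an empty
// result.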
class GemmAutotuner {
const AutotuneConfig& autotune_config_;
RedzoneBuffers rz_buffers_;
se::Stream* stream_ = nullptr;
bool deterministic_ops_ = false;
size_t solutions_limit_ = 0;
size_t num_algorithms_left_ = 0;
public:
explicit GemmAutotuner(const AutotuneConfig& autotune_config)
: autotune_config_(autotune_config) {}
const AutotuneConfig& config() const { return autotune_config_; }
size_t num_algorithms_left() const { return num_algorithms_left_; }
absl::StatusOr<AutotuneResult> operator()(const HloInstruction* gemm,
const AutotuneCacheKey& key) {
num_algorithms_left_ = 0;
if (autotune_config_.IsDeviceless()) {
return AutotuneResult{};
}
VLOG(3) << "Starting autotune of GemmThunk " << gemm->ToString();
TF_ASSIGN_OR_RETURN(stream_, autotune_config_.GetStream());
const DebugOptions& debug_options =
gemm->GetModule()->config().debug_options();
deterministic_ops_ = RequireDeterminism(gemm->GetModule()->config());
solutions_limit_ = debug_options.xla_gpu_autotune_max_solutions();
TF_ASSIGN_OR_RETURN(auto gemm_config, GemmConfig::For(gemm));
absl::MutexLock gpu_lock(&GetGpuMutex(stream_->parent()));
TF_ASSIGN_OR_RETURN(rz_buffers_, RedzoneBuffers::FromInstruction(
*gemm, autotune_config_, debug_options,
RedzoneBuffers::kAllInputsAllOutputs));
return IsCublasLtMatmul(*gemm) || IsCublasLtMatmulF8(*gemm)
? TuneGpuBlasLt(gemm, gemm_config)
: TuneGpuBlas(gemm, gemm_config);
}
private:
se::DeviceMemoryBase LhsBuffer() { return rz_buffers_.input_buffers().at(0); }
se::DeviceMemoryBase RhsBuffer() { return rz_buffers_.input_buffers().at(1); }
se::DeviceMemoryBase OutputBuffer() {
return rz_buffers_.output_buffers().at(0);
}
const Shape& GetOutputShape(const HloInstruction* gemm) {
return gemm->shape().IsTuple() ? gemm->shape().tuple_shapes(0)
: gemm->shape();
}
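// cuBLASLt path: resolves the epilogue plus any bias/aux buffers from the
// backend config, builds a matmul plan, enumerates up to 128 candidate
// algorithms, and profiles each with an untimed warmup run followed by a
// timed run.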
absl::StatusOr<AutotuneResult> TuneGpuBlasLt(const HloInstruction* gemm,
const GemmConfig& gemm_config) {
auto workspace_buffer =
rz_buffers_.output_buffers().at(gemm->shape().tuple_shapes_size() - 1);
GpuBackendConfig gpu_config =
gemm->backend_config<GpuBackendConfig>().value();
const GemmBackendConfig& backend_config = gpu_config.gemm_backend_config();
bool has_matrix_bias = gemm_config.beta != 0.;
TF_ASSIGN_OR_RETURN(
bool has_vector_bias,
gpublas_lt::EpilogueAddsVectorBias(backend_config.epilogue()));
TF_ASSIGN_OR_RETURN(
bool has_aux_output,
gpublas_lt::EpilogueHasAuxiliaryOutput(backend_config.epilogue()));
TF_ASSIGN_OR_RETURN(auto epilogue,
AsBlasLtEpilogue(backend_config.epilogue()));
se::DeviceMemoryBase a_scale_buffer, b_scale_buffer, c_scale_buffer,
d_scale_buffer, d_amax_buffer, bias_buffer, aux_buffer;
if (has_vector_bias) {
bias_buffer = rz_buffers_.input_buffers().at(has_matrix_bias ? 3 : 2);
}
if (has_aux_output) {
aux_buffer = rz_buffers_.output_buffers().at(1);
}
TF_ASSIGN_OR_RETURN(auto plan,
BlasLt::GetMatmulPlan(stream_, gemm_config, epilogue));
TF_ASSIGN_OR_RETURN(
auto algorithms,
        plan->GetAlgorithms(128, workspace_buffer.size()));
auto tuned_func = [&](const BlasLt::MatmulAlgorithm& algorithm)
-> absl::StatusOr<se::blas::ProfileResult> {
TF_RETURN_IF_ERROR(plan->ExecuteOnStream(
stream_, LhsBuffer(), RhsBuffer(), OutputBuffer(), OutputBuffer(),
bias_buffer, aux_buffer, a_scale_buffer, b_scale_buffer,
c_scale_buffer, d_scale_buffer, d_amax_buffer, algorithm,
workspace_buffer));
se::blas::ProfileResult profile_result;
profile_result.set_warmup_run_executed(true);
TF_RETURN_IF_ERROR(plan->ExecuteOnStream(
stream_, LhsBuffer(), RhsBuffer(), OutputBuffer(), OutputBuffer(),
bias_buffer, aux_buffer, a_scale_buffer, b_scale_buffer,
c_scale_buffer, d_scale_buffer, d_amax_buffer, algorithm,
workspace_buffer, &profile_result));
return std::move(profile_result);
};
return GetBestAlgorithm<BlasLt::MatmulAlgorithm>(
        gemm, algorithms, gemm_config.beta, /*return_algo_index=*/true,
        tuned_func);
}
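// Legacy cuBLAS path: queries the BLAS library for the algorithms matching
// these matrix descriptors and profiles each one the same way (an untimed
// warmup run, then a timed run).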
absl::StatusOr<AutotuneResult> TuneGpuBlas(const HloInstruction* gemm,
const GemmConfig& gemm_config) {
auto workspace_buffer = rz_buffers_.output_buffers().at(1);
std::vector<se::blas::AlgorithmType> algorithms;
TF_ASSIGN_OR_RETURN(GemmConfig::DescriptorsTuple desc,
gemm_config.GetMatrixDescriptors(
LhsBuffer(), RhsBuffer(), OutputBuffer()));
auto blas = stream_->parent()->AsBlas();
if (blas == nullptr) {
return absl::InternalError("No BLAS support for stream");
}
blas->GetBlasGemmAlgorithms(stream_, desc.lhs, desc.rhs, &desc.output,
&gemm_config.alpha, &gemm_config.beta,
&algorithms);
auto tuned_func = [&](const se::blas::AlgorithmType& algorithm)
-> absl::StatusOr<se::blas::ProfileResult> {
static_cast<void>(RunGemm(gemm_config, LhsBuffer(), RhsBuffer(),
OutputBuffer(), workspace_buffer,
deterministic_ops_, stream_, algorithm));
se::blas::ProfileResult profile_result;
profile_result.set_warmup_run_executed(true);
TF_RETURN_IF_ERROR(RunGemm(gemm_config, LhsBuffer(), RhsBuffer(),
OutputBuffer(), workspace_buffer,
deterministic_ops_, stream_, algorithm,
&profile_result));
return std::move(profile_result);
};
return GetBestAlgorithm<se::blas::AlgorithmType>(
        gemm, algorithms, gemm_config.beta, /*return_algo_index=*/false,
        tuned_func);
}
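// Benchmarks every candidate via `run_benchmark` and picks the fastest one
// that survives the checks: invalid profiles are disqualified, redzone
// violations are flagged, and (when correctness checking is on) each output
// is compared against the first passing algorithm's result. When beta != 0
// the output buffer doubles as an input, so it is re-initialized before
// each run if the config asks for it. With `return_algo_index` set, the
// winner is reported as its index into `algorithms` (the cuBLASLt
// convention) rather than the BLAS algorithm id.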
template <typename AlgoT, typename TunedFunc>
absl::StatusOr<AutotuneResult> GetBestAlgorithm(
const HloInstruction* gemm, absl::Span<const AlgoT> algorithms,
double beta, bool return_algo_index, TunedFunc&& run_benchmark) {
static_assert(std::is_invocable_r_v<absl::StatusOr<se::blas::ProfileResult>,
TunedFunc, const AlgoT&>,
"Tuned function has incorrect prototype!");
if (!stream_->parent()->SynchronizeAllActivity()) {
return Internal("Failed to synchronize GPU for autotuning.");
}
tsl::profiler::ScopedAnnotation annotation([&] {
return absl::StrFormat("XlaAutotunerMeasurement:#hlo_op=%s#",
gemm->name());
});
auto& hlo_module_config = gemm->GetModule()->mutable_config();
const auto& output_shape = GetOutputShape(gemm);
se::DeviceMemoryBase reference_buffer;
if (autotune_config_.should_check_correctness()) {
TF_ASSIGN_OR_RETURN(reference_buffer,
rz_buffers_.RedzoneAllocator().AllocateBytes(
ShapeUtil::ByteSizeOf(output_shape)));
}
BufferComparator comparator(
output_shape,
hlo_module_config.debug_options().xla_gpu_autotune_gemm_rtol(),
!autotune_config_.should_skip_wrong_results());
std::vector<AutotuneResult> results;
results.reserve(algorithms.size());
std::optional<int64_t> reference_algorithm;
auto num = algorithms.size();
if (solutions_limit_ > 0) num = std::min(num, solutions_limit_);
for (size_t i = 0; i < num; i++) {
const AlgoT& algorithm = algorithms[i];
if (autotune_config_.should_reinit_output_buffer() && beta != 0) {
int64_t rng_state = 0;
InitializeBuffer(stream_, output_shape.element_type(), &rng_state,
OutputBuffer());
}
TF_ASSIGN_OR_RETURN(auto profile_result, run_benchmark(algorithm));
AutotuneResult& result = results.emplace_back();
result.mutable_gemm()->set_algorithm(profile_result.algorithm());
if (!profile_result.is_valid()) {
result.mutable_failure()->set_kind(AutotuneResult::DISQUALIFIED);
continue;
}
VLOG(2) << "gemm algorithm " << profile_result.algorithm() << " took "
<< profile_result.elapsed_time_in_ms() << "ms";
*result.mutable_run_time() = tsl::proto_utils::ToDurationProto(
absl::Milliseconds(profile_result.elapsed_time_in_ms()));
if (!autotune_config_.should_check_correctness()) {
num_algorithms_left_++;
continue;
}
TF_ASSIGN_OR_RETURN(
se::RedzoneAllocator::RedzoneCheckStatus rz_check_status,
rz_buffers_.RedzoneAllocator().CheckRedzones());
if (!rz_check_status.ok()) {
result.mutable_failure()->set_kind(AutotuneResult::REDZONE_MODIFIED);
*result.mutable_failure()->mutable_msg() =
rz_check_status.RedzoneFailureMsg();
LOG(ERROR) << "Detected out-of-bounds write in gemm buffer";
CHECK(!autotune_config_.should_crash_on_check_failure());
continue;
}
num_algorithms_left_++;
if (!reference_algorithm) {
TF_RETURN_IF_ERROR(stream_->Memcpy(&reference_buffer, OutputBuffer(),
OutputBuffer().size()));
reference_algorithm = profile_result.algorithm();
continue;
}
TF_ASSIGN_OR_RETURN(
bool outputs_match,
comparator.CompareEqual(stream_, OutputBuffer(),
reference_buffer));
if (!outputs_match) {
LOG(ERROR) << "Results mismatch between different GEMM algorithms. "
<< "This is likely a bug/unexpected loss of precision.";
CHECK(!autotune_config_.should_crash_on_check_failure());
auto kind = AutotuneResult::WRONG_RESULT;
if (autotune_config_.should_skip_wrong_results()) {
kind = AutotuneResult::DISQUALIFIED;
num_algorithms_left_--;
}
result.mutable_failure()->set_kind(kind);
result.mutable_failure()->mutable_reference_gemm()->set_algorithm(
*reference_algorithm);
}
}
absl::StatusOr<AutotuneResult> best =
PickBestResult(results, gemm->ToString(), hlo_module_config);
if (best.ok()) {
if (!return_algo_index) return best;
for (size_t i = 0; i < results.size(); ++i) {
if (best->gemm().algorithm() == results[i].gemm().algorithm()) {
best->mutable_gemm()->set_algorithm(i);
return best;
}
}
return Internal("unknown best algorithm");
}
LOG(WARNING) << "Failed to find best cuBLAS algorithm, GEMM performance "
"might be suboptimal: "
<< best.status();
return AutotuneResult{};
}
};
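// Autotunes one GEMM instruction, going through the process-wide autotune
// cache so the measurement lambda only runs on a cache miss. The selected
// algorithm is written back into the backend config only for FP8 cuBLASLt
// matmuls, pre-Ampere CUDA devices, and ROCm devices.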
absl::StatusOr<bool> RunOnInstruction(HloInstruction* gemm,
GemmAutotuner& autotuner) {
VLOG(3) << "Loading the autotune result of GemmThunk " << gemm->ToString();
GpuBackendConfig gpu_config =
gemm->backend_config<GpuBackendConfig>().value();
GemmBackendConfig& backend_config = *gpu_config.mutable_gemm_backend_config();
if (backend_config.alpha_real() == 0.0 &&
backend_config.alpha_imag() == 0.0 && backend_config.beta() == 0.0) {
VLOG(3) << "Skip degenerate gemm instruction auto tuning";
return false;
}
const AutotuneConfig& config = autotuner.config();
AutotuneCacheKey key(config.GetModelStr(), *gemm);
TF_ASSIGN_OR_RETURN(AutotuneResult algorithm,
AutotunerUtil::Autotune(
gemm, config, [&] { return autotuner(gemm, key); }));
auto old_algorithm = backend_config.selected_algorithm();
bool update_algorithm =
IsCublasLtMatmulF8(*gemm) ||
std::visit(VariantVisitor{[](const se::CudaComputeCapability& cc) {
return !cc.IsAtLeast(
se::CudaComputeCapability::AMPERE);
},
[](const se::RocmComputeCapability&) {
return true;
}},
config.GetGpuComputeCapability());
if (update_algorithm) {
int64_t new_algorithm{};
if (algorithm.has_gemm()) {
new_algorithm = algorithm.gemm().algorithm();
} else {
new_algorithm = se::blas::kDefaultAlgorithm;
}
if (new_algorithm == old_algorithm &&
backend_config.has_selected_algorithm()) {
return false;
}
backend_config.set_selected_algorithm(new_algorithm);
TF_RETURN_IF_ERROR(gemm->set_backend_config(gpu_config));
return true;
}
return false;
}
absl::StatusOr<bool> RunOnComputation(HloComputation* computation,
GemmAutotuner& autotuner,
size_t* num_algorithms_left) {
bool changed = false;
for (HloInstruction* instr : computation->instructions()) {
if (IsCublasGemm(*instr)) {
TF_ASSIGN_OR_RETURN(bool result, RunOnInstruction(instr, autotuner));
*num_algorithms_left =
std::max(*num_algorithms_left, autotuner.num_algorithms_left());
changed |= result;
}
}
return changed;
}
}
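// Pass entry point: visits every non-fusion computation and autotunes each
// cuBLAS GEMM custom call; xla_gpu_autotune_level == 0 disables the pass.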
absl::StatusOr<bool> GemmAlgorithmPicker::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
XLA_SCOPED_LOGGING_TIMER(
absl::StrCat("GemmAlgorithmPicker for ", module->name()));
num_algorithms_left_ = 0;
if (module->config().debug_options().xla_gpu_autotune_level() == 0) {
VLOG(2) << "GEMM auto-tuning disabled, GemmAlgorithmPicker returning early";
return false;
}
GemmAutotuner autotuner(config_);
bool changed = false;
for (HloComputation* computation :
module->MakeNonfusionComputations(execution_threads)) {
TF_ASSIGN_OR_RETURN(bool result, RunOnComputation(computation, autotuner,
&num_algorithms_left_));
changed |= result;
}
return changed;
}
}
} | #include "xla/service/gpu/autotuning/gemm_algorithm_picker.h"
#include <cstddef>
#include <cstdint>
#include <string>
#include <string_view>
#include <variant>
#include "absl/log/log.h"
#include "absl/strings/string_view.h"
#include "xla/autotune_results.pb.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/gpu/autotuning/autotuner_util.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/transforms/gemm_rewriter.h"
#include "xla/service/gpu/variant_visitor.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/stream_executor/device_description.h"
#include "xla/stream_executor/platform.h"
#include "xla/stream_executor/semantic_version.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/tsl/protobuf/dnn.pb.h"
#include "xla/xla.pb.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla::gpu {
namespace {
namespace m = ::xla::match;
class GemmAlgorithmPickerTest : public HloTestBase,
public ::testing::WithParamInterface<bool> {
public:
GemmAlgorithmPickerTest() { AutotunerUtil::ClearAutotuneResults(); }
DebugOptions GetDebugOptionsForTest() override {
DebugOptions debug_options = HloTestBase::GetDebugOptionsForTest();
debug_options.set_xla_gpu_enable_cublaslt(GetParam());
debug_options.set_xla_gpu_enable_triton_gemm(false);
return debug_options;
}
se::StreamExecutor* stream_exec() {
return backend().default_stream_executor();
}
const se::DeviceDescription& device_desc() {
return stream_exec()->GetDeviceDescription();
}
const se::GpuComputeCapability& gpu_comp() {
return device_desc().gpu_compute_capability();
}
void SetUp() override {
std::string_view name =
::testing::UnitTest::GetInstance()->current_test_info()->name();
bool blas_get_version = name.rfind("BlasGetVersion") == 0;
std::visit(
VariantVisitor{
[&](const se::CudaComputeCapability& cc) {
if (!blas_get_version && cc.IsAtLeastAmpere()) {
GTEST_SKIP()
<< "Skipping this test for Ampere+ as it is supported "
"and recommended with the Nvidia Volta+ GPUs.";
}
},
[&](const se::RocmComputeCapability& cc) {
if (blas_get_version) {
if (device_desc().runtime_version() <
stream_executor::SemanticVersion{6, 2, 0}) {
GTEST_SKIP()
<< "This API is not available on ROCM 6.1 and below.";
}
} else if (GetDebugOptionsForTest().xla_gpu_enable_cublaslt() &&
!cc.has_hipblaslt()) {
GTEST_SKIP() << "No gpublas-lt support on this architecture!";
}
}},
gpu_comp());
}
};
TEST_P(GemmAlgorithmPickerTest, BlasGetVersion) {
auto* blas = stream_exec()->AsBlas();
ASSERT_TRUE(blas != nullptr);
std::string version;
ASSERT_TRUE(blas->GetVersion(&version).ok());
VLOG(0) << "Blas version: " << version;
ASSERT_TRUE(!version.empty());
}
TEST_P(GemmAlgorithmPickerTest, SkipAlgorithmsWithAccuracyCheck) {
constexpr absl::string_view kHlo = R"(
HloModule module
ENTRY main {
%arg0 = f32[100,100]{1,0} parameter(0)
%arg1 = f32[100,100]{1,0} parameter(1)
ROOT %dot = f32[100,100]{1,0} dot(arg0, arg1), lhs_contracting_dims={1}, rhs_contracting_dims={0}
})";
auto module_cfg = GetModuleConfigForTest();
auto debug_opts = module_cfg.debug_options();
size_t num_left1 = 0, num_left2 = 0;
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kHlo, module_cfg));
{
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
RunHloPass(
GemmRewriter(
gpu_comp(),
stream_executor::SemanticVersion{12, 4, 0}),
module.get()));
AutotuneConfig cfg{DeviceConfig{stream_exec(), nullptr}, debug_opts};
GemmAlgorithmPicker gpicker(cfg);
TF_ASSERT_OK_AND_ASSIGN(changed, RunHloPass(gpicker, module.get()));
num_left1 = gpicker.num_algorithms_left();
if (num_left1 < 2) {
GTEST_SKIP() << "Too few algorithms left after the first step";
}
auto* blas = stream_exec()->AsBlas();
ASSERT_TRUE(blas != nullptr);
TF_ASSERT_OK_AND_ASSIGN(bool is_main_stream, blas->IsMainStreamSet());
if (std::holds_alternative<se::RocmComputeCapability>(gpu_comp())) {
ASSERT_TRUE(is_main_stream);
}
}
AutotunerUtil::ClearAutotuneResults();
{
debug_opts.set_xla_gpu_autotune_gemm_rtol(1e-12);
debug_opts.set_xla_gpu_autotune_level(5);
module->mutable_config().set_debug_options(debug_opts);
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
RunHloPass(
GemmRewriter(
gpu_comp(),
stream_executor::SemanticVersion{12, 4, 0}),
module.get()));
AutotuneConfig cfg{DeviceConfig{stream_exec(), nullptr}, debug_opts};
GemmAlgorithmPicker gpicker(cfg);
TF_ASSERT_OK_AND_ASSIGN(changed, RunHloPass(gpicker, module.get()));
num_left2 = gpicker.num_algorithms_left();
}
ASSERT_TRUE(num_left1 > num_left2);
}
TEST_P(GemmAlgorithmPickerTest, SetAlgorithm) {
constexpr absl::string_view kHlo = R"(
HloModule module
ENTRY main {
%arg0 = f32[100,100]{1,0} parameter(0)
%arg1 = f32[100,100]{1,0} parameter(1)
ROOT %dot = f32[100,100]{1,0} dot(arg0, arg1), lhs_contracting_dims={1}, rhs_contracting_dims={0}
})";
auto module_cfg = GetModuleConfigForTest();
TF_ASSERT_OK_AND_ASSIGN(auto m,
ParseAndReturnVerifiedModule(kHlo, module_cfg));
bool changed = false;
TF_ASSERT_OK_AND_ASSIGN(
changed,
RunHloPass(
GemmRewriter(
gpu_comp(),
stream_executor::SemanticVersion{12, 4, 0}),
m.get()));
changed = false;
DebugOptions opts;
AutotuneConfig cfg{DeviceConfig{stream_exec(), nullptr}, opts};
TF_ASSERT_OK_AND_ASSIGN(changed,
RunHloPass(GemmAlgorithmPicker(cfg), m.get()));
ASSERT_TRUE(changed);
AutotuneResults results;
TF_ASSERT_OK(AutotunerUtil::SerializeAutotuneResults(&results));
ASSERT_EQ(results.results_size(), 1);
auto& result = *results.mutable_results(0)->mutable_result();
int64_t old_algo_id = result.algorithm().algo_id();
int64_t new_algo_id = old_algo_id + 1;
result.mutable_gemm()->set_algorithm(new_algo_id);
AutotunerUtil::ClearAutotuneResults();
TF_ASSERT_OK(AutotunerUtil::LoadAutotuneResults(results));
TF_ASSERT_OK_AND_ASSIGN(m, ParseAndReturnVerifiedModule(kHlo, module_cfg));
changed = false;
TF_ASSERT_OK_AND_ASSIGN(
changed,
RunHloPass(
GemmRewriter(gpu_comp(),
se::SemanticVersion{12, 4, 0}),
m.get()));
changed = false;
TF_ASSERT_OK_AND_ASSIGN(changed,
RunHloPass(GemmAlgorithmPicker(cfg), m.get()));
ASSERT_TRUE(changed);
SCOPED_TRACE(m->ToString());
HloInstruction* dot;
ASSERT_THAT(m->entry_computation()->root_instruction(),
GmockMatch(m::GetTupleElement(m::CustomCall(&dot), 0)));
TF_ASSERT_OK_AND_ASSIGN(GpuBackendConfig gpu_config,
dot->backend_config<GpuBackendConfig>());
const GemmBackendConfig& config = gpu_config.gemm_backend_config();
EXPECT_EQ(config.selected_algorithm(), new_algo_id);
}
TEST_P(GemmAlgorithmPickerTest, GetAlgorithmWithoutDevice) {
constexpr absl::string_view kHlo = R"(
HloModule module
ENTRY main {
%arg0 = f32[100,100]{1,0} parameter(0)
%arg1 = f32[100,100]{1,0} parameter(1)
ROOT %dot = f32[100,100]{1,0} dot(arg0, arg1), lhs_contracting_dims={1}, rhs_contracting_dims={0}
})";
TF_ASSERT_OK_AND_ASSIGN(
auto m, ParseAndReturnVerifiedModule(kHlo, GetModuleConfigForTest()));
bool changed = false;
TF_ASSERT_OK_AND_ASSIGN(
changed,
RunHloPass(
GemmRewriter(
gpu_comp(),
stream_executor::SemanticVersion{12, 4, 0}),
m.get()));
changed = false;
DebugOptions opts;
AutotuneConfig cfg{DeviceConfig{stream_exec(), nullptr}, opts};
TF_ASSERT_OK_AND_ASSIGN(changed,
RunHloPass(GemmAlgorithmPicker(cfg), m.get()));
ASSERT_TRUE(changed);
AutotuneResults results;
TF_ASSERT_OK(AutotunerUtil::SerializeAutotuneResults(&results));
ASSERT_EQ(results.results_size(), 1);
auto& result = *results.mutable_results(0)->mutable_result();
int64_t old_algo_id = result.algorithm().algo_id();
int64_t new_algo_id = old_algo_id + 1;
result.mutable_gemm()->set_algorithm(new_algo_id);
AutotunerUtil::ClearAutotuneResults();
TF_ASSERT_OK(AutotunerUtil::LoadAutotuneResults(results));
auto module_cfg = GetModuleConfigForTest();
TF_ASSERT_OK_AND_ASSIGN(m, ParseAndReturnVerifiedModule(kHlo, module_cfg));
changed = false;
DevicelessConfig deviceless_config{device_desc()};
AutotuneConfig deviceless_cfg{deviceless_config, opts};
TF_ASSERT_OK_AND_ASSIGN(
changed,
RunHloPass(
GemmRewriter(
gpu_comp(),
stream_executor::SemanticVersion{12, 4, 0}),
m.get()));
changed = false;
TF_ASSERT_OK_AND_ASSIGN(
      changed, RunHloPass(GemmAlgorithmPicker(deviceless_cfg), m.get()));
ASSERT_TRUE(changed);
SCOPED_TRACE(m->ToString());
HloInstruction* dot;
ASSERT_THAT(m->entry_computation()->root_instruction(),
GmockMatch(m::GetTupleElement(m::CustomCall(&dot), 0)));
TF_ASSERT_OK_AND_ASSIGN(GpuBackendConfig gpu_config,
dot->backend_config<GpuBackendConfig>());
const GemmBackendConfig& config = gpu_config.gemm_backend_config();
EXPECT_EQ(config.selected_algorithm(), new_algo_id);
}
INSTANTIATE_TEST_SUITE_P(GemmAlgorithmPickerTestSuite, GemmAlgorithmPickerTest,
::testing::Bool());
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/autotuning/gemm_algorithm_picker.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/autotuning/gemm_algorithm_picker_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
f72466f2-69f4-45c4-9c91-d85a452eeac0 | cpp | tensorflow/tensorflow | logger_registry | tensorflow/compiler/tf2tensorrt/convert/logger_registry.cc | tensorflow/compiler/tf2tensorrt/convert/logger_registry_test.cc | #if GOOGLE_CUDA && GOOGLE_TENSORRT
#include "tensorflow/compiler/tf2tensorrt/convert/logger_registry.h"
#include <unordered_map>
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/platform/mutex.h"
namespace tensorflow {
namespace tensorrt {
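// Name -> logger registry behind the LoggerRegistry interface. It takes
// ownership of registered loggers, rejects duplicate names, and lives for
// the whole process (see GetLoggerRegistry below).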
class LoggerRegistryImpl : public LoggerRegistry {
Status Register(const string& name, nvinfer1::ILogger* logger) override {
mutex_lock lock(mu_);
if (!registry_.emplace(name, std::unique_ptr<nvinfer1::ILogger>(logger))
.second) {
return errors::AlreadyExists("Logger ", name, " already registered");
}
return OkStatus();
}
nvinfer1::ILogger* LookUp(const string& name) override {
mutex_lock lock(mu_);
const auto found = registry_.find(name);
if (found == registry_.end()) {
return nullptr;
}
return found->second.get();
}
private:
mutable mutex mu_;
mutable std::unordered_map<string, std::unique_ptr<nvinfer1::ILogger>>
registry_ TF_GUARDED_BY(mu_);
};
LoggerRegistry* GetLoggerRegistry() {
static LoggerRegistryImpl* registry = new LoggerRegistryImpl;
return registry;
}
}
}
#endif | #if GOOGLE_CUDA && GOOGLE_TENSORRT
#include "tensorflow/compiler/tf2tensorrt/convert/logger_registry.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
namespace tensorflow {
namespace tensorrt {
namespace {
using ::testing::Eq;
class TestLogger : public nvinfer1::ILogger {
void log(nvinfer1::ILogger::Severity severity, const char* msg) override {}
};
TestLogger test_logger;
REGISTER_TENSORRT_LOGGER("test_logger", &test_logger);
TEST(LoggerRegistryTest, RegistersCorrectly) {
auto registered_logger = GetLoggerRegistry()->LookUp("test_logger");
EXPECT_THAT(registered_logger, Eq(&test_logger));
}
}
}
}
#endif | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2tensorrt/convert/logger_registry.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2tensorrt/convert/logger_registry_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
c6633d09-02d1-403c-bb55-c66cc9091cc7 | cpp | google/cel-cpp | flatbuffers_backed_impl | tools/flatbuffers_backed_impl.cc | tools/flatbuffers_backed_impl_test.cc | #include "tools/flatbuffers_backed_impl.h"
#include <algorithm>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/types/optional.h"
#include "eval/public/cel_value.h"
#include "flatbuffers/flatbuffers.h"
namespace google {
namespace api {
namespace expr {
namespace runtime {
namespace {
CelValue CreateValue(int64_t value) { return CelValue::CreateInt64(value); }
CelValue CreateValue(uint64_t value) { return CelValue::CreateUint64(value); }
CelValue CreateValue(double value) { return CelValue::CreateDouble(value); }
CelValue CreateValue(bool value) { return CelValue::CreateBool(value); }
template <typename T, typename U>
class FlatBuffersListImpl : public CelList {
public:
FlatBuffersListImpl(const flatbuffers::Table& table,
const reflection::Field& field)
: list_(table.GetPointer<const flatbuffers::Vector<T>*>(field.offset())) {
}
int size() const override { return list_ ? list_->size() : 0; }
CelValue operator[](int index) const override {
return CreateValue(static_cast<U>(list_->Get(index)));
}
private:
const flatbuffers::Vector<T>* list_;
};
class StringListImpl : public CelList {
public:
explicit StringListImpl(
const flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>>* list)
: list_(list) {}
int size() const override { return list_ ? list_->size() : 0; }
CelValue operator[](int index) const override {
auto value = list_->Get(index);
return CelValue::CreateStringView(
absl::string_view(value->c_str(), value->size()));
}
private:
const flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>>* list_;
};
class ObjectListImpl : public CelList {
public:
ObjectListImpl(
const flatbuffers::Vector<flatbuffers::Offset<flatbuffers::Table>>* list,
const reflection::Schema& schema, const reflection::Object& object,
google::protobuf::Arena* arena)
: arena_(arena), list_(list), schema_(schema), object_(object) {}
int size() const override { return list_ ? list_->size() : 0; }
CelValue operator[](int index) const override {
auto value = list_->Get(index);
return CelValue::CreateMap(google::protobuf::Arena::Create<FlatBuffersMapImpl>(
arena_, *value, schema_, object_, arena_));
}
private:
google::protobuf::Arena* arena_;
const flatbuffers::Vector<flatbuffers::Offset<flatbuffers::Table>>* list_;
const reflection::Schema& schema_;
const reflection::Object& object_;
};
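// Exposes a flatbuffers vector of tables as a CelMap keyed by the tables'
// string key field. Flatbuffers keeps keyed vectors sorted by that field,
// so lookup is a binary search (std::lower_bound) over the vector.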
class ObjectStringIndexedMapImpl : public CelMap {
public:
ObjectStringIndexedMapImpl(
const flatbuffers::Vector<flatbuffers::Offset<flatbuffers::Table>>* list,
const reflection::Schema& schema, const reflection::Object& object,
const reflection::Field& index, google::protobuf::Arena* arena)
: arena_(arena),
list_(list),
schema_(schema),
object_(object),
index_(index) {
keys_.parent = this;
}
int size() const override { return list_ ? list_->size() : 0; }
absl::StatusOr<bool> Has(const CelValue& key) const override {
auto lookup_result = (*this)[key];
if (!lookup_result.has_value()) {
return false;
}
auto result = *lookup_result;
if (result.IsError()) {
return *(result.ErrorOrDie());
}
return true;
}
absl::optional<CelValue> operator[](CelValue cel_key) const override {
if (!cel_key.IsString()) {
return CreateErrorValue(
arena_, absl::InvalidArgumentError(
absl::StrCat("Invalid map key type: '",
CelValue::TypeName(cel_key.type()), "'")));
}
const absl::string_view key = cel_key.StringOrDie().value();
const auto it = std::lower_bound(
list_->begin(), list_->end(), key,
[this](const flatbuffers::Table* t, const absl::string_view key) {
auto value = flatbuffers::GetFieldS(*t, index_);
auto sv = value ? absl::string_view(value->c_str(), value->size())
: absl::string_view();
return sv < key;
});
if (it != list_->end()) {
auto value = flatbuffers::GetFieldS(**it, index_);
auto sv = value ? absl::string_view(value->c_str(), value->size())
: absl::string_view();
if (sv == key) {
return CelValue::CreateMap(google::protobuf::Arena::Create<FlatBuffersMapImpl>(
arena_, **it, schema_, object_, arena_));
}
}
return absl::nullopt;
}
absl::StatusOr<const CelList*> ListKeys() const override { return &keys_; }
private:
struct KeyList : public CelList {
int size() const override { return parent->size(); }
CelValue operator[](int index) const override {
auto value =
flatbuffers::GetFieldS(*(parent->list_->Get(index)), parent->index_);
if (value == nullptr) {
return CelValue::CreateStringView(absl::string_view());
}
return CelValue::CreateStringView(
absl::string_view(value->c_str(), value->size()));
}
ObjectStringIndexedMapImpl* parent;
};
google::protobuf::Arena* arena_;
const flatbuffers::Vector<flatbuffers::Offset<flatbuffers::Table>>* list_;
const reflection::Schema& schema_;
const reflection::Object& object_;
const reflection::Field& index_;
KeyList keys_;
};
const reflection::Field* findStringKeyField(const reflection::Object& object) {
for (const auto field : *object.fields()) {
if (field->key() && field->type()->base_type() == reflection::String) {
return field;
}
}
return nullptr;
}
}
absl::StatusOr<bool> FlatBuffersMapImpl::Has(const CelValue& key) const {
auto lookup_result = (*this)[key];
if (!lookup_result.has_value()) {
return false;
}
auto result = *lookup_result;
if (result.IsError()) {
return *(result.ErrorOrDie());
}
return true;
}
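// Field lookup dispatches on the reflected base type: integral scalars
// widen to CEL int64/uint64, floats widen to double, byte vectors become
// CEL bytes, nested tables become nested maps, and vectors of tables become
// an indexed map when the element type declares a string key field, or a
// plain list otherwise.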
absl::optional<CelValue> FlatBuffersMapImpl::operator[](
CelValue cel_key) const {
if (!cel_key.IsString()) {
return CreateErrorValue(
arena_, absl::InvalidArgumentError(
absl::StrCat("Invalid map key type: '",
CelValue::TypeName(cel_key.type()), "'")));
}
auto field = keys_.fields->LookupByKey(cel_key.StringOrDie().value().data());
if (field == nullptr) {
return absl::nullopt;
}
switch (field->type()->base_type()) {
case reflection::Byte:
return CelValue::CreateInt64(
flatbuffers::GetFieldI<int8_t>(table_, *field));
case reflection::Short:
return CelValue::CreateInt64(
flatbuffers::GetFieldI<int16_t>(table_, *field));
case reflection::Int:
return CelValue::CreateInt64(
flatbuffers::GetFieldI<int32_t>(table_, *field));
case reflection::Long:
return CelValue::CreateInt64(
flatbuffers::GetFieldI<int64_t>(table_, *field));
case reflection::UByte:
return CelValue::CreateUint64(
flatbuffers::GetFieldI<uint8_t>(table_, *field));
case reflection::UShort:
return CelValue::CreateUint64(
flatbuffers::GetFieldI<uint16_t>(table_, *field));
case reflection::UInt:
return CelValue::CreateUint64(
flatbuffers::GetFieldI<uint32_t>(table_, *field));
case reflection::ULong:
return CelValue::CreateUint64(
flatbuffers::GetFieldI<uint64_t>(table_, *field));
case reflection::Float:
return CelValue::CreateDouble(
flatbuffers::GetFieldF<float>(table_, *field));
case reflection::Double:
return CelValue::CreateDouble(
flatbuffers::GetFieldF<double>(table_, *field));
case reflection::Bool:
return CelValue::CreateBool(
flatbuffers::GetFieldI<int8_t>(table_, *field));
case reflection::String: {
auto value = flatbuffers::GetFieldS(table_, *field);
if (value == nullptr) {
return CelValue::CreateStringView(absl::string_view());
}
return CelValue::CreateStringView(
absl::string_view(value->c_str(), value->size()));
}
case reflection::Obj: {
const auto* field_schema = schema_.objects()->Get(field->type()->index());
const auto* field_table = flatbuffers::GetFieldT(table_, *field);
if (field_table == nullptr) {
return CelValue::CreateNull();
}
if (field_schema) {
return CelValue::CreateMap(google::protobuf::Arena::Create<FlatBuffersMapImpl>(
arena_, *field_table, schema_, *field_schema, arena_));
}
break;
}
case reflection::Vector: {
switch (field->type()->element()) {
case reflection::Byte:
case reflection::UByte: {
const auto* field_table = flatbuffers::GetFieldAnyV(table_, *field);
if (field_table == nullptr) {
return CelValue::CreateBytesView(absl::string_view());
}
return CelValue::CreateBytesView(absl::string_view(
reinterpret_cast<const char*>(field_table->Data()),
field_table->size()));
}
case reflection::Short:
return CelValue::CreateList(
google::protobuf::Arena::Create<FlatBuffersListImpl<int16_t, int64_t>>(
arena_, table_, *field));
case reflection::Int:
return CelValue::CreateList(
google::protobuf::Arena::Create<FlatBuffersListImpl<int32_t, int64_t>>(
arena_, table_, *field));
case reflection::Long:
return CelValue::CreateList(
google::protobuf::Arena::Create<FlatBuffersListImpl<int64_t, int64_t>>(
arena_, table_, *field));
case reflection::UShort:
return CelValue::CreateList(
google::protobuf::Arena::Create<FlatBuffersListImpl<uint16_t, uint64_t>>(
arena_, table_, *field));
case reflection::UInt:
return CelValue::CreateList(
google::protobuf::Arena::Create<FlatBuffersListImpl<uint32_t, uint64_t>>(
arena_, table_, *field));
case reflection::ULong:
return CelValue::CreateList(
google::protobuf::Arena::Create<FlatBuffersListImpl<uint64_t, uint64_t>>(
arena_, table_, *field));
case reflection::Float:
return CelValue::CreateList(
google::protobuf::Arena::Create<FlatBuffersListImpl<float, double>>(
arena_, table_, *field));
case reflection::Double:
return CelValue::CreateList(
google::protobuf::Arena::Create<FlatBuffersListImpl<double, double>>(
arena_, table_, *field));
case reflection::Bool:
return CelValue::CreateList(
google::protobuf::Arena::Create<FlatBuffersListImpl<uint8_t, bool>>(
arena_, table_, *field));
case reflection::String:
return CelValue::CreateList(google::protobuf::Arena::Create<StringListImpl>(
arena_, table_.GetPointer<const flatbuffers::Vector<
flatbuffers::Offset<flatbuffers::String>>*>(
field->offset())));
case reflection::Obj: {
const auto* field_schema =
schema_.objects()->Get(field->type()->index());
if (field_schema) {
const auto* index = findStringKeyField(*field_schema);
if (index) {
return CelValue::CreateMap(
google::protobuf::Arena::Create<ObjectStringIndexedMapImpl>(
arena_,
table_.GetPointer<const flatbuffers::Vector<
flatbuffers::Offset<flatbuffers::Table>>*>(
field->offset()),
schema_, *field_schema, *index, arena_));
} else {
return CelValue::CreateList(google::protobuf::Arena::Create<ObjectListImpl>(
arena_,
table_.GetPointer<const flatbuffers::Vector<
flatbuffers::Offset<flatbuffers::Table>>*>(
field->offset()),
schema_, *field_schema, arena_));
}
}
break;
}
default:
return absl::nullopt;
}
break;
}
default:
return absl::nullopt;
}
return absl::nullopt;
}
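// Usage sketch (identifiers illustrative): wrap a serialized flatbuffer's
// root table as a CelMap and read a field by name:
//   const CelMap* obj = CreateFlatBuffersBackedObject(buf, schema, &arena);
//   auto v = (*obj)[CelValue::CreateStringView("f_int")];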
const CelMap* CreateFlatBuffersBackedObject(const uint8_t* flatbuf,
const reflection::Schema& schema,
google::protobuf::Arena* arena) {
return google::protobuf::Arena::Create<const FlatBuffersMapImpl>(
arena, *flatbuffers::GetAnyRoot(flatbuf), schema, *schema.root_table(),
arena);
}
}
}
}
} | #include "tools/flatbuffers_backed_impl.h"
#include <string>
#include "internal/status_macros.h"
#include "internal/testing.h"
#include "flatbuffers/idl.h"
#include "flatbuffers/reflection.h"
namespace google {
namespace api {
namespace expr {
namespace runtime {
namespace {
constexpr char kReflectionBufferPath[] =
"tools/testdata/"
"flatbuffers.bfbs";
constexpr absl::string_view kByteField = "f_byte";
constexpr absl::string_view kUbyteField = "f_ubyte";
constexpr absl::string_view kShortField = "f_short";
constexpr absl::string_view kUshortField = "f_ushort";
constexpr absl::string_view kIntField = "f_int";
constexpr absl::string_view kUintField = "f_uint";
constexpr absl::string_view kLongField = "f_long";
constexpr absl::string_view kUlongField = "f_ulong";
constexpr absl::string_view kFloatField = "f_float";
constexpr absl::string_view kDoubleField = "f_double";
constexpr absl::string_view kBoolField = "f_bool";
constexpr absl::string_view kStringField = "f_string";
constexpr absl::string_view kObjField = "f_obj";
constexpr absl::string_view kUnknownField = "f_unknown";
constexpr absl::string_view kBytesField = "r_byte";
constexpr absl::string_view kUbytesField = "r_ubyte";
constexpr absl::string_view kShortsField = "r_short";
constexpr absl::string_view kUshortsField = "r_ushort";
constexpr absl::string_view kIntsField = "r_int";
constexpr absl::string_view kUintsField = "r_uint";
constexpr absl::string_view kLongsField = "r_long";
constexpr absl::string_view kUlongsField = "r_ulong";
constexpr absl::string_view kFloatsField = "r_float";
constexpr absl::string_view kDoublesField = "r_double";
constexpr absl::string_view kBoolsField = "r_bool";
constexpr absl::string_view kStringsField = "r_string";
constexpr absl::string_view kObjsField = "r_obj";
constexpr absl::string_view kIndexedField = "r_indexed";
const int64_t kNumFields = 27;
class FlatBuffersTest : public testing::Test {
public:
FlatBuffersTest() {
EXPECT_TRUE(
flatbuffers::LoadFile(kReflectionBufferPath, true, &schema_file_));
flatbuffers::Verifier verifier(
reinterpret_cast<const uint8_t*>(schema_file_.data()),
schema_file_.size());
EXPECT_TRUE(reflection::VerifySchemaBuffer(verifier));
EXPECT_TRUE(parser_.Deserialize(
reinterpret_cast<const uint8_t*>(schema_file_.data()),
schema_file_.size()));
schema_ = reflection::GetSchema(schema_file_.data());
}
const CelMap& loadJson(std::string data) {
EXPECT_TRUE(parser_.Parse(data.data()));
const CelMap* value = CreateFlatBuffersBackedObject(
parser_.builder_.GetBufferPointer(), *schema_, &arena_);
EXPECT_NE(nullptr, value);
EXPECT_EQ(kNumFields, value->size());
const CelList* keys = value->ListKeys().value();
EXPECT_NE(nullptr, keys);
EXPECT_EQ(kNumFields, keys->size());
EXPECT_TRUE((*keys)[2].IsString());
return *value;
}
protected:
std::string schema_file_;
flatbuffers::Parser parser_;
const reflection::Schema* schema_;
google::protobuf::Arena arena_;
};
TEST_F(FlatBuffersTest, PrimitiveFields) {
const CelMap& value = loadJson(R"({
f_byte: -1,
f_ubyte: 1,
f_short: -2,
f_ushort: 2,
f_int: -3,
f_uint: 3,
f_long: -4,
f_ulong: 4,
f_float: 5.0,
f_double: 6.0,
f_bool: false,
f_string: "test"
})");
{
auto f = value[CelValue::CreateStringView(kByteField)];
EXPECT_TRUE(f.has_value());
EXPECT_TRUE(f->IsInt64());
EXPECT_EQ(-1, f->Int64OrDie());
}
{
auto uf = value[CelValue::CreateStringView(kUbyteField)];
EXPECT_TRUE(uf.has_value());
EXPECT_TRUE(uf->IsUint64());
EXPECT_EQ(1, uf->Uint64OrDie());
}
{
auto f = value[CelValue::CreateStringView(kShortField)];
EXPECT_TRUE(f.has_value());
EXPECT_TRUE(f->IsInt64());
EXPECT_EQ(-2, f->Int64OrDie());
}
{
auto uf = value[CelValue::CreateStringView(kUshortField)];
EXPECT_TRUE(uf.has_value());
EXPECT_TRUE(uf->IsUint64());
EXPECT_EQ(2, uf->Uint64OrDie());
}
{
auto f = value[CelValue::CreateStringView(kIntField)];
EXPECT_TRUE(f.has_value());
EXPECT_TRUE(f->IsInt64());
EXPECT_EQ(-3, f->Int64OrDie());
}
{
auto uf = value[CelValue::CreateStringView(kUintField)];
EXPECT_TRUE(uf.has_value());
EXPECT_TRUE(uf->IsUint64());
EXPECT_EQ(3, uf->Uint64OrDie());
}
{
auto f = value[CelValue::CreateStringView(kLongField)];
EXPECT_TRUE(f.has_value());
EXPECT_TRUE(f->IsInt64());
EXPECT_EQ(-4, f->Int64OrDie());
}
{
auto uf = value[CelValue::CreateStringView(kUlongField)];
EXPECT_TRUE(uf.has_value());
EXPECT_TRUE(uf->IsUint64());
EXPECT_EQ(4, uf->Uint64OrDie());
}
{
auto f = value[CelValue::CreateStringView(kFloatField)];
EXPECT_TRUE(f.has_value());
EXPECT_TRUE(f->IsDouble());
EXPECT_EQ(5.0, f->DoubleOrDie());
}
{
auto f = value[CelValue::CreateStringView(kDoubleField)];
EXPECT_TRUE(f.has_value());
EXPECT_TRUE(f->IsDouble());
EXPECT_EQ(6.0, f->DoubleOrDie());
}
{
auto f = value[CelValue::CreateStringView(kBoolField)];
EXPECT_TRUE(f.has_value());
EXPECT_TRUE(f->IsBool());
EXPECT_EQ(false, f->BoolOrDie());
}
{
auto f = value[CelValue::CreateStringView(kStringField)];
EXPECT_TRUE(f.has_value());
EXPECT_TRUE(f->IsString());
EXPECT_EQ("test", f->StringOrDie().value());
}
{
CelValue bad_field = CelValue::CreateInt64(1);
auto f = value[bad_field];
EXPECT_TRUE(f.has_value());
EXPECT_TRUE(f->IsError());
auto presence = value.Has(bad_field);
EXPECT_FALSE(presence.ok());
EXPECT_EQ(presence.status().code(), absl::StatusCode::kInvalidArgument);
}
{
auto f = value[CelValue::CreateStringView(kUnknownField)];
EXPECT_FALSE(f.has_value());
}
}
TEST_F(FlatBuffersTest, PrimitiveFieldDefaults) {
const CelMap& value = loadJson("{}");
{
auto f = value[CelValue::CreateStringView(kByteField)];
EXPECT_TRUE(f.has_value());
EXPECT_TRUE(f->IsInt64());
EXPECT_EQ(0, f->Int64OrDie());
}
{
auto f = value[CelValue::CreateStringView(kShortField)];
EXPECT_TRUE(f.has_value());
EXPECT_TRUE(f->IsInt64());
EXPECT_EQ(150, f->Int64OrDie());
}
{
auto f = value[CelValue::CreateStringView(kBoolField)];
EXPECT_TRUE(f.has_value());
EXPECT_TRUE(f->IsBool());
EXPECT_EQ(true, f->BoolOrDie());
}
{
auto f = value[CelValue::CreateStringView(kStringField)];
EXPECT_TRUE(f.has_value());
EXPECT_TRUE(f->IsString());
EXPECT_EQ("", f->StringOrDie().value());
}
}
TEST_F(FlatBuffersTest, ObjectField) {
const CelMap& value = loadJson(R"({
f_obj: {
f_string: "entry",
f_int: 16
}
})");
CelValue field = CelValue::CreateStringView(kObjField);
auto presence = value.Has(field);
EXPECT_OK(presence);
EXPECT_TRUE(*presence);
auto f = value[field];
EXPECT_TRUE(f.has_value());
EXPECT_TRUE(f->IsMap());
const CelMap& m = *f->MapOrDie();
EXPECT_EQ(2, m.size());
{
auto obj_field = CelValue::CreateStringView(kStringField);
auto member_presence = m.Has(obj_field);
EXPECT_OK(member_presence);
EXPECT_TRUE(*member_presence);
auto mf = m[obj_field];
EXPECT_TRUE(mf.has_value());
EXPECT_TRUE(mf->IsString());
EXPECT_EQ("entry", mf->StringOrDie().value());
}
{
auto obj_field = CelValue::CreateStringView(kIntField);
auto member_presence = m.Has(obj_field);
EXPECT_OK(member_presence);
EXPECT_TRUE(*member_presence);
auto mf = m[obj_field];
EXPECT_TRUE(mf.has_value());
EXPECT_TRUE(mf->IsInt64());
EXPECT_EQ(16, mf->Int64OrDie());
}
{
std::string undefined = "f_undefined";
CelValue undefined_field = CelValue::CreateStringView(undefined);
auto presence = m.Has(undefined_field);
EXPECT_OK(presence);
EXPECT_FALSE(*presence);
auto v = m[undefined_field];
EXPECT_FALSE(v.has_value());
presence = m.Has(CelValue::CreateBool(false));
EXPECT_FALSE(presence.ok());
EXPECT_EQ(presence.status().code(), absl::StatusCode::kInvalidArgument);
}
}
TEST_F(FlatBuffersTest, ObjectFieldDefault) {
const CelMap& value = loadJson("{}");
auto f = value[CelValue::CreateStringView(kObjField)];
EXPECT_TRUE(f.has_value());
EXPECT_TRUE(f->IsNull());
}
TEST_F(FlatBuffersTest, PrimitiveVectorFields) {
const CelMap& value = loadJson(R"({
r_byte: [-97],
r_ubyte: [97, 98, 99],
r_short: [-2],
r_ushort: [2],
r_int: [-3],
r_uint: [3],
r_long: [-4],
r_ulong: [4],
r_float: [5.0],
r_double: [6.0],
r_bool: [false],
r_string: ["test"]
})");
{
auto f = value[CelValue::CreateStringView(kBytesField)];
EXPECT_TRUE(f.has_value());
EXPECT_TRUE(f->IsBytes());
EXPECT_EQ("\x9F", f->BytesOrDie().value());
}
{
auto uf = value[CelValue::CreateStringView(kUbytesField)];
EXPECT_TRUE(uf.has_value());
EXPECT_TRUE(uf->IsBytes());
EXPECT_EQ("abc", uf->BytesOrDie().value());
}
{
auto f = value[CelValue::CreateStringView(kShortsField)];
EXPECT_TRUE(f.has_value());
EXPECT_TRUE(f->IsList());
const CelList& l = *f->ListOrDie();
EXPECT_EQ(1, l.size());
EXPECT_EQ(-2, l[0].Int64OrDie());
}
{
auto uf = value[CelValue::CreateStringView(kUshortsField)];
EXPECT_TRUE(uf.has_value());
EXPECT_TRUE(uf->IsList());
const CelList& l = *uf->ListOrDie();
EXPECT_EQ(1, l.size());
EXPECT_EQ(2, l[0].Uint64OrDie());
}
{
auto f = value[CelValue::CreateStringView(kIntsField)];
EXPECT_TRUE(f.has_value());
EXPECT_TRUE(f->IsList());
const CelList& l = *f->ListOrDie();
EXPECT_EQ(1, l.size());
EXPECT_EQ(-3, l[0].Int64OrDie());
}
{
auto uf = value[CelValue::CreateStringView(kUintsField)];
EXPECT_TRUE(uf.has_value());
EXPECT_TRUE(uf->IsList());
const CelList& l = *uf->ListOrDie();
EXPECT_EQ(1, l.size());
EXPECT_EQ(3, l[0].Uint64OrDie());
}
{
auto f = value[CelValue::CreateStringView(kLongsField)];
EXPECT_TRUE(f.has_value());
EXPECT_TRUE(f->IsList());
const CelList& l = *f->ListOrDie();
EXPECT_EQ(1, l.size());
EXPECT_EQ(-4, l[0].Int64OrDie());
}
{
auto uf = value[CelValue::CreateStringView(kUlongsField)];
EXPECT_TRUE(uf.has_value());
EXPECT_TRUE(uf->IsList());
const CelList& l = *uf->ListOrDie();
EXPECT_EQ(1, l.size());
EXPECT_EQ(4, l[0].Uint64OrDie());
}
{
auto f = value[CelValue::CreateStringView(kFloatsField)];
EXPECT_TRUE(f.has_value());
EXPECT_TRUE(f->IsList());
const CelList& l = *f->ListOrDie();
EXPECT_EQ(1, l.size());
EXPECT_EQ(5.0, l[0].DoubleOrDie());
}
{
auto f = value[CelValue::CreateStringView(kDoublesField)];
EXPECT_TRUE(f.has_value());
EXPECT_TRUE(f->IsList());
const CelList& l = *f->ListOrDie();
EXPECT_EQ(1, l.size());
EXPECT_EQ(6.0, l[0].DoubleOrDie());
}
{
auto f = value[CelValue::CreateStringView(kBoolsField)];
EXPECT_TRUE(f.has_value());
EXPECT_TRUE(f->IsList());
const CelList& l = *f->ListOrDie();
EXPECT_EQ(1, l.size());
EXPECT_EQ(false, l[0].BoolOrDie());
}
{
auto f = value[CelValue::CreateStringView(kStringsField)];
EXPECT_TRUE(f.has_value());
EXPECT_TRUE(f->IsList());
const CelList& l = *f->ListOrDie();
EXPECT_EQ(1, l.size());
EXPECT_EQ("test", l[0].StringOrDie().value());
}
}
TEST_F(FlatBuffersTest, ObjectVectorField) {
const CelMap& value = loadJson(R"({
r_obj: [{
f_string: "entry",
f_int: 16
},{
f_int: 32
}]
})");
auto f = value[CelValue::CreateStringView(kObjsField)];
EXPECT_TRUE(f.has_value());
EXPECT_TRUE(f->IsList());
const CelList& l = *f->ListOrDie();
EXPECT_EQ(2, l.size());
{
EXPECT_TRUE(l[0].IsMap());
const CelMap& m = *l[0].MapOrDie();
EXPECT_EQ(2, m.size());
{
CelValue field = CelValue::CreateStringView(kStringField);
auto presence = m.Has(field);
EXPECT_OK(presence);
EXPECT_TRUE(*presence);
auto mf = m[field];
EXPECT_TRUE(mf.has_value());
EXPECT_TRUE(mf->IsString());
EXPECT_EQ("entry", mf->StringOrDie().value());
}
{
CelValue field = CelValue::CreateStringView(kIntField);
auto presence = m.Has(field);
EXPECT_OK(presence);
EXPECT_TRUE(*presence);
auto mf = m[field];
EXPECT_TRUE(mf.has_value());
EXPECT_TRUE(mf->IsInt64());
EXPECT_EQ(16, mf->Int64OrDie());
}
}
{
EXPECT_TRUE(l[1].IsMap());
const CelMap& m = *l[1].MapOrDie();
EXPECT_EQ(2, m.size());
{
CelValue field = CelValue::CreateStringView(kStringField);
auto presence = m.Has(field);
EXPECT_OK(presence);
EXPECT_TRUE(*presence);
auto mf = m[field];
EXPECT_TRUE(mf.has_value());
EXPECT_TRUE(mf->IsString());
EXPECT_EQ("", mf->StringOrDie().value());
}
{
CelValue field = CelValue::CreateStringView(kIntField);
auto presence = m.Has(field);
EXPECT_OK(presence);
EXPECT_TRUE(*presence);
auto mf = m[field];
EXPECT_TRUE(mf.has_value());
EXPECT_TRUE(mf->IsInt64());
EXPECT_EQ(32, mf->Int64OrDie());
}
{
std::string undefined = "f_undefined";
CelValue field = CelValue::CreateStringView(undefined);
auto presence = m.Has(field);
EXPECT_OK(presence);
EXPECT_FALSE(*presence);
auto mf = m[field];
EXPECT_FALSE(mf.has_value());
}
}
}
TEST_F(FlatBuffersTest, VectorFieldDefaults) {
const CelMap& value = loadJson("{}");
for (const auto field : std::vector<absl::string_view>{
kIntsField, kBoolsField, kStringsField, kObjsField}) {
auto f = value[CelValue::CreateStringView(field)];
EXPECT_TRUE(f.has_value());
EXPECT_TRUE(f->IsList());
const CelList& l = *f->ListOrDie();
EXPECT_EQ(0, l.size());
}
{
auto f = value[CelValue::CreateStringView(kIndexedField)];
EXPECT_TRUE(f.has_value());
EXPECT_TRUE(f->IsMap());
const CelMap& m = *f->MapOrDie();
EXPECT_EQ(0, m.size());
EXPECT_EQ(0, (*m.ListKeys())->size());
}
{
auto f = value[CelValue::CreateStringView(kBytesField)];
EXPECT_TRUE(f.has_value());
EXPECT_TRUE(f->IsBytes());
EXPECT_EQ("", f->BytesOrDie().value());
}
}
TEST_F(FlatBuffersTest, IndexedObjectVectorField) {
const CelMap& value = loadJson(R"({
r_indexed: [
{
f_string: "a",
f_int: 16
},
{
f_string: "b",
f_int: 32
},
{
f_string: "c",
f_int: 64
},
{
f_string: "d",
f_int: 128
}
]
})");
auto f = value[CelValue::CreateStringView(kIndexedField)];
EXPECT_TRUE(f.has_value());
EXPECT_TRUE(f->IsMap());
const CelMap& m = *f->MapOrDie();
EXPECT_EQ(4, m.size());
const CelList& l = *m.ListKeys().value();
EXPECT_EQ(4, l.size());
EXPECT_TRUE(l[0].IsString());
EXPECT_TRUE(l[1].IsString());
EXPECT_TRUE(l[2].IsString());
EXPECT_TRUE(l[3].IsString());
std::string a = "a";
std::string b = "b";
std::string c = "c";
std::string d = "d";
EXPECT_EQ(a, l[0].StringOrDie().value());
EXPECT_EQ(b, l[1].StringOrDie().value());
EXPECT_EQ(c, l[2].StringOrDie().value());
EXPECT_EQ(d, l[3].StringOrDie().value());
for (const std::string& key : std::vector<std::string>{a, b, c, d}) {
auto v = m[CelValue::CreateString(&key)];
EXPECT_TRUE(v.has_value());
const CelMap& vm = *v->MapOrDie();
EXPECT_EQ(2, vm.size());
auto vf = vm[CelValue::CreateStringView(kStringField)];
EXPECT_TRUE(vf.has_value());
EXPECT_TRUE(vf->IsString());
EXPECT_EQ(key, vf->StringOrDie().value());
auto vi = vm[CelValue::CreateStringView(kIntField)];
EXPECT_TRUE(vi.has_value());
EXPECT_TRUE(vi->IsInt64());
}
{
std::string bb = "bb";
std::string dd = "dd";
EXPECT_FALSE(m[CelValue::CreateString(&bb)].has_value());
EXPECT_FALSE(m[CelValue::CreateString(&dd)].has_value());
EXPECT_FALSE(
m[CelValue::CreateStringView(absl::string_view())].has_value());
}
}
TEST_F(FlatBuffersTest, IndexedObjectVectorFieldDefaults) {
const CelMap& value = loadJson(R"({
r_indexed: [
{
f_string: "",
f_int: 16
}
]
})");
CelValue field = CelValue::CreateStringView(kIndexedField);
auto presence = value.Has(field);
EXPECT_OK(presence);
EXPECT_TRUE(*presence);
auto f = value[field];
EXPECT_TRUE(f.has_value());
EXPECT_TRUE(f->IsMap());
const CelMap& m = *f->MapOrDie();
EXPECT_EQ(1, m.size());
const CelList& l = *m.ListKeys().value();
EXPECT_EQ(1, l.size());
EXPECT_TRUE(l[0].IsString());
EXPECT_EQ("", l[0].StringOrDie().value());
CelValue map_field = CelValue::CreateStringView(absl::string_view());
presence = m.Has(map_field);
EXPECT_OK(presence);
EXPECT_TRUE(*presence);
auto v = m[map_field];
EXPECT_TRUE(v.has_value());
std::string undefined = "f_undefined";
CelValue undefined_field = CelValue::CreateStringView(undefined);
presence = m.Has(undefined_field);
EXPECT_OK(presence);
EXPECT_FALSE(*presence);
v = m[undefined_field];
EXPECT_FALSE(v.has_value());
presence = m.Has(CelValue::CreateBool(false));
EXPECT_FALSE(presence.ok());
EXPECT_EQ(presence.status().code(), absl::StatusCode::kInvalidArgument);
}
}
}
}
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/tools/flatbuffers_backed_impl.cc | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/tools/flatbuffers_backed_impl_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
e1c8bb67-f331-4edf-af15-281e38fa10d1 | cpp | google/arolla | expr_debug_string | arolla/expr/expr_debug_string.cc | arolla/expr/expr_debug_string_test.cc | #include "arolla/expr/expr_debug_string.h"
#include <algorithm>
#include <cstddef>
#include <optional>
#include <string>
#include <string_view>
#include <utility>
#include <vector>
#include "absl/base/optimization.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/inlined_vector.h"
#include "absl/log/check.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "arolla/expr/annotation_utils.h"
#include "arolla/expr/expr_node.h"
#include "arolla/expr/expr_operator.h"
#include "arolla/expr/expr_visitor.h"
#include "arolla/expr/operator_repr_functions.h"
#include "arolla/expr/registered_expr_operator.h"
#include "arolla/util/fingerprint.h"
#include "arolla/util/repr.h"
#include "arolla/util/string.h"
namespace arolla::expr {
namespace {
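// Selects the nodes that will be printed as standalone statements: every
// node carrying a name annotation, plus any node with more than one parent
// whose subtree depth reaches kCriticalDepth. Turning a node into a
// statement resets its depth to 1 for the computation above it.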
std::vector<ExprNodePtr> SelectStatementNodes(const PostOrder& post_order) {
const size_t kCriticalDepth = 3;
std::vector<size_t> node_parent_count(post_order.nodes_size(), 0);
for (size_t i = 0; i < post_order.nodes_size(); ++i) {
for (size_t j : post_order.dep_indices(i)) {
node_parent_count[j] += 1;
}
}
std::vector<ExprNodePtr> result;
std::vector<size_t> node_depth(post_order.nodes_size());
for (size_t i = 0; i < post_order.nodes_size(); ++i) {
size_t depth = 1;
for (size_t j : post_order.dep_indices(i)) {
depth = std::max(depth, 1 + node_depth[j]);
}
const auto& node = post_order.node(i);
const bool is_statement =
IsNameAnnotation(node) ||
(node_parent_count[i] > 1 &&
depth >= kCriticalDepth);
if (is_statement) {
result.push_back(node);
depth = 1;
}
node_depth[i] = depth;
}
return result;
}
constexpr bool IsSafeStatementName(absl::string_view str) {
return IsQualifiedIdentifier(str) &&
!(str.size() > 1 && str[0] == '_' &&
std::find_if_not(str.begin() + 1, str.end(), IsDigit) == str.end());
}
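// Assigns a unique printable name to every statement node: unsafe (or
// missing) names become _1, _2, ...; safe names that collide are
// disambiguated with a ._<counter> suffix.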
absl::flat_hash_map<Fingerprint, std::string> GenStatementNames(
const PostOrder& post_order) {
const auto statement_nodes = SelectStatementNodes(post_order);
absl::flat_hash_map<absl::string_view, size_t> name_counts;
name_counts.reserve(statement_nodes.size());
for (const auto& node : statement_nodes) {
if (auto name = ReadNameAnnotation(node); IsSafeStatementName(name)) {
name_counts[name] += 1;
}
}
for (auto& [_, v] : name_counts) {
v = (v > 1);
}
absl::flat_hash_map<Fingerprint, std::string> result;
result.reserve(statement_nodes.size());
size_t anonymous_count = 1;
for (const auto& node : statement_nodes) {
const auto name = ReadNameAnnotation(node);
if (!IsSafeStatementName(name)) {
result.emplace(node->fingerprint(), absl::StrCat("_", anonymous_count++));
continue;
}
auto& name_count = name_counts[name];
if (name_count == 0) {
result.emplace(node->fingerprint(), name);
} else {
result.emplace(node->fingerprint(),
absl::StrCat(name, "._", name_count++));
}
}
return result;
}
std::vector<const ReprToken*> GetNodeDepsTokens(
const ExprNodePtr& node,
const absl::flat_hash_map<Fingerprint, ReprToken>& node_tokens) {
std::vector<const ReprToken*> inputs(node->node_deps().size());
for (size_t i = 0; i < node->node_deps().size(); ++i) {
inputs[i] = &node_tokens.at(node->node_deps()[i]->fingerprint());
}
return inputs;
}
ReprToken FormatLiteral(const ExprNodePtr& node) {
if (auto literal = node->qvalue()) {
return literal->GenReprToken();
} else {
return ReprToken{"<broken_literal>"};
}
}
ReprToken FormatLeaf(const ExprNodePtr& node) {
return ReprToken{absl::StrCat("L", ContainerAccessString(node->leaf_key()))};
}
ReprToken FormatPlaceholder(const ExprNodePtr& node) {
return ReprToken{
absl::StrCat("P", ContainerAccessString(node->placeholder_key()))};
}
ReprToken FormatOperatorCanonical(const ExprNodePtr& node,
absl::Span<const ReprToken* const> inputs) {
ReprToken result;
if (IsRegisteredOperator(node->op())) {
absl::StrAppend(&result.str, "M.");
}
absl::StrAppend(&result.str, node->op()->display_name(), "(");
for (size_t i = 0; i < inputs.size(); ++i) {
if (i > 0) {
absl::StrAppend(&result.str, ", ");
}
absl::StrAppend(&result.str, inputs[i]->str);
}
absl::StrAppend(&result.str, ")");
return result;
}
ReprToken FormatOperatorVerbose(const ExprNodePtr& node,
absl::Span<const ReprToken* const> inputs) {
ReprToken result = FormatOperatorCanonical(node, inputs);
if (!IsQTypeAnnotation(node)) {
if (auto* qtype = node->qtype()) {
absl::StrAppend(&result.str, ":", qtype->name());
}
}
return result;
}
ReprToken FormatOperatorPretty(
const ExprNodePtr& node,
const absl::flat_hash_map<Fingerprint, ReprToken>& node_tokens) {
if (auto repr = FormatOperatorNodePretty(node, node_tokens)) {
return *std::move(repr);
}
return FormatOperatorCanonical(node, GetNodeDepsTokens(node, node_tokens));
}
ReprToken FormatVerbose(const ExprNodePtr& node,
absl::Span<const ReprToken* const> inputs) {
switch (node->type()) {
case ExprNodeType::kLiteral:
return FormatLiteral(node);
case ExprNodeType::kLeaf:
return FormatLeaf(node);
case ExprNodeType::kPlaceholder:
return FormatPlaceholder(node);
case ExprNodeType::kOperator:
return FormatOperatorVerbose(node, inputs);
}
ABSL_UNREACHABLE();
}
ReprToken FormatPretty(
const ExprNodePtr& node,
const absl::flat_hash_map<Fingerprint, ReprToken>& node_tokens) {
switch (node->type()) {
case ExprNodeType::kLiteral:
return FormatLiteral(node);
case ExprNodeType::kLeaf:
return FormatLeaf(node);
case ExprNodeType::kPlaceholder:
return FormatPlaceholder(node);
case ExprNodeType::kOperator:
return FormatOperatorPretty(node, node_tokens);
}
ABSL_UNREACHABLE();
}
ReprToken FormatWithHiddenInputs(const ExprNodePtr& node) {
const ReprToken kDots{.str = "..."};
std::vector<const ReprToken*> inputs(node->node_deps().size(), &kDots);
return FormatVerbose(node, inputs);
}
}
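// Renders the expression bottom-up. Each statement node is emitted as a
// `name = <expr>` line and referenced by name afterwards; the final line is
// the root's rendering. E.g. a literal annotated with the name "Bar" prints
// as the two lines "Bar = b'Foo'" and "Bar".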
std::string ToDebugString(const ExprNodePtr& root, bool verbose) {
const PostOrder post_order(root);
const auto statement_names = GenStatementNames(post_order);
std::vector<std::string> result;
absl::flat_hash_map<Fingerprint, ReprToken> node_tokens(
post_order.nodes_size());
  auto format =
      verbose ? [](const ExprNodePtr& node,
                   const absl::flat_hash_map<Fingerprint, ReprToken>&
                       node_tokens) {
        return FormatVerbose(node, GetNodeDepsTokens(node, node_tokens));
      }
              : FormatPretty;
for (const auto& node : post_order.nodes()) {
auto it = statement_names.find(node->fingerprint());
if (it == statement_names.end()) {
node_tokens[node->fingerprint()] = format(node, node_tokens);
continue;
}
const auto& statement_name = it->second;
if (IsSafeStatementName(ReadNameAnnotation(node))) {
DCHECK_EQ(node->node_deps().size(), 2);
const auto& res = node_tokens[node->node_deps()[0]->fingerprint()];
result.push_back(absl::StrCat(statement_name, " = ", res.str));
} else {
result.push_back(
absl::StrCat(statement_name, " = ", format(node, node_tokens).str));
}
node_tokens[node->fingerprint()] = ReprToken{.str = statement_name};
}
result.push_back(std::move(node_tokens[root->fingerprint()].str));
return absl::StrJoin(result, "\n");
}
constexpr int kMaxDebugSnippetSize = 200;
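// One-line rendering of `node` for error messages: direct dependencies are
// formatted with their own inputs elided as "...", and the whole snippet is
// truncated to kMaxDebugSnippetSize characters.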
std::string GetDebugSnippet(const ExprNodePtr& node) {
const auto& node_deps = node->node_deps();
absl::InlinedVector<ReprToken, 4> dep_snippets(node_deps.size());
absl::InlinedVector<const ReprToken*, 4> dep_snippet_ptrs(node_deps.size());
for (size_t i = 0; i < node_deps.size(); ++i) {
dep_snippets[i] = FormatWithHiddenInputs(node_deps[i]);
dep_snippet_ptrs[i] = &dep_snippets[i];
}
std::string snippet = FormatVerbose(node, dep_snippet_ptrs).str;
return Truncate(std::move(snippet), kMaxDebugSnippetSize);
}
} | #include "arolla/expr/expr_debug_string.h"
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include "benchmark/benchmark.h"
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/container/flat_hash_map.h"
#include "absl/log/check.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "arolla/expr/expr.h"
#include "arolla/expr/expr_attributes.h"
#include "arolla/expr/expr_node.h"
#include "arolla/expr/expr_operator_signature.h"
#include "arolla/expr/operator_repr_functions.h"
#include "arolla/expr/registered_expr_operator.h"
#include "arolla/expr/testing/test_operators.h"
#include "arolla/expr/testing/testing.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/qtype/testing/dummy_types.h"
#include "arolla/qtype/unspecified_qtype.h"
#include "arolla/util/bytes.h"
#include "arolla/util/fingerprint.h"
#include "arolla/util/init_arolla.h"
#include "arolla/util/repr.h"
#include "arolla/util/text.h"
namespace arolla::expr {
namespace {
using ::arolla::testing::WithNameAnnotation;
using ::arolla::testing::WithQTypeAnnotation;
class ExprDebugStringTest : public ::testing::Test {
protected:
ExprNodePtr Pos(ExprNodePtr x) { return CallOp("math.pos", {x}).value(); }
ExprNodePtr Neg(ExprNodePtr x) { return CallOp("math.neg", {x}).value(); }
ExprNodePtr Invert(ExprNodePtr x) {
return CallOp("core.presence_not", {x}).value();
}
ExprNodePtr Pow(ExprNodePtr lhs, ExprNodePtr rhs) {
return CallOp("math.pow", {lhs, rhs}).value();
}
ExprNodePtr Mul(ExprNodePtr lhs, ExprNodePtr rhs) {
return CallOp("math.multiply", {lhs, rhs}).value();
}
ExprNodePtr TrueDiv(ExprNodePtr lhs, ExprNodePtr rhs) {
return CallOp("math.divide", {lhs, rhs}).value();
}
ExprNodePtr FloorDiv(ExprNodePtr lhs, ExprNodePtr rhs) {
return CallOp("math.floordiv", {lhs, rhs}).value();
}
ExprNodePtr Mod(ExprNodePtr lhs, ExprNodePtr rhs) {
return CallOp("math.mod", {lhs, rhs}).value();
}
ExprNodePtr Add(ExprNodePtr lhs, ExprNodePtr rhs) {
return CallOp("math.add", {lhs, rhs}).value();
}
ExprNodePtr Sub(ExprNodePtr lhs, ExprNodePtr rhs) {
return CallOp("math.subtract", {lhs, rhs}).value();
}
ExprNodePtr And(ExprNodePtr lhs, ExprNodePtr rhs) {
return CallOp("core.presence_and", {lhs, rhs}).value();
}
ExprNodePtr Or(ExprNodePtr lhs, ExprNodePtr rhs) {
return CallOp("core.presence_or", {lhs, rhs}).value();
}
ExprNodePtr Lt(ExprNodePtr lhs, ExprNodePtr rhs) {
return CallOp("core.less", {lhs, rhs}).value();
}
ExprNodePtr Le(ExprNodePtr lhs, ExprNodePtr rhs) {
return CallOp("core.less_equal", {lhs, rhs}).value();
}
ExprNodePtr Eq(ExprNodePtr lhs, ExprNodePtr rhs) {
return CallOp("core.equal", {lhs, rhs}).value();
}
ExprNodePtr Neq(ExprNodePtr lhs, ExprNodePtr rhs) {
return CallOp("core.not_equal", {lhs, rhs}).value();
}
ExprNodePtr Ge(ExprNodePtr lhs, ExprNodePtr rhs) {
return CallOp("core.greater_equal", {lhs, rhs}).value();
}
ExprNodePtr Gt(ExprNodePtr lhs, ExprNodePtr rhs) {
return CallOp("core.greater", {lhs, rhs}).value();
}
ExprNodePtr GetAttr(ExprNodePtr lhs, ExprNodePtr rhs) {
return ExprNode::UnsafeMakeOperatorNode(
std::make_shared<RegisteredOperator>("core.getattr"), {lhs, rhs},
ExprAttributes());
}
ExprNodePtr GetItem(ExprNodePtr lhs, ExprNodePtr rhs) {
return ExprNode::UnsafeMakeOperatorNode(
std::make_shared<RegisteredOperator>("core.getitem"), {lhs, rhs},
ExprAttributes());
}
ExprNodePtr MakeSlice(ExprNodePtr a, ExprNodePtr b, ExprNodePtr c) {
return ExprNode::UnsafeMakeOperatorNode(
std::make_shared<RegisteredOperator>("core.make_slice"), {a, b, c},
ExprAttributes());
}
ExprNodePtr Dummy(ExprNodePtr lhs, ExprNodePtr rhs) {
return ExprNode::UnsafeMakeOperatorNode(
std::make_shared<testing::DummyOp>(
"custom.add", ExprOperatorSignature({{"x"}, {"y"}})),
{lhs, rhs}, ExprAttributes());
}
};
TEST_F(ExprDebugStringTest, Literal) {
{
auto expr = Literal(int32_t{271828182});
EXPECT_EQ("271828182", ToDebugString(expr));
}
{
auto expr = Literal(int64_t{3417201710});
EXPECT_EQ("int64{3417201710}", ToDebugString(expr));
}
{
auto expr = Literal(Bytes("Hello, World!"));
EXPECT_EQ("b'Hello, World!'", ToDebugString(expr));
}
{
ASSERT_OK_AND_ASSIGN(auto expr,
WithNameAnnotation(Literal(Bytes("Foo")), "Bar"));
EXPECT_EQ("Bar = b'Foo'\nBar", ToDebugString(expr));
}
}
TEST_F(ExprDebugStringTest, Leaf) {
EXPECT_THAT(ToDebugString(Leaf("")), "L['']");
EXPECT_THAT(ToDebugString(Leaf("x")), "L.x");
EXPECT_THAT(ToDebugString(Leaf("'Hello, World!'")),
"L['\\'Hello, World!\\'']");
ASSERT_OK_AND_ASSIGN(auto y,
WithQTypeAnnotation(Leaf("y"), GetQType<double>()));
EXPECT_THAT(ToDebugString(y), "M.annotation.qtype(L.y, FLOAT64)");
EXPECT_THAT(ToDebugString(y, true),
"M.annotation.qtype(L.y, FLOAT64)");
}
TEST_F(ExprDebugStringTest, Placeholder) {
EXPECT_EQ("P['']", ToDebugString(Placeholder("")));
EXPECT_EQ("P.foo", ToDebugString(Placeholder("foo")));
EXPECT_EQ("P[':)']", ToDebugString(Placeholder(":)")));
}
TEST_F(ExprDebugStringTest, Operator) {
EXPECT_EQ(ToDebugString(CallOp("math.max", {Leaf("x"), Leaf("y")}).value()),
"M.math.max(L.x, L.y)");
EXPECT_EQ(ToDebugString(Add(Leaf("x"), Leaf("y"))), "L.x + L.y");
}
TEST_F(ExprDebugStringTest, Trivial) {
ASSERT_OK_AND_ASSIGN(
auto abc,
CallOp("test.add3", {Literal(0.f), Literal(2.7182f), Literal(3.1415f)}));
ASSERT_OK_AND_ASSIGN(auto expr,
CallOp("test.add3", {abc, Leaf("x"), Leaf("y")}));
EXPECT_EQ("M.test.add3(M.test.add3(0., 2.7182, 3.1415), L.x, L.y)",
ToDebugString(expr));
}
TEST_F(ExprDebugStringTest, UniqueStatements) {
auto a = Leaf("a");
auto b = Leaf("b");
auto c = Leaf("c");
ASSERT_OK_AND_ASSIGN(
auto d,
WithNameAnnotation(
Pow(Sub(Mul(b, b), Mul(Literal(4.f), Mul(a, c))), Literal(0.5f)),
"D"));
ASSERT_OK_AND_ASSIGN(
auto x0,
WithNameAnnotation(TrueDiv(TrueDiv(Add(b, d), Literal(-2.f)), a), "x0"));
ASSERT_OK_AND_ASSIGN(auto x1,
WithNameAnnotation(TrueDiv(TrueDiv(c, a), x0), "x1"));
EXPECT_EQ(("D = (L.b * L.b - 4. * (L.a * L.c)) ** 0.5\n"
"x0 = (L.b + D) / -2. / L.a\n"
"x1 = L.c / L.a / x0\n"
"x0 * x1"),
ToDebugString(Mul(x0, x1)));
}
TEST_F(ExprDebugStringTest, LeafKeyNameCollisions) {
ASSERT_OK_AND_ASSIGN(auto expr,
WithNameAnnotation(Add(Leaf("a"), Leaf("a")), "a"));
EXPECT_EQ(ToDebugString(expr), "a = L.a + L.a\na");
}
TEST_F(ExprDebugStringTest, PlaceholderKeyNameCollisions) {
ASSERT_OK_AND_ASSIGN(
auto expr,
WithNameAnnotation(
CallOp("math.min", {Placeholder("a"), Placeholder("a")}), "a"));
EXPECT_EQ(ToDebugString(expr), "a = M.math.min(P.a, P.a)\na");
}
TEST_F(ExprDebugStringTest, UnsafeStatements) {
auto expr = Leaf("a");
ASSERT_OK_AND_ASSIGN(expr, WithNameAnnotation(Add(expr, expr), "_"));
ASSERT_OK_AND_ASSIGN(expr, WithNameAnnotation(Add(expr, expr), ""));
ASSERT_OK_AND_ASSIGN(expr, WithNameAnnotation(Add(expr, expr), "_1"));
ASSERT_OK_AND_ASSIGN(expr, WithNameAnnotation(Add(expr, expr), "_X"));
ASSERT_OK_AND_ASSIGN(expr, WithNameAnnotation(Add(expr, expr), "_Y"));
ASSERT_OK_AND_ASSIGN(expr, WithNameAnnotation(Add(expr, expr), "_Y"));
ASSERT_OK_AND_ASSIGN(expr, WithNameAnnotation(Add(expr, expr), "quick' fox"));
ASSERT_OK_AND_ASSIGN(expr, WithNameAnnotation(Add(expr, expr), "foo.bar"));
ASSERT_OK_AND_ASSIGN(expr, WithNameAnnotation(Add(expr, expr), "abc."));
ASSERT_OK_AND_ASSIGN(expr, WithNameAnnotation(Add(expr, expr), ".def"));
ASSERT_OK_AND_ASSIGN(expr, WithNameAnnotation(Add(expr, expr), "fake..name"));
ASSERT_OK_AND_ASSIGN(expr, WithNameAnnotation(Add(expr, expr), "a.1"));
EXPECT_EQ(ToDebugString(expr),
"_ = L.a + L.a\n"
"_1 = M.annotation.name(_ + _, '')\n"
"_2 = M.annotation.name(_1 + _1, '_1')\n"
"_X = _2 + _2\n"
"_Y._1 = _X + _X\n"
"_Y._2 = _Y._1 + _Y._1\n"
"_3 = M.annotation.name(_Y._2 + _Y._2, 'quick\\' fox')\n"
"foo.bar = _3 + _3\n"
"_4 = M.annotation.name(foo.bar + foo.bar, 'abc.')\n"
"_5 = M.annotation.name(_4 + _4, '.def')\n"
"_6 = M.annotation.name(_5 + _5, 'fake..name')\n"
"_7 = M.annotation.name(_6 + _6, 'a.1')\n"
"_7");
}
TEST_F(ExprDebugStringTest, UnnamedStatements) {
auto expr = Leaf("a");
for (int i = 0; i < 10; ++i) {
expr = Add(expr, expr);
}
EXPECT_EQ(ToDebugString(expr),
"_1 = L.a + L.a + (L.a + L.a)\n"
"_2 = _1 + _1 + (_1 + _1)\n"
"_3 = _2 + _2 + (_2 + _2)\n"
"_4 = _3 + _3 + (_3 + _3)\n"
"_4 + _4 + (_4 + _4)");
}
TEST_F(ExprDebugStringTest, NonUniqueStatements) {
auto expr = Leaf("a");
for (int i = 0; i < 5; ++i) {
ASSERT_OK_AND_ASSIGN(expr, WithNameAnnotation(Add(expr, expr), "a"));
}
EXPECT_EQ(ToDebugString(expr),
"a._1 = L.a + L.a\n"
"a._2 = a._1 + a._1\n"
"a._3 = a._2 + a._2\n"
"a._4 = a._3 + a._3\n"
"a._5 = a._4 + a._4\n"
"a._5");
}
TEST_F(ExprDebugStringTest, ExponentialBlow) {
auto expr = Leaf("a");
for (int i = 0; i < 100; ++i) {
expr = Add(expr, expr);
}
EXPECT_LT(ToDebugString(expr).size(), 10000);
}
TEST_F(ExprDebugStringTest, Infix_Brackets) {
EXPECT_EQ(ToDebugString(Neg(Add(Leaf("u"), Leaf("v")))), "-(L.u + L.v)");
EXPECT_EQ(ToDebugString(Neg(Leaf("u"))), "-L.u");
EXPECT_EQ(ToDebugString(Mul(Leaf("u"), Leaf("x"))), "L.u * L.x");
EXPECT_EQ(ToDebugString(Mul(Add(Leaf("u"), Leaf("v")), Leaf("x"))),
"(L.u + L.v) * L.x");
EXPECT_EQ(ToDebugString(Mul(Leaf("u"), Add(Leaf("x"), Leaf("y")))),
"L.u * (L.x + L.y)");
EXPECT_EQ(
ToDebugString(Mul(Add(Leaf("u"), Leaf("v")), Add(Leaf("x"), Leaf("y")))),
"(L.u + L.v) * (L.x + L.y)");
}
TEST_F(ExprDebugStringTest, Infix_Unary_IncorrectArity) {
auto x = Leaf("x");
ASSERT_OK_AND_ASSIGN(auto op, LookupOperator("math.pos"));
EXPECT_EQ(ToDebugString(ExprNode::UnsafeMakeOperatorNode(op, {x}, {})),
"+L.x");
EXPECT_EQ(ToDebugString(ExprNode::UnsafeMakeOperatorNode(op, {x, x}, {})),
"M.math.pos(L.x, L.x)");
}
TEST_F(ExprDebugStringTest, Infix_Binary_IncorrectArity) {
auto x = Leaf("x");
ASSERT_OK_AND_ASSIGN(auto op, LookupOperator("math.add"));
EXPECT_EQ(ToDebugString(ExprNode::UnsafeMakeOperatorNode(op, {x, x}, {})),
"L.x + L.x");
EXPECT_EQ(ToDebugString(ExprNode::UnsafeMakeOperatorNode(op, {x, x, x}, {})),
"M.math.add(L.x, L.x, L.x)");
}
TEST_F(ExprDebugStringTest, Infix_NonRegisteredOperator) {
auto x = Leaf("x");
ASSERT_OK_AND_ASSIGN(auto op, LookupOperator("math.add"));
ASSERT_OK_AND_ASSIGN(auto op_impl, DecayRegisteredOperator(op));
EXPECT_EQ(ToDebugString(
ExprNode::UnsafeMakeOperatorNode(std::move(op), {x, x}, {})),
"L.x + L.x");
EXPECT_EQ(ToDebugString(ExprNode::UnsafeMakeOperatorNode(std::move(op_impl),
{x, x}, {})),
"math.add(L.x, L.x)");
}
TEST_F(ExprDebugStringTest, Infix_Unary_NegGroup) {
auto x = Leaf("x");
EXPECT_EQ(ToDebugString(Pos(x)), "+L.x");
EXPECT_EQ(ToDebugString(Pos(Pos(x))), "+(+L.x)");
EXPECT_EQ(ToDebugString(Neg(x)), "-L.x");
EXPECT_EQ(ToDebugString(Neg(Neg(x))), "-(-L.x)");
EXPECT_EQ(ToDebugString(Invert(x)), "~L.x");
EXPECT_EQ(ToDebugString(Invert(Invert(x))), "~(~L.x)");
EXPECT_EQ(ToDebugString(Pos(Neg(Invert(x)))), "+(-(~L.x))");
EXPECT_EQ(ToDebugString(Pos(Neg(Invert(Pos(Neg(Invert(x))))))),
"+(-(~(+(-(~L.x)))))");
}
TEST_F(ExprDebugStringTest, Infix_Binary_Pow) {
auto x = Leaf("x");
auto y = Leaf("y");
auto z = Leaf("z");
EXPECT_EQ(ToDebugString(Pow(x, y)), "L.x ** L.y");
EXPECT_EQ(ToDebugString(Pow(Pow(x, y), z)), "(L.x ** L.y) ** L.z");
EXPECT_EQ(ToDebugString(Pow(x, Pow(y, z))), "L.x ** L.y ** L.z");
EXPECT_EQ(ToDebugString(Neg(Pow(x, y))), "-(L.x ** L.y)");
EXPECT_EQ(ToDebugString(Pow(Neg(x), y)), "(-L.x) ** L.y");
EXPECT_EQ(ToDebugString(Pow(x, Neg(y))), "L.x ** -L.y");
}
TEST_F(ExprDebugStringTest, Infix_Binary_MulGroup) {
auto x = Leaf("x");
auto y = Leaf("y");
auto z = Leaf("z");
EXPECT_EQ(ToDebugString(Mul(x, y)), "L.x * L.y");
EXPECT_EQ(ToDebugString(Mul(Mul(x, y), z)), "L.x * L.y * L.z");
EXPECT_EQ(ToDebugString(Mul(x, Mul(y, z))), "L.x * (L.y * L.z)");
EXPECT_EQ(ToDebugString(TrueDiv(x, y)), "L.x / L.y");
EXPECT_EQ(ToDebugString(TrueDiv(TrueDiv(x, y), z)), "L.x / L.y / L.z");
EXPECT_EQ(ToDebugString(TrueDiv(x, TrueDiv(y, z))), "L.x / (L.y / L.z)");
  EXPECT_EQ(ToDebugString(FloorDiv(x, y)), "L.x // L.y");
  EXPECT_EQ(ToDebugString(FloorDiv(FloorDiv(x, y), z)), "L.x // L.y // L.z");
  EXPECT_EQ(ToDebugString(FloorDiv(x, FloorDiv(y, z))), "L.x // (L.y // L.z)");
EXPECT_EQ(ToDebugString(Mod(x, y)), "L.x % L.y");
EXPECT_EQ(ToDebugString(Mod(Mod(x, y), z)), "L.x % L.y % L.z");
EXPECT_EQ(ToDebugString(Mod(x, Mod(y, z))), "L.x % (L.y % L.z)");
EXPECT_EQ(ToDebugString(TrueDiv(Mul(x, y), z)), "L.x * L.y / L.z");
EXPECT_EQ(ToDebugString(Mul(x, TrueDiv(y, z))), "L.x * (L.y / L.z)");
EXPECT_EQ(ToDebugString(Mul(TrueDiv(x, y), z)), "L.x / L.y * L.z");
EXPECT_EQ(ToDebugString(TrueDiv(x, Mul(y, z))), "L.x / (L.y * L.z)");
  EXPECT_EQ(ToDebugString(FloorDiv(Mul(x, y), z)), "L.x * L.y // L.z");
  EXPECT_EQ(ToDebugString(Mul(x, FloorDiv(y, z))), "L.x * (L.y // L.z)");
  EXPECT_EQ(ToDebugString(Mul(FloorDiv(x, y), z)), "L.x // L.y * L.z");
  EXPECT_EQ(ToDebugString(FloorDiv(x, Mul(y, z))), "L.x // (L.y * L.z)");
EXPECT_EQ(ToDebugString(Mod(Mul(x, y), z)), "L.x * L.y % L.z");
EXPECT_EQ(ToDebugString(Mul(x, Mod(y, z))), "L.x * (L.y % L.z)");
EXPECT_EQ(ToDebugString(Mul(Mod(x, y), z)), "L.x % L.y * L.z");
EXPECT_EQ(ToDebugString(Mod(x, Mul(y, z))), "L.x % (L.y * L.z)");
EXPECT_EQ(ToDebugString(Pow(Mul(x, y), z)), "(L.x * L.y) ** L.z");
EXPECT_EQ(ToDebugString(Mul(x, Pow(y, z))), "L.x * L.y ** L.z");
EXPECT_EQ(ToDebugString(Mul(Pow(x, y), z)), "L.x ** L.y * L.z");
EXPECT_EQ(ToDebugString(Pow(x, Mul(y, z))), "L.x ** (L.y * L.z)");
EXPECT_EQ(ToDebugString(Neg(Mul(x, y))), "-(L.x * L.y)");
EXPECT_EQ(ToDebugString(Mul(Neg(x), y)), "-L.x * L.y");
EXPECT_EQ(ToDebugString(Mul(x, Neg(y))), "L.x * -L.y");
}
TEST_F(ExprDebugStringTest, Infix_Binary_AddGroup) {
auto x = Leaf("x");
auto y = Leaf("y");
auto z = Leaf("z");
EXPECT_EQ(ToDebugString(Add(x, y)), "L.x + L.y");
EXPECT_EQ(ToDebugString(Add(Add(x, y), z)), "L.x + L.y + L.z");
EXPECT_EQ(ToDebugString(Add(x, Add(y, z))), "L.x + (L.y + L.z)");
EXPECT_EQ(ToDebugString(Sub(x, y)), "L.x - L.y");
EXPECT_EQ(ToDebugString(Sub(Sub(x, y), z)), "L.x - L.y - L.z");
EXPECT_EQ(ToDebugString(Sub(x, Sub(y, z))), "L.x - (L.y - L.z)");
EXPECT_EQ(ToDebugString(Sub(Add(x, y), z)), "L.x + L.y - L.z");
EXPECT_EQ(ToDebugString(Add(x, Sub(y, z))), "L.x + (L.y - L.z)");
EXPECT_EQ(ToDebugString(Add(Sub(x, y), z)), "L.x - L.y + L.z");
EXPECT_EQ(ToDebugString(Sub(x, Add(y, z))), "L.x - (L.y + L.z)");
EXPECT_EQ(ToDebugString(Mul(Add(x, y), z)), "(L.x + L.y) * L.z");
EXPECT_EQ(ToDebugString(Add(x, Mul(y, z))), "L.x + L.y * L.z");
EXPECT_EQ(ToDebugString(Add(Mul(x, y), z)), "L.x * L.y + L.z");
EXPECT_EQ(ToDebugString(Mul(x, Add(y, z))), "L.x * (L.y + L.z)");
EXPECT_EQ(ToDebugString(Pow(Add(x, y), z)), "(L.x + L.y) ** L.z");
EXPECT_EQ(ToDebugString(Add(x, Pow(y, z))), "L.x + L.y ** L.z");
EXPECT_EQ(ToDebugString(Add(Pow(x, y), z)), "L.x ** L.y + L.z");
EXPECT_EQ(ToDebugString(Pow(x, Add(y, z))), "L.x ** (L.y + L.z)");
EXPECT_EQ(ToDebugString(Neg(Add(x, y))), "-(L.x + L.y)");
EXPECT_EQ(ToDebugString(Add(Neg(x), y)), "-L.x + L.y");
EXPECT_EQ(ToDebugString(Add(x, Neg(y))), "L.x + -L.y");
}
TEST_F(ExprDebugStringTest, Infix_Binary_And) {
auto x = Leaf("x");
auto y = Leaf("y");
auto z = Leaf("z");
EXPECT_EQ(ToDebugString(And(x, y)), "L.x & L.y");
EXPECT_EQ(ToDebugString(And(And(x, y), z)), "L.x & L.y & L.z");
EXPECT_EQ(ToDebugString(And(x, And(y, z))), "L.x & (L.y & L.z)");
EXPECT_EQ(ToDebugString(Add(And(x, y), z)), "(L.x & L.y) + L.z");
EXPECT_EQ(ToDebugString(And(x, Add(y, z))), "L.x & L.y + L.z");
EXPECT_EQ(ToDebugString(And(Add(x, y), z)), "L.x + L.y & L.z");
EXPECT_EQ(ToDebugString(Add(x, And(y, z))), "L.x + (L.y & L.z)");
EXPECT_EQ(ToDebugString(Mul(And(x, y), z)), "(L.x & L.y) * L.z");
EXPECT_EQ(ToDebugString(And(x, Mul(y, z))), "L.x & L.y * L.z");
EXPECT_EQ(ToDebugString(And(Mul(x, y), z)), "L.x * L.y & L.z");
EXPECT_EQ(ToDebugString(Mul(x, And(y, z))), "L.x * (L.y & L.z)");
EXPECT_EQ(ToDebugString(Pow(And(x, y), z)), "(L.x & L.y) ** L.z");
EXPECT_EQ(ToDebugString(And(x, Pow(y, z))), "L.x & L.y ** L.z");
EXPECT_EQ(ToDebugString(And(Pow(x, y), z)), "L.x ** L.y & L.z");
EXPECT_EQ(ToDebugString(Pow(x, And(y, z))), "L.x ** (L.y & L.z)");
EXPECT_EQ(ToDebugString(Neg(And(x, y))), "-(L.x & L.y)");
EXPECT_EQ(ToDebugString(And(Neg(x), y)), "-L.x & L.y");
EXPECT_EQ(ToDebugString(And(x, Neg(y))), "L.x & -L.y");
}
TEST_F(ExprDebugStringTest, Infix_Binary_Or) {
auto x = Leaf("x");
auto y = Leaf("y");
auto z = Leaf("z");
EXPECT_EQ(ToDebugString(Or(x, y)), "L.x | L.y");
EXPECT_EQ(ToDebugString(Or(Or(x, y), z)), "L.x | L.y | L.z");
EXPECT_EQ(ToDebugString(Or(x, Or(y, z))), "L.x | (L.y | L.z)");
EXPECT_EQ(ToDebugString(And(Or(x, y), z)), "(L.x | L.y) & L.z");
EXPECT_EQ(ToDebugString(Or(x, And(y, z))), "L.x | L.y & L.z");
EXPECT_EQ(ToDebugString(Or(And(x, y), z)), "L.x & L.y | L.z");
EXPECT_EQ(ToDebugString(And(x, Or(y, z))), "L.x & (L.y | L.z)");
EXPECT_EQ(ToDebugString(Add(Or(x, y), z)), "(L.x | L.y) + L.z");
EXPECT_EQ(ToDebugString(Or(x, Add(y, z))), "L.x | L.y + L.z");
EXPECT_EQ(ToDebugString(Or(Add(x, y), z)), "L.x + L.y | L.z");
EXPECT_EQ(ToDebugString(Add(x, Or(y, z))), "L.x + (L.y | L.z)");
EXPECT_EQ(ToDebugString(Mul(Or(x, y), z)), "(L.x | L.y) * L.z");
EXPECT_EQ(ToDebugString(Or(x, Mul(y, z))), "L.x | L.y * L.z");
EXPECT_EQ(ToDebugString(Or(Mul(x, y), z)), "L.x * L.y | L.z");
EXPECT_EQ(ToDebugString(Mul(x, Or(y, z))), "L.x * (L.y | L.z)");
EXPECT_EQ(ToDebugString(Pow(Or(x, y), z)), "(L.x | L.y) ** L.z");
EXPECT_EQ(ToDebugString(Or(x, Pow(y, z))), "L.x | L.y ** L.z");
EXPECT_EQ(ToDebugString(Or(Pow(x, y), z)), "L.x ** L.y | L.z");
EXPECT_EQ(ToDebugString(Pow(x, Or(y, z))), "L.x ** (L.y | L.z)");
EXPECT_EQ(ToDebugString(Neg(Or(x, y))), "-(L.x | L.y)");
EXPECT_EQ(ToDebugString(Or(Neg(x), y)), "-L.x | L.y");
EXPECT_EQ(ToDebugString(Or(x, Neg(y))), "L.x | -L.y");
}
TEST_F(ExprDebugStringTest, Infix_Binary_LtGroup) {
auto x = Leaf("x");
auto y = Leaf("y");
auto z = Leaf("z");
EXPECT_EQ(ToDebugString(Lt(x, y)), "L.x < L.y");
EXPECT_EQ(ToDebugString(Lt(Lt(x, y), z)), "(L.x < L.y) < L.z");
EXPECT_EQ(ToDebugString(Lt(x, Lt(y, z))), "L.x < (L.y < L.z)");
EXPECT_EQ(ToDebugString(Le(x, y)), "L.x <= L.y");
EXPECT_EQ(ToDebugString(Le(Le(x, y), z)), "(L.x <= L.y) <= L.z");
EXPECT_EQ(ToDebugString(Le(x, Le(y, z))), "L.x <= (L.y <= L.z)");
EXPECT_EQ(ToDebugString(Eq(x, y)), "L.x == L.y");
EXPECT_EQ(ToDebugString(Eq(Eq(x, y), z)), "(L.x == L.y) == L.z");
EXPECT_EQ(ToDebugString(Eq(x, Eq(y, z))), "L.x == (L.y == L.z)");
EXPECT_EQ(ToDebugString(Neq(x, y)), "L.x != L.y");
EXPECT_EQ(ToDebugString(Neq(Neq(x, y), z)), "(L.x != L.y) != L.z");
EXPECT_EQ(ToDebugString(Neq(x, Neq(y, z))), "L.x != (L.y != L.z)");
EXPECT_EQ(ToDebugString(Ge(x, y)), "L.x >= L.y");
EXPECT_EQ(ToDebugString(Ge(Ge(x, y), z)), "(L.x >= L.y) >= L.z");
EXPECT_EQ(ToDebugString(Ge(x, Ge(y, z))), "L.x >= (L.y >= L.z)");
EXPECT_EQ(ToDebugString(Gt(x, y)), "L.x > L.y");
EXPECT_EQ(ToDebugString(Gt(Gt(x, y), z)), "(L.x > L.y) > L.z");
EXPECT_EQ(ToDebugString(Gt(x, Gt(y, z))), "L.x > (L.y > L.z)");
EXPECT_EQ(ToDebugString(Le(Lt(x, y), z)), "(L.x < L.y) <= L.z");
EXPECT_EQ(ToDebugString(Lt(x, Le(y, z))), "L.x < (L.y <= L.z)");
EXPECT_EQ(ToDebugString(Lt(Le(x, y), z)), "(L.x <= L.y) < L.z");
EXPECT_EQ(ToDebugString(Le(x, Lt(y, z))), "L.x <= (L.y < L.z)");
EXPECT_EQ(ToDebugString(Eq(Lt(x, y), z)), "(L.x < L.y) == L.z");
EXPECT_EQ(ToDebugString(Lt(x, Eq(y, z))), "L.x < (L.y == L.z)");
EXPECT_EQ(ToDebugString(Lt(Eq(x, y), z)), "(L.x == L.y) < L.z");
EXPECT_EQ(ToDebugString(Eq(x, Lt(y, z))), "L.x == (L.y < L.z)");
EXPECT_EQ(ToDebugString(Neq(Lt(x, y), z)), "(L.x < L.y) != L.z");
EXPECT_EQ(ToDebugString(Lt(x, Neq(y, z))), "L.x < (L.y != L.z)");
EXPECT_EQ(ToDebugString(Lt(Neq(x, y), z)), "(L.x != L.y) < L.z");
EXPECT_EQ(ToDebugString(Neq(x, Lt(y, z))), "L.x != (L.y < L.z)");
EXPECT_EQ(ToDebugString(Ge(Lt(x, y), z)), "(L.x < L.y) >= L.z");
EXPECT_EQ(ToDebugString(Lt(x, Ge(y, z))), "L.x < (L.y >= L.z)");
EXPECT_EQ(ToDebugString(Lt(Ge(x, y), z)), "(L.x >= L.y) < L.z");
EXPECT_EQ(ToDebugString(Ge(x, Lt(y, z))), "L.x >= (L.y < L.z)");
EXPECT_EQ(ToDebugString(Gt(Lt(x, y), z)), "(L.x < L.y) > L.z");
EXPECT_EQ(ToDebugString(Lt(x, Gt(y, z))), "L.x < (L.y > L.z)");
EXPECT_EQ(ToDebugString(Lt(Gt(x, y), z)), "(L.x > L.y) < L.z");
EXPECT_EQ(ToDebugString(Gt(x, Lt(y, z))), "L.x > (L.y < L.z)");
EXPECT_EQ(ToDebugString(Or(Lt(x, y), z)), "(L.x < L.y) | L.z");
EXPECT_EQ(ToDebugString(Lt(x, Or(y, z))), "L.x < L.y | L.z");
EXPECT_EQ(ToDebugString(Lt(Or(x, y), z)), "L.x | L.y < L.z");
EXPECT_EQ(ToDebugString(Or(x, Lt(y, z))), "L.x | (L.y < L.z)");
EXPECT_EQ(ToDebugString(And(Lt(x, y), z)), "(L.x < L.y) & L.z");
EXPECT_EQ(ToDebugString(Lt(x, And(y, z))), "L.x < L.y & L.z");
EXPECT_EQ(ToDebugString(Lt(And(x, y), z)), "L.x & L.y < L.z");
EXPECT_EQ(ToDebugString(And(x, Lt(y, z))), "L.x & (L.y < L.z)");
EXPECT_EQ(ToDebugString(Add(Lt(x, y), z)), "(L.x < L.y) + L.z");
EXPECT_EQ(ToDebugString(Lt(x, Add(y, z))), "L.x < L.y + L.z");
EXPECT_EQ(ToDebugString(Lt(Add(x, y), z)), "L.x + L.y < L.z");
EXPECT_EQ(ToDebugString(Add(x, Lt(y, z))), "L.x + (L.y < L.z)");
EXPECT_EQ(ToDebugString(Mul(Lt(x, y), z)), "(L.x < L.y) * L.z");
EXPECT_EQ(ToDebugString(Lt(x, Mul(y, z))), "L.x < L.y * L.z");
EXPECT_EQ(ToDebugString(Lt(Mul(x, y), z)), "L.x * L.y < L.z");
EXPECT_EQ(ToDebugString(Mul(x, Lt(y, z))), "L.x * (L.y < L.z)");
EXPECT_EQ(ToDebugString(Pow(Lt(x, y), z)), "(L.x < L.y) ** L.z");
EXPECT_EQ(ToDebugString(Lt(x, Pow(y, z))), "L.x < L.y ** L.z");
EXPECT_EQ(ToDebugString(Lt(Pow(x, y), z)), "L.x ** L.y < L.z");
EXPECT_EQ(ToDebugString(Pow(x, Lt(y, z))), "L.x ** (L.y < L.z)");
EXPECT_EQ(ToDebugString(Neg(Lt(x, y))), "-(L.x < L.y)");
EXPECT_EQ(ToDebugString(Lt(Neg(x), y)), "-L.x < L.y");
EXPECT_EQ(ToDebugString(Lt(x, Neg(y))), "L.x < -L.y");
}
TEST_F(ExprDebugStringTest, Infix_GetAttr) {
auto x = Leaf("x");
auto y = Leaf("y");
auto one = Literal<int>(1);
auto foo = Literal(Text("foo"));
auto bar = Literal(Text("bar"));
EXPECT_EQ(ToDebugString(GetAttr(x, foo)), "L.x.foo");
EXPECT_EQ(ToDebugString(GetAttr(GetAttr(x, foo), bar)), "L.x.foo.bar");
EXPECT_EQ(ToDebugString(GetAttr(one, foo)), "(1).foo");
EXPECT_EQ(ToDebugString(GetAttr(foo, bar)), "'foo'.bar");
EXPECT_EQ(ToDebugString(Lt(GetAttr(x, foo), y)), "L.x.foo < L.y");
EXPECT_EQ(ToDebugString(Lt(x, GetAttr(y, bar))), "L.x < L.y.bar");
EXPECT_EQ(ToDebugString(GetAttr(Lt(x, y), foo)), "(L.x < L.y).foo");
EXPECT_EQ(ToDebugString(Or(GetAttr(x, foo), y)), "L.x.foo | L.y");
EXPECT_EQ(ToDebugString(Or(x, GetAttr(y, bar))), "L.x | L.y.bar");
EXPECT_EQ(ToDebugString(GetAttr(Or(x, y), foo)), "(L.x | L.y).foo");
EXPECT_EQ(ToDebugString(And(GetAttr(x, foo), y)), "L.x.foo & L.y");
EXPECT_EQ(ToDebugString(And(x, GetAttr(y, bar))), "L.x & L.y.bar");
EXPECT_EQ(ToDebugString(GetAttr(And(x, y), foo)), "(L.x & L.y).foo");
EXPECT_EQ(ToDebugString(Add(GetAttr(x, foo), y)), "L.x.foo + L.y");
EXPECT_EQ(ToDebugString(Add(x, GetAttr(y, bar))), "L.x + L.y.bar");
EXPECT_EQ(ToDebugString(GetAttr(Add(x, y), foo)), "(L.x + L.y).foo");
EXPECT_EQ(ToDebugString(Mul(GetAttr(x, foo), y)), "L.x.foo * L.y");
EXPECT_EQ(ToDebugString(Mul(x, GetAttr(y, bar))), "L.x * L.y.bar");
EXPECT_EQ(ToDebugString(GetAttr(Mul(x, y), foo)), "(L.x * L.y).foo");
EXPECT_EQ(ToDebugString(Pow(GetAttr(x, foo), y)), "L.x.foo ** L.y");
EXPECT_EQ(ToDebugString(Pow(x, GetAttr(y, bar))), "L.x ** L.y.bar");
EXPECT_EQ(ToDebugString(GetAttr(Pow(x, y), foo)), "(L.x ** L.y).foo");
EXPECT_EQ(ToDebugString(Neg(GetAttr(x, foo))), "-L.x.foo");
EXPECT_EQ(ToDebugString(GetAttr(Neg(x), foo)), "(-L.x).foo");
}
TEST_F(ExprDebugStringTest, Infix_GetItem) {
auto x = Leaf("x");
auto y = Leaf("y");
auto one = Literal<int>(1);
auto foo = Literal(Text("foo"));
auto bar = Literal(Text("bar"));
EXPECT_EQ(ToDebugString(GetItem(x, foo)), "L.x['foo']");
EXPECT_EQ(ToDebugString(GetItem(x, y)), "L.x[L.y]");
EXPECT_EQ(ToDebugString(GetItem(GetItem(x, foo), bar)), "L.x['foo']['bar']");
EXPECT_EQ(ToDebugString(GetItem(one, foo)), "(1)['foo']");
EXPECT_EQ(ToDebugString(GetItem(foo, bar)), "'foo'['bar']");
EXPECT_EQ(ToDebugString(GetItem(CallOp("math.max", {x, y}).value(), bar)),
"M.math.max(L.x, L.y)['bar']");
EXPECT_EQ(ToDebugString(Lt(GetItem(x, foo), y)), "L.x['foo'] < L.y");
EXPECT_EQ(ToDebugString(Lt(x, GetItem(y, bar))), "L.x < L.y['bar']");
EXPECT_EQ(ToDebugString(GetItem(Lt(x, y), foo)), "(L.x < L.y)['foo']");
EXPECT_EQ(ToDebugString(Or(GetItem(x, foo), y)), "L.x['foo'] | L.y");
EXPECT_EQ(ToDebugString(Or(x, GetItem(y, bar))), "L.x | L.y['bar']");
EXPECT_EQ(ToDebugString(GetItem(Or(x, y), foo)), "(L.x | L.y)['foo']");
EXPECT_EQ(ToDebugString(And(GetItem(x, foo), y)), "L.x['foo'] & L.y");
EXPECT_EQ(ToDebugString(And(x, GetItem(y, bar))), "L.x & L.y['bar']");
EXPECT_EQ(ToDebugString(GetItem(And(x, y), foo)), "(L.x & L.y)['foo']");
EXPECT_EQ(ToDebugString(Add(GetItem(x, foo), y)), "L.x['foo'] + L.y");
EXPECT_EQ(ToDebugString(Add(x, GetItem(y, bar))), "L.x + L.y['bar']");
EXPECT_EQ(ToDebugString(GetItem(Add(x, y), foo)), "(L.x + L.y)['foo']");
EXPECT_EQ(ToDebugString(Mul(GetItem(x, foo), y)), "L.x['foo'] * L.y");
EXPECT_EQ(ToDebugString(Mul(x, GetItem(y, bar))), "L.x * L.y['bar']");
EXPECT_EQ(ToDebugString(GetItem(Mul(x, y), foo)), "(L.x * L.y)['foo']");
EXPECT_EQ(ToDebugString(Pow(GetItem(x, foo), y)), "L.x['foo'] ** L.y");
EXPECT_EQ(ToDebugString(Pow(x, GetItem(y, bar))), "L.x ** L.y['bar']");
EXPECT_EQ(ToDebugString(GetItem(Pow(x, y), foo)), "(L.x ** L.y)['foo']");
EXPECT_EQ(ToDebugString(Neg(GetItem(x, foo))), "-L.x['foo']");
EXPECT_EQ(ToDebugString(GetItem(Neg(x), foo)), "(-L.x)['foo']");
EXPECT_EQ(ToDebugString(GetAttr(GetItem(x, foo), bar)), "L.x['foo'].bar");
EXPECT_EQ(ToDebugString(GetItem(x, GetAttr(y, foo))), "L.x[L.y.foo]");
EXPECT_EQ(ToDebugString(GetItem(GetAttr(x, foo), bar)), "L.x.foo['bar']");
EXPECT_EQ(ToDebugString(GetItem(x, MakeSlice(one, foo, bar))),
"L.x[1:'foo':'bar']");
}
TEST_F(ExprDebugStringTest, Infix_MakeSlice) {
auto x = Leaf("x");
auto y = Leaf("y");
auto u = Literal(GetUnspecifiedQValue());
auto one = Literal<int>(1);
auto two = Literal<int>(2);
auto three = Literal<int>(3);
EXPECT_EQ(ToDebugString(MakeSlice(u, u, u)),
"M.core.make_slice(unspecified, unspecified, unspecified)");
EXPECT_EQ(ToDebugString(MakeSlice(one, two, three)),
"M.core.make_slice(1, 2, 3)");
EXPECT_EQ(ToDebugString(GetItem(x, MakeSlice(u, u, u))), "L.x[:]");
EXPECT_EQ(ToDebugString(GetItem(x, MakeSlice(one, u, u))), "L.x[1:]");
EXPECT_EQ(ToDebugString(GetItem(x, MakeSlice(u, one, u))), "L.x[:1]");
EXPECT_EQ(ToDebugString(GetItem(x, MakeSlice(u, u, one))), "L.x[::1]");
EXPECT_EQ(ToDebugString(GetItem(x, MakeSlice(one, two, u))), "L.x[1:2]");
EXPECT_EQ(ToDebugString(GetItem(x, MakeSlice(one, u, two))), "L.x[1::2]");
EXPECT_EQ(ToDebugString(GetItem(x, MakeSlice(u, one, two))), "L.x[:1:2]");
EXPECT_EQ(ToDebugString(GetItem(x, MakeSlice(one, two, three))),
"L.x[1:2:3]");
EXPECT_EQ(ToDebugString(GetItem(x, MakeSlice(Add(one, x), two, three))),
"L.x[1 + L.x:2:3]");
EXPECT_EQ(ToDebugString(GetItem(x, MakeSlice(one, Add(two, x), three))),
"L.x[1:2 + L.x:3]");
EXPECT_EQ(ToDebugString(GetItem(x, MakeSlice(one, two, Add(three, x)))),
"L.x[1:2:3 + L.x]");
EXPECT_EQ(ToDebugString(GetItem(x, MakeSlice(Gt(one, x), two, three))),
"L.x[1 > L.x:2:3]");
EXPECT_EQ(ToDebugString(GetItem(x, MakeSlice(one, Gt(two, x), three))),
"L.x[1:2 > L.x:3]");
EXPECT_EQ(ToDebugString(GetItem(x, MakeSlice(one, two, Gt(three, x)))),
"L.x[1:2:3 > L.x]");
auto d = Literal(DummyWithPrecedence{});
EXPECT_EQ(ToDebugString(GetItem(x, MakeSlice(d, u, u))),
"L.x[dummy-with-precedence:]");
EXPECT_EQ(ToDebugString(GetItem(x, MakeSlice(u, d, u))),
"L.x[:dummy-with-precedence]");
EXPECT_EQ(ToDebugString(GetItem(x, MakeSlice(u, u, d))),
"L.x[::dummy-with-precedence]");
auto d11 =
Literal(DummyWithPrecedence{.precedence = ReprToken::Precedence{11, 11}});
EXPECT_EQ(ToDebugString(GetItem(x, MakeSlice(d11, u, u))),
"L.x[(dummy-with-precedence):]");
EXPECT_EQ(ToDebugString(GetItem(x, MakeSlice(u, d11, u))),
"L.x[:(dummy-with-precedence)]");
EXPECT_EQ(ToDebugString(GetItem(x, MakeSlice(u, u, d11))),
"L.x[::(dummy-with-precedence)]");
EXPECT_EQ(ToDebugString(GetItem(x, MakeSlice(d11, d11, u))),
"L.x[(dummy-with-precedence):(dummy-with-precedence)]");
EXPECT_EQ(ToDebugString(GetItem(x, MakeSlice(d11, u, d11))),
"L.x[(dummy-with-precedence)::(dummy-with-precedence)]");
EXPECT_EQ(ToDebugString(GetItem(x, MakeSlice(u, d11, d11))),
"L.x[:(dummy-with-precedence):(dummy-with-precedence)]");
EXPECT_EQ(ToDebugString(GetItem(x, MakeSlice(d11, d11, d11))),
"L.x[(dummy-with-precedence):(dummy-with-precedence):(dummy-with-"
"precedence)]");
}
TEST_F(ExprDebugStringTest, Infix_Binary_NonInfix) {
auto x = Leaf("x");
auto foo = Literal(Text("foo"));
ASSERT_OK_AND_ASSIGN(auto op, LookupOperator("core.getattr"));
EXPECT_EQ(ToDebugString(ExprNode::UnsafeMakeOperatorNode(op, {x, x}, {})),
"M.core.getattr(L.x, L.x)");
EXPECT_EQ(ToDebugString(ExprNode::UnsafeMakeOperatorNode(
op, {x, Literal(Bytes("bar"))}, {})),
"M.core.getattr(L.x, b'bar')");
EXPECT_EQ(ToDebugString(ExprNode::UnsafeMakeOperatorNode(op, {}, {})),
"M.core.getattr()");
EXPECT_EQ(
ToDebugString(ExprNode::UnsafeMakeOperatorNode(op, {foo, foo, foo}, {})),
"M.core.getattr('foo', 'foo', 'foo')");
}
TEST_F(ExprDebugStringTest, Infix_NegativeLiteralRegression) {
auto x = Leaf("x");
EXPECT_EQ(ToDebugString(Pow(Literal<int>(2), x)), "2 ** L.x");
EXPECT_EQ(ToDebugString(Pow(Literal<float>(2.), x)), "2. ** L.x");
EXPECT_EQ(ToDebugString(Pow(Literal<double>(2.), x)), "float64{2} ** L.x");
EXPECT_EQ(ToDebugString(Pow(Literal<int>(-1), x)), "(-1) ** L.x");
EXPECT_EQ(ToDebugString(Pow(Literal<float>(-1.), x)), "(-1.) ** L.x");
EXPECT_EQ(ToDebugString(Pow(Literal<double>(-1.), x)), "float64{-1} ** L.x");
EXPECT_EQ(ToDebugString(Pow(x, Literal<int>(-1))), "L.x ** -1");
EXPECT_EQ(ToDebugString(Pow(x, Literal<float>(-1.))), "L.x ** -1.");
EXPECT_EQ(ToDebugString(Pow(x, Literal<double>(-1.))), "L.x ** float64{-1}");
EXPECT_EQ(ToDebugString(Pow(x, Literal<int>(2))), "L.x ** 2");
EXPECT_EQ(ToDebugString(Pow(x, Literal<float>(2.))), "L.x ** 2.");
EXPECT_EQ(ToDebugString(Pow(x, Literal<double>(2.))), "L.x ** float64{2}");
EXPECT_EQ(ToDebugString(Neg(Literal<int>(-1))), "-(-1)");
EXPECT_EQ(ToDebugString(Neg(Literal<float>(-1))), "-(-1.)");
EXPECT_EQ(ToDebugString(Neg(Literal<double>(-1))), "-float64{-1}");
EXPECT_EQ(ToDebugString(Neg(Literal<int>(2))), "-2");
EXPECT_EQ(ToDebugString(Neg(Literal<float>(2))), "-2.");
EXPECT_EQ(ToDebugString(Neg(Literal<double>(2))), "-float64{2}");
}
TEST_F(ExprDebugStringTest, CustomOpRepr) {
auto x = Leaf("x");
auto y = Leaf("y");
auto expr = Dummy(x, y);
{
EXPECT_EQ(ToDebugString(expr), "custom.add(L.x, L.y)");
}
{
auto repr_fn =
[](const ExprNodePtr& node,
const absl::flat_hash_map<Fingerprint, ReprToken>& node_tokens)
-> std::optional<ReprToken> {
const auto& lhs_str =
node_tokens.at(node->node_deps()[0]->fingerprint()).str;
const auto& rhs_str =
node_tokens.at(node->node_deps()[1]->fingerprint()).str;
auto res = absl::StrFormat("%s + %s", lhs_str, rhs_str);
return ReprToken{.str = std::move(res)};
};
RegisterOpReprFnByQValueSpecializationKey(
std::string(expr->op()->py_qvalue_specialization_key()), repr_fn);
EXPECT_EQ(ToDebugString(expr), "L.x + L.y");
}
{
auto repr_fn =
[](ExprNodePtr node,
const absl::flat_hash_map<Fingerprint, ReprToken>& node_tokens)
-> std::optional<ReprToken> { return std::nullopt; };
RegisterOpReprFnByQValueSpecializationKey(
std::string(expr->op()->py_qvalue_specialization_key()), repr_fn);
EXPECT_EQ(ToDebugString(expr), "custom.add(L.x, L.y)");
}
}
TEST_F(ExprDebugStringTest, GetDebugSnippet) {
auto expr = Leaf("x");
EXPECT_EQ(GetDebugSnippet(expr), "L.x");
ASSERT_OK_AND_ASSIGN(auto typed_expr,
WithQTypeAnnotation(Leaf("x"), GetQType<int32_t>()));
EXPECT_EQ(GetDebugSnippet(typed_expr), "M.annotation.qtype(L.x, INT32)");
ASSERT_OK_AND_ASSIGN(auto named_expr, WithNameAnnotation(expr, "xxx"));
EXPECT_EQ(GetDebugSnippet(named_expr), "M.annotation.name(L.x, 'xxx')");
auto big_expr = Leaf("x");
for (int i = 0; i < 100; ++i) {
big_expr = Add(big_expr, big_expr);
}
EXPECT_EQ(GetDebugSnippet(big_expr),
"M.math.add(M.math.add(..., ...), M.math.add(..., ...))");
ASSERT_OK_AND_ASSIGN(auto big_typed_expr,
WithQTypeAnnotation(Leaf("x"), GetQType<int32_t>()));
for (int i = 0; i < 100; ++i) {
big_typed_expr = Add(big_typed_expr, big_typed_expr);
}
EXPECT_EQ(GetDebugSnippet(big_typed_expr),
("M.math.add(M.math.add(..., ...):INT32, M.math.add(..., "
"...):INT32):INT32"));
}
void BM_GetDebugSnippet_Leaf(benchmark::State& state) {
InitArolla();
auto expr = Leaf("x");
for (auto s : state) {
auto x = GetDebugSnippet(expr);
benchmark::DoNotOptimize(x);
}
}
BENCHMARK(BM_GetDebugSnippet_Leaf);
void BM_GetDebugSnippet_Literal(benchmark::State& state) {
InitArolla();
auto expr = Literal(57);
for (auto s : state) {
auto x = GetDebugSnippet(expr);
benchmark::DoNotOptimize(x);
}
}
BENCHMARK(BM_GetDebugSnippet_Literal);
void BM_GetDebugSnippet_Small(benchmark::State& state) {
InitArolla();
auto expr = WithQTypeAnnotation(Leaf("x"), GetQType<int32_t>()).value();
expr = CallOp("math.add", {Literal(57), Leaf("x")}).value();
for (auto s : state) {
auto x = GetDebugSnippet(expr);
benchmark::DoNotOptimize(x);
}
}
BENCHMARK(BM_GetDebugSnippet_Small);
void BM_GetDebugSnippet_Big(benchmark::State& state) {
InitArolla();
auto expr = WithQTypeAnnotation(Leaf("x"), GetQType<int32_t>()).value();
for (int i = 0; i < 100; ++i) {
expr = CallOp("math.add", {expr, Leaf("x")}).value();
expr = CallOp("math.add", {expr, Literal(57)}).value();
expr = CallOp("math.add", {expr, expr}).value();
}
for (auto s : state) {
auto x = GetDebugSnippet(expr);
benchmark::DoNotOptimize(x);
}
}
BENCHMARK(BM_GetDebugSnippet_Big);
void BM_ToDebugString_Leaf(benchmark::State& state) {
InitArolla();
auto expr = Leaf("x");
for (auto s : state) {
auto x = ToDebugString(expr);
benchmark::DoNotOptimize(x);
}
}
BENCHMARK(BM_ToDebugString_Leaf);
void BM_ToDebugString_Literal(benchmark::State& state) {
InitArolla();
auto expr = Literal(57);
for (auto s : state) {
auto x = ToDebugString(expr);
benchmark::DoNotOptimize(x);
}
}
BENCHMARK(BM_ToDebugString_Literal);
void BM_ToDebugString_Small(benchmark::State& state) {
InitArolla();
auto expr = WithQTypeAnnotation(Leaf("x"), GetQType<int32_t>()).value();
expr = CallOp("math.maximum", {Literal(57), Leaf("x")}).value();
for (auto s : state) {
auto x = ToDebugString(expr);
benchmark::DoNotOptimize(x);
}
}
BENCHMARK(BM_ToDebugString_Small);
void BM_ToDebugString_Big(benchmark::State& state) {
InitArolla();
auto expr = WithQTypeAnnotation(Leaf("x"), GetQType<int32_t>()).value();
for (int i = 0; i < 100; ++i) {
expr = CallOp("math.maximum", {expr, Leaf("x")}).value();
expr = CallOp("math.maximum", {expr, Literal(57)}).value();
expr = CallOp("math.maximum", {expr, expr}).value();
}
for (auto s : state) {
auto x = ToDebugString(expr);
benchmark::DoNotOptimize(x);
}
}
BENCHMARK(BM_ToDebugString_Big);
void BM_ToDebugString_Small_Verbose(benchmark::State& state) {
InitArolla();
auto expr = WithQTypeAnnotation(Leaf("x"), GetQType<int32_t>()).value();
expr = CallOp("math.maximum", {Literal(57), Leaf("x")}).value();
for (auto s : state) {
auto x = ToDebugString(expr, true);
benchmark::DoNotOptimize(x);
}
}
BENCHMARK(BM_ToDebugString_Small_Verbose);
void BM_ToDebugString_Big_Verbose(benchmark::State& state) {
InitArolla();
auto expr = WithQTypeAnnotation(Leaf("x"), GetQType<int32_t>()).value();
for (int i = 0; i < 100; ++i) {
expr = CallOp("math.maximum", {expr, Leaf("x")}).value();
expr = CallOp("math.maximum", {expr, Literal(57)}).value();
expr = CallOp("math.maximum", {expr, expr}).value();
}
for (auto s : state) {
auto x = ToDebugString(expr, true);
benchmark::DoNotOptimize(x);
}
}
BENCHMARK(BM_ToDebugString_Big_Verbose);
void BM_ToDebugString_Big_Infix(benchmark::State& state) {
InitArolla();
auto expr = WithQTypeAnnotation(Leaf("x"), GetQType<int32_t>()).value();
for (int i = 0; i < 100; ++i) {
expr = CallOp("math.add", {expr, Leaf("x")}).value();
expr = CallOp("math.add", {expr, Literal(57)}).value();
expr = CallOp("math.add", {expr, expr}).value();
}
for (auto s : state) {
auto x = ToDebugString(expr);
benchmark::DoNotOptimize(x);
}
}
BENCHMARK(BM_ToDebugString_Big_Infix);
void BM_ToDebugString_CustomReprBig(benchmark::State& state) {
InitArolla();
auto x = WithQTypeAnnotation(Leaf("x"), GetQType<int32_t>()).value();
auto foo_bar = std::make_shared<testing::DummyOp>(
"foo.bar", ExprOperatorSignature({{"x"}, {"y"}}));
auto expr =
ExprNode::UnsafeMakeOperatorNode(foo_bar, {x, x}, ExprAttributes());
auto repr_fn =
[](const ExprNodePtr& node,
const absl::flat_hash_map<Fingerprint, ReprToken>& node_tokens)
-> std::optional<ReprToken> {
const auto& lhs_str =
node_tokens.at(node->node_deps()[0]->fingerprint()).str;
const auto& rhs_str =
node_tokens.at(node->node_deps()[1]->fingerprint()).str;
auto res = absl::StrFormat("foo.bar(%s, %s)", lhs_str, rhs_str);
return ReprToken{.str = std::move(res)};
};
RegisterOpReprFnByQValueSpecializationKey(
std::string(expr->op()->py_qvalue_specialization_key()), repr_fn);
for (int i = 0; i < 100; ++i) {
expr = ExprNode::UnsafeMakeOperatorNode(foo_bar, {expr, Leaf("x")},
ExprAttributes());
expr = ExprNode::UnsafeMakeOperatorNode(foo_bar, {expr, Literal(57)},
ExprAttributes());
expr = ExprNode::UnsafeMakeOperatorNode(foo_bar, {expr, expr},
ExprAttributes());
}
for (auto s : state) {
auto x = ToDebugString(expr);
benchmark::DoNotOptimize(x);
}
}
BENCHMARK(BM_ToDebugString_CustomReprBig);
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/expr/expr_debug_string.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/expr/expr_debug_string_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
573867bc-7633-461e-a4f3-a42efd5de27b | cpp | tensorflow/tensorflow | tf_allocator_adapter | third_party/xla/xla/stream_executor/integrations/tf_allocator_adapter.cc | third_party/xla/xla/stream_executor/integrations/tf_allocator_adapter_test.cc | #include "xla/stream_executor/integrations/tf_allocator_adapter.h"
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/device_memory_allocator.h"
#include "xla/stream_executor/platform.h"
#include "xla/stream_executor/stream.h"
#include "xla/stream_executor/stream_executor.h"
#include "xla/tsl/framework/allocator.h"
namespace stream_executor {
TfAllocatorAdapter::TfAllocatorAdapter(tsl::Allocator *wrapped, Stream *stream)
: DeviceMemoryAllocator(stream->parent()->GetPlatform()),
wrapped_(wrapped),
stream_(stream) {}
TfAllocatorAdapter::TfAllocatorAdapter(tsl::Allocator *wrapped,
Platform *platform)
: DeviceMemoryAllocator(platform), wrapped_(wrapped), stream_(nullptr) {}
TfAllocatorAdapter::~TfAllocatorAdapter() {}
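// Allocates through the wrapped TF allocator; a null result for a nonzero
// size is surfaced as a RESOURCE_EXHAUSTED error.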
absl::StatusOr<OwningDeviceMemory> TfAllocatorAdapter::Allocate(
int device_ordinal, uint64_t size, bool retry_on_failure,
int64_t memory_space) {
tsl::AllocationAttributes attrs;
attrs.retry_on_failure = retry_on_failure;
void *data = nullptr;
if (size != 0) {
data =
wrapped_->AllocateRaw(tsl::Allocator::kAllocatorAlignment, size, attrs);
if (data == nullptr) {
return absl::ResourceExhaustedError(absl::StrCat(
"Out of memory while trying to allocate ", size, " bytes."));
}
}
return OwningDeviceMemory(DeviceMemoryBase(data, size), device_ordinal, this);
}
absl::Status TfAllocatorAdapter::Deallocate(int device_ordinal,
DeviceMemoryBase mem) {
wrapped_->DeallocateRaw(mem.opaque());
return absl::OkStatus();
}
absl::StatusOr<Stream *> TfAllocatorAdapter::GetStream(int device_ordinal) {
CHECK_EQ(stream_->parent()->device_ordinal(), device_ordinal);
return stream_;
}
absl::StatusOr<tsl::Allocator *> TfAllocatorAdapter::GetAllocator(
int device_ordinal) {
if (stream_ == nullptr) {
return absl::UnavailableError("stream_ is null for TfAllocatorAdapter.");
}
if (stream_->parent()->device_ordinal() != device_ordinal) {
return absl::InternalError(
absl::StrCat("stream_->parent()->device_ordinal() ",
stream_->parent()->device_ordinal(),
" not equal to device_ordinal ", device_ordinal));
}
return wrapped_;
}
} | #include "xla/stream_executor/integrations/tf_allocator_adapter.h"
#include <cstddef>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/container/node_hash_set.h"
#include "absl/log/check.h"
#include "xla/service/platform_util.h"
#include "xla/stream_executor/device_memory_allocator.h"
#include "xla/stream_executor/platform.h"
#include "xla/stream_executor/stream.h"
#include "xla/stream_executor/stream_executor.h"
#include "xla/tsl/framework/allocator.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace se = stream_executor;
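// A fake allocator that hands out monotonically increasing addresses starting
// just above `start_address` and tracks live allocations (optionally shared
// between instances) so that tests can detect double frees.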
class TestAllocator : public tsl::Allocator {
public:
explicit TestAllocator(
size_t start_address,
std::shared_ptr<absl::flat_hash_set<void*>> allocations = nullptr)
: start_address_(start_address), allocations_(allocations) {
if (allocations_ == nullptr) {
allocations_ = std::make_shared<absl::flat_hash_set<void*>>();
}
}
std::string Name() override { return "test"; }
void* AllocateRaw(size_t alignment, size_t num_bytes) override {
void* ptr = reinterpret_cast<void*>(++start_address_);
allocations_->insert(ptr);
return ptr;
}
void DeallocateRaw(void* ptr) override {
auto it = allocations_->find(ptr);
if (it == allocations_->end()) {
ADD_FAILURE() << "Allocation not found (double free?)";
} else {
allocations_->erase(it);
}
}
private:
size_t start_address_;
std::shared_ptr<absl::flat_hash_set<void*>> allocations_;
};
TEST(MultiDeviceAdapter, UsesCorrectAllocator) {
TF_ASSERT_OK_AND_ASSIGN(auto* platform,
xla::PlatformUtil::GetDefaultPlatform());
TF_ASSERT_OK_AND_ASSIGN(std::vector<se::StreamExecutor*> executors,
                          xla::PlatformUtil::GetStreamExecutors(platform));
TF_ASSERT_OK_AND_ASSIGN(auto stream, executors[0]->CreateStream());
std::vector<se::MultiDeviceAdapter::AllocatorInfo> infos;
  infos.emplace_back(std::make_unique<TestAllocator>(0x1000), stream.get(),
                     /*memory_space=*/0, /*device_ordinal=*/0);
  infos.emplace_back(std::make_unique<TestAllocator>(0x2000), stream.get(),
                     /*memory_space=*/0, /*device_ordinal=*/1);
  infos.emplace_back(std::make_unique<TestAllocator>(0x3000), stream.get(),
                     /*memory_space=*/1, /*device_ordinal=*/0);
  infos.emplace_back(std::make_unique<TestAllocator>(0x4000), stream.get(),
                     /*memory_space=*/1, /*device_ordinal=*/1);
std::unique_ptr<se::DeviceMemoryAllocator> allocator =
std::make_unique<se::MultiDeviceAdapter>(platform, std::move(infos));
  TF_ASSERT_OK_AND_ASSIGN(
      se::OwningDeviceMemory buff0,
      allocator->Allocate(/*device_ordinal=*/0, 4, false, /*memory_space=*/0));
  CHECK_EQ(reinterpret_cast<size_t>(buff0->opaque()), 0x1001);
  TF_ASSERT_OK_AND_ASSIGN(
      se::OwningDeviceMemory buff1,
      allocator->Allocate(/*device_ordinal=*/0, 4, false, /*memory_space=*/0));
  CHECK_EQ(reinterpret_cast<size_t>(buff1->opaque()), 0x1002);
  TF_ASSERT_OK_AND_ASSIGN(
      se::OwningDeviceMemory buff2,
      allocator->Allocate(/*device_ordinal=*/0, 4, false, /*memory_space=*/1));
  CHECK_EQ(reinterpret_cast<size_t>(buff2->opaque()), 0x3001);
  TF_ASSERT_OK_AND_ASSIGN(
      se::OwningDeviceMemory buff3,
      allocator->Allocate(/*device_ordinal=*/1, 4, false, /*memory_space=*/0));
  CHECK_EQ(reinterpret_cast<size_t>(buff3->opaque()), 0x2001);
  TF_ASSERT_OK_AND_ASSIGN(
      se::OwningDeviceMemory buff4,
      allocator->Allocate(/*device_ordinal=*/1, 4, false, /*memory_space=*/1));
  CHECK_EQ(reinterpret_cast<size_t>(buff4->opaque()), 0x4001);
}
TEST(MultiDeviceAdapter, DeallocationWithDifferentAllocator) {
TF_ASSERT_OK_AND_ASSIGN(auto* platform,
xla::PlatformUtil::GetDefaultPlatform());
TF_ASSERT_OK_AND_ASSIGN(std::vector<se::StreamExecutor*> executors,
xla::PlatformUtil::GetStreamExecutors(platform));
TF_ASSERT_OK_AND_ASSIGN(auto stream, executors[0]->CreateStream());
std::shared_ptr<absl::flat_hash_set<void*>> allocations =
std::make_shared<absl::flat_hash_set<void*>>();
std::vector<se::MultiDeviceAdapter::AllocatorInfo> info_allocator;
  info_allocator.emplace_back(
      std::make_unique<TestAllocator>(0x1000, allocations), stream.get(),
      /*memory_space=*/0, /*device_ordinal=*/0);
std::unique_ptr<se::DeviceMemoryAllocator> allocator =
std::make_unique<se::MultiDeviceAdapter>(platform,
std::move(info_allocator));
std::vector<se::MultiDeviceAdapter::AllocatorInfo> info_deallocator;
  info_deallocator.emplace_back(
      std::make_unique<TestAllocator>(0x1000, allocations), stream.get(),
      /*memory_space=*/0, /*device_ordinal=*/0);
std::unique_ptr<se::DeviceMemoryAllocator> deallocator =
std::make_unique<se::MultiDeviceAdapter>(platform,
std::move(info_deallocator));
  TF_ASSERT_OK_AND_ASSIGN(
      se::OwningDeviceMemory buff0,
      allocator->Allocate(/*device_ordinal=*/0, 4, false, /*memory_space=*/0));
CHECK_EQ(allocations->size(), 1);
CHECK_EQ(reinterpret_cast<size_t>(buff0->opaque()), 0x1001);
TF_CHECK_OK(deallocator->Deallocate(0, buff0.cref()));
CHECK_EQ(allocations->size(), 0);
allocations->insert(buff0->opaque());
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/stream_executor/integrations/tf_allocator_adapter.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/stream_executor/integrations/tf_allocator_adapter_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
6373881d-be51-4cc8-ab57-a967d8737d44 | cpp | tensorflow/tensorflow | one_hot_op | tensorflow/compiler/tf2xla/kernels/one_hot_op.cc | tensorflow/core/kernels/one_hot_op_test.cc | #include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "xla/hlo/builder/xla_builder.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/op_requires.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace {
class OneHotOp : public XlaOpKernel {
public:
explicit OneHotOp(OpKernelConstruction* ctx) : XlaOpKernel(ctx) {
OP_REQUIRES_OK(ctx, ctx->GetAttr("axis", &axis_));
}
void Compile(XlaOpKernelContext* ctx) override {
const TensorShape indices_shape = ctx->InputShape(0);
const TensorShape depth_shape = ctx->InputShape(1);
const TensorShape on_value_shape = ctx->InputShape(2);
const TensorShape off_value_shape = ctx->InputShape(3);
const int indices_dims = indices_shape.dims();
const int output_dims = indices_dims + 1;
OP_REQUIRES(
ctx, axis_ == -1 || (axis_ >= 0 && axis_ < output_dims),
errors::InvalidArgument("Expected axis to be -1 or between [0, ",
output_dims, "). But received: ", axis_));
OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(depth_shape),
errors::InvalidArgument("depth must be a scalar, but got: ",
depth_shape.DebugString()));
OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(on_value_shape),
errors::InvalidArgument("on_value must be a scalar, but got: ",
on_value_shape.DebugString()));
OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(off_value_shape),
errors::InvalidArgument("off_value must be a scalar, but got: ",
off_value_shape.DebugString()));
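    // axis == -1 appends the new one-hot dimension after all existing
    // indices dimensions.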
const int axis = (axis_ == -1) ? indices_dims : axis_;
int64_t depth;
OP_REQUIRES_OK(ctx, ctx->ConstantInputAsIntScalar(1, &depth));
OP_REQUIRES(
ctx, depth >= 0,
errors::InvalidArgument("depth must be non-negative, got: ", depth));
xla::XlaOp one_hot;
OP_REQUIRES_OK(
ctx, XlaHelpers::OneHot(ctx->builder(), depth, axis, input_type(0),
indices_shape, ctx->Input(0), ctx->Input(2),
ctx->Input(3), &one_hot));
ctx->SetOutput(0, one_hot);
}
private:
int32 axis_;
OneHotOp(const OneHotOp&) = delete;
void operator=(const OneHotOp&) = delete;
};
REGISTER_XLA_OP(Name("OneHot").CompileTimeConstantInput("depth"), OneHotOp);
}
} | #include <random>
#include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
namespace tensorflow {
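// Builds a graph containing a single OneHot op over `batch_size` uniformly
// random class indices in [0, num_classes).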
static Graph* OneHot(int batch_size, int num_classes, int axis) {
Graph* g = new Graph(OpRegistry::Global());
Tensor indices(DT_INT32, TensorShape({batch_size}));
std::random_device rd;
std::mt19937 gen(rd());
std::uniform_int_distribution<> dist(0, num_classes - 1);
auto indices_t = indices.flat<int32>();
for (int i = 0; i < batch_size; ++i) {
indices_t(i) = dist(gen);
}
Tensor depth(DT_INT32, TensorShape({}));
depth.scalar<int32>()() = num_classes;
Tensor on_value(DT_FLOAT, TensorShape({}));
on_value.scalar<float>()() = 1.0f;
Tensor off_value(DT_FLOAT, TensorShape({}));
off_value.scalar<float>()() = 0.0f;
test::graph::Multi(g, "OneHot",
{
test::graph::Constant(g, indices),
test::graph::Constant(g, depth),
test::graph::Constant(g, on_value),
test::graph::Constant(g, off_value),
})
->AddAttr("axis", axis);
return g;
}
#define BM_OneHot(BATCH, CLASS, AXIS, DEVICE) \
static void BM_OneHot##_##BATCH##_##CLASS##_##AXIS##_##DEVICE( \
::testing::benchmark::State& state) { \
test::Benchmark(#DEVICE, OneHot(BATCH, CLASS, AXIS), \
                    /*old_benchmark_api=*/false) \
.Run(state); \
state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * BATCH * \
CLASS); \
} \
BENCHMARK(BM_OneHot##_##BATCH##_##CLASS##_##AXIS##_##DEVICE);
BM_OneHot(32, 512, 1, cpu);
BM_OneHot(64, 512, 1, cpu);
BM_OneHot(128, 512, 1, cpu);
BM_OneHot(32, 1024, 1, cpu);
BM_OneHot(64, 1024, 1, cpu);
BM_OneHot(128, 1024, 1, cpu);
BM_OneHot(32, 10000, 1, cpu);
BM_OneHot(64, 10000, 1, cpu);
BM_OneHot(128, 10000, 1, cpu);
BM_OneHot(32, 512, 0, cpu);
BM_OneHot(64, 512, 0, cpu);
BM_OneHot(128, 512, 0, cpu);
BM_OneHot(32, 1024, 0, cpu);
BM_OneHot(64, 1024, 0, cpu);
BM_OneHot(128, 1024, 0, cpu);
BM_OneHot(32, 10000, 0, cpu);
BM_OneHot(64, 10000, 0, cpu);
BM_OneHot(128, 10000, 0, cpu);
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/kernels/one_hot_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/one_hot_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ce1df6e4-04ce-46d9-852f-1d19564385cf | cpp | tensorflow/tensorflow | tensor_handle | tensorflow/core/common_runtime/eager/tensor_handle.cc | tensorflow/core/common_runtime/eager/tensor_handle_test.cc | #include "tensorflow/core/common_runtime/eager/tensor_handle.h"
#include <algorithm>
#include <cstddef>
#include <map>
#include <memory>
#include <queue>
#include <string>
#include <tuple>
#include <utility>
#include <variant>
#include <vector>
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/substitute.h"
#include "absl/types/variant.h"
#include "tensorflow/c/tf_tensor_internal.h"
#include "tensorflow/core/common_runtime/composite_device.h"
#include "tensorflow/core/common_runtime/copy_tensor.h"
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/common_runtime/eager/eager_executor.h"
#include "tensorflow/core/common_runtime/eager/tensor_handle_data.h"
#include "tensorflow/core/common_runtime/function.h"
#include "tensorflow/core/framework/shape_inference.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/platform/errors.h"
#if !defined(IS_MOBILE_PLATFORM)
#include "tensorflow/core/distributed_runtime/eager/remote_tensor_handle_data.h"
#endif
#include "tensorflow/core/framework/resource_var.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/stringpiece.h"
#include "tensorflow/core/lib/gtl/inlined_vector.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/profiler/lib/traceme.h"
namespace tensorflow {
namespace {
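// Returns the incarnation id identifying the current lifetime of a remote
// device; local (or null) devices report 0.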
int64_t GetRemoteDeviceIncarnation(Device* device) {
if (device == nullptr || device->IsLocal()) return 0;
return device->attributes().incarnation();
}
string SafeDeviceDebugString(Device* device) {
if (device == nullptr) {
return "[]";
} else {
return device->DebugString();
}
}
}
TensorHandle::PackedTensorHandleData::PackedTensorHandleData(
std::vector<TensorHandle*>&& handles, const TensorShape& shape)
: handles_(std::move(handles)), shape_(shape) {
for (auto* handle : handles_) {
handle->Ref();
}
}
TensorHandle::PackedTensorHandleData::~PackedTensorHandleData() {
for (auto* handle : handles_) {
handle->Unref();
}
}
Status TensorHandle::PackedTensorHandleData::Shape(TensorShape* shape) const {
*shape = shape_;
return absl::OkStatus();
}
Status TensorHandle::PackedTensorHandleData::NumDims(int* num_dims) const {
*num_dims = shape_.dims();
return absl::OkStatus();
}
Status TensorHandle::PackedTensorHandleData::Dim(int dim_index,
int64_t* dim) const {
*dim = shape_.dim_size(dim_index);
return absl::OkStatus();
}
Status TensorHandle::PackedTensorHandleData::NumElements(
int64_t* num_elements) const {
*num_elements = shape_.num_elements();
return absl::OkStatus();
}
Status TensorHandle::PackedTensorHandleData::Unprotect() {
for (auto* handle : handles_) {
TF_RETURN_IF_ERROR(
std::visit([](auto& data) { return data.Unprotect(); }, handle->data_));
}
return absl::OkStatus();
}
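// A packed handle counts as ready once it has been poisoned or once all of
// its component handles are ready.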
bool TensorHandle::PackedTensorHandleData::IsReady() const {
{
tf_shared_lock l(mu_);
if (!is_poisoned_.ok()) {
return true;
}
}
for (auto* handle : handles_) {
if (!handle->IsReady()) {
return false;
}
}
return true;
}
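// Returns the poison status if one has been set; otherwise blocks until
// every component handle is ready.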
Status TensorHandle::PackedTensorHandleData::WaitReady(
const char* caller) const {
{
tf_shared_lock l(mu_);
if (!is_poisoned_.ok()) {
return is_poisoned_;
}
}
for (auto* handle : handles_) {
TF_RETURN_IF_ERROR(handle->WaitReady(caller));
}
return absl::OkStatus();
}
void TensorHandle::PackedTensorHandleData::Poison(Status status) {
mutex_lock l(mu_);
is_poisoned_ = status;
}
string TensorHandle::PackedTensorHandleData::DebugString() const {
string debug_str = "PackedTensorHandleData: ";
for (const auto* handle : handles_) {
debug_str.append(
absl::StrCat(std::visit([](auto& data) { return data.DebugString(); },
handle->data_),
"; "));
}
return debug_str;
}
int TensorHandle::PackedTensorHandleData::NumPackedHandles() const {
return handles_.size();
}
Status TensorHandle::PackedTensorHandleData::ExtractPackedHandle(
const int index, TensorHandle** handle) const {
if (index < 0 || index >= handles_.size()) {
return errors::InvalidArgument("Expect an index within [0, ",
handles_.size(), "), but got ", index);
}
*handle = handles_.at(index);
return absl::OkStatus();
}
void TensorHandle::SetResourceHandleDtypeAndShape(
std::vector<DtypeAndPartialTensorShape> dtypes_and_shapes) {
handle_dtypes_and_shapes_ = std::move(dtypes_and_shapes);
}
Status TensorHandle::GetResourceHandleDtypesAndShapes(
std::vector<DtypeAndPartialTensorShape>* result) {
if (dtype != DT_RESOURCE) {
return errors::InvalidArgument(
"TensorHandle::GetResourceDtypeAndShape should be called on tensor "
"handles with data type DT_RESOURCE. Actual tensor: ",
dtype);
}
if (Type() != LOCAL) {
*result = handle_dtypes_and_shapes_;
return absl::OkStatus();
}
tsl::profiler::TraceMe activity(
"TensorHandle::GetResourceHandleInfo WaitReady",
tsl::profiler::TraceMeLevel::kVerbose);
auto& data = std::get<LocalTensorHandleData>(data_);
TF_RETURN_IF_ERROR(data.WaitReady("TensorHandle::GetResourceHandleInfo"));
*result = handle_dtypes_and_shapes_;
return absl::OkStatus();
}
int TensorHandle::NumPackedHandles() const {
if (Type() != PACKED) {
return 0;
}
return std::get<PackedTensorHandleData>(data_).NumPackedHandles();
}
Status TensorHandle::ExtractPackedHandle(const int index,
TensorHandle** handle) const {
if (Type() != PACKED) {
return errors::Internal("Invalid ExtractPackedHandleOnDevice call on a",
TypeString(), " handle: ", this);
}
return std::get<PackedTensorHandleData>(data_).ExtractPackedHandle(index,
handle);
}
TensorHandle* TensorHandle::CreateLocalHandle(const tensorflow::Tensor& t) {
tensorflow::Tensor tensor = t;
return CreateLocalHandle(std::move(tensor),
                           /*d=*/nullptr,
                           /*op_device=*/nullptr,
                           /*ctx=*/nullptr);
}
TensorHandle* TensorHandle::CreateLocalHandle(tensorflow::Tensor&& t, Device* d,
Device* op_device,
EagerContext* ctx) {
return CreateLocalHandle(std::move(t), d, op_device, nullptr, ctx);
}
TensorHandle* TensorHandle::CreateLocalHandle(tensorflow::Tensor&& t, Device* d,
Device* op_device,
Device* resource_device,
EagerContext* ctx) {
if (t.dtype() == DT_RESOURCE && t.NumElements() > 0) {
return new TensorHandle(std::move(t), d, op_device, ctx);
} else {
return new TensorHandle(std::move(t), d, op_device, resource_device, ctx);
}
}
TensorHandle::TensorHandle(tensorflow::Tensor&& t, Device* d, Device* op_device,
Device* resource_device, EagerContext* ctx)
: ImmediateExecutionTensorHandle(kEager),
dtype(t.dtype()),
device_((!ctx || d == ctx->HostCPU()) ? nullptr : d),
op_device_(op_device),
resource_device_(resource_device),
resource_remote_device_incarnation_(
GetRemoteDeviceIncarnation(resource_device_)),
ctx_(ctx),
data_(absl::in_place_type<LocalTensorHandleData>, std::move(t)) {
DVLOG(3) << "Creating Local TensorHandle: " << this
<< " device: " << SafeDeviceDebugString(device_)
<< " tensor: " << t.DeviceSafeDebugString();
}
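// Constructor for DT_RESOURCE tensors: the resource device and the
// dtypes-and-shapes metadata are read from the tensor's ResourceHandle.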
TensorHandle::TensorHandle(tensorflow::Tensor&& t, Device* d, Device* op_device,
EagerContext* ctx)
: ImmediateExecutionTensorHandle(kEager),
dtype(DT_RESOURCE),
device_((!ctx || d == ctx->HostCPU()) ? nullptr : d),
op_device_(op_device),
resource_device_(
GetResourceDevice(t.flat<class ResourceHandle>()(0), ctx)),
resource_remote_device_incarnation_(
GetRemoteDeviceIncarnation(resource_device_)),
ctx_(ctx),
handle_dtypes_and_shapes_(
t.flat<class ResourceHandle>()(0).dtypes_and_shapes()),
data_(absl::in_place_type<LocalTensorHandleData>, std::move(t)) {
DVLOG(3) << "Creating Local TensorHandle: " << this
<< " device: " << SafeDeviceDebugString(device_)
<< " tensor: " << t.DeviceSafeDebugString();
}
TensorHandle* TensorHandle::CreateEmptyLocalHandle(Device* d, Device* op_device,
Device* resource_device,
tensorflow::DataType dtype,
EagerContext* ctx) {
return new TensorHandle(d, op_device, resource_device, dtype, ctx);
}
TensorHandle::TensorHandle(Device* d, Device* op_device,
Device* resource_device, tensorflow::DataType dtype,
EagerContext* ctx)
: ImmediateExecutionTensorHandle(kEager),
dtype(dtype),
device_((d == ctx->HostCPU()) ? nullptr : d),
op_device_(op_device),
resource_device_(resource_device),
resource_remote_device_incarnation_(
GetRemoteDeviceIncarnation(resource_device_)),
ctx_(ctx),
data_(absl::in_place_type<LocalTensorHandleData>) {
DVLOG(3) << "Creating empty Local TensorHandle: " << this
<< " device: " << SafeDeviceDebugString(device_);
}
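// Packs per-device handles into a single handle placed on a CompositeDevice
// that spans the component handles' devices.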
Status TensorHandle::CreatePackedHandle(std::vector<TensorHandle*>&& handles,
const tensorflow::DataType dtype,
const tensorflow::TensorShape& shape,
const string& device_name,
EagerContext* ctx,
TensorHandle** packed_handle) {
if (handles.empty()) {
return errors::InvalidArgument("Handles should not be empty.");
}
std::vector<DtypeAndPartialTensorShape> dtypes_and_shapes;
if (dtype == DT_RESOURCE) {
TF_RETURN_IF_ERROR(
handles.at(0)->GetResourceHandleDtypesAndShapes(&dtypes_and_shapes));
}
std::vector<string> devices;
devices.reserve(handles.size());
for (auto* handle : handles) {
devices.push_back(handle->op_device() ? handle->op_device()->name()
: ctx->HostCPU()->name());
}
CompositeDevice* composite_device = nullptr;
TF_RETURN_IF_ERROR(ctx->FindOrCreateCompositeDevice(devices, device_name,
&composite_device));
*packed_handle =
new TensorHandle(std::move(handles), composite_device, dtype, shape, ctx);
(*packed_handle)
->SetResourceHandleDtypeAndShape(std::move(dtypes_and_shapes));
return absl::OkStatus();
}
Status TensorHandle::CreatePackedHandle(std::vector<TensorHandle*>&& handles,
EagerContext* ctx,
TensorHandle** packed_handle) {
if (handles.empty()) {
return errors::InvalidArgument("Handles should not be empty.");
}
tensorflow::DataType dtype = handles.at(0)->dtype;
tensorflow::TensorShape shape;
TF_RETURN_IF_ERROR(handles.at(0)->Shape(&shape));
  return CreatePackedHandle(std::move(handles), dtype, shape,
                            /*device_name=*/"", ctx, packed_handle);
}
TensorHandle::TensorHandle(std::vector<TensorHandle*>&& handles, Device* device,
const tensorflow::DataType dtype,
const tensorflow::TensorShape& shape,
EagerContext* ctx)
: ImmediateExecutionTensorHandle(kEager),
dtype(dtype),
device_(device),
op_device_(device),
resource_device_(dtype == DT_RESOURCE ? device : nullptr),
resource_remote_device_incarnation_(
GetRemoteDeviceIncarnation(resource_device_)),
ctx_(ctx),
data_(absl::in_place_type<PackedTensorHandleData>, std::move(handles),
shape) {
DVLOG(3) << "Creating a packed TensorHandle: " << this
<< " device: " << SafeDeviceDebugString(device_);
}
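// Remote tensor handles and their mirrors are compiled out on mobile
// platforms.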
#if !defined(IS_MOBILE_PLATFORM)
TensorHandle* TensorHandle::CreateUnshapedRemoteHandle(
int64_t op_id, int32_t output_num, const string& remote_task,
tensorflow::DataType dtype, Device* d, EagerContext* ctx,
const bool unknown_device) {
return new TensorHandle(op_id, output_num, remote_task, dtype, d, ctx,
unknown_device);
}
TensorHandle::TensorHandle(int64_t op_id, int32_t output_num,
const string& remote_task,
tensorflow::DataType dtype, Device* d,
EagerContext* ctx, const bool unknown_device)
: ImmediateExecutionTensorHandle(kEager),
dtype(dtype),
device_(d),
op_device_(d),
resource_device_(dtype == DT_RESOURCE ? d : nullptr),
resource_remote_device_incarnation_(
GetRemoteDeviceIncarnation(resource_device_)),
unknown_device_(unknown_device),
ctx_(ctx),
data_(absl::in_place_type<RemoteTensorHandleData>, op_id, output_num,
remote_task, ctx) {
DVLOG(3) << "Creating Unshaped Remote TensorHandle: " << this
<< " device: " << SafeDeviceDebugString(device_);
}
TensorHandle* TensorHandle::CreateLazyRemoteHandle(
int64_t op_id, int32_t output_num, tensorflow::DataType dtype, Device* d,
const bool is_ready, EagerContext* ctx) {
return new TensorHandle(op_id, output_num, dtype, d, is_ready, ctx);
}
TensorHandle::TensorHandle(int64_t op_id, int32_t output_num,
tensorflow::DataType dtype, Device* d,
const bool is_ready, EagerContext* ctx)
: ImmediateExecutionTensorHandle(kEager),
dtype(dtype),
device_(d),
op_device_(d),
resource_device_(dtype == DT_RESOURCE ? d : nullptr),
resource_remote_device_incarnation_(
GetRemoteDeviceIncarnation(resource_device_)),
ctx_(ctx),
data_(absl::in_place_type<RemoteTensorHandleData>, op_id, output_num,
ctx->GetContextViewId(), is_ready) {
DVLOG(3) << "Creating Lazy Remote TensorHandle: " << this
<< " device: " << SafeDeviceDebugString(device_);
}
#endif
TensorHandle::~TensorHandle() { DVLOG(3) << "Deleting tensor handle " << this; }
void TensorHandle::Release() {
DVLOG(3) << "Releasing tensor handle " << this;
Unref();
}
tensorflow::DataType TensorHandle::DataType() const { return dtype; }
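// data_ holds one of LocalTensorHandleData, PackedTensorHandleData, or
// RemoteTensorHandleData (in that order, which Type() below relies on); these
// helpers dispatch to whichever alternative is active.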
bool TensorHandle::IsReady() const {
return std::visit([](auto& data) { return data.IsReady(); }, data_);
}
Status TensorHandle::WaitReady(const char* caller) const {
return std::visit([caller](auto& data) { return data.WaitReady(caller); },
data_);
}
TensorHandle::HandleType TensorHandle::Type() const {
if (data_.index() == 0) {
return LOCAL;
} else if (data_.index() == 1) {
return PACKED;
} else {
return REMOTE;
}
}
string TensorHandle::TypeString() const {
if (data_.index() == 0) {
return "LOCAL";
} else if (data_.index() == 1) {
return "PACKED";
} else {
return "REMOTE";
}
}
Status TensorHandle::Tensor(const tensorflow::Tensor** t) const {
DVLOG(3) << "Tensor on TensorHandle: " << this;
if (Type() != LOCAL) {
return errors::Internal("Invalid Tensor call on a ", TypeString(),
" handle: ", this);
}
auto& data = std::get<LocalTensorHandleData>(data_);
return data.Tensor(t);
}
Status TensorHandle::TensorFromDevice(const Device* d,
const tensorflow::Tensor** t) const {
DVLOG(3) << "TensorFromDevice on TensorHandle: " << this << " device: " << d;
if (d == device_) {
if (Type() != LOCAL) {
return errors::Internal("Invalid Tensor call on a ", TypeString(),
" handle: ", this);
}
auto& data = std::get<LocalTensorHandleData>(data_);
return data.Tensor(t);
}
tf_shared_lock l(mu_);
auto elem = local_mirrors_.find(d);
if (elem == local_mirrors_.end()) {
return errors::Internal("Invalid device: ", d,
" in Tensor call to handle: ", this);
}
auto& mirror = elem->second;
return mirror.Tensor(t);
}
Status TensorHandle::TensorValue(const Device* d, tensorflow::TensorValue* t) {
DVLOG(3) << "TensorValue on TensorHandle: " << this << " device: " << d;
if (d == device_) {
if (Type() != LOCAL) {
return errors::Internal("Invalid TensorValue call on a ", TypeString(),
" handle: ", this);
}
auto& data = std::get<LocalTensorHandleData>(data_);
return data.TensorValue(t);
}
tf_shared_lock l(mu_);
auto elem = local_mirrors_.find(d);
if (elem == local_mirrors_.end()) {
return errors::Internal("Invalid device: ", d,
" in TensorValue call to handle: ", this);
}
auto& mirror = elem->second;
return mirror.TensorValue(t);
}
Status TensorHandle::WaitUnknownDevice() const {
if (unknown_device_) {
TF_RETURN_IF_ERROR(std::visit(
[](auto& data) {
return data.WaitReady("TensorHandle::UnknownDevice");
},
data_));
}
return absl::OkStatus();
}
Device* TensorHandle::DeviceOrHostCPU(const EagerContext& ctx) const {
return (device_ == nullptr) ? ctx.HostCPU() : device_;
}
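// When the handle is not yet ready, the shape queries below fall back to the
// statically inferred shape where possible instead of blocking on async
// execution.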
Status TensorHandle::Shape(tensorflow::TensorShape* shape) {
if (!IsReady() && inference_shape_.IsFullyDefined()) {
bool fill = inference_shape_.AsTensorShape(shape);
DCHECK(fill);
return absl::OkStatus();
} else {
return std::visit([shape](auto& data) { return data.Shape(shape); }, data_);
}
}
Status TensorHandle::InferenceShape(
shape_inference::InferenceContext* const inference_context,
shape_inference::ShapeHandle* shape_handle) {
if (IsReady()) {
std::vector<shape_inference::DimensionHandle> dims_handle;
int num_dims;
TF_RETURN_IF_ERROR(NumDims(&num_dims));
for (int i = 0; i < num_dims; i++) {
int64_t dims;
TF_RETURN_IF_ERROR(Dim(i, &dims));
dims_handle.push_back(inference_context->MakeDim(dims));
}
*shape_handle = inference_context->MakeShape(dims_handle);
return absl::OkStatus();
} else {
if (inference_shape_.unknown_rank()) {
*shape_handle = inference_context->UnknownShape();
return absl::OkStatus();
}
std::vector<shape_inference::DimensionHandle> dims_handle(
inference_shape_.dims());
for (int i = 0; i < dims_handle.size(); i++) {
dims_handle[i] = inference_context->MakeDim(inference_shape_.dim_size(i));
}
*shape_handle = inference_context->MakeShape(dims_handle);
return absl::OkStatus();
}
}
void TensorHandle::SetInferenceShape(
shape_inference::InferenceContext* const inference_context,
const shape_inference::ShapeHandle& shape_handle) {
auto num_dims = inference_context->Rank(shape_handle);
std::vector<int64_t> dims;
if (num_dims == shape_inference::InferenceContext::kUnknownRank) {
inference_shape_ = PartialTensorShape();
return;
}
DCHECK_GE(num_dims, 0);
dims.resize(num_dims);
for (size_t i = 0; i < num_dims; ++i) {
dims[i] = inference_context->Value(inference_context->Dim(shape_handle, i));
}
auto s = PartialTensorShape::MakePartialShape(dims.data(), num_dims,
&inference_shape_);
TF_DCHECK_OK(s);
}
Status TensorHandle::CopyInferenceShape(TensorHandle* other) {
if (IsReady()) {
return absl::OkStatus();
}
if (other->IsReady()) {
TensorShape other_shape;
TF_RETURN_IF_ERROR(other->Shape(&other_shape));
inference_shape_ = other_shape;
} else {
inference_shape_ = other->inference_shape_;
}
return absl::OkStatus();
}
Status TensorHandle::Shape(tensorflow::PartialTensorShape* shape) const {
DCHECK(shape != nullptr);
if (!IsReady() && !inference_shape_.unknown_rank()) {
*shape = inference_shape_;
return absl::OkStatus();
} else {
auto result = std::visit(
[](auto& data) {
TensorShape shape;
Status s = data.Shape(&shape);
return std::make_pair(shape, s);
},
data_);
TF_RETURN_IF_ERROR(result.second);
*shape = result.first;
}
return absl::OkStatus();
}
Status TensorHandle::NumDims(int* num_dims) const {
DCHECK(num_dims != nullptr);
if (!IsReady() && !inference_shape_.unknown_rank()) {
*num_dims = inference_shape_.dims();
return absl::OkStatus();
} else {
return std::visit([num_dims](auto& data) { return data.NumDims(num_dims); },
data_);
}
}
Status TensorHandle::Dim(int dim_index, int64_t* dim) const {
DCHECK(dim != nullptr);
if (!IsReady() && !inference_shape_.unknown_rank() &&
inference_shape_.dim_size(dim_index) != -1) {
*dim = inference_shape_.dim_size(dim_index);
return absl::OkStatus();
} else {
return std::visit(
[dim_index, dim](auto& data) { return data.Dim(dim_index, dim); },
data_);
}
}
Status TensorHandle::NumElements(int64_t* num_elements) const {
DCHECK(num_elements != nullptr);
if (!IsReady() && inference_shape_.IsFullyDefined()) {
*num_elements = inference_shape_.num_elements();
return absl::OkStatus();
} else {
return std::visit(
[num_elements](auto& data) { return data.NumElements(num_elements); },
data_);
}
}
Status TensorHandle::Unprotect(const Device* d) {
DVLOG(3) << "Unprotect on TensorHandle: " << this << " device: " << d;
if (d == device_) {
return std::visit([](auto& data) { return data.Unprotect(); }, data_);
}
tf_shared_lock l(mu_);
auto elem = local_mirrors_.find(d);
if (elem == local_mirrors_.end()) {
return errors::Internal("Invalid device: ", d,
" in Unprotect call to handle: ", this);
}
auto& mirror = elem->second;
return mirror.Unprotect();
}
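// Local mirrors are per-device copies of the tensor, keyed by Device* and
// guarded by mu_; the remote mirrors further below are keyed by device name.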
bool TensorHandle::HasLocalMirror(const Device* d) const {
DVLOG(3) << "HasLocalMirror on TensorHandle: " << this << " device: " << d;
tf_shared_lock l(mu_);
return local_mirrors_.find(d) != local_mirrors_.end();
}
Status TensorHandle::AddEmptyLocalMirror(const Device* d) {
DVLOG(3) << "AddEmptyLocalMirror on TensorHandle: " << this
<< " device: " << d;
if (d == device_) {
return errors::Internal("Cannot add mirror for primary device.");
}
mutex_lock l(mu_);
if (local_mirrors_.find(d) != local_mirrors_.end()) {
return errors::AlreadyExists("Attempted to duplicate a local mirror.");
}
local_mirrors_.emplace(std::piecewise_construct, std::forward_as_tuple(d),
std::forward_as_tuple());
return absl::OkStatus();
}
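// The remote-mirror bookkeeping below is likewise excluded on mobile.
// RemoteAddress resolves the (op_id, output_num) pair identifying this tensor
// on a remote worker, consulting remote mirrors before the primary data and
// surfacing poisoned entries as internal errors.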
#if !defined(IS_MOBILE_PLATFORM)
Status TensorHandle::RemoteAddress(const Device* d, const bool wait_until_ready,
int64_t* op_id, int32* output_num) const {
DVLOG(3) << "RemoteAddress on TensorHandle: " << this << " device: " << d
<< " " << d->name();
const tensorflow::RemoteTensorHandleData* remote_data = nullptr;
if (d != device_) {
tf_shared_lock l(mu_);
auto mirror = remote_mirrors_.find(d->name());
if (mirror != remote_mirrors_.end()) {
remote_data = &mirror->second;
} else {
return errors::FailedPrecondition(
"Could not find remote mirror for specified device");
}
}
if (remote_data != nullptr) {
auto status =
remote_data->OpIdAndOutputNum(wait_until_ready, op_id, output_num);
if (!status.ok()) {
return errors::Internal(
absl::StrCat("Remote address looked up from remote mirrors found to "
"be poisoned with status ",
status.ToString()));
} else {
return absl::OkStatus();
}
}
if (Type() != REMOTE) {
return errors::InvalidArgument("Primary device is not remote");
}
auto& data = std::get<RemoteTensorHandleData>(data_);
auto status = data.OpIdAndOutputNum(wait_until_ready, op_id, output_num);
if (!status.ok()) {
return errors::Internal(
"Remote address looked up from remote data found to be poisoned");
} else {
return absl::OkStatus();
}
}
bool TensorHandle::HasRemoteMirror(const Device* d,
uint64 context_view_id) const {
DVLOG(3) << "HasRemoteMirror on TensorHandle: " << this << " device: " << d
<< " " << d->name();
tf_shared_lock l(mu_);
auto mirror = remote_mirrors_.find(d->name());
if (mirror != remote_mirrors_.end()) {
if (mirror->second.context_view_id() != context_view_id) {
return false;
}
return true;
}
return false;
}
bool TensorHandle::HasResourceShapeMirror(const Device* d,
uint64 context_view_id) const {
DVLOG(3) << "HasResourceShapeMirror on TensorHandle: " << this
<< " device: " << d << " " << d->name();
tf_shared_lock l(mu_);
auto mirror = resource_shape_mirrors_.find(d->name());
if (mirror != resource_shape_mirrors_.end()) {
if (mirror->second.context_view_id() != context_view_id) {
return false;
}
return true;
}
return false;
}
Status TensorHandle::AddUnshapedRemoteMirror(const Device* d, int64_t op_id,
int output_num,
const string& remote_task,
EagerContext* ctx) {
DVLOG(3) << "AddUnshapedRemoteMirror on TensorHandle: " << this
<< " device: " << d << " " << d->name() << " op_id: " << op_id
<< " output_num: " << output_num;
mutex_lock l(mu_);
auto remote_mirror = remote_mirrors_.find(d->name());
if (remote_mirror != remote_mirrors_.end()) {
if (remote_mirror->second.context_view_id() > ctx->GetContextId()) {
return errors::Internal(
"Attempted to duplicate a remote mirror with inconsistent "
"arguments.");
}
remote_mirrors_.erase(remote_mirror);
}
remote_mirrors_.emplace(
std::piecewise_construct, std::forward_as_tuple(d->name()),
std::forward_as_tuple(op_id, output_num, remote_task, ctx));
return absl::OkStatus();
}
Status TensorHandle::AddResourceShapeMirror(const Device* d, int64_t op_id,
int output_num, EagerContext* ctx) {
DVLOG(3) << "AddResourceShapeMirror on TensorHandle: " << this;
mutex_lock l(mu_);
auto mirror = resource_shape_mirrors_.find(d->name());
if (mirror != resource_shape_mirrors_.end()) {
if (mirror->second.context_view_id() == ctx->GetContextViewId()) {
int64_t existing_op_id;
int existing_output_num;
TF_RETURN_IF_ERROR(mirror->second.OpIdAndOutputNum(false, &existing_op_id,
&existing_output_num));
if (op_id == existing_op_id && output_num == existing_output_num) {
return absl::OkStatus();
}
return absl::InternalError(
"Attempted to duplicate a resource shape mirror.");
}
resource_shape_mirrors_.erase(mirror);
}
resource_shape_mirrors_.emplace(
std::piecewise_construct, std::forward_as_tuple(d->name()),
std::forward_as_tuple(op_id, output_num, ctx->GetContextViewId(),
true));
return absl::OkStatus();
}
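// Records the shape (and optionally a corrected device) reported back by a
// remote worker. Updates carrying a mismatched context_view_id are either
// ignored or flagged as internal errors, depending on which side is newer.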
Status TensorHandle::SetRemoteShape(const TensorShape& shape, const Device* d,
uint64 context_view_id) {
return SetRemoteShapeAndDevice(shape, d, context_view_id, "");
}
Status TensorHandle::SetRemoteShapeAndDevice(const TensorShape& shape,
const Device* d,
uint64 context_view_id,
string op_device) {
DVLOG(3) << "SetRemoteShape on TensorHandle: " << this << " device: " << d
<< " " << d->name();
if (d != device_) {
tf_shared_lock l(mu_);
auto remote_mirror = remote_mirrors_.find(d->name());
if (remote_mirror == remote_mirrors_.end()) {
return absl::OkStatus();
}
auto& mirror = remote_mirror->second;
if (mirror.context_view_id() == context_view_id) {
auto status = mirror.SetShape(shape);
if (!status.ok()) {
LOG(ERROR) << "SetShape returned " << status.message()
<< ". This should never occur.";
}
return status;
} else if (mirror.context_view_id() < context_view_id) {
return errors::Internal(
absl::Substitute("Unexpected context_view_id ($0) which should not "
"be newer than the "
"one ($1) associated to the remote mirror.",
context_view_id, mirror.context_view_id()));
} else {
LOG(WARNING) << "SetRemoteShape is ignored for a remote mirror that is "
"associated with a newer context_view_id.";
}
return absl::OkStatus();
}
if (Type() != REMOTE) {
return errors::InvalidArgument(
"SetRemoteShape should only be called on remote handles.");
}
auto& data = std::get<RemoteTensorHandleData>(data_);
if (op_device.empty()) {
auto status = data.SetShape(shape);
if (!status.ok()) {
LOG(ERROR) << "SetShape returned " << status.message()
<< ". This should never occur.";
}
return status;
} else {
if (!unknown_device_) {
return errors::Internal("Cannot reset known devices.");
}
Device* device;
TF_RETURN_IF_ERROR(ctx_->FindDeviceFromName(op_device.c_str(), &device));
device_ = device;
op_device_ = device;
resource_device_ = dtype == DT_RESOURCE ? device : nullptr;
resource_remote_device_incarnation_ =
GetRemoteDeviceIncarnation(resource_device_);
string remote_task;
if (!DeviceNameUtils::GetTaskName(device->parsed_name(), &remote_task)) {
return errors::InvalidArgument(
"Unable to find remote task corresponding to device ",
device->name());
}
auto status = data.SetShapeAndRemoteTask(shape, remote_task);
if (!status.ok()) {
LOG(ERROR) << "SetShape returned " << status
<< ". This should never occur.";
}
return status;
}
}
void TensorHandle::PoisonRemote(Status status, const Device* d,
uint64 context_view_id) {
DVLOG(3) << "PoisonRemote on TensorHandle: " << this << " device: " << d
<< " " << d->name();
if (d == device_) {
DCHECK(Type() == REMOTE)
<< "Poison can only be on remote handles: " << this;
auto& data = std::get<RemoteTensorHandleData>(data_);
data.Poison(status);
} else {
tf_shared_lock l(mu_);
auto mirror = remote_mirrors_.find(d->name());
if (mirror != remote_mirrors_.end()) {
if (mirror->second.context_view_id() == context_view_id) {
mirror->second.Poison(status);
}
}
}
}
#endif
Status TensorHandle::AddLocalMirror(tensorflow::Tensor&& tensor,
const Device* d) {
if (d == device_) {
return errors::Internal(
"Local mirror assign conflicts with primary device.");
}
mutex_lock l(mu_);
auto elem =
local_mirrors_.emplace(std::piecewise_construct, std::forward_as_tuple(d),
std::forward_as_tuple(std::move(tensor)));
if (!elem.second) {
return errors::AlreadyExists("Attempted to add existing mirror.");
}
return absl::OkStatus();
}
Status TensorHandle::SetTensor(tensorflow::Tensor&& t, const Device* d) {
DVLOG(3) << "SetTensor on TensorHandle: " << this << " device: " << d;
if (d == device_) {
    DCHECK(Type() == LOCAL)
        << "SetTensor should only be called on local handles.";
if (t.dtype() == DT_RESOURCE && t.NumElements() > 0) {
auto& resource_handle = t.flat<class ResourceHandle>()(0);
handle_dtypes_and_shapes_ = resource_handle.dtypes_and_shapes();
}
auto& data = std::get<LocalTensorHandleData>(data_);
return data.SetTensor(std::move(t));
} else {
tf_shared_lock l(mu_);
auto elem = local_mirrors_.find(d);
if (elem == local_mirrors_.end()) {
return errors::Internal(
"Attempted to set tensor for non-existent local mirror.");
}
auto& mirror = elem->second;
return mirror.SetTensor(std::move(t));
}
return absl::OkStatus();
}
void TensorHandle::Poison(Status status, const Device* d) {
DVLOG(3) << "Poison on TensorHandle: " << this << " device: " << d;
if (d == device_) {
DCHECK(Type() != REMOTE) << "Poison can only be on local handles: " << this;
std::visit([status](auto& data) { data.Poison(status); }, data_);
} else {
tf_shared_lock l(mu_);
auto elem = local_mirrors_.find(d);
DCHECK(elem != local_mirrors_.end())
<< "Attempted to poison non-existent local mirror, handle: " << this
<< " device: " << d;
auto& mirror = elem->second;
mirror.Poison(status);
}
}
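// Synchronously copies the tensor to device d (host CPU when d is null).
// Same-device and CPU-to-CPU copies alias the source tensor; cross-device
// copies go through CopyTensor::ViaDMA and block on a Notification.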
Status TensorHandle::CopyToDevice(const EagerContext& ctx,
tensorflow::Device* d,
tensorflow::Tensor* output) const {
tensorflow::Device* dstd = (d == nullptr) ? ctx.HostCPU() : d;
tensorflow::Device* srcd = DeviceOrHostCPU(ctx);
const bool dst_cpu = dstd->tensorflow_accelerator_device_info() == nullptr;
const bool src_cpu = srcd->tensorflow_accelerator_device_info() == nullptr;
bool is_same_device =
(srcd == dstd) || (srcd->name() == dstd->name()) || (dst_cpu && src_cpu);
const tensorflow::Tensor* src = nullptr;
TF_RETURN_IF_ERROR(Tensor(&src));
if (is_same_device) {
*output = *src;
return absl::OkStatus();
}
if (!dst_cpu && (src->dtype() != tensorflow::DT_VARIANT &&
!tensorflow::DataTypeCanUseMemcpy(src->dtype()))) {
return tensorflow::errors::InvalidArgument(
"Can't copy Tensor with type ",
tensorflow::DataTypeString(src->dtype()), " to device ", dstd->name(),
".");
}
tensorflow::AllocatorAttributes attr;
if (src->dtype() == tensorflow::DT_VARIANT) {
attr.set_on_host(true);
}
const auto* dstd_info = dstd->tensorflow_accelerator_device_info();
tensorflow::Tensor dst(dstd->GetAllocator(attr), src->dtype(), src->shape());
if (src->shape().num_elements() == 0) {
*output = dst;
return absl::OkStatus();
}
tensorflow::DeviceContext* src_device_context = nullptr;
if (!src_cpu) {
src_device_context =
srcd->tensorflow_accelerator_device_info()->default_context;
}
tensorflow::DeviceContext* dst_device_context = nullptr;
if (!dst_cpu) {
if (dstd_info->use_pjrt_tensor_buffer && DataType() != DT_INT4 &&
DataType() != DT_UINT4) {
dst_device_context = dstd_info->pjrt_context;
} else {
dst_device_context = dstd_info->default_context;
}
}
TF_RETURN_IF_ERROR(srcd->Sync());
tensorflow::Notification n;
tensorflow::Status status;
tensorflow::CopyTensor::ViaDMA("copy", src_device_context, dst_device_context,
srcd, dstd, tensorflow::AllocatorAttributes(),
tensorflow::AllocatorAttributes(), src, &dst,
                                 0 /*dev_to_dev_stream_index*/,
[&status, &n](const tensorflow::Status& s) {
status = s;
n.Notify();
});
n.WaitForNotification();
if (status.ok()) {
*output = dst;
return absl::OkStatus();
}
return status;
}
Device* GetResourceDevice(const ResourceHandle& handle, EagerContext* ctx) {
if (ctx == nullptr) {
return nullptr;
}
Device* device = nullptr;
if (!ctx->FindDeviceFromName(handle.device().c_str(), &device).ok()) {
LOG(ERROR) << "Cannot find resource device: " << handle.device() << ".";
return nullptr;
}
return device;
}
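// The accessors below first wait for an unknown device to be resolved, then
// report a default host-CPU device name, type, and id when the handle has no
// explicit device.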
const char* TensorHandle::DeviceName(Status* status) const {
status->Update(WaitUnknownDevice());
tensorflow::Device* d = op_device();
return (d == nullptr) ? "/job:localhost/replica:0/task:0/device:CPU:0"
: d->name().c_str();
}
const char* TensorHandle::BackingDeviceName(Status* status) const {
status->Update(WaitUnknownDevice());
tensorflow::Device* d = device();
return (d == nullptr) ? "/job:localhost/replica:0/task:0/device:CPU:0"
: d->name().c_str();
}
const char* TensorHandle::DeviceType(Status* status) const {
status->Update(WaitUnknownDevice());
tensorflow::Device* d = op_device();
return (d == nullptr) ? "CPU" : d->parsed_name().type.c_str();
}
int TensorHandle::DeviceId(Status* status) const {
status->Update(WaitUnknownDevice());
tensorflow::Device* d = op_device();
return (d == nullptr) ? 0 : d->parsed_name().id;
}
} | #include "tensorflow/core/common_runtime/eager/tensor_handle.h"
#include <iostream>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/cleanup/cleanup.h"
#include "tensorflow/core/common_runtime/composite_device.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/device.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/random.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/status_matchers.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
using tensorflow::testing::StatusIs;
using ::testing::HasSubstr;
TEST(TensorHandle_ShapeTest, AsyncShape) {
Tensor t(DT_UINT16, TensorShape({2, 2}));
EXPECT_TRUE(t.shape().IsSameSize(TensorShape({2, 2})));
for (int64_t a = 0; a < t.shape().dim_size(0); a++) {
for (int64_t b = 0; b < t.shape().dim_size(1); b++) {
t.matrix<uint16>()(a, b) = uint16(a * b);
}
}
StaticDeviceMgr device_mgr(DeviceFactory::NewDevice(
"CPU", {}, "/job:localhost/replica:0/task:0/device:CPU:0"));
auto ctx = new EagerContext(
SessionOptions(),
tensorflow::ContextDevicePlacementPolicy::DEVICE_PLACEMENT_SILENT, false,
&device_mgr, false, nullptr, nullptr, nullptr,
true);
absl::Cleanup ctx_cleanup = [&]() { ctx->Unref(); };
TensorHandle* sync_th =
TensorHandle::CreateLocalHandle(std::move(t), nullptr, nullptr, ctx);
absl::Cleanup sync_th_cleanup = [&]() { sync_th->Unref(); };
TensorHandle* async_th = TensorHandle::CreateEmptyLocalHandle(
nullptr, nullptr, nullptr, DataType::DT_UINT16, ctx);
absl::Cleanup async_th_cleanup = [&]() { async_th->Unref(); };
EXPECT_TRUE(async_th->CopyInferenceShape(sync_th).ok());
TensorShape sync_shape;
TensorShape async_shape;
EXPECT_TRUE(sync_th->Shape(&sync_shape).ok());
EXPECT_TRUE(async_th->Shape(&async_shape).ok());
EXPECT_EQ(sync_shape, async_shape);
int num_dims = -1;
EXPECT_TRUE(async_th->NumDims(&num_dims).ok());
EXPECT_EQ(num_dims, 2);
int64_t num_elements = -1;
EXPECT_TRUE(async_th->NumElements(&num_elements).ok());
EXPECT_EQ(num_elements, 4);
}
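// FakeDevice is a minimal Device stub for these tests: no allocator, a no-op
// Sync(), and a configurable IsLocal() flag. CreateDevice() fabricates its
// attributes with a random nonzero incarnation.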
class FakeDevice : public Device {
public:
explicit FakeDevice(const DeviceAttributes& attr, bool is_local)
: Device(nullptr, attr), is_local_(is_local) {}
Status Sync() override { return absl::OkStatus(); }
Allocator* GetAllocator(AllocatorAttributes) override { return nullptr; }
bool IsLocal() const override { return is_local_; }
private:
const bool is_local_;
};
static std::unique_ptr<FakeDevice> CreateDevice(const char* type,
const char* name,
bool is_local = true) {
DeviceAttributes attr;
attr.set_name(name);
attr.set_device_type(type);
int64_t incarnation = random::New64();
while (incarnation == 0) {
incarnation = random::New64();
}
attr.set_incarnation(incarnation);
return std::make_unique<FakeDevice>(attr, is_local);
}
}
class PackedTensorHandleTest : public ::testing::Test {
public:
PackedTensorHandleTest() {
std::vector<std::unique_ptr<Device>> devices;
devices.push_back(CreateDevice("CPU", host_name_));
for (const char* name : device_names_) {
devices.push_back(CreateDevice("GPU", name));
}
device_mgr_ = new StaticDeviceMgr(std::move(devices));
context_ = new EagerContext(
SessionOptions(),
tensorflow::ContextDevicePlacementPolicy::DEVICE_PLACEMENT_SILENT,
false, device_mgr_,
false, nullptr,
nullptr, nullptr,
true);
}
~PackedTensorHandleTest() override {
delete device_mgr_;
context_->Unref();
}
EagerContext* context() { return context_; }
std::vector<Device*> ListGPUDevices() const {
auto all_devices = device_mgr_->ListDevices();
return std::vector<Device*>(all_devices.begin() + 1, all_devices.end());
}
bool IsReady(TensorHandle* handle) const { return handle->IsReady(); }
Status WaitReady(TensorHandle* handle) const {
return handle->WaitReady("Test");
}
private:
const std::vector<const char*> device_names_ = {
"/job:worker/replica:0/task:0/device:GPU:0",
"/job:worker/replica:0/task:0/device:GPU:1",
"/job:worker/replica:0/task:1/device:GPU:0",
"/job:worker/replica:0/task:1/device:GPU:1"};
const char* host_name_ = "/job:worker/replica:0/task:0/device:CPU:0";
StaticDeviceMgr* device_mgr_;
EagerContext* context_;
};
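// Packs two local and two remote resource handles spread over four GPUs and
// checks that the packed handle only becomes ready once every remote
// component has received a shape.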
TEST_F(PackedTensorHandleTest, PackedHandle) {
tensorflow::DataType dtype = DT_RESOURCE;
TensorShape shape = {};
DtypeAndPartialTensorShape dtype_and_shape = {DT_FLOAT, {2, 2}};
std::vector<TensorHandle*> handles;
Tensor t0(dtype, shape);
Device* d0 = ListGPUDevices().at(0);
TensorHandle* h0 =
TensorHandle::CreateLocalHandle(std::move(t0), d0, d0, d0, context());
absl::Cleanup h0_cleanup = [&]() { h0->Unref(); };
h0->SetResourceHandleDtypeAndShape({dtype_and_shape});
handles.push_back(h0);
Tensor t1(dtype, shape);
Device* d1 = ListGPUDevices().at(1);
TensorHandle* h1 =
TensorHandle::CreateLocalHandle(std::move(t1), d1, d1, d1, context());
absl::Cleanup h1_cleanup = [&]() { h1->Unref(); };
h1->SetResourceHandleDtypeAndShape({dtype_and_shape});
handles.push_back(h1);
const string remote_task = "/job:worker/replica:0/task:1";
Device* d2 = ListGPUDevices().at(2);
TensorHandle* h2 = TensorHandle::CreateUnshapedRemoteHandle(
0, 0, remote_task, dtype, d2, context());
absl::Cleanup h2_cleanup = [&]() { h2->Unref(); };
handles.push_back(h2);
Device* d3 = ListGPUDevices().at(3);
TensorHandle* h3 = TensorHandle::CreateUnshapedRemoteHandle(
1, 0, remote_task, dtype, d3, context());
absl::Cleanup h3_cleanup = [&]() { h3->Unref(); };
handles.push_back(h3);
TensorHandle* packed_handle = nullptr;
TF_EXPECT_OK(TensorHandle::CreatePackedHandle(std::move(handles), context(),
&packed_handle));
absl::Cleanup packed_handle_cleanup = [&]() { packed_handle->Unref(); };
EXPECT_EQ(packed_handle->NumPackedHandles(), 4);
EXPECT_EQ(packed_handle->Type(), TensorHandle::PACKED);
EXPECT_EQ(packed_handle->dtype, dtype);
TensorShape packed_shape;
TF_ASSERT_OK(packed_handle->Shape(&packed_shape));
EXPECT_EQ(packed_shape, shape);
std::vector<DtypeAndPartialTensorShape> dtypes_and_shapes;
TF_ASSERT_OK(
packed_handle->GetResourceHandleDtypesAndShapes(&dtypes_and_shapes));
EXPECT_EQ(dtypes_and_shapes.size(), 1);
EXPECT_EQ(dtypes_and_shapes.at(0).dtype, DT_FLOAT);
EXPECT_EQ(dtypes_and_shapes.at(0).shape.IsIdenticalTo({2, 2}), true);
CompositeDevice* device =
reinterpret_cast<CompositeDevice*>(packed_handle->device());
EXPECT_EQ(device->name(), "/job:worker/replica:0/task:0/device:COMPOSITE:0");
EXPECT_EQ(device->underlying_devices()->size(), 4);
const std::vector<TensorHandle::HandleType> expected_handle_types = {
TensorHandle::LOCAL, TensorHandle::LOCAL, TensorHandle::REMOTE,
TensorHandle::REMOTE};
for (int i = 0; i < packed_handle->NumPackedHandles(); ++i) {
TensorHandle* h = nullptr;
TF_ASSERT_OK(packed_handle->ExtractPackedHandle(i, &h));
EXPECT_EQ(h->device(), ListGPUDevices().at(i));
EXPECT_EQ(h->Type(), expected_handle_types.at(i));
EXPECT_EQ(h->FullType().type_id(), TFT_UNSET);
}
EXPECT_FALSE(IsReady(packed_handle));
TF_ASSERT_OK(h2->SetRemoteShape(shape, ListGPUDevices().at(2),
context()->GetContextViewId()));
EXPECT_FALSE(IsReady(packed_handle));
TF_ASSERT_OK(h3->SetRemoteShape(shape, ListGPUDevices().at(3),
context()->GetContextViewId()));
EXPECT_TRUE(IsReady(packed_handle));
}
TEST_F(PackedTensorHandleTest, PackedSingleHandle) {
tensorflow::DataType dtype = DT_RESOURCE;
TensorShape shape = {};
Tensor t(dtype, shape);
Device* d = ListGPUDevices().at(0);
TensorHandle* h =
TensorHandle::CreateLocalHandle(std::move(t), d, d, d, context());
absl::Cleanup h_cleanup = [&]() { h->Unref(); };
std::vector<TensorHandle*> handles = {h};
TensorHandle* packed_handle = nullptr;
TF_EXPECT_OK(TensorHandle::CreatePackedHandle(std::move(handles), context(),
&packed_handle));
absl::Cleanup packed_handle_cleanup = [&]() { packed_handle->Unref(); };
EXPECT_EQ(packed_handle->Type(), TensorHandle::PACKED);
EXPECT_EQ(packed_handle->dtype, dtype);
TensorShape packed_shape;
TF_ASSERT_OK(packed_handle->Shape(&packed_shape));
EXPECT_EQ(packed_shape, shape);
CompositeDevice* device =
reinterpret_cast<CompositeDevice*>(packed_handle->device());
EXPECT_EQ(device->name(), "/job:worker/replica:0/task:0/device:COMPOSITE:0");
EXPECT_EQ(device->underlying_devices()->size(), 1);
EXPECT_EQ(packed_handle->NumPackedHandles(), 1);
TensorHandle* h0 = nullptr;
TF_ASSERT_OK(packed_handle->ExtractPackedHandle(0, &h0));
EXPECT_EQ(h0->device(), d);
EXPECT_TRUE(IsReady(packed_handle));
}
TEST_F(PackedTensorHandleTest, PoisonHandle) {
tensorflow::DataType dtype = DT_RESOURCE;
TensorShape shape = {};
Tensor t(dtype, shape);
Device* d = ListGPUDevices().at(0);
TensorHandle* h =
TensorHandle::CreateLocalHandle(std::move(t), d, d, d, context());
absl::Cleanup h_cleanup = [&]() { h->Unref(); };
std::vector<TensorHandle*> handles = {h};
TensorHandle* packed_handle = nullptr;
TF_EXPECT_OK(TensorHandle::CreatePackedHandle(std::move(handles), context(),
&packed_handle));
absl::Cleanup packed_handle_cleanup = [&]() { packed_handle->Unref(); };
TF_EXPECT_OK(WaitReady(packed_handle));
tensorflow::Status fake_failure_status(absl::StatusCode::kAborted,
"Fake failure.");
packed_handle->Poison(fake_failure_status, packed_handle->device());
EXPECT_THAT(WaitReady(packed_handle),
StatusIs(fake_failure_status.code(),
std::string(fake_failure_status.message())));
}
TEST(TensorHandle_ResourceDeviceTest, OnLocalDevice) {
std::unique_ptr<Device> d0(
CreateDevice("CPU", "/job:localhost/replica:0/task:0/device:CPU:0"));
StaticDeviceMgr local_device_mgr(std::move(d0));
auto ctx = new EagerContext(
SessionOptions(),
tensorflow::ContextDevicePlacementPolicy::DEVICE_PLACEMENT_SILENT, false,
&local_device_mgr, false, nullptr, nullptr, nullptr,
true);
absl::Cleanup ctx_cleanup = [&]() { ctx->Unref(); };
tensorflow::DataType dtype = DT_RESOURCE;
TensorShape shape = {2};
Tensor t(dtype, shape);
Device* d = local_device_mgr.ListDevices()[0];
TensorHandle* th =
TensorHandle::CreateLocalHandle(std::move(t), d, d, d, ctx);
absl::Cleanup th_cleanup = [&]() { th->Unref(); };
EXPECT_EQ(0, th->resource_remote_device_incarnation());
EXPECT_TRUE(local_device_mgr.ContainsDevice(
th->resource_device()->attributes().incarnation()));
std::unique_ptr<Device> d1(
CreateDevice("CPU", "/job:localhost/replica:0/task:0/device:CPU:0"));
StaticDeviceMgr new_device_mgr(std::move(d1));
EXPECT_FALSE(new_device_mgr.ContainsDevice(
th->resource_device()->attributes().incarnation()));
}
TEST(TensorHandle_ResourceDeviceTest, OnRemoteDevice) {
std::unique_ptr<Device> d_local(
CreateDevice("CPU", "/job:localhost/replica:0/task:0/device:CPU:0"));
StaticDeviceMgr local_device_mgr(std::move(d_local));
auto ctx = new EagerContext(
SessionOptions(),
tensorflow::ContextDevicePlacementPolicy::DEVICE_PLACEMENT_SILENT, false,
&local_device_mgr, false, nullptr, nullptr, nullptr,
true);
absl::Cleanup ctx_cleanup = [&]() { ctx->Unref(); };
std::unique_ptr<Device> d0(
CreateDevice("CPU", "/job:worker/task:0/device:CPU:0", false));
Device* d0_ptr = d0.get();
std::unique_ptr<Device> d1(
CreateDevice("CPU", "/job:worker/task:1/device:CPU:0", false));
Device* d1_ptr = d1.get();
DynamicDeviceMgr remote_device_mgr;
std::vector<std::unique_ptr<Device>> vector_d0;
vector_d0.push_back(std::move(d0));
TF_ASSERT_OK(remote_device_mgr.AddDevices(std::move(vector_d0)));
TensorHandle* th0 = TensorHandle::CreateUnshapedRemoteHandle(
0, 0, "", DT_RESOURCE, d0_ptr, ctx);
absl::Cleanup th0_cleanup = [&]() { th0->Unref(); };
EXPECT_TRUE(remote_device_mgr.ContainsDevice(
th0->resource_remote_device_incarnation()));
std::vector<std::unique_ptr<Device>> vector_d1;
vector_d1.push_back(std::move(d1));
TF_ASSERT_OK(remote_device_mgr.AddDevices(std::move(vector_d1)));
EXPECT_TRUE(remote_device_mgr.ContainsDevice(
th0->resource_remote_device_incarnation()));
TensorHandle* th1 = TensorHandle::CreateUnshapedRemoteHandle(
0, 0, "", DT_RESOURCE, d1_ptr, ctx);
absl::Cleanup th1_cleanup = [&]() { th1->Unref(); };
EXPECT_TRUE(remote_device_mgr.ContainsDevice(
th1->resource_remote_device_incarnation()));
std::vector<Device*> remove_d1{d1_ptr};
TF_ASSERT_OK(remote_device_mgr.RemoveDevices(std::move(remove_d1)));
EXPECT_FALSE(remote_device_mgr.ContainsDevice(
th1->resource_remote_device_incarnation()));
EXPECT_TRUE(remote_device_mgr.ContainsDevice(
th0->resource_remote_device_incarnation()));
}
class RemoteTensorHandleTest : public ::testing::Test {
public:
RemoteTensorHandleTest() {
std::vector<std::unique_ptr<Device>> devices;
for (const char* name : device_names_) {
devices.push_back(CreateDevice("CPU", name));
}
device_mgr_ = new StaticDeviceMgr(std::move(devices));
context_ = new EagerContext(
SessionOptions(),
tensorflow::ContextDevicePlacementPolicy::DEVICE_PLACEMENT_SILENT,
false, device_mgr_,
false, nullptr,
nullptr, nullptr,
true);
}
~RemoteTensorHandleTest() override {
delete device_mgr_;
context_->Unref();
}
EagerContext* context() { return context_; }
std::vector<Device*> ListDevices() const {
return device_mgr_->ListDevices();
}
private:
const std::vector<const char*> device_names_ = {
"/job:worker/replica:0/task:0/device:CPU:0",
"/job:worker/replica:0/task:1/device:CPU:0",
"/job:worker/replica:0/task:2/device:CPU:0"};
StaticDeviceMgr* device_mgr_;
EagerContext* context_;
};
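// Verifies that a handle created with unknown_device=true adopts the op
// device reported later through SetRemoteShapeAndDevice.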
TEST_F(RemoteTensorHandleTest, UnknownRemoteDevice) {
std::vector<std::unique_ptr<Device>> devices;
devices.push_back(
CreateDevice("CPU", "/job:worker/replica:0/task:0/device:CPU:0"));
devices.push_back(
CreateDevice("CPU", "/job:worker/replica:0/task:1/device:CPU:0"));
devices.push_back(
CreateDevice("CPU", "/job:worker/replica:0/task:2/device:CPU:0"));
StaticDeviceMgr device_mgr(std::move(devices));
EagerContext* context = new EagerContext(
SessionOptions(),
tensorflow::ContextDevicePlacementPolicy::DEVICE_PLACEMENT_SILENT,
false, &device_mgr,
false, nullptr,
nullptr, nullptr,
true);
absl::Cleanup context_cleanup = [&]() { context->Unref(); };
tensorflow::DataType dtype = DT_FLOAT;
TensorShape shape = {};
const string remote_task = "/job:worker/replica:0/task:1";
Device* d1 = device_mgr.ListDevices().at(1);
TensorHandle* h = TensorHandle::CreateUnshapedRemoteHandle(
      /*op_id=*/0, /*output_num=*/0, remote_task, dtype, d1, context,
      /*unknown_device=*/true);
absl::Cleanup h_cleanup = [&]() { h->Unref(); };
EXPECT_EQ(h->device(), d1);
Device* d2 = device_mgr.ListDevices().at(2);
TF_ASSERT_OK(h->SetRemoteShapeAndDevice(
shape, d1, context->GetContextViewId(), d2->name()));
Status s;
EXPECT_EQ(h->BackingDeviceName(&s), d2->name());
TF_EXPECT_OK(s);
EXPECT_EQ(h->device(), d2);
}
TEST_F(RemoteTensorHandleTest, PoisonRemote) {
std::vector<std::unique_ptr<Device>> devices;
devices.push_back(
CreateDevice("CPU", "/job:worker/replica:0/task:0/device:CPU:0"));
devices.push_back(
CreateDevice("CPU", "/job:worker/replica:0/task:1/device:CPU:0"));
devices.push_back(
CreateDevice("CPU", "/job:worker/replica:0/task:2/device:CPU:0"));
StaticDeviceMgr device_mgr(std::move(devices));
EagerContext* context = new EagerContext(
SessionOptions(),
tensorflow::ContextDevicePlacementPolicy::DEVICE_PLACEMENT_SILENT,
false, &device_mgr,
false, nullptr,
nullptr, nullptr,
true);
absl::Cleanup context_cleanup = [&]() { context->Unref(); };
tensorflow::DataType dtype = DT_FLOAT;
TensorShape shape = {};
const string remote_task = "/job:worker/replica:0/task:1";
Device* d1 = device_mgr.ListDevices().at(1);
TensorHandle* h = TensorHandle::CreateUnshapedRemoteHandle(
      /*op_id=*/0, /*output_num=*/0, remote_task, dtype, d1, context,
      /*unknown_device=*/true);
absl::Cleanup h_cleanup = [&]() { h->Unref(); };
EXPECT_EQ(h->device(), d1);
tensorflow::Status fake_failure_status(absl::StatusCode::kAborted,
"Fake failure.");
h->PoisonRemote(fake_failure_status, d1, context->GetContextViewId());
Device* d2 = device_mgr.ListDevices().at(2);
EXPECT_THAT(h->SetRemoteShapeAndDevice(shape, d1, context->GetContextViewId(),
d2->name()),
StatusIs(fake_failure_status.code(),
std::string(fake_failure_status.message())));
}
TEST_F(RemoteTensorHandleTest, PoisonRemoteMirror) {
std::vector<std::unique_ptr<Device>> devices;
devices.push_back(
CreateDevice("CPU", "/job:worker/replica:0/task:0/device:CPU:0"));
devices.push_back(
CreateDevice("CPU", "/job:worker/replica:0/task:1/device:CPU:0"));
devices.push_back(
CreateDevice("CPU", "/job:worker/replica:0/task:2/device:CPU:0"));
StaticDeviceMgr device_mgr(std::move(devices));
EagerContext* context = new EagerContext(
SessionOptions(),
tensorflow::ContextDevicePlacementPolicy::DEVICE_PLACEMENT_SILENT,
false, &device_mgr,
false, nullptr,
nullptr, nullptr,
true);
absl::Cleanup context_cleanup = [&]() { context->Unref(); };
tensorflow::DataType dtype = DT_FLOAT;
TensorShape shape = {};
const string remote_task = "/job:worker/replica:0/task:1";
Device* d1 = device_mgr.ListDevices().at(1);
TensorHandle* h = TensorHandle::CreateUnshapedRemoteHandle(
      /*op_id=*/0, /*output_num=*/0, remote_task, dtype, d1, context,
      /*unknown_device=*/true);
absl::Cleanup h_cleanup = [&]() { h->Unref(); };
EXPECT_EQ(h->device(), d1);
Device* d2 = device_mgr.ListDevices().at(2);
int64_t op_id = 1;
int output_num = 2;
TF_ASSERT_OK(
h->AddUnshapedRemoteMirror(d2, op_id, output_num, remote_task, context));
tensorflow::Status fake_failure_status(absl::StatusCode::kAborted,
"Fake failure.");
h->PoisonRemote(fake_failure_status, d2, context->GetContextViewId());
EXPECT_THAT(h->SetRemoteShapeAndDevice(shape, d2, context->GetContextViewId(),
d2->name()),
StatusIs(fake_failure_status.code(),
std::string(fake_failure_status.message())));
}
TEST_F(RemoteTensorHandleTest, SetRemoteTensorHandleShapeTwice) {
std::vector<std::unique_ptr<Device>> devices;
devices.push_back(
CreateDevice("CPU", "/job:worker/replica:0/task:0/device:CPU:0"));
devices.push_back(
CreateDevice("CPU", "/job:worker/replica:0/task:1/device:CPU:0"));
devices.push_back(
CreateDevice("CPU", "/job:worker/replica:0/task:2/device:CPU:0"));
StaticDeviceMgr device_mgr(std::move(devices));
EagerContext* context = new EagerContext(
SessionOptions(),
tensorflow::ContextDevicePlacementPolicy::DEVICE_PLACEMENT_SILENT,
false, &device_mgr,
false, nullptr,
nullptr, nullptr,
true);
absl::Cleanup context_cleanup = [&]() { context->Unref(); };
tensorflow::DataType dtype = DT_FLOAT;
TensorShape shape = {};
const string remote_task = "/job:worker/replica:0/task:1";
Device* d1 = device_mgr.ListDevices().at(1);
TensorHandle* h = TensorHandle::CreateUnshapedRemoteHandle(
      /*op_id=*/0, /*output_num=*/0, remote_task, dtype, d1, context,
      /*unknown_device=*/true);
absl::Cleanup h_cleanup = [&]() { h->Unref(); };
EXPECT_EQ(h->device(), d1);
Device* d2 = device_mgr.ListDevices().at(2);
int64_t op_id = 1;
int output_num = 2;
TF_ASSERT_OK(
h->AddUnshapedRemoteMirror(d2, op_id, output_num, remote_task, context));
TF_ASSERT_OK(h->SetRemoteShapeAndDevice(
shape, d2, context->GetContextViewId(), d2->name()));
TF_ASSERT_OK(h->SetRemoteShapeAndDevice(
shape, d1, context->GetContextViewId(), d1->name()));
TF_ASSERT_OK(h->SetRemoteShapeAndDevice(
shape, d1, context->GetContextViewId(), d1->name()));
TensorShape another_shape({1});
EXPECT_THAT(h->SetRemoteShapeAndDevice(
another_shape, d1, context->GetContextViewId(), d1->name()),
StatusIs(tensorflow::error::INTERNAL,
HasSubstr("Trying to change shape to")));
}
TEST_F(RemoteTensorHandleTest, SetRemoteMirrorShapeTwice) {
std::vector<std::unique_ptr<Device>> devices;
devices.push_back(
CreateDevice("CPU", "/job:worker/replica:0/task:0/device:CPU:0"));
devices.push_back(
CreateDevice("CPU", "/job:worker/replica:0/task:1/device:CPU:0"));
devices.push_back(
CreateDevice("CPU", "/job:worker/replica:0/task:2/device:CPU:0"));
StaticDeviceMgr device_mgr(std::move(devices));
EagerContext* context = new EagerContext(
SessionOptions(),
tensorflow::ContextDevicePlacementPolicy::DEVICE_PLACEMENT_SILENT,
false, &device_mgr,
false, nullptr,
nullptr, nullptr,
true);
absl::Cleanup context_cleanup = [&]() { context->Unref(); };
tensorflow::DataType dtype = DT_FLOAT;
TensorShape shape = {};
const string remote_task = "/job:worker/replica:0/task:1";
Device* d1 = device_mgr.ListDevices().at(1);
TensorHandle* h = TensorHandle::CreateUnshapedRemoteHandle(
      /*op_id=*/0, /*output_num=*/0, remote_task, dtype, d1, context,
      /*unknown_device=*/true);
absl::Cleanup h_cleanup = [&]() { h->Unref(); };
EXPECT_EQ(h->device(), d1);
Device* d2 = device_mgr.ListDevices().at(2);
TF_ASSERT_OK(h->SetRemoteShapeAndDevice(
shape, d1, context->GetContextViewId(), d2->name()));
int64_t op_id = 1;
int output_num = 2;
TF_ASSERT_OK(
h->AddUnshapedRemoteMirror(d1, op_id, output_num, remote_task, context));
TF_ASSERT_OK(h->SetRemoteShapeAndDevice(
shape, d1, context->GetContextViewId(), d2->name()));
TensorShape another_shape({1});
EXPECT_THAT(h->SetRemoteShapeAndDevice(
another_shape, d1, context->GetContextViewId(), d2->name()),
StatusIs(tensorflow::error::INTERNAL,
HasSubstr("Trying to change shape to")));
}
TEST(TensorHandle_LocalTest, TensorFromDeviceSameDevice) {
std::vector<std::unique_ptr<Device>> devices;
devices.push_back(
CreateDevice("CPU", "/job:localhost/replica:0/task:0/device:CPU:0"));
devices.push_back(
CreateDevice("CPU", "/job:localhost/replica:0/task:0/device:CPU:1"));
StaticDeviceMgr device_mgr(std::move(devices));
EagerContext* context = new EagerContext(
SessionOptions(),
tensorflow::ContextDevicePlacementPolicy::DEVICE_PLACEMENT_SILENT,
false, &device_mgr,
false, nullptr,
nullptr, nullptr,
true);
absl::Cleanup context_cleanup = [&]() { context->Unref(); };
tensorflow::DataType dtype = DT_FLOAT;
TensorShape shape = {};
Tensor t0(dtype, shape);
Device* d0 = device_mgr.ListDevices().at(1);
TensorHandle* h =
TensorHandle::CreateLocalHandle(std::move(t0), d0, d0, d0, context);
absl::Cleanup h_cleanup = [&]() { h->Unref(); };
const Tensor* tensor_from_device;
TF_EXPECT_OK(h->TensorFromDevice(d0, &tensor_from_device));
}
TEST(TensorHandle_LocalTest, TensorFromDeviceDifferentDevice) {
std::vector<std::unique_ptr<Device>> devices;
devices.push_back(
CreateDevice("CPU", "/job:localhost/replica:0/task:0/device:CPU:0"));
devices.push_back(
CreateDevice("CPU", "/job:localhost/replica:0/task:0/device:CPU:1"));
devices.push_back(
CreateDevice("CPU", "/job:worker/replica:0/task:2/device:CPU:0"));
StaticDeviceMgr device_mgr(std::move(devices));
EagerContext* context = new EagerContext(
SessionOptions(),
tensorflow::ContextDevicePlacementPolicy::DEVICE_PLACEMENT_SILENT,
false, &device_mgr,
false, nullptr,
nullptr, nullptr,
true);
absl::Cleanup context_cleanup = [&]() { context->Unref(); };
tensorflow::DataType dtype = DT_FLOAT;
TensorShape shape = {};
Tensor t0(dtype, shape);
Device* d0 = device_mgr.ListDevices().at(1);
TensorHandle* h =
TensorHandle::CreateLocalHandle(std::move(t0), d0, d0, d0, context);
absl::Cleanup h_cleanup = [&]() { h->Unref(); };
Device* d1 = device_mgr.ListDevices().at(2);
tensorflow::Tensor tensor;
TF_EXPECT_OK(h->CopyToDevice(*context, d1, &tensor));
TF_EXPECT_OK(h->AddLocalMirror(std::move(tensor), d1));
const Tensor* tensor_from_device;
TF_EXPECT_OK(h->TensorFromDevice(d1, &tensor_from_device));
}
TEST(TensorHandle_LocalTest, TensorFromDeviceInvalidDevice) {
std::vector<std::unique_ptr<Device>> devices;
devices.push_back(
CreateDevice("CPU", "/job:localhost/replica:0/task:0/device:CPU:0"));
devices.push_back(
CreateDevice("CPU", "/job:localhost/replica:0/task:0/device:CPU:1"));
devices.push_back(
CreateDevice("CPU", "/job:worker/replica:0/task:2/device:CPU:0"));
StaticDeviceMgr device_mgr(std::move(devices));
EagerContext* context = new EagerContext(
SessionOptions(),
tensorflow::ContextDevicePlacementPolicy::DEVICE_PLACEMENT_SILENT,
false, &device_mgr,
false, nullptr,
nullptr, nullptr,
true);
absl::Cleanup context_cleanup = [&]() { context->Unref(); };
tensorflow::DataType dtype = DT_FLOAT;
TensorShape shape = {};
Tensor t0(dtype, shape);
Device* d0 = device_mgr.ListDevices().at(1);
TensorHandle* h =
TensorHandle::CreateLocalHandle(std::move(t0), d0, d0, d0, context);
absl::Cleanup h_cleanup = [&]() { h->Unref(); };
Device* d1 = device_mgr.ListDevices().at(2);
const Tensor* tensor_from_device;
EXPECT_THAT(h->TensorFromDevice(d1, &tensor_from_device),
StatusIs(tensorflow::error::INTERNAL));
}
TEST(TensorHandle_ResourceShapeMirror, CreateAndCheckMirror) {
std::vector<std::unique_ptr<Device>> devices;
devices.push_back(
CreateDevice("CPU", "/job:localhost/replica:0/task:0/device:CPU:0"));
devices.push_back(
CreateDevice("CPU", "/job:localhost/replica:0/task:0/device:CPU:1"));
devices.push_back(
CreateDevice("CPU", "/job:localhost/replica:0/task:0/device:CPU:2"));
StaticDeviceMgr device_mgr(std::move(devices));
EagerContext* context = new EagerContext(
SessionOptions(),
tensorflow::ContextDevicePlacementPolicy::DEVICE_PLACEMENT_SILENT,
false, &device_mgr,
false, nullptr,
nullptr, nullptr,
true);
absl::Cleanup context_cleanup = [&]() { context->Unref(); };
tensorflow::DataType dtype = DT_RESOURCE;
TensorShape shape = {};
Tensor t0(dtype, shape);
Device* d0 = device_mgr.ListDevices().at(1);
TensorHandle* h =
TensorHandle::CreateLocalHandle(std::move(t0), d0, d0, d0, context);
absl::Cleanup h_cleanup = [&]() { h->Unref(); };
Device* d1 = device_mgr.ListDevices().at(2);
int64_t op_id = 1;
int output_num = 2;
EXPECT_FALSE(h->HasResourceShapeMirror(d1, context->GetContextViewId()));
TF_EXPECT_OK(h->AddResourceShapeMirror(d1, op_id, output_num, context));
EXPECT_TRUE(h->HasResourceShapeMirror(d1, context->GetContextViewId()));
TF_EXPECT_OK(h->AddResourceShapeMirror(d1, op_id, output_num, context));
EXPECT_THAT(h->AddResourceShapeMirror(d1, op_id + 1, output_num, context),
StatusIs(tensorflow::error::INTERNAL));
}
TEST(TensorHandle_DeviceNameTest, OnLocalDevice) {
std::vector<std::unique_ptr<Device>> devices;
devices.push_back(
CreateDevice("CPU", "/job:localhost/replica:0/task:0/device:CPU:0"));
devices.push_back(
CreateDevice("GPU", "/job:localhost/replica:0/task:0/device:GPU:0"));
StaticDeviceMgr local_device_mgr(std::move(devices));
auto ctx = new EagerContext(
SessionOptions(),
tensorflow::ContextDevicePlacementPolicy::DEVICE_PLACEMENT_SILENT, false,
&local_device_mgr, false, nullptr, nullptr, nullptr,
true);
absl::Cleanup ctx_cleanup = [&]() { ctx->Unref(); };
Device* dcpu = local_device_mgr.ListDevices()[0];
Device* dgpu = local_device_mgr.ListDevices()[1];
tensorflow::DataType dtype = DT_RESOURCE;
TensorShape shape = {2};
Tensor tcpu(dtype, shape);
Tensor tgpu(dtype, shape);
Status s;
TensorHandle* th_cpu =
TensorHandle::CreateLocalHandle(std::move(tcpu), dcpu, dcpu, dcpu, ctx);
const char* device_name = th_cpu->DeviceName(&s);
absl::Cleanup th_cpu_cleanup = [&]() { th_cpu->Unref(); };
TF_EXPECT_OK(s);
ASSERT_TRUE(absl::StrContains(device_name, "CPU")) << device_name;
const char* backing_device_name = th_cpu->BackingDeviceName(&s);
TF_EXPECT_OK(s);
ASSERT_TRUE(absl::StrContains(backing_device_name, "CPU"))
<< backing_device_name;
const char* device_type = th_cpu->DeviceType(&s);
TF_EXPECT_OK(s);
ASSERT_TRUE(absl::StrContains(device_type, "CPU")) << device_type;
int device_id = th_cpu->DeviceId(&s);
TF_EXPECT_OK(s);
ASSERT_EQ(0, device_id) << device_id;
TensorHandle* th_gpu =
TensorHandle::CreateLocalHandle(std::move(tgpu), dgpu, dgpu, dgpu, ctx);
absl::Cleanup th_gpu_cleanup = [&]() { th_gpu->Unref(); };
device_name = th_gpu->DeviceName(&s);
TF_EXPECT_OK(s);
ASSERT_TRUE(absl::StrContains(device_name, "GPU")) << device_name;
backing_device_name = th_gpu->BackingDeviceName(&s);
TF_EXPECT_OK(s);
std::cout << "backing_device_name for GPU: " << backing_device_name
<< std::endl;
ASSERT_TRUE(absl::StrContains(backing_device_name, "GPU"))
<< backing_device_name;
device_type = th_gpu->DeviceType(&s);
TF_EXPECT_OK(s);
ASSERT_TRUE(absl::StrContains(device_type, "GPU")) << device_type;
device_id = th_gpu->DeviceId(&s);
TF_EXPECT_OK(s);
ASSERT_EQ(0, device_id) << device_id;
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/eager/tensor_handle.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/eager/tensor_handle_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
15e81774-698f-4cbc-a34a-ff70cdd6ccf9 | cpp | google/cel-cpp | well_known_types | internal/well_known_types.cc | internal/well_known_types_test.cc | #include "internal/well_known_types.h"
#include <cstddef>
#include <cstdint>
#include <functional>
#include <string>
#include <utility>
#include <vector>
#include "google/protobuf/any.pb.h"
#include "google/protobuf/duration.pb.h"
#include "google/protobuf/struct.pb.h"
#include "google/protobuf/timestamp.pb.h"
#include "google/protobuf/wrappers.pb.h"
#include "google/protobuf/descriptor.pb.h"
#include "absl/base/attributes.h"
#include "absl/base/no_destructor.h"
#include "absl/base/nullability.h"
#include "absl/base/optimization.h"
#include "absl/functional/overload.h"
#include "absl/log/absl_check.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/cord.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "absl/strings/strip.h"
#include "absl/time/time.h"
#include "absl/types/variant.h"
#include "common/json.h"
#include "common/memory.h"
#include "extensions/protobuf/internal/map_reflection.h"
#include "internal/status_macros.h"
#include "google/protobuf/arena.h"
#include "google/protobuf/descriptor.h"
#include "google/protobuf/map_field.h"
#include "google/protobuf/message.h"
#include "google/protobuf/message_lite.h"
#include "google/protobuf/reflection.h"
#include "google/protobuf/util/time_util.h"
namespace cel::well_known_types {
namespace {
using ::google::protobuf::Descriptor;
using ::google::protobuf::DescriptorPool;
using ::google::protobuf::EnumDescriptor;
using ::google::protobuf::FieldDescriptor;
using ::google::protobuf::OneofDescriptor;
using ::google::protobuf::util::TimeUtil;
using CppStringType = ::google::protobuf::FieldDescriptor::CppStringType;
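// StringValue/BytesValue are variants over absl::string_view and absl::Cord.
// FlatStringValue produces a contiguous view, copying a non-flat Cord into
// the caller-provided scratch buffer.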
absl::string_view FlatStringValue(
const StringValue& value ABSL_ATTRIBUTE_LIFETIME_BOUND,
std::string& scratch ABSL_ATTRIBUTE_LIFETIME_BOUND) {
return absl::visit(
absl::Overload(
[](absl::string_view string) -> absl::string_view { return string; },
[&](const absl::Cord& cord) -> absl::string_view {
if (auto flat = cord.TryFlat(); flat) {
return *flat;
}
scratch = static_cast<std::string>(cord);
return scratch;
}),
AsVariant(value));
}
StringValue CopyStringValue(const StringValue& value,
std::string& scratch
ABSL_ATTRIBUTE_LIFETIME_BOUND) {
return absl::visit(
absl::Overload(
[&](absl::string_view string) -> StringValue {
if (string.data() != scratch.data()) {
scratch.assign(string.data(), string.size());
return scratch;
}
return string;
},
[](const absl::Cord& cord) -> StringValue { return cord; }),
AsVariant(value));
}
BytesValue CopyBytesValue(const BytesValue& value,
std::string& scratch ABSL_ATTRIBUTE_LIFETIME_BOUND) {
return absl::visit(
absl::Overload(
[&](absl::string_view string) -> BytesValue {
if (string.data() != scratch.data()) {
scratch.assign(string.data(), string.size());
return scratch;
}
return string;
},
[](const absl::Cord& cord) -> BytesValue { return cord; }),
AsVariant(value));
}
google::protobuf::Reflection::ScratchSpace& GetScratchSpace() {
static absl::NoDestructor<google::protobuf::Reflection::ScratchSpace> scratch_space;
return *scratch_space;
}
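// Reads a singular string field as a Cord or a string_view, picking the
// reflection accessor that matches the field's C++ string representation;
// scratch backs the returned view when the API must materialize a copy.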
template <typename Variant>
Variant GetStringField(absl::Nonnull<const google::protobuf::Reflection*> reflection,
const google::protobuf::Message& message,
absl::Nonnull<const FieldDescriptor*> field,
CppStringType string_type,
std::string& scratch ABSL_ATTRIBUTE_LIFETIME_BOUND) {
ABSL_DCHECK(field->cpp_string_type() == string_type);
switch (string_type) {
case CppStringType::kCord:
return reflection->GetCord(message, field);
case CppStringType::kView:
ABSL_FALLTHROUGH_INTENDED;
case CppStringType::kString:
return reflection->GetStringView(message, field, GetScratchSpace());
default:
return absl::string_view(
reflection->GetStringReference(message, field, &scratch));
}
}
template <typename Variant>
Variant GetStringField(const google::protobuf::Message& message,
absl::Nonnull<const FieldDescriptor*> field,
CppStringType string_type,
std::string& scratch ABSL_ATTRIBUTE_LIFETIME_BOUND) {
return GetStringField<Variant>(message.GetReflection(), message, field,
string_type, scratch);
}
template <typename Variant>
Variant GetRepeatedStringField(
absl::Nonnull<const google::protobuf::Reflection*> reflection,
const google::protobuf::Message& message, absl::Nonnull<const FieldDescriptor*> field,
CppStringType string_type, int index,
std::string& scratch ABSL_ATTRIBUTE_LIFETIME_BOUND) {
ABSL_DCHECK(field->cpp_string_type() == string_type);
switch (string_type) {
case CppStringType::kView:
ABSL_FALLTHROUGH_INTENDED;
case CppStringType::kString:
return reflection->GetRepeatedStringView(message, field, index,
GetScratchSpace());
default:
return absl::string_view(reflection->GetRepeatedStringReference(
message, field, index, &scratch));
}
}
template <typename Variant>
Variant GetRepeatedStringField(
const google::protobuf::Message& message, absl::Nonnull<const FieldDescriptor*> field,
CppStringType string_type, int index,
std::string& scratch ABSL_ATTRIBUTE_LIFETIME_BOUND) {
return GetRepeatedStringField<Variant>(message.GetReflection(), message,
field, string_type, index, scratch);
}
absl::StatusOr<absl::Nonnull<const Descriptor*>> GetMessageTypeByName(
absl::Nonnull<const DescriptorPool*> pool, absl::string_view name) {
const auto* descriptor = pool->FindMessageTypeByName(name);
if (ABSL_PREDICT_FALSE(descriptor == nullptr)) {
return absl::InvalidArgumentError(absl::StrCat(
"descriptor missing for protocol buffer message well known type: ",
name));
}
return descriptor;
}
absl::StatusOr<absl::Nonnull<const EnumDescriptor*>> GetEnumTypeByName(
absl::Nonnull<const DescriptorPool*> pool, absl::string_view name) {
const auto* descriptor = pool->FindEnumTypeByName(name);
if (ABSL_PREDICT_FALSE(descriptor == nullptr)) {
return absl::InvalidArgumentError(absl::StrCat(
"descriptor missing for protocol buffer enum well known type: ", name));
}
return descriptor;
}
absl::StatusOr<absl::Nonnull<const OneofDescriptor*>> GetOneofByName(
absl::Nonnull<const Descriptor*> descriptor, absl::string_view name) {
const auto* oneof = descriptor->FindOneofByName(name);
if (ABSL_PREDICT_FALSE(oneof == nullptr)) {
return absl::InvalidArgumentError(absl::StrCat(
"oneof missing for protocol buffer message well known type: ",
descriptor->full_name(), ".", name));
}
return oneof;
}
absl::StatusOr<absl::Nonnull<const FieldDescriptor*>> GetFieldByNumber(
absl::Nonnull<const Descriptor*> descriptor, int32_t number) {
const auto* field = descriptor->FindFieldByNumber(number);
if (ABSL_PREDICT_FALSE(field == nullptr)) {
return absl::InvalidArgumentError(absl::StrCat(
"field missing for protocol buffer message well known type: ",
descriptor->full_name(), ".", number));
}
return field;
}
absl::Status CheckFieldType(absl::Nonnull<const FieldDescriptor*> field,
FieldDescriptor::Type type) {
if (ABSL_PREDICT_FALSE(field->type() != type)) {
return absl::InvalidArgumentError(absl::StrCat(
"unexpected field type for protocol buffer message well known type: ",
field->full_name(), " ", field->type_name()));
}
return absl::OkStatus();
}
absl::Status CheckFieldCppType(absl::Nonnull<const FieldDescriptor*> field,
FieldDescriptor::CppType cpp_type) {
if (ABSL_PREDICT_FALSE(field->cpp_type() != cpp_type)) {
return absl::InvalidArgumentError(absl::StrCat(
"unexpected field type for protocol buffer message well known type: ",
field->full_name(), " ", field->cpp_type_name()));
}
return absl::OkStatus();
}
absl::string_view LabelToString(FieldDescriptor::Label label) {
switch (label) {
case FieldDescriptor::LABEL_REPEATED:
return "REPEATED";
case FieldDescriptor::LABEL_REQUIRED:
return "REQUIRED";
case FieldDescriptor::LABEL_OPTIONAL:
return "OPTIONAL";
default:
return "ERROR";
}
}
absl::Status CheckFieldCardinality(absl::Nonnull<const FieldDescriptor*> field,
FieldDescriptor::Label label) {
if (ABSL_PREDICT_FALSE(field->label() != label)) {
return absl::InvalidArgumentError(
absl::StrCat("unexpected field cardinality for protocol buffer message "
"well known type: ",
field->full_name(), " ", LabelToString(field->label())));
}
return absl::OkStatus();
}
absl::string_view WellKnownTypeToString(
Descriptor::WellKnownType well_known_type) {
switch (well_known_type) {
case Descriptor::WELLKNOWNTYPE_BOOLVALUE:
return "BOOLVALUE";
case Descriptor::WELLKNOWNTYPE_INT32VALUE:
return "INT32VALUE";
case Descriptor::WELLKNOWNTYPE_INT64VALUE:
return "INT64VALUE";
case Descriptor::WELLKNOWNTYPE_UINT32VALUE:
return "UINT32VALUE";
case Descriptor::WELLKNOWNTYPE_UINT64VALUE:
return "UINT64VALUE";
case Descriptor::WELLKNOWNTYPE_FLOATVALUE:
return "FLOATVALUE";
case Descriptor::WELLKNOWNTYPE_DOUBLEVALUE:
return "DOUBLEVALUE";
case Descriptor::WELLKNOWNTYPE_BYTESVALUE:
return "BYTESVALUE";
case Descriptor::WELLKNOWNTYPE_STRINGVALUE:
return "STRINGVALUE";
case Descriptor::WELLKNOWNTYPE_ANY:
return "ANY";
case Descriptor::WELLKNOWNTYPE_DURATION:
return "DURATION";
case Descriptor::WELLKNOWNTYPE_TIMESTAMP:
return "TIMESTAMP";
case Descriptor::WELLKNOWNTYPE_VALUE:
return "VALUE";
case Descriptor::WELLKNOWNTYPE_LISTVALUE:
return "LISTVALUE";
case Descriptor::WELLKNOWNTYPE_STRUCT:
return "STRUCT";
case Descriptor::WELLKNOWNTYPE_FIELDMASK:
return "FIELDMASK";
default:
return "ERROR";
}
}
absl::Status CheckWellKnownType(absl::Nonnull<const Descriptor*> descriptor,
Descriptor::WellKnownType well_known_type) {
if (ABSL_PREDICT_FALSE(descriptor->well_known_type() != well_known_type)) {
return absl::InvalidArgumentError(absl::StrCat(
"expected message to be well known type: ", descriptor->full_name(),
" ", WellKnownTypeToString(descriptor->well_known_type())));
}
return absl::OkStatus();
}
absl::Status CheckFieldWellKnownType(
absl::Nonnull<const FieldDescriptor*> field,
Descriptor::WellKnownType well_known_type) {
ABSL_DCHECK_EQ(field->cpp_type(), FieldDescriptor::CPPTYPE_MESSAGE);
if (ABSL_PREDICT_FALSE(field->message_type()->well_known_type() !=
well_known_type)) {
return absl::InvalidArgumentError(absl::StrCat(
"expected message field to be well known type for protocol buffer "
"message well known type: ",
field->full_name(), " ",
WellKnownTypeToString(field->message_type()->well_known_type())));
}
return absl::OkStatus();
}
absl::Status CheckFieldOneof(absl::Nonnull<const FieldDescriptor*> field,
absl::Nonnull<const OneofDescriptor*> oneof,
int index) {
if (ABSL_PREDICT_FALSE(field->containing_oneof() != oneof)) {
return absl::InvalidArgumentError(
absl::StrCat("expected field to be member of oneof for protocol buffer "
"message well known type: ",
field->full_name()));
}
if (ABSL_PREDICT_FALSE(field->index_in_oneof() != index)) {
return absl::InvalidArgumentError(absl::StrCat(
"expected field to have index in oneof of ", index,
" for protocol buffer "
"message well known type: ",
field->full_name(), " oneof_index=", field->index_in_oneof()));
}
return absl::OkStatus();
}
absl::Status CheckMapField(absl::Nonnull<const FieldDescriptor*> field) {
if (ABSL_PREDICT_FALSE(!field->is_map())) {
return absl::InvalidArgumentError(
absl::StrCat("expected field to be map for protocol buffer "
"message well known type: ",
field->full_name()));
}
return absl::OkStatus();
}
}
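// Strips `prefix` in place, handling both alternatives of the underlying
// variant: an absl::string_view is narrowed directly, while an absl::Cord has
// the prefix removed via RemovePrefix() after a StartsWith() check.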
bool StringValue::ConsumePrefix(absl::string_view prefix) {
return absl::visit(absl::Overload(
[&](absl::string_view& value) {
return absl::ConsumePrefix(&value, prefix);
},
[&](absl::Cord& cord) {
if (cord.StartsWith(prefix)) {
cord.RemovePrefix(prefix.size());
return true;
}
return false;
}),
AsVariant(*this));
}
StringValue GetStringField(absl::Nonnull<const google::protobuf::Reflection*> reflection,
const google::protobuf::Message& message,
absl::Nonnull<const FieldDescriptor*> field,
std::string& scratch) {
ABSL_DCHECK_EQ(reflection, message.GetReflection());
ABSL_DCHECK(!field->is_map() && !field->is_repeated());
ABSL_DCHECK_EQ(field->type(), FieldDescriptor::TYPE_STRING);
ABSL_DCHECK_EQ(field->cpp_type(), FieldDescriptor::CPPTYPE_STRING);
return GetStringField<StringValue>(reflection, message, field,
field->cpp_string_type(), scratch);
}
BytesValue GetBytesField(absl::Nonnull<const google::protobuf::Reflection*> reflection,
const google::protobuf::Message& message,
absl::Nonnull<const FieldDescriptor*> field,
std::string& scratch) {
ABSL_DCHECK_EQ(reflection, message.GetReflection());
ABSL_DCHECK(!field->is_map() && !field->is_repeated());
ABSL_DCHECK_EQ(field->type(), FieldDescriptor::TYPE_BYTES);
ABSL_DCHECK_EQ(field->cpp_type(), FieldDescriptor::CPPTYPE_STRING);
return GetStringField<BytesValue>(reflection, message, field,
field->cpp_string_type(), scratch);
}
StringValue GetRepeatedStringField(
absl::Nonnull<const google::protobuf::Reflection*> reflection,
const google::protobuf::Message& message, absl::Nonnull<const FieldDescriptor*> field,
int index, std::string& scratch) {
ABSL_DCHECK_EQ(reflection, message.GetReflection());
ABSL_DCHECK(!field->is_map() && field->is_repeated());
ABSL_DCHECK_EQ(field->type(), FieldDescriptor::TYPE_STRING);
ABSL_DCHECK_EQ(field->cpp_type(), FieldDescriptor::CPPTYPE_STRING);
return GetRepeatedStringField<StringValue>(
reflection, message, field, field->cpp_string_type(), index, scratch);
}
BytesValue GetRepeatedBytesField(
absl::Nonnull<const google::protobuf::Reflection*> reflection,
const google::protobuf::Message& message, absl::Nonnull<const FieldDescriptor*> field,
int index, std::string& scratch) {
ABSL_DCHECK_EQ(reflection, message.GetReflection());
ABSL_DCHECK(!field->is_map() && field->is_repeated());
ABSL_DCHECK_EQ(field->type(), FieldDescriptor::TYPE_BYTES);
ABSL_DCHECK_EQ(field->cpp_type(), FieldDescriptor::CPPTYPE_STRING);
return GetRepeatedStringField<BytesValue>(
reflection, message, field, field->cpp_string_type(), index, scratch);
}
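// Each *Reflection class below follows the same protocol: Initialize() may be
// given either a DescriptorPool (the descriptor is looked up by name) or a
// Descriptor directly; it validates the shape of the well known type and
// caches the field descriptors needed for fast access. Re-initializing with
// the same descriptor is a no-op.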
absl::Status NullValueReflection::Initialize(
absl::Nonnull<const DescriptorPool*> pool) {
CEL_ASSIGN_OR_RETURN(const auto* descriptor,
GetEnumTypeByName(pool, "google.protobuf.NullValue"));
return Initialize(descriptor);
}
absl::Status NullValueReflection::Initialize(
absl::Nonnull<const EnumDescriptor*> descriptor) {
if (descriptor_ != descriptor) {
if (ABSL_PREDICT_FALSE(descriptor->full_name() !=
"google.protobuf.NullValue")) {
return absl::InvalidArgumentError(absl::StrCat(
"expected enum to be well known type: ", descriptor->full_name(),
" google.protobuf.NullValue"));
}
descriptor_ = nullptr;
value_ = descriptor->FindValueByNumber(0);
if (ABSL_PREDICT_FALSE(value_ == nullptr)) {
return absl::InvalidArgumentError(
"well known protocol buffer enum missing value: "
"google.protobuf.NullValue.NULL_VALUE");
}
if (ABSL_PREDICT_FALSE(descriptor->value_count() != 1)) {
std::vector<absl::string_view> values;
values.reserve(static_cast<size_t>(descriptor->value_count()));
for (int i = 0; i < descriptor->value_count(); ++i) {
values.push_back(descriptor->value(i)->name());
}
return absl::InvalidArgumentError(
absl::StrCat("well known protocol buffer enum has multiple values: [",
absl::StrJoin(values, ", "), "]"));
}
descriptor_ = descriptor;
}
return absl::OkStatus();
}
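// Reflection over google.protobuf.BoolValue. A minimal usage sketch, assuming
// `pool` describes the standard well known types and `message` is a BoolValue
// instance:
//
//   BoolValueReflection reflection;
//   CEL_RETURN_IF_ERROR(reflection.Initialize(pool));
//   bool value = reflection.GetValue(message);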
absl::Status BoolValueReflection::Initialize(
absl::Nonnull<const DescriptorPool*> pool) {
CEL_ASSIGN_OR_RETURN(const auto* descriptor,
GetMessageTypeByName(pool, "google.protobuf.BoolValue"));
return Initialize(descriptor);
}
absl::Status BoolValueReflection::Initialize(
absl::Nonnull<const Descriptor*> descriptor) {
if (descriptor_ != descriptor) {
CEL_RETURN_IF_ERROR(CheckWellKnownType(descriptor, kWellKnownType));
descriptor_ = nullptr;
CEL_ASSIGN_OR_RETURN(value_field_, GetFieldByNumber(descriptor, 1));
CEL_RETURN_IF_ERROR(
CheckFieldCppType(value_field_, FieldDescriptor::CPPTYPE_BOOL));
CEL_RETURN_IF_ERROR(
CheckFieldCardinality(value_field_, FieldDescriptor::LABEL_OPTIONAL));
descriptor_ = descriptor;
}
return absl::OkStatus();
}
bool BoolValueReflection::GetValue(const google::protobuf::Message& message) const {
ABSL_DCHECK(IsInitialized());
ABSL_DCHECK_EQ(message.GetDescriptor(), descriptor_);
return message.GetReflection()->GetBool(message, value_field_);
}
void BoolValueReflection::SetValue(absl::Nonnull<google::protobuf::Message*> message,
bool value) const {
ABSL_DCHECK(IsInitialized());
ABSL_DCHECK_EQ(message->GetDescriptor(), descriptor_);
message->GetReflection()->SetBool(message, value_field_, value);
}
absl::StatusOr<BoolValueReflection> GetBoolValueReflection(
absl::Nonnull<const Descriptor*> descriptor) {
BoolValueReflection reflection;
CEL_RETURN_IF_ERROR(reflection.Initialize(descriptor));
return reflection;
}
absl::Status Int32ValueReflection::Initialize(
absl::Nonnull<const DescriptorPool*> pool) {
CEL_ASSIGN_OR_RETURN(
const auto* descriptor,
GetMessageTypeByName(pool, "google.protobuf.Int32Value"));
return Initialize(descriptor);
}
absl::Status Int32ValueReflection::Initialize(
absl::Nonnull<const Descriptor*> descriptor) {
if (descriptor_ != descriptor) {
CEL_RETURN_IF_ERROR(CheckWellKnownType(descriptor, kWellKnownType));
descriptor_ = nullptr;
CEL_ASSIGN_OR_RETURN(value_field_, GetFieldByNumber(descriptor, 1));
CEL_RETURN_IF_ERROR(
CheckFieldCppType(value_field_, FieldDescriptor::CPPTYPE_INT32));
CEL_RETURN_IF_ERROR(
CheckFieldCardinality(value_field_, FieldDescriptor::LABEL_OPTIONAL));
descriptor_ = descriptor;
}
return absl::OkStatus();
}
int32_t Int32ValueReflection::GetValue(const google::protobuf::Message& message) const {
ABSL_DCHECK(IsInitialized());
ABSL_DCHECK_EQ(message.GetDescriptor(), descriptor_);
return message.GetReflection()->GetInt32(message, value_field_);
}
void Int32ValueReflection::SetValue(absl::Nonnull<google::protobuf::Message*> message,
int32_t value) const {
ABSL_DCHECK(IsInitialized());
ABSL_DCHECK_EQ(message->GetDescriptor(), descriptor_);
message->GetReflection()->SetInt32(message, value_field_, value);
}
absl::StatusOr<Int32ValueReflection> GetInt32ValueReflection(
absl::Nonnull<const Descriptor*> descriptor) {
Int32ValueReflection reflection;
CEL_RETURN_IF_ERROR(reflection.Initialize(descriptor));
return reflection;
}
absl::Status Int64ValueReflection::Initialize(
absl::Nonnull<const DescriptorPool*> pool) {
CEL_ASSIGN_OR_RETURN(
const auto* descriptor,
GetMessageTypeByName(pool, "google.protobuf.Int64Value"));
return Initialize(descriptor);
}
absl::Status Int64ValueReflection::Initialize(
absl::Nonnull<const Descriptor*> descriptor) {
if (descriptor_ != descriptor) {
CEL_RETURN_IF_ERROR(CheckWellKnownType(descriptor, kWellKnownType));
descriptor_ = nullptr;
CEL_ASSIGN_OR_RETURN(value_field_, GetFieldByNumber(descriptor, 1));
CEL_RETURN_IF_ERROR(
CheckFieldCppType(value_field_, FieldDescriptor::CPPTYPE_INT64));
CEL_RETURN_IF_ERROR(
CheckFieldCardinality(value_field_, FieldDescriptor::LABEL_OPTIONAL));
descriptor_ = descriptor;
}
return absl::OkStatus();
}
int64_t Int64ValueReflection::GetValue(const google::protobuf::Message& message) const {
ABSL_DCHECK(IsInitialized());
ABSL_DCHECK_EQ(message.GetDescriptor(), descriptor_);
return message.GetReflection()->GetInt64(message, value_field_);
}
void Int64ValueReflection::SetValue(absl::Nonnull<google::protobuf::Message*> message,
int64_t value) const {
ABSL_DCHECK(IsInitialized());
ABSL_DCHECK_EQ(message->GetDescriptor(), descriptor_);
message->GetReflection()->SetInt64(message, value_field_, value);
}
absl::StatusOr<Int64ValueReflection> GetInt64ValueReflection(
absl::Nonnull<const Descriptor*> descriptor) {
Int64ValueReflection reflection;
CEL_RETURN_IF_ERROR(reflection.Initialize(descriptor));
return reflection;
}
absl::Status UInt32ValueReflection::Initialize(
absl::Nonnull<const DescriptorPool*> pool) {
CEL_ASSIGN_OR_RETURN(
const auto* descriptor,
GetMessageTypeByName(pool, "google.protobuf.UInt32Value"));
return Initialize(descriptor);
}
absl::Status UInt32ValueReflection::Initialize(
absl::Nonnull<const Descriptor*> descriptor) {
if (descriptor_ != descriptor) {
CEL_RETURN_IF_ERROR(CheckWellKnownType(descriptor, kWellKnownType));
descriptor_ = nullptr;
CEL_ASSIGN_OR_RETURN(value_field_, GetFieldByNumber(descriptor, 1));
CEL_RETURN_IF_ERROR(
CheckFieldCppType(value_field_, FieldDescriptor::CPPTYPE_UINT32));
CEL_RETURN_IF_ERROR(
CheckFieldCardinality(value_field_, FieldDescriptor::LABEL_OPTIONAL));
descriptor_ = descriptor;
}
return absl::OkStatus();
}
uint32_t UInt32ValueReflection::GetValue(const google::protobuf::Message& message) const {
ABSL_DCHECK(IsInitialized());
ABSL_DCHECK_EQ(message.GetDescriptor(), descriptor_);
return message.GetReflection()->GetUInt32(message, value_field_);
}
void UInt32ValueReflection::SetValue(absl::Nonnull<google::protobuf::Message*> message,
uint32_t value) const {
ABSL_DCHECK(IsInitialized());
ABSL_DCHECK_EQ(message->GetDescriptor(), descriptor_);
message->GetReflection()->SetUInt32(message, value_field_, value);
}
absl::StatusOr<UInt32ValueReflection> GetUInt32ValueReflection(
absl::Nonnull<const Descriptor*> descriptor) {
UInt32ValueReflection reflection;
CEL_RETURN_IF_ERROR(reflection.Initialize(descriptor));
return reflection;
}
absl::Status UInt64ValueReflection::Initialize(
absl::Nonnull<const DescriptorPool*> pool) {
CEL_ASSIGN_OR_RETURN(
const auto* descriptor,
GetMessageTypeByName(pool, "google.protobuf.UInt64Value"));
return Initialize(descriptor);
}
absl::Status UInt64ValueReflection::Initialize(
absl::Nonnull<const Descriptor*> descriptor) {
if (descriptor_ != descriptor) {
CEL_RETURN_IF_ERROR(CheckWellKnownType(descriptor, kWellKnownType));
descriptor_ = nullptr;
CEL_ASSIGN_OR_RETURN(value_field_, GetFieldByNumber(descriptor, 1));
CEL_RETURN_IF_ERROR(
CheckFieldCppType(value_field_, FieldDescriptor::CPPTYPE_UINT64));
CEL_RETURN_IF_ERROR(
CheckFieldCardinality(value_field_, FieldDescriptor::LABEL_OPTIONAL));
descriptor_ = descriptor;
}
return absl::OkStatus();
}
uint64_t UInt64ValueReflection::GetValue(const google::protobuf::Message& message) const {
ABSL_DCHECK(IsInitialized());
ABSL_DCHECK_EQ(message.GetDescriptor(), descriptor_);
return message.GetReflection()->GetUInt64(message, value_field_);
}
void UInt64ValueReflection::SetValue(absl::Nonnull<google::protobuf::Message*> message,
uint64_t value) const {
ABSL_DCHECK(IsInitialized());
ABSL_DCHECK_EQ(message->GetDescriptor(), descriptor_);
message->GetReflection()->SetUInt64(message, value_field_, value);
}
absl::StatusOr<UInt64ValueReflection> GetUInt64ValueReflection(
absl::Nonnull<const Descriptor*> descriptor) {
UInt64ValueReflection reflection;
CEL_RETURN_IF_ERROR(reflection.Initialize(descriptor));
return reflection;
}
absl::Status FloatValueReflection::Initialize(
absl::Nonnull<const DescriptorPool*> pool) {
CEL_ASSIGN_OR_RETURN(
const auto* descriptor,
GetMessageTypeByName(pool, "google.protobuf.FloatValue"));
return Initialize(descriptor);
}
absl::Status FloatValueReflection::Initialize(
absl::Nonnull<const Descriptor*> descriptor) {
if (descriptor_ != descriptor) {
CEL_RETURN_IF_ERROR(CheckWellKnownType(descriptor, kWellKnownType));
descriptor_ = nullptr;
CEL_ASSIGN_OR_RETURN(value_field_, GetFieldByNumber(descriptor, 1));
CEL_RETURN_IF_ERROR(
CheckFieldCppType(value_field_, FieldDescriptor::CPPTYPE_FLOAT));
CEL_RETURN_IF_ERROR(
CheckFieldCardinality(value_field_, FieldDescriptor::LABEL_OPTIONAL));
descriptor_ = descriptor;
}
return absl::OkStatus();
}
float FloatValueReflection::GetValue(const google::protobuf::Message& message) const {
ABSL_DCHECK(IsInitialized());
ABSL_DCHECK_EQ(message.GetDescriptor(), descriptor_);
return message.GetReflection()->GetFloat(message, value_field_);
}
void FloatValueReflection::SetValue(absl::Nonnull<google::protobuf::Message*> message,
float value) const {
ABSL_DCHECK(IsInitialized());
ABSL_DCHECK_EQ(message->GetDescriptor(), descriptor_);
message->GetReflection()->SetFloat(message, value_field_, value);
}
absl::StatusOr<FloatValueReflection> GetFloatValueReflection(
absl::Nonnull<const Descriptor*> descriptor) {
FloatValueReflection reflection;
CEL_RETURN_IF_ERROR(reflection.Initialize(descriptor));
return reflection;
}
absl::Status DoubleValueReflection::Initialize(
absl::Nonnull<const DescriptorPool*> pool) {
CEL_ASSIGN_OR_RETURN(
const auto* descriptor,
GetMessageTypeByName(pool, "google.protobuf.DoubleValue"));
return Initialize(descriptor);
}
absl::Status DoubleValueReflection::Initialize(
absl::Nonnull<const Descriptor*> descriptor) {
if (descriptor_ != descriptor) {
CEL_RETURN_IF_ERROR(CheckWellKnownType(descriptor, kWellKnownType));
descriptor_ = nullptr;
CEL_ASSIGN_OR_RETURN(value_field_, GetFieldByNumber(descriptor, 1));
CEL_RETURN_IF_ERROR(
CheckFieldCppType(value_field_, FieldDescriptor::CPPTYPE_DOUBLE));
CEL_RETURN_IF_ERROR(
CheckFieldCardinality(value_field_, FieldDescriptor::LABEL_OPTIONAL));
descriptor_ = descriptor;
}
return absl::OkStatus();
}
double DoubleValueReflection::GetValue(const google::protobuf::Message& message) const {
ABSL_DCHECK(IsInitialized());
ABSL_DCHECK_EQ(message.GetDescriptor(), descriptor_);
return message.GetReflection()->GetDouble(message, value_field_);
}
void DoubleValueReflection::SetValue(absl::Nonnull<google::protobuf::Message*> message,
double value) const {
ABSL_DCHECK(IsInitialized());
ABSL_DCHECK_EQ(message->GetDescriptor(), descriptor_);
message->GetReflection()->SetDouble(message, value_field_, value);
}
absl::StatusOr<DoubleValueReflection> GetDoubleValueReflection(
absl::Nonnull<const Descriptor*> descriptor) {
DoubleValueReflection reflection;
CEL_RETURN_IF_ERROR(reflection.Initialize(descriptor));
return reflection;
}
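// Unlike the scalar wrappers above, BytesValueReflection additionally caches
// the value field's C++ string representation (cord, string, or view) so that
// GetValue() can take the cheapest access path without re-querying the
// descriptor.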
absl::Status BytesValueReflection::Initialize(
absl::Nonnull<const DescriptorPool*> pool) {
CEL_ASSIGN_OR_RETURN(
const auto* descriptor,
GetMessageTypeByName(pool, "google.protobuf.BytesValue"));
return Initialize(descriptor);
}
absl::Status BytesValueReflection::Initialize(
absl::Nonnull<const Descriptor*> descriptor) {
if (descriptor_ != descriptor) {
CEL_RETURN_IF_ERROR(CheckWellKnownType(descriptor, kWellKnownType));
descriptor_ = nullptr;
CEL_ASSIGN_OR_RETURN(value_field_, GetFieldByNumber(descriptor, 1));
CEL_RETURN_IF_ERROR(
CheckFieldType(value_field_, FieldDescriptor::TYPE_BYTES));
CEL_RETURN_IF_ERROR(
CheckFieldCardinality(value_field_, FieldDescriptor::LABEL_OPTIONAL));
value_field_string_type_ = value_field_->cpp_string_type();
descriptor_ = descriptor;
}
return absl::OkStatus();
}
BytesValue BytesValueReflection::GetValue(const google::protobuf::Message& message,
std::string& scratch) const {
ABSL_DCHECK(IsInitialized());
ABSL_DCHECK_EQ(message.GetDescriptor(), descriptor_);
return GetStringField<BytesValue>(message, value_field_,
value_field_string_type_, scratch);
}
void BytesValueReflection::SetValue(absl::Nonnull<google::protobuf::Message*> message,
absl::string_view value) const {
ABSL_DCHECK(IsInitialized());
ABSL_DCHECK_EQ(message->GetDescriptor(), descriptor_);
message->GetReflection()->SetString(message, value_field_,
std::string(value));
}
void BytesValueReflection::SetValue(absl::Nonnull<google::protobuf::Message*> message,
const absl::Cord& value) const {
ABSL_DCHECK(IsInitialized());
ABSL_DCHECK_EQ(message->GetDescriptor(), descriptor_);
message->GetReflection()->SetString(message, value_field_, value);
}
absl::StatusOr<BytesValueReflection> GetBytesValueReflection(
absl::Nonnull<const Descriptor*> descriptor) {
BytesValueReflection reflection;
CEL_RETURN_IF_ERROR(reflection.Initialize(descriptor));
return reflection;
}
absl::Status StringValueReflection::Initialize(
absl::Nonnull<const DescriptorPool*> pool) {
CEL_ASSIGN_OR_RETURN(
const auto* descriptor,
GetMessageTypeByName(pool, "google.protobuf.StringValue"));
return Initialize(descriptor);
}
absl::Status StringValueReflection::Initialize(
absl::Nonnull<const Descriptor*> descriptor) {
if (descriptor_ != descriptor) {
CEL_RETURN_IF_ERROR(CheckWellKnownType(descriptor, kWellKnownType));
descriptor_ = nullptr;
CEL_ASSIGN_OR_RETURN(value_field_, GetFieldByNumber(descriptor, 1));
CEL_RETURN_IF_ERROR(
CheckFieldType(value_field_, FieldDescriptor::TYPE_STRING));
CEL_RETURN_IF_ERROR(
CheckFieldCardinality(value_field_, FieldDescriptor::LABEL_OPTIONAL));
value_field_string_type_ = value_field_->cpp_string_type();
descriptor_ = descriptor;
}
return absl::OkStatus();
}
StringValue StringValueReflection::GetValue(const google::protobuf::Message& message,
std::string& scratch) const {
ABSL_DCHECK(IsInitialized());
ABSL_DCHECK_EQ(message.GetDescriptor(), descriptor_);
return GetStringField<StringValue>(message, value_field_,
value_field_string_type_, scratch);
}
void StringValueReflection::SetValue(absl::Nonnull<google::protobuf::Message*> message,
absl::string_view value) const {
ABSL_DCHECK(IsInitialized());
ABSL_DCHECK_EQ(message->GetDescriptor(), descriptor_);
message->GetReflection()->SetString(message, value_field_,
std::string(value));
}
void StringValueReflection::SetValue(absl::Nonnull<google::protobuf::Message*> message,
const absl::Cord& value) const {
ABSL_DCHECK(IsInitialized());
ABSL_DCHECK_EQ(message->GetDescriptor(), descriptor_);
message->GetReflection()->SetString(message, value_field_, value);
}
absl::StatusOr<StringValueReflection> GetStringValueReflection(
absl::Nonnull<const Descriptor*> descriptor) {
StringValueReflection reflection;
CEL_RETURN_IF_ERROR(reflection.Initialize(descriptor));
return reflection;
}
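// Reflection over google.protobuf.Any: caches the type_url (field 1) and
// value (field 2) descriptors along with their string representations.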
absl::Status AnyReflection::Initialize(
absl::Nonnull<const DescriptorPool*> pool) {
CEL_ASSIGN_OR_RETURN(const auto* descriptor,
GetMessageTypeByName(pool, "google.protobuf.Any"));
return Initialize(descriptor);
}
absl::Status AnyReflection::Initialize(
absl::Nonnull<const Descriptor*> descriptor) {
if (descriptor_ != descriptor) {
CEL_RETURN_IF_ERROR(CheckWellKnownType(descriptor, kWellKnownType));
descriptor_ = nullptr;
CEL_ASSIGN_OR_RETURN(type_url_field_, GetFieldByNumber(descriptor, 1));
CEL_RETURN_IF_ERROR(
CheckFieldType(type_url_field_, FieldDescriptor::TYPE_STRING));
CEL_RETURN_IF_ERROR(CheckFieldCardinality(type_url_field_,
FieldDescriptor::LABEL_OPTIONAL));
type_url_field_string_type_ = type_url_field_->cpp_string_type();
CEL_ASSIGN_OR_RETURN(value_field_, GetFieldByNumber(descriptor, 2));
CEL_RETURN_IF_ERROR(
CheckFieldType(value_field_, FieldDescriptor::TYPE_BYTES));
CEL_RETURN_IF_ERROR(
CheckFieldCardinality(value_field_, FieldDescriptor::LABEL_OPTIONAL));
value_field_string_type_ = value_field_->cpp_string_type();
descriptor_ = descriptor;
}
return absl::OkStatus();
}
void AnyReflection::SetTypeUrl(absl::Nonnull<google::protobuf::Message*> message,
absl::string_view type_url) const {
ABSL_DCHECK(IsInitialized());
ABSL_DCHECK_EQ(message->GetDescriptor(), descriptor_);
message->GetReflection()->SetString(message, type_url_field_,
std::string(type_url));
}
void AnyReflection::SetValue(absl::Nonnull<google::protobuf::Message*> message,
const absl::Cord& value) const {
ABSL_DCHECK(IsInitialized());
ABSL_DCHECK_EQ(message->GetDescriptor(), descriptor_);
message->GetReflection()->SetString(message, value_field_, value);
}
StringValue AnyReflection::GetTypeUrl(const google::protobuf::Message& message,
std::string& scratch) const {
ABSL_DCHECK(IsInitialized());
ABSL_DCHECK_EQ(message.GetDescriptor(), descriptor_);
return GetStringField<StringValue>(message, type_url_field_,
type_url_field_string_type_, scratch);
}
BytesValue AnyReflection::GetValue(const google::protobuf::Message& message,
std::string& scratch) const {
ABSL_DCHECK(IsInitialized());
ABSL_DCHECK_EQ(message.GetDescriptor(), descriptor_);
return GetStringField<BytesValue>(message, value_field_,
value_field_string_type_, scratch);
}
absl::StatusOr<AnyReflection> GetAnyReflection(
absl::Nonnull<const Descriptor*> descriptor) {
AnyReflection reflection;
CEL_RETURN_IF_ERROR(reflection.Initialize(descriptor));
return reflection;
}
AnyReflection GetAnyReflectionOrDie(
absl::Nonnull<const google::protobuf::Descriptor*> descriptor) {
AnyReflection reflection;
ABSL_CHECK_OK(reflection.Initialize(descriptor));
return reflection;
}
absl::Status DurationReflection::Initialize(
absl::Nonnull<const DescriptorPool*> pool) {
CEL_ASSIGN_OR_RETURN(const auto* descriptor,
GetMessageTypeByName(pool, "google.protobuf.Duration"));
return Initialize(descriptor);
}
absl::Status DurationReflection::Initialize(
absl::Nonnull<const Descriptor*> descriptor) {
if (descriptor_ != descriptor) {
CEL_RETURN_IF_ERROR(CheckWellKnownType(descriptor, kWellKnownType));
descriptor_ = nullptr;
CEL_ASSIGN_OR_RETURN(seconds_field_, GetFieldByNumber(descriptor, 1));
CEL_RETURN_IF_ERROR(
CheckFieldCppType(seconds_field_, FieldDescriptor::CPPTYPE_INT64));
CEL_RETURN_IF_ERROR(
CheckFieldCardinality(seconds_field_, FieldDescriptor::LABEL_OPTIONAL));
CEL_ASSIGN_OR_RETURN(nanos_field_, GetFieldByNumber(descriptor, 2));
CEL_RETURN_IF_ERROR(
CheckFieldCppType(nanos_field_, FieldDescriptor::CPPTYPE_INT32));
CEL_RETURN_IF_ERROR(
CheckFieldCardinality(nanos_field_, FieldDescriptor::LABEL_OPTIONAL));
descriptor_ = descriptor;
}
return absl::OkStatus();
}
int64_t DurationReflection::GetSeconds(const google::protobuf::Message& message) const {
ABSL_DCHECK(IsInitialized());
ABSL_DCHECK_EQ(message.GetDescriptor(), descriptor_);
return message.GetReflection()->GetInt64(message, seconds_field_);
}
int32_t DurationReflection::GetNanos(const google::protobuf::Message& message) const {
ABSL_DCHECK(IsInitialized());
ABSL_DCHECK_EQ(message.GetDescriptor(), descriptor_);
return message.GetReflection()->GetInt32(message, nanos_field_);
}
void DurationReflection::SetSeconds(absl::Nonnull<google::protobuf::Message*> message,
int64_t value) const {
ABSL_DCHECK(IsInitialized());
ABSL_DCHECK_EQ(message->GetDescriptor(), descriptor_);
message->GetReflection()->SetInt64(message, seconds_field_, value);
}
void DurationReflection::SetNanos(absl::Nonnull<google::protobuf::Message*> message,
int32_t value) const {
ABSL_DCHECK(IsInitialized());
ABSL_DCHECK_EQ(message->GetDescriptor(), descriptor_);
message->GetReflection()->SetInt32(message, nanos_field_, value);
}
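// Converts the message to absl::Duration, enforcing the proto3 contract:
// seconds and nanos must each lie within TimeUtil's duration bounds and must
// agree in sign (positive seconds with negative nanos, or vice versa, is
// rejected).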
absl::StatusOr<absl::Duration> DurationReflection::ToAbslDuration(
const google::protobuf::Message& message) const {
ABSL_DCHECK(IsInitialized());
ABSL_DCHECK_EQ(message.GetDescriptor(), descriptor_);
int64_t seconds = GetSeconds(message);
if (ABSL_PREDICT_FALSE(seconds < TimeUtil::kDurationMinSeconds ||
seconds > TimeUtil::kDurationMaxSeconds)) {
return absl::InvalidArgumentError(
absl::StrCat("invalid duration seconds: ", seconds));
}
int32_t nanos = GetNanos(message);
if (ABSL_PREDICT_FALSE(nanos < TimeUtil::kDurationMinNanoseconds ||
nanos > TimeUtil::kDurationMaxNanoseconds)) {
return absl::InvalidArgumentError(
absl::StrCat("invalid duration nanoseconds: ", nanos));
}
if ((seconds < 0 && nanos > 0) || (seconds > 0 && nanos < 0)) {
return absl::InvalidArgumentError(absl::StrCat(
"duration sign mismatch: seconds=", seconds, ", nanoseconds=", nanos));
}
return absl::Seconds(seconds) + absl::Nanoseconds(nanos);
}
absl::StatusOr<DurationReflection> GetDurationReflection(
absl::Nonnull<const Descriptor*> descriptor) {
DurationReflection reflection;
CEL_RETURN_IF_ERROR(reflection.Initialize(descriptor));
return reflection;
}
absl::Status TimestampReflection::Initialize(
absl::Nonnull<const DescriptorPool*> pool) {
CEL_ASSIGN_OR_RETURN(const auto* descriptor,
GetMessageTypeByName(pool, "google.protobuf.Timestamp"));
return Initialize(descriptor);
}
absl::Status TimestampReflection::Initialize(
absl::Nonnull<const Descriptor*> descriptor) {
if (descriptor_ != descriptor) {
CEL_RETURN_IF_ERROR(CheckWellKnownType(descriptor, kWellKnownType));
descriptor_ = nullptr;
CEL_ASSIGN_OR_RETURN(seconds_field_, GetFieldByNumber(descriptor, 1));
CEL_RETURN_IF_ERROR(
CheckFieldCppType(seconds_field_, FieldDescriptor::CPPTYPE_INT64));
CEL_RETURN_IF_ERROR(
CheckFieldCardinality(seconds_field_, FieldDescriptor::LABEL_OPTIONAL));
CEL_ASSIGN_OR_RETURN(nanos_field_, GetFieldByNumber(descriptor, 2));
CEL_RETURN_IF_ERROR(
CheckFieldCppType(nanos_field_, FieldDescriptor::CPPTYPE_INT32));
CEL_RETURN_IF_ERROR(
CheckFieldCardinality(nanos_field_, FieldDescriptor::LABEL_OPTIONAL));
descriptor_ = descriptor;
}
return absl::OkStatus();
}
int64_t TimestampReflection::GetSeconds(const google::protobuf::Message& message) const {
ABSL_DCHECK(IsInitialized());
ABSL_DCHECK_EQ(message.GetDescriptor(), descriptor_);
return message.GetReflection()->GetInt64(message, seconds_field_);
}
int32_t TimestampReflection::GetNanos(const google::protobuf::Message& message) const {
ABSL_DCHECK(IsInitialized());
ABSL_DCHECK_EQ(message.GetDescriptor(), descriptor_);
return message.GetReflection()->GetInt32(message, nanos_field_);
}
void TimestampReflection::SetSeconds(absl::Nonnull<google::protobuf::Message*> message,
int64_t value) const {
ABSL_DCHECK(IsInitialized());
ABSL_DCHECK_EQ(message->GetDescriptor(), descriptor_);
message->GetReflection()->SetInt64(message, seconds_field_, value);
}
void TimestampReflection::SetNanos(absl::Nonnull<google::protobuf::Message*> message,
int32_t value) const {
ABSL_DCHECK(IsInitialized());
ABSL_DCHECK_EQ(message->GetDescriptor(), descriptor_);
message->GetReflection()->SetInt32(message, nanos_field_, value);
}
absl::StatusOr<absl::Time> TimestampReflection::ToAbslTime(
    const google::protobuf::Message& message) const {
  ABSL_DCHECK(IsInitialized());
  ABSL_DCHECK_EQ(message.GetDescriptor(), descriptor_);
  int64_t seconds = GetSeconds(message);
if (ABSL_PREDICT_FALSE(seconds < TimeUtil::kTimestampMinSeconds ||
seconds > TimeUtil::kTimestampMaxSeconds)) {
return absl::InvalidArgumentError(
absl::StrCat("invalid timestamp seconds: ", seconds));
}
int32_t nanos = GetNanos(message);
if (ABSL_PREDICT_FALSE(nanos < TimeUtil::kTimestampMinNanoseconds ||
nanos > TimeUtil::kTimestampMaxNanoseconds)) {
return absl::InvalidArgumentError(
absl::StrCat("invalid timestamp nanoseconds: ", nanos));
}
return absl::UnixEpoch() + absl::Seconds(seconds) + absl::Nanoseconds(nanos);
}
absl::StatusOr<TimestampReflection> GetTimestampReflection(
absl::Nonnull<const Descriptor*> descriptor) {
TimestampReflection reflection;
CEL_RETURN_IF_ERROR(reflection.Initialize(descriptor));
return reflection;
}
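// google.protobuf.Value stores numbers as doubles. Integers outside the
// [kJsonMinInt, kJsonMaxInt] range cannot be represented exactly as a double,
// so they are encoded as their decimal string instead, mirroring the JSON
// mapping used elsewhere in CEL.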
void ValueReflection::SetNumberValue(
absl::Nonnull<google::protobuf::Value*> message, int64_t value) {
if (value < kJsonMinInt || value > kJsonMaxInt) {
SetStringValue(message, absl::StrCat(value));
return;
}
SetNumberValue(message, static_cast<double>(value));
}
void ValueReflection::SetNumberValue(
absl::Nonnull<google::protobuf::Value*> message, uint64_t value) {
if (value > kJsonMaxUint) {
SetStringValue(message, absl::StrCat(value));
return;
}
SetNumberValue(message, static_cast<double>(value));
}
absl::Status ValueReflection::Initialize(
absl::Nonnull<const DescriptorPool*> pool) {
CEL_ASSIGN_OR_RETURN(const auto* descriptor,
GetMessageTypeByName(pool, "google.protobuf.Value"));
return Initialize(descriptor);
}
absl::Status ValueReflection::Initialize(
absl::Nonnull<const Descriptor*> descriptor) {
if (descriptor_ != descriptor) {
CEL_RETURN_IF_ERROR(CheckWellKnownType(descriptor, kWellKnownType));
descriptor_ = nullptr;
CEL_ASSIGN_OR_RETURN(kind_field_, GetOneofByName(descriptor, "kind"));
CEL_ASSIGN_OR_RETURN(null_value_field_, GetFieldByNumber(descriptor, 1));
CEL_RETURN_IF_ERROR(
CheckFieldCppType(null_value_field_, FieldDescriptor::CPPTYPE_ENUM));
CEL_RETURN_IF_ERROR(CheckFieldCardinality(null_value_field_,
FieldDescriptor::LABEL_OPTIONAL));
CEL_RETURN_IF_ERROR(CheckFieldOneof(null_value_field_, kind_field_, 0));
CEL_ASSIGN_OR_RETURN(bool_value_field_, GetFieldByNumber(descriptor, 4));
CEL_RETURN_IF_ERROR(
CheckFieldCppType(bool_value_field_, FieldDescriptor::CPPTYPE_BOOL));
CEL_RETURN_IF_ERROR(CheckFieldCardinality(bool_value_field_,
FieldDescriptor::LABEL_OPTIONAL));
CEL_RETURN_IF_ERROR(CheckFieldOneof(bool_value_field_, kind_field_, 3));
CEL_ASSIGN_OR_RETURN(number_value_field_, GetFieldByNumber(descriptor, 2));
CEL_RETURN_IF_ERROR(CheckFieldCppType(number_value_field_,
FieldDescriptor::CPPTYPE_DOUBLE));
CEL_RETURN_IF_ERROR(CheckFieldCardinality(number_value_field_,
FieldDescriptor::LABEL_OPTIONAL));
CEL_RETURN_IF_ERROR(CheckFieldOneof(number_value_field_, kind_field_, 1));
CEL_ASSIGN_OR_RETURN(string_value_field_, GetFieldByNumber(descriptor, 3));
CEL_RETURN_IF_ERROR(CheckFieldCppType(string_value_field_,
FieldDescriptor::CPPTYPE_STRING));
CEL_RETURN_IF_ERROR(CheckFieldCardinality(string_value_field_,
FieldDescriptor::LABEL_OPTIONAL));
CEL_RETURN_IF_ERROR(CheckFieldOneof(string_value_field_, kind_field_, 2));
string_value_field_string_type_ = string_value_field_->cpp_string_type();
CEL_ASSIGN_OR_RETURN(list_value_field_, GetFieldByNumber(descriptor, 6));
CEL_RETURN_IF_ERROR(
CheckFieldCppType(list_value_field_, FieldDescriptor::CPPTYPE_MESSAGE));
CEL_RETURN_IF_ERROR(CheckFieldCardinality(list_value_field_,
FieldDescriptor::LABEL_OPTIONAL));
CEL_RETURN_IF_ERROR(CheckFieldOneof(list_value_field_, kind_field_, 5));
CEL_RETURN_IF_ERROR(CheckFieldWellKnownType(
list_value_field_, Descriptor::WELLKNOWNTYPE_LISTVALUE));
CEL_ASSIGN_OR_RETURN(struct_value_field_, GetFieldByNumber(descriptor, 5));
CEL_RETURN_IF_ERROR(CheckFieldCppType(struct_value_field_,
FieldDescriptor::CPPTYPE_MESSAGE));
CEL_RETURN_IF_ERROR(CheckFieldCardinality(struct_value_field_,
FieldDescriptor::LABEL_OPTIONAL));
CEL_RETURN_IF_ERROR(CheckFieldOneof(struct_value_field_, kind_field_, 4));
CEL_RETURN_IF_ERROR(CheckFieldWellKnownType(
struct_value_field_, Descriptor::WELLKNOWNTYPE_STRUCT));
descriptor_ = descriptor;
}
return absl::OkStatus();
}
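// Maps the populated member of the `kind` oneof back to the generated
// KindCase enum. This relies on the oneof members being declared in the same
// order as the KindCase values, which Initialize() verified above via
// CheckFieldOneof().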
google::protobuf::Value::KindCase ValueReflection::GetKindCase(
const google::protobuf::Message& message) const {
ABSL_DCHECK(IsInitialized());
ABSL_DCHECK_EQ(message.GetDescriptor(), descriptor_);
const auto* field =
message.GetReflection()->GetOneofFieldDescriptor(message, kind_field_);
return field != nullptr ? static_cast<google::protobuf::Value::KindCase>(
field->index_in_oneof() + 1)
: google::protobuf::Value::KIND_NOT_SET;
}
bool ValueReflection::GetBoolValue(const google::protobuf::Message& message) const {
ABSL_DCHECK(IsInitialized());
ABSL_DCHECK_EQ(message.GetDescriptor(), descriptor_);
return message.GetReflection()->GetBool(message, bool_value_field_);
}
double ValueReflection::GetNumberValue(const google::protobuf::Message& message) const {
ABSL_DCHECK(IsInitialized());
ABSL_DCHECK_EQ(message.GetDescriptor(), descriptor_);
return message.GetReflection()->GetDouble(message, number_value_field_);
}
StringValue ValueReflection::GetStringValue(const google::protobuf::Message& message,
std::string& scratch) const {
ABSL_DCHECK(IsInitialized());
ABSL_DCHECK_EQ(message.GetDescriptor(), descriptor_);
return GetStringField<StringValue>(message, string_value_field_,
string_value_field_string_type_, scratch);
}
const google::protobuf::Message& ValueReflection::GetListValue(
const google::protobuf::Message& message) const {
ABSL_DCHECK(IsInitialized());
ABSL_DCHECK_EQ(message.GetDescriptor(), descriptor_);
return message.GetReflection()->GetMessage(message, list_value_field_);
}
const google::protobuf::Message& ValueReflection::GetStructValue(
const google::protobuf::Message& message) const {
ABSL_DCHECK(IsInitialized());
ABSL_DCHECK_EQ(message.GetDescriptor(), descriptor_);
return message.GetReflection()->GetMessage(message, struct_value_field_);
}
void ValueReflection::SetNullValue(
absl::Nonnull<google::protobuf::Message*> message) const {
ABSL_DCHECK(IsInitialized());
ABSL_DCHECK_EQ(message->GetDescriptor(), descriptor_);
message->GetReflection()->SetEnumValue(message, null_value_field_, 0);
}
void ValueReflection::SetBoolValue(absl::Nonnull<google::protobuf::Message*> message,
bool value) const {
ABSL_DCHECK(IsInitialized());
ABSL_DCHECK_EQ(message->GetDescriptor(), descriptor_);
message->GetReflection()->SetBool(message, bool_value_field_, value);
}
void ValueReflection::SetNumberValue(absl::Nonnull<google::protobuf::Message*> message,
int64_t value) const {
if (value < kJsonMinInt || value > kJsonMaxInt) {
SetStringValue(message, absl::StrCat(value));
return;
}
SetNumberValue(message, static_cast<double>(value));
}
void ValueReflection::SetNumberValue(absl::Nonnull<google::protobuf::Message*> message,
uint64_t value) const {
if (value > kJsonMaxUint) {
SetStringValue(message, absl::StrCat(value));
return;
}
SetNumberValue(message, static_cast<double>(value));
}
void ValueReflection::SetNumberValue(absl::Nonnull<google::protobuf::Message*> message,
double value) const {
ABSL_DCHECK(IsInitialized());
ABSL_DCHECK_EQ(message->GetDescriptor(), descriptor_);
message->GetReflection()->SetDouble(message, number_value_field_, value);
}
void ValueReflection::SetStringValue(absl::Nonnull<google::protobuf::Message*> message,
absl::string_view value) const {
ABSL_DCHECK(IsInitialized());
ABSL_DCHECK_EQ(message->GetDescriptor(), descriptor_);
message->GetReflection()->SetString(message, string_value_field_,
std::string(value));
}
void ValueReflection::SetStringValue(absl::Nonnull<google::protobuf::Message*> message,
const absl::Cord& value) const {
ABSL_DCHECK(IsInitialized());
ABSL_DCHECK_EQ(message->GetDescriptor(), descriptor_);
message->GetReflection()->SetString(message, string_value_field_, value);
}
absl::Nonnull<google::protobuf::Message*> ValueReflection::MutableListValue(
absl::Nonnull<google::protobuf::Message*> message) const {
ABSL_DCHECK(IsInitialized());
ABSL_DCHECK_EQ(message->GetDescriptor(), descriptor_);
return message->GetReflection()->MutableMessage(message, list_value_field_);
}
absl::Nonnull<google::protobuf::Message*> ValueReflection::MutableStructValue(
absl::Nonnull<google::protobuf::Message*> message) const {
ABSL_DCHECK(IsInitialized());
ABSL_DCHECK_EQ(message->GetDescriptor(), descriptor_);
return message->GetReflection()->MutableMessage(message, struct_value_field_);
}
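// Releases ownership of the list_value submessage (ReleaseStructValue below
// is analogous). The field is force-created first so that
// UnsafeArenaReleaseMessage() always has something to hand back, and the
// result is wrapped together with the source arena so the Unique<> knows
// whether it actually owns the message.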
Unique<google::protobuf::Message> ValueReflection::ReleaseListValue(
absl::Nonnull<google::protobuf::Message*> message) const {
ABSL_DCHECK(IsInitialized());
ABSL_DCHECK_EQ(message->GetDescriptor(), descriptor_);
const auto* reflection = message->GetReflection();
if (!reflection->HasField(*message, list_value_field_)) {
reflection->MutableMessage(message, list_value_field_);
}
return WrapUnique(
reflection->UnsafeArenaReleaseMessage(message, list_value_field_),
message->GetArena());
}
Unique<google::protobuf::Message> ValueReflection::ReleaseStructValue(
absl::Nonnull<google::protobuf::Message*> message) const {
ABSL_DCHECK(IsInitialized());
ABSL_DCHECK_EQ(message->GetDescriptor(), descriptor_);
const auto* reflection = message->GetReflection();
if (!reflection->HasField(*message, struct_value_field_)) {
reflection->MutableMessage(message, struct_value_field_);
}
return WrapUnique(
reflection->UnsafeArenaReleaseMessage(message, struct_value_field_),
message->GetArena());
}
absl::StatusOr<ValueReflection> GetValueReflection(
absl::Nonnull<const Descriptor*> descriptor) {
ValueReflection reflection;
CEL_RETURN_IF_ERROR(reflection.Initialize(descriptor));
return reflection;
}
ValueReflection GetValueReflectionOrDie(
absl::Nonnull<const google::protobuf::Descriptor*> descriptor) {
ValueReflection reflection;
ABSL_CHECK_OK(reflection.Initialize(descriptor));
return reflection;
}
absl::Status ListValueReflection::Initialize(
absl::Nonnull<const DescriptorPool*> pool) {
CEL_ASSIGN_OR_RETURN(const auto* descriptor,
GetMessageTypeByName(pool, "google.protobuf.ListValue"));
return Initialize(descriptor);
}
absl::Status ListValueReflection::Initialize(
absl::Nonnull<const Descriptor*> descriptor) {
if (descriptor_ != descriptor) {
CEL_RETURN_IF_ERROR(CheckWellKnownType(descriptor, kWellKnownType));
descriptor_ = nullptr;
CEL_ASSIGN_OR_RETURN(values_field_, GetFieldByNumber(descriptor, 1));
CEL_RETURN_IF_ERROR(
CheckFieldCppType(values_field_, FieldDescriptor::CPPTYPE_MESSAGE));
CEL_RETURN_IF_ERROR(
CheckFieldCardinality(values_field_, FieldDescriptor::LABEL_REPEATED));
CEL_RETURN_IF_ERROR(CheckFieldWellKnownType(
values_field_, Descriptor::WELLKNOWNTYPE_VALUE));
descriptor_ = descriptor;
}
return absl::OkStatus();
}
int ListValueReflection::ValuesSize(const google::protobuf::Message& message) const {
ABSL_DCHECK(IsInitialized());
ABSL_DCHECK_EQ(message.GetDescriptor(), descriptor_);
return message.GetReflection()->FieldSize(message, values_field_);
}
google::protobuf::RepeatedFieldRef<google::protobuf::Message> ListValueReflection::Values(
const google::protobuf::Message& message) const {
ABSL_DCHECK(IsInitialized());
ABSL_DCHECK_EQ(message.GetDescriptor(), descriptor_);
return message.GetReflection()->GetRepeatedFieldRef<google::protobuf::Message>(
message, values_field_);
}
const google::protobuf::Message& ListValueReflection::Values(
const google::protobuf::Message& message ABSL_ATTRIBUTE_LIFETIME_BOUND,
int index) const {
ABSL_DCHECK(IsInitialized());
ABSL_DCHECK_EQ(message.GetDescriptor(), descriptor_);
return message.GetReflection()->GetRepeatedMessage(message, values_field_,
index);
}
google::protobuf::MutableRepeatedFieldRef<google::protobuf::Message>
ListValueReflection::MutableValues(
absl::Nonnull<google::protobuf::Message*> message) const {
ABSL_DCHECK(IsInitialized());
ABSL_DCHECK_EQ(message->GetDescriptor(), descriptor_);
return message->GetReflection()->GetMutableRepeatedFieldRef<google::protobuf::Message>(
message, values_field_);
}
absl::Nonnull<google::protobuf::Message*> ListValueReflection::AddValues(
absl::Nonnull<google::protobuf::Message*> message) const {
ABSL_DCHECK(IsInitialized());
ABSL_DCHECK_EQ(message->GetDescriptor(), descriptor_);
return message->GetReflection()->AddMessage(message, values_field_);
}
void ListValueReflection::ReserveValues(absl::Nonnull<google::protobuf::Message*> message,
int capacity) const {
ABSL_DCHECK(IsInitialized());
ABSL_DCHECK_EQ(message->GetDescriptor(), descriptor_);
if (capacity > 0) {
MutableValues(message).Reserve(capacity);
}
}
absl::StatusOr<ListValueReflection> GetListValueReflection(
absl::Nonnull<const Descriptor*> descriptor) {
ListValueReflection reflection;
CEL_RETURN_IF_ERROR(reflection.Initialize(descriptor));
return reflection;
}
ListValueReflection GetListValueReflectionOrDie(
absl::Nonnull<const google::protobuf::Descriptor*> descriptor) {
ListValueReflection reflection;
ABSL_CHECK_OK(reflection.Initialize(descriptor));
return reflection;
}
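// Reflection over google.protobuf.Struct: a single map<string,
// google.protobuf.Value> field (`fields`, field 1), accessed through the map
// reflection helpers in cel::extensions::protobuf_internal.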
absl::Status StructReflection::Initialize(
absl::Nonnull<const DescriptorPool*> pool) {
CEL_ASSIGN_OR_RETURN(const auto* descriptor,
GetMessageTypeByName(pool, "google.protobuf.Struct"));
return Initialize(descriptor);
}
absl::Status StructReflection::Initialize(
absl::Nonnull<const Descriptor*> descriptor) {
if (descriptor_ != descriptor) {
CEL_RETURN_IF_ERROR(CheckWellKnownType(descriptor, kWellKnownType));
descriptor_ = nullptr;
CEL_ASSIGN_OR_RETURN(fields_field_, GetFieldByNumber(descriptor, 1));
CEL_RETURN_IF_ERROR(CheckMapField(fields_field_));
fields_key_field_ = fields_field_->message_type()->map_key();
CEL_RETURN_IF_ERROR(
CheckFieldCppType(fields_key_field_, FieldDescriptor::CPPTYPE_STRING));
CEL_RETURN_IF_ERROR(CheckFieldCardinality(fields_key_field_,
FieldDescriptor::LABEL_OPTIONAL));
fields_value_field_ = fields_field_->message_type()->map_value();
CEL_RETURN_IF_ERROR(CheckFieldCppType(fields_value_field_,
FieldDescriptor::CPPTYPE_MESSAGE));
CEL_RETURN_IF_ERROR(CheckFieldCardinality(fields_value_field_,
FieldDescriptor::LABEL_OPTIONAL));
CEL_RETURN_IF_ERROR(CheckFieldWellKnownType(
fields_value_field_, Descriptor::WELLKNOWNTYPE_VALUE));
descriptor_ = descriptor;
}
return absl::OkStatus();
}
int StructReflection::FieldsSize(const google::protobuf::Message& message) const {
ABSL_DCHECK(IsInitialized());
ABSL_DCHECK_EQ(message.GetDescriptor(), descriptor_);
return cel::extensions::protobuf_internal::MapSize(*message.GetReflection(),
message, *fields_field_);
}
google::protobuf::MapIterator StructReflection::BeginFields(
const google::protobuf::Message& message) const {
ABSL_DCHECK(IsInitialized());
ABSL_DCHECK_EQ(message.GetDescriptor(), descriptor_);
return cel::extensions::protobuf_internal::MapBegin(*message.GetReflection(),
message, *fields_field_);
}
google::protobuf::MapIterator StructReflection::EndFields(
const google::protobuf::Message& message) const {
ABSL_DCHECK(IsInitialized());
ABSL_DCHECK_EQ(message.GetDescriptor(), descriptor_);
return cel::extensions::protobuf_internal::MapEnd(*message.GetReflection(),
message, *fields_field_);
}
bool StructReflection::ContainsField(const google::protobuf::Message& message,
absl::string_view name) const {
ABSL_DCHECK(IsInitialized());
ABSL_DCHECK_EQ(message.GetDescriptor(), descriptor_);
std::string key_scratch(name);
google::protobuf::MapKey key;
key.SetStringValue(key_scratch);
return cel::extensions::protobuf_internal::ContainsMapKey(
*message.GetReflection(), message, *fields_field_, key);
}
absl::Nullable<const google::protobuf::Message*> StructReflection::FindField(
const google::protobuf::Message& message, absl::string_view name) const {
ABSL_DCHECK(IsInitialized());
ABSL_DCHECK_EQ(message.GetDescriptor(), descriptor_);
std::string key_scratch(name);
google::protobuf::MapKey key;
key.SetStringValue(key_scratch);
google::protobuf::MapValueConstRef value;
if (cel::extensions::protobuf_internal::LookupMapValue(
*message.GetReflection(), message, *fields_field_, key, &value)) {
return &value.GetMessageValue();
}
return nullptr;
}
absl::Nonnull<google::protobuf::Message*> StructReflection::InsertField(
absl::Nonnull<google::protobuf::Message*> message, absl::string_view name) const {
ABSL_DCHECK(IsInitialized());
ABSL_DCHECK_EQ(message->GetDescriptor(), descriptor_);
std::string key_scratch(name);
google::protobuf::MapKey key;
key.SetStringValue(key_scratch);
google::protobuf::MapValueRef value;
cel::extensions::protobuf_internal::InsertOrLookupMapValue(
*message->GetReflection(), message, *fields_field_, key, &value);
return value.MutableMessageValue();
}
bool StructReflection::DeleteField(absl::Nonnull<google::protobuf::Message*> message,
absl::string_view name) const {
ABSL_DCHECK(IsInitialized());
ABSL_DCHECK_EQ(message->GetDescriptor(), descriptor_);
std::string key_scratch(name);
google::protobuf::MapKey key;
key.SetStringValue(key_scratch);
return cel::extensions::protobuf_internal::DeleteMapValue(
message->GetReflection(), message, fields_field_, key);
}
absl::StatusOr<StructReflection> GetStructReflection(
absl::Nonnull<const Descriptor*> descriptor) {
StructReflection reflection;
CEL_RETURN_IF_ERROR(reflection.Initialize(descriptor));
return reflection;
}
StructReflection GetStructReflectionOrDie(
absl::Nonnull<const google::protobuf::Descriptor*> descriptor) {
StructReflection reflection;
ABSL_CHECK_OK(reflection.Initialize(descriptor));
return reflection;
}
absl::Status FieldMaskReflection::Initialize(
absl::Nonnull<const google::protobuf::DescriptorPool*> pool) {
CEL_ASSIGN_OR_RETURN(const auto* descriptor,
GetMessageTypeByName(pool, "google.protobuf.FieldMask"));
return Initialize(descriptor);
}
absl::Status FieldMaskReflection::Initialize(
absl::Nonnull<const google::protobuf::Descriptor*> descriptor) {
if (descriptor_ != descriptor) {
CEL_RETURN_IF_ERROR(CheckWellKnownType(descriptor, kWellKnownType));
descriptor_ = nullptr;
CEL_ASSIGN_OR_RETURN(paths_field_, GetFieldByNumber(descriptor, 1));
CEL_RETURN_IF_ERROR(
CheckFieldCppType(paths_field_, FieldDescriptor::CPPTYPE_STRING));
CEL_RETURN_IF_ERROR(
CheckFieldCardinality(paths_field_, FieldDescriptor::LABEL_REPEATED));
paths_field_string_type_ = paths_field_->cpp_string_type();
descriptor_ = descriptor;
}
return absl::OkStatus();
}
int FieldMaskReflection::PathsSize(const google::protobuf::Message& message) const {
ABSL_DCHECK(IsInitialized());
ABSL_DCHECK_EQ(message.GetDescriptor(), descriptor_);
return message.GetReflection()->FieldSize(message, paths_field_);
}
StringValue FieldMaskReflection::Paths(const google::protobuf::Message& message,
int index, std::string& scratch) const {
return GetRepeatedStringField<StringValue>(
message, paths_field_, paths_field_string_type_, index, scratch);
}
absl::StatusOr<FieldMaskReflection> GetFieldMaskReflection(
absl::Nonnull<const google::protobuf::Descriptor*> descriptor) {
FieldMaskReflection reflection;
CEL_RETURN_IF_ERROR(reflection.Initialize(descriptor));
return reflection;
}
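// Initializes every well known type wrapper from a single pool. FieldMask is
// treated as optional: it is only initialized when the pool actually contains
// google.protobuf.FieldMask.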
absl::Status Reflection::Initialize(absl::Nonnull<const DescriptorPool*> pool) {
CEL_RETURN_IF_ERROR(NullValue().Initialize(pool));
CEL_RETURN_IF_ERROR(BoolValue().Initialize(pool));
CEL_RETURN_IF_ERROR(Int32Value().Initialize(pool));
CEL_RETURN_IF_ERROR(Int64Value().Initialize(pool));
CEL_RETURN_IF_ERROR(UInt32Value().Initialize(pool));
CEL_RETURN_IF_ERROR(UInt64Value().Initialize(pool));
CEL_RETURN_IF_ERROR(FloatValue().Initialize(pool));
CEL_RETURN_IF_ERROR(DoubleValue().Initialize(pool));
CEL_RETURN_IF_ERROR(BytesValue().Initialize(pool));
CEL_RETURN_IF_ERROR(StringValue().Initialize(pool));
CEL_RETURN_IF_ERROR(Any().Initialize(pool));
CEL_RETURN_IF_ERROR(Duration().Initialize(pool));
CEL_RETURN_IF_ERROR(Timestamp().Initialize(pool));
CEL_RETURN_IF_ERROR(Value().Initialize(pool));
CEL_RETURN_IF_ERROR(ListValue().Initialize(pool));
CEL_RETURN_IF_ERROR(Struct().Initialize(pool));
if (const auto* descriptor =
pool->FindMessageTypeByName("google.protobuf.FieldMask");
descriptor != nullptr) {
CEL_RETURN_IF_ERROR(FieldMask().Initialize(descriptor));
}
return absl::OkStatus();
}
namespace {
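// AdaptListValue() and AdaptStruct() verify that `message` really is the
// expected well known type, then wrap it either by ownership (when `adapted`
// holds an unpacked Any payload) or by reference (when the caller retains
// ownership).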
absl::StatusOr<ListValue> AdaptListValue(absl::Nullable<google::protobuf::Arena*> arena,
const google::protobuf::Message& message,
Unique<google::protobuf::Message> adapted) {
ABSL_DCHECK(!adapted || &message == cel::to_address(adapted));
const auto* descriptor = message.GetDescriptor();
if (ABSL_PREDICT_FALSE(descriptor == nullptr)) {
return absl::InvalidArgumentError(
absl::StrCat("missing descriptor for protocol buffer message: ",
message.GetTypeName()));
}
CEL_RETURN_IF_ERROR(GetListValueReflection(descriptor).status());
if (adapted) {
return ListValue(std::move(adapted));
}
return ListValue(std::cref(message));
}
absl::StatusOr<Struct> AdaptStruct(absl::Nullable<google::protobuf::Arena*> arena,
const google::protobuf::Message& message,
Unique<google::protobuf::Message> adapted) {
ABSL_DCHECK(!adapted || &message == cel::to_address(adapted));
const auto* descriptor = message.GetDescriptor();
if (ABSL_PREDICT_FALSE(descriptor == nullptr)) {
return absl::InvalidArgumentError(
absl::StrCat("missing descriptor for protocol buffer message: ",
message.GetTypeName()));
}
CEL_RETURN_IF_ERROR(GetStructReflection(descriptor).status());
if (adapted) {
return Struct(std::move(adapted));
}
return Struct(std::cref(message));
}
absl::StatusOr<Unique<google::protobuf::Message>> AdaptAny(
absl::Nullable<google::protobuf::Arena*> arena, AnyReflection& reflection,
const google::protobuf::Message& message, absl::Nonnull<const Descriptor*> descriptor,
absl::Nonnull<const DescriptorPool*> pool,
absl::Nonnull<google::protobuf::MessageFactory*> factory,
bool error_if_unresolveable) {
ABSL_DCHECK_EQ(descriptor->well_known_type(), Descriptor::WELLKNOWNTYPE_ANY);
absl::Nonnull<const google::protobuf::Message*> to_unwrap = &message;
Unique<google::protobuf::Message> unwrapped;
std::string type_url_scratch;
std::string value_scratch;
do {
CEL_RETURN_IF_ERROR(reflection.Initialize(descriptor));
StringValue type_url = reflection.GetTypeUrl(*to_unwrap, type_url_scratch);
absl::string_view type_url_view =
FlatStringValue(type_url, type_url_scratch);
if (!absl::ConsumePrefix(&type_url_view, "type.googleapis.com/") &&
!absl::ConsumePrefix(&type_url_view, "type.googleprod.com/")) {
if (!error_if_unresolveable) {
break;
}
return absl::InvalidArgumentError(absl::StrCat(
"unable to find descriptor for type URL: ", type_url_view));
}
const auto* packed_descriptor = pool->FindMessageTypeByName(type_url_view);
if (packed_descriptor == nullptr) {
if (!error_if_unresolveable) {
break;
}
return absl::InvalidArgumentError(absl::StrCat(
"unable to find descriptor for type name: ", type_url_view));
}
const auto* prototype = factory->GetPrototype(packed_descriptor);
if (prototype == nullptr) {
return absl::InvalidArgumentError(absl::StrCat(
"unable to build prototype for type name: ", type_url_view));
}
BytesValue value = reflection.GetValue(*to_unwrap, value_scratch);
Unique<google::protobuf::Message> unpacked = WrapUnique(prototype->New(arena), arena);
const bool ok = absl::visit(absl::Overload(
[&](absl::string_view string) -> bool {
return unpacked->ParseFromString(string);
},
[&](const absl::Cord& cord) -> bool {
return unpacked->ParseFromCord(cord);
}),
AsVariant(value));
if (!ok) {
return absl::InvalidArgumentError(absl::StrCat(
"failed to unpack protocol buffer message: ", type_url_view));
}
unwrapped = std::move(unpacked);
to_unwrap = cel::to_address(unwrapped);
descriptor = to_unwrap->GetDescriptor();
if (descriptor == nullptr) {
return absl::InvalidArgumentError(
absl::StrCat("missing descriptor for protocol buffer message: ",
to_unwrap->GetTypeName()));
}
} while (descriptor->well_known_type() == Descriptor::WELLKNOWNTYPE_ANY);
return unwrapped;
}
}
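// Unpacks a google.protobuf.Any, recursively unwrapping nested Anys. A
// minimal usage sketch, assuming `any` is an Any message whose type URL names
// a type known to `pool`:
//
//   AnyReflection reflection;
//   CEL_ASSIGN_OR_RETURN(
//       Unique<google::protobuf::Message> unpacked,
//       UnpackAnyFrom(arena, reflection, any, pool, factory));
//
// UnpackAnyFrom() fails on unresolvable type URLs, while
// UnpackAnyIfResolveable() stops and returns whatever has been unwrapped so
// far (possibly nothing).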
absl::StatusOr<Unique<google::protobuf::Message>> UnpackAnyFrom(
absl::Nullable<google::protobuf::Arena*> arena, AnyReflection& reflection,
const google::protobuf::Message& message,
absl::Nonnull<const google::protobuf::DescriptorPool*> pool,
absl::Nonnull<google::protobuf::MessageFactory*> factory) {
ABSL_DCHECK_EQ(message.GetDescriptor()->well_known_type(),
Descriptor::WELLKNOWNTYPE_ANY);
return AdaptAny(arena, reflection, message, message.GetDescriptor(), pool,
factory, true);
}
absl::StatusOr<Unique<google::protobuf::Message>> UnpackAnyIfResolveable(
absl::Nullable<google::protobuf::Arena*> arena, AnyReflection& reflection,
const google::protobuf::Message& message,
absl::Nonnull<const google::protobuf::DescriptorPool*> pool,
absl::Nonnull<google::protobuf::MessageFactory*> factory) {
ABSL_DCHECK_EQ(message.GetDescriptor()->well_known_type(),
Descriptor::WELLKNOWNTYPE_ANY);
return AdaptAny(arena, reflection, message, message.GetDescriptor(), pool,
factory, false);
}
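// The main adaptation entry point: unwraps Any if necessary, then converts
// the message into the well_known_types::Value variant -- plain scalars for
// the wrapper types, absl::Duration/absl::Time for Duration/Timestamp,
// ListValue/Struct wrappers for the aggregate types, and absl::monostate for
// ordinary messages that required no adaptation. A minimal sketch, assuming
// `msg` uses the generated descriptor pool:
//
//   std::string scratch;
//   CEL_ASSIGN_OR_RETURN(
//       auto value,
//       AdaptFromMessage(
//           /*arena=*/nullptr, msg,
//           google::protobuf::DescriptorPool::generated_pool(),
//           google::protobuf::MessageFactory::generated_factory(), scratch));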
absl::StatusOr<well_known_types::Value> AdaptFromMessage(
absl::Nullable<google::protobuf::Arena*> arena, const google::protobuf::Message& message,
absl::Nonnull<const DescriptorPool*> pool,
absl::Nonnull<google::protobuf::MessageFactory*> factory, std::string& scratch) {
const auto* descriptor = message.GetDescriptor();
if (ABSL_PREDICT_FALSE(descriptor == nullptr)) {
return absl::InvalidArgumentError(
absl::StrCat("missing descriptor for protocol buffer message: ",
message.GetTypeName()));
}
absl::Nonnull<const google::protobuf::Message*> to_adapt;
Unique<google::protobuf::Message> adapted;
Descriptor::WellKnownType well_known_type = descriptor->well_known_type();
if (well_known_type == Descriptor::WELLKNOWNTYPE_ANY) {
AnyReflection reflection;
CEL_ASSIGN_OR_RETURN(
adapted, UnpackAnyFrom(arena, reflection, message, pool, factory));
to_adapt = cel::to_address(adapted);
descriptor = to_adapt->GetDescriptor();
well_known_type = descriptor->well_known_type();
} else {
to_adapt = &message;
}
  switch (well_known_type) {
case Descriptor::WELLKNOWNTYPE_DOUBLEVALUE: {
CEL_ASSIGN_OR_RETURN(auto reflection,
GetDoubleValueReflection(descriptor));
return reflection.GetValue(*to_adapt);
}
case Descriptor::WELLKNOWNTYPE_FLOATVALUE: {
CEL_ASSIGN_OR_RETURN(auto reflection,
GetFloatValueReflection(descriptor));
return reflection.GetValue(*to_adapt);
}
case Descriptor::WELLKNOWNTYPE_INT64VALUE: {
CEL_ASSIGN_OR_RETURN(auto reflection,
GetInt64ValueReflection(descriptor));
return reflection.GetValue(*to_adapt);
}
case Descriptor::WELLKNOWNTYPE_UINT64VALUE: {
CEL_ASSIGN_OR_RETURN(auto reflection,
GetUInt64ValueReflection(descriptor));
return reflection.GetValue(*to_adapt);
}
case Descriptor::WELLKNOWNTYPE_INT32VALUE: {
CEL_ASSIGN_OR_RETURN(auto reflection,
GetInt32ValueReflection(descriptor));
return reflection.GetValue(*to_adapt);
}
case Descriptor::WELLKNOWNTYPE_UINT32VALUE: {
CEL_ASSIGN_OR_RETURN(auto reflection,
GetUInt32ValueReflection(descriptor));
return reflection.GetValue(*to_adapt);
}
case Descriptor::WELLKNOWNTYPE_STRINGVALUE: {
CEL_ASSIGN_OR_RETURN(auto reflection,
GetStringValueReflection(descriptor));
auto value = reflection.GetValue(*to_adapt, scratch);
if (adapted) {
value = CopyStringValue(value, scratch);
}
return value;
}
case Descriptor::WELLKNOWNTYPE_BYTESVALUE: {
CEL_ASSIGN_OR_RETURN(auto reflection,
GetBytesValueReflection(descriptor));
auto value = reflection.GetValue(*to_adapt, scratch);
if (adapted) {
value = CopyBytesValue(value, scratch);
}
return value;
}
case Descriptor::WELLKNOWNTYPE_BOOLVALUE: {
CEL_ASSIGN_OR_RETURN(auto reflection, GetBoolValueReflection(descriptor));
return reflection.GetValue(*to_adapt);
}
case Descriptor::WELLKNOWNTYPE_ANY:
ABSL_UNREACHABLE();
case Descriptor::WELLKNOWNTYPE_DURATION: {
CEL_ASSIGN_OR_RETURN(auto reflection, GetDurationReflection(descriptor));
return reflection.ToAbslDuration(*to_adapt);
}
case Descriptor::WELLKNOWNTYPE_TIMESTAMP: {
CEL_ASSIGN_OR_RETURN(auto reflection, GetTimestampReflection(descriptor));
return reflection.ToAbslTime(*to_adapt);
}
case Descriptor::WELLKNOWNTYPE_VALUE: {
CEL_ASSIGN_OR_RETURN(auto reflection, GetValueReflection(descriptor));
const auto kind_case = reflection.GetKindCase(*to_adapt);
switch (kind_case) {
case google::protobuf::Value::KIND_NOT_SET:
ABSL_FALLTHROUGH_INTENDED;
case google::protobuf::Value::kNullValue:
return nullptr;
case google::protobuf::Value::kNumberValue:
return reflection.GetNumberValue(*to_adapt);
case google::protobuf::Value::kStringValue: {
auto value = reflection.GetStringValue(*to_adapt, scratch);
if (adapted) {
value = CopyStringValue(value, scratch);
}
return value;
}
case google::protobuf::Value::kBoolValue:
return reflection.GetBoolValue(*to_adapt);
case google::protobuf::Value::kStructValue: {
if (adapted) {
adapted = reflection.ReleaseStructValue(cel::to_address(adapted));
to_adapt = cel::to_address(adapted);
} else {
to_adapt = &reflection.GetStructValue(*to_adapt);
}
return AdaptStruct(arena, *to_adapt, std::move(adapted));
}
case google::protobuf::Value::kListValue: {
if (adapted) {
adapted = reflection.ReleaseListValue(cel::to_address(adapted));
to_adapt = cel::to_address(adapted);
} else {
to_adapt = &reflection.GetListValue(*to_adapt);
}
return AdaptListValue(arena, *to_adapt, std::move(adapted));
}
default:
return absl::InvalidArgumentError(
absl::StrCat("unexpected value kind case: ", kind_case));
}
}
case Descriptor::WELLKNOWNTYPE_LISTVALUE:
return AdaptListValue(arena, *to_adapt, std::move(adapted));
case Descriptor::WELLKNOWNTYPE_STRUCT:
return AdaptStruct(arena, *to_adapt, std::move(adapted));
default:
if (adapted) {
return adapted;
}
return absl::monostate{};
}
}
} | #include "internal/well_known_types.h"
#include <cstddef>
#include <cstdint>
#include <string>
#include "google/protobuf/any.pb.h"
#include "google/protobuf/duration.pb.h"
#include "google/protobuf/field_mask.pb.h"
#include "google/protobuf/struct.pb.h"
#include "google/protobuf/timestamp.pb.h"
#include "google/protobuf/wrappers.pb.h"
#include "google/protobuf/descriptor.pb.h"
#include "absl/base/attributes.h"
#include "absl/base/nullability.h"
#include "absl/log/die_if_null.h"
#include "absl/status/status.h"
#include "absl/status/status_matchers.h"
#include "absl/status/statusor.h"
#include "absl/strings/cord.h"
#include "absl/strings/string_view.h"
#include "absl/time/time.h"
#include "absl/types/variant.h"
#include "common/memory.h"
#include "internal/message_type_name.h"
#include "internal/minimal_descriptor_pool.h"
#include "internal/parse_text_proto.h"
#include "internal/testing.h"
#include "internal/testing_descriptor_pool.h"
#include "internal/testing_message_factory.h"
#include "proto/test/v1/proto3/test_all_types.pb.h"
#include "google/protobuf/arena.h"
#include "google/protobuf/descriptor.h"
#include "google/protobuf/message.h"
namespace cel::well_known_types {
namespace {
using ::absl_testing::IsOk;
using ::absl_testing::IsOkAndHolds;
using ::absl_testing::StatusIs;
using ::cel::internal::GetMinimalDescriptorPool;
using ::cel::internal::GetTestingDescriptorPool;
using ::cel::internal::GetTestingMessageFactory;
using ::testing::_;
using ::testing::HasSubstr;
using ::testing::IsNull;
using ::testing::NotNull;
using ::testing::Test;
using ::testing::VariantWith;
using TestAllTypesProto3 = ::google::api::expr::test::v1::proto3::TestAllTypes;
class ReflectionTest : public Test {
public:
absl::Nonnull<google::protobuf::Arena*> arena() ABSL_ATTRIBUTE_LIFETIME_BOUND {
return &arena_;
}
std::string& scratch_space() ABSL_ATTRIBUTE_LIFETIME_BOUND {
return scratch_space_;
}
absl::Nonnull<const google::protobuf::DescriptorPool*> descriptor_pool() {
return GetTestingDescriptorPool();
}
absl::Nonnull<google::protobuf::MessageFactory*> message_factory() {
return GetTestingMessageFactory();
}
template <typename T>
absl::Nonnull<T*> MakeGenerated() {
return google::protobuf::Arena::Create<T>(arena());
}
template <typename T>
absl::Nonnull<google::protobuf::Message*> MakeDynamic() {
const auto* descriptor =
ABSL_DIE_IF_NULL(descriptor_pool()->FindMessageTypeByName(
internal::MessageTypeNameFor<T>()));
const auto* prototype =
ABSL_DIE_IF_NULL(message_factory()->GetPrototype(descriptor));
return prototype->New(arena());
}
private:
google::protobuf::Arena arena_;
std::string scratch_space_;
};
TEST_F(ReflectionTest, MinimalDescriptorPool) {
EXPECT_THAT(Reflection().Initialize(GetMinimalDescriptorPool()), IsOk());
}
TEST_F(ReflectionTest, TestingDescriptorPool) {
EXPECT_THAT(Reflection().Initialize(GetTestingDescriptorPool()), IsOk());
}
TEST_F(ReflectionTest, BoolValue_Generated) {
auto* value = MakeGenerated<google::protobuf::BoolValue>();
EXPECT_EQ(BoolValueReflection::GetValue(*value), false);
BoolValueReflection::SetValue(value, true);
EXPECT_EQ(BoolValueReflection::GetValue(*value), true);
}
TEST_F(ReflectionTest, BoolValue_Dynamic) {
auto* value = MakeDynamic<google::protobuf::BoolValue>();
ASSERT_OK_AND_ASSIGN(
auto reflection,
GetBoolValueReflection(ABSL_DIE_IF_NULL(value->GetDescriptor())));
EXPECT_EQ(reflection.GetValue(*value), false);
reflection.SetValue(value, true);
EXPECT_EQ(reflection.GetValue(*value), true);
}
TEST_F(ReflectionTest, Int32Value_Generated) {
auto* value = MakeGenerated<google::protobuf::Int32Value>();
EXPECT_EQ(Int32ValueReflection::GetValue(*value), 0);
Int32ValueReflection::SetValue(value, 1);
EXPECT_EQ(Int32ValueReflection::GetValue(*value), 1);
}
TEST_F(ReflectionTest, Int32Value_Dynamic) {
auto* value = MakeDynamic<google::protobuf::Int32Value>();
ASSERT_OK_AND_ASSIGN(
auto reflection,
GetInt32ValueReflection(ABSL_DIE_IF_NULL(value->GetDescriptor())));
EXPECT_EQ(reflection.GetValue(*value), 0);
reflection.SetValue(value, 1);
EXPECT_EQ(reflection.GetValue(*value), 1);
}
TEST_F(ReflectionTest, Int64Value_Generated) {
auto* value = MakeGenerated<google::protobuf::Int64Value>();
EXPECT_EQ(Int64ValueReflection::GetValue(*value), 0);
Int64ValueReflection::SetValue(value, 1);
EXPECT_EQ(Int64ValueReflection::GetValue(*value), 1);
}
TEST_F(ReflectionTest, Int64Value_Dynamic) {
auto* value = MakeDynamic<google::protobuf::Int64Value>();
ASSERT_OK_AND_ASSIGN(
auto reflection,
GetInt64ValueReflection(ABSL_DIE_IF_NULL(value->GetDescriptor())));
EXPECT_EQ(reflection.GetValue(*value), 0);
reflection.SetValue(value, 1);
EXPECT_EQ(reflection.GetValue(*value), 1);
}
TEST_F(ReflectionTest, UInt32Value_Generated) {
auto* value = MakeGenerated<google::protobuf::UInt32Value>();
EXPECT_EQ(UInt32ValueReflection::GetValue(*value), 0);
UInt32ValueReflection::SetValue(value, 1);
EXPECT_EQ(UInt32ValueReflection::GetValue(*value), 1);
}
TEST_F(ReflectionTest, UInt32Value_Dynamic) {
auto* value = MakeDynamic<google::protobuf::UInt32Value>();
ASSERT_OK_AND_ASSIGN(
auto reflection,
GetUInt32ValueReflection(ABSL_DIE_IF_NULL(value->GetDescriptor())));
EXPECT_EQ(reflection.GetValue(*value), 0);
reflection.SetValue(value, 1);
EXPECT_EQ(reflection.GetValue(*value), 1);
}
TEST_F(ReflectionTest, UInt64Value_Generated) {
auto* value = MakeGenerated<google::protobuf::UInt64Value>();
EXPECT_EQ(UInt64ValueReflection::GetValue(*value), 0);
UInt64ValueReflection::SetValue(value, 1);
EXPECT_EQ(UInt64ValueReflection::GetValue(*value), 1);
}
TEST_F(ReflectionTest, UInt64Value_Dynamic) {
auto* value = MakeDynamic<google::protobuf::UInt64Value>();
ASSERT_OK_AND_ASSIGN(
auto reflection,
GetUInt64ValueReflection(ABSL_DIE_IF_NULL(value->GetDescriptor())));
EXPECT_EQ(reflection.GetValue(*value), 0);
reflection.SetValue(value, 1);
EXPECT_EQ(reflection.GetValue(*value), 1);
}
TEST_F(ReflectionTest, FloatValue_Generated) {
auto* value = MakeGenerated<google::protobuf::FloatValue>();
EXPECT_EQ(FloatValueReflection::GetValue(*value), 0);
FloatValueReflection::SetValue(value, 1);
EXPECT_EQ(FloatValueReflection::GetValue(*value), 1);
}
TEST_F(ReflectionTest, FloatValue_Dynamic) {
auto* value = MakeDynamic<google::protobuf::FloatValue>();
ASSERT_OK_AND_ASSIGN(
auto reflection,
GetFloatValueReflection(ABSL_DIE_IF_NULL(value->GetDescriptor())));
EXPECT_EQ(reflection.GetValue(*value), 0);
reflection.SetValue(value, 1);
EXPECT_EQ(reflection.GetValue(*value), 1);
}
TEST_F(ReflectionTest, DoubleValue_Generated) {
auto* value = MakeGenerated<google::protobuf::DoubleValue>();
EXPECT_EQ(DoubleValueReflection::GetValue(*value), 0);
DoubleValueReflection::SetValue(value, 1);
EXPECT_EQ(DoubleValueReflection::GetValue(*value), 1);
}
TEST_F(ReflectionTest, DoubleValue_Dynamic) {
auto* value = MakeDynamic<google::protobuf::DoubleValue>();
ASSERT_OK_AND_ASSIGN(
auto reflection,
GetDoubleValueReflection(ABSL_DIE_IF_NULL(value->GetDescriptor())));
EXPECT_EQ(reflection.GetValue(*value), 0);
reflection.SetValue(value, 1);
EXPECT_EQ(reflection.GetValue(*value), 1);
}
TEST_F(ReflectionTest, BytesValue_Generated) {
auto* value = MakeGenerated<google::protobuf::BytesValue>();
EXPECT_EQ(BytesValueReflection::GetValue(*value), "");
BytesValueReflection::SetValue(value, absl::Cord("Hello World!"));
EXPECT_EQ(BytesValueReflection::GetValue(*value), "Hello World!");
}
TEST_F(ReflectionTest, BytesValue_Dynamic) {
auto* value = MakeDynamic<google::protobuf::BytesValue>();
std::string scratch;
ASSERT_OK_AND_ASSIGN(
auto reflection,
GetBytesValueReflection(ABSL_DIE_IF_NULL(value->GetDescriptor())));
EXPECT_EQ(reflection.GetValue(*value, scratch), "");
reflection.SetValue(value, "Hello World!");
EXPECT_EQ(reflection.GetValue(*value, scratch), "Hello World!");
reflection.SetValue(value, absl::Cord());
EXPECT_EQ(reflection.GetValue(*value, scratch), "");
}
TEST_F(ReflectionTest, StringValue_Generated) {
auto* value = MakeGenerated<google::protobuf::StringValue>();
EXPECT_EQ(StringValueReflection::GetValue(*value), "");
StringValueReflection::SetValue(value, "Hello World!");
EXPECT_EQ(StringValueReflection::GetValue(*value), "Hello World!");
}
TEST_F(ReflectionTest, StringValue_Dynamic) {
auto* value = MakeDynamic<google::protobuf::StringValue>();
std::string scratch;
ASSERT_OK_AND_ASSIGN(
auto reflection,
GetStringValueReflection(ABSL_DIE_IF_NULL(value->GetDescriptor())));
EXPECT_EQ(reflection.GetValue(*value, scratch), "");
reflection.SetValue(value, "Hello World!");
EXPECT_EQ(reflection.GetValue(*value, scratch), "Hello World!");
reflection.SetValue(value, absl::Cord());
EXPECT_EQ(reflection.GetValue(*value, scratch), "");
}
TEST_F(ReflectionTest, Any_Generated) {
auto* value = MakeGenerated<google::protobuf::Any>();
EXPECT_EQ(AnyReflection::GetTypeUrl(*value), "");
AnyReflection::SetTypeUrl(value, "Hello World!");
EXPECT_EQ(AnyReflection::GetTypeUrl(*value), "Hello World!");
EXPECT_EQ(AnyReflection::GetValue(*value), "");
AnyReflection::SetValue(value, absl::Cord("Hello World!"));
EXPECT_EQ(AnyReflection::GetValue(*value), "Hello World!");
}
TEST_F(ReflectionTest, Any_Dynamic) {
auto* value = MakeDynamic<google::protobuf::Any>();
std::string scratch;
ASSERT_OK_AND_ASSIGN(
auto reflection,
GetAnyReflection(ABSL_DIE_IF_NULL(value->GetDescriptor())));
EXPECT_EQ(reflection.GetTypeUrl(*value, scratch), "");
reflection.SetTypeUrl(value, "Hello World!");
EXPECT_EQ(reflection.GetTypeUrl(*value, scratch), "Hello World!");
EXPECT_EQ(reflection.GetValue(*value, scratch), "");
reflection.SetValue(value, absl::Cord("Hello World!"));
EXPECT_EQ(reflection.GetValue(*value, scratch), "Hello World!");
}
TEST_F(ReflectionTest, Duration_Generated) {
auto* value = MakeGenerated<google::protobuf::Duration>();
EXPECT_EQ(DurationReflection::GetSeconds(*value), 0);
DurationReflection::SetSeconds(value, 1);
EXPECT_EQ(DurationReflection::GetSeconds(*value), 1);
EXPECT_EQ(DurationReflection::GetNanos(*value), 0);
DurationReflection::SetNanos(value, 1);
EXPECT_EQ(DurationReflection::GetNanos(*value), 1);
}
TEST_F(ReflectionTest, Duration_Dynamic) {
auto* value = MakeDynamic<google::protobuf::Duration>();
ASSERT_OK_AND_ASSIGN(
auto reflection,
GetDurationReflection(ABSL_DIE_IF_NULL(value->GetDescriptor())));
EXPECT_EQ(reflection.GetSeconds(*value), 0);
reflection.SetSeconds(value, 1);
EXPECT_EQ(reflection.GetSeconds(*value), 1);
EXPECT_EQ(reflection.GetNanos(*value), 0);
reflection.SetNanos(value, 1);
EXPECT_EQ(reflection.GetNanos(*value), 1);
}
TEST_F(ReflectionTest, Timestamp_Generated) {
auto* value = MakeGenerated<google::protobuf::Timestamp>();
EXPECT_EQ(TimestampReflection::GetSeconds(*value), 0);
TimestampReflection::SetSeconds(value, 1);
EXPECT_EQ(TimestampReflection::GetSeconds(*value), 1);
EXPECT_EQ(TimestampReflection::GetNanos(*value), 0);
TimestampReflection::SetNanos(value, 1);
EXPECT_EQ(TimestampReflection::GetNanos(*value), 1);
}
TEST_F(ReflectionTest, Timestamp_Dynamic) {
auto* value = MakeDynamic<google::protobuf::Timestamp>();
ASSERT_OK_AND_ASSIGN(
auto reflection,
GetTimestampReflection(ABSL_DIE_IF_NULL(value->GetDescriptor())));
EXPECT_EQ(reflection.GetSeconds(*value), 0);
reflection.SetSeconds(value, 1);
EXPECT_EQ(reflection.GetSeconds(*value), 1);
EXPECT_EQ(reflection.GetNanos(*value), 0);
reflection.SetNanos(value, 1);
EXPECT_EQ(reflection.GetNanos(*value), 1);
}
TEST_F(ReflectionTest, Value_Generated) {
auto* value = MakeGenerated<google::protobuf::Value>();
EXPECT_EQ(ValueReflection::GetKindCase(*value),
google::protobuf::Value::KIND_NOT_SET);
ValueReflection::SetNullValue(value);
EXPECT_EQ(ValueReflection::GetKindCase(*value),
google::protobuf::Value::kNullValue);
ValueReflection::SetBoolValue(value, true);
EXPECT_EQ(ValueReflection::GetKindCase(*value),
google::protobuf::Value::kBoolValue);
EXPECT_EQ(ValueReflection::GetBoolValue(*value), true);
ValueReflection::SetNumberValue(value, 1.0);
EXPECT_EQ(ValueReflection::GetKindCase(*value),
google::protobuf::Value::kNumberValue);
EXPECT_EQ(ValueReflection::GetNumberValue(*value), 1.0);
ValueReflection::SetStringValue(value, "Hello World!");
EXPECT_EQ(ValueReflection::GetKindCase(*value),
google::protobuf::Value::kStringValue);
EXPECT_EQ(ValueReflection::GetStringValue(*value), "Hello World!");
ValueReflection::MutableListValue(value);
EXPECT_EQ(ValueReflection::GetKindCase(*value),
google::protobuf::Value::kListValue);
EXPECT_EQ(ValueReflection::GetListValue(*value).ByteSizeLong(), 0);
ValueReflection::MutableStructValue(value);
EXPECT_EQ(ValueReflection::GetKindCase(*value),
google::protobuf::Value::kStructValue);
EXPECT_EQ(ValueReflection::GetStructValue(*value).ByteSizeLong(), 0);
}
TEST_F(ReflectionTest, Value_Dynamic) {
auto* value = MakeDynamic<google::protobuf::Value>();
std::string scratch;
ASSERT_OK_AND_ASSIGN(
auto reflection,
GetValueReflection(ABSL_DIE_IF_NULL(value->GetDescriptor())));
EXPECT_EQ(reflection.GetKindCase(*value),
google::protobuf::Value::KIND_NOT_SET);
reflection.SetNullValue(value);
EXPECT_EQ(reflection.GetKindCase(*value),
google::protobuf::Value::kNullValue);
reflection.SetBoolValue(value, true);
EXPECT_EQ(reflection.GetKindCase(*value),
google::protobuf::Value::kBoolValue);
EXPECT_EQ(reflection.GetBoolValue(*value), true);
reflection.SetNumberValue(value, 1.0);
EXPECT_EQ(reflection.GetKindCase(*value),
google::protobuf::Value::kNumberValue);
EXPECT_EQ(reflection.GetNumberValue(*value), 1.0);
reflection.SetStringValue(value, "Hello World!");
EXPECT_EQ(reflection.GetKindCase(*value),
google::protobuf::Value::kStringValue);
EXPECT_EQ(reflection.GetStringValue(*value, scratch), "Hello World!");
reflection.MutableListValue(value);
EXPECT_EQ(reflection.GetKindCase(*value),
google::protobuf::Value::kListValue);
EXPECT_EQ(reflection.GetListValue(*value).ByteSizeLong(), 0);
EXPECT_THAT(reflection.ReleaseListValue(value), NotNull());
reflection.MutableStructValue(value);
EXPECT_EQ(reflection.GetKindCase(*value),
google::protobuf::Value::kStructValue);
EXPECT_EQ(reflection.GetStructValue(*value).ByteSizeLong(), 0);
EXPECT_THAT(reflection.ReleaseStructValue(value), NotNull());
}
TEST_F(ReflectionTest, ListValue_Generated) {
auto* value = MakeGenerated<google::protobuf::ListValue>();
EXPECT_EQ(ListValueReflection::ValuesSize(*value), 0);
EXPECT_EQ(ListValueReflection::Values(*value).size(), 0);
EXPECT_EQ(ListValueReflection::MutableValues(value).size(), 0);
}
TEST_F(ReflectionTest, ListValue_Dynamic) {
auto* value = MakeDynamic<google::protobuf::ListValue>();
ASSERT_OK_AND_ASSIGN(
auto reflection,
GetListValueReflection(ABSL_DIE_IF_NULL(value->GetDescriptor())));
EXPECT_EQ(reflection.ValuesSize(*value), 0);
EXPECT_EQ(reflection.Values(*value).size(), 0);
EXPECT_EQ(reflection.MutableValues(value).size(), 0);
}
TEST_F(ReflectionTest, StructValue_Generated) {
auto* value = MakeGenerated<google::protobuf::Struct>();
EXPECT_EQ(StructReflection::FieldsSize(*value), 0);
EXPECT_EQ(StructReflection::BeginFields(*value),
StructReflection::EndFields(*value));
EXPECT_FALSE(StructReflection::ContainsField(*value, "foo"));
EXPECT_THAT(StructReflection::FindField(*value, "foo"), IsNull());
EXPECT_THAT(StructReflection::InsertField(value, "foo"), NotNull());
EXPECT_TRUE(StructReflection::DeleteField(value, "foo"));
}
TEST_F(ReflectionTest, StructValue_Dynamic) {
auto* value = MakeDynamic<google::protobuf::Struct>();
ASSERT_OK_AND_ASSIGN(
auto reflection,
GetStructReflection(ABSL_DIE_IF_NULL(value->GetDescriptor())));
EXPECT_EQ(reflection.FieldsSize(*value), 0);
EXPECT_EQ(reflection.BeginFields(*value), reflection.EndFields(*value));
EXPECT_FALSE(reflection.ContainsField(*value, "foo"));
EXPECT_THAT(reflection.FindField(*value, "foo"), IsNull());
EXPECT_THAT(reflection.InsertField(value, "foo"), NotNull());
EXPECT_TRUE(reflection.DeleteField(value, "foo"));
}
TEST_F(ReflectionTest, FieldMask_Generated) {
auto* value = MakeGenerated<google::protobuf::FieldMask>();
EXPECT_EQ(FieldMaskReflection::PathsSize(*value), 0);
value->add_paths("foo");
EXPECT_EQ(FieldMaskReflection::PathsSize(*value), 1);
EXPECT_EQ(FieldMaskReflection::Paths(*value, 0), "foo");
}
TEST_F(ReflectionTest, FieldMask_Dynamic) {
auto* value = MakeDynamic<google::protobuf::FieldMask>();
ASSERT_OK_AND_ASSIGN(
auto reflection,
GetFieldMaskReflection(ABSL_DIE_IF_NULL(value->GetDescriptor())));
EXPECT_EQ(reflection.PathsSize(*value), 0);
value->GetReflection()->AddString(
&*value,
ABSL_DIE_IF_NULL(value->GetDescriptor()->FindFieldByName("paths")),
"foo");
EXPECT_EQ(reflection.PathsSize(*value), 1);
EXPECT_EQ(reflection.Paths(*value, 0, scratch_space()), "foo");
}
TEST_F(ReflectionTest, NullValue_MissingValue) {
google::protobuf::DescriptorPool descriptor_pool;
{
google::protobuf::FileDescriptorProto file_proto;
file_proto.set_name("google/protobuf/struct.proto");
file_proto.set_syntax("editions");
file_proto.set_edition(google::protobuf::EDITION_2023);
file_proto.set_package("google.protobuf");
auto* enum_proto = file_proto.add_enum_type();
enum_proto->set_name("NullValue");
auto* value_proto = enum_proto->add_value();
value_proto->set_number(1);
value_proto->set_name("NULL_VALUE");
enum_proto->mutable_options()->mutable_features()->set_enum_type(
google::protobuf::FeatureSet::CLOSED);
ASSERT_THAT(descriptor_pool.BuildFile(file_proto), NotNull());
}
EXPECT_THAT(
NullValueReflection().Initialize(&descriptor_pool),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("well known protocol buffer enum missing value: ")));
}
TEST_F(ReflectionTest, NullValue_MultipleValues) {
google::protobuf::DescriptorPool descriptor_pool;
{
google::protobuf::FileDescriptorProto file_proto;
file_proto.set_name("google/protobuf/struct.proto");
file_proto.set_syntax("proto3");
file_proto.set_package("google.protobuf");
auto* enum_proto = file_proto.add_enum_type();
enum_proto->set_name("NullValue");
auto* value_proto = enum_proto->add_value();
value_proto->set_number(0);
value_proto->set_name("NULL_VALUE");
value_proto = enum_proto->add_value();
value_proto->set_number(1);
value_proto->set_name("NULL_VALUE2");
ASSERT_THAT(descriptor_pool.BuildFile(file_proto), NotNull());
}
EXPECT_THAT(
NullValueReflection().Initialize(&descriptor_pool),
StatusIs(
absl::StatusCode::kInvalidArgument,
HasSubstr("well known protocol buffer enum has multiple values: ")));
}
TEST_F(ReflectionTest, EnumDescriptorMissing) {
google::protobuf::DescriptorPool descriptor_pool;
EXPECT_THAT(NullValueReflection().Initialize(&descriptor_pool),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("descriptor missing for protocol buffer enum "
"well known type: ")));
}
TEST_F(ReflectionTest, MessageDescriptorMissing) {
google::protobuf::DescriptorPool descriptor_pool;
EXPECT_THAT(BoolValueReflection().Initialize(&descriptor_pool),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("descriptor missing for protocol buffer "
"message well known type: ")));
}
class AdaptFromMessageTest : public Test {
public:
absl::Nonnull<google::protobuf::Arena*> arena() ABSL_ATTRIBUTE_LIFETIME_BOUND {
return &arena_;
}
std::string& scratch_space() ABSL_ATTRIBUTE_LIFETIME_BOUND {
return scratch_space_;
}
absl::Nonnull<const google::protobuf::DescriptorPool*> descriptor_pool() {
return GetTestingDescriptorPool();
}
absl::Nonnull<google::protobuf::MessageFactory*> message_factory() {
return GetTestingMessageFactory();
}
template <typename T>
absl::Nonnull<google::protobuf::Message*> MakeDynamic() {
const auto* descriptor_pool = GetTestingDescriptorPool();
const auto* descriptor =
ABSL_DIE_IF_NULL(descriptor_pool->FindMessageTypeByName(
internal::MessageTypeNameFor<T>()));
const auto* prototype =
ABSL_DIE_IF_NULL(GetTestingMessageFactory()->GetPrototype(descriptor));
return prototype->New(arena());
}
template <typename T>
Owned<google::protobuf::Message> DynamicParseTextProto(absl::string_view text) {
return ::cel::internal::DynamicParseTextProto<T>(
arena(), text, descriptor_pool(), message_factory());
}
absl::StatusOr<Value> AdaptFromMessage(const google::protobuf::Message& message) {
return well_known_types::AdaptFromMessage(
arena(), message, descriptor_pool(), message_factory(),
scratch_space());
}
private:
google::protobuf::Arena arena_;
std::string scratch_space_;
};
TEST_F(AdaptFromMessageTest, BoolValue) {
auto message =
DynamicParseTextProto<google::protobuf::BoolValue>(R"pb(value: true)pb");
EXPECT_THAT(AdaptFromMessage(*message),
IsOkAndHolds(VariantWith<bool>(true)));
}
TEST_F(AdaptFromMessageTest, Int32Value) {
auto message =
DynamicParseTextProto<google::protobuf::Int32Value>(R"pb(value: 1)pb");
EXPECT_THAT(AdaptFromMessage(*message),
IsOkAndHolds(VariantWith<int32_t>(1)));
}
TEST_F(AdaptFromMessageTest, Int64Value) {
auto message =
DynamicParseTextProto<google::protobuf::Int64Value>(R"pb(value: 1)pb");
EXPECT_THAT(AdaptFromMessage(*message),
IsOkAndHolds(VariantWith<int64_t>(1)));
}
TEST_F(AdaptFromMessageTest, UInt32Value) {
auto message =
DynamicParseTextProto<google::protobuf::UInt32Value>(R"pb(value: 1)pb");
EXPECT_THAT(AdaptFromMessage(*message),
IsOkAndHolds(VariantWith<uint32_t>(1)));
}
TEST_F(AdaptFromMessageTest, UInt64Value) {
auto message =
DynamicParseTextProto<google::protobuf::UInt64Value>(R"pb(value: 1)pb");
EXPECT_THAT(AdaptFromMessage(*message),
IsOkAndHolds(VariantWith<uint64_t>(1)));
}
TEST_F(AdaptFromMessageTest, FloatValue) {
auto message =
DynamicParseTextProto<google::protobuf::FloatValue>(R"pb(value: 1.0)pb");
EXPECT_THAT(AdaptFromMessage(*message), IsOkAndHolds(VariantWith<float>(1)));
}
TEST_F(AdaptFromMessageTest, DoubleValue) {
auto message =
DynamicParseTextProto<google::protobuf::DoubleValue>(R"pb(value: 1.0)pb");
EXPECT_THAT(AdaptFromMessage(*message), IsOkAndHolds(VariantWith<double>(1)));
}
TEST_F(AdaptFromMessageTest, BytesValue) {
auto message = DynamicParseTextProto<google::protobuf::BytesValue>(
R"pb(value: "foo")pb");
EXPECT_THAT(AdaptFromMessage(*message),
IsOkAndHolds(VariantWith<BytesValue>(BytesValue("foo"))));
}
TEST_F(AdaptFromMessageTest, StringValue) {
auto message = DynamicParseTextProto<google::protobuf::StringValue>(
R"pb(value: "foo")pb");
EXPECT_THAT(AdaptFromMessage(*message),
IsOkAndHolds(VariantWith<StringValue>(StringValue("foo"))));
}
TEST_F(AdaptFromMessageTest, Duration) {
auto message = DynamicParseTextProto<google::protobuf::Duration>(
R"pb(seconds: 1 nanos: 1)pb");
EXPECT_THAT(AdaptFromMessage(*message),
IsOkAndHolds(VariantWith<absl::Duration>(absl::Seconds(1) +
absl::Nanoseconds(1))));
}
TEST_F(AdaptFromMessageTest, Duration_SecondsOutOfRange) {
auto message = DynamicParseTextProto<google::protobuf::Duration>(
R"pb(seconds: 0x7fffffffffffffff nanos: 1)pb");
EXPECT_THAT(AdaptFromMessage(*message),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("invalid duration seconds: ")));
}
TEST_F(AdaptFromMessageTest, Duration_NanosOutOfRange) {
auto message = DynamicParseTextProto<google::protobuf::Duration>(
R"pb(seconds: 1 nanos: 0x7fffffff)pb");
EXPECT_THAT(AdaptFromMessage(*message),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("invalid duration nanoseconds: ")));
}
TEST_F(AdaptFromMessageTest, Duration_SignMismatch) {
auto message =
DynamicParseTextProto<google::protobuf::Duration>(R"pb(seconds: -1
nanos: 1)pb");
EXPECT_THAT(AdaptFromMessage(*message),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("duration sign mismatch: ")));
}
TEST_F(AdaptFromMessageTest, Timestamp) {
auto message =
DynamicParseTextProto<google::protobuf::Timestamp>(R"pb(seconds: 1
nanos: 1)pb");
EXPECT_THAT(
AdaptFromMessage(*message),
IsOkAndHolds(VariantWith<absl::Time>(
absl::UnixEpoch() + absl::Seconds(1) + absl::Nanoseconds(1))));
}
TEST_F(AdaptFromMessageTest, Timestamp_SecondsOutOfRange) {
auto message = DynamicParseTextProto<google::protobuf::Timestamp>(
R"pb(seconds: 0x7fffffffffffffff nanos: 1)pb");
EXPECT_THAT(AdaptFromMessage(*message),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("invalid timestamp seconds: ")));
}
TEST_F(AdaptFromMessageTest, Timestamp_NanosOutOfRange) {
auto message = DynamicParseTextProto<google::protobuf::Timestamp>(
R"pb(seconds: 1 nanos: 0x7fffffff)pb");
EXPECT_THAT(AdaptFromMessage(*message),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("invalid timestamp nanoseconds: ")));
}
TEST_F(AdaptFromMessageTest, Value_NullValue) {
auto message = DynamicParseTextProto<google::protobuf::Value>(
R"pb(null_value: NULL_VALUE)pb");
EXPECT_THAT(AdaptFromMessage(*message),
IsOkAndHolds(VariantWith<std::nullptr_t>(nullptr)));
}
TEST_F(AdaptFromMessageTest, Value_BoolValue) {
auto message =
DynamicParseTextProto<google::protobuf::Value>(R"pb(bool_value: true)pb");
EXPECT_THAT(AdaptFromMessage(*message),
IsOkAndHolds(VariantWith<bool>(true)));
}
TEST_F(AdaptFromMessageTest, Value_NumberValue) {
auto message = DynamicParseTextProto<google::protobuf::Value>(
R"pb(number_value: 1.0)pb");
EXPECT_THAT(AdaptFromMessage(*message),
IsOkAndHolds(VariantWith<double>(1.0)));
}
TEST_F(AdaptFromMessageTest, Value_StringValue) {
auto message = DynamicParseTextProto<google::protobuf::Value>(
R"pb(string_value: "foo")pb");
EXPECT_THAT(AdaptFromMessage(*message),
IsOkAndHolds(VariantWith<StringValue>(StringValue("foo"))));
}
TEST_F(AdaptFromMessageTest, Value_ListValue) {
auto message =
DynamicParseTextProto<google::protobuf::Value>(R"pb(list_value: {})pb");
EXPECT_THAT(
AdaptFromMessage(*message),
IsOkAndHolds(VariantWith<ListValue>(VariantWith<ListValueConstRef>(_))));
}
TEST_F(AdaptFromMessageTest, Value_StructValue) {
auto message =
DynamicParseTextProto<google::protobuf::Value>(R"pb(struct_value: {})pb");
EXPECT_THAT(
AdaptFromMessage(*message),
IsOkAndHolds(VariantWith<Struct>(VariantWith<StructConstRef>(_))));
}
TEST_F(AdaptFromMessageTest, ListValue) {
auto message = DynamicParseTextProto<google::protobuf::ListValue>(R"pb()pb");
EXPECT_THAT(
AdaptFromMessage(*message),
IsOkAndHolds(VariantWith<ListValue>(VariantWith<ListValueConstRef>(_))));
}
TEST_F(AdaptFromMessageTest, Struct) {
auto message = DynamicParseTextProto<google::protobuf::Struct>(R"pb()pb");
EXPECT_THAT(
AdaptFromMessage(*message),
IsOkAndHolds(VariantWith<Struct>(VariantWith<StructConstRef>(_))));
}
TEST_F(AdaptFromMessageTest, TestAllTypesProto3) {
auto message = DynamicParseTextProto<TestAllTypesProto3>(R"pb()pb");
EXPECT_THAT(AdaptFromMessage(*message),
IsOkAndHolds(VariantWith<absl::monostate>(absl::monostate())));
}
TEST_F(AdaptFromMessageTest, Any_BoolValue) {
auto message = DynamicParseTextProto<google::protobuf::Any>(
R"pb(type_url: "type.googleapis.com/google.protobuf.BoolValue")pb");
EXPECT_THAT(AdaptFromMessage(*message),
IsOkAndHolds(VariantWith<bool>(false)));
}
TEST_F(AdaptFromMessageTest, Any_Int32Value) {
auto message = DynamicParseTextProto<google::protobuf::Any>(
R"pb(type_url: "type.googleapis.com/google.protobuf.Int32Value")pb");
EXPECT_THAT(AdaptFromMessage(*message),
IsOkAndHolds(VariantWith<int32_t>(0)));
}
TEST_F(AdaptFromMessageTest, Any_Int64Value) {
auto message = DynamicParseTextProto<google::protobuf::Any>(
R"pb(type_url: "type.googleapis.com/google.protobuf.Int64Value")pb");
EXPECT_THAT(AdaptFromMessage(*message),
IsOkAndHolds(VariantWith<int64_t>(0)));
}
TEST_F(AdaptFromMessageTest, Any_UInt32Value) {
auto message = DynamicParseTextProto<google::protobuf::Any>(
R"pb(type_url: "type.googleapis.com/google.protobuf.UInt32Value")pb");
EXPECT_THAT(AdaptFromMessage(*message),
IsOkAndHolds(VariantWith<uint32_t>(0)));
}
TEST_F(AdaptFromMessageTest, Any_UInt64Value) {
auto message = DynamicParseTextProto<google::protobuf::Any>(
R"pb(type_url: "type.googleapis.com/google.protobuf.UInt64Value")pb");
EXPECT_THAT(AdaptFromMessage(*message),
IsOkAndHolds(VariantWith<uint64_t>(0)));
}
TEST_F(AdaptFromMessageTest, Any_FloatValue) {
auto message = DynamicParseTextProto<google::protobuf::Any>(
R"pb(type_url: "type.googleapis.com/google.protobuf.FloatValue")pb");
EXPECT_THAT(AdaptFromMessage(*message), IsOkAndHolds(VariantWith<float>(0)));
}
TEST_F(AdaptFromMessageTest, Any_DoubleValue) {
auto message = DynamicParseTextProto<google::protobuf::Any>(
R"pb(type_url: "type.googleapis.com/google.protobuf.DoubleValue")pb");
EXPECT_THAT(AdaptFromMessage(*message), IsOkAndHolds(VariantWith<double>(0)));
}
TEST_F(AdaptFromMessageTest, Any_BytesValue) {
auto message = DynamicParseTextProto<google::protobuf::Any>(
R"pb(type_url: "type.googleapis.com/google.protobuf.BytesValue")pb");
EXPECT_THAT(AdaptFromMessage(*message),
IsOkAndHolds(VariantWith<BytesValue>(BytesValue())));
}
TEST_F(AdaptFromMessageTest, Any_StringValue) {
auto message = DynamicParseTextProto<google::protobuf::Any>(
R"pb(type_url: "type.googleapis.com/google.protobuf.StringValue")pb");
EXPECT_THAT(AdaptFromMessage(*message),
IsOkAndHolds(VariantWith<StringValue>(StringValue())));
}
TEST_F(AdaptFromMessageTest, Any_Duration) {
auto message = DynamicParseTextProto<google::protobuf::Any>(
R"pb(type_url: "type.googleapis.com/google.protobuf.Duration")pb");
EXPECT_THAT(AdaptFromMessage(*message),
IsOkAndHolds(VariantWith<absl::Duration>(absl::ZeroDuration())));
}
TEST_F(AdaptFromMessageTest, Any_Timestamp) {
auto message = DynamicParseTextProto<google::protobuf::Any>(
R"pb(type_url: "type.googleapis.com/google.protobuf.Timestamp")pb");
EXPECT_THAT(AdaptFromMessage(*message),
IsOkAndHolds(VariantWith<absl::Time>(absl::UnixEpoch())));
}
TEST_F(AdaptFromMessageTest, Any_Value_NullValue) {
auto message = DynamicParseTextProto<google::protobuf::Any>(
R"pb(type_url: "type.googleapis.com/google.protobuf.Value")pb");
EXPECT_THAT(AdaptFromMessage(*message),
IsOkAndHolds(VariantWith<std::nullptr_t>(nullptr)));
}
TEST_F(AdaptFromMessageTest, Any_Value_BoolValue) {
auto message = DynamicParseTextProto<google::protobuf::Any>(
R"pb(type_url: "type.googleapis.com/google.protobuf.Value"
value: "\x20\x01")pb");
EXPECT_THAT(AdaptFromMessage(*message),
IsOkAndHolds(VariantWith<bool>(true)));
}
TEST_F(AdaptFromMessageTest, Any_Value_NumberValue) {
auto message = DynamicParseTextProto<google::protobuf::Any>(
R"pb(type_url: "type.googleapis.com/google.protobuf.Value"
value: "\x11\x00\x00\x00\x00\x00\x00\x00\x00")pb");
EXPECT_THAT(AdaptFromMessage(*message),
IsOkAndHolds(VariantWith<double>(0.0)));
}
TEST_F(AdaptFromMessageTest, Any_Value_StringValue) {
auto message = DynamicParseTextProto<google::protobuf::Any>(
R"pb(type_url: "type.googleapis.com/google.protobuf.Value"
value: "\x1a\x03\x66\x6f\x6f")pb");
EXPECT_THAT(AdaptFromMessage(*message),
IsOkAndHolds(VariantWith<StringValue>(StringValue("foo"))));
}
TEST_F(AdaptFromMessageTest, Any_Value_ListValue) {
auto message = DynamicParseTextProto<google::protobuf::Any>(
R"pb(type_url: "type.googleapis.com/google.protobuf.Value"
value: "\x32\x00")pb");
EXPECT_THAT(AdaptFromMessage(*message),
IsOkAndHolds(VariantWith<ListValue>(
VariantWith<ListValuePtr>(NotNull()))));
}
TEST_F(AdaptFromMessageTest, Any_Value_StructValue) {
auto message = DynamicParseTextProto<google::protobuf::Any>(
R"pb(type_url: "type.googleapis.com/google.protobuf.Value"
value: "\x2a\x00")pb");
EXPECT_THAT(
AdaptFromMessage(*message),
IsOkAndHolds(VariantWith<Struct>(VariantWith<StructPtr>(NotNull()))));
}
TEST_F(AdaptFromMessageTest, Any_ListValue) {
auto message = DynamicParseTextProto<google::protobuf::Any>(
R"pb(type_url: "type.googleapis.com/google.protobuf.ListValue")pb");
EXPECT_THAT(AdaptFromMessage(*message),
IsOkAndHolds(VariantWith<ListValue>(
VariantWith<ListValuePtr>(NotNull()))));
}
TEST_F(AdaptFromMessageTest, Any_Struct) {
auto message = DynamicParseTextProto<google::protobuf::Any>(
R"pb(type_url: "type.googleapis.com/google.protobuf.Struct")pb");
EXPECT_THAT(
AdaptFromMessage(*message),
IsOkAndHolds(VariantWith<Struct>(VariantWith<StructPtr>(NotNull()))));
}
TEST_F(AdaptFromMessageTest, Any_TestAllTypesProto3) {
auto message = DynamicParseTextProto<google::protobuf::Any>(
R"pb(type_url: "type.googleapis.com/google.api.expr.test.v1.proto3.TestAllTypes")pb");
EXPECT_THAT(AdaptFromMessage(*message),
IsOkAndHolds(VariantWith<Unique<google::protobuf::Message>>(NotNull())));
}
TEST_F(AdaptFromMessageTest, Any_BadTypeUrlDomain) {
auto message = DynamicParseTextProto<google::protobuf::Any>(
R"pb(type_url: "type.example.com/google.protobuf.BoolValue")pb");
EXPECT_THAT(AdaptFromMessage(*message),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("unable to find descriptor for type URL: ")));
}
TEST_F(AdaptFromMessageTest, Any_UnknownMessage) {
auto message = DynamicParseTextProto<google::protobuf::Any>(
R"pb(type_url: "type.googleapis.com/message.that.does.not.Exist")pb");
EXPECT_THAT(AdaptFromMessage(*message),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("unable to find descriptor for type name: ")));
}
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/internal/well_known_types.cc | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/internal/well_known_types_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
03576d5c-5806-40cf-9612-b9f8e63477e0 | cpp | tensorflow/tensorflow | multi_output_fusion | third_party/xla/xla/service/gpu/transforms/multi_output_fusion.cc | third_party/xla/xla/service/gpu/transforms/multi_output_fusion_test.cc | #include "xla/service/gpu/transforms/multi_output_fusion.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <memory>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "xla/debug_options_flags.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_dfs_reachability.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/gpu/gpu_fusible.h"
#include "xla/service/gpu/model/gpu_hlo_cost_analysis.h"
#include "xla/service/gpu/model/gpu_performance_model.h"
#include "xla/service/gpu/model/gpu_performance_model_base.h"
#include "xla/service/hlo_graph_dumper.h"
#include "xla/service/instruction_fusion.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_description.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
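// Multi-output fusion around a shared operand is only considered profitable
// when that operand is not an effective scalar.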
bool IsProfitableOperand(HloInstruction* instr) {
return !ShapeUtil::IsEffectiveScalar(instr->shape());
}
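// Returns the unique slice of `parent` that `instr` consumes, looking through
// fusions, or nullptr if `instr` does not consume exactly one such slice.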
const HloSliceInstruction* FindUniqueSlice(const HloInstruction* parent,
const HloInstruction* instr) {
if (const auto* slice = DynCast<HloSliceInstruction>(instr)) {
return slice;
} else if (const auto* fusion = DynCast<HloFusionInstruction>(instr)) {
const HloSliceInstruction* result = nullptr;
for (size_t i = 0; i < fusion->operand_count(); ++i) {
if (fusion->operand(i) == parent) {
if (result) return nullptr;
auto* called_param = fusion->fused_parameter(i);
if (called_param->user_count() != 1) return nullptr;
result = FindUniqueSlice(called_param, called_param->users()[0]);
if (!result) return nullptr;
}
}
return result;
} else {
return nullptr;
}
}
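// Forbids fusing two siblings that provably read non-overlapping slices of a
// large shared operand, since such a fusion would have nothing to share.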
FusionDecision ParameterSlicesAreNonOverlapping(const HloInstruction& instr1,
const HloInstruction& instr2,
const HloInstruction* parent) {
if (parent->shape().IsTuple()) return FusionDecision::Allow();
if (ShapeUtil::ByteSizeOfElements(parent->shape()) < 1024) {
return FusionDecision::Allow();
}
const HloSliceInstruction* slice1 = FindUniqueSlice(parent, &instr1);
const HloSliceInstruction* slice2 = FindUniqueSlice(parent, &instr2);
if (!slice1 || !slice2) return FusionDecision::Allow();
auto& starts1 = slice1->slice_starts();
auto& starts2 = slice2->slice_starts();
auto& limits1 = slice1->slice_limits();
auto& limits2 = slice2->slice_limits();
for (int64_t dim = 0; dim < parent->shape().rank(); ++dim) {
bool overlap = starts1[dim] < limits2[dim] && starts2[dim] < limits1[dim];
if (!overlap) {
return FusionDecision::Forbid("slices are non-overlapping");
}
}
return FusionDecision::Allow();
}
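// Hard legality checks for fusing two sibling fusions: dynamic-update-slice
// roots cannot be combined, and the merged fusion must fit in the budget.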
FusionDecision LegalToFuse(const HloInstruction& instr1,
const HloInstruction& instr2,
const se::DeviceDescription& device_info,
FusionInfoCache* fusion_info_cache) {
CHECK(instr1.opcode() == HloOpcode::kFusion);
if (instr1.fused_expression_root()->opcode() ==
HloOpcode::kDynamicUpdateSlice ||
(instr2.opcode() == HloOpcode::kFusion &&
instr2.fused_expression_root()->opcode() ==
HloOpcode::kDynamicUpdateSlice)) {
return FusionDecision::Forbid("can't fuse multiple DUSs");
}
  return FusionFitsInBudget(instr1, instr2, device_info,
                            /*is_consumer_producer_fusion=*/false,
                            fusion_info_cache);
}
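// Ranks fusion candidates: existing multi-output fusions first, then regular
// fusions, then unfused instructions.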
int FusionPriority(const HloInstruction* instr) {
if (instr->IsMultiOutputFusion()) {
return 2;
}
if (instr->opcode() == HloOpcode::kFusion) {
return 1;
}
return 0;
}
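// Picks the candidate consumer with the highest fusion priority.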
HloInstruction* SelectPreferredFusionCandidate(
    const std::vector<HloInstruction*>& candidates) {
if (candidates.empty()) {
return nullptr;
}
return *std::max_element(
candidates.begin(), candidates.end(),
[](const HloInstruction* a, const HloInstruction* b) {
return FusionPriority(a) < FusionPriority(b);
});
}
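// Fusing `producer` into `consumer` would create a cycle if any other operand
// of `consumer` is reachable from `producer`.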
FusionDecision OperandReachableFromProducer(
const HloInstruction& producer, const HloInstruction& consumer,
const HloDfsReachability& reachability) {
for (const auto* operand : consumer.operands()) {
if (!reachability.IsPresent(operand) &&
operand->opcode() == HloOpcode::kGetTupleElement) {
operand = operand->operand(0);
}
CHECK(reachability.IsPresent(operand) && reachability.IsPresent(&producer))
<< "Reachability map is incomplete. This should never "
"happen.";
if (&producer != operand && reachability.IsReachable(&producer, operand)) {
return FusionDecision::Forbid(
absl::StrCat(producer.name(), " would introduce a cycle when fused"));
}
}
return FusionDecision::Allow();
}
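// Full producer-consumer fusibility check: root eligibility, shape
// compatibility, acyclicity, fusion budget, generated IR size, and a
// performance-model estimate that the fused version is not slower.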
FusionDecision ProducerCandidateIsFusible(
const HloInstruction& producer, const HloInstruction& consumer,
const HloDfsReachability& reachability, FusionInfoCache* fusion_info_cache,
const se::DeviceDescription& device_info,
GpuHloCostAnalysis* cost_analysis) {
if (!IsFusibleAsMultiOutputFusionRoot(consumer)) {
return FusionDecision::Forbid(
"consumer not eligible as multi-output fusion root.");
}
RETURN_IF_NOT_FUSIBLE(
ShapesCompatibleForMultiOutputFusion(consumer, producer));
RETURN_IF_NOT_FUSIBLE(
OperandReachableFromProducer(producer, consumer, reachability));
  RETURN_IF_NOT_FUSIBLE(FusionFitsInBudget(
      producer, consumer, device_info,
      /*is_consumer_producer_fusion=*/false, fusion_info_cache));
if (cost_analysis->ProducerConsumerMergedTooLarge(producer, consumer)) {
return FusionDecision::Forbid("will generate too large IR");
}
  GpuPerformanceModel::RunTimes t = GpuPerformanceModel::EstimateRunTimes(
      &producer, device_info, cost_analysis,
      GpuPerformanceModelOptions::Default(),
      /*fused_consumers=*/{&consumer},
      /*multi_output=*/true);
if (t.time_fused > t.time_unfused) {
return FusionDecision::Forbid("will execute slower if fused");
}
return FusionDecision::Allow();
}
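// Collects every consumer into which `producer` could legally and profitably
// be fused as a multi-output fusion.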
std::vector<HloInstruction*> GetProducerConsumerMultiOutputFusionCandidates(
const HloInstruction* producer, const HloDfsReachability& reachability,
FusionInfoCache* fusion_info_cache,
const se::DeviceDescription& device_info,
GpuHloCostAnalysis* cost_analysis) {
std::vector<HloInstruction*> fusion_candidates;
const HloComputation* computation = producer->parent();
const HloModule* module = computation->parent();
bool dump_fusion =
module->config().debug_options().xla_dump_fusion_visualization();
if (!IsProducerMultiOutputFusible(*producer)) {
return fusion_candidates;
}
if (producer->user_count() == 1 &&
!producer->users()[0]->IsMultiOutputFusion()) {
return fusion_candidates;
}
for (HloInstruction* consumer : producer->users()) {
VLOG(3) << "Looking at producer " << producer->name()
<< " and its consumer " << consumer->name();
if (auto decision = ProducerCandidateIsFusible(
*producer, *consumer, reachability, fusion_info_cache, device_info,
cost_analysis)) {
fusion_candidates.push_back(consumer);
} else if (dump_fusion) {
RegisterFusionState(
*computation,
absl::StrCat("Not considering fusion of producer |", producer->name(),
"| into consumer |", consumer->name(),
"| due to: ", decision.Explain()),
*consumer, producer);
}
}
return fusion_candidates;
}
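// A sibling qualifies if it has users, can act as a multi-output fusion root,
// is not a nestable variadic reduction, and, if already a multi-output
// fusion, is consumed only through get-tuple-elements.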
bool IsSiblingFusionCandidate(const HloInstruction* instr) {
if (instr->users().empty() || !IsFusibleAsMultiOutputFusionRoot(*instr) ||
IsNestableVariadicReduction(*instr)) {
return false;
}
return (!instr->IsMultiOutputFusion() ||
absl::c_all_of(instr->users(), [&](const HloInstruction* user) {
return user->opcode() == HloOpcode::kGetTupleElement;
}));
}
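// Combines all sibling-fusion legality checks: the siblings must be
// unconnected, shape-compatible, free of disjoint-slice reads of the shared
// operand, and within the fusion budget.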
FusionDecision CanFuseSiblings(const HloInstruction& sibling_consumer_1,
const HloInstruction& sibling_consumer_2,
const HloInstruction& common_producer,
const HloDfsReachability& reachability,
FusionInfoCache* fusion_info_cache,
const se::DeviceDescription& device_info) {
if (reachability.IsConnected(&sibling_consumer_1, &sibling_consumer_2)) {
return FusionDecision::Forbid(
absl::StrCat(sibling_consumer_1.name(), " and ",
sibling_consumer_2.name(), " are connected"));
}
RETURN_IF_NOT_FUSIBLE(ShapesCompatibleForMultiOutputFusion(
sibling_consumer_1, sibling_consumer_2));
RETURN_IF_NOT_FUSIBLE(ParameterSlicesAreNonOverlapping(
sibling_consumer_1, sibling_consumer_2, &common_producer));
RETURN_IF_NOT_FUSIBLE(LegalToFuse(sibling_consumer_1, sibling_consumer_2,
device_info, fusion_info_cache));
return FusionDecision::Allow();
}
}
void MultiOutputFusion::RecomputeReachability() {
reachability_ = HloDfsReachability::Build(computation_);
}
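// Tries to multi-output fuse pairs of `parent`'s users into each other,
// visiting higher-priority (already fused) siblings first. Returns true if
// any fusion was performed.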
bool MultiOutputFusion::FuseSiblings(HloInstruction* parent,
FusionInfoCache* fusion_info_cache,
GpuHloCostAnalysis* cost_analysis) {
const HloComputation* computation = parent->parent();
const HloModule* module = computation->parent();
bool dump_fusion =
module->config().debug_options().xla_dump_fusion_visualization();
if (!IsProfitableOperand(parent)) {
VLOG(3) << "Operand " << parent->ToShortString() << " is not profitable";
return false;
}
bool changed = false;
std::vector<HloInstruction*> siblings;
absl::c_copy_if(parent->users(), std::back_inserter(siblings),
IsSiblingFusionCandidate);
absl::c_stable_sort(siblings,
[](const HloInstruction* a, const HloInstruction* b) {
return FusionPriority(a) > FusionPriority(b);
});
for (auto i = siblings.begin(); i != siblings.end(); ++i) {
VLOG(3) << "Considering " << (*i)->name();
if ((*i)->opcode() != HloOpcode::kFusion) {
continue;
}
for (auto j = i + 1; j != siblings.end();) {
VLOG(3) << "Considering " << (*i)->name() << " and " << (*j)->name();
if (auto fusible = CanFuseSiblings(**i, **j, *parent, *reachability_,
fusion_info_cache, device_info_);
!fusible) {
if (dump_fusion) {
RegisterFusionState(
*computation,
absl::StrCat("Not fusing siblings |", (**i).name(), "| and |",
(**j).name(), "| due to: ", fusible.Explain()),
            **i,
            /*producer=*/parent);
}
++j;
continue;
}
if (!ConsumeFuel(name(), [&] {
return absl::StrFormat("Not fusing siblings %s and %s.",
(*i)->name(), (*j)->name());
})) {
++j;
continue;
}
VLOG(2) << "Fuse siblings " << (*i)->name() << " and " << (*j)->name();
fusion_info_cache->Invalidate(*i);
fusion_info_cache->Invalidate(*j);
HloInstruction* remaining = *i;
HloInstruction* fused = *j;
TF_CHECK_OK(cost_analysis->RemoveInstruction(remaining));
TF_CHECK_OK(cost_analysis->RemoveInstruction(fused));
DumpFusionState(*remaining,
absl::StrCat("About to fuse sibling |", fused->name(),
"| into sibling |", remaining->name(),
"| inside multi-output fusion"),
fused);
if (fused->opcode() == HloOpcode::kFusion) {
remaining->MergeFusionInstructionIntoMultiOutput(fused);
if (fused->IsInputFusion()) {
remaining->set_fusion_kind(HloInstruction::FusionKind::kInput);
}
} else {
remaining->FuseInstructionIntoMultiOutput(fused);
CHECK_EQ(0, fused->user_count());
TF_CHECK_OK(computation_->RemoveInstruction(fused));
}
DumpFusionState(*remaining,
absl::StrCat("Fused into |", remaining->name(),
"| inside multi-output fusion"));
TF_CHECK_OK(cost_analysis->RevisitInstruction(remaining));
changed = true;
siblings.erase(j);
RecomputeReachability();
}
}
return changed;
}
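// Walks the computation from uses back to definitions, first attempting
// sibling fusion around each producer and then fusing the producer into its
// preferred consumer candidate.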
absl::StatusOr<bool> MultiOutputFusion::DoMultiOutputFusion() {
bool changed = false;
RecomputeReachability();
  GpuHloCostAnalysis cost_analysis({shape_size_function_,
                                    /*per_second_rates=*/{},
                                    /*min_latencies_seconds=*/{},
                                    /*count_multiple_input_accesses=*/true},
                                   device_info_);
TF_RETURN_IF_ERROR(computation_->Accept(&cost_analysis));
std::vector<HloInstruction*> defs_before_uses =
computation_->MakeInstructionPostOrder();
FusionInfoCache fusion_info_cache;
for (auto it = defs_before_uses.rbegin(); it != defs_before_uses.rend();
++it) {
auto* producer = *it;
if (producer->opcode() == HloOpcode::kConstant) {
VLOG(3) << producer->name() << " is a constant.";
continue;
}
if (producer->IsCustomFusion()) {
continue;
}
if (FuseSiblings(producer, &fusion_info_cache, &cost_analysis)) {
changed = true;
}
const auto candidates = GetProducerConsumerMultiOutputFusionCandidates(
producer, *reachability_, &fusion_info_cache, device_info_,
&cost_analysis);
auto* consumer_for_fusion = SelectPreferredFusionCandidate(candidates);
if (consumer_for_fusion == nullptr) {
continue;
}
if (!ConsumeFuel(name(), [&] {
return absl::StrFormat("Not fusing %s and %s.", producer->name(),
consumer_for_fusion->name());
})) {
continue;
}
changed = true;
fusion_info_cache.Invalidate(producer);
fusion_info_cache.Invalidate(consumer_for_fusion);
TF_RETURN_IF_ERROR(cost_analysis.RemoveInstruction(producer));
TF_RETURN_IF_ERROR(cost_analysis.RemoveInstruction(consumer_for_fusion));
HloInstruction* input_fusion;
if (consumer_for_fusion->opcode() == HloOpcode::kFusion) {
input_fusion = consumer_for_fusion;
VLOG(2) << "Fuse producer " << producer->name() << " into its consumer "
<< consumer_for_fusion->name();
} else {
input_fusion = computation_->AddInstruction(HloInstruction::CreateFusion(
consumer_for_fusion->shape(),
ChooseFusionKind(*producer, *consumer_for_fusion),
consumer_for_fusion));
VLOG(2) << "Fuse producer " << producer->name() << " and its consumer "
<< consumer_for_fusion->name() << " into "
<< input_fusion->name();
TF_CHECK_OK(
computation_->ReplaceInstruction(consumer_for_fusion, input_fusion));
}
DumpFusionState(*input_fusion,
absl::StrCat("About to fuse producer |", producer->name(),
"| into consumer |", input_fusion->name(),
"| inside multi-output fusion"),
producer);
if (producer->opcode() == HloOpcode::kFusion) {
input_fusion->MergeFusionInstructionIntoMultiOutput(producer);
} else {
input_fusion->FuseInstructionIntoMultiOutput(producer);
CHECK_EQ(0, producer->user_count());
TF_CHECK_OK(computation_->RemoveInstruction(producer));
}
TF_RETURN_IF_ERROR(cost_analysis.RevisitInstruction(input_fusion));
DumpFusionState(*input_fusion,
absl::StrCat("Fused into |", input_fusion->name(),
"| inside multi-output fusion"));
RecomputeReachability();
}
return changed;
}
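// Records a fusion visualization snapshot when dumping is enabled.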
void MultiOutputFusion::DumpFusionState(const HloInstruction& consumer,
absl::string_view label,
const HloInstruction* producer) {
if (consumer.GetModule()
->config()
.debug_options()
.xla_dump_fusion_visualization()) {
RegisterFusionState(*computation_, label, consumer, producer);
}
}
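// Entry point: runs multi-output fusion over every fusible computation in the
// module.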
absl::StatusOr<bool> MultiOutputFusion::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
for (auto* computation : GetFusibleComputations(*module, execution_threads)) {
computation_ = computation;
TF_ASSIGN_OR_RETURN(bool computation_changed, DoMultiOutputFusion());
changed |= computation_changed;
}
return changed;
}
}
} | #include "xla/service/gpu/transforms/multi_output_fusion.h"
#include <cstdint>
#include <optional>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/gpu/gpu_device_info_for_tests.h"
#include "xla/service/gpu/gpu_fusible.h"
#include "xla/service/hlo_cost_analysis.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/tests/hlo_test_base.h"
namespace xla {
namespace gpu {
namespace m = ::xla::match;
class MultiOutputFusionTest : public HloTestBase {
HloCostAnalysis::ShapeSizeFunction ShapeSizeBytesFunction() const {
return [&](const Shape& shape) {
constexpr int64_t kPointerSize = 8;
return ShapeUtil::ByteSizeOf(shape, kPointerSize);
};
}
public:
MultiOutputFusion mof_{TestGpuDeviceInfo::RTXA6000DeviceInfo(),
ShapeSizeBytesFunction()};
void CheckMultiOutputFusion(absl::string_view hlo,
std::optional<absl::string_view> expected) {
RunAndFilecheckHloRewrite(
hlo,
MultiOutputFusion{TestGpuDeviceInfo::RTXA6000DeviceInfo(),
ShapeSizeBytesFunction()},
expected);
}
};
const char kModulePrefix[] = R"(
HloModule test_module
scalar_add_computation {
scalar_lhs.0 = f32[] parameter(0)
scalar_rhs.0 = f32[] parameter(1)
ROOT add.0 = f32[] add(scalar_lhs.0, scalar_rhs.0)
}
scalar_mul_computation {
scalar_lhs.1 = f32[] parameter(0)
scalar_rhs.1 = f32[] parameter(1)
ROOT mul.1 = f32[] multiply(scalar_lhs.1, scalar_rhs.1)
})";
static int64_t CountMultiOutputFusions(const HloModule* module) {
int multi_output_fusion_count = 0;
for (auto* computation : module->MakeNonfusionComputations()) {
for (auto* instr : computation->instructions()) {
if (instr->IsMultiOutputFusion()) {
multi_output_fusion_count++;
}
}
}
return multi_output_fusion_count;
}
TEST_F(MultiOutputFusionTest, MultiOutputFusionSiblingReduceAndReduceFusion) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_computation {
p1.1 = f32[128,512,28,28]{3,2,1,0} parameter(1)
mul = f32[128,512,28,28]{3,2,1,0} multiply(p1.1, p1.1)
const.1 = f32[] parameter(0)
ROOT reduce.1 = f32[512]{0} reduce(mul, const.1), dimensions={0,2,3}, to_apply=scalar_add_computation
}
ENTRY entry {
p0 = f32[] parameter(0)
p1 = f32[128,512,28,28]{3,2,1,0} parameter(1)
const.2 = f32[] constant(1)
fusion = f32[512] fusion(p0, p1), kind=kInput, calls=fused_computation
reduce.2 = f32[512]{0} reduce(p1, const.2), dimensions={0,2,3}, to_apply=scalar_add_computation
ROOT root = (f32[512]{0}, f32[512]{0}) tuple(fusion, reduce.2)
})"))
.value();
ASSERT_TRUE(mof_.Run(module.get()).value());
SCOPED_TRACE(module->ToString());
const HloInstruction* fusion =
module->entry_computation()->root_instruction()->operand(0)->operand(0);
ASSERT_TRUE(fusion->IsMultiOutputFusion());
EXPECT_THAT(fusion->fused_expression_root(),
GmockMatch(m::Tuple(m::Reduce(), m::Reduce())));
}
TEST_F(MultiOutputFusionTest, MultiOutputFusionDifferentReduceInputShapes) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_computation_1 {
p1.1 = f32[6400]{0} parameter(1)
mul = f32[6400]{0} multiply(p1.1, p1.1)
const.1 = f32[] parameter(0)
ROOT reduce.1 = f32[] reduce(mul, const.1), dimensions={0}, to_apply=scalar_add_computation
}
fused_computation_2 {
p1.2 = f32[6400]{0} parameter(1)
r1 = f32[64,100]{0,1} reshape(p1.2)
const.2 = f32[] parameter(0)
ROOT reduce.2 = f32[] reduce(r1, const.2), dimensions={1,0}, to_apply=scalar_mul_computation
}
ENTRY entry {
p0 = f32[] parameter(0)
p1 = f32[6400]{0} parameter(1)
fusion.1 = f32[] fusion(p0, p1), kind=kInput, calls=fused_computation_1
fusion.2 = f32[] fusion(p0, p1), kind=kInput, calls=fused_computation_2
ROOT root = (f32[], f32[]) tuple(fusion.1, fusion.2)
})"))
.value();
ASSERT_FALSE(mof_.Run(module.get()).value());
}
TEST_F(MultiOutputFusionTest, ReduceMofDifferentTypes) {
const char* hlo = R"(
HloModule module
scalar_add_computation {
scalar_lhs.1 = f32[] parameter(0)
scalar_rhs.1 = f32[] parameter(1)
ROOT add.1 = f32[] add(scalar_lhs.1, scalar_rhs.1)
}
scalar_add_computation_f16 {
scalar_lhs.0 = f16[] parameter(0)
scalar_rhs.0 = f16[] parameter(1)
ROOT add.0 = f16[] add(scalar_lhs.0, scalar_rhs.0)
}
fused_computation {
param_0.2 = f32[128,512,28,28]{3,2,1,0} parameter(0)
c.1 = f16[128,512,28,28]{3,2,1,0} convert(param_0.2)
const.0 = f16[] constant(0)
ROOT reduce.0 = f16[512]{0} reduce(c.1, const.0), dimensions={0,2,3}, to_apply=scalar_add_computation_f16
}
ENTRY entry {
p0 = f32[] parameter(0)
p1 = f32[128,512,28,28]{3,2,1,0} parameter(1)
const.2 = f32[] constant(0)
reduce.1 = f32[512]{0} reduce(p1, const.2), dimensions={0,2,3}, to_apply=scalar_add_computation
fusion = f16[512]{0} fusion(p1), kind=kInput, calls=fused_computation
ROOT root = (f32[512]{0}, f16[512]{0}) tuple(reduce.1, fusion)
})";
CheckMultiOutputFusion(hlo, R"(
)");
}
TEST_F(MultiOutputFusionTest, MultiOutputFusionDifferentReduceOutputShapes) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_computation_1 {
p1.1 = f32[10,10]{1,0} parameter(1)
mul = f32[10,10]{1,0} multiply(p1.1, p1.1)
const.1 = f32[] parameter(0)
ROOT reduce.1 = f32[] reduce(mul, const.1), dimensions={0,1}, to_apply=scalar_add_computation
}
fused_computation_2 {
p1.2 = f32[10,10]{1,0} parameter(1)
const.2 = f32[] parameter(0)
ROOT reduce.2 = f32[10]{0} reduce(p1.2, const.2), dimensions={0}, to_apply=scalar_mul_computation
}
ENTRY entry {
p0 = f32[] parameter(0)
p1.3 = f32[10,10]{1,0} parameter(1)
fusion.1 = f32[] fusion(p0, p1.3), kind=kInput, calls=fused_computation_1
p2 = f32[] parameter(2)
fusion.2 = f32[10]{0} fusion(p2, p1.3), kind=kInput, calls=fused_computation_2
ROOT root = (f32[], f32[10]{0}) tuple(fusion.1, fusion.2)
})"))
.value();
ASSERT_FALSE(mof_.Run(module.get()).value());
}
TEST_F(MultiOutputFusionTest, MultiOutputFusionSiblingReduceFusions) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_computation_1 {
p1.1 = f32[128,512,28,28]{3,2,1,0} parameter(1)
mul = f32[128,512,28,28]{3,2,1,0} multiply(p1.1, p1.1)
const.1 = f32[] parameter(0)
ROOT reduce.1 = f32[512]{0} reduce(mul, const.1), dimensions={0,2,3}, to_apply=scalar_add_computation
}
fused_computation_2 {
p1.2 = f32[128,512,28,28]{3,2,1,0} parameter(1)
const.2 = f32[] parameter(0)
ROOT reduce.2 = f32[512]{0} reduce(p1.2, const.2), dimensions={0,2,3}, to_apply=scalar_add_computation
}
ENTRY entry {
p0 = f32[] parameter(0)
p1 = f32[128,512,28,28]{3,2,1,0} parameter(1)
fusion.1 = f32[512] fusion(p0, p1), kind=kInput, calls=fused_computation_1
fusion.2 = f32[512] fusion(p0, p1), kind=kInput, calls=fused_computation_2
ROOT root = (f32[512]{0}, f32[512]{0}) tuple(fusion.1, fusion.2)
})"))
.value();
ASSERT_TRUE(mof_.Run(module.get()).value());
SCOPED_TRACE(module->ToString());
const HloInstruction* fusion =
module->entry_computation()->root_instruction()->operand(0)->operand(0);
ASSERT_TRUE(fusion->IsMultiOutputFusion());
EXPECT_THAT(fusion->fused_expression_root(),
GmockMatch(m::Tuple(m::Reduce(), m::Reduce())));
}
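// Sharing only a scalar index operand does not make two dynamic-slice loop
// fusions profitable siblings.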
TEST_F(MultiOutputFusionTest, MultiOutputFusionNoSiblingFusionForCommonScalar) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_computation_1 {
param_0.87 = bf16[32,4096,16384]{2,1,0} parameter(0)
param_1.4620 = s32[] parameter(1)
constant_3949 = s32[] constant(0)
compare.1026 = pred[] compare(param_1.4620, constant_3949), direction=LT
constant_5437 = s32[] constant(32)
add.6859 = s32[] add(param_1.4620, constant_5437)
select.1599 = s32[] select(compare.1026, add.6859, param_1.4620)
dynamic-slice.59 = bf16[1,4096,16384]{2,1,0} dynamic-slice(param_0.87, select.1599, constant_3949, constant_3949), dynamic_slice_sizes={1,4096,16384}
ROOT bitcast.41089 = bf16[4096,16384]{1,0} bitcast(dynamic-slice.59)
}
fused_computation_2 {
param_0 = bf16[32,4096,16384]{2,1,0} parameter(0)
param_1 = s32[] parameter(1)
constant = s32[] constant(0)
compare = pred[] compare(param_1, constant), direction=LT
constant.32 = s32[] constant(32)
add = s32[] add(param_1, constant.32)
select = s32[] select(compare, add, param_1)
dynamic-slice = bf16[1,4096,16384]{2,1,0} dynamic-slice(param_0, select, constant, constant), dynamic_slice_sizes={1,4096,16384}
ROOT bitcast.41087 = bf16[4096,16384]{1,0} bitcast(dynamic-slice)
}
ENTRY entry {
p0 = s32[] parameter(0)
p1 = bf16[32,4096,16384]{2,1,0} parameter(1)
p2 = bf16[32,4096,16384]{2,1,0} parameter(2)
fusion.1 = bf16[4096,16384]{1,0} fusion(p1, p0), kind=kLoop, calls=fused_computation_1
fusion.2 = bf16[4096,16384]{1,0} fusion(p2, p0), kind=kLoop, calls=fused_computation_2
ROOT root = (bf16[4096,16384]{1,0}, bf16[4096,16384]{1,0}) tuple(fusion.1, fusion.2)
})"))
.value();
ASSERT_FALSE(mof_.Run(module.get()).value());
}
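// An unfused reduce sibling is merged into an existing two-reduce
// multi-output fusion, yielding a three-way tuple root.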
TEST_F(MultiOutputFusionTest,
MultiOutputFusionSiblingReduceAndReduceMultiOutputFusion) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_computation (p0: f32[128,512,28,28]) -> (f32[512], f32[512]) {
const.1 = f32[] constant(1)
p0.1 = f32[128,512,28,28]{3,2,1,0} parameter(0)
mul = f32[128,512,28,28]{3,2,1,0} multiply(f32[128,512,28,28]{3,2,1,0} p0.1, f32[128,512,28,28]{3,2,1,0} p0.1)
reduce.1 = f32[512]{0} reduce(f32[128,512,28,28]{3,2,1,0} mul, f32[] const.1), dimensions={0,2,3}, to_apply=scalar_add_computation
reduce.2 = f32[512]{0} reduce(f32[128,512,28,28]{3,2,1,0} p0.1, f32[] const.1), dimensions={0,2,3}, to_apply=scalar_add_computation
ROOT tuple = (f32[512]{0}, f32[512]{0}) tuple(f32[512]{0} reduce.1, f32[512]{0} reduce.2)
}
ENTRY entry (p0: f32[128,512,28,28]) -> (f32[512], f32[512], f32[512]) {
p0 = f32[128,512,28,28]{3,2,1,0} parameter(0)
const = f32[] constant(1)
fusion = (f32[512]{0}, f32[512]{0}) fusion(f32[128,512,28,28]{3,2,1,0} p0), kind=kInput, calls=fused_computation
get-tuple-element = f32[512]{0} get-tuple-element((f32[512]{0}, f32[512]{0}) fusion), index=0
get-tuple-element.1 = f32[512]{0} get-tuple-element((f32[512]{0}, f32[512]{0}) fusion), index=1
reduce.3 = f32[512]{0} reduce(p0, const), dimensions={0,2,3}, to_apply=scalar_add_computation
ROOT root = (f32[512]{0}, f32[512]{0}, f32[512]{0}) tuple(f32[512]{0} get-tuple-element, f32[512]{0} get-tuple-element.1, f32[512]{0} reduce.3)
})"))
.value();
ASSERT_TRUE(mof_.Run(module.get()).value());
SCOPED_TRACE(module->ToString());
const HloInstruction* fusion =
module->entry_computation()->root_instruction()->operand(0)->operand(0);
ASSERT_TRUE(fusion->IsMultiOutputFusion());
EXPECT_THAT(fusion->fused_expression_root(),
GmockMatch(m::Tuple(m::Reduce(), m::Reduce(), m::Reduce())));
}
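// Fusibility must be checked against the reduce operand of the existing
// multi-output fusion; the incompatible reduce shapes block sibling fusion.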
TEST_F(MultiOutputFusionTest,
MultiOutputFusionSiblingFusionCheckAgainstReduceOperand) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_computation_1 {
p1.1 = f32[10,10]{1,0} parameter(1)
mul = f32[10,10]{1,0} multiply(p1.1, p1.1)
const.1 = f32[] parameter(0)
reduce.1 = f32[] reduce(p1.1, const.1), dimensions={0,1}, to_apply=scalar_add_computation
ROOT tuple = (f32[10,10], f32[]) tuple(mul, reduce.1)
}
fused_computation_2 {
p1.2 = f32[10,10]{1,0} parameter(1)
const.2 = f32[] parameter(0)
ROOT reduce.2 = f32[10] reduce(p1.2, const.2), dimensions={0}, to_apply=scalar_mul_computation
}
ENTRY entry {
p0 = f32[] parameter(0)
p1 = f32[10,10]{1,0} parameter(1)
p2 = f32[] parameter(2)
fusion.1 = (f32[10,10], f32[]) fusion(p0, p1), kind=kInput, calls=fused_computation_1
get-tuple-element.1 = f32[10,10] get-tuple-element((f32[10,10], f32[]) fusion.1), index=0
get-tuple-element.2 = f32[] get-tuple-element((f32[10,10], f32[]) fusion.1), index=1
fusion.2 = f32[10] fusion(p2, p1), kind=kInput, calls=fused_computation_2
ROOT root = (f32[10,10], f32[], f32[10]) tuple(get-tuple-element.1, get-tuple-element.2, fusion.2)
})"))
.value();
ASSERT_FALSE(mof_.Run(module.get()).value());
}
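// Variadic (tuple-output) reductions wrapped in kLoop fusions are not merged.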
TEST_F(MultiOutputFusionTest, LoopVariadicReductionFusions) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_computation.94 {
tmp_0 = f32[] parameter(0)
tmp_1 = f32[] parameter(1)
tmp_2 = pred[] compare(tmp_0, tmp_1), direction=GE
tmp_3 = f32[] select(tmp_2, tmp_0, tmp_1)
tmp_4 = pred[] compare(tmp_0, tmp_1), direction=EQ
tmp_5 = s32[] parameter(2)
tmp_6 = s32[] parameter(3)
tmp_7 = s32[] minimum(tmp_5, tmp_6)
tmp_8 = s32[] select(tmp_2, tmp_5, tmp_6)
tmp_9 = s32[] select(tmp_4, tmp_7, tmp_8)
ROOT tmp_10 = (f32[], s32[]) tuple(tmp_3, tmp_9)
}
minmax_func.1536 {
tmp_0 = f32[] parameter(0)
tmp_1 = f32[] parameter(2)
tmp_2 = s32[] parameter(1)
tmp_3 = s32[] parameter(3)
ROOT tmp_4 = (f32[], s32[]) fusion(tmp_0, tmp_1, tmp_2, tmp_3), kind=kLoop, calls=fused_computation.94
}
fused_computation {
tmp_0 = f32[554112,10]{1,0} parameter(0)
tmp_1 = s32[554112,10]{1,0} iota(), iota_dimension=1
tmp_2 = f32[] constant(-inf)
tmp_3 = s32[] constant(0)
ROOT tmp_4 = (f32[554112]{0}, s32[554112]{0}) reduce(tmp_0, tmp_1, tmp_2, tmp_3), dimensions={1}, to_apply=minmax_func.1536
}
fused_computation2 {
tmp_0 = f32[554112,10]{1,0} parameter(0)
tmp_1 = s32[554112,10]{1,0} iota(), iota_dimension=1
tmp_2 = f32[] constant(inf)
tmp_3 = s32[] constant(1)
ROOT tmp_4 = (f32[554112]{0}, s32[554112]{0}) reduce(tmp_0, tmp_1, tmp_2, tmp_3), dimensions={1}, to_apply=minmax_func.1536
}
ENTRY e {
tmp_0 = f32[554112,10]{1,0} parameter(0)
tmp_1 = (f32[554112]{0}, s32[554112]{0}) fusion(tmp_0), kind=kLoop, calls=fused_computation
tmp_2 = s32[554112]{0} get-tuple-element(tmp_1), index=1
tmp_4 = (f32[554112]{0}, s32[554112]{0}) fusion(tmp_0), kind=kLoop, calls=fused_computation2
tmp_5 = s32[554112]{0} get-tuple-element(tmp_4), index=1
ROOT tmp_6 = s32[554112]{0} add(tmp_2, tmp_5)
})"))
.value();
EXPECT_FALSE(mof_.Run(module.get()).value());
}
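// Sibling kInput fusions with matching variadic reductions are merged; the
// parameter then feeds a single fusion rooted at both reduces.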
TEST_F(MultiOutputFusionTest, InputVariadicReductionFusions) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_computation.1117 {
param_0.2433 = f32[] parameter(0)
param_1.2571 = f32[] parameter(1)
compare.1770 = pred[] compare(param_0.2433, param_1.2571), direction=LE
select.682 = f32[] select(compare.1770, param_0.2433, param_1.2571)
compare.1303.clone.1 = pred[] compare(param_0.2433, param_1.2571), direction=EQ
param_2.6460 = s32[] parameter(2)
param_3.6755 = s32[] parameter(3)
minimum.633.clone.1 = s32[] minimum(param_2.6460, param_3.6755)
select.398.clone.1 = s32[] select(compare.1770, param_2.6460, param_3.6755)
select.397.clone.1 = s32[] select(compare.1303.clone.1, minimum.633.clone.1, select.398.clone.1)
ROOT tuple.151 = (f32[], s32[]) tuple(select.682, select.397.clone.1)
}
minmax_func.223 {
lhs_value.224 = f32[] parameter(0)
rhs_value.226 = f32[] parameter(2)
lhs_index.225 = s32[] parameter(1)
rhs_index.227 = s32[] parameter(3)
ROOT fusion.1117 = (f32[], s32[]) fusion(lhs_value.224, rhs_value.226, lhs_index.225, rhs_index.227), kind=kLoop, calls=fused_computation.1117
}
fused_computation.73 {
bitcast.86661 = f32[3,1024,300]{2,1,0} parameter(0)
iota.734 = s32[3,1,1024,300]{3,2,1,0} iota(), iota_dimension=3
bitcast.97555 = s32[3,1024,300]{2,1,0} bitcast(iota.734)
constant_3917 = f32[] constant(inf)
constant_3918 = s32[] constant(0)
ROOT reduce.1069 = (f32[3,1024]{1,0}, s32[3,1024]{1,0}) reduce(bitcast.86661, bitcast.97555, constant_3917, constant_3918), dimensions={2}, to_apply=minmax_func.223
}
fused_computation.84 {
bitcast.86676 = f32[3,1024,300]{2,1,0} parameter(0)
iota.732 = s32[3,1,1024,300]{3,2,1,0} iota(), iota_dimension=3
bitcast.97553 = s32[3,1024,300]{2,1,0} bitcast(iota.732)
constant_3915 = f32[] constant(inf)
constant_3916 = s32[] constant(0)
ROOT reduce.1070 = (f32[3,1024]{1,0}, s32[3,1024]{1,0}) reduce(bitcast.86676, bitcast.97553, constant_3915, constant_3916), dimensions={2}, to_apply=minmax_func.223
}
ENTRY e {
p0 = f32[3,1024,300]{2,1,0} parameter(0)
fusion.84 = (f32[3,1024]{1,0}, s32[3,1024]{1,0}) fusion(p0), kind=kInput, calls=fused_computation.84
gte.391 = s32[3,1024]{1,0} get-tuple-element(fusion.84), index=1
fusion.73 = (f32[3,1024]{1,0}, s32[3,1024]{1,0}) fusion(p0), kind=kInput, calls=fused_computation.73
gte.393 = s32[3,1024]{1,0} get-tuple-element(fusion.73), index=1
ROOT r = s32[3,1024]{1,0} add(gte.391, gte.393)
})"))
.value();
EXPECT_TRUE(mof_.Run(module.get()).value());
EXPECT_EQ(module->entry_computation()->parameter_instruction(0)->user_count(),
1);
const HloInstruction* fusion =
module->entry_computation()->parameter_instruction(0)->users()[0];
EXPECT_THAT(fusion, GmockMatch(m::Fusion()));
EXPECT_THAT(fusion->fused_expression_root(),
GmockMatch(m::Tuple(m::Reduce(), m::Reduce())));
}
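// Two elementwise loop fusions over the same parameter merge into one
// multi-output loop fusion.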
TEST_F(MultiOutputFusionTest, MultiOutputFusionTwoLoops) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_computation_1 {
p0.1 = f32[6400]{0} parameter(0)
ROOT mul = f32[6400]{0} multiply(p0.1, p0.1)
}
fused_computation_2 {
p0.2 = f32[6400]{0} parameter(0)
const.2 = f32[] constant(1)
broadcast = f32[6400]{0} broadcast(const.2), dimensions={}
ROOT div = f32[6400]{0} divide(p0.2, broadcast)
}
ENTRY entry {
p0 = f32[6400]{0} parameter(0)
fusion.1 = f32[6400]{0} fusion(p0), kind=kLoop, calls=fused_computation_1
fusion.2 = f32[6400]{0} fusion(p0), kind=kLoop, calls=fused_computation_2
ROOT root = (f32[6400]{0}, f32[6400]{0}) tuple(fusion.1, fusion.2)
})"))
.value();
ASSERT_TRUE(mof_.Run(module.get()).value());
SCOPED_TRACE(module->ToString());
const HloInstruction* fusion =
module->entry_computation()->root_instruction()->operand(0)->operand(0);
ASSERT_TRUE(fusion->IsMultiOutputFusion());
EXPECT_THAT(fusion->fused_expression_root(),
GmockMatch(m::Tuple(m::Multiply(), m::Divide())));
}
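// A loop fusion and a plain elementwise divide on the same parameter fuse
// into one multi-output fusion.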
TEST_F(MultiOutputFusionTest, MultiOutputFusionLoopElementwise) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_computation_1 {
p0.1 = f32[6400]{0} parameter(0)
ROOT mul = f32[6400]{0} multiply(p0.1, p0.1)
}
ENTRY entry {
p0 = f32[6400]{0} parameter(0)
fusion.1 = f32[6400]{0} fusion(p0), kind=kLoop, calls=fused_computation_1
const.2 = f32[] constant(1)
broadcast = f32[6400]{0} broadcast(const.2), dimensions={}
div = f32[6400]{0} divide(p0, broadcast)
ROOT root = (f32[6400]{0}, f32[6400]{0}) tuple(fusion.1, div)
})"))
.value();
ASSERT_TRUE(mof_.Run(module.get()).value());
SCOPED_TRACE(module->ToString());
const HloInstruction* fusion =
module->entry_computation()->root_instruction()->operand(0)->operand(0);
ASSERT_TRUE(fusion->IsMultiOutputFusion());
EXPECT_THAT(fusion->fused_expression_root(),
GmockMatch(m::Tuple(m::Multiply(), m::Divide())));
}
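// Sibling loop fusions with different output shapes (elementwise vs. reduce)
// do not fuse.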
TEST_F(MultiOutputFusionTest, MultiOutputFusionSiblingLoopsDifferentShapes) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_computation_1 {
p0.1 = f32[8,1,5,16,1,2]{5,4,3,2,1,0} parameter(0)
ROOT mul = f32[8,1,5,16,1,2]{5,4,3,2,1,0} multiply(p0.1, p0.1)
}
fused_computation_2 {
p0.2 = f32[8,1,5,16,1,2]{5,4,3,2,1,0} parameter(0)
const.2 = f32[] constant(0)
ROOT reduce = f32[1,5,1,2]{3,2,1,0} reduce(p0.2, const.2), dimensions={0,3}, to_apply=scalar_add_computation
}
ENTRY entry {
p0 = f32[8,1,5,16,1,2]{5,4,3,2,1,0} parameter(0)
fusion.1 = f32[8,1,5,16,1,2]{5,4,3,2,1,0} fusion(p0), kind=kLoop, calls=fused_computation_1
fusion.2 = f32[1,5,1,2]{3,2,1,0} fusion(p0), kind=kLoop, calls=fused_computation_2
ROOT root = (f32[8,1,5,16,1,2]{5,4,3,2,1,0}, f32[1,5,1,2]{3,2,1,0}) tuple(fusion.1, fusion.2)
})"))
.value();
ASSERT_FALSE(mof_.Run(module.get()).value());
}
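// A single-output loop fusion merges into an existing multi-output loop
// fusion, producing a three-element tuple root.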
TEST_F(MultiOutputFusionTest, MultiOutputFusionSiblingLoopAndMultiOutputLoop) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_computation_1 {
p0.1 = f32[8,1,5,16,1,1]{5,4,3,2,1,0} parameter(0)
mul = f32[8,1,5,16,1,1]{5,4,3,2,1,0} multiply(p0.1, p0.1)
exp = f32[8,1,5,16,1,1]{5,4,3,2,1,0} exponential(p0.1)
ROOT tuple = (f32[8,1,5,16,1,1]{5,4,3,2,1,0},
f32[8,1,5,16,1,1]{5,4,3,2,1,0}) tuple(mul, exp)
}
fused_computation_2 {
p0.2 = f32[8,1,5,16,1,1]{5,4,3,2,1,0} parameter(0)
const.2 = f32[] constant(0)
broadcast = f32[8,1,5,16,1,1]{5,4,3,2,1,0} broadcast(const.2),
dimensions={}
ROOT add = f32[8,1,5,16,1,1]{5,4,3,2,1,0} add(p0.2, broadcast)
}
ENTRY entry {
p0 = f32[8,1,5,16,1,1]{5,4,3,2,1,0} parameter(0)
fusion.1 = (f32[8,1,5,16,1,1]{5,4,3,2,1,0},
f32[8,1,5,16,1,1]{5,4,3,2,1,0}) fusion(p0), kind=kLoop,
calls=fused_computation_1
fusion.2 = f32[8,1,5,16,1,1]{5,4,3,2,1,0} fusion(p0), kind=kLoop,
calls=fused_computation_2
gte0 = f32[8,1,5,16,1,1]{5,4,3,2,1,0} get-tuple-element(fusion.1), index=0
gte1 = f32[8,1,5,16,1,1]{5,4,3,2,1,0} get-tuple-element(fusion.1), index=1
ROOT root = (f32[8,1,5,16,1,1]{5,4,3,2,1,0},
f32[8,1,5,16,1,1]{5,4,3,2,1,0}, f32[8,1,5,16,1,1]{5,4,3,2,1,0})
tuple(gte0, gte1, fusion.2)
})"))
.value();
ASSERT_TRUE(mof_.Run(module.get()).value());
SCOPED_TRACE(module->ToString());
const HloInstruction* fusion =
module->entry_computation()->root_instruction()->operand(0)->operand(0);
ASSERT_TRUE(fusion->IsMultiOutputFusion());
EXPECT_THAT(fusion->fused_expression_root(),
GmockMatch(m::Tuple(m::Multiply(), m::Exp(), m::Add())));
}
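// Two multi-output loop fusions merge into one with a four-element tuple
// root.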
TEST_F(MultiOutputFusionTest,
MultiOutputFusionSiblingMultiOutputLoopAndMultiOutputLoop) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_computation_1 {
p0.1 = f32[8,16]{1,0} parameter(0)
mul = f32[8,16]{1,0} multiply(p0.1, p0.1)
exp = f32[8,16]{1,0} exponential(p0.1)
ROOT tuple = (f32[8,16]{1,0}, f32[8,16]{1,0}) tuple(mul, exp)
}
fused_computation_2 {
p0.2 = f32[8,16]{1,0} parameter(0)
const.2 = f32[] constant(0)
broadcast = f32[8,16]{1,0} broadcast(const.2),
dimensions={}
add = f32[8,16]{1,0} add(p0.2, broadcast)
ROOT tuple.1 = (f32[8,16]{1,0}, f32[8,16]{1,0}) tuple(add, broadcast)
}
ENTRY entry {
p0 = f32[8,16]{1,0} parameter(0)
fusion.1 = (f32[8,16]{1,0}, f32[8,16]{1,0}) fusion(p0), kind=kLoop,
calls=fused_computation_1
fusion.2 = (f32[8,16]{1,0}, f32[8,16]{1,0}) fusion(p0), kind=kLoop,
calls=fused_computation_2
gte0 = f32[8,16]{1,0} get-tuple-element(fusion.1), index=0
gte1 = f32[8,16]{1,0} get-tuple-element(fusion.1), index=1
gte2 = f32[8,16]{1,0} get-tuple-element(fusion.2), index=0
gte3 = f32[8,16]{1,0} get-tuple-element(fusion.2), index=1
ROOT root = (f32[8,16]{1,0}, f32[8,16]{1,0}, f32[8,16]{1,0},
f32[8,16]{1,0})
tuple(gte0, gte1, gte2, gte3)
})"))
.value();
ASSERT_TRUE(mof_.Run(module.get()).value());
SCOPED_TRACE(module->ToString());
const HloInstruction* fusion =
module->entry_computation()->root_instruction()->operand(0)->operand(0);
ASSERT_TRUE(fusion->IsMultiOutputFusion());
EXPECT_THAT(
fusion->fused_expression_root(),
GmockMatch(m::Tuple(m::Multiply(), m::Exp(), m::Add(), m::Broadcast())));
}
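// Merging is rejected when the sibling reduce's output shape differs from the
// multi-output loop fusion's shape.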
TEST_F(MultiOutputFusionTest,
MultiOutputFusionSiblingLoopAndMultiOutputLoopDifferentShapes) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_computation_1 {
p0.1 = f32[8,1,5,16,1,2]{5,4,3,2,1,0} parameter(0)
mul = f32[8,1,5,16,1,2]{5,4,3,2,1,0} multiply(p0.1, p0.1)
exp = f32[8,1,5,16,1,2]{5,4,3,2,1,0} exponential(p0.1)
ROOT tuple = (f32[8,1,5,16,1,2]{5,4,3,2,1,0},
f32[8,1,5,16,1,2]{5,4,3,2,1,0}) tuple(mul, exp)
}
fused_computation_2 {
p0.2 = f32[8,1,5,16,1,2]{5,4,3,2,1,0} parameter(0)
const.2 = f32[] constant(0)
ROOT reduce = f32[1,5,1,2]{3,2,1,0} reduce(p0.2, const.2),
dimensions={0,3}, to_apply=scalar_add_computation
}
ENTRY entry {
p0 = f32[8,1,5,16,1,2]{5,4,3,2,1,0} parameter(0)
fusion.1 = (f32[8,1,5,16,1,2]{5,4,3,2,1,0},
f32[8,1,5,16,1,2]{5,4,3,2,1,0}) fusion(p0), kind=kLoop,
calls=fused_computation_1
fusion.2 = f32[1,5,1,2]{3,2,1,0} fusion(p0), kind=kLoop,
calls=fused_computation_2
gte0 = f32[8,1,5,16,1,2]{5,4,3,2,1,0} get-tuple-element(fusion.1), index=0
gte1 = f32[8,1,5,16,1,2]{5,4,3,2,1,0} get-tuple-element(fusion.1), index=1
ROOT root = (f32[8,1,5,16,1,2]{5,4,3,2,1,0},
f32[8,1,5,16,1,2]{5,4,3,2,1,0}, f32[1,5,1,2]{3,2,1,0})
tuple(gte0, gte1, fusion.2)
})"))
.value();
ASSERT_FALSE(mof_.Run(module.get()).value());
}
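// A free bitcast user of the parameter must not trigger sibling fusion with
// the loop fusion.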
TEST_F(MultiOutputFusionTest, SiblingFusionBitcastAndLoopFusionNotFused) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test
fused_computation_1 {
p0.1 = f32[2048,16000]{1,0} parameter(0)
bitcast = f32[2048,1,16000]{2,1,0} bitcast(p0.1)
ROOT exp = f32[2048,1,16000]{2,1,0} exponential(bitcast)
}
ENTRY main {
param_0 = f32[2048,16000]{1,0} parameter(0)
fusion = f32[2048,1,16000]{2,1,0} fusion(param_0), kind=kLoop, calls=fused_computation_1
bitcast = f32[16000,1,2048]{2,1,0} bitcast(param_0)
ROOT tuple.143 = (f32[16000,1,2048]{2,1,0}, f32[2048,1,16000]{2,1,0}) tuple(bitcast, fusion)
})")
.value();
EXPECT_FALSE(mof_.Run(module.get()).value());
}
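// Likewise for producer-consumer fusion: a convert followed by a bare bitcast
// stays unfused.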
TEST_F(MultiOutputFusionTest,
ProducerConsumerFusionBitcastAndElementwiseNotFused) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test
ENTRY main {
param_0 = f32[2048,16000]{1,0} parameter(0)
convert = bf16[2048,16000]{1,0} convert(param_0)
bitcast = bf16[16000,1,2048]{2,1,0} bitcast(convert)
ROOT tuple.143 = (bf16[16000,1,2048]{2,1,0}, bf16[2048,16000]{1,0}) tuple(bitcast, convert)
})")
.value();
EXPECT_FALSE(mof_.Run(module.get()).value());
}
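// The exp producer, which also escapes to the root, is fused into its reduce
// consumer; both values become fusion outputs.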
TEST_F(MultiOutputFusionTest, ProducerConsumerFusionElementwiseAndReduce) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
ENTRY reduce {
p0 = f32[32,32,32]{2,1,0} parameter(0)
c0 = f32[] constant(0)
exp = f32[32,32,32]{2,1,0} exponential(p0)
reduce = f32[32,32]{1,0} reduce(exp, c0), dimensions={2},
to_apply=scalar_add_computation
ROOT root = (f32[32,32]{1,0}, f32[32,32,32]{2,1,0}) tuple(reduce, exp)
})"))
.value();
ASSERT_TRUE(mof_.Run(module.get()).value());
SCOPED_TRACE(module->ToString());
const HloInstruction* root = module->entry_computation()->root_instruction();
const HloInstruction* fusion = nullptr;
ASSERT_THAT(root, GmockMatch(m::Tuple(m::GetTupleElement(m::Fusion(&fusion)),
m::GetTupleElement())));
ASSERT_TRUE(fusion->IsMultiOutputFusion());
EXPECT_THAT(fusion->fused_expression_root(),
GmockMatch(m::Tuple(m::Reduce(), m::Exp())));
}
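// A loop-fusion producer with an external user is fused into the reduce
// consumer, keeping the add as a second output.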
TEST_F(MultiOutputFusionTest, ProducerConsumerFusionLoopFusionAndReduce) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_add {
p0.1 = f32[32,32,32]{2,1,0} parameter(0)
p1.1 = f32[32,32,32]{2,1,0} parameter(1)
ROOT add = f32[32,32,32]{2,1,0} add(p0.1, p1.1)
}
ENTRY reduce {
p0 = f32[32,32,32]{2,1,0} parameter(0)
p1 = f32[32,32,32]{2,1,0} parameter(1)
c0 = f32[] constant(0)
add = f32[32,32,32]{2,1,0} fusion(p0, p1), kind=kLoop, calls=fused_add
reduce = f32[32,32]{1,0} reduce(add, c0), dimensions={2},
to_apply=scalar_add_computation
ROOT root = (f32[32,32]{1,0}, f32[32,32,32]{2,1,0}) tuple(reduce, add)
})"))
.value();
ASSERT_TRUE(mof_.Run(module.get()).value());
SCOPED_TRACE(module->ToString());
const HloInstruction* root = module->entry_computation()->root_instruction();
const HloInstruction* fusion = nullptr;
ASSERT_THAT(root, GmockMatch(m::Tuple(m::GetTupleElement(m::Fusion(&fusion)),
m::GetTupleElement())));
ASSERT_TRUE(fusion->IsMultiOutputFusion());
EXPECT_THAT(fusion->fused_expression_root(),
GmockMatch(m::Tuple(m::Reduce(), m::Add())));
}
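// A select loop fusion feeding a two-reduce input fusion is merged; the
// select becomes a third fusion output.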
TEST_F(MultiOutputFusionTest, ProducerConsumerFusionLoopFusionAndReduceFusion) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_select {
p1.1 = f32[32,32,32]{2,1,0} parameter(1)
c0 = f32[] constant(0)
broadcast = f32[32,32,32]{2,1,0} broadcast(f32[] c0), dimensions={}
greater-than = pred[32,32,32]{2,1,0} compare(f32[32,32,32]{2,1,0} p1.1,
f32[32,32,32]{2,1,0} broadcast), direction=GT
p0.1 = f32[32,32,32]{2,1,0} parameter(0)
ROOT select = f32[32,32,32]{2,1,0} select(pred[32,32,32]{2,1,0}
greater-than, f32[32,32,32]{2,1,0} p0.1, f32[32,32,32]{2,1,0} broadcast)
}
fused_reduce {
p0.2 = f32[32,32,32]{2,1,0} parameter(0)
c1 = f32[] constant(0)
r1 = f32[32,32]{1,0} reduce(p0.2, c1), dimensions={2},
to_apply=scalar_add_computation
mul = f32[32,32,32]{2,1,0} multiply(p0.2, p0.2)
r2 = f32[32,32]{1,0} reduce(mul, c1), dimensions={2},
to_apply=scalar_add_computation
ROOT tuple = (f32[32,32]{1,0}, f32[32,32]{1,0}) tuple(r1, r2)
}
ENTRY reduce {
p0 = f32[32,32,32]{2,1,0} parameter(0)
p1 = f32[32,32,32]{2,1,0} parameter(1)
select = f32[32,32,32]{2,1,0} fusion(p0, p1), kind=kLoop, calls=fused_select
fusion = (f32[32,32]{1,0}, f32[32,32]{1,0}) fusion(select), kind=kInput,
calls=fused_reduce
gte0 = f32[32,32]{1,0} get-tuple-element(fusion), index=0
gte1 = f32[32,32]{1,0} get-tuple-element(fusion), index=1
ROOT root = (f32[32,32]{1,0}, f32[32,32]{1,0}, f32[32,32,32]{2,1,0})
tuple(gte1, gte1, select)
})"))
.value();
ASSERT_TRUE(mof_.Run(module.get()).value());
SCOPED_TRACE(module->ToString());
const HloInstruction* root = module->entry_computation()->root_instruction();
const HloInstruction* fusion = nullptr;
ASSERT_THAT(root,
GmockMatch(m::Tuple(m::GetTupleElement(m::Fusion(&fusion)),
m::GetTupleElement(), m::GetTupleElement())));
ASSERT_TRUE(fusion->IsMultiOutputFusion());
EXPECT_THAT(fusion->fused_expression_root(),
GmockMatch(m::Tuple(m::Reduce(), m::Reduce(), m::Select())));
}
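// A kLoop consumer whose root is a reduce is not a valid multi-output fusion
// target for the elementwise producer.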
TEST_F(MultiOutputFusionTest, ProducerConsumerFusionDoNotFuseLoopReduceFusion) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_element_wise {
p0.1 = f32[2,2,2]{2,1,0} parameter(0)
p1.1 = f32[2,2,2]{2,1,0} parameter(1)
ROOT root = f32[2,2,2]{2,1,0} add(p0.1, p1.1)
}
fused_reduce {
p0.2 = f32[2,2,2]{2,1,0} parameter(0)
mul = f32[2,2,2]{2,1,0} multiply(f32[2,2,2]{2,1,0} p0.2,
f32[2,2,2]{2,1,0} p0.2)
broadcast = f32[2,2,2,2]{3,2,1,0} broadcast(mul), dimensions={3,2,1}
c1 = f32[] constant(0)
ROOT reduce = f32[2,2]{1,0} reduce(f32[2,2,2,2]{3,2,1,0} broadcast,
f32[] c1), dimensions={1,3}, to_apply=scalar_add_computation
}
ENTRY reduce {
p0 = f32[2,2,2]{2,1,0} parameter(0)
p1 = f32[2,2,2]{2,1,0} parameter(1)
element_wise = f32[2,2,2]{2,1,0} fusion(p0, p1), kind=kLoop, calls=fused_element_wise
fusion = f32[2,2]{1,0} fusion(element_wise), kind=kLoop, calls=fused_reduce
ROOT root = (f32[2,2]{1,0}, f32[2,2,2]{2,1,0}) tuple(fusion, element_wise)
})"))
.value();
ASSERT_FALSE(mof_.Run(module.get()).value());
}
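// f16 variant of the earlier select-plus-reduces test: the select producer is
// again fused into the two-reduce consumer as a third output.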
TEST_F(MultiOutputFusionTest,
ProducerConsumerFusionFp16LoopFusionAndReduceFusion) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_select {
p1.1 = f16[32,32,32]{2,1,0} parameter(1)
c0 = f16[] constant(0)
broadcast = f16[32,32,32]{2,1,0} broadcast(f16[] c0), dimensions={}
greater-than = pred[32,32,32]{2,1,0} compare(f16[32,32,32]{2,1,0} p1.1,
f16[32,32,32]{2,1,0} broadcast), direction=GT
p0.1 = f16[32,32,32]{2,1,0} parameter(0)
ROOT select = f16[32,32,32]{2,1,0} select(pred[32,32,32]{2,1,0}
greater-than, f16[32,32,32]{2,1,0} p0.1, f16[32,32,32]{2,1,0} broadcast)
}
fused_reduce {
p0.2 = f16[32,32,32]{2,1,0} parameter(0)
convert = f32[32,32,32]{2,1,0} convert(p0.2)
c1 = f32[] constant(0)
r1 = f32[32,32]{1,0} reduce(convert, c1), dimensions={2},
to_apply=scalar_add_computation
mul = f32[32,32,32]{2,1,0} multiply(convert, convert)
r2 = f32[32,32]{1,0} reduce(mul, c1), dimensions={2},
to_apply=scalar_add_computation
ROOT tuple = (f32[32,32]{1,0}, f32[32,32]{1,0}) tuple(r1, r2)
}
ENTRY reduce {
p0 = f16[32,32,32]{2,1,0} parameter(0)
p1 = f16[32,32,32]{2,1,0} parameter(1)
select = f16[32,32,32]{2,1,0} fusion(p0, p1), kind=kLoop, calls=fused_select
fusion = (f32[32,32]{1,0}, f32[32,32]{1,0}) fusion(select), kind=kInput,
calls=fused_reduce
gte0 = f32[32,32]{1,0} get-tuple-element(fusion), index=0
gte1 = f32[32,32]{1,0} get-tuple-element(fusion), index=1
ROOT root = (f32[32,32]{1,0}, f32[32,32]{1,0}, f16[32,32,32]{2,1,0})
tuple(gte1, gte1, select)
})"))
.value();
ASSERT_TRUE(mof_.Run(module.get()).value());
SCOPED_TRACE(module->ToString());
const HloInstruction* root = module->entry_computation()->root_instruction();
const HloInstruction* fusion = nullptr;
ASSERT_THAT(root,
GmockMatch(m::Tuple(m::GetTupleElement(m::Fusion(&fusion)),
m::GetTupleElement(), m::GetTupleElement())));
ASSERT_TRUE(fusion->IsMultiOutputFusion());
EXPECT_THAT(fusion->fused_expression_root(),
GmockMatch(m::Tuple(m::Reduce(), m::Reduce(), m::Select())));
}
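// The loop fusion transposes between layouts, which is reduce-unfriendly, so
// it is not fused into the reduce consumer.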
TEST_F(MultiOutputFusionTest,
ProducerConsumerFusionReduceUnfriendlyLoopFusion) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
mixed_input_layouts_computation {
p0.1 = f16[128,32,32,1024]{3,2,1,0} parameter(0)
p1.1 = f16[128,1024,32,32]{3,2,1,0} parameter(1)
transpose = f16[128,32,32,1024]{3,2,1,0} transpose(p1.1), dimensions={0,2,3,1}
c0 = f16[] constant(0)
broadcast = f16[128,32,32,1024]{3,2,1,0} broadcast(c0), dimensions={}
greater-than = pred[128,32,32,1024]{3,2,1,0} compare(transpose, broadcast), direction=GT
ROOT root = f16[128,32,32,1024]{3,2,1,0} select(greater-than, p0.1, broadcast)
}
fused_reduce {
p0.2 = f16[128,32,32,1024]{3,2,1,0} parameter(0)
convert = f32[128,32,32,1024]{3,2,1,0} convert(p0.2)
c0.2 = f32[] constant(0)
ROOT reduce = f32[1024]{0} reduce(convert, c0.2), dimensions={0,1,2}, to_apply=scalar_add_computation
}
ENTRY reduce {
p0 = f16[128,32,32,1024]{3,2,1,0} parameter(0)
p1 = f16[128,1024,32,32]{3,2,1,0} parameter(1)
loop_fusion = f16[128,32,32,1024]{3,2,1,0} fusion(p0, p1), kind=kLoop, calls=mixed_input_layouts_computation
reduce_fusion = f32[1024]{0} fusion(loop_fusion), kind=kInput, calls=fused_reduce
ROOT root = (f32[1024]{0}, f16[128,32,32,1024]{3,2,1,0}) tuple(reduce_fusion, loop_fusion)
})"))
.value();
ASSERT_FALSE(mof_.Run(module.get()).value());
}
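// Fusing both reduce consumers with both producers would create a cycle, so
// exactly one multi-output fusion is formed.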
TEST_F(MultiOutputFusionTest, ProducerConsumerFusionAvoidsCycles) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_add {
p0 = f32[32,32,32]{2,1,0} parameter(0)
p1 = f32[32,32,32]{2,1,0} parameter(1)
ROOT add = f32[32,32,32]{2,1,0} add(p0, p1)
}
fused_mul {
p2 = f32[64,64,64]{2,1,0} parameter(0)
p3 = f32[64,64,64]{2,1,0} parameter(1)
ROOT multiply = f32[64,64,64]{2,1,0} multiply(p2, p3)
}
fused_reduce_1 {
p4 = f32[32,32,32]{2,1,0} parameter(0)
p5 = f32[64,64,64]{2,1,0} parameter(1)
slice = f32[32,32,32]{2,1,0} slice(p5), slice={[0:32], [0:32], [0:32]}
add = f32[32,32,32]{2,1,0} add(p4, slice)
c0 = f32[] constant(0)
ROOT r1 = f32[32,32]{1,0} reduce(add, c0), dimensions={2},
to_apply=scalar_add_computation
}
fused_reduce_2 {
p6 = f32[32,32,32]{2,1,0} parameter(0)
p7 = f32[64,64,64]{2,1,0} parameter(1)
c0 = f32[] constant(0)
pad = f32[64,64,64]{2,1,0} pad(p6, c0), padding=16_16x16_16x16_16
mul = f32[64,64,64]{2,1,0} multiply(pad, p7)
ROOT r1 = f32[64,64]{1,0} reduce(mul, c0), dimensions={2},
to_apply=scalar_add_computation
}
ENTRY reduce {
p8 = f32[32,32,32]{2,1,0} parameter(0)
p9 = f32[64,64,64]{2,1,0} parameter(1)
add = f32[32,32,32]{2,1,0} fusion(p8, p8), kind=kLoop, calls=fused_add
mul = f32[64,64,64]{2,1,0} fusion(p9, p9), kind=kLoop, calls=fused_mul
reduce1 = f32[32,32]{1,0} fusion(add, mul), kind=kInput,
calls=fused_reduce_1
reduce2 = f32[64,64]{1,0} fusion(add, mul), kind=kInput,
calls=fused_reduce_2
ROOT root = (f32[32,32,32]{2,1,0}, f32[32,32]{1,0}, f32[64,64]{1,0},
f32[64,64,64]{2,1,0}) tuple(add, reduce1, reduce2, mul)
})"))
.value();
ASSERT_TRUE(mof_.Run(module.get()).value());
SCOPED_TRACE(module->ToString());
EXPECT_EQ(1, CountMultiOutputFusions(module.get()));
}
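// When a producer has both an unfused reduce consumer and a fusion consumer,
// it is fused into the fusion consumer; exactly one multi-output fusion
// results.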
TEST_F(MultiOutputFusionTest, PreferFuseProducerIntoFusionConsumer) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_add {
p0 = f32[32,32,32]{2,1,0} parameter(0)
p1 = f32[32,32,32]{2,1,0} parameter(1)
ROOT add = f32[32,32,32]{2,1,0} add(p0, p1)
}
fused_reduce {
p0 = f32[32,32,32]{2,1,0} parameter(0)
p1 = f32[64,64,64]{2,1,0} parameter(1)
slice = f32[32,32,32]{2,1,0} slice(p1), slice={[0:32], [0:32], [0:32]}
add = f32[32,32,32]{2,1,0} add(p0, slice)
c0 = f32[] constant(0)
ROOT r1 = f32[32,32]{1,0} reduce(add, c0), dimensions={2},
to_apply=scalar_add_computation
}
ENTRY reduce {
p0 = f32[32,32,32]{2,1,0} parameter(0)
p1 = f32[64,64,64]{2,1,0} parameter(1)
add = f32[32,32,32]{2,1,0} fusion(p0, p0), kind=kLoop, calls=fused_add
c0 = f32[] constant(0)
reduce2 = f32[32,32]{1,0} reduce(add, c0), dimensions={2},
to_apply=scalar_add_computation
reduce = f32[32,32]{1,0} fusion(add, p1), kind=kInput, calls=fused_reduce
ROOT root = (f32[32,32,32]{2,1,0}, f32[32,32]{1,0}, f32[32,32]{1,0})
tuple(add, reduce, reduce2)
})"))
.value();
ASSERT_TRUE(mof_.Run(module.get()).value());
SCOPED_TRACE(module->ToString());
int multi_output_fusion_count = 0;
for (auto* computation : module->MakeNonfusionComputations()) {
for (auto* instr : computation->instructions()) {
if (instr->IsMultiOutputFusion()) {
multi_output_fusion_count++;
}
}
}
EXPECT_EQ(1, multi_output_fusion_count);
}
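// With more parameters than MaxOperandsAndOutputsPerFusion(), fusion still
// runs, but every resulting instruction respects the operand-plus-output cap.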
TEST_F(MultiOutputFusionTest, AvoidsLargeFusion) {
constexpr int64_t kNumParams = 200;
ASSERT_GT(kNumParams, MaxOperandsAndOutputsPerFusion());
auto module = CreateNewVerifiedModule();
HloComputation::Builder b(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {10, 100});
std::vector<HloInstruction*> params;
for (int64_t i = 0; i < kNumParams; ++i) {
params.push_back(
b.AddInstruction(HloInstruction::CreateParameter(i, shape, "p")));
}
auto make_fusion = [&](HloInstruction* x, HloInstruction* y) {
HloComputation::Builder sub_builder("subcomp");
auto* p0 = sub_builder.AddInstruction(
HloInstruction::CreateParameter(0, shape, "p"));
auto* p1 = sub_builder.AddInstruction(
HloInstruction::CreateParameter(1, shape, "p"));
sub_builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kMultiply, p0, p1));
HloComputation* subcomp =
module->AddEmbeddedComputation(sub_builder.Build());
return HloInstruction::CreateFusion(
shape, HloInstruction::FusionKind::kLoop, {x, y}, subcomp);
};
auto* sum = b.AddInstruction(make_fusion(params[0], params[1]));
for (int64_t i = 2; i < kNumParams; ++i) {
sum = b.AddInstruction(HloInstruction::CreateBinary(
shape, HloOpcode::kAdd, sum,
b.AddInstruction(make_fusion(params[i - 1], params[i]))));
}
auto computation = module->AddEntryComputation(b.Build());
EXPECT_TRUE(mof_.Run(module.get()).value());
SCOPED_TRACE(module->ToString());
for (const HloInstruction* instr : computation->instructions()) {
EXPECT_LE(instr->operand_count() + ShapeUtil::SubshapeCount(instr->shape()),
MaxOperandsAndOutputsPerFusion())
<< instr->ToString();
}
}
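// Loop fusions rooted at dynamic-update-slice are not multi-output fused;
// DUS is emitted in place and must keep exclusive use of its buffer.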
TEST_F(MultiOutputFusionTest, MultiOutputFusionDUS) {
auto module = ParseAndReturnVerifiedModule(R"(HloModule dus_mof
fusion.1 {
p.0 = f16[50,96,1024]{2,1,0} parameter(0)
p.1 = f16[1,96,1024]{2,1,0} parameter(1)
c.0 = s32[3]{0} constant({0, 0, 0})
ROOT %dynamic-update-slice = f16[50,96,1024]{2,1,0} dynamic-update-slice(p.0, p.1, c.0)
}
fusion.2 {
p.0 = f16[50,96,1024]{2,1,0} parameter(0)
p.1 = f16[1,96,1024]{2,1,0} parameter(1)
c.0 = s32[3]{0} constant({0, 0, 0})
ROOT %dynamic-update-slice = f16[50,96,1024]{2,1,0} dynamic-update-slice(p.0, p.1, c.0)
}
ENTRY entry {
p.00 = f16[50,96,1024]{2,1,0} parameter(0)
p.01 = f16[50,96,1024]{2,1,0} parameter(1)
p.1 = f16[1,96,1024]{2,1,0} parameter(2)
f1 = f16[50,96,1024] fusion(p.00, p.1), kind=kLoop, calls=fusion.1
f2 = f16[50,96,1024] fusion(p.01, p.1), kind=kLoop, calls=fusion.2
ROOT tuple = (f16[50,96,1024],f16[50,96,1024]) tuple(f1, f2)
})")
.value();
ASSERT_FALSE(mof_.Run(module.get()).value());
}
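// Column reductions consume shared memory, so grouping stops at the budget:
// the ten candidate fusions pair up into five multi-output fusions.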
TEST_F(MultiOutputFusionTest, SharedMemoryBudget) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_computation0 {
p0 = f32[64,64] parameter(0)
p1 = f32[64,64] parameter(1)
p2 = f32[] parameter(2)
add = f32[64,64] add(p0, p1)
ROOT reduce = f32[64] reduce(f32[64,64] add, f32[] p2), dimensions={0},
to_apply=scalar_add_computation
}
fused_computation1 {
p0 = f32[64,64] parameter(0)
p1 = f32[64,64] parameter(1)
p2 = f32[] parameter(2)
add = f32[64,64] add(p0, p1)
ROOT reduce = f32[64] reduce(f32[64,64] add, f32[] p2), dimensions={0},
to_apply=scalar_add_computation
}
fused_computation2 {
p0 = f32[64,64] parameter(0)
p1 = f32[64,64] parameter(1)
p2 = f32[] parameter(2)
add = f32[64,64] add(p0, p1)
ROOT reduce = f32[64] reduce(f32[64,64] add, f32[] p2), dimensions={0},
to_apply=scalar_add_computation
}
fused_computation3 {
p0 = f32[64,64] parameter(0)
p1 = f32[64,64] parameter(1)
p2 = f32[] parameter(2)
add = f32[64,64] add(p0, p1)
ROOT reduce = f32[64] reduce(f32[64,64] add, f32[] p2), dimensions={0},
to_apply=scalar_add_computation
}
fused_computation4 {
p0 = f32[64,64] parameter(0)
p1 = f32[64,64] parameter(1)
p2 = f32[] parameter(2)
add = f32[64,64] add(p0, p1)
ROOT reduce = f32[64] reduce(f32[64,64] add, f32[] p2), dimensions={0},
to_apply=scalar_add_computation
}
fused_computation5 {
p0 = f32[64,64] parameter(0)
p1 = f32[64,64] parameter(1)
p2 = f32[] parameter(2)
add = f32[64,64] add(p0, p1)
ROOT reduce = f32[64] reduce(f32[64,64] add, f32[] p2), dimensions={0},
to_apply=scalar_add_computation
}
fused_computation6 {
p0 = f32[64,64] parameter(0)
p1 = f32[64,64] parameter(1)
p2 = f32[] parameter(2)
add = f32[64,64] add(p0, p1)
ROOT reduce = f32[64] reduce(f32[64,64] add, f32[] p2), dimensions={0},
to_apply=scalar_add_computation
}
fused_computation7 {
p0 = f32[64,64] parameter(0)
p1 = f32[64,64] parameter(1)
p2 = f32[] parameter(2)
add = f32[64,64] add(p0, p1)
ROOT reduce = f32[64] reduce(f32[64,64] add, f32[] p2), dimensions={0},
to_apply=scalar_add_computation
}
fused_computation8 {
p0 = f32[64,64] parameter(0)
p1 = f32[64,64] parameter(1)
p2 = f32[] parameter(2)
add = f32[64,64] add(p0, p1)
ROOT reduce = f32[64] reduce(f32[64,64] add, f32[] p2), dimensions={0},
to_apply=scalar_add_computation
}
fused_computation9 {
p0 = f32[64,64] parameter(0)
p1 = f32[64,64] parameter(1)
p2 = f32[] parameter(2)
add = f32[64,64] add(p0, p1)
ROOT reduce = f32[64] reduce(f32[64,64] add, f32[] p2), dimensions={0},
to_apply=scalar_add_computation
}
ENTRY computation {
zero = f32[] constant(0)
param0 = f32[64,64] parameter(0)
param1 = f32[64,64] parameter(1)
param2 = f32[64,64] parameter(2)
param3 = f32[64,64] parameter(3)
param4 = f32[64,64] parameter(4)
param5 = f32[64,64] parameter(5)
param6 = f32[64,64] parameter(6)
param7 = f32[64,64] parameter(7)
param8 = f32[64,64] parameter(8)
param9 = f32[64,64] parameter(9)
out0 = f32[64] fusion(param0, param1, zero), kind=kInput, calls=fused_computation0
out1 = f32[64] fusion(param1, param2, zero), kind=kInput, calls=fused_computation1
out2 = f32[64] fusion(param2, param3, zero), kind=kInput, calls=fused_computation2
out3 = f32[64] fusion(param3, param4, zero), kind=kInput, calls=fused_computation3
out4 = f32[64] fusion(param4, param5, zero), kind=kInput, calls=fused_computation4
out5 = f32[64] fusion(param5, param6, zero), kind=kInput, calls=fused_computation5
out6 = f32[64] fusion(param6, param7, zero), kind=kInput, calls=fused_computation6
out7 = f32[64] fusion(param7, param8, zero), kind=kInput, calls=fused_computation7
out8 = f32[64] fusion(param8, param9, zero), kind=kInput, calls=fused_computation8
out9 = f32[64] fusion(param9, param0, zero), kind=kInput, calls=fused_computation9
ROOT out = (f32[64], f32[64], f32[64], f32[64], f32[64], f32[64], f32[64], f32[64], f32[64], f32[64]) tuple(f32[64] out0, f32[64] out1, f32[64] out2, f32[64] out3, f32[64] out4, f32[64] out5, f32[64] out6, f32[64] out7, f32[64] out8, f32[64] out9)
}
)"))
.value();
ASSERT_TRUE(mof_.Run(module.get()).value());
EXPECT_EQ(5, CountMultiOutputFusions(module.get()));
}
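// There is a cap on how many reductions may be grouped together; the ten
// row-reduce fusions collapse into only two multi-output fusions.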
TEST_F(MultiOutputFusionTest, DoNotGroupTooManyReductions) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_computation0 {
p0 = f32[64,64] parameter(0)
p1 = f32[64,64] parameter(1)
p2 = f32[] parameter(2)
add = f32[64,64] add(p0, p1)
ROOT reduce = f32[64] reduce(f32[64,64] add, f32[] p2), dimensions={1},
to_apply=scalar_add_computation
}
fused_computation1 {
p0 = f32[64,64] parameter(0)
p1 = f32[64,64] parameter(1)
p2 = f32[] parameter(2)
add = f32[64,64] add(p0, p1)
ROOT reduce = f32[64] reduce(f32[64,64] add, f32[] p2), dimensions={1},
to_apply=scalar_add_computation
}
fused_computation2 {
p0 = f32[64,64] parameter(0)
p1 = f32[64,64] parameter(1)
p2 = f32[] parameter(2)
add = f32[64,64] add(p0, p1)
ROOT reduce = f32[64] reduce(f32[64,64] add, f32[] p2), dimensions={1},
to_apply=scalar_add_computation
}
fused_computation3 {
p0 = f32[64,64] parameter(0)
p1 = f32[64,64] parameter(1)
p2 = f32[] parameter(2)
add = f32[64,64] add(p0, p1)
ROOT reduce = f32[64] reduce(f32[64,64] add, f32[] p2), dimensions={1},
to_apply=scalar_add_computation
}
fused_computation4 {
p0 = f32[64,64] parameter(0)
p1 = f32[64,64] parameter(1)
p2 = f32[] parameter(2)
add = f32[64,64] add(p0, p1)
ROOT reduce = f32[64] reduce(f32[64,64] add, f32[] p2), dimensions={1},
to_apply=scalar_add_computation
}
fused_computation5 {
p0 = f32[64,64] parameter(0)
p1 = f32[64,64] parameter(1)
p2 = f32[] parameter(2)
add = f32[64,64] add(p0, p1)
ROOT reduce = f32[64] reduce(f32[64,64] add, f32[] p2), dimensions={1},
to_apply=scalar_add_computation
}
fused_computation6 {
p0 = f32[64,64] parameter(0)
p1 = f32[64,64] parameter(1)
p2 = f32[] parameter(2)
add = f32[64,64] add(p0, p1)
ROOT reduce = f32[64] reduce(f32[64,64] add, f32[] p2), dimensions={1},
to_apply=scalar_add_computation
}
fused_computation7 {
p0 = f32[64,64] parameter(0)
p1 = f32[64,64] parameter(1)
p2 = f32[] parameter(2)
add = f32[64,64] add(p0, p1)
ROOT reduce = f32[64] reduce(f32[64,64] add, f32[] p2), dimensions={1},
to_apply=scalar_add_computation
}
fused_computation8 {
p0 = f32[64,64] parameter(0)
p1 = f32[64,64] parameter(1)
p2 = f32[] parameter(2)
add = f32[64,64] add(p0, p1)
ROOT reduce = f32[64] reduce(f32[64,64] add, f32[] p2), dimensions={1},
to_apply=scalar_add_computation
}
fused_computation9 {
p0 = f32[64,64] parameter(0)
p1 = f32[64,64] parameter(1)
p2 = f32[] parameter(2)
add = f32[64,64] add(p0, p1)
ROOT reduce = f32[64] reduce(f32[64,64] add, f32[] p2), dimensions={1},
to_apply=scalar_add_computation
}
ENTRY computation {
zero = f32[] constant(0)
param0 = f32[64,64] parameter(0)
param1 = f32[64,64] parameter(1)
param2 = f32[64,64] parameter(2)
param3 = f32[64,64] parameter(3)
param4 = f32[64,64] parameter(4)
param5 = f32[64,64] parameter(5)
param6 = f32[64,64] parameter(6)
param7 = f32[64,64] parameter(7)
param8 = f32[64,64] parameter(8)
param9 = f32[64,64] parameter(9)
out0 = f32[64] fusion(param0, param1, zero), kind=kInput, calls=fused_computation0
out1 = f32[64] fusion(param1, param2, zero), kind=kInput, calls=fused_computation1
out2 = f32[64] fusion(param2, param3, zero), kind=kInput, calls=fused_computation2
out3 = f32[64] fusion(param3, param4, zero), kind=kInput, calls=fused_computation3
out4 = f32[64] fusion(param4, param5, zero), kind=kInput, calls=fused_computation4
out5 = f32[64] fusion(param5, param6, zero), kind=kInput, calls=fused_computation5
out6 = f32[64] fusion(param6, param7, zero), kind=kInput, calls=fused_computation6
out7 = f32[64] fusion(param7, param8, zero), kind=kInput, calls=fused_computation7
out8 = f32[64] fusion(param8, param9, zero), kind=kInput, calls=fused_computation8
out9 = f32[64] fusion(param9, param0, zero), kind=kInput, calls=fused_computation9
ROOT out = (f32[64], f32[64], f32[64], f32[64], f32[64], f32[64], f32[64], f32[64], f32[64], f32[64]) tuple(f32[64] out0, f32[64] out1, f32[64] out2, f32[64] out3, f32[64] out4, f32[64] out5, f32[64] out6, f32[64] out7, f32[64] out8, f32[64] out9)
}
)"))
.value();
ASSERT_TRUE(mof_.Run(module.get()).value());
EXPECT_EQ(2, CountMultiOutputFusions(module.get()));
}
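// With f64 data each reduction needs twice the shared memory, so merging
// fusion.2 into the existing multi-output fusion.1 would exceed the budget.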
TEST_F(MultiOutputFusionTest, NoFusionToAvoidUsingTooMuchSharedMemory) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule xla_computation_update_step.10931
%scalar_add_computation.1 (scalar_lhs.1: f64[], scalar_rhs.1: f64[]) -> f64[] {
%scalar_lhs.1 = f64[] parameter(0)
%scalar_rhs.1 = f64[] parameter(1)
ROOT %add.1257 = f64[] add(f64[] %scalar_lhs.1, f64[] %scalar_rhs.1)
}
%fused_computation.1 (param_0.8: f64[64,64], param_1.11: f64[64,64], param_2.9: f64[64,64]) -> (f64[64], f64[64]) {
%param_0.8 = f64[64,64]{1,0} parameter(0)
%param_1.11 = f64[64,64]{1,0} parameter(1)
%multiply.2 = f64[64,64]{1,0} multiply(f64[64,64]{1,0} %param_0.8, f64[64,64]{1,0} %param_1.11)
%constant_5217.3 = f64[] constant(0)
%broadcast.1 = f64[64,64]{1,0} broadcast(f64[] %constant_5217.3), dimensions={}
%multiply.0 = f64[64,64]{1,0} multiply(f64[64,64]{1,0} %multiply.2, f64[64,64]{1,0} %broadcast.1)
%reduce.0 = f64[64]{0} reduce(f64[64,64]{1,0} %multiply.0, f64[] %constant_5217.3), dimensions={0}, to_apply=%scalar_add_computation.1
%param_2.9 = f64[64,64]{1,0} parameter(2)
%multiply.1514.clone.0.clone.1 = f64[64,64]{1,0} multiply(f64[64,64]{1,0} %param_2.9, f64[64,64]{1,0} %param_1.11)
%constant_5217.1.clone.1 = f64[] constant(0)
%broadcast.0.clone.1 = f64[64,64]{1,0} broadcast(f64[] %constant_5217.1.clone.1), dimensions={}
%multiply.1341.clone.0.clone.1 = f64[64,64]{1,0} multiply(f64[64,64]{1,0} %multiply.1514.clone.0.clone.1, f64[64,64]{1,0} %broadcast.0.clone.1)
%reduce.630.clone.0.clone.1 = f64[64]{0} reduce(f64[64,64]{1,0} %multiply.1341.clone.0.clone.1, f64[] %constant_5217.1.clone.1), dimensions={0}, to_apply=%scalar_add_computation.1
ROOT %tuple = (f64[64]{0}, f64[64]{0}) tuple(f64[64]{0} %reduce.0, f64[64]{0} %reduce.630.clone.0.clone.1)
}
%primitive_computation_add__1.6426 (parameter.6427: f64[], parameter.6428: f64[]) -> f64[] {
%parameter.6427 = f64[] parameter(0)
%parameter.6428 = f64[] parameter(1)
ROOT %add.6429 = f64[] add(f64[] %parameter.6427, f64[] %parameter.6428)
}
%fused_computation.2 (param_0.7: f64[64,64], param_1.9: f64[64,64]) -> f64[64] {
%param_0.7 = f64[64,64]{1,0} parameter(0)
%param_1.9 = f64[64,64]{1,0} parameter(1)
%multiply.1 = f64[64,64]{1,0} multiply(f64[64,64]{1,0} %param_0.7, f64[64,64]{1,0} %param_1.9)
%constant_5217.2 = f64[] constant(0)
ROOT %reduce.740.clone.0 = f64[64]{0} reduce(f64[64,64]{1,0} %multiply.1, f64[] %constant_5217.2), dimensions={0}, to_apply=%primitive_computation_add__1.6426
}
ENTRY %reproducer (param_0.1090: f64[64,64], param_1.1377: f64[64,64], param_2.1948: f64[64,64]) -> (f64[64], f64[64], f64[64]) {
%param_0.1090 = f64[64,64]{1,0} parameter(0)
%param_1.1377 = f64[64,64]{1,0} parameter(1)
%param_2.1948 = f64[64,64]{1,0} parameter(2)
%fusion.1 = (f64[64]{0}, f64[64]{0}) fusion(f64[64,64]{1,0} %param_0.1090, f64[64,64]{1,0} %param_1.1377, f64[64,64]{1,0} %param_2.1948), kind=kInput, calls=%fused_computation.1
%get-tuple-element = f64[64]{0} get-tuple-element((f64[64]{0}, f64[64]{0}) %fusion.1), index=0
%fusion.2 = f64[64]{0} fusion(f64[64,64]{1,0} %param_0.1090, f64[64,64]{1,0} %param_1.1377), kind=kInput, calls=%fused_computation.2
%get-tuple-element.1 = f64[64]{0} get-tuple-element((f64[64]{0}, f64[64]{0}) %fusion.1), index=1
ROOT %tuple.428 = (f64[64]{0}, f64[64]{0}, f64[64]{0}) tuple(f64[64]{0} %get-tuple-element, f64[64]{0} %fusion.2, f64[64]{0} %get-tuple-element.1)
}
)")
.value();
EXPECT_FALSE(mof_.Run(module.get()).value());
}
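// fusion.1 feeds both the root and fusion.2; multi-output fusing it into
// fusion.2 would duplicate its long dynamic-update-slice chain.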
TEST_F(MultiOutputFusionTest, NoFusionToAvoidCodeDuplication) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule module
and.reduce_sub_computation {
x = pred[] parameter(0)
y = pred[] parameter(1)
ROOT and = pred[] and(x, y)
}
fused_computation.1 {
param_4.658 = f32[2,20,256]{2,0,1} parameter(4)
slice.1385 = f32[2,1,256]{2,0,1} slice(param_4.658), slice={[0:2], [11:12], [0:256]}
constant.6847 = s32[] constant(0)
broadcast.4823 = s32[3]{0} broadcast(constant.6847), dimensions={}
param_9.415 = s32[3]{0} parameter(9)
compare.700 = pred[3]{0} compare(broadcast.4823, param_9.415), direction=LE
constant.6846 = pred[] constant(true)
reduce.221 = pred[] reduce(compare.700, constant.6846), dimensions={0}, to_apply=and.reduce_sub_computation
broadcast.2933 = pred[2,1,256]{2,0,1} broadcast(reduce.221), dimensions={}
param_5.528 = f32[2,512]{1,0} parameter(5)
slice.1384 = f32[2,256]{1,0} slice(param_5.528), slice={[0:2], [0:256]}
bitcast.341 = f32[2,1,256]{2,0,1} bitcast(slice.1384)
constant.5418 = f32[] constant(0)
broadcast.3227 = f32[2,1,256]{2,0,1} broadcast(constant.5418), dimensions={}
select.173 = f32[2,1,256]{2,0,1} select(broadcast.2933, bitcast.341, broadcast.3227)
add.573 = f32[2,1,256]{2,0,1} add(slice.1385, select.173)
param_0.299 = s32[] parameter(0)
constant.5157 = s32[] constant(11)
dynamic-update-slice.189 = f32[2,20,256]{2,0,1} dynamic-update-slice(param_4.658, add.573, param_0.299, constant.5157, param_0.299)
slice.1383 = f32[2,1,256]{2,0,1} slice(dynamic-update-slice.189), slice={[0:2], [10:11], [0:256]}
constant.6800 = s32[] constant(0)
broadcast.4803 = s32[3]{0} broadcast(constant.6800), dimensions={}
param_8.484 = s32[3]{0} parameter(8)
compare.681 = pred[3]{0} compare(broadcast.4803, param_8.484), direction=LE
constant.6798 = pred[] constant(true)
reduce.203 = pred[] reduce(compare.681, constant.6798), dimensions={0}, to_apply=and.reduce_sub_computation
broadcast.2932 = pred[2,1,256]{2,0,1} broadcast(reduce.203), dimensions={}
param_3.1169 = f32[2,512]{1,0} parameter(3)
slice.1382 = f32[2,256]{1,0} slice(param_3.1169), slice={[0:2], [0:256]}
bitcast.340 = f32[2,1,256]{2,0,1} bitcast(slice.1382)
select.172 = f32[2,1,256]{2,0,1} select(broadcast.2932, bitcast.340, broadcast.3227)
add.572 = f32[2,1,256]{2,0,1} add(slice.1383, select.172)
constant.5154 = s32[] constant(10)
dynamic-update-slice.188 = f32[2,20,256]{2,0,1} dynamic-update-slice(dynamic-update-slice.189, add.572, param_0.299, constant.5154, param_0.299)
slice.1381 = f32[2,1,256]{2,0,1} slice(dynamic-update-slice.188), slice={[0:2], [9:10], [0:256]}
constant.6794 = s32[] constant(0)
broadcast.4801 = s32[3]{0} broadcast(constant.6794), dimensions={}
param_7.478 = s32[3]{0} parameter(7)
compare.679 = pred[3]{0} compare(broadcast.4801, param_7.478), direction=LE
constant.6793 = pred[] constant(true)
reduce.201 = pred[] reduce(compare.679, constant.6793), dimensions={0}, to_apply=and.reduce_sub_computation
broadcast.2930 = pred[2,1,256]{2,0,1} broadcast(reduce.201), dimensions={}
param_2.1685 = f32[2,512]{1,0} parameter(2)
slice.1380 = f32[2,256]{1,0} slice(param_2.1685), slice={[0:2], [0:256]}
bitcast.339 = f32[2,1,256]{2,0,1} bitcast(slice.1380)
select.171 = f32[2,1,256]{2,0,1} select(broadcast.2930, bitcast.339, broadcast.3227)
add.571 = f32[2,1,256]{2,0,1} add(slice.1381, select.171)
constant.5153 = s32[] constant(9)
dynamic-update-slice.187 = f32[2,20,256]{2,0,1} dynamic-update-slice(dynamic-update-slice.188, add.571, param_0.299, constant.5153, param_0.299)
slice.1379 = f32[2,1,256]{2,0,1} slice(dynamic-update-slice.187), slice={[0:2], [8:9], [0:256]}
constant.6788 = s32[] constant(0)
broadcast.4799 = s32[3]{0} broadcast(constant.6788), dimensions={}
param_6.495 = s32[3]{0} parameter(6)
compare.677 = pred[3]{0} compare(broadcast.4799, param_6.495), direction=LE
constant.6786 = pred[] constant(true)
reduce.199 = pred[] reduce(compare.677, constant.6786), dimensions={0}, to_apply=and.reduce_sub_computation
broadcast.2929 = pred[2,1,256]{2,0,1} broadcast(reduce.199), dimensions={}
param_1.1408 = f32[2,512]{1,0} parameter(1)
slice.1378 = f32[2,256]{1,0} slice(param_1.1408), slice={[0:2], [0:256]}
bitcast.338 = f32[2,1,256]{2,0,1} bitcast(slice.1378)
select.170 = f32[2,1,256]{2,0,1} select(broadcast.2929, bitcast.338, broadcast.3227)
add.570 = f32[2,1,256]{2,0,1} add(slice.1379, select.170)
constant.5152 = s32[] constant(8)
ROOT dynamic-update-slice.186 = f32[2,20,256]{2,0,1} dynamic-update-slice(dynamic-update-slice.187, add.570, param_0.299, constant.5152, param_0.299)
}
fused_computation.2 {
param_4.655 = f32[2,20,256]{2,0,1} parameter(4)
slice.1369 = f32[2,1,256]{2,0,1} slice(param_4.655), slice={[0:2], [7:8], [0:256]}
param_6.483 = pred[] parameter(6)
broadcast.2927 = pred[2,1,256]{2,0,1} broadcast(param_6.483), dimensions={}
param_5.525 = f32[2,512]{1,0} parameter(5)
slice.1368 = f32[2,256]{1,0} slice(param_5.525), slice={[0:2], [0:256]}
bitcast.333 = f32[2,1,256]{2,0,1} bitcast(slice.1368)
constant.5415 = f32[] constant(0)
broadcast.3225 = f32[2,1,256]{2,0,1} broadcast(constant.5415), dimensions={}
select.161 = f32[2,1,256]{2,0,1} select(broadcast.2927, bitcast.333, broadcast.3225)
add.549 = f32[2,1,256]{2,0,1} add(slice.1369, select.161)
param_0.265 = s32[] parameter(0)
constant.5151 = s32[] constant(7)
dynamic-update-slice.185 = f32[2,20,256]{2,0,1} dynamic-update-slice(param_4.655, add.549, param_0.265, constant.5151, param_0.265)
slice.1367 = f32[2,1,256]{2,0,1} slice(dynamic-update-slice.185), slice={[0:2], [6:7], [0:256]}
constant.6782 = s32[] constant(0)
broadcast.4797 = s32[3]{0} broadcast(constant.6782), dimensions={}
param_9.391 = s32[3]{0} parameter(9)
compare.675 = pred[3]{0} compare(broadcast.4797, param_9.391), direction=LE
constant.6781 = pred[] constant(true)
reduce.197 = pred[] reduce(compare.675, constant.6781), dimensions={0}, to_apply=and.reduce_sub_computation
broadcast.2926 = pred[2,1,256]{2,0,1} broadcast(reduce.197), dimensions={}
param_3.1167 = f32[2,512]{1,0} parameter(3)
slice.1366 = f32[2,256]{1,0} slice(param_3.1167), slice={[0:2], [0:256]}
bitcast.332 = f32[2,1,256]{2,0,1} bitcast(slice.1366)
select.160 = f32[2,1,256]{2,0,1} select(broadcast.2926, bitcast.332, broadcast.3225)
add.548 = f32[2,1,256]{2,0,1} add(slice.1367, select.160)
constant.5150 = s32[] constant(6)
dynamic-update-slice.184 = f32[2,20,256]{2,0,1} dynamic-update-slice(dynamic-update-slice.185, add.548, param_0.265, constant.5150, param_0.265)
slice.1365 = f32[2,1,256]{2,0,1} slice(dynamic-update-slice.184), slice={[0:2], [5:6], [0:256]}
constant.6776 = s32[] constant(0)
broadcast.4794 = s32[3]{0} broadcast(constant.6776), dimensions={}
param_8.464 = s32[3]{0} parameter(8)
compare.673 = pred[3]{0} compare(broadcast.4794, param_8.464), direction=LE
constant.6775 = pred[] constant(true)
reduce.195 = pred[] reduce(compare.673, constant.6775), dimensions={0}, to_apply=and.reduce_sub_computation
broadcast.2925 = pred[2,1,256]{2,0,1} broadcast(reduce.195), dimensions={}
param_2.1684 = f32[2,512]{1,0} parameter(2)
slice.1364 = f32[2,256]{1,0} slice(param_2.1684), slice={[0:2], [0:256]}
bitcast.331 = f32[2,1,256]{2,0,1} bitcast(slice.1364)
select.159 = f32[2,1,256]{2,0,1} select(broadcast.2925, bitcast.331, broadcast.3225)
add.547 = f32[2,1,256]{2,0,1} add(slice.1365, select.159)
constant.5149 = s32[] constant(5)
dynamic-update-slice.183 = f32[2,20,256]{2,0,1} dynamic-update-slice(dynamic-update-slice.184, add.547, param_0.265, constant.5149, param_0.265)
slice.1363 = f32[2,1,256]{2,0,1} slice(dynamic-update-slice.183), slice={[0:2], [4:5], [0:256]}
constant.6770 = s32[] constant(0)
broadcast.4792 = s32[3]{0} broadcast(constant.6770), dimensions={}
param_7.458 = s32[3]{0} parameter(7)
compare.671 = pred[3]{0} compare(broadcast.4792, param_7.458), direction=LE
constant.6769 = pred[] constant(true)
reduce.193 = pred[] reduce(compare.671, constant.6769), dimensions={0}, to_apply=and.reduce_sub_computation
broadcast.2924 = pred[2,1,256]{2,0,1} broadcast(reduce.193), dimensions={}
param_1.1405 = f32[2,512]{1,0} parameter(1)
slice.1362 = f32[2,256]{1,0} slice(param_1.1405), slice={[0:2], [0:256]}
bitcast.330 = f32[2,1,256]{2,0,1} bitcast(slice.1362)
select.158 = f32[2,1,256]{2,0,1} select(broadcast.2924, bitcast.330, broadcast.3225)
add.546 = f32[2,1,256]{2,0,1} add(slice.1363, select.158)
constant.5148 = s32[] constant(4)
ROOT dynamic-update-slice.182 = f32[2,20,256]{2,0,1} dynamic-update-slice(dynamic-update-slice.183, add.546, param_0.265, constant.5148, param_0.265)
}
ENTRY main {
param_0.0 = s32[] parameter(0)
param_1.0 = f32[2,512]{1,0} parameter(1)
param_2.0 = f32[2,512]{1,0} parameter(2)
param_3.0 = f32[2,512]{1,0} parameter(3)
param_4.0 = f32[2,20,256]{2,1,0} parameter(4)
param_5.0 = f32[2,512]{1,0} parameter(5)
param_6.0 = s32[3]{0} parameter(6)
param_7.0 = s32[3]{0} parameter(7)
param_8.0 = s32[3]{0} parameter(8)
param_9.0 = s32[3]{0} parameter(9)
fusion.1 = f32[2,20,256]{2,0,1} fusion(param_0.0, param_1.0, param_2.0, param_3.0, param_4.0, param_5.0, param_6.0, param_7.0, param_8.0, param_9.0), kind=kLoop, calls=fused_computation.1
param_10 = pred[] parameter(10)
fusion.2 = f32[2,20,256]{2,0,1} fusion(param_0.0, param_1.0, param_2.0, param_3.0, fusion.1, param_5.0, param_10, param_7.0, param_8.0, param_9.0), kind=kLoop, calls=fused_computation.2
ROOT root = (f32[2,20,256]{2,0,1}, f32[2,20,256]{2,0,1}) tuple(fusion.1, fusion.2)
}
)")
.value();
auto& debug_options = module->mutable_config().mutable_debug_options();
debug_options.set_xla_gpu_mlir_emitter_level(3);
EXPECT_FALSE(mof_.Run(module.get()).value());
}
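// The entry computation's root must never be drawn into a multi-output
// fusion, even when it shares an operand with a fusible sibling.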
TEST_F(MultiOutputFusionTest, DoNotFuseRoot) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule module
no_op {
arg_empty_tuple = () parameter(0)
ROOT tuple = () tuple()
}
fused_computation {
param_0 = f32[] parameter(0)
ROOT convert = s32[] convert(param_0)
}
ENTRY main {
param_0 = f32[] parameter(0)
fusion = s32[] fusion(param_0), kind=kLoop, calls=fused_computation
tuple = () tuple()
conditional = () conditional(fusion, tuple, tuple), branch_computations={no_op, no_op}
constant = f32[] constant(1)
ROOT root = f32[] add(param_0, constant)
}
)")
.value();
EXPECT_FALSE(mof_.Run(module.get()).value());
}
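// Exercises the cost model: merging these reduce-window fusion candidates
// should be rejected as unprofitable.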
TEST_F(MultiOutputFusionTest, CostBasedNoMerge) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule m
region_3.63 {
Arg_0.64 = f32[] parameter(0)
Arg_1.65 = f32[] parameter(1)
ROOT add.66 = f32[] add(Arg_0.64, Arg_1.65)
}
fused_computation.29 {
param_0.161 = f32[5,32,32,1]{3,2,1,0} parameter(0)
multiply.208 = f32[5,32,32,1]{3,2,1,0} multiply(param_0.161, param_0.161)
bitcast.67 = f32[5,32,32]{2,1,0} bitcast(multiply.208)
constant.265 = f32[] constant(0)
reduce-window.81 = f32[5,30,31]{2,1,0} reduce-window(bitcast.67, constant.265), window={size=1x3x2}, to_apply=region_3.63
constant.264 = f32[] constant(0.166666672)
broadcast.204 = f32[5,30,31]{2,1,0} broadcast(constant.264), dimensions={}
multiply.205 = f32[5,30,31]{2,1,0} multiply(reduce-window.81, broadcast.204)
constant.263 = f32[] constant(0)
reduce-window.80 = f32[5,30,31]{2,1,0} reduce-window(multiply.205, constant.263), window={size=1x2x3 pad=0_0x0_1x1_1}, to_apply=region_3.63
constant.262 = f32[] constant(0.0138888899)
broadcast.201 = f32[5,30,31]{2,1,0} broadcast(constant.262), dimensions={}
multiply.204 = f32[5,30,31]{2,1,0} multiply(reduce-window.80, broadcast.201)
constant.261 = f32[] constant(0)
reduce-window.78 = f32[5,30,31]{2,1,0} reduce-window(multiply.204, constant.261), window={size=1x1x2 pad=0_0x0_0x0_1}, to_apply=region_3.63
constant.113 = f32[] constant(0.5)
broadcast.137 = f32[5,30,31]{2,1,0} broadcast(constant.113), dimensions={}
multiply.125 = f32[5,30,31]{2,1,0} multiply(reduce-window.78, broadcast.137)
constant.114 = f32[] constant(0)
ROOT reduce-window.17 = f32[5,30,31]{2,1,0} reduce-window(multiply.125, constant.114), window={size=1x2x1 pad=0_0x0_1x0_0}, to_apply=region_3.63
}
fused_computation.15 {
constant.108 = f32[] constant(0.5)
broadcast.105 = f32[5,5,30,31]{3,2,1,0} broadcast(constant.108), dimensions={}
param_3.126 = f32[5,30,31]{2,1,0} parameter(3)
constant.295 = f32[] constant(0.25)
broadcast.234 = f32[5,30,31]{2,1,0} broadcast(constant.295), dimensions={}
multiply.242 = f32[5,30,31]{2,1,0} multiply(param_3.126, broadcast.234)
broadcast.233 = f32[5,5,30,31]{3,2,1,0} broadcast(multiply.242), dimensions={0,2,3}
param_2.154 = f32[5,30,31]{2,1,0} parameter(2)
multiply.241 = f32[5,30,31]{2,1,0} multiply(param_2.154, broadcast.234)
broadcast.232 = f32[5,5,30,31]{3,2,1,0} broadcast(multiply.241), dimensions={1,2,3}
multiply.240 = f32[5,5,30,31]{3,2,1,0} multiply(broadcast.233, broadcast.232)
param_1.188 = f32[5,5,30,31]{3,2,1,0} parameter(1)
constant.294 = f32[] constant(0.159154937)
broadcast.231 = f32[5,5,30,31]{3,2,1,0} broadcast(constant.294), dimensions={}
multiply.239 = f32[5,5,30,31]{3,2,1,0} multiply(param_1.188, broadcast.231)
param_0.164 = f32[5,5,30,31]{3,2,1,0} parameter(0)
add.19 = f32[5,5,30,31]{3,2,1,0} add(multiply.239, param_0.164)
constant.293 = f32[] constant(0)
reduce-window.90 = f32[5,5,30,31]{3,2,1,0} reduce-window(add.19, constant.293), window={size=1x1x1x2 pad=0_0x0_0x0_0x0_1}, to_apply=region_3.63
constant.292 = f32[] constant(0.5)
broadcast.230 = f32[5,5,30,31]{3,2,1,0} broadcast(constant.292), dimensions={}
multiply.238 = f32[5,5,30,31]{3,2,1,0} multiply(reduce-window.90, broadcast.230)
constant.291 = f32[] constant(0)
reduce-window.89 = f32[5,5,30,31]{3,2,1,0} reduce-window(multiply.238, constant.291), window={size=1x1x2x1 pad=0_0x0_0x0_1x0_0}, to_apply=region_3.63
constant.290 = f32[] constant(0.25)
broadcast.229 = f32[5,5,30,31]{3,2,1,0} broadcast(constant.290), dimensions={}
multiply.237 = f32[5,5,30,31]{3,2,1,0} multiply(reduce-window.89, broadcast.229)
multiply.236 = f32[5,5,30,31]{3,2,1,0} multiply(multiply.237, multiply.237)
subtract.10 = f32[5,5,30,31]{3,2,1,0} subtract(multiply.240, multiply.236)
constant.289 = f32[] constant(0)
broadcast.228 = f32[5,5,30,31]{3,2,1,0} broadcast(constant.289), dimensions={}
maximum.6 = f32[5,5,30,31]{3,2,1,0} maximum(subtract.10, broadcast.228)
sqrt.6 = f32[5,5,30,31]{3,2,1,0} sqrt(maximum.6)
constant.110 = f32[] constant(0)
broadcast.107 = f32[5,5,30,31]{3,2,1,0} broadcast(constant.110), dimensions={}
compare.4 = pred[5,5,30,31]{3,2,1,0} compare(sqrt.6, broadcast.107), direction=EQ
constant.243 = f32[] constant(0.159154937)
broadcast.193 = f32[5,5,30,31]{3,2,1,0} broadcast(constant.243), dimensions={}
multiply.194 = f32[5,5,30,31]{3,2,1,0} multiply(param_1.188, broadcast.193)
add.15 = f32[5,5,30,31]{3,2,1,0} add(multiply.194, param_0.164)
constant.242 = f32[] constant(0)
reduce-window.66 = f32[5,5,30,31]{3,2,1,0} reduce-window(add.15, constant.242), window={size=1x1x1x2 pad=0_0x0_0x0_0x0_1}, to_apply=region_3.63
constant.241 = f32[] constant(0.5)
broadcast.192 = f32[5,5,30,31]{3,2,1,0} broadcast(constant.241), dimensions={}
multiply.193 = f32[5,5,30,31]{3,2,1,0} multiply(reduce-window.66, broadcast.192)
constant.240 = f32[] constant(0)
reduce-window.65 = f32[5,5,30,31]{3,2,1,0} reduce-window(multiply.193, constant.240), window={size=1x1x2x1 pad=0_0x0_0x0_1x0_0}, to_apply=region_3.63
constant.239 = f32[] constant(0.25)
broadcast.191 = f32[5,5,30,31]{3,2,1,0} broadcast(constant.239), dimensions={}
multiply.192 = f32[5,5,30,31]{3,2,1,0} multiply(reduce-window.65, broadcast.191)
compare.3 = pred[5,5,30,31]{3,2,1,0} compare(multiply.192, broadcast.107), direction=EQ
and.1 = pred[5,5,30,31]{3,2,1,0} and(compare.4, compare.3)
constant.109 = f32[] constant(1.57079637)
broadcast.104 = f32[5,5,30,31]{3,2,1,0} broadcast(constant.109), dimensions={}
atan2.1 = f32[5,5,30,31]{3,2,1,0} atan2(sqrt.6, multiply.192)
select.4 = f32[5,5,30,31]{3,2,1,0} select(and.1, broadcast.104, atan2.1)
constant.107 = f32[] constant(0.159154937)
broadcast.106 = f32[5,5,30,31]{3,2,1,0} broadcast(constant.107), dimensions={}
multiply.100 = f32[5,5,30,31]{3,2,1,0} multiply(select.4, broadcast.106)
ROOT subtract.3 = f32[5,5,30,31]{3,2,1,0} subtract(broadcast.105, multiply.100)
}
fused_computation.4 {
param_0.172 = f32[5,30,31]{2,1,0} parameter(0)
constant.315 = f32[] constant(0.125)
broadcast.242 = f32[5,30,31]{2,1,0} broadcast(constant.315), dimensions={}
multiply.250 = f32[5,30,31]{2,1,0} multiply(param_0.172, broadcast.242)
constant.314 = f32[] constant(0)
reduce-window.100 = f32[5,30,31]{2,1,0} reduce-window(multiply.250, constant.314), window={size=1x3x3 pad=0_0x1_1x1_1}, to_apply=region_3.63
constant.79 = f32[] constant(0.055555556)
broadcast.85 = f32[5,30,31]{2,1,0} broadcast(constant.79), dimensions={}
multiply.80 = f32[5,30,31]{2,1,0} multiply(reduce-window.100, broadcast.85)
constant.81 = f32[] constant(0)
reduce-window.1 = f32[5,30,31]{2,1,0} reduce-window(multiply.80, constant.81), window={size=1x3x3 pad=0_0x1_1x1_1}, to_apply=region_3.63
constant.80 = f32[] constant(0.111111112)
broadcast.86 = f32[5,30,31]{2,1,0} broadcast(constant.80), dimensions={}
multiply.79 = f32[5,30,31]{2,1,0} multiply(reduce-window.1, broadcast.86)
bitcast.26 = f32[5,930]{1,0} bitcast(multiply.79)
ROOT reduce.8 = f32[5]{0} reduce(bitcast.26, constant.81), dimensions={1}, to_apply=region_3.63
}
ENTRY e {
Arg_0.1 = f32[5,32,32,1]{3,2,1,0} parameter(0)
p1 = f32[5,5,30,31]{3,2,1,0} parameter(1)
p2 = f32[5,5,30,31]{3,2,1,0} parameter(2)
p3 = f32[5,30,31]{2,1,0} parameter(3)
fusion.29 = f32[5,30,31]{2,1,0} fusion(Arg_0.1), kind=kLoop, calls=fused_computation.29
fusion.15 = f32[5,5,30,31]{3,2,1,0} fusion(p2, p1, p3, fusion.29), kind=kLoop, calls=fused_computation.15
ROOT fusion.4 = f32[5]{0} fusion(fusion.29), kind=kInput, calls=fused_computation.4
})")
.value();
EXPECT_FALSE(mof_.Run(module.get()).value());
}
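// The two candidate siblings below slice disjoint column ranges of p0, so
// multi-output fusion would not eliminate any reads of the parameter and
// must not fire.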
TEST_F(MultiOutputFusionTest, NoOverlappingRead) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule module
fused_computation_1 {
p0.1 = f32[100,200]{1,0} parameter(0)
slice.0 = f32[50,100]{1,0} slice(p0.1), slice={[0:50],[0:100]}
mul = f32[50,100]{1,0} multiply(slice.0, slice.0)
exp = f32[50,100]{1,0} exponential(slice.0)
ROOT tuple = (f32[50,100]{1,0}, f32[50,100]{1,0}) tuple(mul, exp)
}
fused_computation_2 {
p0.2 = f32[100,200]{1,0} parameter(0)
slice.1 = f32[50,100]{1,0} slice(p0.2), slice={[0:50],[100:200]}
const.2 = f32[] constant(0)
broadcast = f32[50,100]{1,0} broadcast(const.2), dimensions={}
ROOT add = f32[50,100]{1,0} add(slice.1, broadcast)
}
ENTRY entry {
p0 = f32[100,200]{1,0} parameter(0)
fusion.1 = (f32[50,100]{1,0}, f32[50,100]{1,0}) fusion(p0), kind=kLoop,
calls=fused_computation_1
gte0 = f32[50,100]{1,0} get-tuple-element(fusion.1), index=0
gte1 = f32[50,100]{1,0} get-tuple-element(fusion.1), index=1
fusion.2 = f32[50,100]{1,0} fusion(p0), kind=kLoop,
calls=fused_computation_2
ROOT root = (f32[50,100]{1,0}, f32[50,100]{1,0}, f32[50,100]{1,0})
tuple(gte0, gte1, fusion.2)
})")
.value();
EXPECT_FALSE(mof_.Run(module.get()).value());
}
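// Same structure as above, but the two slices of p0 overlap, so fusing the
// readers shares the overlapping loads and fusion is expected to fire.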
TEST_F(MultiOutputFusionTest, OverlappingRead) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule module
fused_computation_1 {
p0.1 = f32[100,200]{1,0} parameter(0)
slice.0 = f32[50,100]{1,0} slice(p0.1), slice={[0:50],[50:150]}
mul = f32[50,100]{1,0} multiply(slice.0, slice.0)
exp = f32[50,100]{1,0} exponential(slice.0)
ROOT tuple = (f32[50,100]{1,0}, f32[50,100]{1,0}) tuple(mul, exp)
}
fused_computation_2 {
p0.2 = f32[100,200]{1,0} parameter(0)
slice.1 = f32[50,100]{1,0} slice(p0.2), slice={[30:80],[20:120]}
const.2 = f32[] constant(0)
broadcast = f32[50,100]{1,0} broadcast(const.2), dimensions={}
ROOT add = f32[50,100]{1,0} add(slice.1, broadcast)
}
ENTRY entry {
p0 = f32[100,200]{1,0} parameter(0)
fusion.1 = (f32[50,100]{1,0}, f32[50,100]{1,0}) fusion(p0), kind=kLoop,
calls=fused_computation_1
gte0 = f32[50,100]{1,0} get-tuple-element(fusion.1), index=0
gte1 = f32[50,100]{1,0} get-tuple-element(fusion.1), index=1
fusion.2 = f32[50,100]{1,0} fusion(p0), kind=kLoop,
calls=fused_computation_2
ROOT root = (f32[50,100]{1,0}, f32[50,100]{1,0}, f32[50,100]{1,0})
tuple(gte0, gte1, fusion.2)
})")
.value();
EXPECT_TRUE(mof_.Run(module.get()).value());
}
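// Fixture that runs the transpose multi-output fusion tests with the MLIR
// emitters (--xla_gpu_mlir_emitter_level=3).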
class TransposeMultiOutputFusionTest : public MultiOutputFusionTest {
DebugOptions GetDebugOptionsForTest() override {
DebugOptions debug_options =
MultiOutputFusionTest::GetDebugOptionsForTest();
debug_options.set_xla_gpu_mlir_emitter_level(3);
return debug_options;
}
};
TEST_F(TransposeMultiOutputFusionTest, MultipleTransposes) {
const char* hlo = R"(
HloModule module
fused_computation {
param_0.1 = f32[16,32]{1,0} parameter(0)
s.1 = f32[16,32]{1,0} sqrt(param_0.1)
ROOT t.1 = f32[32,16]{1,0} transpose(s.1), dimensions={1,0}
}
ENTRY main {
p = f32[16,32]{1,0} parameter(0)
fusion = f32[32,16]{1,0} fusion(p), kind=kInput, calls=fused_computation
t1 = f32[32,16]{1,0} transpose(p), dimensions={1,0}
ROOT t = (f32[32,16]{1,0}, f32[32,16]{1,0}) tuple(fusion, t1)
}
)";
CheckMultiOutputFusion(hlo, R"(
)");
}
TEST_F(TransposeMultiOutputFusionTest, MultipleTransposesDifferentTypes) {
const char* hlo = R"(
HloModule module
fused_computation {
param_0.1 = f16[16,32]{1,0} parameter(0)
s.1 = f32[16,32]{1,0} convert(param_0.1)
ROOT t.1 = f32[32,16]{1,0} transpose(s.1), dimensions={1,0}
}
ENTRY main {
p = f16[16,32]{1,0} parameter(0)
fusion = f32[32,16]{1,0} fusion(p), kind=kInput, calls=fused_computation
t1 = f16[32,16]{1,0} transpose(p), dimensions={1,0}
ROOT t = (f32[32,16]{1,0}, f16[32,16]{1,0}) tuple(fusion, t1)
}
)";
CheckMultiOutputFusion(hlo, R"(
)");
}
TEST_F(TransposeMultiOutputFusionTest, TiledReduceTranspose) {
const char* hlo = R"(
HloModule module
add {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = add(lhs, rhs)
}
fused_computation {
param_0.1 = f32[16,32]{1,0} parameter(0)
s.1 = f32[16,32]{1,0} sqrt(param_0.1)
ROOT t.1 = f32[32,16]{1,0} transpose(s.1), dimensions={1,0}
}
ENTRY main {
p = f32[16,32]{1,0} parameter(0)
fusion = f32[32,16]{1,0} fusion(p), kind=kInput, calls=fused_computation
z = f32[] constant(0)
r1 = f32[32]{0} reduce(p, z), dimensions={0}, to_apply=add
ROOT t = (f32[32,16]{1,0}, f32[32]{0}) tuple(fusion, r1)
}
)";
CheckMultiOutputFusion(hlo, std::nullopt);
}
TEST_F(TransposeMultiOutputFusionTest, IncompatibleTransposes) {
const char* hlo = R"(
HloModule module
fused_computation {
param_0.1 = f32[18,16,32]{2,1,0} parameter(0)
param_1.1 = f32[32,16,18]{2,1,0} parameter(1)
s.1 = f32[18,16,32]{2,1,0} sqrt(param_0.1)
t.1 = f32[32,16,18]{2,1,0} transpose(s.1), dimensions={2,1,0}
sub.1 = f32[32,16,18]{2,1,0} subtract(t.1, param_1.1)
exp.1 = f32[32,16,18]{2,1,0} exponential(sub.1)
ROOT add.1 = f32[32,16,18]{2,1,0} add(exp.1, exp.1)
}
fused_computation.2 {
param_0.2 = f32[18,16,32]{2,1,0} parameter(0)
s.2 = f32[18,16,32]{2,1,0} sqrt(param_0.2)
ROOT t.2 = f32[18,32,16]{2,1,0} transpose(s.2), dimensions={0,2,1}
}
ENTRY main {
p = f32[18,16,32]{2,1,0} parameter(0)
p2 = f32[32,16,18]{2,1,0} parameter(1)
fusion = f32[32,16,18]{2,1,0} fusion(p, p2), kind=kLoop, calls=fused_computation
fusion2 = f32[18,32,16]{2,1,0} fusion(p), kind=kInput, calls=fused_computation.2
ROOT t = (f32[32,16,18]{2,1,0}, f32[18,32,16]{2,1,0}) tuple(fusion, fusion2)
}
)";
CheckMultiOutputFusion(hlo, std::nullopt);
}
TEST_F(TransposeMultiOutputFusionTest, TransposesNoCSE) {
const char* hlo = R"(
HloModule module
fused_computation {
param_0.1 = f32[18,16,32]{2,1,0} parameter(0)
param_1.1 = f32[32,16,18]{2,1,0} parameter(1)
s.1 = f32[18,16,32]{2,1,0} sqrt(param_0.1)
t.1 = f32[32,16,18]{2,1,0} transpose(s.1), dimensions={2,1,0}
sub.1 = f32[32,16,18]{2,1,0} subtract(t.1, param_1.1)
exp.1 = f32[32,16,18]{2,1,0} exponential(sub.1)
exp.2 = f32[32,16,18]{2,1,0} exponential(sub.1)
ROOT add.1 = f32[32,16,18]{2,1,0} add(exp.1, exp.2)
}
fused_computation.2 {
param_0.2 = f32[18,16,32]{2,1,0} parameter(0)
s.2 = f32[18,16,32]{2,1,0} sqrt(param_0.2)
ROOT t.2 = f32[18,32,16]{2,1,0} transpose(s.2), dimensions={0,2,1}
}
ENTRY main {
p = f32[18,16,32]{2,1,0} parameter(0)
p2 = f32[32,16,18]{2,1,0} parameter(1)
fusion = f32[32,16,18]{2,1,0} fusion(p, p2), kind=kLoop, calls=fused_computation
fusion2 = f32[18,32,16]{2,1,0} fusion(p), kind=kInput, calls=fused_computation.2
ROOT t = (f32[32,16,18]{2,1,0}, f32[18,32,16]{2,1,0}) tuple(fusion, fusion2)
}
)";
CheckMultiOutputFusion(hlo, std::nullopt);
}
TEST_F(TransposeMultiOutputFusionTest, TransposeAndInput) {
const char* hlo = R"(
HloModule module
fused_computation {
param_0.1 = f32[16,32]{1,0} parameter(0)
s.1 = f32[16,32]{1,0} sqrt(param_0.1)
ROOT t.1 = f32[32,16]{1,0} transpose(s.1), dimensions={1,0}
}
ENTRY main {
p = f32[16,32]{1,0} parameter(0)
fusion = f32[32,16]{1,0} fusion(p), kind=kInput, calls=fused_computation
c1 = f32[16,32]{1,0} exponential(p)
ROOT t = (f32[32,16]{1,0}, f32[16,32]{1,0}) tuple(fusion, c1)
}
)";
CheckMultiOutputFusion(hlo, R"(
)");
}
TEST_F(TransposeMultiOutputFusionTest, TransposeAndInputEpilogueFusion) {
const char* hlo = R"(
HloModule module
fused_computation {
param_0.1 = f32[1,16,32]{2,1,0} parameter(0)
s.1 = f32[1,16,32]{2,1,0} sqrt(param_0.1)
t.1 = f32[1,32,16]{2,1,0} transpose(s.1), dimensions={0,2,1}
ROOT out = f32[32,16,1]{2,1,0} bitcast(t.1)
}
ENTRY main {
p = f32[1,16,32]{2,1,0} parameter(0)
fusion = f32[32,16,1]{2,1,0} fusion(p), kind=kInput, calls=fused_computation
c1 = f32[1,16,32]{2,1,0} exponential(p)
ROOT t = (f32[32,16,1]{2,1,0}, f32[1,16,32]{2,1,0}) tuple(fusion, c1)
}
)";
CheckMultiOutputFusion(hlo, R"(
)");
}
class ReduceMultiOutputFusionTest : public MultiOutputFusionTest {};
TEST_F(ReduceMultiOutputFusionTest, ReduceAndLoop) {
const char* hlo = R"(
HloModule module
add {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT c = f32[] add(a, b)
}
fused_reduction {
p = f32[200] parameter(0)
z = f32[] constant(0)
e = f32[200] exponential(p)
ROOT r = f32[] reduce(e, z), dimensions={0}, to_apply=add
}
fused_elementwise {
p = f32[200] parameter(0)
ROOT r = f32[200] sqrt(p)
}
ENTRY computation {
p = f32[200] parameter(0)
o1 = f32[200] fusion(p), kind=kLoop, calls=fused_elementwise
o2 = f32[] fusion(p), kind=kInput, calls=fused_reduction
ROOT out = (f32[200], f32[]) tuple(o1, o2)
}
)";
CheckMultiOutputFusion(hlo, R"(
)");
}
TEST_F(ReduceMultiOutputFusionTest, ReduceAndLoopDifferentShape) {
const char* hlo = R"(
HloModule module
add {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT c = f32[] add(a, b)
}
fused_reduction {
p = f32[10,20] parameter(0)
z = f32[] constant(0)
e = f32[10,20] exponential(p)
b = f32[200] bitcast(e)
ROOT r = f32[] reduce(b, z), dimensions={0}, to_apply=add
}
fused_elementwise {
p = f32[10,20] parameter(0)
ROOT r = f32[10,20] sqrt(p)
}
ENTRY computation {
p = f32[10,20] parameter(0)
o1 = f32[10,20] fusion(p), kind=kLoop, calls=fused_elementwise
o2 = f32[] fusion(p), kind=kInput, calls=fused_reduction
ROOT out = (f32[10,20], f32[]) tuple(o1, o2)
}
)";
CheckMultiOutputFusion(hlo, R"(
)");
}
TEST_F(ReduceMultiOutputFusionTest, ReduceAndLoopDifferentShapeDifferentType) {
const char* hlo = R"(
HloModule module, entry_computation_layout={(f16[100,200]{1,0},f32[],f32[])->(f16[100,200]{1,0}, f32[])}
max {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT c = f32[] maximum(a, b)
}
fused_computation {
one_5 = f32[] constant(1)
one_b.5 = f32[100,200]{1,0} broadcast(one_5), dimensions={}
param_1.15 = f16[100,200]{1,0} parameter(1)
c.6 = f32[100,200]{1,0} convert(param_1.15)
param_0.11 = f32[] parameter(0)
b.6 = f32[100,200]{1,0} broadcast(param_0.11), dimensions={}
d.5 = f32[100,200]{1,0} divide(c.6, b.6)
a.6 = f32[100,200]{1,0} add(one_b.5, d.5)
bitcast.1 = f32[20000]{0} bitcast(a.6)
z_1 = f32[] constant(0)
ROOT r.1 = f32[] reduce(bitcast.1, z_1), dimensions={0}, to_apply=max
}
fused_computation.1 {
one_3 = f32[] constant(1)
one_b.3 = f32[100,200]{1,0} broadcast(one_3), dimensions={}
param_2.7 = f16[100,200]{1,0} parameter(2)
c.4 = f32[100,200]{1,0} convert(param_2.7)
param_1.10 = f32[] parameter(1)
b.4 = f32[100,200]{1,0} broadcast(param_1.10), dimensions={}
d.3 = f32[100,200]{1,0} divide(c.4, b.4)
a.4 = f32[100,200]{1,0} add(one_b.3, d.3)
param_0.8 = f32[] parameter(0)
output_scale_broadcast.1 = f32[100,200]{1,0} broadcast(param_0.8), dimensions={}
a_scaled.1 = f32[100,200]{1,0} multiply(a.4, output_scale_broadcast.1)
ROOT a_scaled_converted.1 = f16[100,200]{1,0} convert(a_scaled.1)
}
ENTRY computation {
output_scale = f32[] parameter(2)
input_scale = f32[] parameter(1)
p = f16[100,200]{1,0} parameter(0)
fusion.1 = f16[100,200]{1,0} fusion(output_scale, input_scale, p), kind=kLoop, calls=fused_computation.1
fusion = f32[] fusion(input_scale, p), kind=kInput, calls=fused_computation
ROOT out = (f16[100,200]{1,0}, f32[]) tuple(fusion.1, fusion)
}
)";
CheckMultiOutputFusion(hlo, R"(
)");
}
TEST_F(ReduceMultiOutputFusionTest, GetTupleElementMakeTupleSequence) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test_module
fusion {
p0 = s32[] parameter(0)
p1 = s32[32] parameter(1)
custom-call = (bf16[], s32[], u32[]) custom-call(p1), custom_call_target="my_custom_call"
get-tuple-element.0 = bf16[] get-tuple-element(custom-call), index=0
get-tuple-element.1 = s32[] get-tuple-element(custom-call), index=1
bitcast = s32[1] bitcast(get-tuple-element.1)
dynamic-update-slice = s32[32] dynamic-update-slice(p1, bitcast, p0)
get-tuple-element.2 = u32[] get-tuple-element(custom-call), index=2
ROOT tuple.30 = (bf16[], s32[32], u32[]) tuple(get-tuple-element.0, dynamic-update-slice, get-tuple-element.2)
}
ENTRY entry {
p0 = s32[] parameter(0)
bitcast = s32[32] bitcast(p0)
ROOT address_computation.7.0 = (bf16[], s32[32], u32[]) fusion(p0, bitcast), kind=kCustom, calls=fusion
}
)")
.value();
ASSERT_FALSE(mof_.Run(module.get()).value());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/multi_output_fusion.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/multi_output_fusion_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
da2b505e-fc9f-4953-95f5-53652fefab01 | cpp | tensorflow/tensorflow | save_report | tensorflow/compiler/mlir/quantization/stablehlo/instrumentations/save_report.cc | tensorflow/compiler/mlir/quantization/stablehlo/instrumentations/save_report_test.cc | #include "tensorflow/compiler/mlir/quantization/stablehlo/instrumentations/save_report.h"
#include <optional>
#include <string>
#include "absl/base/nullability.h"
#include "absl/log/log.h"
#include "absl/strings/string_view.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/Operation.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Support/LLVM.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/cc/report.h"
namespace mlir::quant::stablehlo {
namespace {
std::optional<std::string> OptionalStringViewToOptionalString(
std::optional<absl::string_view> view) {
if (view == std::nullopt) return std::nullopt;
return std::make_optional<std::string>(*view);
}
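// Whether `pass` is the stablehlo-quantize-composite-functions pass running
// on a ModuleOp; the report is only built right after that pass.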
bool IsQuantizeCompositeFunctionPass(absl::Nullable<Pass*> pass,
absl::Nullable<Operation*> op) {
return pass != nullptr &&
pass->getArgument() == "stablehlo-quantize-composite-functions" &&
isa_and_nonnull<ModuleOp>(op);
}
bool ShouldSaveReport(absl::Nullable<Pass*> pass, absl::Nullable<Operation*> op,
const std::optional<std::string>& file_path) {
return file_path != std::nullopt && IsQuantizeCompositeFunctionPass(pass, op);
}
void SaveReport(const QuantizationReport& report,
const absl::string_view file_path) {
if (const absl::Status save_status = report.Save(file_path);
save_status.ok()) {
LOG(INFO) << "Successfully saved quantization report to: " << file_path;
} else {
LOG(ERROR) << "Failed to save quantization report to: " << file_path
<< " with status: " << save_status;
}
}
}
SaveQuantizationReportInstrumentation::SaveQuantizationReportInstrumentation(
std::optional<absl::string_view> file_path)
: file_path_(OptionalStringViewToOptionalString(file_path)) {}
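// Prints a quantization report after every run of the
// quantize-composite-functions pass and, if a file path was configured,
// saves it to disk as well.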
void SaveQuantizationReportInstrumentation::runAfterPass(Pass* pass,
Operation* op) {
if (!IsQuantizeCompositeFunctionPass(pass, op)) return;
auto module_op = cast<ModuleOp>(op);
const QuantizationReport report(module_op);
report.Print();
if (!ShouldSaveReport(pass, op, file_path_)) return;
SaveReport(report, *file_path_);
}
} | #include "tensorflow/compiler/mlir/quantization/stablehlo/instrumentations/save_report.h"
#include <memory>
#include <optional>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Support/LogicalResult.h"
#include "tensorflow/compiler/mlir/quantization/common/test_base.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/cc/io.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/passes/passes.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/quantization_config.pb.h"
#include "tsl/platform/protobuf.h"
#include "tsl/platform/status_matchers.h"
namespace mlir::quant::stablehlo {
namespace {
using ::stablehlo::quantization::QuantizationResults;
using ::stablehlo::quantization::io::ReadFileToString;
using ::testing::SizeIs;
using ::testing::StrEq;
using ::tsl::protobuf::TextFormat;
using ::tsl::testing::IsOk;
using ::tsl::testing::StatusIs;
using SaveQuantizationReportInstrumentationTest = QuantizationTestBase;
TEST_F(SaveQuantizationReportInstrumentationTest, SaveReport) {
constexpr absl::string_view kModuleWithCompositeDotGeneral = R"mlir(
func.func @main(%arg0: tensor<1x2xf32>) -> tensor<1x3xf32> {
%cst = "tf.Const"() {value = dense<3.00000000e-1> : tensor<2x3xf32>} : () -> tensor<2x3xf32>
%0 = "quantfork.stats"(%arg0) {layerStats = dense<[6.00000000e-6, 9.00000000e-1]> : tensor<2xf32>} : (tensor<1x2xf32>) -> tensor<1x2xf32>
%1 = "tf.XlaCallModule"(%0, %cst) {Sout = [#tf_type.shape<1x3>], _entry_function = @composite_dot_general_fn, _original_entry_function = "composite_dot_general_fn", _quantization_method = "static_range_ptq { }", _stablehlo_module_attrs = {}, _tfl_quant_trait = "fully_quantizable", device = "", dim_args_spec = [], disabled_checks = [], has_token_input_output = false, module = "", platforms = [], version = 5 : i64} : (tensor<1x2xf32>, tensor<2x3xf32>) -> tensor<1x3xf32>
%2 = "quantfork.stats"(%1) {layerStats = dense<[5.00000000e-6, 7.00000000e-1]> : tensor<2xf32>} : (tensor<1x3xf32>) -> tensor<1x3xf32>
return %2 : tensor<1x3xf32>
}
func.func private @composite_dot_general_fn(%arg0: tensor<1x2xf32>, %arg1: tensor<2x3xf32>) -> tensor<1x3xf32> attributes {_from_xla_call_module} {
%0 = stablehlo.dot_general %arg0, %arg1, contracting_dims = [1] x [0] : (tensor<1x2xf32>, tensor<2x3xf32>) -> tensor<1x3xf32>
return %0 : tensor<1x3xf32>
}
)mlir";
const OwningOpRef<ModuleOp> module_op =
ParseModuleOpString(kModuleWithCompositeDotGeneral);
ASSERT_TRUE(module_op);
PassManager pm(ctx_.get());
QuantizeCompositeFunctionsPassOptions options;
pm.addPass(createQuantizeCompositeFunctionsPass(options));
const std::string report_file_path =
absl::StrCat(testing::TempDir(), "/save_report.txtpb");
pm.addInstrumentation(std::make_unique<SaveQuantizationReportInstrumentation>(
report_file_path));
const LogicalResult run_result = pm.run(*module_op);
ASSERT_TRUE(succeeded(run_result));
const absl::StatusOr<std::string> file_data =
ReadFileToString(report_file_path);
ASSERT_THAT(file_data, IsOk());
QuantizationResults results{};
ASSERT_TRUE(TextFormat::ParseFromString(*file_data, &results));
ASSERT_THAT(results.results(), SizeIs(1));
EXPECT_THAT(results.results(0).quantizable_unit().name(),
StrEq("composite_dot_general_fn"));
EXPECT_TRUE(results.results(0).method().has_static_range_ptq());
}
TEST_F(SaveQuantizationReportInstrumentationTest,
ReportNotSavedWhenNoQuantizeCompositeFunctionsPass) {
constexpr absl::string_view kModuleWithCompositeDotGeneral = R"mlir(
func.func @main(%arg0: tensor<1x2xf32>) -> tensor<1x3xf32> {
%cst = "stablehlo.constant"() {value = dense<3.00000000e-1> : tensor<2x3xf32>} : () -> tensor<2x3xf32>
%0 = "quantfork.stats"(%arg0) {layerStats = dense<[6.00000000e-6, 9.00000000e-1]> : tensor<2xf32>} : (tensor<1x2xf32>) -> tensor<1x2xf32>
%1 = "tf.XlaCallModule"(%0, %cst) {Sout = [#tf_type.shape<1x3>], _entry_function = @composite_dot_general_fn, _original_entry_function = "composite_dot_general_fn", _quantization_method = "static_range_ptq { }", _stablehlo_module_attrs = {}, _tfl_quant_trait = "fully_quantizable", device = "", dim_args_spec = [], disabled_checks = [], has_token_input_output = false, module = "", platforms = [], version = 5 : i64} : (tensor<1x2xf32>, tensor<2x3xf32>) -> tensor<1x3xf32>
%2 = "quantfork.stats"(%1) {layerStats = dense<[5.00000000e-6, 7.00000000e-1]> : tensor<2xf32>} : (tensor<1x3xf32>) -> tensor<1x3xf32>
return %2 : tensor<1x3xf32>
}
func.func private @composite_dot_general_fn(%arg0: tensor<1x2xf32>, %arg1: tensor<2x3xf32>) -> tensor<1x3xf32> attributes {_from_xla_call_module} {
%0 = stablehlo.dot_general %arg0, %arg1, contracting_dims = [1] x [0] : (tensor<1x2xf32>, tensor<2x3xf32>) -> tensor<1x3xf32>
return %0 : tensor<1x3xf32>
}
)mlir";
const OwningOpRef<ModuleOp> module_op =
ParseModuleOpString(kModuleWithCompositeDotGeneral);
ASSERT_TRUE(module_op);
PassManager pm(ctx_.get());
pm.addPass(createPrepareQuantizePass());
const std::string report_file_path = absl::StrCat(
testing::TempDir(),
"/report_not_saved_no_quantize_composite_functions_pass.txtpb");
pm.addInstrumentation(std::make_unique<SaveQuantizationReportInstrumentation>(
report_file_path));
const LogicalResult run_result = pm.run(*module_op);
ASSERT_TRUE(succeeded(run_result));
EXPECT_THAT(ReadFileToString(report_file_path),
StatusIs(absl::StatusCode::kNotFound));
}
TEST_F(SaveQuantizationReportInstrumentationTest,
ReportNotSavedWhenReportFilePathIsNullopt) {
constexpr absl::string_view kModuleWithCompositeDotGeneral = R"mlir(
func.func @main(%arg0: tensor<1x2xf32>) -> tensor<1x3xf32> {
%cst = "stablehlo.constant"() {value = dense<3.00000000e-1> : tensor<2x3xf32>} : () -> tensor<2x3xf32>
%0 = "quantfork.stats"(%arg0) {layerStats = dense<[6.00000000e-6, 9.00000000e-1]> : tensor<2xf32>} : (tensor<1x2xf32>) -> tensor<1x2xf32>
%1 = "tf.XlaCallModule"(%0, %cst) {Sout = [#tf_type.shape<1x3>], _entry_function = @composite_dot_general_fn, _original_entry_function = "composite_dot_general_fn", _quantization_method = "static_range_ptq { }", _stablehlo_module_attrs = {}, _tfl_quant_trait = "fully_quantizable", device = "", dim_args_spec = [], disabled_checks = [], has_token_input_output = false, module = "", platforms = [], version = 5 : i64} : (tensor<1x2xf32>, tensor<2x3xf32>) -> tensor<1x3xf32>
%2 = "quantfork.stats"(%1) {layerStats = dense<[5.00000000e-6, 7.00000000e-1]> : tensor<2xf32>} : (tensor<1x3xf32>) -> tensor<1x3xf32>
return %2 : tensor<1x3xf32>
}
func.func private @composite_dot_general_fn(%arg0: tensor<1x2xf32>, %arg1: tensor<2x3xf32>) -> tensor<1x3xf32> attributes {_from_xla_call_module} {
%0 = stablehlo.dot_general %arg0, %arg1, contracting_dims = [1] x [0] : (tensor<1x2xf32>, tensor<2x3xf32>) -> tensor<1x3xf32>
return %0 : tensor<1x3xf32>
}
)mlir";
const OwningOpRef<ModuleOp> module_op =
ParseModuleOpString(kModuleWithCompositeDotGeneral);
ASSERT_TRUE(module_op);
PassManager pm(ctx_.get());
QuantizeCompositeFunctionsPassOptions options;
pm.addPass(createQuantizeCompositeFunctionsPass(options));
pm.addInstrumentation(std::make_unique<SaveQuantizationReportInstrumentation>(
std::nullopt));
const LogicalResult run_result = pm.run(*module_op);
ASSERT_TRUE(succeeded(run_result));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/stablehlo/instrumentations/save_report.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/stablehlo/instrumentations/save_report_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
f724d8e5-774c-4366-8f95-a977c6960432 | cpp | tensorflow/tensorflow | grpc_client_session | third_party/xla/xla/python/ifrt_proxy/client/grpc_client_session.cc | third_party/xla/xla/python/ifrt_proxy/client/grpc_client_session_test.cc | #include "xla/python/ifrt_proxy/client/grpc_client_session.h"
#include <cstdint>
#include <functional>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include "absl/base/call_once.h"
#include "absl/base/thread_annotations.h"
#include "absl/container/flat_hash_map.h"
#include "absl/functional/bind_front.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
#include "absl/synchronization/notification.h"
#include "grpc/grpc.h"
#include "grpcpp/channel.h"
#include "grpcpp/client_context.h"
#include "grpcpp/create_channel.h"
#include "grpcpp/security/credentials.h"
#include "grpcpp/support/channel_arguments.h"
#include "xla/pjrt/distributed/util.h"
#include "xla/python/ifrt/future.h"
#include "xla/python/ifrt_proxy/common/grpc_credentials.h"
#include "xla/python/ifrt_proxy/common/grpc_ifrt_service.grpc.pb.h"
#include "xla/python/ifrt_proxy/common/ifrt_service.pb.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/threadpool.h"
#include "tsl/platform/unbounded_work_queue.h"
namespace xla {
namespace ifrt {
namespace proxy {
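// Thread-safe registry of per-request callbacks, keyed by op id. Callbacks
// are removed as their responses arrive; PopAll() drains the table on
// teardown.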
class GrpcClientSession::ResponseCallbackTable {
public:
absl::Status Add(OpId op_id, ResponseCallback callback) {
absl::MutexLock l(&mu_);
const bool inserted = table_.insert({op_id, std::move(callback)}).second;
if (!inserted) {
return absl::AlreadyExistsError(
absl::StrCat("Op id ", op_id, " already exists"));
}
return absl::OkStatus();
}
std::optional<ResponseCallback> Pop(OpId op_id) {
absl::MutexLock l(&mu_);
auto it = table_.find(op_id);
if (it == table_.end()) {
return std::nullopt;
}
auto cb = std::move(it->second);
table_.erase(it);
return std::move(cb);
}
absl::flat_hash_map<OpId, ResponseCallback> PopAll() {
absl::flat_hash_map<OpId, ResponseCallback> result;
absl::MutexLock l(&mu_);
result = std::move(table_);
table_ = absl::flat_hash_map<OpId, ResponseCallback>();
return result;
}
private:
absl::Mutex mu_;
absl::flat_hash_map<OpId, ResponseCallback> table_ ABSL_GUARDED_BY(mu_);
};
std::shared_ptr<GrpcClientSession> GrpcClientSession::Create(
std::shared_ptr<grpc::GrpcIfrtService::StubInterface> stub,
GrpcIfrtSessionMetadata metadata,
StreamTerminatedCallback stream_terminated_cb) {
auto context = std::make_unique<::grpc::ClientContext>();
context->AddMetadata("ifrt-proxy-grpc-ifrt-session-metadata-bin",
metadata.SerializeAsString());
std::shared_ptr<GrpcClientSession> result(new GrpcClientSession(
std::move(stub), std::move(context), std::move(stream_terminated_cb)));
return result;
}
GrpcClientSession::GrpcClientSession(
std::shared_ptr<grpc::GrpcIfrtService::StubInterface> stub,
std::unique_ptr<::grpc::ClientContext> context,
StreamTerminatedCallback stream_terminated_cb)
: response_callbacks_(std::make_unique<ResponseCallbackTable>()),
reader_thread_(std::make_unique<tsl::thread::ThreadPool>(
tsl::Env::Default(), "ifrt_proxy_client_grpc_reader",
/*num_threads=*/1)),
stub_(std::move(stub)),
context_(std::move(context)),
stream_(stub_->IfrtSession(context_.get())),
stream_terminated_cb_(std::move(stream_terminated_cb)),
user_futures_work_queue_(std::make_unique<tsl::UnboundedWorkQueue>(
tsl::Env::Default(), "GrpcClientSessionUserFuturesWorkQueue")) {
reader_thread_->Schedule(
absl::bind_front(&GrpcClientSession::ReadLoop, this));
}
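// Future-returning overload of Enqueue(). The promise is fulfilled via a
// dedicated work queue so user continuations never run on the gRPC reader
// thread.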
Future<std::shared_ptr<IfrtResponse>> GrpcClientSession::Enqueue(
std::unique_ptr<IfrtRequest> request) {
auto promise = Future<std::shared_ptr<IfrtResponse>>::CreatePromise();
absl::Status status = Enqueue(
std::move(request),
[promise, queue = user_futures_work_queue_.get()](
absl::StatusOr<std::shared_ptr<IfrtResponse>> response) mutable {
queue->Schedule([promise = std::move(promise),
response = std::move(response)]() mutable -> void {
promise.Set(std::move(response));
});
});
if (!status.ok()) {
user_futures_work_queue_->Schedule([promise, status]() mutable -> void {
promise.Set(std::move(status));
});
}
return Future<std::shared_ptr<IfrtResponse>>(std::move(promise));
}
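// Assigns a fresh op id under the writer lock, registers the callback, then
// writes the request; the callback is unregistered again if the write fails.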
absl::Status GrpcClientSession::Enqueue(std::unique_ptr<IfrtRequest> req,
ResponseCallback callback) {
absl::MutexLock l(&writer_mu_);
const OpId op_id = writer_next_op_id_++;
if (writes_stopped_) {
return absl::FailedPreconditionError(
"GrpcClientSession: writes no longer allowed.");
}
TF_RETURN_IF_ERROR(response_callbacks_->Add(op_id, std::move(callback)));
CHECK_EQ(req->mutable_request_metadata()->op_id(), 0);
req->mutable_request_metadata()->set_op_id(op_id);
if (!stream_->Write(*req)) {
CHECK(response_callbacks_->Pop(op_id).has_value());
return absl::UnknownError("GrpcClientSession: writing to stream failed.");
}
return absl::OkStatus();
}
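// Runs on reader_thread_: reads responses until the stream closes,
// dispatching each one to the callback registered for its op id, then
// finishes the session.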
void GrpcClientSession::ReadLoop() {
while (true) {
auto read_buffer = std::make_unique<IfrtResponse>();
if (!stream_->Read(read_buffer.get())) {
LOG(INFO) << "GrpcClientSession: reader loop is exiting.";
break;
}
const OpId op_id = read_buffer->response_metadata().op_id();
std::optional<ResponseCallback> callback = response_callbacks_->Pop(op_id);
if (callback.has_value()) {
VLOG(1) << "GrpcClientSession: Issuing callback for " << op_id;
(*callback)(std::move(read_buffer));
VLOG(1) << "GrpcClientSession: Done with callback for " << op_id;
} else {
LOG(ERROR) << "Received response with no remaining registered callback: "
<< read_buffer->DebugString();
}
}
reader_thread_stopped_.Notify();
Finish(absl::OkStatus());
}
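// Idempotent teardown, guarded by finish_once_: cancel the gRPC context,
// wait for the reader thread, finish the stream, then fail every
// still-pending callback with the combined client/server status.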
void GrpcClientSession::Finish(const absl::Status& client_status) {
LOG(INFO) << "GrpcClientSession: Finish() called with client status "
<< client_status;
absl::call_once(finish_once_, [&] {
context_->TryCancel();
LOG(INFO) << "GrpcClientSession: Waiting for reader thread to stop.";
reader_thread_stopped_.WaitForNotification();
auto finish_stream_and_get_server_status = [&]() -> absl::Status {
LOG(INFO) << "GrpClientSession: Attempting to call stream->Finish()";
absl::MutexLock l(&writer_mu_);
LOG(INFO) << "GrpClientSession: Attempting to call stream->Finish(), "
"mutex acquired";
absl::Status server_status = xla::FromGrpcStatus(stream_->Finish());
LOG(INFO) << "GrpClientSession: stream->Finish() returned server status "
<< server_status;
CHECK(!writes_stopped_);
writes_stopped_ = true;
return server_status;
};
absl::Status combined_status = finish_stream_and_get_server_status();
combined_status.Update(client_status);
auto all_callbacks = response_callbacks_->PopAll();
for (auto& [_, cb] : all_callbacks) {
if (combined_status.ok()) {
cb(absl::AbortedError("Finish(OK) called."));
} else {
cb(combined_status);
}
}
LOG(INFO) << "GrpClientSession::Finish(): calling terminated cb with "
<< combined_status;
stream_terminated_cb_(combined_status);
});
}
GrpcClientSession::~GrpcClientSession() {
GrpcClientSession::Finish(absl::CancelledError("~GrpcClientSession called."));
reader_thread_.reset();
LOG(INFO) << "Deleting GrpcClientSession.user_futures_work_queue_ ...";
user_futures_work_queue_.reset();
LOG(INFO) << "Deleted GrpcClientSession.user_futures_work_queue_.";
}
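// Builds a channel with unlimited send/receive message sizes (-1 disables
// gRPC's default limits) and wraps it in an IfrtSession stub.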
std::shared_ptr<grpc::GrpcIfrtService::StubInterface> CreateGrpcStub(
absl::string_view server_address) {
::grpc::ChannelArguments args;
args.SetInt(GRPC_ARG_MAX_SEND_MESSAGE_LENGTH, -1);
args.SetInt(GRPC_ARG_MAX_RECEIVE_MESSAGE_LENGTH, -1);
std::shared_ptr<::grpc::Channel> channel = ::grpc::CreateCustomChannel(
std::string(server_address), GetClientCredentials(), args);
VLOG(0) << " Established channel.";
CHECK(channel != nullptr);
std::shared_ptr<grpc::GrpcIfrtService::StubInterface> stub =
grpc::GrpcIfrtService::NewStub(channel);
VLOG(0) << " Created stub.";
CHECK(stub != nullptr);
return stub;
}
}
}
} | #include "xla/python/ifrt_proxy/client/grpc_client_session.h"
#include <atomic>
#include <functional>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <variant>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/base/thread_annotations.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/log/log_sink_registry.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/synchronization/mutex.h"
#include "absl/synchronization/notification.h"
#include "absl/time/time.h"
#include "grpc/support/time.h"
#include "grpcpp/channel.h"
#include "grpcpp/create_channel.h"
#include "grpcpp/server_builder.h"
#include "grpcpp/server_context.h"
#include "grpcpp/support/status.h"
#include "grpcpp/support/sync_stream.h"
#include "xla/python/ifrt_proxy/client/version.h"
#include "xla/python/ifrt_proxy/common/grpc_credentials.h"
#include "xla/python/ifrt_proxy/common/grpc_ifrt_service.grpc.pb.h"
#include "xla/python/ifrt_proxy/common/grpc_ifrt_service.pb.h"
#include "xla/python/ifrt_proxy/common/ifrt_service.pb.h"
#include "xla/python/ifrt_proxy/common/test_utils.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla {
namespace ifrt {
namespace proxy {
namespace {
using ::testing::Not;
using ::tsl::testing::IsOk;
constexpr absl::Duration kSufficientTime = absl::Seconds(5);
GrpcIfrtSessionMetadata Metadata() {
GrpcIfrtSessionMetadata metadata;
metadata.mutable_version()->set_protocol_version(kClientMaxVersion);
return metadata;
}
absl::Status TestError() { return absl::UnknownError("test error"); }
struct Queue : public TestQueue<absl::Status> {
Queue() : TestQueue<absl::Status>(kSufficientTime) {}
};
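// Checks that the statuses form an OK prefix followed by a non-OK tail:
// once one operation fails, every later one must fail too.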
void ExpectHeadAndTail(
std::vector<std::variant<absl::StatusOr<Queue*>, absl::Status>> var_list) {
std::vector<absl::Status> status_list;
for (const auto& v : var_list) {
if (std::holds_alternative<absl::StatusOr<Queue*>>(v)) {
status_list.push_back(std::get<absl::StatusOr<Queue*>>(v).status());
} else {
status_list.push_back(std::get<absl::Status>(v));
}
}
bool seen_not_ok = false;
std::string str;
for (const auto& s : status_list) {
absl::StrAppend(&str, "\n", s.ToString(), "\n-----\n");
}
for (const auto& s : status_list) {
if (!s.ok()) seen_not_ok = true;
if (seen_not_ok) {
EXPECT_THAT(s, Not(IsOk())) << str;
}
}
}
using ServerStream = ::grpc::ServerReaderWriter<IfrtResponse, IfrtRequest>;
using SessionAction = bool;
constexpr SessionAction kContinueSession = true;
constexpr SessionAction kStopSession = false;
using OnSessionStart = std::function<SessionAction()>;
using OnReqReceived =
std::function<SessionAction(const IfrtRequest&, ServerStream*)>;
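// Minimal in-process IFRT service for tests. Without a custom
// on_req_received_ handler it echoes each request's op id back in an empty
// response; either hook can end the session by returning kStopSession.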
class SimpleIfrtService : public grpc::GrpcIfrtService::Service {
public:
SimpleIfrtService(OnReqReceived on_req_received,
OnSessionStart on_session_start)
: on_req_received_(std::move(on_req_received)),
on_session_start_(std::move(on_session_start)) {}
::grpc::Status IfrtSession(::grpc::ServerContext* context,
ServerStream* stream) override {
if (on_session_start_ && on_session_start_() == kStopSession) {
return ::grpc::Status::OK;
}
{
absl::MutexLock l(&mu_);
CHECK(contexts_.insert(context).second);
}
while (true) {
IfrtRequest request;
LOG(INFO) << "Server: waiting on Read().";
if (!stream->Read(&request)) {
LOG(INFO) << "Server: Read() returned false.";
break;
}
LOG(INFO) << "Server: Read() returned true.";
if (!on_req_received_) {
IfrtResponse response;
response.mutable_response_metadata()->set_op_id(
request.request_metadata().op_id());
stream->Write(response);
} else if (on_req_received_(request, stream) == kStopSession) {
break;
}
}
{
absl::MutexLock l(&mu_);
CHECK_EQ(contexts_.erase(context), 1);
}
LOG(INFO) << "Finishing IFRT session";
return ::grpc::Status::OK;
}
void CancelAllServerSessions() {
absl::MutexLock l(&mu_);
for (const auto& context : contexts_) {
context->TryCancel();
}
}
private:
const OnReqReceived on_req_received_;
const OnSessionStart on_session_start_;
absl::Mutex mu_;
absl::flat_hash_set<::grpc::ServerContext*> contexts_ ABSL_GUARDED_BY(mu_);
};
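// Starts the test service on an unused local port and connects a
// GrpcClientSession to it; the session's termination status is pushed onto
// client_finished_q_.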
class ClientAndServer {
public:
explicit ClientAndServer(OnReqReceived on_req_received = nullptr,
OnSessionStart on_session_start = nullptr) {
std::string address =
absl::StrCat("localhost:", tsl::testing::PickUnusedPortOrDie());
::grpc::ServerBuilder builder;
builder.AddListeningPort(address, GetServerCredentials());
ifrt_service_ =
std::make_unique<SimpleIfrtService>(on_req_received, on_session_start);
builder.RegisterService(ifrt_service_.get());
server_ = builder.BuildAndStart();
LOG(INFO) << "Server started and listening on " << address;
absl::FlushLogSinks();
std::shared_ptr<::grpc::Channel> channel =
::grpc::CreateChannel(address, GetClientCredentials());
channel->WaitForConnected(gpr_time_add(
gpr_now(GPR_CLOCK_REALTIME), gpr_time_from_seconds(10, GPR_TIMESPAN)));
LOG(INFO) << "conn_state = " << channel->GetState(false);
auto stub = grpc::GrpcIfrtService::NewStub(channel);
CHECK(stub != nullptr);
client_session_ = GrpcClientSession::Create(
std::move(stub), Metadata(), [this](absl::Status s) {
client_finished_q_.Push(s);
client_finished_notification_.Notify();
});
client_finished_q_.AllowNonEmptyDestruction(true);
}
void StopServer() {
ifrt_service_->CancelAllServerSessions();
server_->Shutdown();
server_->Wait();
}
~ClientAndServer() {
StopServer();
client_session_->Finish(absl::CancelledError("~ClientAndServer"));
client_finished_notification_.WaitForNotificationWithTimeout(
kSufficientTime);
CHECK(client_finished_notification_.HasBeenNotified());
}
GrpcClientSession* client_session() { return client_session_.get(); }
Queue* client_finished_q() { return &client_finished_q_; }
absl::StatusOr<Queue*> SendSimpleRequest() {
owned_queues_.push_back(std::make_unique<Queue>());
Queue* q = owned_queues_.back().get();
auto req = std::make_unique<IfrtRequest>();
TF_RETURN_IF_ERROR(client_session_->Enqueue(
std::move(req), [q](absl::StatusOr<GrpcClientSession::Response> resp) {
q->Push(resp.status());
}));
return q;
}
private:
std::vector<std::unique_ptr<Queue>> owned_queues_;
Queue client_finished_q_;
absl::Notification client_finished_notification_;
std::shared_ptr<GrpcClientSession> client_session_;
std::unique_ptr<::grpc::Server> server_;
std::unique_ptr<SimpleIfrtService> ifrt_service_;
};
TEST(GrpcClientSessionTest, HappyCaseOneRequestWithServerTermination) {
ClientAndServer cs;
TF_ASSERT_OK_AND_ASSIGN(Queue * response_q, cs.SendSimpleRequest());
EXPECT_THAT(response_q->Pop(), IsOk());
EXPECT_EQ(cs.client_finished_q()->PopOrTimeout(), std::nullopt);
cs.StopServer();
EXPECT_THAT(cs.client_finished_q()->Pop(), Not(IsOk()));
}
TEST(GrpcClientSessionTest, HappyCaseTwoRequestsWithClientFinish) {
ClientAndServer cs;
TF_ASSERT_OK_AND_ASSIGN(Queue * response_q_1, cs.SendSimpleRequest());
TF_ASSERT_OK_AND_ASSIGN(Queue * response_q_2, cs.SendSimpleRequest());
EXPECT_THAT(response_q_1->Pop(), IsOk());
EXPECT_THAT(response_q_2->Pop(), IsOk());
EXPECT_EQ(cs.client_finished_q()->PopOrTimeout(), std::nullopt);
cs.client_session()->Finish(TestError());
EXPECT_THAT(cs.client_finished_q()->Pop(), Not(IsOk()));
}
TEST(GrpcClientSessionTest, ServerFinishesDuringFirstRead) {
ClientAndServer cs(
[](auto, auto) { return kStopSession; });
TF_ASSERT_OK_AND_ASSIGN(Queue * response_q_1, cs.SendSimpleRequest());
EXPECT_THAT(response_q_1->Pop(), Not(IsOk()));
absl::StatusOr<Queue*> response_q_2 = cs.SendSimpleRequest();
EXPECT_THAT(response_q_2.status(), Not(IsOk()));
EXPECT_THAT(cs.client_finished_q()->Pop(), Not(IsOk()));
}
TEST(GrpcClientSessionTest, ServerFinishesDuringConstruction) {
ClientAndServer cs(nullptr,
[]() { return kStopSession; });
absl::StatusOr<Queue*> response_q_1 = cs.SendSimpleRequest();
absl::StatusOr<Queue*> response_q_2 = cs.SendSimpleRequest();
ExpectHeadAndTail({response_q_1, response_q_2});
if (response_q_1.ok()) EXPECT_THAT(response_q_1.value()->Pop(), Not(IsOk()));
if (response_q_2.ok()) EXPECT_THAT(response_q_2.value()->Pop(), Not(IsOk()));
EXPECT_THAT(cs.client_finished_q()->Pop(), Not(IsOk()));
}
TEST(GrpcClientSessionTest, ClientFinishesAfterServerConsumesFirstRequest) {
std::atomic<GrpcClientSession*> session_ptr;
ClientAndServer cs(
[session_ptr = &session_ptr](auto, auto) {
session_ptr->load()->Finish(TestError());
return kContinueSession;
});
session_ptr.store(cs.client_session());
TF_ASSERT_OK_AND_ASSIGN(Queue * response_q_1, cs.SendSimpleRequest());
EXPECT_THAT(response_q_1->Pop(), Not(IsOk()));
absl::StatusOr<Queue*> response_q_2 = cs.SendSimpleRequest();
EXPECT_THAT(response_q_2.status(), Not(IsOk()));
EXPECT_THAT(cs.client_finished_q()->Pop(), Not(IsOk()));
}
TEST(GrpcClientSessionTest, ClientFinishesAfterServerWritesFirstResponse) {
std::atomic<GrpcClientSession*> session_ptr;
ClientAndServer cs(
[session_ptr = &session_ptr](const IfrtRequest& r,
ServerStream* s) {
IfrtResponse response;
response.mutable_response_metadata()->set_op_id(
r.request_metadata().op_id());
s->Write(response);
session_ptr->load()->Finish(TestError());
return kContinueSession;
});
session_ptr.store(cs.client_session());
TF_ASSERT_OK_AND_ASSIGN(Queue * response_q_1, cs.SendSimpleRequest());
absl::StatusOr<Queue*> response_q_2 = cs.SendSimpleRequest();
response_q_1->Pop().IgnoreError();
if (response_q_2.ok()) {
EXPECT_THAT(response_q_2.value()->Pop(), Not(IsOk()));
}
EXPECT_THAT(cs.client_finished_q()->Pop(), Not(IsOk()));
}
TEST(GrpcClientSessionTest, ClientFinishesDuringServerConstruction) {
std::atomic<GrpcClientSession*> session_ptr;
absl::Notification init_done;
ClientAndServer cs(nullptr,
[session_ptr = &session_ptr,
init_done = &init_done]() {
init_done->WaitForNotification();
session_ptr->load()->Finish(TestError());
return kContinueSession;
});
session_ptr.store(cs.client_session());
init_done.Notify();
absl::StatusOr<Queue*> response_q_1 = cs.SendSimpleRequest();
absl::StatusOr<Queue*> response_q_2 = cs.SendSimpleRequest();
if (response_q_1.ok()) {
EXPECT_THAT(response_q_1.value()->Pop(), Not(IsOk()));
}
if (response_q_2.ok()) {
EXPECT_THAT(response_q_2.value()->Pop(), Not(IsOk()));
}
ExpectHeadAndTail({response_q_1, response_q_2});
EXPECT_THAT(cs.client_finished_q()->Pop(), Not(IsOk()));
}
TEST(GrpcClientSessionTest, MethodsAfterFinishReturnError) {
ClientAndServer cs;
TF_ASSERT_OK_AND_ASSIGN(Queue * response_q_1, cs.SendSimpleRequest());
cs.client_session()->Finish(TestError());
EXPECT_THAT(cs.SendSimpleRequest(), Not(IsOk()));
response_q_1->AllowNonEmptyDestruction(true);
}
TEST(GrpcClientSessionTest, ReceivingBadIfrtResponseDoesNotCrash) {
ClientAndServer cs(
[](const IfrtRequest& r, ServerStream* s) mutable {
IfrtResponse resp;
resp.mutable_response_metadata()->set_op_id(2000);
s->Write(resp);
resp.mutable_response_metadata()->set_op_id(
r.request_metadata().op_id());
s->Write(resp);
return kContinueSession;
});
TF_ASSERT_OK_AND_ASSIGN(Queue * response_q, cs.SendSimpleRequest());
EXPECT_THAT(response_q->Pop(), IsOk());
}
TEST(GrpcClientSessionTest, BadInitialChannelFailsPromptly) {
std::string address =
absl::StrCat("localhost:", tsl::testing::PickUnusedPortOrDie());
std::shared_ptr<::grpc::Channel> channel =
::grpc::CreateChannel(address, GetClientCredentials());
std::unique_ptr<grpc::GrpcIfrtService::StubInterface> stub =
grpc::GrpcIfrtService::NewStub(channel);
EXPECT_TRUE(stub != nullptr);
auto session_finished = std::make_shared<Queue>();
auto session = GrpcClientSession::Create(
std::move(stub), Metadata(),
[session_finished](absl::Status s) { session_finished->Push(s); });
EXPECT_THAT(session_finished->Pop(), Not(IsOk()));
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/python/ifrt_proxy/client/grpc_client_session.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/python/ifrt_proxy/client/grpc_client_session_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
91d2793d-e918-49f9-b4b2-ff37091c2a24 | cpp | tensorflow/tensorflow | quantize_and_dequantize_op | tensorflow/compiler/tf2xla/kernels/quantize_and_dequantize_op.cc | tensorflow/core/kernels/quantize_and_dequantize_op_test.cc | #include <cstddef>
#include <vector>
#include "absl/types/span.h"
#include "tensorflow/compiler/tf2xla/type_util.h"
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "xla/hlo/builder/lib/constants.h"
#include "xla/hlo/builder/lib/math.h"
#include "xla/hlo/builder/xla_builder.h"
#include "xla/hlo/builder/xla_computation.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/xla_data.pb.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/op_requires.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace {
enum QuantizerRoundMode {
ROUND_HALF_UP,
ROUND_HALF_TO_EVEN,
};
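// XLA kernel emulating fake quantization: derives (or takes) the input
// range, maps values onto a num_bits-wide integer grid, rounds, and scales
// back to float.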
class QuantizeAndDequantizeOp : public XlaOpKernel {
public:
explicit QuantizeAndDequantizeOp(OpKernelConstruction* ctx)
: XlaOpKernel(ctx) {
OP_REQUIRES_OK(ctx, ctx->GetAttr("signed_input", &signed_input_));
OP_REQUIRES_OK(ctx, ctx->GetAttr("range_given", &range_given_));
OP_REQUIRES_OK(ctx, ctx->GetAttr("narrow_range", &narrow_range_));
OP_REQUIRES_OK(ctx, ctx->GetAttr("axis", &axis_));
round_mode_ = ROUND_HALF_TO_EVEN;
}
void Compile(XlaOpKernelContext* ctx) override {
xla::XlaOp input = ctx->Input(0);
const DataType data_type = ctx->input_type(0);
xla::PrimitiveType xla_type;
OP_REQUIRES_OK(ctx, DataTypeToPrimitiveType(data_type, &xla_type));
xla::XlaBuilder* b = ctx->builder();
xla::XlaOp min_range, max_range;
if (range_given_) {
min_range = ctx->Input(1);
max_range = ctx->Input(2);
} else {
const xla::XlaComputation* fmax = ctx->GetOrCreateMax(data_type);
const xla::XlaComputation* fmin = ctx->GetOrCreateMin(data_type);
if (axis_ == -1) {
min_range = ReduceAll(input, xla::MaxValue(b, xla_type), *fmin);
max_range = ReduceAll(input, xla::MinValue(b, xla_type), *fmax);
} else {
std::vector<int64_t> dimensions_to_reduce;
TensorShape input_shape = ctx->InputShape(0);
int64_t input_rank = input_shape.dims();
OP_REQUIRES(ctx, input_rank >= 1,
errors::Unimplemented("QuantizeAndDequantizeOp with axis "
"!= -1 requires minimum rank 1"));
OP_REQUIRES(
ctx, axis_ >= 0 && axis_ < input_rank,
errors::Unimplemented("QuantizeAndDequantizeOp with invalid axis"));
dimensions_to_reduce.reserve(input_rank - 1);
for (int64_t i = 0; i < input_rank; ++i) {
if (i != axis_) {
dimensions_to_reduce.push_back(i);
}
}
min_range = Reduce(input, xla::MaxValue(b, xla_type), *fmin,
dimensions_to_reduce);
max_range = Reduce(input, xla::MinValue(b, xla_type), *fmax,
dimensions_to_reduce);
}
}
xla::XlaOp num_bits;
if (num_bits_ < 0) {
OP_REQUIRES(
ctx, ctx->num_inputs() == 4,
errors::Internal("Expected 4 inputs to QuantizeAndDequantize"));
num_bits = ctx->Input(3);
} else {
num_bits = xla::ConstantR0<int32>(b, num_bits_);
}
const xla::XlaOp zero = XlaHelpers::Zero(b, data_type);
const xla::XlaOp one = XlaHelpers::One(b, data_type);
const xla::XlaOp two = XlaHelpers::FloatLiteral(b, data_type, 2.0);
const xla::XlaOp half = XlaHelpers::FloatLiteral(b, data_type, 0.5);
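// Representable integer range: signed is [-2^(n-1), 2^(n-1)-1] (lower bound
// raised by one under narrow_range); unsigned is [0, 2^n - 1].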
xla::XlaOp min_quantized, max_quantized;
if (signed_input_) {
if (narrow_range_) {
min_quantized =
-Pow(two, ConvertElementType(
num_bits - xla::ConstantR0<int32>(b, 1), xla_type)) +
one;
} else {
min_quantized =
-Pow(two, ConvertElementType(
num_bits - xla::ConstantR0<int32>(b, 1), xla_type));
}
max_quantized =
Pow(two, ConvertElementType(num_bits - xla::ConstantR0<int32>(b, 1),
xla_type)) -
one;
} else {
min_quantized = zero;
max_quantized = Pow(two, ConvertElementType(num_bits, xla_type)) - one;
}
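// Take the smaller of the two candidate scales so both endpoints stay
// representable, then recompute the endpoint that did not determine the
// scale so it is exactly representable.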
xla::XlaOp scale_from_min_side =
Select(Gt(min_quantized * min_range, zero), min_quantized / min_range,
xla::MaxFiniteValue(b, xla_type));
xla::XlaOp scale_from_max_side =
Select(Gt(max_quantized * max_range, zero), max_quantized / max_range,
xla::MaxFiniteValue(b, xla_type));
xla::XlaOp cond = Lt(scale_from_min_side, scale_from_max_side);
xla::XlaOp scale = Select(cond, scale_from_min_side, scale_from_max_side);
xla::XlaOp inverse_scale =
Select(cond, min_range / min_quantized, max_range / max_quantized);
min_range = Select(cond, min_range, min_quantized * inverse_scale);
max_range = Select(cond, max_quantized * inverse_scale, max_range);
xla::Shape axis_shape = b->GetShape(min_range).value();
if (!xla::ShapeUtil::IsScalar(axis_shape)) {
xla::Shape input_shape = b->GetShape(input).value();
absl::Span<const int64_t> input_dimensions = input_shape.dimensions();
auto convert_to_input_shape = [&](const xla::XlaOp op) {
return xla::BroadcastInDim(op, input_dimensions, {axis_});
};
min_range = convert_to_input_shape(min_range);
max_range = convert_to_input_shape(max_range);
scale = convert_to_input_shape(scale);
inverse_scale = convert_to_input_shape(inverse_scale);
}
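// With a caller-provided range, out-of-range inputs are clamped before
// quantizing.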
if (range_given_) {
input = Clamp(min_range, input, max_range);
}
xla::XlaOp result;
switch (round_mode_) {
case ROUND_HALF_TO_EVEN: {
result = xla::RoundToEven(input * scale) * inverse_scale;
break;
}
case ROUND_HALF_UP: {
result = Floor(input * scale + half) * inverse_scale;
break;
}
}
ctx->SetOutput(0, result);
}
protected:
int64_t num_bits_ = -1;
int axis_;
bool signed_input_;
bool range_given_;
bool narrow_range_;
QuantizerRoundMode round_mode_;
};
class QuantizeAndDequantizeV2Op : public QuantizeAndDequantizeOp {
public:
explicit QuantizeAndDequantizeV2Op(OpKernelConstruction* ctx)
: QuantizeAndDequantizeOp(ctx) {
OP_REQUIRES_OK(ctx, ctx->GetAttr("num_bits", &num_bits_));
OP_REQUIRES(ctx, num_bits_ > 0 && num_bits_ < (signed_input_ ? 62 : 63),
errors::InvalidArgument("num_bits is out of range: ", num_bits_,
" with signed_input_ ", signed_input_));
string round_mode_string;
OP_REQUIRES_OK(ctx, ctx->GetAttr("round_mode", &round_mode_string));
OP_REQUIRES(
ctx,
(round_mode_string == "HALF_UP" || round_mode_string == "HALF_TO_EVEN"),
errors::InvalidArgument("Round mode string must be "
"'HALF_UP' or "
"'HALF_TO_EVEN', is '" +
round_mode_string + "'"));
if (round_mode_string == "HALF_UP") {
round_mode_ = ROUND_HALF_UP;
} else if (round_mode_string == "HALF_TO_EVEN") {
round_mode_ = ROUND_HALF_TO_EVEN;
}
}
};
REGISTER_XLA_OP(Name("QuantizeAndDequantizeV2"), QuantizeAndDequantizeV2Op);
REGISTER_XLA_OP(Name("QuantizeAndDequantizeV3"), QuantizeAndDequantizeOp);
REGISTER_XLA_OP(Name("QuantizeAndDequantizeV4"), QuantizeAndDequantizeV2Op);
}
} | #include <functional>
#include <memory>
#include <vector>
#include "tensorflow/cc/ops/array_ops.h"
#include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/status_matchers.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
namespace tensorflow {
namespace {
using ::tensorflow::testing::StatusIs;
using ::testing::MatchesRegex;
class QuantizeAndDequantizeTest : public OpsTestBase {};
struct ParameterizedQuantizeAndDequantizeTest
: public OpsTestBase,
public ::testing::WithParamInterface<int> {};
TEST_F(QuantizeAndDequantizeTest, Convert_scalar_tensor) {
TF_ASSERT_OK(
NodeDefBuilder("quantize_and_dequantize_op", "QuantizeAndDequantizeV2")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Attr("signed_input", true)
.Attr("num_bits", 8)
.Attr("range_given", false)
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddInputFromArray<float>(TensorShape({1}), {-3.5});
AddInputFromArray<float>(TensorShape({}), {0.0});
AddInputFromArray<float>(TensorShape({}), {0.0});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1}));
test::FillValues<float>(&expected, {-3.5});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
EXPECT_EQ(inputs_[1]->scalar<float>()(), 0.0);
EXPECT_EQ(inputs_[2]->scalar<float>()(), 0.0);
}
TEST_F(QuantizeAndDequantizeTest, Convert_scalar_tensor_V3) {
TF_ASSERT_OK(
NodeDefBuilder("quantize_and_dequantize_op", "QuantizeAndDequantizeV3")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Attr("signed_input", true)
.Attr("range_given", false)
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddInputFromArray<float>(TensorShape({1}), {-3.5});
AddInputFromArray<float>(TensorShape({}), {0.0});
AddInputFromArray<float>(TensorShape({}), {0.0});
AddInputFromArray<int32>(TensorShape({}), {8});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1}));
test::FillValues<float>(&expected, {-3.5});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
EXPECT_EQ(inputs_[1]->scalar<float>()(), 0.0);
EXPECT_EQ(inputs_[2]->scalar<float>()(), 0.0);
}
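// Builds deterministic pseudo-random data in which slice i along `axis` is
// scaled by (i + 1), so each slice has a distinct dynamic range.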
template <typename T>
std::vector<T> ScalePerSliceAlongAxis(std::vector<int64_t> dims, int axis,
const std::vector<T>& data) {
uint32 seed = 123;
int64_t out_size = 1;
for (int dim : dims) {
out_size *= dim;
}
int minor_size = 1;
for (int i = axis + 1; i < dims.size(); ++i) {
minor_size *= dims[i];
}
std::vector<T> out(out_size);
int num_slices = (axis == -1) ? 1 : dims[axis];
for (int out_idx = 0; out_idx < out_size; ++out_idx) {
int in_idx = rand_r(&seed) % data.size();
int multiplier = ((out_idx / minor_size) % num_slices) + 1;
out[out_idx] = data[in_idx] * multiplier;
}
return out;
}
TEST_P(ParameterizedQuantizeAndDequantizeTest, Convert_4D_tensor_with_int8) {
const int axis = GetParam();
TF_ASSERT_OK(
NodeDefBuilder("quantize_and_dequantize_op", "QuantizeAndDequantizeV2")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Attr("signed_input", true)
.Attr("num_bits", 8)
.Attr("range_given", false)
.Attr("axis", axis)
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
const std::vector<int64_t> dims = {2, 3, 4, 5};
AddInputFromArray<float>(
TensorShape(dims),
ScalePerSliceAlongAxis<float>(
dims, axis, {-1, -0.5, 0, 0.3, 0.8, 0.555, 0.50390625}));
const int num_slices = (axis == -1) ? 1 : dims[axis];
const TensorShape range_shape =
(axis == -1) ? TensorShape({}) : TensorShape({num_slices});
std::vector<float> init_value(num_slices, 0.0f);
AddInputFromArray<float>(range_shape, init_value);
AddInputFromArray<float>(range_shape, init_value);
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape(dims));
test::FillValues<float>(
&expected,
ScalePerSliceAlongAxis<float>(
dims, axis, {-1, -0.5, 0, 38.0 / 128, 102.0 / 128, 71.0 / 128, 0.5}));
test::ExpectTensorNear<float>(expected, *GetOutput(0), 1e-5);
for (int slice_idx = 0; slice_idx < num_slices; ++slice_idx) {
EXPECT_EQ(inputs_[1]->flat<float>()(slice_idx), 0.0);
EXPECT_EQ(inputs_[2]->flat<float>()(slice_idx), 0.0);
}
}
TEST_P(ParameterizedQuantizeAndDequantizeTest,
Convert_4D_tensor_with_int8_round_half_up) {
const int axis = GetParam();
TF_ASSERT_OK(
NodeDefBuilder("quantize_and_dequantize_op", "QuantizeAndDequantizeV2")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Attr("signed_input", true)
.Attr("num_bits", 8)
.Attr("range_given", false)
.Attr("round_mode", "HALF_UP")
.Attr("axis", axis)
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
const std::vector<int64_t> dims = {5, 7, 11, 13};
AddInputFromArray<float>(
TensorShape(dims),
ScalePerSliceAlongAxis<float>(
dims, axis, {-1, -0.5, 0, 0.3, 0.8, 0.555, 0.50390625}));
const int num_slices = (axis == -1) ? 1 : dims[axis];
const TensorShape range_shape =
(axis == -1) ? TensorShape({}) : TensorShape({num_slices});
std::vector<float> init_value(num_slices, 0.0f);
AddInputFromArray<float>(range_shape, init_value);
AddInputFromArray<float>(range_shape, init_value);
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape(dims));
test::FillValues<float>(&expected, ScalePerSliceAlongAxis<float>(
dims, axis,
{-1, -0.5, 0, 38.0 / 128, 102.0 / 128,
71.0 / 128, 65.0 / 128}));
test::ExpectTensorNear<float>(expected, *GetOutput(0), 1e-5);
for (int slice_idx = 0; slice_idx < num_slices; ++slice_idx) {
EXPECT_EQ(inputs_[1]->flat<float>()(slice_idx), 0.0);
EXPECT_EQ(inputs_[2]->flat<float>()(slice_idx), 0.0);
}
}
TEST_P(ParameterizedQuantizeAndDequantizeTest,
Convert_4D_tensor_with_int8_round_half_up_narrow_range) {
const int axis = GetParam();
TF_ASSERT_OK(
NodeDefBuilder("quantize_and_dequantize_op", "QuantizeAndDequantizeV2")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Attr("signed_input", true)
.Attr("num_bits", 8)
.Attr("range_given", false)
.Attr("round_mode", "HALF_UP")
.Attr("narrow_range", true)
.Attr("axis", axis)
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
const std::vector<int64_t> dims = {2, 3, 4, 5};
AddInputFromArray<float>(
TensorShape(dims),
ScalePerSliceAlongAxis<float>(
dims, axis, {-1, -0.5, 0, 0.3, 0.8, 0.555, 0.50390625}));
const int num_slices = (axis == -1) ? 1 : dims[axis];
const TensorShape range_shape =
(axis == -1) ? TensorShape({}) : TensorShape({num_slices});
std::vector<float> init_value(num_slices, 0.0f);
AddInputFromArray<float>(range_shape, init_value);
AddInputFromArray<float>(range_shape, init_value);
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape(dims));
test::FillValues<float>(
&expected,
ScalePerSliceAlongAxis<float>(dims, axis,
{-1, -63.0 / 127, 0, 38.0 / 127,
102.0 / 127, 70.0 / 127, 64.0 / 127}));
test::ExpectTensorNear<float>(expected, *GetOutput(0), 1e-5);
for (int slice_idx = 0; slice_idx < num_slices; ++slice_idx) {
EXPECT_EQ(inputs_[1]->flat<float>()(slice_idx), 0.0);
EXPECT_EQ(inputs_[2]->flat<float>()(slice_idx), 0.0);
}
}
TEST_F(QuantizeAndDequantizeTest, Convert_1D_tensor_with_int8_V3) {
TF_ASSERT_OK(
NodeDefBuilder("quantize_and_dequantize_op", "QuantizeAndDequantizeV3")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Attr("signed_input", true)
.Attr("range_given", false)
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddInputFromArray<float>(TensorShape({6}), {-1, -0.5, 0, 0.3, 0.8, 0.555});
AddInputFromArray<float>(TensorShape({}), {0.0});
AddInputFromArray<float>(TensorShape({}), {0.0});
AddInputFromArray<int32>(TensorShape({}), {8});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({6}));
test::FillValues<float>(&expected,
{-1, -0.5, 0, 38.0 / 128, 102.0 / 128, 71.0 / 128});
test::ExpectTensorNear<float>(expected, *GetOutput(0), 1e-5);
EXPECT_EQ(inputs_[1]->scalar<float>()(), 0.0);
EXPECT_EQ(inputs_[2]->scalar<float>()(), 0.0);
}
TEST_P(ParameterizedQuantizeAndDequantizeTest,
Convert_4D_tensor_with_int8_narrow_range_V3) {
const int axis = GetParam();
TF_ASSERT_OK(
NodeDefBuilder("quantize_and_dequantize_op", "QuantizeAndDequantizeV3")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Attr("signed_input", true)
.Attr("range_given", false)
.Attr("narrow_range", true)
.Attr("axis", axis)
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
const std::vector<int64_t> dims = {2, 3, 4, 5};
AddInputFromArray<float>(
TensorShape(dims),
ScalePerSliceAlongAxis<float>(
dims, axis, {-1, -0.5, 0, 0.3, 0.8, 0.555, 0.50390625}));
const int num_slices = (axis == -1) ? 1 : dims[axis];
const TensorShape range_shape =
(axis == -1) ? TensorShape({}) : TensorShape({num_slices});
std::vector<float> init_value(num_slices, 0.0f);
AddInputFromArray<float>(range_shape, init_value);
AddInputFromArray<float>(range_shape, init_value);
AddInputFromArray<int32>(TensorShape({}), {8});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape(dims));
test::FillValues<float>(
&expected,
ScalePerSliceAlongAxis<float>(dims, axis,
{-1, -64.0 / 127, 0, 38.0 / 127,
102.0 / 127, 70.0 / 127, 64.0 / 127}));
test::ExpectTensorNear<float>(expected, *GetOutput(0), 1e-5);
for (int slice_idx = 0; slice_idx < num_slices; ++slice_idx) {
EXPECT_EQ(inputs_[1]->flat<float>()(slice_idx), 0.0);
EXPECT_EQ(inputs_[2]->flat<float>()(slice_idx), 0.0);
}
}
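// The V4 gradient passes a gradient through only where the corresponding
// input lies inside the per-slice [input_min, input_max] range; all other
// gradients are zeroed.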
TEST_P(ParameterizedQuantizeAndDequantizeTest, GradientV4_op) {
const int axis = GetParam();
TF_ASSERT_OK(NodeDefBuilder("qdq_v4_grad_op", "QuantizeAndDequantizeV4Grad")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Attr("axis", axis)
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
const std::vector<int64_t> dims = {2, 3, 4, 5};
auto gradients = ScalePerSliceAlongAxis<float>(
dims, axis, {1, -2, -3, 4, 5, 6, -7, -8, -9, -10, 11});
AddInputFromArray<float>(TensorShape(dims), gradients);
auto inputs = ScalePerSliceAlongAxis<float>(
dims, axis, {-1, -0.5, 0, 0.3, 0.8, 0.55, 0.6});
AddInputFromArray<float>(TensorShape(dims), inputs);
const int num_slices = (axis == -1) ? 1 : dims[axis];
const TensorShape range_shape =
(axis == -1) ? TensorShape({}) : TensorShape({num_slices});
std::vector<float> input_min_values(num_slices), input_max_values(num_slices);
for (int i = 0; i < num_slices; ++i) {
input_max_values[i] = 0.8f + i * 0.4f;
input_min_values[i] = -input_max_values[i];
}
AddInputFromArray<float>(range_shape, input_min_values);
AddInputFromArray<float>(range_shape, input_max_values);
std::vector<float> expected_vals(inputs.size());
int minor_size = 1;
for (int i = axis + 1; i < dims.size(); ++i) {
minor_size *= dims[i];
}
for (int i = 0; i < inputs.size(); ++i) {
int slice_idx = (i / minor_size) % num_slices;
expected_vals[i] = ((inputs[i] >= input_min_values[slice_idx]) &&
(inputs[i] <= input_max_values[slice_idx]))
? gradients[i]
: 0;
}
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape(dims));
test::FillValues<float>(&expected, expected_vals);
test::ExpectTensorNear<float>(expected, *GetOutput(0), 1e-5);
}
INSTANTIATE_TEST_SUITE_P(All, ParameterizedQuantizeAndDequantizeTest,
::testing::Values(-1, 1, 3));
TEST_F(QuantizeAndDequantizeTest, Convert_1D_tensor_with_int4) {
TF_ASSERT_OK(
NodeDefBuilder("quantize_and_dequantize_op", "QuantizeAndDequantizeV2")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Attr("signed_input", true)
.Attr("num_bits", 4)
.Attr("range_given", false)
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddInputFromArray<float>(TensorShape({6}), {-1, -0.5, 0, 0.3125, 0.8, 0.555});
AddInputFromArray<float>(TensorShape({}), {0.0});
AddInputFromArray<float>(TensorShape({}), {0.0});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({6}));
test::FillValues<float>(&expected, {-1, -0.5, 0, 0.25, 0.75, 0.5});
test::ExpectTensorNear<float>(expected, *GetOutput(0), 1e-5);
EXPECT_EQ(inputs_[1]->scalar<float>()(), 0.0);
EXPECT_EQ(inputs_[2]->scalar<float>()(), 0.0);
}
TEST_F(QuantizeAndDequantizeTest, Convert_1D_tensor_with_int4_round_half_up) {
TF_ASSERT_OK(
NodeDefBuilder("quantize_and_dequantize_op", "QuantizeAndDequantizeV2")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Attr("signed_input", true)
.Attr("num_bits", 4)
.Attr("range_given", false)
.Attr("round_mode", "HALF_UP")
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddInputFromArray<float>(TensorShape({6}), {-1, -0.5, 0, 0.3125, 0.8, 0.555});
AddInputFromArray<float>(TensorShape({}), {0.0});
AddInputFromArray<float>(TensorShape({}), {0.0});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({6}));
test::FillValues<float>(&expected, {-1, -0.5, 0, 0.375, 0.75, 0.5});
test::ExpectTensorNear<float>(expected, *GetOutput(0), 1e-5);
EXPECT_EQ(inputs_[1]->scalar<float>()(), 0.0);
EXPECT_EQ(inputs_[2]->scalar<float>()(), 0.0);
}
TEST_F(QuantizeAndDequantizeTest, Convert_1D_tensor_with_int4_V3) {
TF_ASSERT_OK(
NodeDefBuilder("quantize_and_dequantize_op", "QuantizeAndDequantizeV3")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Attr("signed_input", true)
.Attr("range_given", false)
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddInputFromArray<float>(TensorShape({6}), {-1, -0.5, 0, 0.3, 0.8, 0.555});
AddInputFromArray<float>(TensorShape({}), {0.0});
AddInputFromArray<float>(TensorShape({}), {0.0});
AddInputFromArray<int32>(TensorShape({}), {4});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({6}));
test::FillValues<float>(&expected, {-1, -0.5, 0, 0.25, 0.75, 0.5});
test::ExpectTensorNear<float>(expected, *GetOutput(0), 1e-5);
EXPECT_EQ(inputs_[1]->scalar<float>()(), 0.0);
EXPECT_EQ(inputs_[2]->scalar<float>()(), 0.0);
}
TEST_F(QuantizeAndDequantizeTest, Convert_2D_tensor_with_int8_range_given) {
TF_ASSERT_OK(
NodeDefBuilder("quantize_and_dequantize_op", "QuantizeAndDequantizeV2")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Attr("signed_input", true)
.Attr("num_bits", 8)
.Attr("range_given", true)
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddInputFromArray<float>(TensorShape({2, 4}),
{-0.8, -0.5, 0, 0.3, 0.8, 0.555, -2, 33});
AddInputFromArray<float>(TensorShape({}), {-1.0});
AddInputFromArray<float>(TensorShape({}), {1.0});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({2, 4}));
test::FillValues<float>(
&expected, {-102.0 / 127, -64.0 / 127, 0, 38.0 / 127, 102.0 / 127,
70.0 / 127, -128.0 / 127, 1});
test::ExpectTensorNear<float>(expected, *GetOutput(0), 1e-5);
}
TEST_F(QuantizeAndDequantizeTest,
Convert_2D_tensor_with_int8_range_given_round_half_up) {
TF_ASSERT_OK(
NodeDefBuilder("quantize_and_dequantize_op", "QuantizeAndDequantizeV2")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Attr("signed_input", true)
.Attr("num_bits", 8)
.Attr("range_given", true)
.Attr("round_mode", "HALF_UP")
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddInputFromArray<float>(TensorShape({2, 4}),
{-0.8, -0.5, 0, 0.3, 0.8, 0.555, -2, 33});
AddInputFromArray<float>(TensorShape({}), {-1.0});
AddInputFromArray<float>(TensorShape({}), {1.0});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({2, 4}));
test::FillValues<float>(
&expected, {-102.0 / 127, -63.0 / 127, 0, 38.0 / 127, 102.0 / 127,
70.0 / 127, -128.0 / 127, 1});
test::ExpectTensorNear<float>(expected, *GetOutput(0), 1e-5);
}
TEST_F(QuantizeAndDequantizeTest, Convert_2D_tensor_with_int8_range_given_V3) {
TF_ASSERT_OK(
NodeDefBuilder("quantize_and_dequantize_op", "QuantizeAndDequantizeV3")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Attr("signed_input", true)
.Attr("range_given", true)
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddInputFromArray<float>(TensorShape({2, 4}),
{-0.8, -0.5, 0, 0.3, 0.8, 0.555, -2, 33});
AddInputFromArray<float>(TensorShape({}), {-1.0});
AddInputFromArray<float>(TensorShape({}), {1.0});
AddInputFromArray<int32>(TensorShape({}), {8});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({2, 4}));
test::FillValues<float>(
&expected, {-102.0 / 127, -64.0 / 127, 0, 38.0 / 127, 102.0 / 127,
70.0 / 127, -128.0 / 127, 1});
test::ExpectTensorNear<float>(expected, *GetOutput(0), 1e-5);
}
TEST_F(QuantizeAndDequantizeTest, Convert_4D_tensor_with_uint8_range_given) {
TF_ASSERT_OK(
NodeDefBuilder("quantize_and_dequantize_op", "QuantizeAndDequantizeV2")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Attr("signed_input", false)
.Attr("num_bits", 8)
.Attr("range_given", true)
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddInputFromArray<float>(TensorShape({2, 2, 1, 1}), {-0.5, 0, 0.3, 0.8});
AddInputFromArray<float>(TensorShape({}), {0.0});
AddInputFromArray<float>(TensorShape({}), {1.0});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({2, 2, 1, 1}));
test::FillValues<float>(&expected, {0, 0, 76.0 / 255, 204.0 / 255});
test::ExpectTensorNear<float>(expected, *GetOutput(0), 1e-5);
}
TEST_F(QuantizeAndDequantizeTest,
Convert_4D_tensor_with_uint8_range_given_round_half_up) {
TF_ASSERT_OK(
NodeDefBuilder("quantize_and_dequantize_op", "QuantizeAndDequantizeV2")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Attr("signed_input", false)
.Attr("num_bits", 8)
.Attr("range_given", true)
.Attr("round_mode", "HALF_UP")
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddInputFromArray<float>(TensorShape({2, 2, 1, 1}), {-0.5, 0, 0.3, 0.8});
AddInputFromArray<float>(TensorShape({}), {0.0});
AddInputFromArray<float>(TensorShape({}), {1.0});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({2, 2, 1, 1}));
test::FillValues<float>(&expected, {0, 0, 77.0 / 255, 204.0 / 255});
test::ExpectTensorNear<float>(expected, *GetOutput(0), 1e-5);
}
TEST_F(QuantizeAndDequantizeTest, Convert_4D_tensor_with_uint8_range_given_V3) {
TF_ASSERT_OK(
NodeDefBuilder("quantize_and_dequantize_op", "QuantizeAndDequantizeV3")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Attr("signed_input", false)
.Attr("range_given", true)
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddInputFromArray<float>(TensorShape({2, 2, 1, 1}), {-0.5, 0, 0.3, 0.8});
AddInputFromArray<float>(TensorShape({}), {0.0});
AddInputFromArray<float>(TensorShape({}), {1.0});
AddInputFromArray<int32>(TensorShape({}), {8});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({2, 2, 1, 1}));
test::FillValues<float>(&expected, {0, 0, 76.0 / 255, 204.0 / 255});
test::ExpectTensorNear<float>(expected, *GetOutput(0), 1e-5);
}
TEST_F(QuantizeAndDequantizeTest, Convert_tensor_with_all_0) {
TF_ASSERT_OK(
NodeDefBuilder("quantize_and_dequantize_op", "QuantizeAndDequantizeV2")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Attr("signed_input", false)
.Attr("num_bits", 8)
.Attr("range_given", false)
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddInputFromArray<float>(TensorShape({2, 2, 1, 1}), {0, 0, 0, 0});
AddInputFromArray<float>(TensorShape({}), {0.0});
AddInputFromArray<float>(TensorShape({}), {0.0});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({2, 2, 1, 1}));
test::FillValues<float>(&expected, {0, 0, 0, 0});
test::ExpectTensorNear<float>(expected, *GetOutput(0), 1e-5);
}
TEST_F(QuantizeAndDequantizeTest, Convert_tensor_with_all_0_V3) {
TF_ASSERT_OK(
NodeDefBuilder("quantize_and_dequantize_op", "QuantizeAndDequantizeV3")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Attr("signed_input", false)
.Attr("range_given", false)
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddInputFromArray<float>(TensorShape({2, 2, 1, 1}), {0, 0, 0, 0});
AddInputFromArray<float>(TensorShape({}), {0.0});
AddInputFromArray<float>(TensorShape({}), {0.0});
AddInputFromArray<int32>(TensorShape({}), {8});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({2, 2, 1, 1}));
test::FillValues<float>(&expected, {0, 0, 0, 0});
test::ExpectTensorNear<float>(expected, *GetOutput(0), 1e-5);
}
TEST_F(QuantizeAndDequantizeTest, Invalid_range_given) {
TF_ASSERT_OK(
NodeDefBuilder("quantize_and_dequantize_Op", "QuantizeAndDequantizeV2")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Attr("num_bits", 8)
.Attr("range_given", true)
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddInputFromArray<float>(TensorShape({2, 2, 1, 1}), {-0.5, 0, 0.3, 0.8});
AddInputFromArray<float>(TensorShape({}), {1.0});
AddInputFromArray<float>(TensorShape({}), {0.0});
Status s = RunOpKernel();
EXPECT_TRUE(absl::StrContains(s.ToString(),
"Invalid range: input_min 1 > input_max 0"))
<< s;
}
TEST_F(QuantizeAndDequantizeTest, Invalid_range_given_V3) {
TF_ASSERT_OK(
NodeDefBuilder("quantize_and_dequantize_Op", "QuantizeAndDequantizeV3")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Attr("range_given", true)
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddInputFromArray<float>(TensorShape({2, 2, 1, 1}), {-0.5, 0, 0.3, 0.8});
AddInputFromArray<float>(TensorShape({}), {1.0});
AddInputFromArray<float>(TensorShape({}), {0.0});
AddInputFromArray<int32>(TensorShape({}), {8});
Status s = RunOpKernel();
EXPECT_TRUE(absl::StrContains(s.ToString(),
"Invalid range: input_min 1 > input_max 0"))
<< s;
}
TEST_F(QuantizeAndDequantizeTest, Invalid_axis_given_V3) {
TF_ASSERT_OK(
NodeDefBuilder("quantize_and_dequantize_Op", "QuantizeAndDequantizeV3")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Attr("range_given", false)
.Attr("axis", static_cast<int32_t>(-2147483648))
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddInputFromArray<float>(TensorShape({2, 2, 1, 1}), {-0.5, 0, 0.3, 0.8});
AddInputFromArray<float>(TensorShape({}), {1.0});
AddInputFromArray<float>(TensorShape({}), {0.0});
AddInputFromArray<int32>(TensorShape({}), {8});
EXPECT_THAT(
RunOpKernel(),
StatusIs(
error::INVALID_ARGUMENT,
MatchesRegex("Axis requested is larger than input dimensions.*")));
}
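// Benchmarks a trivial scalar QuantizeAndDequantizeV2 on the given device.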
#define BM_SIMPLE_QUAN_DEQUAN(DEVICE) \
static void BM_SIMPLE_QUAN_DEQUAN_##DEVICE( \
::testing::benchmark::State& state) { \
auto root = Scope::NewRootScope().ExitOnError(); \
ops::QuantizeAndDequantizeV2(root, -3.5, -3.5, -3.5); \
TF_CHECK_OK(root.status()); \
Graph* g = new Graph(OpRegistry::Global()); \
TF_CHECK_OK(root.ToGraph(g)); \
test::Benchmark(#DEVICE, g, false).Run(state); \
} \
BENCHMARK(BM_SIMPLE_QUAN_DEQUAN_##DEVICE);
BM_SIMPLE_QUAN_DEQUAN(cpu);
BM_SIMPLE_QUAN_DEQUAN(gpu);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/kernels/quantize_and_dequantize_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/quantize_and_dequantize_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ce739db8-d021-48f4-9f21-a73dda38b5ef | cpp | tensorflow/tensorflow | source_writer | tensorflow/java/src/gen/cc/source_writer.cc | tensorflow/java/src/gen/cc/source_writer_test.cc | #include "tensorflow/java/src/gen/cc/source_writer.h"
#include <algorithm>
#include <list>
#include <string>
#include "absl/log/check.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/stringpiece.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/java/src/gen/cc/java_defs.h"
#include "tsl/platform/status.h"
namespace tensorflow {
namespace java {
SourceWriter::SourceWriter() {
generic_namespaces_.push(new GenericNamespace());
}
SourceWriter::~SourceWriter() {
while (!generic_namespaces_.empty()) {
GenericNamespace* generic_namespace = generic_namespaces_.top();
generic_namespaces_.pop();
delete generic_namespace;
}
}
SourceWriter& SourceWriter::Indent(int tab) {
left_margin_.resize(
std::max(static_cast<int>(left_margin_.size() + tab), 0), ' ');
return *this;
}
SourceWriter& SourceWriter::Prefix(const char* line_prefix) {
line_prefix_ = line_prefix;
return *this;
}
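// Writes `str` one line at a time so that the current indentation and line
// prefix are re-applied after every newline.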
SourceWriter& SourceWriter::Write(const StringPiece& str) {
size_t line_pos = 0;
do {
size_t start_pos = line_pos;
line_pos = str.find('\n', start_pos);
if (line_pos != string::npos) {
++line_pos;
Append(str.substr(start_pos, line_pos - start_pos));
newline_ = true;
} else {
Append(str.substr(start_pos, str.size() - start_pos));
}
} while (line_pos != string::npos && line_pos < str.size());
return *this;
}
SourceWriter& SourceWriter::WriteFromFile(const string& fname, Env* env) {
string data_;
TF_CHECK_OK(ReadFileToString(env, fname, &data_));
return Write(data_);
}
SourceWriter& SourceWriter::Append(const StringPiece& str) {
if (!str.empty()) {
if (newline_) {
DoAppend(left_margin_ + line_prefix_);
newline_ = false;
}
DoAppend(str);
}
return *this;
}
SourceWriter& SourceWriter::AppendType(const Type& type) {
if (type.wildcard()) {
Append("?");
} else {
Append(type.name());
if (!type.parameters().empty()) {
Append("<");
bool first = true;
for (const Type& t : type.parameters()) {
if (!first) {
Append(", ");
}
AppendType(t);
first = false;
}
Append(">");
}
}
return *this;
}
SourceWriter& SourceWriter::EndLine() {
Append("\n");
newline_ = true;
return *this;
}
SourceWriter& SourceWriter::BeginBlock(const string& expression) {
if (!expression.empty()) {
Append(expression + " {");
} else {
Append(newline_ ? "{" : " {");
}
return EndLine().Indent(2);
}
SourceWriter& SourceWriter::EndBlock() {
return Indent(-2).Append("}").EndLine();
}
SourceWriter& SourceWriter::BeginMethod(const Method& method, int modifiers,
const Javadoc* javadoc) {
GenericNamespace* generic_namespace = PushGenericNamespace(modifiers);
if (!method.constructor()) {
generic_namespace->Visit(method.return_type());
}
for (const Variable& v : method.arguments()) {
generic_namespace->Visit(v.type());
}
EndLine();
if (javadoc != nullptr) {
WriteJavadoc(*javadoc);
}
if (!method.annotations().empty()) {
WriteAnnotations(method.annotations());
}
WriteModifiers(modifiers);
if (!generic_namespace->declared_types().empty()) {
WriteGenerics(generic_namespace->declared_types());
Append(" ");
}
if (!method.constructor()) {
AppendType(method.return_type()).Append(" ");
}
Append(method.name()).Append("(");
bool first = true;
for (const Variable& v : method.arguments()) {
if (!first) {
Append(", ");
}
AppendType(v.type()).Append(v.variadic() ? "... " : " ").Append(v.name());
first = false;
}
return Append(")").BeginBlock();
}
SourceWriter& SourceWriter::EndMethod() {
EndBlock();
PopGenericNamespace();
return *this;
}
SourceWriter& SourceWriter::BeginType(const Type& type, int modifiers,
const std::list<Type>* extra_dependencies,
const Javadoc* javadoc) {
if (!type.package().empty()) {
Append("package ").Append(type.package()).Append(";").EndLine();
}
TypeImporter type_importer(type.package());
type_importer.Visit(type);
if (extra_dependencies != nullptr) {
for (const Type& t : *extra_dependencies) {
type_importer.Visit(t);
}
}
if (!type_importer.imports().empty()) {
EndLine();
for (const string& s : type_importer.imports()) {
Append("import ").Append(s).Append(";").EndLine();
}
}
return BeginInnerType(type, modifiers, javadoc);
}
SourceWriter& SourceWriter::BeginInnerType(const Type& type, int modifiers,
const Javadoc* javadoc) {
GenericNamespace* generic_namespace = PushGenericNamespace(modifiers);
generic_namespace->Visit(type);
EndLine();
if (javadoc != nullptr) {
WriteJavadoc(*javadoc);
}
if (!type.annotations().empty()) {
WriteAnnotations(type.annotations());
}
WriteModifiers(modifiers);
CHECK_EQ(Type::Kind::CLASS, type.kind()) << ": Not supported yet";
Append("class ").Append(type.name());
if (!generic_namespace->declared_types().empty()) {
WriteGenerics(generic_namespace->declared_types());
}
if (!type.supertypes().empty()) {
bool first_interface = true;
for (const Type& t : type.supertypes()) {
if (t.kind() == Type::CLASS) {
Append(" extends ");
} else if (first_interface) {
Append(" implements ");
first_interface = false;
} else {
Append(", ");
}
AppendType(t);
}
}
return BeginBlock();
}
SourceWriter& SourceWriter::EndType() {
EndBlock();
PopGenericNamespace();
return *this;
}
SourceWriter& SourceWriter::WriteField(const Variable& field, int modifiers,
const Javadoc* javadoc) {
  if (javadoc != nullptr && !javadoc->brief().empty()) {
    // The brief javadoc text appears to have been stripped by the dataset
    // extraction; only the line break it produced remains.
    Append("").EndLine();
  }
WriteModifiers(modifiers);
AppendType(field.type()).Append(" ").Append(field.name()).Append(";");
EndLine();
return *this;
}
SourceWriter& SourceWriter::WriteModifiers(int modifiers) {
if (modifiers & PUBLIC) {
Append("public ");
} else if (modifiers & PROTECTED) {
Append("protected ");
} else if (modifiers & PRIVATE) {
Append("private ");
}
if (modifiers & STATIC) {
Append("static ");
}
if (modifiers & FINAL) {
Append("final ");
}
return *this;
}
SourceWriter& SourceWriter::WriteJavadoc(const Javadoc& javadoc) {
  // The javadoc body appears to have been stripped by the dataset extraction.
  // Return the builder so the call chains and this non-void function no
  // longer falls off the end (undefined behavior).
  return Append("").EndLine();
}
SourceWriter& SourceWriter::WriteAnnotations(
const std::list<Annotation>& annotations) {
for (const Annotation& a : annotations) {
Append("@" + a.name());
if (!a.attributes().empty()) {
Append("(").Append(a.attributes()).Append(")");
}
EndLine();
}
return *this;
}
SourceWriter& SourceWriter::WriteGenerics(
const std::list<const Type*>& generics) {
Append("<");
bool first = true;
for (const Type* pt : generics) {
if (!first) {
Append(", ");
}
Append(pt->name());
if (!pt->supertypes().empty()) {
Append(" extends ").AppendType(pt->supertypes().front());
}
first = false;
}
return Append(">");
}
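// Static members cannot refer to the type parameters of their enclosing
// class, so a STATIC scope starts from a fresh generic namespace instead of
// inheriting the current one.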
SourceWriter::GenericNamespace* SourceWriter::PushGenericNamespace(
int modifiers) {
GenericNamespace* generic_namespace;
if (modifiers & STATIC) {
generic_namespace = new GenericNamespace();
} else {
generic_namespace = new GenericNamespace(generic_namespaces_.top());
}
generic_namespaces_.push(generic_namespace);
return generic_namespace;
}
void SourceWriter::PopGenericNamespace() {
GenericNamespace* generic_namespace = generic_namespaces_.top();
generic_namespaces_.pop();
delete generic_namespace;
}
void SourceWriter::TypeVisitor::Visit(const Type& type) {
DoVisit(type);
for (const Type& t : type.parameters()) {
Visit(t);
}
for (const Annotation& t : type.annotations()) {
DoVisit(t);
}
for (const Type& t : type.supertypes()) {
Visit(t);
}
}
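// Records each generic type parameter the first time it is seen so that it
// can be declared in the enclosing scope; wildcards need no declaration.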
void SourceWriter::GenericNamespace::DoVisit(const Type& type) {
if (type.kind() == Type::GENERIC && !type.wildcard() &&
generic_names_.find(type.name()) == generic_names_.end()) {
declared_types_.push_back(&type);
generic_names_.insert(type.name());
}
}
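// Only types living in a different, non-empty package need an import.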
void SourceWriter::TypeImporter::DoVisit(const Type& type) {
if (!type.package().empty() && type.package() != current_package_) {
imports_.insert(type.canonical_name());
}
}
}
} | #include "tensorflow/java/src/gen/cc/source_writer.h"
#include <list>
#include "tensorflow/core/platform/path.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/java/src/gen/cc/java_defs.h"
namespace tensorflow {
namespace java {
namespace {
TEST(AppendTest, SingleLineText) {
SourceBufferWriter writer;
writer.Append("You say goodbye and I say hello!");
const char* expected = "You say goodbye and I say hello!";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(AppendTest, MultiLineText) {
SourceBufferWriter writer;
writer.Append("You say goodbye\nand I say hello!");
const char* expected = "You say goodbye\nand I say hello!";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(AppendTest, MultiLineTextWithIndent) {
SourceBufferWriter writer;
writer.Indent(2).Append("You say goodbye\nand I say hello!");
const char* expected = " You say goodbye\nand I say hello!";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(AppendTest, MultiLineTextWithPrefix) {
SourceBufferWriter writer;
writer.Prefix("--").Append("You say goodbye\nand I say hello!");
const char* expected = "--You say goodbye\nand I say hello!";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(AppendTest, MultiLineTextWithIndentAndPrefix) {
SourceBufferWriter writer;
writer.Indent(2).Prefix("--").Append("You say goodbye\nand I say hello!");
const char* expected = " --You say goodbye\nand I say hello!";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(WriteTest, SingleLineText) {
SourceBufferWriter writer;
writer.Write("You say goodbye and I say hello!");
const char* expected = "You say goodbye and I say hello!";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(WriteTest, MultiLineText) {
SourceBufferWriter writer;
writer.Write("You say goodbye\nand I say hello!");
const char* expected = "You say goodbye\nand I say hello!";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(WriteTest, MultiLineTextWithIndent) {
SourceBufferWriter writer;
writer.Indent(2).Write("You say goodbye\nand I say hello!");
const char* expected = " You say goodbye\n and I say hello!";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(WriteTest, MultiLineTextWithPrefix) {
SourceBufferWriter writer;
writer.Prefix("--").Write("You say goodbye\nand I say hello!");
const char* expected = "--You say goodbye\n--and I say hello!";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(WriteTest, MultiLineTextWithIndentAndPrefix) {
SourceBufferWriter writer;
writer.Indent(2).Prefix("--").Write("You say goodbye\nand I say hello!");
const char* expected = " --You say goodbye\n --and I say hello!";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(MarginTest, Basic) {
SourceBufferWriter writer;
writer.Append("You say goodbye").EndLine().Append("and I say hello!");
const char* expected = "You say goodbye\nand I say hello!";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(MarginTest, Indent) {
SourceBufferWriter writer;
writer.Append("You say goodbye")
.EndLine()
.Indent(2)
.Append("and I say hello!");
const char* expected = "You say goodbye\n and I say hello!";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(MarginTest, IndentAndOutdent) {
SourceBufferWriter writer;
writer.Append("You say goodbye")
.EndLine()
.Indent(2)
.Append("and I say hello!")
.EndLine()
.Indent(-2)
.Append("Hello, hello!");
const char* expected = "You say goodbye\n and I say hello!\nHello, hello!";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(MarginTest, Prefix) {
SourceBufferWriter writer;
writer.Append("You say goodbye")
.EndLine()
.Prefix("--")
.Append("and I say hello!");
const char* expected = "You say goodbye\n--and I say hello!";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(MarginTest, PrefixAndRemovePrefix) {
SourceBufferWriter writer;
writer.Append("You say goodbye")
.EndLine()
.Prefix("--")
.Append("and I say hello!")
.EndLine()
.Prefix("")
.Append("Hello, hello!");
const char* expected = "You say goodbye\n--and I say hello!\nHello, hello!";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(MarginTest, IndentAndPrefixAndOutdentAndRemovePrefix) {
SourceBufferWriter writer;
writer.Append("You say goodbye")
.EndLine()
.Indent(2)
.Prefix("--")
.Append("and I say hello!")
.EndLine()
.Indent(-2)
.Prefix("")
.Append("Hello, hello!");
const char* expected = "You say goodbye\n --and I say hello!\nHello, hello!";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(MarginTest, NegativeIndent) {
SourceBufferWriter writer;
writer.Append("You say goodbye")
.EndLine()
.Indent(-10)
.Append("and I say hello!");
const char* expected = "You say goodbye\nand I say hello!";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(MarginTest, CumulativeIndent) {
SourceBufferWriter writer;
writer.Append("You say goodbye")
.EndLine()
.Indent(2)
.Append("and I say hello!")
.EndLine()
.Indent(2)
.Append("Hello, hello!");
const char* expected =
"You say goodbye\n and I say hello!\n Hello, hello!";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(MarginTest, EmptyPrefix) {
SourceBufferWriter writer;
writer.Append("You say goodbye")
.EndLine()
.Prefix("")
.Append("and I say hello!");
const char* expected = "You say goodbye\nand I say hello!";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(StreamTest, BlocksAndLines) {
SourceBufferWriter writer;
writer.Append("int i = 0;").EndLine()
.Append("int j = 10;").EndLine()
.Append("if (true)")
.BeginBlock()
.Append("int aLongWayToTen = 0;").EndLine()
.Append("while (++i <= j)")
.BeginBlock()
.Append("++aLongWayToTen;").EndLine()
.EndBlock()
.EndBlock();
const char* expected =
"int i = 0;\n"
"int j = 10;\n"
"if (true) {\n"
" int aLongWayToTen = 0;\n"
" while (++i <= j) {\n"
" ++aLongWayToTen;\n"
" }\n"
"}\n";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(StreamTest, Types) {
SourceBufferWriter writer;
Type generic = Type::Generic("T").add_supertype(Type::Class("Number"));
writer.AppendType(Type::Int())
.Append(", ")
.AppendType(Type::Class("String"))
.Append(", ")
.AppendType(generic)
.Append(", ")
.AppendType(Type::ListOf(generic))
.Append(", ")
.AppendType(Type::ListOf(Type::IterableOf(generic)))
.Append(", ")
.AppendType(Type::ListOf(Type::Wildcard()));
const char* expected =
"int, String, T, List<T>, List<Iterable<T>>, List<?>";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(StreamTest, FileSnippet) {
SourceBufferWriter writer;
const string fname = tensorflow::io::JoinPath(
tensorflow::testing::TensorFlowSrcRoot(),
"java/src/gen/resources/test.java.snippet");
writer.WriteFromFile(fname)
.BeginBlock()
.WriteFromFile(fname)
.EndBlock();
  const char* expected =
      ""  // snippet's leading comment line stripped by the dataset extraction
      "System.out.println(\"Hello!\");\n"
      "{\n"
      ""  // snippet's leading comment line stripped by the dataset extraction
      "  System.out.println(\"Hello!\");\n"
      "}\n";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(WriteType, SimpleClass) {
SourceBufferWriter writer;
Type clazz = Type::Class("Test", "org.tensorflow");
writer.BeginType(clazz, PUBLIC).EndType();
const char* expected =
"package org.tensorflow;\n\n"
"public class Test {\n}\n";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(WriteType, SimpleClassWithDependencies) {
SourceBufferWriter writer;
Type clazz = Type::Class("Test", "org.tensorflow");
std::list<Type> deps;
deps.push_back(Type::Class("TypeA", "org.test.sub"));
deps.push_back(Type::Class("TypeA", "org.test.sub"));
deps.push_back(Type::Class("TypeB", "org.other"));
deps.push_back(Type::Class("SamePackageType", "org.tensorflow"));
deps.push_back(Type::Class("NoPackageType"));
writer.BeginType(clazz, PUBLIC, &deps).EndType();
const char* expected =
"package org.tensorflow;\n\n"
"import org.other.TypeB;\n"
"import org.test.sub.TypeA;\n\n"
"public class Test {\n}\n";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(WriteType, AnnotatedAndDocumentedClass) {
SourceBufferWriter writer;
Type clazz = Type::Class("Test", "org.tensorflow");
Javadoc clazz_doc = Javadoc::Create("Javadoc test")
.details("This is a\nmultiline description.");
clazz.add_annotation(Annotation::Create("Bean"));
clazz.add_annotation(Annotation::Create("SuppressWarnings")
.attributes("\"rawtypes\""));
writer.BeginType(clazz, PUBLIC, nullptr, &clazz_doc).EndType();
const char* expected =
"package org.tensorflow;\n\n"
"\n"
"@Bean\n"
"@SuppressWarnings(\"rawtypes\")\n"
"public class Test {\n}\n";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(WriteType, ParameterizedClass) {
SourceBufferWriter writer;
Type clazz = Type::Class("Test", "org.tensorflow");
clazz.add_parameter(Type::Generic("T"));
clazz.add_parameter(Type::Generic("U").add_supertype(Type::Class("Number")));
writer.BeginType(clazz, PUBLIC).EndType();
const char* expected =
"package org.tensorflow;\n\n"
"public class Test<T, U extends Number> {\n}\n";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(WriteType, ParameterizedClassAndSupertypes) {
SourceBufferWriter writer;
Type clazz = Type::Class("Test", "org.tensorflow");
Type type_t = Type::Generic("T");
clazz.add_parameter(type_t);
Type type_u = Type::Generic("U").add_supertype(Type::Class("Number"));
clazz.add_parameter(type_u);
clazz.add_supertype(Type::Interface("Parameterizable").add_parameter(type_u));
clazz.add_supertype(Type::Interface("Runnable"));
clazz.add_supertype(Type::Class("SuperTest").add_parameter(type_t));
writer.BeginType(clazz, PUBLIC).EndType();
const char* expected =
"package org.tensorflow;\n\n"
"public class Test<T, U extends Number>"
" extends SuperTest<T> implements Parameterizable<U>, Runnable {\n}\n";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(WriteType, ParameterizedClassFields) {
SourceBufferWriter writer;
Type clazz = Type::Class("Test", "org.tensorflow");
Type type_t = Type::Generic("T").add_supertype(Type::Class("Number"));
clazz.add_parameter(type_t);
Variable field1 = Variable::Create("field1", Type::Class("String"));
Variable field2 = Variable::Create("field2", Type::Class("String"));
Variable field3 = Variable::Create("field3", type_t);
Javadoc field3_doc = Javadoc::Create("This variable is documented");
writer.BeginType(clazz, PUBLIC)
.WriteField(field1, STATIC | PUBLIC | FINAL)
.WriteField(field2, PRIVATE)
.WriteField(field3, PRIVATE, &field3_doc)
.EndType();
const char* expected =
"package org.tensorflow;\n\n"
"public class Test<T extends Number> {\n"
" public static final String field1;\n"
" private String field2;\n"
" \n"
" private T field3;\n"
"}\n";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(WriteType, SimpleInnerClass) {
SourceBufferWriter writer;
Type clazz = Type::Class("Test", "org.tensorflow");
Type inner_class = Type::Class("InnerTest");
writer.BeginType(clazz, PUBLIC)
.BeginInnerType(inner_class, PUBLIC)
.EndType()
.EndType();
const char* expected =
"package org.tensorflow;\n\n"
"public class Test {\n"
" \n"
" public class InnerTest {\n"
" }\n"
"}\n";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(WriteType, StaticParameterizedInnerClass) {
SourceBufferWriter writer;
Type clazz = Type::Class("Test", "org.tensorflow");
Type type_t = Type::Generic("T").add_supertype(Type::Class("Number"));
clazz.add_parameter(type_t);
Type inner_class = Type::Class("InnerTest");
inner_class.add_parameter(type_t);
writer.BeginType(clazz, PUBLIC)
.BeginInnerType(inner_class, PUBLIC | STATIC)
.EndType()
.EndType();
const char* expected =
"package org.tensorflow;\n\n"
"public class Test<T extends Number> {\n"
" \n"
" public static class InnerTest<T extends Number> {\n"
" }\n"
"}\n";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(WriteMethod, SimpleMethod) {
SourceBufferWriter writer;
Type clazz = Type::Class("Test", "org.tensorflow");
Method method = Method::Create("doNothing", Type::Void());
writer.BeginType(clazz, PUBLIC)
.BeginMethod(method, PUBLIC)
.EndMethod()
.EndType();
const char* expected =
"package org.tensorflow;\n\n"
"public class Test {\n"
" \n"
" public void doNothing() {\n"
" }\n"
"}\n";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(WriteMethod, AnnotatedAndDocumentedMethod) {
SourceBufferWriter writer;
Type clazz = Type::Class("Test", "org.tensorflow");
Method method = Method::Create("doNothing", Type::Void());
Javadoc method_doc =
Javadoc::Create("Javadoc test")
.details("This method has a\nmultiline description.");
method.add_annotation(Annotation::Create("Override"));
method.add_annotation(Annotation::Create("SuppressWarnings")
.attributes("\"rawtypes\""));
writer.BeginType(clazz, PUBLIC)
.BeginMethod(method, PUBLIC, &method_doc)
.EndMethod()
.EndType();
const char* expected =
"package org.tensorflow;\n\n"
"public class Test {\n"
" \n"
" \n"
" @Override\n"
" @SuppressWarnings(\"rawtypes\")\n"
" public void doNothing() {\n"
" }\n"
"}\n";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(WriteMethod, DocumentedMethodWithArguments) {
SourceBufferWriter writer;
Type clazz = Type::Class("Test", "org.tensorflow");
Variable reverse = Variable::Create("reverse", Type::Boolean());
Method method = Method::Create("boolToInt", Type::Int());
method.add_argument(Variable::Create("b", Type::Boolean()));
method.add_argument(reverse);
Javadoc method_doc =
Javadoc::Create("Converts a boolean to an int")
.details("This method will convert\na boolean to an int")
.add_param_tag(reverse.name(), "if true, value is reversed")
.add_tag("return", "int value for this boolean");
writer.BeginType(clazz, PUBLIC)
.BeginMethod(method, PUBLIC, &method_doc)
.Append("if (b && !reverse)")
.BeginBlock()
.Append("return 1;")
.EndLine()
.EndBlock()
.Append("return 0;")
.EndLine()
.EndMethod()
.EndType();
const char* expected =
"package org.tensorflow;\n\n"
"public class Test {\n"
" \n"
" \n"
" public int boolToInt(boolean b, boolean reverse) {\n"
" if (b && !reverse) {\n"
" return 1;\n"
" }\n"
" return 0;\n"
" }\n"
"}\n";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(WriteMethod, ParameterizedMethod) {
SourceBufferWriter writer;
Type clazz = Type::Class("Test", "org.tensorflow");
Type type_t = Type::Generic("T").add_supertype(Type::Class("Number"));
clazz.add_parameter(type_t);
Method method = Method::Create("doNothing", type_t);
writer.BeginType(clazz, PUBLIC)
.BeginMethod(method, PUBLIC)
.Append("return null;")
.EndLine()
.EndMethod()
.EndType();
const char* expected =
"package org.tensorflow;\n\n"
"public class Test<T extends Number> {\n"
" \n"
" public T doNothing() {\n"
" return null;\n"
" }\n"
"}\n";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(WriteMethod, StaticParameterizedMethod) {
SourceBufferWriter writer;
Type clazz = Type::Class("Test", "org.tensorflow");
Type type_t = Type::Generic("T").add_supertype(Type::Class("Number"));
clazz.add_parameter(type_t);
Method method = Method::Create("doNothing", type_t);
writer.BeginType(clazz, PUBLIC)
.BeginMethod(method, PUBLIC | STATIC)
.Append("return null;")
.EndLine()
.EndMethod()
.EndType();
const char* expected =
"package org.tensorflow;\n\n"
"public class Test<T extends Number> {\n"
" \n"
" public static <T extends Number> T doNothing() {\n"
" return null;\n"
" }\n"
"}\n";
ASSERT_STREQ(expected, writer.str().data());
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/java/src/gen/cc/source_writer.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/java/src/gen/cc/source_writer_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
f9dbdb80-c8a7-4e42-8117-cb0ca6796318 | cpp | google/arolla | casting | arolla/qexpr/casting.cc | arolla/qexpr/casting_test.cc | #include "arolla/qexpr/casting.h"
#include <cstddef>
#include <utility>
#include <vector>
#include "absl/container/inlined_vector.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "arolla/qexpr/operator_errors.h"
#include "arolla/qexpr/qexpr_operator_signature.h"
#include "arolla/qtype/derived_qtype.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/standard_type_properties/common_qtype.h"
namespace arolla {
namespace {
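// Returns true iff every type in `from_types` can be implicitly cast to the
// corresponding type in `to_types` (element-wise, requiring equal arity).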
bool CanCastImplicitly(absl::Span<const QTypePtr> from_types,
absl::Span<const QTypePtr> to_types) {
if (from_types.size() != to_types.size()) {
return false;
}
for (size_t i = 0; i < to_types.size(); ++i) {
if (!CanCastImplicitly(from_types[i], to_types[i],
true)) {
return false;
}
}
return true;
}
struct SignatureFormatter {
void operator()(std::string* out,
const QExprOperatorSignature* signature) const {
absl::StrAppend(out, signature);
}
};
}
absl::StatusOr<const QExprOperatorSignature*> FindMatchingSignature(
absl::Span<const QTypePtr> input_types, QTypePtr output_type,
absl::Span<const QExprOperatorSignature* const> supported_signatures,
absl::string_view op_name) {
const QTypePtr decayed_output_type = DecayDerivedQType(output_type);
absl::InlinedVector<QTypePtr, 6> decayed_input_types(input_types.size());
for (size_t i = 0; i < input_types.size(); ++i) {
decayed_input_types[i] = DecayDerivedQType(input_types[i]);
}
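  // `frontier` holds the most specific matching signatures found so far: a
  // candidate whose inputs cast implicitly to another candidate's inputs is
  // strictly more specific and displaces it.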
absl::InlinedVector<const QExprOperatorSignature*, 8> frontier;
for (const auto& candidate : supported_signatures) {
if (decayed_output_type != DecayDerivedQType(candidate->output_type())) {
continue;
}
if (!CanCastImplicitly(input_types, candidate->input_types())) {
continue;
}
if (decayed_input_types == candidate->input_types()) {
return candidate;
}
bool dominates = false;
bool dominated = false;
auto out_it = frontier.begin();
for (auto* previous : frontier) {
if (CanCastImplicitly(candidate->input_types(),
previous->input_types())) {
dominates = true;
} else if (dominates || !CanCastImplicitly(previous->input_types(),
candidate->input_types())) {
*out_it++ = previous;
} else {
dominated = true;
break;
}
}
if (dominates) {
frontier.erase(out_it, frontier.end());
}
if (!dominated) {
frontier.push_back(candidate);
}
}
if (frontier.empty()) {
return absl::NotFoundError(absl::StrFormat(
"QExpr operator %s%v not found; %s\n%s", op_name,
QExprOperatorSignature::Get(input_types, output_type),
SuggestMissingDependency(),
SuggestAvailableOverloads(op_name, supported_signatures)));
}
if (frontier.size() > 1) {
return absl::FailedPreconditionError(absl::StrFormat(
"ambiguous overloads for the QExpr operator %s%v: provided argument "
"types can be cast to the following supported signatures: %s ",
op_name, QExprOperatorSignature::Get(input_types, output_type),
absl::StrJoin(frontier, ", ", SignatureFormatter())));
}
return frontier[0];
}
} | #include "arolla/qexpr/casting.h"
#include <cstdint>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/status/status.h"
#include "absl/status/status_matchers.h"
#include "arolla/qexpr/qexpr_operator_signature.h"
#include "arolla/qtype/base_types.h"
#include "arolla/qtype/optional_qtype.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/qtype_traits.h"
namespace arolla {
namespace {
using ::absl_testing::IsOkAndHolds;
using ::absl_testing::StatusIs;
using ::testing::HasSubstr;
TEST(CastingTest, FindMatchingSignature) {
const QTypePtr i32 = GetQType<int32_t>();
const QTypePtr i64 = GetQType<int64_t>();
const QTypePtr oi32 = GetOptionalQType<int32_t>();
const QTypePtr oi64 = GetOptionalQType<int64_t>();
const QTypePtr f32 = GetQType<float>();
const QTypePtr f64 = GetQType<double>();
const QTypePtr of64 = GetOptionalQType<double>();
EXPECT_THAT(
FindMatchingSignature({i64, i32}, i32,
{QExprOperatorSignature::Get({i32, i32}, i64)},
"foo")
.status(),
StatusIs(absl::StatusCode::kNotFound,
HasSubstr("QExpr operator foo(INT64,INT32)->INT32 not found")));
EXPECT_THAT(
FindMatchingSignature({i64, i32}, i64,
{QExprOperatorSignature::Get({i32, i32}, i32)},
"foo")
.status(),
StatusIs(absl::StatusCode::kNotFound,
HasSubstr("QExpr operator foo(INT64,INT32)->INT64 not found")));
EXPECT_THAT(
FindMatchingSignature({i64, i32}, i64,
{QExprOperatorSignature::Get({i64, i64}, i64)}, ""),
IsOkAndHolds(QExprOperatorSignature::Get({i64, i64}, i64)));
EXPECT_THAT(
FindMatchingSignature({i32, i32}, oi32,
{
QExprOperatorSignature::Get({oi64, i32}, oi32),
QExprOperatorSignature::Get({i64, i32}, oi32),
QExprOperatorSignature::Get({i64, i64}, oi32),
QExprOperatorSignature::Get({i32, i32}, i32)},
""),
IsOkAndHolds(QExprOperatorSignature::Get({i64, i32}, oi32)));
EXPECT_THAT(FindMatchingSignature({GetWeakFloatQType()}, GetWeakFloatQType(),
{QExprOperatorSignature::Get({f32}, f64),
QExprOperatorSignature::Get({f64}, f64)},
""),
IsOkAndHolds(QExprOperatorSignature::Get({f64}, f64)));
EXPECT_THAT(FindMatchingSignature({GetWeakFloatQType()}, GetWeakFloatQType(),
{QExprOperatorSignature::Get({f32}, f64),
QExprOperatorSignature::Get({of64}, f64)},
""),
IsOkAndHolds(QExprOperatorSignature::Get({f32}, f64)));
EXPECT_THAT(
FindMatchingSignature({GetWeakFloatQType()}, GetWeakFloatQType(),
{QExprOperatorSignature::Get({f32}, f32),
QExprOperatorSignature::Get({f64}, f32)},
""),
StatusIs(absl::StatusCode::kNotFound,
HasSubstr("QExpr operator (WEAK_FLOAT)->WEAK_FLOAT not found")));
EXPECT_THAT(
FindMatchingSignature({i32, i32}, i64,
{QExprOperatorSignature::Get({i32, i64}, i64),
QExprOperatorSignature::Get({i64, i32}, i64),
QExprOperatorSignature::Get({i32, oi64}, i64),
QExprOperatorSignature::Get({i32, i32}, i32)},
"")
.status(),
StatusIs(absl::StatusCode::kFailedPrecondition,
HasSubstr("ambiguous overloads for the QExpr operator "
"(INT32,INT32)->INT64: provided argument types "
"can be cast to the following supported signatures: "
"(INT32,INT64)->INT64, (INT64,INT32)->INT64")));
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/qexpr/casting.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/qexpr/casting_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
e8aae33a-e103-4f39-a719-bdecff704539 | cpp | tensorflow/tensorflow | scanner | third_party/xla/third_party/tsl/tsl/platform/scanner.cc | third_party/xla/third_party/tsl/tsl/platform/scanner_test.cc | #include "tsl/platform/scanner.h"
namespace tsl {
namespace strings {
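// Advances the cursor until `end_ch` is seen; the terminator itself is not
// consumed. With `escaped`, a backslash makes the following character
// literal, so an escaped `end_ch` does not stop the scan.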
void Scanner::ScanUntilImpl(char end_ch, bool escaped) {
for (;;) {
if (cur_.empty()) {
Error();
return;
}
const char ch = cur_[0];
if (ch == end_ch) {
return;
}
cur_.remove_prefix(1);
if (escaped && ch == '\\') {
if (cur_.empty()) {
Error();
return;
}
cur_.remove_prefix(1);
}
}
}
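// On success, optionally reports the unconsumed suffix and the captured span;
// returns false without touching the outputs when an error was recorded.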
bool Scanner::GetResult(absl::string_view* remaining,
absl::string_view* capture) {
if (error_) {
return false;
}
if (remaining != nullptr) {
*remaining = cur_;
}
if (capture != nullptr) {
const char* end = capture_end_ == nullptr ? cur_.data() : capture_end_;
*capture = absl::string_view(capture_start_, end - capture_start_);
}
return true;
}
}
} | #include "tsl/platform/scanner.h"
#include "tsl/platform/test.h"
namespace tsl {
namespace strings {
class ScannerTest : public ::testing::Test {
protected:
string ClassStr(Scanner::CharClass clz) {
string s;
for (int i = 0; i < 256; ++i) {
char ch = i;
if (Scanner::Matches(clz, ch)) {
s += ch;
}
}
return s;
}
};
TEST_F(ScannerTest, Any) {
absl::string_view remaining, match;
EXPECT_TRUE(Scanner(" horse0123")
.Any(Scanner::SPACE)
.Any(Scanner::DIGIT)
.Any(Scanner::LETTER)
.GetResult(&remaining, &match));
EXPECT_EQ(" horse", match);
EXPECT_EQ("0123", remaining);
EXPECT_TRUE(Scanner("")
.Any(Scanner::SPACE)
.Any(Scanner::DIGIT)
.Any(Scanner::LETTER)
.GetResult(&remaining, &match));
EXPECT_EQ("", remaining);
EXPECT_EQ("", match);
EXPECT_TRUE(Scanner("----")
.Any(Scanner::SPACE)
.Any(Scanner::DIGIT)
.Any(Scanner::LETTER)
.GetResult(&remaining, &match));
EXPECT_EQ("----", remaining);
EXPECT_EQ("", match);
}
TEST_F(ScannerTest, AnySpace) {
absl::string_view remaining, match;
EXPECT_TRUE(Scanner(" a b ")
.AnySpace()
.One(Scanner::LETTER)
.AnySpace()
.GetResult(&remaining, &match));
EXPECT_EQ(" a ", match);
EXPECT_EQ("b ", remaining);
}
TEST_F(ScannerTest, AnyEscapedNewline) {
absl::string_view remaining, match;
EXPECT_TRUE(Scanner("\\\n")
.Any(Scanner::LETTER_DIGIT_UNDERSCORE)
.GetResult(&remaining, &match));
EXPECT_EQ("\\\n", remaining);
EXPECT_EQ("", match);
}
TEST_F(ScannerTest, AnyEmptyString) {
absl::string_view remaining, match;
EXPECT_TRUE(Scanner("")
.Any(Scanner::LETTER_DIGIT_UNDERSCORE)
.GetResult(&remaining, &match));
EXPECT_EQ("", remaining);
EXPECT_EQ("", match);
}
TEST_F(ScannerTest, Eos) {
EXPECT_FALSE(Scanner("a").Eos().GetResult());
EXPECT_TRUE(Scanner("").Eos().GetResult());
EXPECT_FALSE(Scanner("abc").OneLiteral("ab").Eos().GetResult());
EXPECT_TRUE(Scanner("abc").OneLiteral("abc").Eos().GetResult());
}
TEST_F(ScannerTest, Many) {
absl::string_view remaining, match;
EXPECT_TRUE(Scanner("abc").Many(Scanner::LETTER).GetResult());
EXPECT_FALSE(Scanner("0").Many(Scanner::LETTER).GetResult());
EXPECT_FALSE(Scanner("").Many(Scanner::LETTER).GetResult());
EXPECT_TRUE(
Scanner("abc ").Many(Scanner::LETTER).GetResult(&remaining, &match));
EXPECT_EQ(" ", remaining);
EXPECT_EQ("abc", match);
EXPECT_TRUE(
Scanner("abc").Many(Scanner::LETTER).GetResult(&remaining, &match));
EXPECT_EQ("", remaining);
EXPECT_EQ("abc", match);
}
TEST_F(ScannerTest, One) {
absl::string_view remaining, match;
EXPECT_TRUE(Scanner("abc").One(Scanner::LETTER).GetResult());
EXPECT_FALSE(Scanner("0").One(Scanner::LETTER).GetResult());
EXPECT_FALSE(Scanner("").One(Scanner::LETTER).GetResult());
EXPECT_TRUE(Scanner("abc")
.One(Scanner::LETTER)
.One(Scanner::LETTER)
.GetResult(&remaining, &match));
EXPECT_EQ("c", remaining);
EXPECT_EQ("ab", match);
EXPECT_TRUE(Scanner("a").One(Scanner::LETTER).GetResult(&remaining, &match));
EXPECT_EQ("", remaining);
EXPECT_EQ("a", match);
}
TEST_F(ScannerTest, OneLiteral) {
EXPECT_FALSE(Scanner("abc").OneLiteral("abC").GetResult());
EXPECT_TRUE(Scanner("abc").OneLiteral("ab").OneLiteral("c").GetResult());
}
TEST_F(ScannerTest, ScanUntil) {
absl::string_view remaining, match;
EXPECT_TRUE(Scanner(R"(' \1 \2 \3 \' \\'rest)")
.OneLiteral("'")
.ScanUntil('\'')
.OneLiteral("'")
.GetResult(&remaining, &match));
EXPECT_EQ(R"( \\'rest)", remaining);
EXPECT_EQ(R"(' \1 \2 \3 \')", match);
remaining = match = "unset";
EXPECT_FALSE(Scanner(R"(' \1 \2 \3 \\rest)")
.OneLiteral("'")
.ScanUntil('\'')
.GetResult(&remaining, &match));
EXPECT_EQ("unset", remaining);
EXPECT_EQ("unset", match);
remaining = match = "";
EXPECT_TRUE(
Scanner(R"(123\456)").ScanUntil('\\').GetResult(&remaining, &match));
EXPECT_EQ(R"(\456)", remaining);
EXPECT_EQ("123", match);
}
TEST_F(ScannerTest, ScanEscapedUntil) {
absl::string_view remaining, match;
EXPECT_TRUE(Scanner(R"(' \1 \2 \3 \' \\'rest)")
.OneLiteral("'")
.ScanEscapedUntil('\'')
.OneLiteral("'")
.GetResult(&remaining, &match));
EXPECT_EQ("rest", remaining);
EXPECT_EQ(R"(' \1 \2 \3 \' \\')", match);
remaining = match = "unset";
EXPECT_FALSE(Scanner(R"(' \1 \2 \3 \' \\rest)")
.OneLiteral("'")
.ScanEscapedUntil('\'')
.GetResult(&remaining, &match));
EXPECT_EQ("unset", remaining);
EXPECT_EQ("unset", match);
}
TEST_F(ScannerTest, ZeroOrOneLiteral) {
absl::string_view remaining, match;
EXPECT_TRUE(
Scanner("abc").ZeroOrOneLiteral("abC").GetResult(&remaining, &match));
EXPECT_EQ("abc", remaining);
EXPECT_EQ("", match);
EXPECT_TRUE(
Scanner("abcd").ZeroOrOneLiteral("ab").ZeroOrOneLiteral("c").GetResult(
&remaining, &match));
EXPECT_EQ("d", remaining);
EXPECT_EQ("abc", match);
EXPECT_TRUE(
Scanner("").ZeroOrOneLiteral("abc").GetResult(&remaining, &match));
EXPECT_EQ("", remaining);
EXPECT_EQ("", match);
}
TEST_F(ScannerTest, CaptureAndGetResult) {
absl::string_view remaining, match;
Scanner scan(" first second");
EXPECT_TRUE(scan.Any(Scanner::SPACE)
.RestartCapture()
.One(Scanner::LETTER)
.Any(Scanner::LETTER_DIGIT)
.StopCapture()
.Any(Scanner::SPACE)
.GetResult(&remaining, &match));
EXPECT_EQ("second", remaining);
EXPECT_EQ("first", match);
EXPECT_TRUE(scan.GetResult());
remaining = "";
EXPECT_TRUE(scan.GetResult(&remaining));
EXPECT_EQ("second", remaining);
remaining = "";
match = "";
EXPECT_TRUE(scan.GetResult(&remaining, &match));
EXPECT_EQ("second", remaining);
EXPECT_EQ("first", match);
scan.RestartCapture().One(Scanner::LETTER).One(Scanner::LETTER);
remaining = "";
match = "";
EXPECT_TRUE(scan.GetResult(&remaining, &match));
EXPECT_EQ("cond", remaining);
EXPECT_EQ("se", match);
}
TEST_F(ScannerTest, MultipleGetResultExtendsCapture) {
absl::string_view remaining, match;
Scanner scan("one2three");
EXPECT_TRUE(scan.Many(Scanner::LETTER).GetResult(&remaining, &match));
EXPECT_EQ("2three", remaining);
EXPECT_EQ("one", match);
EXPECT_TRUE(scan.Many(Scanner::DIGIT).GetResult(&remaining, &match));
EXPECT_EQ("three", remaining);
EXPECT_EQ("one2", match);
EXPECT_TRUE(scan.Many(Scanner::LETTER).GetResult(&remaining, &match));
EXPECT_EQ("", remaining);
EXPECT_EQ("one2three", match);
}
TEST_F(ScannerTest, FailedMatchDoesntChangeResult) {
Scanner scan("name");
absl::string_view remaining = "rem";
absl::string_view match = "match";
EXPECT_FALSE(scan.One(Scanner::SPACE).GetResult(&remaining, &match));
EXPECT_EQ("rem", remaining);
EXPECT_EQ("match", match);
}
TEST_F(ScannerTest, DefaultCapturesAll) {
Scanner scan("a b");
absl::string_view remaining = "rem";
absl::string_view match = "match";
EXPECT_TRUE(scan.Any(Scanner::LETTER)
.AnySpace()
.Any(Scanner::LETTER)
.GetResult(&remaining, &match));
EXPECT_EQ("", remaining);
EXPECT_EQ("a b", match);
}
TEST_F(ScannerTest, AllCharClasses) {
EXPECT_EQ(256, ClassStr(Scanner::ALL).size());
EXPECT_EQ("0123456789", ClassStr(Scanner::DIGIT));
EXPECT_EQ("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz",
ClassStr(Scanner::LETTER));
EXPECT_EQ("0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz",
ClassStr(Scanner::LETTER_DIGIT));
EXPECT_EQ(
"-0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ_"
"abcdefghijklmnopqrstuvwxyz",
ClassStr(Scanner::LETTER_DIGIT_DASH_UNDERSCORE));
EXPECT_EQ(
"-./0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
"abcdefghijklmnopqrstuvwxyz",
ClassStr(Scanner::LETTER_DIGIT_DASH_DOT_SLASH));
EXPECT_EQ(
"-./0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ_"
"abcdefghijklmnopqrstuvwxyz",
ClassStr(Scanner::LETTER_DIGIT_DASH_DOT_SLASH_UNDERSCORE));
EXPECT_EQ(".0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz",
ClassStr(Scanner::LETTER_DIGIT_DOT));
EXPECT_EQ("+-.0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz",
ClassStr(Scanner::LETTER_DIGIT_DOT_PLUS_MINUS));
EXPECT_EQ(".0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ_abcdefghijklmnopqrstuvwxyz",
ClassStr(Scanner::LETTER_DIGIT_DOT_UNDERSCORE));
EXPECT_EQ("0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ_abcdefghijklmnopqrstuvwxyz",
ClassStr(Scanner::LETTER_DIGIT_UNDERSCORE));
EXPECT_EQ("abcdefghijklmnopqrstuvwxyz", ClassStr(Scanner::LOWERLETTER));
EXPECT_EQ("0123456789abcdefghijklmnopqrstuvwxyz",
ClassStr(Scanner::LOWERLETTER_DIGIT));
EXPECT_EQ("0123456789_abcdefghijklmnopqrstuvwxyz",
ClassStr(Scanner::LOWERLETTER_DIGIT_UNDERSCORE));
EXPECT_EQ("123456789", ClassStr(Scanner::NON_ZERO_DIGIT));
EXPECT_EQ("\t\n\v\f\r ", ClassStr(Scanner::SPACE));
EXPECT_EQ("ABCDEFGHIJKLMNOPQRSTUVWXYZ", ClassStr(Scanner::UPPERLETTER));
EXPECT_EQ(">", ClassStr(Scanner::RANGLE));
}
TEST_F(ScannerTest, Peek) {
EXPECT_EQ('a', Scanner("abc").Peek());
EXPECT_EQ('a', Scanner("abc").Peek('b'));
EXPECT_EQ('\0', Scanner("").Peek());
EXPECT_EQ('z', Scanner("").Peek('z'));
EXPECT_EQ('A', Scanner("0123A").Any(Scanner::DIGIT).Peek());
EXPECT_EQ('\0', Scanner("0123A").Any(Scanner::LETTER_DIGIT).Peek());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/scanner.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/scanner_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
2facd601-ccda-4a4e-bc77-bdae4246546d | cpp | tensorflow/tensorflow | in_topk_op | tensorflow/compiler/tf2xla/kernels/in_topk_op.cc | tensorflow/core/kernels/in_topk_op_test.cc | #include "tensorflow/compiler/tf2xla/type_util.h"
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "xla/hlo/builder/lib/arithmetic.h"
#include "xla/hlo/builder/lib/constants.h"
#include "xla/hlo/builder/xla_builder.h"
#include "xla/xla_data.pb.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/op_requires.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace {
class InTopKOp : public XlaOpKernel {
public:
explicit InTopKOp(OpKernelConstruction* context) : XlaOpKernel(context) {
OP_REQUIRES_OK(context, context->GetAttr("T", &targets_dtype_));
OP_REQUIRES_OK(context,
DataTypeToPrimitiveType(targets_dtype_, &targets_type_));
}
void Compile(XlaOpKernelContext* context) override {
int64_t k;
OP_REQUIRES_OK(context, context->ConstantInputAsIntScalar(2, &k));
OP_REQUIRES(context, k >= 0,
errors::InvalidArgument("Need k >= 0, got ", k));
const TensorShape predictions_shape = context->InputShape(0);
OP_REQUIRES(
context, predictions_shape.dims() == 2,
errors::InvalidArgument("predictions must be == 2-D, got shape ",
predictions_shape.DebugString()));
const TensorShape targets_shape = context->InputShape(1);
OP_REQUIRES(context, targets_shape.dims() == 1,
errors::InvalidArgument("targets must be == 1-D, got shape ",
targets_shape.DebugString()));
int64_t batch_size = predictions_shape.dim_size(0);
OP_REQUIRES(context, batch_size == targets_shape.dim_size(0),
errors::InvalidArgument(
"targets must have same elements as predictions rows. Had ",
targets_shape.dim_size(0), ", needed ", batch_size));
xla::XlaOp predictions_r2 = context->Input(0);
xla::XlaOp targets_r1 = context->Input(1);
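    // Rather than sorting, extract each row's target prediction via a one-hot
    // mask, count the predictions strictly greater than it, and report
    // membership in the top k iff that count is < k and the value is finite.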
xla::XlaBuilder* xla_builder = context->builder();
xla::XlaOp iota_r1 =
xla::Iota(xla_builder, targets_type_, predictions_shape.dim_size(1));
xla::XlaOp iota_r2 = xla::Broadcast(iota_r1, {batch_size});
xla::XlaOp eq_r2 = xla::Eq(targets_r1, iota_r2, {0});
xla::XlaOp zero_r0_f32 = xla::Zero(xla_builder, xla::F32);
xla::XlaOp zero_r2_f32 = xla::ZerosLike(predictions_r2);
xla::XlaOp select_r2 = xla::Select(eq_r2, predictions_r2, zero_r2_f32);
xla::XlaOp targets_values_r1 = xla::Reduce(
select_r2, zero_r0_f32,
xla::CreateScalarAddComputation(xla::F32, xla_builder), {1});
xla::XlaOp gt_r2 = xla::Gt(predictions_r2, targets_values_r1, {0});
xla::XlaOp zero_r0 = xla::Zero(xla_builder, xla::S32);
xla::XlaOp zero_r2 = xla::Broadcast(zero_r0, predictions_shape.dim_sizes());
xla::XlaOp one_r0 = xla::One(xla_builder, xla::S32);
xla::XlaOp one_r2 = xla::Broadcast(one_r0, predictions_shape.dim_sizes());
xla::XlaOp one_hot_r2 = xla::Select(gt_r2, one_r2, zero_r2);
xla::XlaOp num_gt_r1 = xla::Reduce(
one_hot_r2, zero_r0,
xla::CreateScalarAddComputation(xla::S32, xla_builder), {1});
xla::XlaOp result =
xla::And(xla::Lt(num_gt_r1, xla::ConstantR0<int32>(xla_builder, k)),
xla::IsFinite(targets_values_r1));
context->SetOutput(0, result);
}
protected:
DataType targets_dtype_;
xla::PrimitiveType targets_type_;
InTopKOp(const InTopKOp&) = delete;
void operator=(const InTopKOp&) = delete;
};
REGISTER_XLA_OP(Name("InTopKV2")
.CompileTimeConstantInput("k")
.TypeConstraint("T", {DT_INT32, DT_INT64}),
InTopKOp);
}
} | #include <vector>
#include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
namespace tensorflow {
template <typename T>
static Graph* InTopK(int num_targets, int num_classes, T top_k) {
Graph* g = new Graph(OpRegistry::Global());
DataType dtype = DataTypeToEnum<T>::value;
Tensor predictions_t(DT_FLOAT, TensorShape({num_targets, num_classes}));
predictions_t.flat<float>().setRandom();
Tensor targets_t(dtype, TensorShape({num_targets}));
targets_t.flat<T>().setRandom();
Tensor k_t(dtype, TensorShape({}));
k_t.scalar<T>() = k_t.scalar<T>().constant(top_k);
Node* predictions = test::graph::Constant(g, predictions_t, "predictions");
Node* targets = test::graph::Constant(g, targets_t, "targets");
Node* k = test::graph::Constant(g, k_t, "k");
Node* in_topk;
TF_CHECK_OK(NodeBuilder(g->NewName("in_topk"), "InTopKV2")
.Input(predictions)
.Input(targets)
.Input(k)
.Attr("T", dtype)
.Finalize(g, &in_topk));
return g;
}
#define BM_NAME(T, TARGETS, CLASSES, K, DEVICE) \
BM_InTopK##_##T##_##TARGETS##_##CLASSES##_##K##_##DEVICE
#define BM_InTopK(T, TARGETS, CLASSES, K, DEVICE) \
static void BM_NAME(T, TARGETS, CLASSES, K, \
DEVICE)(::testing::benchmark::State & state) { \
test::Benchmark(#DEVICE, InTopK<T>(TARGETS, CLASSES, K), \
false) \
.Run(state); \
state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * \
TARGETS * CLASSES); \
} \
BENCHMARK(BM_NAME(T, TARGETS, CLASSES, K, DEVICE))->UseRealTime();
BM_InTopK(int64_t, 64, 1000, 10, cpu);
BM_InTopK(int64_t, 64, 10000, 10, cpu);
#if defined(GOOGLE_CUDA) || defined(TENSORFLOW_USE_ROCM)
BM_InTopK(int64_t, 64, 1000, 10, gpu);
BM_InTopK(int64_t, 64, 10000, 10, gpu);
#endif
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/kernels/in_topk_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/in_topk_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
5a3a6fb6-e758-4bea-854b-db3f3cb2cf0e | cpp | tensorflow/tensorflow | space_to_depth | tensorflow/lite/delegates/gpu/gl/kernels/space_to_depth.cc | tensorflow/lite/delegates/xnnpack/space_to_depth_test.cc | #include "tensorflow/lite/delegates/gpu/gl/kernels/space_to_depth.h"
#include <any>
#include <memory>
#include <string>
#include <utility>
#include "absl/memory/memory.h"
#include "absl/types/any.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/gl/node_shader.h"
namespace tflite {
namespace gpu {
namespace gl {
namespace {
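// Gathers each block_size x block_size spatial block into the channel
// dimension: every output channel slot maps back to a (src_x, src_y, src_c)
// coordinate in the input.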
class SpaceToDepth : public NodeShader {
public:
absl::Status GenerateCode(const GenerationContext& ctx,
GeneratedCode* generated_code) const final {
const auto& attr =
std::any_cast<const SpaceToDepthAttributes&>(ctx.op_attr);
std::string code = R"(
for (int i = 0; i < 4; ++i) {
int dst_c = 4 * gid.z + i;
int block_id = dst_c / $input_data_0_c$;
int src_x = gid.x * $block_size$ + block_id % $block_size$;
int src_y = gid.y * $block_size$ + block_id / $block_size$;
int src_c = dst_c % $input_data_0_c$;
value_0[i] = $input_data_0[src_x, src_y, src_c / 4]$[src_c % 4];
}
)";
*generated_code = {
{
{"block_size", attr.block_size},
{"input_data_0_c", static_cast<int>(ctx.input_shapes[0][3])},
},
{},
{},
uint3(),
uint3(),
std::move(code),
IOStructure::ONLY_DEFINITIONS,
IOStructure::AUTO,
};
return absl::OkStatus();
}
};
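// Inverse of SpaceToDepth: spreads groups of channels back out over a
// block_size x block_size spatial neighborhood.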
class DepthToSpace : public NodeShader {
public:
absl::Status GenerateCode(const GenerationContext& ctx,
GeneratedCode* generated_code) const final {
const auto& attr =
std::any_cast<const SpaceToDepthAttributes&>(ctx.op_attr);
std::string code = R"(
for (int i = 0; i < 4; ++i) {
int dst_c = 4 * gid.z + i;
int block_x = gid.x % $block_size$;
int src_x = gid.x / $block_size$;
int block_y = gid.y % $block_size$;
int src_y = gid.y / $block_size$;
int block_id = block_y * $block_size$ + block_x;
int src_c = block_id * $output_channels$ + dst_c;
value_0[i] = $input_data_0[src_x, src_y, src_c / 4]$[src_c % 4];
}
)";
*generated_code = {
{
{"block_size", attr.block_size},
{"output_channels", static_cast<int>(ctx.output_shapes[0][3])},
},
{},
{},
uint3(),
uint3(),
std::move(code),
IOStructure::ONLY_DEFINITIONS,
IOStructure::AUTO,
};
return absl::OkStatus();
}
};
}
std::unique_ptr<NodeShader> NewSpaceToDepthNodeShader() {
return std::make_unique<SpaceToDepth>();
}
std::unique_ptr<NodeShader> NewDepthToSpaceNodeShader() {
return std::make_unique<DepthToSpace>();
}
}
}
} | #include <algorithm>
#include <cstdint>
#include <functional>
#include <memory>
#include <random>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/xnnpack/space_to_depth_tester.h"
#include "tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h"
namespace tflite::xnnpack {
namespace {
TEST(SpaceToDepth, SinglePixel) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto batch_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 4), std::ref(rng));
auto block_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 3), std::ref(rng));
auto channel_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 16), std::ref(rng));
const int32_t block_size = block_rng();
SpaceToDepthTester()
.BatchSize(batch_rng())
.InputHeight(block_size)
.InputWidth(block_size)
.InputChannels(channel_rng())
.BlockSize(block_size)
.Test(TensorType_FLOAT32, xnnpack_delegate.get());
}
TEST(SpaceToDepth, SingleRow) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto batch_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 4), std::ref(rng));
auto width_rng =
std::bind(std::uniform_int_distribution<int32_t>(5, 25), std::ref(rng));
auto block_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 3), std::ref(rng));
auto channel_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 16), std::ref(rng));
const int32_t block_size = block_rng();
SpaceToDepthTester()
.BatchSize(batch_rng())
.InputHeight(block_size)
.InputWidth(width_rng() * block_size)
.InputChannels(channel_rng())
.BlockSize(block_size)
.Test(TensorType_FLOAT32, xnnpack_delegate.get());
}
TEST(SpaceToDepth, SingleColumn) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto batch_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 4), std::ref(rng));
auto height_rng =
std::bind(std::uniform_int_distribution<int32_t>(5, 25), std::ref(rng));
auto block_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 3), std::ref(rng));
auto channel_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 16), std::ref(rng));
const int32_t block_size = block_rng();
SpaceToDepthTester()
.BatchSize(batch_rng())
.InputHeight(height_rng() * block_size)
.InputWidth(block_size)
.InputChannels(channel_rng())
.BlockSize(block_size)
.Test(TensorType_FLOAT32, xnnpack_delegate.get());
}
TEST(SpaceToDepth, FullImage) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto batch_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 4), std::ref(rng));
auto size_rng =
std::bind(std::uniform_int_distribution<int32_t>(5, 25), std::ref(rng));
auto block_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 3), std::ref(rng));
auto channel_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 16), std::ref(rng));
const int32_t block_size = block_rng();
SpaceToDepthTester()
.BatchSize(batch_rng())
.InputHeight(size_rng() * block_size)
.InputWidth(size_rng() * block_size)
.InputChannels(channel_rng())
.BlockSize(block_size)
.Test(TensorType_FLOAT32, xnnpack_delegate.get());
}
TEST(SpaceToDepth, MultiThreading) {
TfLiteXNNPackDelegateOptions delegate_options =
TfLiteXNNPackDelegateOptionsDefault();
delegate_options.num_threads = 2;
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(&delegate_options),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto batch_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 4), std::ref(rng));
auto size_rng =
std::bind(std::uniform_int_distribution<int32_t>(5, 25), std::ref(rng));
auto block_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 3), std::ref(rng));
auto channel_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 16), std::ref(rng));
const int32_t block_size = block_rng();
SpaceToDepthTester()
.BatchSize(batch_rng())
.InputHeight(size_rng() * block_size)
.InputWidth(size_rng() * block_size)
.InputChannels(channel_rng())
.BlockSize(block_size)
.Test(TensorType_FLOAT32, xnnpack_delegate.get());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/gl/kernels/space_to_depth.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/xnnpack/space_to_depth_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
c05f1a72-d7c8-40ca-9a21-d6f1b89f5fcc | cpp | tensorflow/tensorflow | gen_op_registration | tensorflow/lite/tools/gen_op_registration.cc | tensorflow/lite/tools/gen_op_registration_test.cc | #include "tensorflow/lite/tools/gen_op_registration.h"
#include <algorithm>
#include <string>
#include <vector>
#include "re2/re2.h"
#include "tensorflow/lite/core/model.h"
#include "tensorflow/lite/schema/schema_utils.h"
namespace tflite {
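// Converts a custom op name such as "CustomOp" into the upper-snake-case
// form used by registration macros ("CUSTOM_OP").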
string NormalizeCustomOpName(const string& op) {
string method(op);
RE2::GlobalReplace(&method, "([a-z])([A-Z])", "\\1_\\2");
std::transform(method.begin(), method.end(), method.begin(), ::toupper);
return method;
}
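// Accumulates the builtin and custom operators used by `model`, tracking the
// minimum and maximum version seen for each op across repeated calls.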
void ReadOpsFromModel(const ::tflite::Model* model,
tflite::RegisteredOpMap* builtin_ops,
tflite::RegisteredOpMap* custom_ops) {
if (!model) return;
auto opcodes = model->operator_codes();
if (!opcodes) return;
for (const auto* opcode : *opcodes) {
const int version = opcode->version();
auto builtin_code = GetBuiltinCode(opcode);
if (builtin_code != ::tflite::BuiltinOperator_CUSTOM) {
auto iter_and_bool = builtin_ops->insert(
std::make_pair(tflite::EnumNameBuiltinOperator(builtin_code),
std::make_pair(version, version)));
auto& versions = iter_and_bool.first->second;
versions.first = std::min(versions.first, version);
versions.second = std::max(versions.second, version);
} else {
auto iter_and_bool = custom_ops->insert(std::make_pair(
opcode->custom_code()->c_str(), std::make_pair(version, version)));
auto& versions = iter_and_bool.first->second;
versions.first = std::min(versions.first, version);
versions.second = std::max(versions.second, version);
}
}
}
} | #include "tensorflow/lite/tools/gen_op_registration.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
using ::testing::ElementsAreArray;
namespace tflite {
class GenOpRegistrationTest : public ::testing::Test {
protected:
GenOpRegistrationTest() {}
void ReadOps(const string& model_path) {
auto model = FlatBufferModel::BuildFromFile(model_path.data());
if (model) {
ReadOpsFromModel(model->GetModel(), &builtin_ops_, &custom_ops_);
}
}
std::map<string, std::pair<int, int>> builtin_ops_;
std::map<string, std::pair<int, int>> custom_ops_;
};
TEST_F(GenOpRegistrationTest, TestNonExistentFiles) {
ReadOps("/tmp/tflite_model_1234");
EXPECT_EQ(builtin_ops_.size(), 0);
EXPECT_EQ(custom_ops_.size(), 0);
}
TEST_F(GenOpRegistrationTest, TestModels) {
ReadOps("tensorflow/lite/testdata/test_model.bin");
RegisteredOpMap builtin_expected{{"CONV_2D", {1, 1}}};
RegisteredOpMap custom_expected{{"testing_op", {1, 1}}};
EXPECT_THAT(builtin_ops_, ElementsAreArray(builtin_expected));
EXPECT_THAT(custom_ops_, ElementsAreArray(custom_expected));
}
TEST_F(GenOpRegistrationTest, TestVersionedModels) {
ReadOps("tensorflow/lite/testdata/test_model_versioned_ops.bin");
RegisteredOpMap builtin_expected{{"CONV_2D", {3, 3}}};
RegisteredOpMap custom_expected{{"testing_op", {2, 2}}};
EXPECT_THAT(builtin_ops_, ElementsAreArray(builtin_expected));
EXPECT_THAT(custom_ops_, ElementsAreArray(custom_expected));
}
TEST_F(GenOpRegistrationTest, TestBothModels) {
ReadOps("tensorflow/lite/testdata/test_model.bin");
ReadOps("tensorflow/lite/testdata/test_model_versioned_ops.bin");
RegisteredOpMap builtin_expected{{"CONV_2D", {1, 3}}};
RegisteredOpMap custom_expected{{"testing_op", {1, 2}}};
EXPECT_THAT(builtin_ops_, ElementsAreArray(builtin_expected));
EXPECT_THAT(custom_ops_, ElementsAreArray(custom_expected));
}
TEST_F(GenOpRegistrationTest, TestEmptyModels) {
ReadOps("tensorflow/lite/testdata/empty_model.bin");
EXPECT_EQ(builtin_ops_.size(), 0);
EXPECT_EQ(custom_ops_.size(), 0);
}
TEST_F(GenOpRegistrationTest, TestZeroSubgraphs) {
ReadOps("tensorflow/lite/testdata/0_subgraphs.bin");
EXPECT_EQ(builtin_ops_.size(), 0);
EXPECT_EQ(custom_ops_.size(), 0);
}
TEST_F(GenOpRegistrationTest, TestBrokenMmap) {
ReadOps("tensorflow/lite/testdata/test_model_broken.bin");
EXPECT_EQ(builtin_ops_.size(), 0);
EXPECT_EQ(custom_ops_.size(), 0);
}
TEST_F(GenOpRegistrationTest, TestNormalizeCustomOpName) {
std::vector<std::pair<string, string>> testcase = {
{"CustomOp", "CUSTOM_OP"},
{"a", "A"},
{"custom_op", "CUSTOM_OP"},
{"customop", "CUSTOMOP"},
};
for (const auto& test : testcase) {
EXPECT_EQ(NormalizeCustomOpName(test.first), test.second);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/tools/gen_op_registration.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/tools/gen_op_registration_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
de709be8-4825-4ab0-be7a-004f6ea800d0 | cpp | google/cel-cpp | type_checker_impl | checker/internal/type_checker_impl.cc | checker/internal/type_checker_impl_test.cc | #include "checker/internal/type_checker_impl.h"
#include <cstddef>
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/base/no_destructor.h"
#include "absl/base/nullability.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "absl/time/time.h"
#include "absl/types/optional.h"
#include "absl/types/span.h"
#include "base/ast_internal/ast_impl.h"
#include "base/ast_internal/expr.h"
#include "checker/internal/builtins_arena.h"
#include "checker/internal/namespace_generator.h"
#include "checker/internal/type_check_env.h"
#include "checker/internal/type_inference_context.h"
#include "checker/type_check_issue.h"
#include "checker/validation_result.h"
#include "common/ast.h"
#include "common/ast_rewrite.h"
#include "common/ast_traverse.h"
#include "common/ast_visitor.h"
#include "common/ast_visitor_base.h"
#include "common/constant.h"
#include "common/decl.h"
#include "common/expr.h"
#include "common/memory.h"
#include "common/source.h"
#include "common/type.h"
#include "common/type_factory.h"
#include "common/type_kind.h"
#include "extensions/protobuf/memory_manager.h"
#include "internal/status_macros.h"
#include "google/protobuf/arena.h"
namespace cel::checker_internal {
namespace {
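// Arena-backed TypeFactory used for the lifetime of a single Check() call.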
class TrivialTypeFactory : public TypeFactory {
public:
explicit TrivialTypeFactory(absl::Nonnull<google::protobuf::Arena*> arena)
: arena_(arena) {}
MemoryManagerRef GetMemoryManager() const override {
return extensions::ProtoMemoryManagerRef(arena_);
}
private:
absl::Nonnull<google::protobuf::Arena*> arena_;
};
using cel::ast_internal::AstImpl;
using AstType = cel::ast_internal::Type;
using Severity = TypeCheckIssue::Severity;
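// Singleton list and map types with free type parameters, used as templates
// when inferring the type of empty list and map literals.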
Type FreeListType() {
static absl::NoDestructor<Type> kInstance(
Type(ListType(BuiltinsArena(), TypeParamType("element_type"))));
return *kInstance;
}
Type FreeMapType() {
static absl::NoDestructor<Type> kInstance(
Type(MapType(BuiltinsArena(), TypeParamType("key_type"),
TypeParamType("value_type"))));
return *kInstance;
}
std::string FormatCandidate(absl::Span<const std::string> qualifiers) {
return absl::StrJoin(qualifiers, ".");
}
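// Maps an expression id to a 1-based line/column location using the
// parser-recorded absolute offset and the per-line offsets.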
SourceLocation ComputeSourceLocation(const AstImpl& ast, int64_t expr_id) {
const auto& source_info = ast.source_info();
auto iter = source_info.positions().find(expr_id);
if (iter == source_info.positions().end()) {
return SourceLocation{};
}
int32_t absolute_position = iter->second;
int32_t line_idx = -1;
  for (int32_t offset : source_info.line_offsets()) {
    // The first line offset past the position starts the next line, so the
    // position belongs to the line counted so far.
    if (absolute_position < offset) {
break;
}
++line_idx;
}
if (line_idx < 0 || line_idx >= source_info.line_offsets().size()) {
return SourceLocation{1, absolute_position};
}
int32_t rel_position =
absolute_position - source_info.line_offsets()[line_idx] + 1;
return SourceLocation{line_idx + 1, rel_position};
}
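// Converts the checker's rich Type representation into the flat proto-based
// AST type stored in the checked expression's type map.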
absl::StatusOr<AstType> FlattenType(const Type& type);
absl::StatusOr<AstType> FlattenAbstractType(const OpaqueType& type) {
std::vector<AstType> parameter_types;
parameter_types.reserve(type.GetParameters().size());
for (const auto& param : type.GetParameters()) {
CEL_ASSIGN_OR_RETURN(auto param_type, FlattenType(param));
parameter_types.push_back(std::move(param_type));
}
return AstType(ast_internal::AbstractType(std::string(type.name()),
std::move(parameter_types)));
}
absl::StatusOr<AstType> FlattenMapType(const MapType& type) {
CEL_ASSIGN_OR_RETURN(auto key, FlattenType(type.key()));
CEL_ASSIGN_OR_RETURN(auto value, FlattenType(type.value()));
return AstType(
ast_internal::MapType(std::make_unique<AstType>(std::move(key)),
std::make_unique<AstType>(std::move(value))));
}
absl::StatusOr<AstType> FlattenListType(const ListType& type) {
CEL_ASSIGN_OR_RETURN(auto elem, FlattenType(type.element()));
return AstType(
ast_internal::ListType(std::make_unique<AstType>(std::move(elem))));
}
absl::StatusOr<AstType> FlattenMessageType(const StructType& type) {
return AstType(ast_internal::MessageType(std::string(type.name())));
}
absl::StatusOr<AstType> FlattenTypeType(const TypeType& type) {
if (type.GetParameters().size() > 1) {
return absl::InternalError(
absl::StrCat("Unsupported type: ", type.DebugString()));
}
if (type.GetParameters().empty()) {
return AstType(std::make_unique<AstType>());
}
CEL_ASSIGN_OR_RETURN(auto param, FlattenType(type.GetParameters()[0]));
return AstType(std::make_unique<AstType>(std::move(param)));
}
absl::StatusOr<AstType> FlattenType(const Type& type) {
switch (type.kind()) {
case TypeKind::kDyn:
return AstType(ast_internal::DynamicType());
case TypeKind::kError:
return AstType(ast_internal::ErrorType());
case TypeKind::kNull:
return AstType(ast_internal::NullValue());
case TypeKind::kBool:
return AstType(ast_internal::PrimitiveType::kBool);
case TypeKind::kInt:
return AstType(ast_internal::PrimitiveType::kInt64);
case TypeKind::kUint:
return AstType(ast_internal::PrimitiveType::kUint64);
case TypeKind::kDouble:
return AstType(ast_internal::PrimitiveType::kDouble);
case TypeKind::kString:
return AstType(ast_internal::PrimitiveType::kString);
case TypeKind::kBytes:
return AstType(ast_internal::PrimitiveType::kBytes);
case TypeKind::kDuration:
return AstType(ast_internal::WellKnownType::kDuration);
case TypeKind::kTimestamp:
return AstType(ast_internal::WellKnownType::kTimestamp);
case TypeKind::kStruct:
return FlattenMessageType(type.GetStruct());
case TypeKind::kList:
return FlattenListType(type.GetList());
case TypeKind::kMap:
return FlattenMapType(type.GetMap());
case TypeKind::kOpaque:
return FlattenAbstractType(type.GetOpaque());
case TypeKind::kBoolWrapper:
return AstType(ast_internal::PrimitiveTypeWrapper(
ast_internal::PrimitiveType::kBool));
case TypeKind::kIntWrapper:
return AstType(ast_internal::PrimitiveTypeWrapper(
ast_internal::PrimitiveType::kInt64));
case TypeKind::kUintWrapper:
return AstType(ast_internal::PrimitiveTypeWrapper(
ast_internal::PrimitiveType::kUint64));
case TypeKind::kDoubleWrapper:
return AstType(ast_internal::PrimitiveTypeWrapper(
ast_internal::PrimitiveType::kDouble));
case TypeKind::kStringWrapper:
return AstType(ast_internal::PrimitiveTypeWrapper(
ast_internal::PrimitiveType::kString));
case TypeKind::kBytesWrapper:
return AstType(ast_internal::PrimitiveTypeWrapper(
ast_internal::PrimitiveType::kBytes));
case TypeKind::kTypeParam:
return AstType(ast_internal::DynamicType());
case TypeKind::kType:
return FlattenTypeType(type.GetType());
case TypeKind::kAny:
return AstType(ast_internal::WellKnownType::kAny);
default:
return absl::InternalError(
absl::StrCat("Unsupported type: ", type.DebugString()));
}
}
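// AST visitor that resolves identifier, function, and struct references
// against the type-check environment and records a deduced type for each
// visited expression.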
class ResolveVisitor : public AstVisitorBase {
public:
struct FunctionResolution {
const FunctionDecl* decl;
bool namespace_rewrite;
};
ResolveVisitor(absl::string_view container,
NamespaceGenerator namespace_generator,
const TypeCheckEnv& env, const AstImpl& ast,
TypeInferenceContext& inference_context,
std::vector<TypeCheckIssue>& issues,
absl::Nonnull<google::protobuf::Arena*> arena, TypeFactory& type_factory)
: container_(container),
namespace_generator_(std::move(namespace_generator)),
env_(&env),
inference_context_(&inference_context),
issues_(&issues),
ast_(&ast),
root_scope_(env.MakeVariableScope()),
arena_(arena),
type_factory_(&type_factory),
current_scope_(&root_scope_) {}
void PreVisitExpr(const Expr& expr) override { expr_stack_.push_back(&expr); }
void PostVisitExpr(const Expr& expr) override {
if (expr_stack_.empty()) {
return;
}
expr_stack_.pop_back();
}
void PostVisitConst(const Expr& expr, const Constant& constant) override;
void PreVisitComprehension(const Expr& expr,
const ComprehensionExpr& comprehension) override;
void PostVisitComprehension(const Expr& expr,
const ComprehensionExpr& comprehension) override;
void PostVisitMap(const Expr& expr, const MapExpr& map) override;
void PostVisitList(const Expr& expr, const ListExpr& list) override;
void PreVisitComprehensionSubexpression(
const Expr& expr, const ComprehensionExpr& comprehension,
ComprehensionArg comprehension_arg) override;
void PostVisitComprehensionSubexpression(
const Expr& expr, const ComprehensionExpr& comprehension,
ComprehensionArg comprehension_arg) override;
void PostVisitIdent(const Expr& expr, const IdentExpr& ident) override;
void PostVisitSelect(const Expr& expr, const SelectExpr& select) override;
void PostVisitCall(const Expr& expr, const CallExpr& call) override;
void PostVisitStruct(const Expr& expr,
const StructExpr& create_struct) override;
const absl::flat_hash_map<const Expr*, FunctionResolution>& functions()
const {
return functions_;
}
const absl::flat_hash_map<const Expr*, const VariableDecl*>& attributes()
const {
return attributes_;
}
const absl::flat_hash_map<const Expr*, std::string>& struct_types() const {
return struct_types_;
}
const absl::flat_hash_map<const Expr*, Type>& types() const { return types_; }
const absl::Status& status() const { return status_; }
private:
struct ComprehensionScope {
const Expr* comprehension_expr;
const VariableScope* parent;
VariableScope* accu_scope;
VariableScope* iter_scope;
};
struct FunctionOverloadMatch {
Type result_type;
const FunctionDecl* decl;
};
void ResolveSimpleIdentifier(const Expr& expr, absl::string_view name);
void ResolveQualifiedIdentifier(const Expr& expr,
absl::Span<const std::string> qualifiers);
const FunctionDecl* ResolveFunctionCallShape(const Expr& expr,
absl::string_view function_name,
int arg_count, bool is_receiver);
absl::Nullable<const VariableDecl*> LookupIdentifier(absl::string_view name);
void ResolveFunctionOverloads(const Expr& expr, const FunctionDecl& decl,
int arg_count, bool is_receiver,
bool is_namespaced);
void ResolveSelectOperation(const Expr& expr, absl::string_view field,
const Expr& operand);
void ReportMissingReference(const Expr& expr, absl::string_view name) {
issues_->push_back(TypeCheckIssue::CreateError(
ComputeSourceLocation(*ast_, expr.id()),
absl::StrCat("undeclared reference to '", name, "' (in container '",
container_, "')")));
}
void ReportUndefinedField(int64_t expr_id, absl::string_view field_name,
absl::string_view struct_name) {
issues_->push_back(TypeCheckIssue::CreateError(
ComputeSourceLocation(*ast_, expr_id),
absl::StrCat("undefined field '", field_name, "' not found in struct '",
struct_name, "'")));
}
absl::Status CheckFieldAssignments(const Expr& expr,
const StructExpr& create_struct,
Type struct_type,
absl::string_view resolved_name) {
for (const auto& field : create_struct.fields()) {
const Expr* value = &field.value();
Type value_type = GetTypeOrDyn(value);
CEL_ASSIGN_OR_RETURN(
absl::optional<StructTypeField> field_info,
env_->LookupStructField(*type_factory_, resolved_name, field.name()));
if (!field_info.has_value()) {
ReportUndefinedField(field.id(), field.name(), resolved_name);
continue;
}
Type field_type = field_info->GetType();
if (field.optional()) {
field_type = OptionalType(arena_, field_type);
}
if (!inference_context_->IsAssignable(value_type, field_type)) {
issues_->push_back(TypeCheckIssue::CreateError(
ComputeSourceLocation(*ast_, field.id()),
absl::StrCat("expected type of field '", field_info->name(),
"' is '", field_type.DebugString(),
"' but provided type is '", value_type.DebugString(),
"'")));
continue;
}
}
return absl::OkStatus();
}
Type GetTypeOrDyn(const Expr* expr) {
auto iter = types_.find(expr);
return iter != types_.end() ? iter->second : DynType();
}
absl::string_view container_;
NamespaceGenerator namespace_generator_;
absl::Nonnull<const TypeCheckEnv*> env_;
absl::Nonnull<TypeInferenceContext*> inference_context_;
absl::Nonnull<std::vector<TypeCheckIssue>*> issues_;
absl::Nonnull<const ast_internal::AstImpl*> ast_;
VariableScope root_scope_;
absl::Nonnull<google::protobuf::Arena*> arena_;
absl::Nonnull<TypeFactory*> type_factory_;
const VariableScope* current_scope_;
std::vector<const Expr*> expr_stack_;
absl::flat_hash_map<const Expr*, std::vector<std::string>>
maybe_namespaced_functions_;
absl::flat_hash_set<const Expr*> deferred_select_operations_;
absl::Status status_;
std::vector<std::unique_ptr<VariableScope>> comprehension_vars_;
std::vector<ComprehensionScope> comprehension_scopes_;
absl::flat_hash_map<const Expr*, FunctionResolution> functions_;
absl::flat_hash_map<const Expr*, const VariableDecl*> attributes_;
absl::flat_hash_map<const Expr*, std::string> struct_types_;
absl::flat_hash_map<const Expr*, Type> types_;
};
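// An identifier may be the root of a qualified name (e.g. a.b.c) or the
// target of a possibly namespaced receiver call, so walk the enclosing
// expressions before deciding how to resolve it.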
void ResolveVisitor::PostVisitIdent(const Expr& expr, const IdentExpr& ident) {
if (expr_stack_.size() == 1) {
ResolveSimpleIdentifier(expr, ident.name());
return;
}
int stack_pos = expr_stack_.size() - 1;
std::vector<std::string> qualifiers;
qualifiers.push_back(ident.name());
const Expr* receiver_call = nullptr;
const Expr* root_candidate = expr_stack_[stack_pos];
while (stack_pos > 0) {
--stack_pos;
const Expr* parent = expr_stack_[stack_pos];
if (parent->has_call_expr() &&
(&parent->call_expr().target() == root_candidate)) {
receiver_call = parent;
break;
} else if (!parent->has_select_expr()) {
break;
}
qualifiers.push_back(parent->select_expr().field());
deferred_select_operations_.insert(parent);
root_candidate = parent;
if (parent->select_expr().test_only()) {
break;
}
}
if (receiver_call == nullptr) {
ResolveQualifiedIdentifier(*root_candidate, qualifiers);
} else {
maybe_namespaced_functions_[receiver_call] = std::move(qualifiers);
}
}
void ResolveVisitor::PostVisitConst(const Expr& expr,
const Constant& constant) {
switch (constant.kind().index()) {
case ConstantKindIndexOf<std::nullptr_t>():
types_[&expr] = NullType();
break;
case ConstantKindIndexOf<bool>():
types_[&expr] = BoolType();
break;
case ConstantKindIndexOf<int64_t>():
types_[&expr] = IntType();
break;
case ConstantKindIndexOf<uint64_t>():
types_[&expr] = UintType();
break;
case ConstantKindIndexOf<double>():
types_[&expr] = DoubleType();
break;
case ConstantKindIndexOf<BytesConstant>():
types_[&expr] = BytesType();
break;
case ConstantKindIndexOf<StringConstant>():
types_[&expr] = StringType();
break;
case ConstantKindIndexOf<absl::Duration>():
types_[&expr] = DurationType();
break;
case ConstantKindIndexOf<absl::Time>():
types_[&expr] = TimestampType();
break;
default:
issues_->push_back(TypeCheckIssue::CreateError(
ComputeSourceLocation(*ast_, expr.id()),
absl::StrCat("unsupported constant type: ",
constant.kind().index())));
break;
}
}
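// Map keys are restricted to bool, int, uint, string, or dyn.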
bool IsSupportedKeyType(const Type& type) {
switch (type.kind()) {
case TypeKind::kBool:
case TypeKind::kInt:
case TypeKind::kUint:
case TypeKind::kString:
case TypeKind::kDyn:
return true;
default:
return false;
}
}
void ResolveVisitor::PostVisitMap(const Expr& expr, const MapExpr& map) {
absl::optional<Type> overall_key_type;
absl::optional<Type> overall_value_type;
for (const auto& entry : map.entries()) {
const Expr* key = &entry.key();
Type key_type = GetTypeOrDyn(key);
if (!IsSupportedKeyType(key_type)) {
issues_->push_back(TypeCheckIssue(
Severity::kWarning, ComputeSourceLocation(*ast_, key->id()),
absl::StrCat("unsupported map key type: ", key_type.DebugString())));
}
if (overall_key_type.has_value()) {
if (key_type != *overall_key_type) {
overall_key_type = DynType();
}
} else {
overall_key_type = key_type;
}
const Expr* value = &entry.value();
Type value_type = GetTypeOrDyn(value);
if (entry.optional()) {
if (value_type.IsOptional()) {
value_type = value_type.GetOptional().GetParameter();
}
}
if (overall_value_type.has_value()) {
if (value_type != *overall_value_type) {
overall_value_type = DynType();
}
} else {
overall_value_type = value_type;
}
}
if (overall_value_type.has_value() && overall_key_type.has_value()) {
types_[&expr] = MapType(arena_, *overall_key_type, *overall_value_type);
return;
} else if (overall_value_type.has_value() != overall_key_type.has_value()) {
status_.Update(absl::InternalError(
"Map has mismatched key and value type inference resolution"));
return;
}
types_[&expr] = inference_context_->InstantiateTypeParams(FreeMapType());
}
void ResolveVisitor::PostVisitList(const Expr& expr, const ListExpr& list) {
absl::optional<Type> overall_value_type;
for (const auto& element : list.elements()) {
const Expr* value = &element.expr();
Type value_type = GetTypeOrDyn(value);
if (element.optional()) {
if (value_type.IsOptional()) {
value_type = value_type.GetOptional().GetParameter();
}
}
if (overall_value_type.has_value()) {
if (value_type != *overall_value_type) {
overall_value_type = DynType();
}
} else {
overall_value_type = value_type;
}
}
if (overall_value_type.has_value()) {
types_[&expr] = ListType(arena_, *overall_value_type);
return;
}
types_[&expr] = inference_context_->InstantiateTypeParams(FreeListType());
}
void ResolveVisitor::PostVisitStruct(const Expr& expr,
const StructExpr& create_struct) {
absl::Status status;
std::string resolved_name;
Type resolved_type;
namespace_generator_.GenerateCandidates(
create_struct.name(), [&](const absl::string_view name) {
auto type = env_->LookupTypeName(*type_factory_, name);
if (!type.ok()) {
status.Update(type.status());
return false;
} else if (type->has_value()) {
resolved_name = name;
resolved_type = **type;
return false;
}
return true;
});
if (!status.ok()) {
status_.Update(status);
return;
}
if (resolved_name.empty()) {
ReportMissingReference(expr, create_struct.name());
return;
}
if (resolved_type.kind() != TypeKind::kStruct &&
!IsWellKnownMessageType(resolved_name)) {
issues_->push_back(TypeCheckIssue::CreateError(
ComputeSourceLocation(*ast_, expr.id()),
absl::StrCat("type '", resolved_name,
"' does not support message creation")));
return;
}
types_[&expr] = resolved_type;
struct_types_[&expr] = resolved_name;
status_.Update(
CheckFieldAssignments(expr, create_struct, resolved_type, resolved_name));
}
void ResolveVisitor::PostVisitCall(const Expr& expr, const CallExpr& call) {
if (auto iter = maybe_namespaced_functions_.find(&expr);
iter != maybe_namespaced_functions_.end()) {
std::string namespaced_name =
absl::StrCat(FormatCandidate(iter->second), ".", call.function());
const FunctionDecl* decl =
ResolveFunctionCallShape(expr, namespaced_name, call.args().size(),
false);
if (decl != nullptr) {
ResolveFunctionOverloads(expr, *decl, call.args().size(),
false,
true);
return;
}
ResolveQualifiedIdentifier(call.target(), iter->second);
}
int arg_count = call.args().size();
if (call.has_target()) {
++arg_count;
}
const FunctionDecl* decl = ResolveFunctionCallShape(
expr, call.function(), arg_count, call.has_target());
if (decl != nullptr) {
ResolveFunctionOverloads(expr, *decl, arg_count, call.has_target(),
false);
return;
}
ReportMissingReference(expr, call.function());
}
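// Comprehensions introduce two nested scopes: one binding the accumulator
// variable and an inner one additionally binding the iteration variable.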
void ResolveVisitor::PreVisitComprehension(
const Expr& expr, const ComprehensionExpr& comprehension) {
std::unique_ptr<VariableScope> accu_scope = current_scope_->MakeNestedScope();
auto* accu_scope_ptr = accu_scope.get();
std::unique_ptr<VariableScope> iter_scope = accu_scope->MakeNestedScope();
auto* iter_scope_ptr = iter_scope.get();
comprehension_vars_.push_back(std::move(accu_scope));
comprehension_vars_.push_back(std::move(iter_scope));
comprehension_scopes_.push_back(
{&expr, current_scope_, accu_scope_ptr, iter_scope_ptr});
}
void ResolveVisitor::PostVisitComprehension(
const Expr& expr, const ComprehensionExpr& comprehension) {
comprehension_scopes_.pop_back();
types_[&expr] = GetTypeOrDyn(&comprehension.result());
}
void ResolveVisitor::PreVisitComprehensionSubexpression(
const Expr& expr, const ComprehensionExpr& comprehension,
ComprehensionArg comprehension_arg) {
if (comprehension_scopes_.empty()) {
status_.Update(absl::InternalError(
"Comprehension scope stack is empty in comprehension"));
return;
}
auto& scope = comprehension_scopes_.back();
if (scope.comprehension_expr != &expr) {
status_.Update(absl::InternalError("Comprehension scope stack broken"));
return;
}
switch (comprehension_arg) {
case ComprehensionArg::LOOP_CONDITION:
current_scope_ = scope.accu_scope;
break;
case ComprehensionArg::LOOP_STEP:
current_scope_ = scope.iter_scope;
break;
case ComprehensionArg::RESULT:
current_scope_ = scope.accu_scope;
break;
default:
current_scope_ = scope.parent;
break;
}
}
void ResolveVisitor::PostVisitComprehensionSubexpression(
const Expr& expr, const ComprehensionExpr& comprehension,
ComprehensionArg comprehension_arg) {
if (comprehension_scopes_.empty()) {
status_.Update(absl::InternalError(
"Comprehension scope stack is empty in comprehension"));
return;
}
auto& scope = comprehension_scopes_.back();
if (scope.comprehension_expr != &expr) {
status_.Update(absl::InternalError("Comprehension scope stack broken"));
return;
}
current_scope_ = scope.parent;
switch (comprehension_arg) {
case ComprehensionArg::ACCU_INIT:
scope.accu_scope->InsertVariableIfAbsent(MakeVariableDecl(
comprehension.accu_var(), GetTypeOrDyn(&comprehension.accu_init())));
break;
case ComprehensionArg::ITER_RANGE: {
Type range_type = GetTypeOrDyn(&comprehension.iter_range());
Type iter_type = DynType();
switch (range_type.kind()) {
case TypeKind::kList:
iter_type = range_type.GetList().element();
break;
case TypeKind::kMap:
iter_type = range_type.GetMap().key();
break;
case TypeKind::kDyn:
break;
default:
issues_->push_back(TypeCheckIssue::CreateError(
ComputeSourceLocation(*ast_, expr.id()),
absl::StrCat("expression of type '", range_type.DebugString(),
"' cannot be the range of a comprehension (must be "
"list, map, or dynamic)")));
break;
}
scope.iter_scope->InsertVariableIfAbsent(
MakeVariableDecl(comprehension.iter_var(), iter_type));
break;
}
case ComprehensionArg::RESULT:
      // Propagate the result subexpression's type to the comprehension node.
      types_[&expr] = GetTypeOrDyn(&comprehension.result());
break;
default:
break;
}
}
void ResolveVisitor::PostVisitSelect(const Expr& expr,
const SelectExpr& select) {
if (!deferred_select_operations_.contains(&expr)) {
ResolveSelectOperation(expr, select.field(), select.operand());
}
}
const FunctionDecl* ResolveVisitor::ResolveFunctionCallShape(
const Expr& expr, absl::string_view function_name, int arg_count,
bool is_receiver) {
const FunctionDecl* decl = nullptr;
namespace_generator_.GenerateCandidates(
function_name, [&, this](absl::string_view candidate) -> bool {
decl = env_->LookupFunction(candidate);
if (decl == nullptr) {
return true;
}
for (const auto& ovl : decl->overloads()) {
if (ovl.member() == is_receiver && ovl.args().size() == arg_count) {
return false;
}
}
decl = nullptr;
return true;
});
return decl;
}
void ResolveVisitor::ResolveFunctionOverloads(const Expr& expr,
const FunctionDecl& decl,
int arg_count, bool is_receiver,
bool is_namespaced) {
std::vector<Type> arg_types;
arg_types.reserve(arg_count);
if (is_receiver) {
arg_types.push_back(GetTypeOrDyn(&expr.call_expr().target()));
}
for (int i = 0; i < expr.call_expr().args().size(); ++i) {
arg_types.push_back(GetTypeOrDyn(&expr.call_expr().args()[i]));
}
absl::optional<TypeInferenceContext::OverloadResolution> resolution =
inference_context_->ResolveOverload(decl, arg_types, is_receiver);
if (!resolution.has_value()) {
issues_->push_back(TypeCheckIssue::CreateError(
ComputeSourceLocation(*ast_, expr.id()),
absl::StrCat("found no matching overload for '", decl.name(),
"' applied to (",
absl::StrJoin(arg_types, ", ",
[](std::string* out, const Type& type) {
out->append(type.DebugString());
}),
")")));
return;
}
auto* result_decl = google::protobuf::Arena::Create<FunctionDecl>(arena_);
result_decl->set_name(decl.name());
for (const auto& ovl : resolution->overloads) {
absl::Status s = result_decl->AddOverload(ovl);
if (!s.ok()) {
status_.Update(absl::InternalError(absl::StrCat(
"failed to add overload to resolved function declaration: ", s)));
}
}
functions_[&expr] = {result_decl, is_namespaced};
types_[&expr] = resolution->result_type;
}
absl::Nullable<const VariableDecl*> ResolveVisitor::LookupIdentifier(
absl::string_view name) {
if (const VariableDecl* decl = current_scope_->LookupVariable(name);
decl != nullptr) {
return decl;
}
absl::StatusOr<absl::optional<VariableDecl>> constant =
env_->LookupTypeConstant(*type_factory_, arena_, name);
if (!constant.ok()) {
status_.Update(constant.status());
return nullptr;
}
if (constant->has_value()) {
if (constant->value().type().kind() == TypeKind::kEnum) {
constant->value().set_type(IntType());
}
return google::protobuf::Arena::Create<VariableDecl>(
arena_, std::move(constant).value().value());
}
return nullptr;
}
void ResolveVisitor::ResolveSimpleIdentifier(const Expr& expr,
absl::string_view name) {
const VariableDecl* decl = nullptr;
namespace_generator_.GenerateCandidates(
name, [&decl, this](absl::string_view candidate) {
decl = LookupIdentifier(candidate);
return decl == nullptr;
});
if (decl == nullptr) {
ReportMissingReference(expr, name);
return;
}
attributes_[&expr] = decl;
types_[&expr] = inference_context_->InstantiateTypeParams(decl->type());
}
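// Resolves the longest declared prefix of `qualifiers` as a variable, then
// applies any remaining segments as ordinary select operations.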
void ResolveVisitor::ResolveQualifiedIdentifier(
const Expr& expr, absl::Span<const std::string> qualifiers) {
if (qualifiers.size() == 1) {
ResolveSimpleIdentifier(expr, qualifiers[0]);
return;
}
absl::Nullable<const VariableDecl*> decl = nullptr;
int segment_index_out = -1;
namespace_generator_.GenerateCandidates(
qualifiers, [&decl, &segment_index_out, this](absl::string_view candidate,
int segment_index) {
decl = LookupIdentifier(candidate);
if (decl != nullptr) {
segment_index_out = segment_index;
return false;
}
return true;
});
if (decl == nullptr) {
ReportMissingReference(expr, FormatCandidate(qualifiers));
return;
}
const int num_select_opts = qualifiers.size() - segment_index_out - 1;
const Expr* root = &expr;
std::vector<const Expr*> select_opts;
select_opts.reserve(num_select_opts);
for (int i = 0; i < num_select_opts; ++i) {
select_opts.push_back(root);
root = &root->select_expr().operand();
}
attributes_[root] = decl;
types_[root] = inference_context_->InstantiateTypeParams(decl->type());
for (auto iter = select_opts.rbegin(); iter != select_opts.rend(); ++iter) {
ResolveSelectOperation(**iter, (*iter)->select_expr().field(),
(*iter)->select_expr().operand());
}
}
void ResolveVisitor::ResolveSelectOperation(const Expr& expr,
absl::string_view field,
const Expr& operand) {
auto impl = [&](const Type& operand_type) -> absl::optional<Type> {
if (operand_type.kind() == TypeKind::kDyn ||
operand_type.kind() == TypeKind::kAny) {
return DynType();
}
if (operand_type.kind() == TypeKind::kStruct) {
StructType struct_type = operand_type.GetStruct();
auto field_info =
env_->LookupStructField(*type_factory_, struct_type.name(), field);
if (!field_info.ok()) {
status_.Update(field_info.status());
return absl::nullopt;
}
if (!field_info->has_value()) {
ReportUndefinedField(expr.id(), field, struct_type.name());
return absl::nullopt;
}
auto type = field_info->value().GetType();
if (type.kind() == TypeKind::kEnum) {
return IntType();
}
return type;
}
if (operand_type.kind() == TypeKind::kMap) {
MapType map_type = operand_type.GetMap();
if (inference_context_->IsAssignable(StringType(), map_type.GetKey())) {
return map_type.GetValue();
}
}
issues_->push_back(TypeCheckIssue::CreateError(
ComputeSourceLocation(*ast_, expr.id()),
absl::StrCat("expression of type '", operand_type.DebugString(),
"' cannot be the operand of a select operation")));
return absl::nullopt;
};
const Type& operand_type = GetTypeOrDyn(&operand);
absl::optional<Type> result_type;
if (operand_type.IsOptional()) {
auto optional_type = operand_type.GetOptional();
Type held_type = optional_type.GetParameter();
result_type = impl(held_type);
} else {
result_type = impl(operand_type);
}
if (result_type.has_value()) {
if (expr.select_expr().test_only()) {
types_[&expr] = BoolType();
} else {
types_[&expr] = *result_type;
}
}
}
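// Second pass: rewrites the AST in place with the references and finalized
// types recorded by the resolve visitor.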
class ResolveRewriter : public AstRewriterBase {
public:
explicit ResolveRewriter(const ResolveVisitor& visitor,
const TypeInferenceContext& inference_context,
AstImpl::ReferenceMap& references,
AstImpl::TypeMap& types)
: visitor_(visitor),
inference_context_(inference_context),
reference_map_(references),
type_map_(types) {}
bool PostVisitRewrite(Expr& expr) override {
bool rewritten = false;
if (auto iter = visitor_.attributes().find(&expr);
iter != visitor_.attributes().end()) {
const VariableDecl* decl = iter->second;
auto& ast_ref = reference_map_[expr.id()];
ast_ref.set_name(decl->name());
if (decl->has_value()) {
ast_ref.set_value(decl->value());
}
expr.mutable_ident_expr().set_name(decl->name());
rewritten = true;
} else if (auto iter = visitor_.functions().find(&expr);
iter != visitor_.functions().end()) {
const FunctionDecl* decl = iter->second.decl;
const bool needs_rewrite = iter->second.namespace_rewrite;
auto& ast_ref = reference_map_[expr.id()];
ast_ref.set_name(decl->name());
for (const auto& overload : decl->overloads()) {
ast_ref.mutable_overload_id().push_back(overload.id());
}
expr.mutable_call_expr().set_function(decl->name());
if (needs_rewrite && expr.call_expr().has_target()) {
expr.mutable_call_expr().set_target(nullptr);
}
rewritten = true;
} else if (auto iter = visitor_.struct_types().find(&expr);
iter != visitor_.struct_types().end()) {
auto& ast_ref = reference_map_[expr.id()];
ast_ref.set_name(iter->second);
if (expr.has_struct_expr()) {
expr.mutable_struct_expr().set_name(iter->second);
}
rewritten = true;
}
if (auto iter = visitor_.types().find(&expr);
iter != visitor_.types().end()) {
auto flattened_type =
FlattenType(inference_context_.FinalizeType(iter->second));
if (!flattened_type.ok()) {
status_.Update(flattened_type.status());
return rewritten;
}
type_map_[expr.id()] = *std::move(flattened_type);
rewritten = true;
}
return rewritten;
}
const absl::Status& status() const { return status_; }
private:
absl::Status status_;
const ResolveVisitor& visitor_;
const TypeInferenceContext& inference_context_;
AstImpl::ReferenceMap& reference_map_;
AstImpl::TypeMap& type_map_;
};
}
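// Runs resolution over the parsed AST and, if no errors were reported,
// rewrites it in place with resolved references and checked types.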
absl::StatusOr<ValidationResult> TypeCheckerImpl::Check(
std::unique_ptr<Ast> ast) const {
auto& ast_impl = AstImpl::CastFromPublicAst(*ast);
google::protobuf::Arena type_arena;
std::vector<TypeCheckIssue> issues;
CEL_ASSIGN_OR_RETURN(auto generator,
NamespaceGenerator::Create(env_.container()));
TypeInferenceContext type_inference_context(&type_arena);
TrivialTypeFactory type_factory(&type_arena);
ResolveVisitor visitor(env_.container(), std::move(generator), env_, ast_impl,
type_inference_context, issues, &type_arena,
type_factory);
TraversalOptions opts;
opts.use_comprehension_callbacks = true;
AstTraverse(ast_impl.root_expr(), visitor, opts);
CEL_RETURN_IF_ERROR(visitor.status());
for (const auto& issue : issues) {
if (issue.severity() == Severity::kError) {
return ValidationResult(std::move(issues));
}
}
ResolveRewriter rewriter(visitor, type_inference_context,
ast_impl.reference_map(), ast_impl.type_map());
AstRewrite(ast_impl.root_expr(), rewriter);
CEL_RETURN_IF_ERROR(rewriter.status());
ast_impl.set_is_checked(true);
return ValidationResult(std::move(ast), std::move(issues));
}
} | #include "checker/internal/type_checker_impl.h"
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/base/no_destructor.h"
#include "absl/base/nullability.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/absl_check.h"
#include "absl/status/status.h"
#include "absl/status/status_matchers.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "base/ast_internal/ast_impl.h"
#include "base/ast_internal/expr.h"
#include "checker/internal/test_ast_helpers.h"
#include "checker/internal/type_check_env.h"
#include "checker/type_check_issue.h"
#include "checker/validation_result.h"
#include "common/ast.h"
#include "common/decl.h"
#include "common/type.h"
#include "common/type_introspector.h"
#include "extensions/protobuf/type_reflector.h"
#include "internal/status_macros.h"
#include "internal/testing.h"
#include "proto/test/v1/proto2/test_all_types.pb.h"
#include "proto/test/v1/proto3/test_all_types.pb.h"
#include "google/protobuf/arena.h"
#include "google/protobuf/message.h"
namespace cel {
namespace checker_internal {
namespace {
using ::absl_testing::IsOk;
using ::cel::ast_internal::AstImpl;
using ::cel::ast_internal::Reference;
using ::google::api::expr::test::v1::proto3::TestAllTypes;
using ::testing::_;
using ::testing::Contains;
using ::testing::ElementsAre;
using ::testing::Eq;
using ::testing::IsEmpty;
using ::testing::Pair;
using ::testing::Property;
using AstType = ast_internal::Type;
using Severity = TypeCheckIssue::Severity;
namespace testpb3 = ::google::api::expr::test::v1::proto3;
std::string SevString(Severity severity) {
switch (severity) {
case Severity::kDeprecated:
return "Deprecated";
case Severity::kError:
return "Error";
case Severity::kWarning:
return "Warning";
case Severity::kInformation:
return "Information";
}
}
}
}
template <typename Sink>
void AbslStringify(Sink& sink, const TypeCheckIssue& issue) {
absl::Format(&sink, "TypeCheckIssue(%s): %s",
checker_internal::SevString(issue.severity()), issue.message());
}
namespace checker_internal {
namespace {
absl::Nonnull<google::protobuf::Arena*> TestTypeArena() {
static absl::NoDestructor<google::protobuf::Arena> kArena;
return &(*kArena);
}
FunctionDecl MakeIdentFunction() {
auto decl = MakeFunctionDecl(
"identity",
MakeOverloadDecl("identity", TypeParamType("A"), TypeParamType("A")));
ABSL_CHECK_OK(decl.status());
return decl.value();
}
MATCHER_P2(IsIssueWithSubstring, severity, substring, "") {
const TypeCheckIssue& issue = arg;
if (issue.severity() == severity &&
absl::StrContains(issue.message(), substring)) {
return true;
}
*result_listener << "expected: " << SevString(severity) << " " << substring
<< "\nactual: " << SevString(issue.severity()) << " "
<< issue.message();
return false;
}
MATCHER_P(IsVariableReference, var_name, "") {
const Reference& reference = arg;
if (reference.name() == var_name) {
return true;
}
*result_listener << "expected: " << var_name
<< "\nactual: " << reference.name();
return false;
}
MATCHER_P2(IsFunctionReference, fn_name, overloads, "") {
const Reference& reference = arg;
if (reference.name() != fn_name) {
*result_listener << "expected: " << fn_name
<< "\nactual: " << reference.name();
}
absl::flat_hash_set<std::string> got_overload_set(
reference.overload_id().begin(), reference.overload_id().end());
absl::flat_hash_set<std::string> want_overload_set(overloads.begin(),
overloads.end());
if (got_overload_set != want_overload_set) {
*result_listener << "expected overload_ids: "
<< absl::StrJoin(want_overload_set, ",")
<< "\nactual: " << absl::StrJoin(got_overload_set, ",");
}
return reference.name() == fn_name && got_overload_set == want_overload_set;
}
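// Registers a small subset of the CEL standard library sufficient for these
// tests: arithmetic, logic, comparisons, and a few conversion functions.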
absl::Status RegisterMinimalBuiltins(absl::Nonnull<google::protobuf::Arena*> arena,
TypeCheckEnv& env) {
Type list_of_a = ListType(arena, TypeParamType("A"));
FunctionDecl add_op;
add_op.set_name("_+_");
CEL_RETURN_IF_ERROR(add_op.AddOverload(
MakeOverloadDecl("add_int_int", IntType(), IntType(), IntType())));
CEL_RETURN_IF_ERROR(add_op.AddOverload(
MakeOverloadDecl("add_uint_uint", UintType(), UintType(), UintType())));
CEL_RETURN_IF_ERROR(add_op.AddOverload(MakeOverloadDecl(
"add_double_double", DoubleType(), DoubleType(), DoubleType())));
CEL_RETURN_IF_ERROR(add_op.AddOverload(
MakeOverloadDecl("add_list", list_of_a, list_of_a, list_of_a)));
FunctionDecl not_op;
not_op.set_name("!_");
CEL_RETURN_IF_ERROR(not_op.AddOverload(
MakeOverloadDecl("logical_not",
BoolType{}, BoolType{})));
FunctionDecl not_strictly_false;
not_strictly_false.set_name("@not_strictly_false");
CEL_RETURN_IF_ERROR(not_strictly_false.AddOverload(
MakeOverloadDecl("not_strictly_false",
BoolType{}, DynType{})));
FunctionDecl mult_op;
mult_op.set_name("_*_");
CEL_RETURN_IF_ERROR(mult_op.AddOverload(
MakeOverloadDecl("mult_int_int",
IntType(), IntType(), IntType())));
FunctionDecl or_op;
or_op.set_name("_||_");
CEL_RETURN_IF_ERROR(or_op.AddOverload(
MakeOverloadDecl("logical_or",
BoolType{}, BoolType{}, BoolType{})));
FunctionDecl and_op;
and_op.set_name("_&&_");
CEL_RETURN_IF_ERROR(and_op.AddOverload(
MakeOverloadDecl("logical_and",
BoolType{}, BoolType{}, BoolType{})));
FunctionDecl lt_op;
lt_op.set_name("_<_");
CEL_RETURN_IF_ERROR(lt_op.AddOverload(
MakeOverloadDecl("lt_int_int",
BoolType{}, IntType(), IntType())));
FunctionDecl gt_op;
gt_op.set_name("_>_");
CEL_RETURN_IF_ERROR(gt_op.AddOverload(
MakeOverloadDecl("gt_int_int",
BoolType{}, IntType(), IntType())));
FunctionDecl eq_op;
eq_op.set_name("_==_");
CEL_RETURN_IF_ERROR(eq_op.AddOverload(MakeOverloadDecl(
"equals",
BoolType{}, TypeParamType("A"), TypeParamType("A"))));
FunctionDecl ternary_op;
ternary_op.set_name("_?_:_");
  CEL_RETURN_IF_ERROR(ternary_op.AddOverload(MakeOverloadDecl(
      "conditional",
      TypeParamType("A"), BoolType{}, TypeParamType("A"), TypeParamType("A"))));
FunctionDecl to_int;
to_int.set_name("int");
CEL_RETURN_IF_ERROR(to_int.AddOverload(
MakeOverloadDecl("to_int",
IntType(), DynType())));
FunctionDecl to_duration;
to_duration.set_name("duration");
CEL_RETURN_IF_ERROR(to_duration.AddOverload(
MakeOverloadDecl("to_duration",
DurationType(), StringType())));
FunctionDecl to_timestamp;
to_timestamp.set_name("timestamp");
CEL_RETURN_IF_ERROR(to_timestamp.AddOverload(
MakeOverloadDecl("to_timestamp",
TimestampType(), IntType())));
FunctionDecl to_dyn;
to_dyn.set_name("dyn");
CEL_RETURN_IF_ERROR(to_dyn.AddOverload(
MakeOverloadDecl("to_dyn",
DynType(), TypeParamType("A"))));
FunctionDecl to_type;
to_type.set_name("type");
CEL_RETURN_IF_ERROR(to_type.AddOverload(
MakeOverloadDecl("to_type",
TypeType(arena, TypeParamType("A")),
TypeParamType("A"))));
env.InsertFunctionIfAbsent(std::move(not_op));
env.InsertFunctionIfAbsent(std::move(not_strictly_false));
env.InsertFunctionIfAbsent(std::move(add_op));
env.InsertFunctionIfAbsent(std::move(mult_op));
env.InsertFunctionIfAbsent(std::move(or_op));
env.InsertFunctionIfAbsent(std::move(and_op));
env.InsertFunctionIfAbsent(std::move(lt_op));
env.InsertFunctionIfAbsent(std::move(gt_op));
env.InsertFunctionIfAbsent(std::move(to_int));
env.InsertFunctionIfAbsent(std::move(eq_op));
env.InsertFunctionIfAbsent(std::move(ternary_op));
env.InsertFunctionIfAbsent(std::move(to_dyn));
env.InsertFunctionIfAbsent(std::move(to_type));
env.InsertFunctionIfAbsent(std::move(to_duration));
env.InsertFunctionIfAbsent(std::move(to_timestamp));
return absl::OkStatus();
}
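// Sanity check: a trivial arithmetic expression type-checks cleanly with the
// minimal builtins.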
TEST(TypeCheckerImplTest, SmokeTest) {
TypeCheckEnv env;
google::protobuf::Arena arena;
ASSERT_THAT(RegisterMinimalBuiltins(&arena, env), IsOk());
TypeCheckerImpl impl(std::move(env));
ASSERT_OK_AND_ASSIGN(auto ast, MakeTestParsedAst("1 + 2"));
ASSERT_OK_AND_ASSIGN(ValidationResult result, impl.Check(std::move(ast)));
EXPECT_TRUE(result.IsValid());
EXPECT_THAT(result.GetIssues(), IsEmpty());
}
TEST(TypeCheckerImplTest, SimpleIdentsResolved) {
TypeCheckEnv env;
google::protobuf::Arena arena;
ASSERT_THAT(RegisterMinimalBuiltins(&arena, env), IsOk());
env.InsertVariableIfAbsent(MakeVariableDecl("x", IntType()));
env.InsertVariableIfAbsent(MakeVariableDecl("y", IntType()));
TypeCheckerImpl impl(std::move(env));
ASSERT_OK_AND_ASSIGN(auto ast, MakeTestParsedAst("x + y"));
ASSERT_OK_AND_ASSIGN(ValidationResult result, impl.Check(std::move(ast)));
EXPECT_TRUE(result.IsValid());
EXPECT_THAT(result.GetIssues(), IsEmpty());
}
TEST(TypeCheckerImplTest, ReportMissingIdentDecl) {
TypeCheckEnv env;
google::protobuf::Arena arena;
ASSERT_THAT(RegisterMinimalBuiltins(&arena, env), IsOk());
env.InsertVariableIfAbsent(MakeVariableDecl("x", IntType()));
TypeCheckerImpl impl(std::move(env));
ASSERT_OK_AND_ASSIGN(auto ast, MakeTestParsedAst("x + y"));
ASSERT_OK_AND_ASSIGN(ValidationResult result, impl.Check(std::move(ast)));
EXPECT_FALSE(result.IsValid());
EXPECT_THAT(result.GetIssues(),
ElementsAre(IsIssueWithSubstring(Severity::kError,
"undeclared reference to 'y'")));
}
TEST(TypeCheckerImplTest, QualifiedIdentsResolved) {
TypeCheckEnv env;
google::protobuf::Arena arena;
ASSERT_THAT(RegisterMinimalBuiltins(&arena, env), IsOk());
env.InsertVariableIfAbsent(MakeVariableDecl("x.y", IntType()));
env.InsertVariableIfAbsent(MakeVariableDecl("x.z", IntType()));
TypeCheckerImpl impl(std::move(env));
ASSERT_OK_AND_ASSIGN(auto ast, MakeTestParsedAst("x.y + x.z"));
ASSERT_OK_AND_ASSIGN(ValidationResult result, impl.Check(std::move(ast)));
EXPECT_TRUE(result.IsValid());
EXPECT_THAT(result.GetIssues(), IsEmpty());
}
TEST(TypeCheckerImplTest, ReportMissingQualifiedIdentDecl) {
TypeCheckEnv env;
google::protobuf::Arena arena;
ASSERT_THAT(RegisterMinimalBuiltins(&arena, env), IsOk());
env.InsertVariableIfAbsent(MakeVariableDecl("x", IntType()));
TypeCheckerImpl impl(std::move(env));
ASSERT_OK_AND_ASSIGN(auto ast, MakeTestParsedAst("y.x"));
ASSERT_OK_AND_ASSIGN(ValidationResult result, impl.Check(std::move(ast)));
EXPECT_FALSE(result.IsValid());
EXPECT_THAT(result.GetIssues(),
ElementsAre(IsIssueWithSubstring(
Severity::kError, "undeclared reference to 'y.x'")));
}
TEST(TypeCheckerImplTest, ResolveMostQualifiedIdent) {
TypeCheckEnv env;
google::protobuf::Arena arena;
ASSERT_THAT(RegisterMinimalBuiltins(&arena, env), IsOk());
env.InsertVariableIfAbsent(MakeVariableDecl("x", IntType()));
env.InsertVariableIfAbsent(MakeVariableDecl("x.y", MapType()));
TypeCheckerImpl impl(std::move(env));
ASSERT_OK_AND_ASSIGN(auto ast, MakeTestParsedAst("x.y.z"));
ASSERT_OK_AND_ASSIGN(ValidationResult result, impl.Check(std::move(ast)));
ASSERT_OK_AND_ASSIGN(auto checked_ast, result.ReleaseAst());
auto& ast_impl = AstImpl::CastFromPublicAst(*checked_ast);
EXPECT_THAT(ast_impl.reference_map(),
Contains(Pair(_, IsVariableReference("x.y"))));
}
TEST(TypeCheckerImplTest, MemberFunctionCallResolved) {
TypeCheckEnv env;
env.InsertVariableIfAbsent(MakeVariableDecl("x", IntType()));
env.InsertVariableIfAbsent(MakeVariableDecl("y", IntType()));
FunctionDecl foo;
foo.set_name("foo");
ASSERT_THAT(foo.AddOverload(MakeMemberOverloadDecl("int_foo_int",
IntType(),
IntType(), IntType())),
IsOk());
env.InsertFunctionIfAbsent(std::move(foo));
TypeCheckerImpl impl(std::move(env));
ASSERT_OK_AND_ASSIGN(auto ast, MakeTestParsedAst("x.foo(y)"));
ASSERT_OK_AND_ASSIGN(ValidationResult result, impl.Check(std::move(ast)));
EXPECT_TRUE(result.IsValid());
EXPECT_THAT(result.GetIssues(), IsEmpty());
}
TEST(TypeCheckerImplTest, MemberFunctionCallNotDeclared) {
TypeCheckEnv env;
env.InsertVariableIfAbsent(MakeVariableDecl("x", IntType()));
env.InsertVariableIfAbsent(MakeVariableDecl("y", IntType()));
TypeCheckerImpl impl(std::move(env));
ASSERT_OK_AND_ASSIGN(auto ast, MakeTestParsedAst("x.foo(y)"));
ASSERT_OK_AND_ASSIGN(ValidationResult result, impl.Check(std::move(ast)));
EXPECT_FALSE(result.IsValid());
EXPECT_THAT(result.GetIssues(),
ElementsAre(IsIssueWithSubstring(
Severity::kError, "undeclared reference to 'foo'")));
}
TEST(TypeCheckerImplTest, FunctionShapeMismatch) {
TypeCheckEnv env;
ASSERT_OK_AND_ASSIGN(
auto foo,
MakeFunctionDecl("foo", MakeOverloadDecl("foo_int_int", IntType(),
IntType(), IntType())));
env.InsertFunctionIfAbsent(foo);
TypeCheckerImpl impl(std::move(env));
ASSERT_OK_AND_ASSIGN(auto ast, MakeTestParsedAst("foo(1, 2, 3)"));
ASSERT_OK_AND_ASSIGN(ValidationResult result, impl.Check(std::move(ast)));
EXPECT_FALSE(result.IsValid());
EXPECT_THAT(result.GetIssues(),
ElementsAre(IsIssueWithSubstring(
Severity::kError, "undeclared reference to 'foo'")));
}
TEST(TypeCheckerImplTest, NamespaceFunctionCallResolved) {
TypeCheckEnv env;
env.InsertVariableIfAbsent(MakeVariableDecl("x", IntType()));
env.InsertVariableIfAbsent(MakeVariableDecl("y", IntType()));
FunctionDecl foo;
foo.set_name("x.foo");
ASSERT_THAT(
foo.AddOverload(MakeOverloadDecl("x_foo_int",
IntType(), IntType())),
IsOk());
env.InsertFunctionIfAbsent(std::move(foo));
TypeCheckerImpl impl(std::move(env));
ASSERT_OK_AND_ASSIGN(auto ast, MakeTestParsedAst("x.foo(y)"));
ASSERT_OK_AND_ASSIGN(ValidationResult result, impl.Check(std::move(ast)));
EXPECT_TRUE(result.IsValid());
EXPECT_THAT(result.GetIssues(), IsEmpty());
ASSERT_OK_AND_ASSIGN(auto checked_ast, result.ReleaseAst());
auto& ast_impl = AstImpl::CastFromPublicAst(*checked_ast);
EXPECT_TRUE(ast_impl.root_expr().has_call_expr())
<< absl::StrCat("kind: ", ast_impl.root_expr().kind().index());
EXPECT_EQ(ast_impl.root_expr().call_expr().function(), "x.foo");
EXPECT_FALSE(ast_impl.root_expr().call_expr().has_target());
}
TEST(TypeCheckerImplTest, NamespacedFunctionSkipsFieldCheck) {
TypeCheckEnv env;
env.InsertVariableIfAbsent(MakeVariableDecl("x", IntType()));
FunctionDecl foo;
foo.set_name("x.y.foo");
ASSERT_THAT(
foo.AddOverload(MakeOverloadDecl("x_y_foo_int",
IntType(), IntType())),
IsOk());
env.InsertFunctionIfAbsent(std::move(foo));
TypeCheckerImpl impl(std::move(env));
ASSERT_OK_AND_ASSIGN(auto ast, MakeTestParsedAst("x.y.foo(x)"));
ASSERT_OK_AND_ASSIGN(ValidationResult result, impl.Check(std::move(ast)));
EXPECT_TRUE(result.IsValid());
EXPECT_THAT(result.GetIssues(), IsEmpty());
ASSERT_OK_AND_ASSIGN(auto checked_ast, result.ReleaseAst());
auto& ast_impl = AstImpl::CastFromPublicAst(*checked_ast);
EXPECT_TRUE(ast_impl.root_expr().has_call_expr())
<< absl::StrCat("kind: ", ast_impl.root_expr().kind().index());
EXPECT_EQ(ast_impl.root_expr().call_expr().function(), "x.y.foo");
EXPECT_FALSE(ast_impl.root_expr().call_expr().has_target());
}
TEST(TypeCheckerImplTest, MixedListTypeToDyn) {
TypeCheckEnv env;
google::protobuf::Arena arena;
ASSERT_THAT(RegisterMinimalBuiltins(&arena, env), IsOk());
TypeCheckerImpl impl(std::move(env));
ASSERT_OK_AND_ASSIGN(auto ast, MakeTestParsedAst("[1, 'a']"));
ASSERT_OK_AND_ASSIGN(ValidationResult result, impl.Check(std::move(ast)));
ASSERT_TRUE(result.IsValid());
EXPECT_THAT(result.GetIssues(), IsEmpty());
auto& ast_impl = AstImpl::CastFromPublicAst(*result.GetAst());
EXPECT_TRUE(ast_impl.type_map().at(1).list_type().elem_type().has_dyn());
}
TEST(TypeCheckerImplTest, FreeListTypeToDyn) {
TypeCheckEnv env;
google::protobuf::Arena arena;
ASSERT_THAT(RegisterMinimalBuiltins(&arena, env), IsOk());
TypeCheckerImpl impl(std::move(env));
ASSERT_OK_AND_ASSIGN(auto ast, MakeTestParsedAst("[]"));
ASSERT_OK_AND_ASSIGN(ValidationResult result, impl.Check(std::move(ast)));
ASSERT_TRUE(result.IsValid());
EXPECT_THAT(result.GetIssues(), IsEmpty());
auto& ast_impl = AstImpl::CastFromPublicAst(*result.GetAst());
EXPECT_TRUE(ast_impl.type_map().at(1).list_type().elem_type().has_dyn());
}
TEST(TypeCheckerImplTest, FreeMapTypeToDyn) {
TypeCheckEnv env;
google::protobuf::Arena arena;
ASSERT_THAT(RegisterMinimalBuiltins(&arena, env), IsOk());
TypeCheckerImpl impl(std::move(env));
ASSERT_OK_AND_ASSIGN(auto ast, MakeTestParsedAst("{}"));
ASSERT_OK_AND_ASSIGN(ValidationResult result, impl.Check(std::move(ast)));
ASSERT_TRUE(result.IsValid());
EXPECT_THAT(result.GetIssues(), IsEmpty());
auto& ast_impl = AstImpl::CastFromPublicAst(*result.GetAst());
EXPECT_TRUE(ast_impl.type_map().at(1).map_type().key_type().has_dyn());
EXPECT_TRUE(ast_impl.type_map().at(1).map_type().value_type().has_dyn());
}
TEST(TypeCheckerImplTest, MapTypeWithMixedKeys) {
TypeCheckEnv env;
google::protobuf::Arena arena;
ASSERT_THAT(RegisterMinimalBuiltins(&arena, env), IsOk());
TypeCheckerImpl impl(std::move(env));
ASSERT_OK_AND_ASSIGN(auto ast, MakeTestParsedAst("{'a': 1, 2: 3}"));
ASSERT_OK_AND_ASSIGN(ValidationResult result, impl.Check(std::move(ast)));
ASSERT_TRUE(result.IsValid());
EXPECT_THAT(result.GetIssues(), IsEmpty());
auto& ast_impl = AstImpl::CastFromPublicAst(*result.GetAst());
EXPECT_TRUE(ast_impl.type_map().at(1).map_type().key_type().has_dyn());
EXPECT_EQ(ast_impl.type_map().at(1).map_type().value_type().primitive(),
ast_internal::PrimitiveType::kInt64);
}
TEST(TypeCheckerImplTest, MapTypeUnsupportedKeyWarns) {
TypeCheckEnv env;
google::protobuf::Arena arena;
ASSERT_THAT(RegisterMinimalBuiltins(&arena, env), IsOk());
TypeCheckerImpl impl(std::move(env));
ASSERT_OK_AND_ASSIGN(auto ast, MakeTestParsedAst("{{}: 'a'}"));
ASSERT_OK_AND_ASSIGN(ValidationResult result, impl.Check(std::move(ast)));
ASSERT_TRUE(result.IsValid());
EXPECT_THAT(result.GetIssues(),
ElementsAre(IsIssueWithSubstring(Severity::kWarning,
"unsupported map key type:")));
}
TEST(TypeCheckerImplTest, MapTypeWithMixedValues) {
TypeCheckEnv env;
google::protobuf::Arena arena;
ASSERT_THAT(RegisterMinimalBuiltins(&arena, env), IsOk());
TypeCheckerImpl impl(std::move(env));
ASSERT_OK_AND_ASSIGN(auto ast, MakeTestParsedAst("{'a': 1, 'b': '2'}"));
ASSERT_OK_AND_ASSIGN(ValidationResult result, impl.Check(std::move(ast)));
ASSERT_TRUE(result.IsValid());
EXPECT_THAT(result.GetIssues(), IsEmpty());
auto& ast_impl = AstImpl::CastFromPublicAst(*result.GetAst());
EXPECT_EQ(ast_impl.type_map().at(1).map_type().key_type().primitive(),
ast_internal::PrimitiveType::kString);
EXPECT_TRUE(ast_impl.type_map().at(1).map_type().value_type().has_dyn());
}
TEST(TypeCheckerImplTest, ComprehensionVariablesResolved) {
TypeCheckEnv env;
google::protobuf::Arena arena;
ASSERT_THAT(RegisterMinimalBuiltins(&arena, env), IsOk());
TypeCheckerImpl impl(std::move(env));
ASSERT_OK_AND_ASSIGN(auto ast,
MakeTestParsedAst("[1, 2, 3].exists(x, x * x > 10)"));
ASSERT_OK_AND_ASSIGN(ValidationResult result, impl.Check(std::move(ast)));
EXPECT_TRUE(result.IsValid());
EXPECT_THAT(result.GetIssues(), IsEmpty());
}
TEST(TypeCheckerImplTest, MapComprehensionVariablesResolved) {
TypeCheckEnv env;
google::protobuf::Arena arena;
ASSERT_THAT(RegisterMinimalBuiltins(&arena, env), IsOk());
TypeCheckerImpl impl(std::move(env));
ASSERT_OK_AND_ASSIGN(auto ast,
MakeTestParsedAst("{1: 3, 2: 4}.exists(x, x == 2)"));
ASSERT_OK_AND_ASSIGN(ValidationResult result, impl.Check(std::move(ast)));
EXPECT_TRUE(result.IsValid());
EXPECT_THAT(result.GetIssues(), IsEmpty());
}
TEST(TypeCheckerImplTest, NestedComprehensions) {
TypeCheckEnv env;
google::protobuf::Arena arena;
ASSERT_THAT(RegisterMinimalBuiltins(&arena, env), IsOk());
TypeCheckerImpl impl(std::move(env));
ASSERT_OK_AND_ASSIGN(
auto ast,
MakeTestParsedAst("[1, 2].all(x, ['1', '2'].exists(y, int(y) == x))"));
ASSERT_OK_AND_ASSIGN(ValidationResult result, impl.Check(std::move(ast)));
EXPECT_TRUE(result.IsValid());
EXPECT_THAT(result.GetIssues(), IsEmpty());
}
TEST(TypeCheckerImplTest, ComprehensionVarsFollowNamespacePriorityRules) {
TypeCheckEnv env;
env.set_container("com");
google::protobuf::Arena arena;
ASSERT_THAT(RegisterMinimalBuiltins(&arena, env), IsOk());
env.InsertVariableIfAbsent(MakeVariableDecl("com.x", IntType()));
TypeCheckerImpl impl(std::move(env));
ASSERT_OK_AND_ASSIGN(auto ast,
MakeTestParsedAst("['1', '2'].all(x, x == 2)"));
ASSERT_OK_AND_ASSIGN(ValidationResult result, impl.Check(std::move(ast)));
EXPECT_TRUE(result.IsValid());
EXPECT_THAT(result.GetIssues(), IsEmpty());
ASSERT_OK_AND_ASSIGN(auto checked_ast, result.ReleaseAst());
auto& ast_impl = AstImpl::CastFromPublicAst(*checked_ast);
EXPECT_THAT(ast_impl.reference_map(),
Contains(Pair(_, IsVariableReference("com.x"))));
}
TEST(TypeCheckerImplTest, ComprehensionVarsFollowQualifiedIdentPriority) {
TypeCheckEnv env;
google::protobuf::Arena arena;
ASSERT_THAT(RegisterMinimalBuiltins(&arena, env), IsOk());
env.InsertVariableIfAbsent(MakeVariableDecl("x.y", IntType()));
TypeCheckerImpl impl(std::move(env));
ASSERT_OK_AND_ASSIGN(auto ast,
MakeTestParsedAst("[{'y': '2'}].all(x, x.y == 2)"));
ASSERT_OK_AND_ASSIGN(ValidationResult result, impl.Check(std::move(ast)));
EXPECT_TRUE(result.IsValid());
EXPECT_THAT(result.GetIssues(), IsEmpty());
ASSERT_OK_AND_ASSIGN(auto checked_ast, result.ReleaseAst());
auto& ast_impl = AstImpl::CastFromPublicAst(*checked_ast);
EXPECT_THAT(ast_impl.reference_map(),
Contains(Pair(_, IsVariableReference("x.y"))));
}
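// Parameterized test: each literal expression should check to the expected
// primitive type.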
struct PrimitiveLiteralsTestCase {
std::string expr;
ast_internal::PrimitiveType expected_type;
};
class PrimitiveLiteralsTest
: public testing::TestWithParam<PrimitiveLiteralsTestCase> {};
TEST_P(PrimitiveLiteralsTest, LiteralsTypeInferred) {
TypeCheckEnv env;
const PrimitiveLiteralsTestCase& test_case = GetParam();
TypeCheckerImpl impl(std::move(env));
ASSERT_OK_AND_ASSIGN(auto ast, MakeTestParsedAst(test_case.expr));
ASSERT_OK_AND_ASSIGN(ValidationResult result, impl.Check(std::move(ast)));
ASSERT_TRUE(result.IsValid());
ASSERT_OK_AND_ASSIGN(auto checked_ast, result.ReleaseAst());
auto& ast_impl = AstImpl::CastFromPublicAst(*checked_ast);
EXPECT_EQ(ast_impl.type_map()[1].primitive(), test_case.expected_type);
}
INSTANTIATE_TEST_SUITE_P(
PrimitiveLiteralsTests, PrimitiveLiteralsTest,
::testing::Values(
PrimitiveLiteralsTestCase{
.expr = "1",
.expected_type = ast_internal::PrimitiveType::kInt64,
},
PrimitiveLiteralsTestCase{
.expr = "1.0",
.expected_type = ast_internal::PrimitiveType::kDouble,
},
PrimitiveLiteralsTestCase{
.expr = "1u",
.expected_type = ast_internal::PrimitiveType::kUint64,
},
PrimitiveLiteralsTestCase{
.expr = "'string'",
.expected_type = ast_internal::PrimitiveType::kString,
},
PrimitiveLiteralsTestCase{
.expr = "b'bytes'",
.expected_type = ast_internal::PrimitiveType::kBytes,
},
PrimitiveLiteralsTestCase{
.expr = "false",
.expected_type = ast_internal::PrimitiveType::kBool,
}));
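// Parameterized test: a variable declared with decl_type should surface as
// expected_type in the checked AST's type map.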
struct AstTypeConversionTestCase {
Type decl_type;
ast_internal::Type expected_type;
};
class AstTypeConversionTest
: public testing::TestWithParam<AstTypeConversionTestCase> {};
TEST_P(AstTypeConversionTest, TypeConversion) {
TypeCheckEnv env;
ASSERT_TRUE(
env.InsertVariableIfAbsent(MakeVariableDecl("x", GetParam().decl_type)));
const AstTypeConversionTestCase& test_case = GetParam();
TypeCheckerImpl impl(std::move(env));
ASSERT_OK_AND_ASSIGN(auto ast, MakeTestParsedAst("x"));
ASSERT_OK_AND_ASSIGN(ValidationResult result, impl.Check(std::move(ast)));
ASSERT_TRUE(result.IsValid());
ASSERT_OK_AND_ASSIGN(auto checked_ast, result.ReleaseAst());
auto& ast_impl = AstImpl::CastFromPublicAst(*checked_ast);
EXPECT_EQ(ast_impl.type_map()[1], test_case.expected_type)
<< GetParam().decl_type.DebugString();
}
INSTANTIATE_TEST_SUITE_P(
Primitives, AstTypeConversionTest,
::testing::Values(
AstTypeConversionTestCase{
.decl_type = NullType(),
.expected_type = AstType(ast_internal::NullValue()),
},
AstTypeConversionTestCase{
.decl_type = DynType(),
.expected_type = AstType(ast_internal::DynamicType()),
},
AstTypeConversionTestCase{
.decl_type = BoolType(),
.expected_type = AstType(ast_internal::PrimitiveType::kBool),
},
AstTypeConversionTestCase{
.decl_type = IntType(),
.expected_type = AstType(ast_internal::PrimitiveType::kInt64),
},
AstTypeConversionTestCase{
.decl_type = UintType(),
.expected_type = AstType(ast_internal::PrimitiveType::kUint64),
},
AstTypeConversionTestCase{
.decl_type = DoubleType(),
.expected_type = AstType(ast_internal::PrimitiveType::kDouble),
},
AstTypeConversionTestCase{
.decl_type = StringType(),
.expected_type = AstType(ast_internal::PrimitiveType::kString),
},
AstTypeConversionTestCase{
.decl_type = BytesType(),
.expected_type = AstType(ast_internal::PrimitiveType::kBytes),
},
AstTypeConversionTestCase{
.decl_type = TimestampType(),
.expected_type = AstType(ast_internal::WellKnownType::kTimestamp),
},
AstTypeConversionTestCase{
.decl_type = DurationType(),
.expected_type = AstType(ast_internal::WellKnownType::kDuration),
}));
INSTANTIATE_TEST_SUITE_P(
Wrappers, AstTypeConversionTest,
::testing::Values(
AstTypeConversionTestCase{
.decl_type = IntWrapperType(),
.expected_type = AstType(ast_internal::PrimitiveTypeWrapper(
ast_internal::PrimitiveType::kInt64)),
},
AstTypeConversionTestCase{
.decl_type = UintWrapperType(),
.expected_type = AstType(ast_internal::PrimitiveTypeWrapper(
ast_internal::PrimitiveType::kUint64)),
},
AstTypeConversionTestCase{
.decl_type = DoubleWrapperType(),
.expected_type = AstType(ast_internal::PrimitiveTypeWrapper(
ast_internal::PrimitiveType::kDouble)),
},
AstTypeConversionTestCase{
.decl_type = BoolWrapperType(),
.expected_type = AstType(ast_internal::PrimitiveTypeWrapper(
ast_internal::PrimitiveType::kBool)),
},
AstTypeConversionTestCase{
.decl_type = StringWrapperType(),
.expected_type = AstType(ast_internal::PrimitiveTypeWrapper(
ast_internal::PrimitiveType::kString)),
},
AstTypeConversionTestCase{
.decl_type = BytesWrapperType(),
.expected_type = AstType(ast_internal::PrimitiveTypeWrapper(
ast_internal::PrimitiveType::kBytes)),
}));
INSTANTIATE_TEST_SUITE_P(
ComplexTypes, AstTypeConversionTest,
::testing::Values(
AstTypeConversionTestCase{
.decl_type = ListType(TestTypeArena(), IntType()),
.expected_type =
AstType(ast_internal::ListType(std::make_unique<AstType>(
ast_internal::PrimitiveType::kInt64))),
},
AstTypeConversionTestCase{
.decl_type = MapType(TestTypeArena(), IntType(), IntType()),
.expected_type = AstType(ast_internal::MapType(
std::make_unique<AstType>(ast_internal::PrimitiveType::kInt64),
std::make_unique<AstType>(
ast_internal::PrimitiveType::kInt64))),
},
AstTypeConversionTestCase{
.decl_type = TypeType(TestTypeArena(), IntType()),
.expected_type = AstType(
std::make_unique<AstType>(ast_internal::PrimitiveType::kInt64)),
},
AstTypeConversionTestCase{
.decl_type = OpaqueType(TestTypeArena(), "tuple",
{IntType(), IntType()}),
.expected_type = AstType(ast_internal::AbstractType(
"tuple", {AstType(ast_internal::PrimitiveType::kInt64),
AstType(ast_internal::PrimitiveType::kInt64)})),
},
AstTypeConversionTestCase{
.decl_type = StructType(MessageType(TestAllTypes::descriptor())),
.expected_type = AstType(ast_internal::MessageType(
"google.api.expr.test.v1.proto3.TestAllTypes"))}));
TEST(TypeCheckerImplTest, NullLiteral) {
TypeCheckEnv env;
TypeCheckerImpl impl(std::move(env));
ASSERT_OK_AND_ASSIGN(auto ast, MakeTestParsedAst("null"));
ASSERT_OK_AND_ASSIGN(ValidationResult result, impl.Check(std::move(ast)));
ASSERT_TRUE(result.IsValid());
ASSERT_OK_AND_ASSIGN(auto checked_ast, result.ReleaseAst());
auto& ast_impl = AstImpl::CastFromPublicAst(*checked_ast);
EXPECT_TRUE(ast_impl.type_map()[1].has_null());
}
TEST(TypeCheckerImplTest, ComprehensionUnsupportedRange) {
TypeCheckEnv env;
google::protobuf::Arena arena;
ASSERT_THAT(RegisterMinimalBuiltins(&arena, env), IsOk());
env.InsertVariableIfAbsent(MakeVariableDecl("y", IntType()));
TypeCheckerImpl impl(std::move(env));
ASSERT_OK_AND_ASSIGN(auto ast, MakeTestParsedAst("'abc'.all(x, y == 2)"));
ASSERT_OK_AND_ASSIGN(ValidationResult result, impl.Check(std::move(ast)));
EXPECT_FALSE(result.IsValid());
EXPECT_THAT(result.GetIssues(), Contains(IsIssueWithSubstring(
Severity::kError,
"expression of type 'string' cannot be "
"the range of a comprehension")));
}
TEST(TypeCheckerImplTest, ComprehensionDynRange) {
TypeCheckEnv env;
google::protobuf::Arena arena;
ASSERT_THAT(RegisterMinimalBuiltins(&arena, env), IsOk());
env.InsertVariableIfAbsent(MakeVariableDecl("range", DynType()));
TypeCheckerImpl impl(std::move(env));
ASSERT_OK_AND_ASSIGN(auto ast, MakeTestParsedAst("range.all(x, x == 2)"));
ASSERT_OK_AND_ASSIGN(ValidationResult result, impl.Check(std::move(ast)));
EXPECT_TRUE(result.IsValid());
EXPECT_THAT(result.GetIssues(), IsEmpty());
}
TEST(TypeCheckerImplTest, BasicOvlResolution) {
TypeCheckEnv env;
google::protobuf::Arena arena;
ASSERT_THAT(RegisterMinimalBuiltins(&arena, env), IsOk());
env.InsertVariableIfAbsent(MakeVariableDecl("x", DoubleType()));
env.InsertVariableIfAbsent(MakeVariableDecl("y", DoubleType()));
TypeCheckerImpl impl(std::move(env));
ASSERT_OK_AND_ASSIGN(auto ast, MakeTestParsedAst("x + y"));
ASSERT_OK_AND_ASSIGN(ValidationResult result, impl.Check(std::move(ast)));
EXPECT_TRUE(result.IsValid());
EXPECT_THAT(result.GetIssues(), IsEmpty());
ASSERT_OK_AND_ASSIGN(auto checked_ast, result.ReleaseAst());
auto& ast_impl = AstImpl::CastFromPublicAst(*checked_ast);
EXPECT_THAT(ast_impl.reference_map()[2],
IsFunctionReference(
"_+_", std::vector<std::string>{"add_double_double"}));
}
TEST(TypeCheckerImplTest, OvlResolutionMultipleOverloads) {
TypeCheckEnv env;
google::protobuf::Arena arena;
ASSERT_THAT(RegisterMinimalBuiltins(&arena, env), IsOk());
env.InsertVariableIfAbsent(MakeVariableDecl("x", DoubleType()));
env.InsertVariableIfAbsent(MakeVariableDecl("y", DoubleType()));
TypeCheckerImpl impl(std::move(env));
ASSERT_OK_AND_ASSIGN(auto ast, MakeTestParsedAst("dyn(x) + dyn(y)"));
ASSERT_OK_AND_ASSIGN(ValidationResult result, impl.Check(std::move(ast)));
EXPECT_TRUE(result.IsValid());
EXPECT_THAT(result.GetIssues(), IsEmpty());
ASSERT_OK_AND_ASSIGN(auto checked_ast, result.ReleaseAst());
auto& ast_impl = AstImpl::CastFromPublicAst(*checked_ast);
EXPECT_THAT(ast_impl.reference_map()[3],
IsFunctionReference("_+_", std::vector<std::string>{
"add_double_double", "add_int_int",
"add_list", "add_uint_uint"}));
}
TEST(TypeCheckerImplTest, BasicFunctionResultTypeResolution) {
TypeCheckEnv env;
google::protobuf::Arena arena;
ASSERT_THAT(RegisterMinimalBuiltins(&arena, env), IsOk());
env.InsertVariableIfAbsent(MakeVariableDecl("x", DoubleType()));
env.InsertVariableIfAbsent(MakeVariableDecl("y", DoubleType()));
env.InsertVariableIfAbsent(MakeVariableDecl("z", DoubleType()));
TypeCheckerImpl impl(std::move(env));
ASSERT_OK_AND_ASSIGN(auto ast, MakeTestParsedAst("x + y + z"));
ASSERT_OK_AND_ASSIGN(ValidationResult result, impl.Check(std::move(ast)));
EXPECT_TRUE(result.IsValid());
EXPECT_THAT(result.GetIssues(), IsEmpty());
ASSERT_OK_AND_ASSIGN(auto checked_ast, result.ReleaseAst());
auto& ast_impl = AstImpl::CastFromPublicAst(*checked_ast);
EXPECT_THAT(ast_impl.reference_map()[2],
IsFunctionReference(
"_+_", std::vector<std::string>{"add_double_double"}));
EXPECT_THAT(ast_impl.reference_map()[4],
IsFunctionReference(
"_+_", std::vector<std::string>{"add_double_double"}));
int64_t root_id = ast_impl.root_expr().id();
EXPECT_EQ(ast_impl.type_map()[root_id].primitive(),
ast_internal::PrimitiveType::kDouble);
}
TEST(TypeCheckerImplTest, BasicOvlResolutionNoMatch) {
TypeCheckEnv env;
google::protobuf::Arena arena;
ASSERT_THAT(RegisterMinimalBuiltins(&arena, env), IsOk());
env.InsertVariableIfAbsent(MakeVariableDecl("x", IntType()));
env.InsertVariableIfAbsent(MakeVariableDecl("y", StringType()));
TypeCheckerImpl impl(std::move(env));
ASSERT_OK_AND_ASSIGN(auto ast, MakeTestParsedAst("x + y"));
ASSERT_OK_AND_ASSIGN(ValidationResult result, impl.Check(std::move(ast)));
EXPECT_FALSE(result.IsValid());
EXPECT_THAT(result.GetIssues(),
Contains(IsIssueWithSubstring(Severity::kError,
"no matching overload for '_+_'"
" applied to (int, string)")));
}
TEST(TypeCheckerImplTest, ParameterizedOvlResolutionMatch) {
TypeCheckEnv env;
google::protobuf::Arena arena;
ASSERT_THAT(RegisterMinimalBuiltins(&arena, env), IsOk());
env.InsertVariableIfAbsent(MakeVariableDecl("x", IntType()));
env.InsertVariableIfAbsent(MakeVariableDecl("y", StringType()));
TypeCheckerImpl impl(std::move(env));
ASSERT_OK_AND_ASSIGN(auto ast, MakeTestParsedAst("([x] + []) == [x]"));
ASSERT_OK_AND_ASSIGN(ValidationResult result, impl.Check(std::move(ast)));
EXPECT_TRUE(result.IsValid());
}
TEST(TypeCheckerImplTest, AliasedTypeVarSameType) {
TypeCheckEnv env;
google::protobuf::Arena arena;
ASSERT_THAT(RegisterMinimalBuiltins(&arena, env), IsOk());
TypeCheckerImpl impl(std::move(env));
ASSERT_OK_AND_ASSIGN(auto ast,
MakeTestParsedAst("[].exists(x, x == 10 || x == '10')"));
ASSERT_OK_AND_ASSIGN(ValidationResult result, impl.Check(std::move(ast)));
EXPECT_FALSE(result.IsValid());
EXPECT_THAT(
result.GetIssues(),
ElementsAre(IsIssueWithSubstring(
Severity::kError, "no matching overload for '_==_' applied to")));
}
TEST(TypeCheckerImplTest, TypeVarRange) {
TypeCheckEnv env;
google::protobuf::Arena arena;
ASSERT_THAT(RegisterMinimalBuiltins(&arena, env), IsOk());
env.InsertFunctionIfAbsent(MakeIdentFunction());
TypeCheckerImpl impl(std::move(env));
ASSERT_OK_AND_ASSIGN(auto ast,
MakeTestParsedAst("identity([]).exists(x, x == 10 )"));
ASSERT_OK_AND_ASSIGN(ValidationResult result, impl.Check(std::move(ast)));
EXPECT_TRUE(result.IsValid()) << absl::StrJoin(result.GetIssues(), "\n");
}
TEST(TypeCheckerImplTest, WellKnownTypeCreation) {
TypeCheckEnv env;
env.AddTypeProvider(std::make_unique<TypeIntrospector>());
TypeCheckerImpl impl(std::move(env));
ASSERT_OK_AND_ASSIGN(
auto ast, MakeTestParsedAst("google.protobuf.Int32Value{value: 10}"));
ASSERT_OK_AND_ASSIGN(ValidationResult result, impl.Check(std::move(ast)));
ASSERT_OK_AND_ASSIGN(std::unique_ptr<Ast> checked_ast, result.ReleaseAst());
const auto& ast_impl = AstImpl::CastFromPublicAst(*checked_ast);
EXPECT_THAT(ast_impl.type_map(),
Contains(Pair(ast_impl.root_expr().id(),
Eq(AstType(ast_internal::PrimitiveTypeWrapper(
ast_internal::PrimitiveType::kInt64))))));
EXPECT_THAT(ast_impl.reference_map(),
Contains(Pair(ast_impl.root_expr().id(),
Property(&ast_internal::Reference::name,
"google.protobuf.Int32Value"))));
}
TEST(TypeCheckerImplTest, TypeInferredFromStructCreation) {
TypeCheckEnv env;
env.AddTypeProvider(std::make_unique<TypeIntrospector>());
TypeCheckerImpl impl(std::move(env));
ASSERT_OK_AND_ASSIGN(auto ast,
MakeTestParsedAst("google.protobuf.Struct{fields: {}}"));
ASSERT_OK_AND_ASSIGN(ValidationResult result, impl.Check(std::move(ast)));
ASSERT_OK_AND_ASSIGN(std::unique_ptr<Ast> checked_ast, result.ReleaseAst());
const auto& ast_impl = AstImpl::CastFromPublicAst(*checked_ast);
int64_t map_expr_id =
ast_impl.root_expr().struct_expr().fields().at(0).value().id();
ASSERT_NE(map_expr_id, 0);
EXPECT_THAT(
ast_impl.type_map(),
Contains(Pair(
map_expr_id,
Eq(AstType(ast_internal::MapType(
std::make_unique<AstType>(ast_internal::PrimitiveType::kString),
std::make_unique<AstType>(ast_internal::DynamicType())))))));
}
TEST(TypeCheckerImplTest, ContainerLookupForMessageCreation) {
TypeCheckEnv env;
env.set_container("google.protobuf");
env.AddTypeProvider(std::make_unique<TypeIntrospector>());
TypeCheckerImpl impl(std::move(env));
ASSERT_OK_AND_ASSIGN(auto ast, MakeTestParsedAst("Int32Value{value: 10}"));
ASSERT_OK_AND_ASSIGN(ValidationResult result, impl.Check(std::move(ast)));
ASSERT_OK_AND_ASSIGN(std::unique_ptr<Ast> checked_ast, result.ReleaseAst());
const auto& ast_impl = AstImpl::CastFromPublicAst(*checked_ast);
EXPECT_THAT(ast_impl.type_map(),
Contains(Pair(ast_impl.root_expr().id(),
Eq(AstType(ast_internal::PrimitiveTypeWrapper(
ast_internal::PrimitiveType::kInt64))))));
EXPECT_THAT(ast_impl.reference_map(),
Contains(Pair(ast_impl.root_expr().id(),
Property(&ast_internal::Reference::name,
"google.protobuf.Int32Value"))));
}
TEST(TypeCheckerImplTest, EnumValueCopiedToReferenceMap) {
TypeCheckEnv env;
env.set_container("google.api.expr.test.v1.proto3");
env.AddTypeProvider(std::make_unique<cel::extensions::ProtoTypeReflector>());
TypeCheckerImpl impl(std::move(env));
ASSERT_OK_AND_ASSIGN(auto ast,
MakeTestParsedAst("TestAllTypes.NestedEnum.BAZ"));
ASSERT_OK_AND_ASSIGN(ValidationResult result, impl.Check(std::move(ast)));
ASSERT_OK_AND_ASSIGN(std::unique_ptr<Ast> checked_ast, result.ReleaseAst());
const auto& ast_impl = AstImpl::CastFromPublicAst(*checked_ast);
auto ref_iter = ast_impl.reference_map().find(ast_impl.root_expr().id());
ASSERT_NE(ref_iter, ast_impl.reference_map().end());
EXPECT_EQ(ref_iter->second.name(),
"google.api.expr.test.v1.proto3.TestAllTypes.NestedEnum.BAZ");
EXPECT_EQ(ref_iter->second.value().int_value(), 2);
}
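// Shared test-case struct for the parameterized message-creation and
// field-selection suites below. An empty error_substring means the expression
// is expected to type-check with the given result type.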
struct CheckedExprTestCase {
std::string expr;
ast_internal::Type expected_result_type;
std::string error_substring;
};
class WktCreationTest : public testing::TestWithParam<CheckedExprTestCase> {};
TEST_P(WktCreationTest, MessageCreation) {
const CheckedExprTestCase& test_case = GetParam();
TypeCheckEnv env;
env.AddTypeProvider(std::make_unique<TypeIntrospector>());
env.set_container("google.protobuf");
TypeCheckerImpl impl(std::move(env));
ASSERT_OK_AND_ASSIGN(auto ast, MakeTestParsedAst(test_case.expr));
ASSERT_OK_AND_ASSIGN(ValidationResult result, impl.Check(std::move(ast)));
if (!test_case.error_substring.empty()) {
EXPECT_THAT(result.GetIssues(),
Contains(IsIssueWithSubstring(Severity::kError,
test_case.error_substring)));
return;
}
ASSERT_TRUE(result.IsValid())
<< absl::StrJoin(result.GetIssues(), "\n",
[](std::string* out, const TypeCheckIssue& issue) {
absl::StrAppend(out, issue.message());
});
ASSERT_OK_AND_ASSIGN(std::unique_ptr<Ast> checked_ast, result.ReleaseAst());
const auto& ast_impl = AstImpl::CastFromPublicAst(*checked_ast);
EXPECT_THAT(ast_impl.type_map(),
Contains(Pair(ast_impl.root_expr().id(),
Eq(test_case.expected_result_type))));
}
INSTANTIATE_TEST_SUITE_P(
WellKnownTypes, WktCreationTest,
::testing::Values(
CheckedExprTestCase{
.expr = "google.protobuf.Int32Value{value: 10}",
.expected_result_type = AstType(ast_internal::PrimitiveTypeWrapper(
ast_internal::PrimitiveType::kInt64)),
},
CheckedExprTestCase{
.expr = ".google.protobuf.Int32Value{value: 10}",
.expected_result_type = AstType(ast_internal::PrimitiveTypeWrapper(
ast_internal::PrimitiveType::kInt64)),
},
CheckedExprTestCase{
.expr = "Int32Value{value: 10}",
.expected_result_type = AstType(ast_internal::PrimitiveTypeWrapper(
ast_internal::PrimitiveType::kInt64)),
},
CheckedExprTestCase{
.expr = "google.protobuf.Int32Value{value: '10'}",
.expected_result_type = AstType(),
.error_substring = "expected type of field 'value' is 'int' but "
"provided type is 'string'"},
CheckedExprTestCase{
.expr = "google.protobuf.Int32Value{not_a_field: '10'}",
.expected_result_type = AstType(),
.error_substring = "undefined field 'not_a_field' not found in "
"struct 'google.protobuf.Int32Value'"},
CheckedExprTestCase{
.expr = "NotAType{not_a_field: '10'}",
.expected_result_type = AstType(),
.error_substring =
"undeclared reference to 'NotAType' (in container "
"'google.protobuf')"},
CheckedExprTestCase{
.expr = ".protobuf.Int32Value{value: 10}",
.expected_result_type = AstType(),
.error_substring =
"undeclared reference to '.protobuf.Int32Value' (in container "
"'google.protobuf')"},
CheckedExprTestCase{
.expr = "Int32Value{value: 10}.value",
.expected_result_type = AstType(),
.error_substring =
"expression of type 'google.protobuf.Int64Value' cannot be the "
"operand of a select operation"},
CheckedExprTestCase{
.expr = "Int64Value{value: 10}",
.expected_result_type = AstType(ast_internal::PrimitiveTypeWrapper(
ast_internal::PrimitiveType::kInt64)),
},
CheckedExprTestCase{
.expr = "BoolValue{value: true}",
.expected_result_type = AstType(ast_internal::PrimitiveTypeWrapper(
ast_internal::PrimitiveType::kBool)),
},
CheckedExprTestCase{
.expr = "UInt64Value{value: 10u}",
.expected_result_type = AstType(ast_internal::PrimitiveTypeWrapper(
ast_internal::PrimitiveType::kUint64)),
},
CheckedExprTestCase{
.expr = "UInt32Value{value: 10u}",
.expected_result_type = AstType(ast_internal::PrimitiveTypeWrapper(
ast_internal::PrimitiveType::kUint64)),
},
CheckedExprTestCase{
.expr = "FloatValue{value: 1.25}",
.expected_result_type = AstType(ast_internal::PrimitiveTypeWrapper(
ast_internal::PrimitiveType::kDouble)),
},
CheckedExprTestCase{
.expr = "DoubleValue{value: 1.25}",
.expected_result_type = AstType(ast_internal::PrimitiveTypeWrapper(
ast_internal::PrimitiveType::kDouble)),
},
CheckedExprTestCase{
.expr = "StringValue{value: 'test'}",
.expected_result_type = AstType(ast_internal::PrimitiveTypeWrapper(
ast_internal::PrimitiveType::kString)),
},
CheckedExprTestCase{
.expr = "BytesValue{value: b'test'}",
.expected_result_type = AstType(ast_internal::PrimitiveTypeWrapper(
ast_internal::PrimitiveType::kBytes)),
},
CheckedExprTestCase{
.expr = "Duration{seconds: 10, nanos: 11}",
.expected_result_type =
AstType(ast_internal::WellKnownType::kDuration),
},
CheckedExprTestCase{
.expr = "Timestamp{seconds: 10, nanos: 11}",
.expected_result_type =
AstType(ast_internal::WellKnownType::kTimestamp),
},
CheckedExprTestCase{
.expr = "Struct{fields: {'key': 'value'}}",
.expected_result_type = AstType(ast_internal::MapType(
std::make_unique<AstType>(ast_internal::PrimitiveType::kString),
std::make_unique<AstType>(ast_internal::DynamicType()))),
},
CheckedExprTestCase{
.expr = "ListValue{values: [1, 2, 3]}",
.expected_result_type = AstType(ast_internal::ListType(
std::make_unique<AstType>(ast_internal::DynamicType()))),
},
CheckedExprTestCase{
.expr = R"cel(
Any{
type_url:'type.googleapis.com/google.protobuf.Int32Value',
value: b''
})cel",
.expected_result_type = AstType(ast_internal::WellKnownType::kAny),
}));
class GenericMessagesTest : public testing::TestWithParam<CheckedExprTestCase> {
};
TEST_P(GenericMessagesTest, TypeChecksProto3) {
const CheckedExprTestCase& test_case = GetParam();
google::protobuf::Arena arena;
TypeCheckEnv env;
env.AddTypeProvider(std::make_unique<cel::extensions::ProtoTypeReflector>());
env.set_container("google.api.expr.test.v1.proto3");
google::protobuf::LinkMessageReflection<testpb3::TestAllTypes>();
ASSERT_TRUE(env.InsertVariableIfAbsent(MakeVariableDecl(
"test_msg", MessageType(testpb3::TestAllTypes::descriptor()))));
ASSERT_THAT(RegisterMinimalBuiltins(&arena, env), IsOk());
TypeCheckerImpl impl(std::move(env));
ASSERT_OK_AND_ASSIGN(auto ast, MakeTestParsedAst(test_case.expr));
ASSERT_OK_AND_ASSIGN(ValidationResult result, impl.Check(std::move(ast)));
if (!test_case.error_substring.empty()) {
EXPECT_THAT(result.GetIssues(),
Contains(IsIssueWithSubstring(Severity::kError,
test_case.error_substring)));
return;
}
ASSERT_TRUE(result.IsValid())
<< absl::StrJoin(result.GetIssues(), "\n",
[](std::string* out, const TypeCheckIssue& issue) {
absl::StrAppend(out, issue.message());
});
ASSERT_OK_AND_ASSIGN(std::unique_ptr<Ast> checked_ast, result.ReleaseAst());
const auto& ast_impl = AstImpl::CastFromPublicAst(*checked_ast);
EXPECT_THAT(ast_impl.type_map(),
Contains(Pair(ast_impl.root_expr().id(),
Eq(test_case.expected_result_type))));
}
INSTANTIATE_TEST_SUITE_P(
TestAllTypesCreation, GenericMessagesTest,
::testing::Values(
CheckedExprTestCase{
.expr = "TestAllTypes{not_a_field: 10}",
.expected_result_type = AstType(),
.error_substring =
"undefined field 'not_a_field' not found in "
"struct 'google.api.expr.test.v1.proto3.TestAllTypes'"},
CheckedExprTestCase{
.expr = "TestAllTypes{single_int64: 10}",
.expected_result_type = AstType(ast_internal::MessageType(
"google.api.expr.test.v1.proto3.TestAllTypes")),
},
CheckedExprTestCase{
.expr = "TestAllTypes{single_int64: 'string'}",
.expected_result_type = AstType(),
.error_substring =
"expected type of field 'single_int64' is 'int' but "
"provided type is 'string'"},
CheckedExprTestCase{
.expr = "TestAllTypes{single_int32: 10}",
.expected_result_type = AstType(ast_internal::MessageType(
"google.api.expr.test.v1.proto3.TestAllTypes")),
},
CheckedExprTestCase{
.expr = "TestAllTypes{single_uint64: 10u}",
.expected_result_type = AstType(ast_internal::MessageType(
"google.api.expr.test.v1.proto3.TestAllTypes")),
},
CheckedExprTestCase{
.expr = "TestAllTypes{single_uint32: 10u}",
.expected_result_type = AstType(ast_internal::MessageType(
"google.api.expr.test.v1.proto3.TestAllTypes")),
},
CheckedExprTestCase{
.expr = "TestAllTypes{single_sint64: 10}",
.expected_result_type = AstType(ast_internal::MessageType(
"google.api.expr.test.v1.proto3.TestAllTypes")),
},
CheckedExprTestCase{
.expr = "TestAllTypes{single_sint32: 10}",
.expected_result_type = AstType(ast_internal::MessageType(
"google.api.expr.test.v1.proto3.TestAllTypes")),
},
CheckedExprTestCase{
.expr = "TestAllTypes{single_fixed64: 10u}",
.expected_result_type = AstType(ast_internal::MessageType(
"google.api.expr.test.v1.proto3.TestAllTypes")),
},
CheckedExprTestCase{
.expr = "TestAllTypes{single_fixed32: 10u}",
.expected_result_type = AstType(ast_internal::MessageType(
"google.api.expr.test.v1.proto3.TestAllTypes")),
},
CheckedExprTestCase{
.expr = "TestAllTypes{single_sfixed64: 10}",
.expected_result_type = AstType(ast_internal::MessageType(
"google.api.expr.test.v1.proto3.TestAllTypes")),
},
CheckedExprTestCase{
.expr = "TestAllTypes{single_sfixed32: 10}",
.expected_result_type = AstType(ast_internal::MessageType(
"google.api.expr.test.v1.proto3.TestAllTypes")),
},
CheckedExprTestCase{
.expr = "TestAllTypes{single_double: 1.25}",
.expected_result_type = AstType(ast_internal::MessageType(
"google.api.expr.test.v1.proto3.TestAllTypes")),
},
CheckedExprTestCase{
.expr = "TestAllTypes{single_float: 1.25}",
.expected_result_type = AstType(ast_internal::MessageType(
"google.api.expr.test.v1.proto3.TestAllTypes")),
},
CheckedExprTestCase{
.expr = "TestAllTypes{single_string: 'string'}",
.expected_result_type = AstType(ast_internal::MessageType(
"google.api.expr.test.v1.proto3.TestAllTypes")),
},
CheckedExprTestCase{
.expr = "TestAllTypes{single_bool: true}",
.expected_result_type = AstType(ast_internal::MessageType(
"google.api.expr.test.v1.proto3.TestAllTypes")),
},
CheckedExprTestCase{
.expr = "TestAllTypes{single_bytes: b'string'}",
.expected_result_type = AstType(ast_internal::MessageType(
"google.api.expr.test.v1.proto3.TestAllTypes")),
},
CheckedExprTestCase{
.expr = "TestAllTypes{single_any: TestAllTypes{single_int64: 10}}",
.expected_result_type = AstType(ast_internal::MessageType(
"google.api.expr.test.v1.proto3.TestAllTypes")),
},
CheckedExprTestCase{
.expr = "TestAllTypes{single_any: 1}",
.expected_result_type = AstType(ast_internal::MessageType(
"google.api.expr.test.v1.proto3.TestAllTypes")),
},
CheckedExprTestCase{
.expr = "TestAllTypes{single_any: 'string'}",
.expected_result_type = AstType(ast_internal::MessageType(
"google.api.expr.test.v1.proto3.TestAllTypes")),
},
CheckedExprTestCase{
.expr = "TestAllTypes{single_any: ['string']}",
.expected_result_type = AstType(ast_internal::MessageType(
"google.api.expr.test.v1.proto3.TestAllTypes")),
},
CheckedExprTestCase{
.expr = "TestAllTypes{single_duration: duration('1s')}",
.expected_result_type = AstType(ast_internal::MessageType(
"google.api.expr.test.v1.proto3.TestAllTypes")),
},
CheckedExprTestCase{
.expr = "TestAllTypes{single_timestamp: timestamp(0)}",
.expected_result_type = AstType(ast_internal::MessageType(
"google.api.expr.test.v1.proto3.TestAllTypes")),
},
CheckedExprTestCase{
.expr = "TestAllTypes{single_struct: {}}",
.expected_result_type = AstType(ast_internal::MessageType(
"google.api.expr.test.v1.proto3.TestAllTypes")),
},
CheckedExprTestCase{
.expr = "TestAllTypes{single_struct: {'key': 'value'}}",
.expected_result_type = AstType(ast_internal::MessageType(
"google.api.expr.test.v1.proto3.TestAllTypes")),
},
CheckedExprTestCase{
.expr = "TestAllTypes{single_struct: {1: 2}}",
.expected_result_type = AstType(),
.error_substring = "expected type of field 'single_struct' is "
"'map<string, dyn>' but "
"provided type is 'map<int, int>'"},
CheckedExprTestCase{
.expr = "TestAllTypes{list_value: [1, 2, 3]}",
.expected_result_type = AstType(ast_internal::MessageType(
"google.api.expr.test.v1.proto3.TestAllTypes")),
},
CheckedExprTestCase{
.expr = "TestAllTypes{list_value: []}",
.expected_result_type = AstType(ast_internal::MessageType(
"google.api.expr.test.v1.proto3.TestAllTypes")),
},
CheckedExprTestCase{
.expr = "TestAllTypes{list_value: 1}",
.expected_result_type = AstType(),
.error_substring =
"expected type of field 'list_value' is 'list<dyn>' but "
"provided type is 'int'"},
CheckedExprTestCase{
.expr = "TestAllTypes{single_int64_wrapper: 1}",
.expected_result_type = AstType(ast_internal::MessageType(
"google.api.expr.test.v1.proto3.TestAllTypes")),
},
CheckedExprTestCase{
.expr = "TestAllTypes{single_int64_wrapper: null}",
.expected_result_type = AstType(ast_internal::MessageType(
"google.api.expr.test.v1.proto3.TestAllTypes")),
},
CheckedExprTestCase{
.expr = "TestAllTypes{single_value: null}",
.expected_result_type = AstType(ast_internal::MessageType(
"google.api.expr.test.v1.proto3.TestAllTypes")),
},
CheckedExprTestCase{
.expr = "TestAllTypes{single_value: 1.0}",
.expected_result_type = AstType(ast_internal::MessageType(
"google.api.expr.test.v1.proto3.TestAllTypes")),
},
CheckedExprTestCase{
.expr = "TestAllTypes{single_value: 'string'}",
.expected_result_type = AstType(ast_internal::MessageType(
"google.api.expr.test.v1.proto3.TestAllTypes")),
},
CheckedExprTestCase{
.expr = "TestAllTypes{single_value: {'string': 'string'}}",
.expected_result_type = AstType(ast_internal::MessageType(
"google.api.expr.test.v1.proto3.TestAllTypes")),
},
CheckedExprTestCase{
.expr = "TestAllTypes{single_value: ['string']}",
.expected_result_type = AstType(ast_internal::MessageType(
"google.api.expr.test.v1.proto3.TestAllTypes")),
},
CheckedExprTestCase{
.expr = "TestAllTypes{repeated_int64: [1, 2, 3]}",
.expected_result_type = AstType(ast_internal::MessageType(
"google.api.expr.test.v1.proto3.TestAllTypes")),
},
CheckedExprTestCase{
.expr = "TestAllTypes{repeated_int64: ['string']}",
.expected_result_type = AstType(),
.error_substring =
"expected type of field 'repeated_int64' is 'list<int>'"},
CheckedExprTestCase{
.expr = "TestAllTypes{map_string_int64: ['string']}",
.expected_result_type = AstType(),
.error_substring = "expected type of field 'map_string_int64' is "
"'map<string, int>'"},
CheckedExprTestCase{
.expr = "TestAllTypes{map_string_int64: {'string': 1}}",
.expected_result_type = AstType(ast_internal::MessageType(
"google.api.expr.test.v1.proto3.TestAllTypes")),
},
CheckedExprTestCase{
.expr = "TestAllTypes{single_nested_enum: 1}",
.expected_result_type = AstType(ast_internal::MessageType(
"google.api.expr.test.v1.proto3.TestAllTypes")),
},
CheckedExprTestCase{
.expr =
"TestAllTypes{single_nested_enum: TestAllTypes.NestedEnum.BAR}",
.expected_result_type = AstType(ast_internal::MessageType(
"google.api.expr.test.v1.proto3.TestAllTypes")),
},
CheckedExprTestCase{
.expr = "TestAllTypes.NestedEnum.BAR",
.expected_result_type =
AstType(ast_internal::PrimitiveType::kInt64),
},
CheckedExprTestCase{
.expr = "TestAllTypes",
.expected_result_type =
AstType(std::make_unique<AstType>(ast_internal::MessageType(
"google.api.expr.test.v1.proto3.TestAllTypes"))),
},
CheckedExprTestCase{
.expr = "TestAllTypes == type(TestAllTypes{})",
.expected_result_type = AstType(ast_internal::PrimitiveType::kBool),
}));
INSTANTIATE_TEST_SUITE_P(
TestAllTypesFieldSelection, GenericMessagesTest,
::testing::Values(
CheckedExprTestCase{
.expr = "test_msg.not_a_field",
.expected_result_type = AstType(),
.error_substring =
"undefined field 'not_a_field' not found in "
"struct 'google.api.expr.test.v1.proto3.TestAllTypes'"},
CheckedExprTestCase{
.expr = "test_msg.single_int64",
.expected_result_type =
AstType(ast_internal::PrimitiveType::kInt64),
},
CheckedExprTestCase{
.expr = "test_msg.single_nested_enum",
.expected_result_type =
AstType(ast_internal::PrimitiveType::kInt64),
},
CheckedExprTestCase{
.expr = "test_msg.single_nested_enum == 1",
.expected_result_type = AstType(ast_internal::PrimitiveType::kBool),
},
CheckedExprTestCase{
.expr =
"test_msg.single_nested_enum == TestAllTypes.NestedEnum.BAR",
.expected_result_type = AstType(ast_internal::PrimitiveType::kBool),
},
CheckedExprTestCase{
.expr = "has(test_msg.not_a_field)",
.expected_result_type = AstType(),
.error_substring =
"undefined field 'not_a_field' not found in "
"struct 'google.api.expr.test.v1.proto3.TestAllTypes'"},
CheckedExprTestCase{
.expr = "has(test_msg.single_int64)",
.expected_result_type = AstType(ast_internal::PrimitiveType::kBool),
},
CheckedExprTestCase{
.expr = "test_msg.single_int32",
.expected_result_type =
AstType(ast_internal::PrimitiveType::kInt64),
},
CheckedExprTestCase{
.expr = "test_msg.single_uint64",
.expected_result_type =
AstType(ast_internal::PrimitiveType::kUint64),
},
CheckedExprTestCase{
.expr = "test_msg.single_uint32",
.expected_result_type =
AstType(ast_internal::PrimitiveType::kUint64),
},
CheckedExprTestCase{
.expr = "test_msg.single_sint64",
.expected_result_type =
AstType(ast_internal::PrimitiveType::kInt64),
},
CheckedExprTestCase{
.expr = "test_msg.single_sint32",
.expected_result_type =
AstType(ast_internal::PrimitiveType::kInt64),
},
CheckedExprTestCase{
.expr = "test_msg.single_fixed64",
.expected_result_type =
AstType(ast_internal::PrimitiveType::kUint64),
},
CheckedExprTestCase{
.expr = "test_msg.single_fixed32",
.expected_result_type =
AstType(ast_internal::PrimitiveType::kUint64),
},
CheckedExprTestCase{
.expr = "test_msg.single_sfixed64",
.expected_result_type =
AstType(ast_internal::PrimitiveType::kInt64),
},
CheckedExprTestCase{
.expr = "test_msg.single_sfixed32",
.expected_result_type =
AstType(ast_internal::PrimitiveType::kInt64),
},
CheckedExprTestCase{
.expr = "test_msg.single_float",
.expected_result_type =
AstType(ast_internal::PrimitiveType::kDouble),
},
CheckedExprTestCase{
.expr = "test_msg.single_double",
.expected_result_type =
AstType(ast_internal::PrimitiveType::kDouble),
},
CheckedExprTestCase{
.expr = "test_msg.single_string",
.expected_result_type =
AstType(ast_internal::PrimitiveType::kString),
},
CheckedExprTestCase{
.expr = "test_msg.single_bool",
.expected_result_type = AstType(ast_internal::PrimitiveType::kBool),
},
CheckedExprTestCase{
.expr = "test_msg.single_bytes",
.expected_result_type =
AstType(ast_internal::PrimitiveType::kBytes),
},
CheckedExprTestCase{
.expr = "test_msg.repeated_int32",
.expected_result_type =
AstType(ast_internal::ListType(std::make_unique<AstType>(
ast_internal::PrimitiveType::kInt64))),
},
CheckedExprTestCase{
.expr = "test_msg.repeated_string",
.expected_result_type =
AstType(ast_internal::ListType(std::make_unique<AstType>(
ast_internal::PrimitiveType::kString))),
},
CheckedExprTestCase{
.expr = "test_msg.map_bool_bool",
.expected_result_type = AstType(ast_internal::MapType(
std::make_unique<AstType>(ast_internal::PrimitiveType::kBool),
std::make_unique<AstType>(ast_internal::PrimitiveType::kBool))),
},
CheckedExprTestCase{
.expr = "test_msg.map_bool_bool.field_like_key",
.expected_result_type = AstType(),
.error_substring =
"expression of type 'map<bool, bool>' cannot be the operand"
" of a select operation",
},
CheckedExprTestCase{
.expr = "test_msg.map_string_int64",
.expected_result_type = AstType(ast_internal::MapType(
std::make_unique<AstType>(ast_internal::PrimitiveType::kString),
std::make_unique<AstType>(
ast_internal::PrimitiveType::kInt64))),
},
CheckedExprTestCase{
.expr = "test_msg.map_string_int64.field_like_key",
.expected_result_type =
AstType(ast_internal::PrimitiveType::kInt64),
},
CheckedExprTestCase{
.expr = "test_msg.single_duration",
.expected_result_type =
AstType(ast_internal::WellKnownType::kDuration),
},
CheckedExprTestCase{
.expr = "test_msg.single_timestamp",
.expected_result_type =
AstType(ast_internal::WellKnownType::kTimestamp),
},
CheckedExprTestCase{
.expr = "test_msg.single_any",
.expected_result_type = AstType(ast_internal::WellKnownType::kAny),
},
CheckedExprTestCase{
.expr = "test_msg.single_int64_wrapper",
.expected_result_type = AstType(ast_internal::PrimitiveTypeWrapper(
ast_internal::PrimitiveType::kInt64)),
},
CheckedExprTestCase{
.expr = "test_msg.single_struct",
.expected_result_type = AstType(ast_internal::MapType(
std::make_unique<AstType>(ast_internal::PrimitiveType::kString),
std::make_unique<AstType>(ast_internal::DynamicType()))),
},
CheckedExprTestCase{
.expr = "test_msg.list_value",
.expected_result_type = AstType(ast_internal::ListType(
std::make_unique<AstType>(ast_internal::DynamicType()))),
},
CheckedExprTestCase{
.expr = "NestedTestAllTypes{}.child.child.payload.single_int64",
.expected_result_type =
AstType(ast_internal::PrimitiveType::kInt64),
      }));
}
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/checker/internal/type_checker_impl.cc | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/checker/internal/type_checker_impl_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
2d57c5af-e081-4997-99fd-8108d330293d | cpp | google/tensorstore | zstd_compressor | tensorstore/internal/compression/zstd_compressor.cc | tensorstore/driver/n5/zstd_compressor_test.cc | #include "tensorstore/internal/compression/zstd_compressor.h"
#include <cstddef>
#include <memory>
#include <utility>
#include "riegeli/bytes/writer.h"
#include "riegeli/zstd/zstd_reader.h"
#include "riegeli/zstd/zstd_writer.h"
namespace tensorstore {
namespace internal {
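// Wraps the destination writer in a riegeli::ZstdWriter configured with the
// compressor's level; element_bytes is unused by zstd.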
std::unique_ptr<riegeli::Writer> ZstdCompressor::GetWriter(
std::unique_ptr<riegeli::Writer> base_writer, size_t element_bytes) const {
using Writer = riegeli::ZstdWriter<std::unique_ptr<riegeli::Writer>>;
Writer::Options options;
options.set_compression_level(level);
return std::make_unique<Writer>(std::move(base_writer), options);
}
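// Decompression takes no parameters: wrap the source in a riegeli::ZstdReader.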
std::unique_ptr<riegeli::Reader> ZstdCompressor::GetReader(
std::unique_ptr<riegeli::Reader> base_reader, size_t element_bytes) const {
using Reader = riegeli::ZstdReader<std::unique_ptr<riegeli::Reader>>;
return std::make_unique<Reader>(std::move(base_reader));
}
}
} | #include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/cord.h"
#include "tensorstore/driver/n5/compressor.h"
#include "tensorstore/internal/json_gtest.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::MatchesStatus;
using ::tensorstore::internal_n5::Compressor;
TEST(ZstdCompressorTest, SmallRoundtrip) {
auto compressor =
Compressor::FromJson({{"type", "zstd"}, {"level", 6}}).value();
const absl::Cord input("The quick brown fox jumped over the lazy dog.");
absl::Cord encode_result, decode_result;
TENSORSTORE_ASSERT_OK(compressor->Encode(input, &encode_result, 1));
TENSORSTORE_ASSERT_OK(compressor->Decode(encode_result, &decode_result, 1));
EXPECT_EQ(input, decode_result);
}
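// Omitting "level" should behave identically to an explicit level of 1.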
TEST(ZstdCompressorTest, DefaultLevel) {
auto compressor1 = Compressor::FromJson({{"type", "zstd"}}).value();
auto compressor2 =
Compressor::FromJson({{"type", "zstd"}, {"level", 1}}).value();
const absl::Cord input("The quick brown fox jumped over the lazy dog.");
absl::Cord encode_result1, encode_result2;
TENSORSTORE_ASSERT_OK(compressor1->Encode(input, &encode_result1, 1));
TENSORSTORE_ASSERT_OK(compressor2->Encode(input, &encode_result2, 1));
EXPECT_EQ(encode_result1, encode_result2);
}
TEST(ZstdCompressorTest, NonDefaultLevel) {
auto compressor =
Compressor::FromJson({{"type", "zstd"}, {"level", 9}}).value();
const absl::Cord input("The quick brown fox jumped over the lazy dog.");
absl::Cord encode_result;
TENSORSTORE_ASSERT_OK(compressor->Encode(input, &encode_result, 1));
absl::Cord decode_result;
TENSORSTORE_ASSERT_OK(compressor->Decode(encode_result, &decode_result, 1));
EXPECT_EQ(input, decode_result);
}
TEST(ZstdCompressorTest, InvalidParameter) {
EXPECT_THAT(Compressor::FromJson({{"type", "zstd"}, {"level", "6"}}),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Error parsing object member \"level\": .*"));
EXPECT_THAT(Compressor::FromJson({{"type", "zstd"}, {"level", -131073}}),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Error parsing object member \"level\": .*"));
EXPECT_THAT(Compressor::FromJson({{"type", "zstd"}, {"level", 23}}),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Error parsing object member \"level\": .*"));
EXPECT_THAT(Compressor::FromJson({{"type", "zstd"}, {"foo", 10}}),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Object includes extra members: \"foo\""));
}
TEST(ZstdCompressorTest, ToJson) {
auto compressor =
Compressor::FromJson({{"type", "zstd"}, {"level", 5}}).value();
EXPECT_EQ(nlohmann::json({{"type", "zstd"}, {"level", 5}}),
compressor.ToJson());
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/compression/zstd_compressor.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/driver/n5/zstd_compressor_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
8867b303-3760-4c6a-ae22-deb8039dc228 | cpp | google/quiche | null_decrypter | quiche/quic/core/crypto/null_decrypter.cc | quiche/quic/core/crypto/null_decrypter_test.cc | #include "quiche/quic/core/crypto/null_decrypter.h"
#include <cstdint>
#include <limits>
#include <string>
#include "absl/numeric/int128.h"
#include "absl/strings/string_view.h"
#include "quiche/quic/core/quic_data_reader.h"
#include "quiche/quic/core/quic_utils.h"
#include "quiche/quic/platform/api/quic_bug_tracker.h"
#include "quiche/common/quiche_endian.h"
namespace quic {
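// NullDecrypter performs no real decryption: each "ciphertext" is a 96-bit
// FNV-1a hash over the associated data and payload followed by the plaintext,
// and DecryptPacket simply verifies the hash.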
NullDecrypter::NullDecrypter(Perspective perspective)
: perspective_(perspective) {}
bool NullDecrypter::SetKey(absl::string_view key) { return key.empty(); }
bool NullDecrypter::SetNoncePrefix(absl::string_view nonce_prefix) {
return nonce_prefix.empty();
}
bool NullDecrypter::SetIV(absl::string_view iv) { return iv.empty(); }
bool NullDecrypter::SetHeaderProtectionKey(absl::string_view key) {
return key.empty();
}
bool NullDecrypter::SetPreliminaryKey(absl::string_view /*key*/) {
QUIC_BUG(quic_bug_10652_1) << "Should not be called";
return false;
}
bool NullDecrypter::SetDiversificationNonce(
    const DiversificationNonce& /*nonce*/) {
QUIC_BUG(quic_bug_10652_2) << "Should not be called";
return true;
}
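// "Null" decryption: verify the 12-byte FNV-1a hash prefix over the
// associated data and the plaintext, then copy the plaintext to |output|.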
bool NullDecrypter::DecryptPacket(uint64_t /*packet_number*/,
absl::string_view associated_data,
absl::string_view ciphertext, char* output,
size_t* output_length,
size_t max_output_length) {
QuicDataReader reader(ciphertext.data(), ciphertext.length(),
quiche::HOST_BYTE_ORDER);
absl::uint128 hash;
if (!ReadHash(&reader, &hash)) {
return false;
}
absl::string_view plaintext = reader.ReadRemainingPayload();
if (plaintext.length() > max_output_length) {
QUIC_BUG(quic_bug_10652_3)
<< "Output buffer must be larger than the plaintext.";
return false;
}
if (hash != ComputeHash(associated_data, plaintext)) {
return false;
}
memcpy(output, plaintext.data(), plaintext.length());
*output_length = plaintext.length();
return true;
}
std::string NullDecrypter::GenerateHeaderProtectionMask(
    QuicDataReader* /*sample_reader*/) {
return std::string(5, 0);
}
size_t NullDecrypter::GetKeySize() const { return 0; }
size_t NullDecrypter::GetNoncePrefixSize() const { return 0; }
size_t NullDecrypter::GetIVSize() const { return 0; }
absl::string_view NullDecrypter::GetKey() const { return absl::string_view(); }
absl::string_view NullDecrypter::GetNoncePrefix() const {
return absl::string_view();
}
uint32_t NullDecrypter::cipher_id() const { return 0; }
QuicPacketCount NullDecrypter::GetIntegrityLimit() const {
return std::numeric_limits<QuicPacketCount>::max();
}
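// Reads the 12-byte hash prefix: the low 64 bits followed by the high 32
// bits, in host byte order.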
bool NullDecrypter::ReadHash(QuicDataReader* reader, absl::uint128* hash) {
uint64_t lo;
uint32_t hi;
if (!reader->ReadUInt64(&lo) || !reader->ReadUInt32(&hi)) {
return false;
}
*hash = absl::MakeUint128(hi, lo);
return true;
}
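// FNV-1a 128-bit hash of the inputs plus a perspective-dependent label,
// truncated to the low 96 bits.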
absl::uint128 NullDecrypter::ComputeHash(const absl::string_view data1,
const absl::string_view data2) const {
absl::uint128 correct_hash;
if (perspective_ == Perspective::IS_CLIENT) {
correct_hash = QuicUtils::FNV1a_128_Hash_Three(data1, data2, "Server");
} else {
correct_hash = QuicUtils::FNV1a_128_Hash_Three(data1, data2, "Client");
}
absl::uint128 mask = absl::MakeUint128(UINT64_C(0x0), UINT64_C(0xffffffff));
mask <<= 96;
correct_hash &= ~mask;
return correct_hash;
}
} | #include "quiche/quic/core/crypto/null_decrypter.h"
#include "absl/base/macros.h"
#include "quiche/quic/platform/api/quic_test.h"
#include "quiche/quic/test_tools/quic_test_utils.h"
namespace quic {
namespace test {
class NullDecrypterTest : public QuicTestWithParam<bool> {};
TEST_F(NullDecrypterTest, DecryptClient) {
  unsigned char expected[] = {
      // fnv hash of the associated data and plaintext (12 bytes)
      0x97, 0xdc, 0x27, 0x2f, 0x18, 0xa8, 0x56, 0x73, 0xdf, 0x8d, 0x1d, 0xd0,
      // payload
      'g', 'o', 'o', 'd', 'b', 'y', 'e', '!',
  };
const char* data = reinterpret_cast<const char*>(expected);
size_t len = ABSL_ARRAYSIZE(expected);
NullDecrypter decrypter(Perspective::IS_SERVER);
char buffer[256];
size_t length = 0;
ASSERT_TRUE(decrypter.DecryptPacket(
0, "hello world!", absl::string_view(data, len), buffer, &length, 256));
EXPECT_LT(0u, length);
EXPECT_EQ("goodbye!", absl::string_view(buffer, length));
}
TEST_F(NullDecrypterTest, DecryptServer) {
  unsigned char expected[] = {
      // fnv hash of the associated data and plaintext (12 bytes)
      0x63, 0x5e, 0x08, 0x03, 0x32, 0x80, 0x8f, 0x73, 0xdf, 0x8d, 0x1d, 0x1a,
      // payload
      'g', 'o', 'o', 'd', 'b', 'y', 'e', '!',
  };
const char* data = reinterpret_cast<const char*>(expected);
size_t len = ABSL_ARRAYSIZE(expected);
NullDecrypter decrypter(Perspective::IS_CLIENT);
char buffer[256];
size_t length = 0;
ASSERT_TRUE(decrypter.DecryptPacket(
0, "hello world!", absl::string_view(data, len), buffer, &length, 256));
EXPECT_LT(0u, length);
EXPECT_EQ("goodbye!", absl::string_view(buffer, length));
}
TEST_F(NullDecrypterTest, BadHash) {
  unsigned char expected[] = {
      // invalid hash (12 bytes)
      0x46, 0x11, 0xea, 0x5f, 0xcf, 0x1d, 0x66, 0x5b, 0xba, 0xf0, 0xbc, 0xfd,
      // payload
      'g', 'o', 'o', 'd', 'b', 'y', 'e', '!',
  };
const char* data = reinterpret_cast<const char*>(expected);
size_t len = ABSL_ARRAYSIZE(expected);
NullDecrypter decrypter(Perspective::IS_CLIENT);
char buffer[256];
size_t length = 0;
ASSERT_FALSE(decrypter.DecryptPacket(
0, "hello world!", absl::string_view(data, len), buffer, &length, 256));
}
TEST_F(NullDecrypterTest, ShortInput) {
unsigned char expected[] = {
0x46, 0x11, 0xea, 0x5f, 0xcf, 0x1d, 0x66, 0x5b, 0xba, 0xf0, 0xbc,
};
const char* data = reinterpret_cast<const char*>(expected);
size_t len = ABSL_ARRAYSIZE(expected);
NullDecrypter decrypter(Perspective::IS_CLIENT);
char buffer[256];
size_t length = 0;
ASSERT_FALSE(decrypter.DecryptPacket(
0, "hello world!", absl::string_view(data, len), buffer, &length, 256));
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/crypto/null_decrypter.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/crypto/null_decrypter_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
e022bc24-e0e7-4a97-befe-c3cdf0fbf2d6 | cpp | tensorflow/tensorflow | kernel | tensorflow/lite/delegates/flex/kernel.cc | tensorflow/lite/delegates/flex/kernel_test.cc | #include "tensorflow/lite/delegates/flex/kernel.h"
#include <algorithm>
#include <map>
#include <memory>
#include <set>
#include <string>
#include <utility>
#include <vector>
#include "flatbuffers/flexbuffers.h"
#include "tensorflow/core/common_runtime/eager/context.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
#include "tensorflow/lite/builtin_ops.h"
#include "tensorflow/lite/context_util.h"
#include "tensorflow/lite/core/api/profiler.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/delegates/flex/delegate.h"
#include "tensorflow/lite/delegates/flex/delegate_data.h"
#include "tensorflow/lite/delegates/flex/util.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/minimal_logging.h"
#include "tensorflow/lite/string_type.h"
using tensorflow::shape_inference::DimensionHandle;
using tensorflow::shape_inference::InferenceContext;
using tensorflow::shape_inference::ShapeAndType;
using tensorflow::shape_inference::ShapeHandle;
namespace tflite {
namespace flex {
constexpr char kReadVariableOp[] = "ReadVariableOp";
constexpr char kInterOpParallelismAttrName[] = "use_inter_op_parallelism";
struct OpNode;
struct TensorSource {
OpNode* node;
int node_output_index;
};
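// The TfLite input tensor indexes of a node, together with where each input
// comes from and whether its buffer may be forwarded (consumed) by the node.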
class OpInputs {
public:
explicit OpInputs(const TfLiteIntArray* indexes) {
for (int index : TfLiteIntArrayView(indexes)) {
inputs_.push_back(index);
}
forwardable_.resize(inputs_.size());
}
~OpInputs() = default;
int Size() const { return inputs_.size(); }
int TfLiteIndex(int i) const { return inputs_[i]; }
void InitializeTensorSources(
const std::map<int, TensorSource>& tflite_tensor_sources) {
sources_.clear();
for (int i : inputs_) {
auto it = tflite_tensor_sources.find(i);
if (it == tflite_tensor_sources.end()) {
sources_.push_back({nullptr, 0});
} else {
sources_.push_back(it->second);
}
}
}
void SetForwardable(int i, bool v) { forwardable_[i] = v; }
bool IsForwardable(int i) const { return forwardable_[i]; }
TensorSource GetTensorSource(int i) const { return sources_[i]; }
private:
std::vector<int> inputs_;
std::vector<TensorSource> sources_;
std::vector<int> forwardable_;
};
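// The TfLite output tensor indexes of a node and the TensorFlow tensors
// produced for them.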
class OpOutputs {
public:
explicit OpOutputs(const TfLiteIntArray* indexes) {
for (int index : TfLiteIntArrayView(indexes)) {
outputs_.push_back(index);
}
vector_.resize(outputs_.size());
}
~OpOutputs() = default;
void InitializeGraphOutputs(const std::set<int>& subgraph_outputs) {
subgraph_outputs_.clear();
for (int i : outputs_) {
subgraph_outputs_.push_back(subgraph_outputs.count(i) > 0);
}
}
bool IsSubgraphOutput(int i) const { return subgraph_outputs_[i]; }
const tensorflow::Tensor& GetTensor(int i) const { return vector_[i]; }
tensorflow::Tensor ReleaseTensor(int i) { return std::move(vector_[i]); }
int Size() const { return outputs_.size(); }
int TfLiteIndex(int i) const { return outputs_[i]; }
absl::InlinedVector<tensorflow::Tensor, 2UL>* GetTensors() {
return &vector_;
}
private:
std::vector<int> outputs_;
std::vector<bool> subgraph_outputs_;
absl::InlinedVector<tensorflow::Tensor, 2UL> vector_;
};
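// State shared by all OpNodes handled by the same delegate kernel.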
struct OpDataInfo {
BufferMap* buffer_map;
std::map<int, int>* tensor_release_map;
std::set<int> already_transferred_outputs;
};
class OpNode {
public:
OpNode(const TfLiteIntArray* inputs, const TfLiteIntArray* outputs)
: inputs_(inputs), outputs_(outputs) {}
~OpNode() = default;
const string& name() const { return name_; }
void set_name(const string& name) { name_ = name; }
int index() const { return index_; }
void set_index(int index) { index_ = index; }
const tensorflow::NodeDef& nodedef() const { return nodedef_; }
const tensorflow::OpRegistrationData* op_reg_data() const {
return op_reg_data_;
}
const OpInputs& inputs() const { return inputs_; }
OpInputs* mutable_inputs() { return &inputs_; }
const OpOutputs& outputs() const { return outputs_; }
OpOutputs* mutable_outputs() { return &outputs_; }
int NumInputs() const { return inputs_.Size(); }
int NumOutputs() const { return outputs_.Size(); }
const tensorflow::tfrt_stub::OpKernelRunner& op_kernel_runner() const {
return op_kernel_runner_;
}
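  // Parses the flexbuffer-encoded custom data (op name plus a serialized
  // NodeDef), looks up the op, and fills in default attribute values.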
tensorflow::Status InitializeNodeDef(const void* custom_initial_data,
int custom_initial_data_size) {
if (!custom_initial_data) {
return tensorflow::errors::Internal(
"Cannot convert empty data into a valid NodeDef");
}
const flexbuffers::Vector& v =
flexbuffers::GetRoot(
reinterpret_cast<const uint8_t*>(custom_initial_data),
custom_initial_data_size)
.AsVector();
name_ = v[0].AsString().str();
if (!nodedef_.ParseFromString(v[1].AsString().str())) {
nodedef_.Clear();
return tensorflow::errors::Internal(
"Failed to parse data into a valid NodeDef");
}
TF_RETURN_IF_ERROR(
tensorflow::OpRegistry::Global()->LookUp(nodedef_.op(), &op_reg_data_));
AddDefaultsToNodeDef(op_reg_data_->op_def, &nodedef_);
const auto& op_def = op_reg_data_->op_def;
for (const auto& attr : op_def.attr()) {
if (attr.name() == kInterOpParallelismAttrName) {
(*nodedef_.mutable_attr())[kInterOpParallelismAttrName].set_b(false);
break;
}
}
return absl::OkStatus();
}
tensorflow::Status BuildOpKernelRunner(
tensorflow::EagerContext* eager_context) {
TF_ASSIGN_OR_RETURN(op_kernel_runner_,
tensorflow::tfrt_stub::OpKernelRunner::Create(
name_, inputs_.Size(),
[this](tensorflow::AttrValueMap* attr_value_map) {
*attr_value_map = nodedef_.attr();
return absl::OkStatus();
},
*eager_context->pflr(),
eager_context->local_device_mgr()->HostCPU()));
return absl::OkStatus();
}
tensorflow::Status BuildOpKernelInputs(
const BufferMap* buffer_map,
tensorflow::tfrt_stub::OpKernelRunState* run_state) {
run_state->input_tf_tensors.resize(inputs_.Size());
run_state->input_tf_tensor_values.resize(inputs_.Size());
for (int i = 0; i < inputs_.Size(); ++i) {
int input_index = inputs_.TfLiteIndex(i);
TensorSource s = inputs_.GetTensorSource(i);
if (!s.node) {
if (!buffer_map->HasTensor(input_index)) {
return tensorflow::errors::Internal(
"Cannot read from invalid tensor index ", input_index);
}
run_state->input_tf_tensors[i] = buffer_map->GetTensor(input_index);
} else {
if (inputs_.IsForwardable(i)) {
run_state->input_tf_tensors[i] =
s.node->outputs_.ReleaseTensor(s.node_output_index);
} else {
run_state->input_tf_tensors[i] =
s.node->outputs_.GetTensor(s.node_output_index);
}
}
run_state->input_tf_tensor_values[i].tensor =
&run_state->input_tf_tensors[i];
}
return absl::OkStatus();
}
bool ShouldPersistTensorflowTensor(TfLiteContext* context,
const OpDataInfo* shared_info,
int tensor_index, int node_index) {
TfLiteTensor* tensor = &context->tensors[tensor_index];
if (IsResourceOrVariant(tensor) || tensor->type == kTfLiteString) {
return true;
}
auto it = shared_info->tensor_release_map->find(tensor_index);
return it != shared_info->tensor_release_map->end() &&
it->second > node_index;
}
TfLiteStatus CopyToTfLiteTensor(TfLiteContext* context,
OpDataInfo* shared_info, TfLiteTensor* tensor,
tensorflow::Tensor* tf_tensor,
int tensor_index) const {
if (tensor->allocation_type == kTfLiteDynamic) {
CopyShapeAndType(context, *tf_tensor, tensor);
}
tensorflow::StringPiece t_data = tf_tensor->tensor_data();
if (tf_tensor->NumElements() != NumElements(tensor) ||
tf_tensor->TotalBytes() != tensor->bytes) {
TF_LITE_KERNEL_LOG(context,
"FlexDelegate: Tensor %s(%d) buffer size mismatch "
"%zu(%lld) != %ld(%ld)",
tensor->name, tensor_index, tf_tensor->TotalBytes(),
tf_tensor->NumElements(), tensor->bytes,
NumElements(tensor));
return kTfLiteError;
}
memcpy(tensor->data.raw, t_data.data(), t_data.size());
*tf_tensor = {};
shared_info->already_transferred_outputs.insert(tensor_index);
return kTfLiteOk;
}
tensorflow::Status MaybePersistTensorflowOutputs(TfLiteContext* context,
OpDataInfo* shared_info,
int node_index) {
auto* tensors = outputs_.GetTensors();
for (int i = 0; i < outputs_.Size(); ++i) {
if (outputs_.IsSubgraphOutput(i)) {
tensorflow::Tensor& tf_tensor = tensors->at(i);
const int tflite_index = outputs_.TfLiteIndex(i);
TfLiteTensor* tensor = &context->tensors[tflite_index];
if (!ShouldPersistTensorflowTensor(context, shared_info, tflite_index,
node_index)) {
if (CopyToTfLiteTensor(context, shared_info, tensor, &tf_tensor,
tflite_index) != kTfLiteOk) {
return tensorflow::Status(absl::StatusCode::kInternal,
"failed to copy data from TF tensor");
}
} else {
shared_info->buffer_map->SetFromTensorFlow(outputs_.TfLiteIndex(i),
tf_tensor);
}
}
}
return absl::OkStatus();
}
private:
OpNode(const OpNode&) = delete;
OpNode& operator=(const OpNode&) = delete;
string name_;
int index_;
tensorflow::NodeDef nodedef_;
const tensorflow::OpRegistrationData* op_reg_data_;
OpInputs inputs_;
OpOutputs outputs_;
tensorflow::tfrt_stub::OpKernelRunner op_kernel_runner_;
};
struct OpData {
tensorflow::EagerContext* eager_context;
tensorflow::CancellationManager* cancellation_manager;
std::vector<std::unique_ptr<OpNode>> nodes;
std::vector<int> subgraph_inputs;
std::vector<int> subgraph_outputs;
  // Subgraph input tensor indexes whose TfLite buffers must not be reused
  // when converting them to TensorFlow tensors.
  std::set<int> disable_reusing_buffer_tensors;
OpDataInfo shared_info;
};
tensorflow::Status DelegateKernel::ExecuteOpKernelRunner(
tensorflow::tfrt_stub::OpKernelRunState* run_state, TfLiteContext* context,
OpNode* node_data) {
const auto& op_kernel_runner = node_data->op_kernel_runner();
if (op_kernel_runner.op_kernel()->num_outputs() != node_data->NumOutputs()) {
return tensorflow::errors::Internal(
"Unexpected number of outputs from tensorflow::OpKernel");
}
TF_RETURN_IF_ERROR(node_data->BuildOpKernelInputs(
op_data_->shared_info.buffer_map, run_state));
run_state->params.inputs = run_state->input_tf_tensor_values;
run_state->params.op_kernel = op_kernel_runner.op_kernel();
run_state->params.input_alloc_attrs = op_kernel_runner.input_alloc_attrs();
run_state->params.output_attr_array =
op_kernel_runner.output_alloc_attrs().data();
run_state->params.function_library =
op_kernel_runner.function_library_runtime();
tensorflow::OpKernelContext tf_context(&run_state->params,
node_data->NumOutputs());
op_kernel_runner.Run(&tf_context);
TF_RETURN_IF_ERROR(tf_context.status());
auto& outputs = *node_data->mutable_outputs()->GetTensors();
for (int i = 0; i < tf_context.num_outputs(); ++i) {
outputs[i] = std::move(*tf_context.mutable_output(i));
}
return node_data->MaybePersistTensorflowOutputs(
context, &(op_data_->shared_info), node_data->index());
}
DelegateKernel::DelegateKernel() : op_data_(new OpData) {}
DelegateKernel::~DelegateKernel() = default;
TfLiteStatus DelegateKernel::Init(TfLiteContext* context,
const TfLiteDelegateParams* params) {
auto* flex_delegate_data =
reinterpret_cast<FlexDelegate*>(params->delegate->data_)->mutable_data();
op_data_->eager_context = flex_delegate_data->GetEagerContext();
op_data_->cancellation_manager = flex_delegate_data->GetCancellationManager();
op_data_->shared_info.buffer_map = flex_delegate_data->GetBufferMap(context);
op_data_->shared_info.tensor_release_map =
flex_delegate_data->GetTensorReleaseMap(context);
CHECK(params->output_tensors);
std::set<int> output_set;
for (auto tensor_index : TfLiteIntArrayView(params->output_tensors)) {
op_data_->subgraph_outputs.push_back(tensor_index);
output_set.insert(tensor_index);
}
CHECK(params->input_tensors);
for (auto tensor_index : TfLiteIntArrayView(params->input_tensors)) {
op_data_->subgraph_inputs.push_back(tensor_index);
}
std::set<int> subgraph_inputs(op_data_->subgraph_inputs.begin(),
op_data_->subgraph_inputs.end());
op_data_->nodes.reserve(params->nodes_to_replace->size);
CHECK(params->nodes_to_replace);
tensorflow::Status status;
auto check_if_op_reuses_input = [](const string& op_name) {
return op_name == "TensorListPushBack" || op_name == "TensorListSetItem" ||
op_name == "SparseReshape" || op_name == "StridedSlice" ||
op_name == "RaggedTensorToVariant" || op_name == "TensorMapInsert";
};
for (auto node_index : TfLiteIntArrayView(params->nodes_to_replace)) {
TfLiteNode* node;
TfLiteRegistration* reg;
context->GetNodeAndRegistration(context, node_index, &node, ®);
op_data_->nodes.emplace_back(new OpNode(node->inputs, node->outputs));
OpNode& node_data = *op_data_->nodes.back();
node_data.set_index(node_index);
node_data.set_name("");
status = node_data.InitializeNodeDef(node->custom_initial_data,
node->custom_initial_data_size);
if (!status.ok()) break;
status = node_data.BuildOpKernelRunner(op_data_->eager_context);
if (!status.ok()) break;
for (auto tensor_index : TfLiteIntArrayView(node->inputs)) {
int node_id = node_index;
if (const std::map<int, int>::iterator it =
op_data_->shared_info.tensor_release_map->find(tensor_index);
it != op_data_->shared_info.tensor_release_map->end()) {
node_id = std::max(it->second, node_index);
}
(*op_data_->shared_info.tensor_release_map)[tensor_index] = node_id;
if (subgraph_inputs.count(tensor_index) &&
check_if_op_reuses_input(node_data.nodedef().op())) {
op_data_->disable_reusing_buffer_tensors.insert(tensor_index);
}
}
}
TF_LITE_ENSURE_STATUS(ConvertStatus(context, status));
std::map<int, TensorSource> tflite_tensor_sources;
for (auto& node_data : op_data_->nodes) {
node_data->mutable_outputs()->InitializeGraphOutputs(output_set);
for (int i = 0; i < node_data->outputs().Size(); ++i) {
int output_index = node_data->outputs().TfLiteIndex(i);
tflite_tensor_sources[output_index] = TensorSource{node_data.get(), i};
}
}
for (auto& node_data : op_data_->nodes) {
node_data->mutable_inputs()->InitializeTensorSources(tflite_tensor_sources);
}
return kTfLiteOk;
}
TfLiteStatus DelegateKernel::Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_MSG(
context, op_data_->eager_context != nullptr,
"Failed to initialize eager context. This often happens when a CPU "
"device has not been registered, presumably because some symbols from "
"tensorflow/core:core_cpu_impl were not linked into the binary.");
std::map<int, int> tensor_ref_count;
BufferMap* buffer_map = op_data_->shared_info.buffer_map;
for (auto tensor_index : op_data_->subgraph_inputs) {
TfLiteTensor* tensor = &context->tensors[tensor_index];
if (IsConstantTensor(tensor)) {
if (!tensor->data_is_stale || !buffer_map->HasTensor(tensor_index)) {
buffer_map->SetFromTfLite(tensor_index, tensor);
}
}
tensor_ref_count[tensor_index] += 2;
}
if (shapes_are_valid_) {
shapes_are_valid_ =
(ValidateOutputTensorShapeConsistency(context) == kTfLiteOk);
if (shapes_are_valid_) {
TFLITE_LOG(tflite::TFLITE_LOG_INFO,
"FlexDelegate: All tensor shapes are consistent.");
} else {
TFLITE_LOG(tflite::TFLITE_LOG_WARNING,
"FlexDelegate: Some tensor shapes are inconsistent.");
}
}
for (auto tensor_index : op_data_->subgraph_outputs) {
if (!shapes_are_valid_) {
SetTensorToDynamic(&context->tensors[tensor_index]);
}
++tensor_ref_count[tensor_index];
}
for (const auto& node_data : op_data_->nodes) {
if (node_data->nodedef().op().empty()) {
TF_LITE_KERNEL_LOG(context, "Invalid NodeDef in Flex op '%s'",
node_data->name().c_str());
return kTfLiteError;
}
TF_LITE_ENSURE(context, node_data->op_kernel_runner());
for (int i = 0; i < node_data->inputs().Size(); ++i) {
++tensor_ref_count[node_data->inputs().TfLiteIndex(i)];
}
}
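  // An input is forwardable only when this node holds the sole reference to
  // it, so its buffer can be consumed in place.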
for (auto& node_data : op_data_->nodes) {
for (int i = 0; i < node_data->inputs().Size(); ++i) {
bool f = (tensor_ref_count[node_data->inputs().TfLiteIndex(i)] == 1);
node_data->mutable_inputs()->SetForwardable(i, f);
}
}
return kTfLiteOk;
}
TfLiteStatus DelegateKernel::ValidateOutputTensorShapeConsistency(
TfLiteContext* context) const {
for (const auto& node_data : op_data_->nodes) {
auto op_name = node_data->name().c_str();
auto num_inputs = node_data->inputs().Size();
std::vector<const tensorflow::Tensor*> input_tensors_vector(num_inputs,
nullptr);
InferenceContext c(
TF_GRAPH_DEF_VERSION, node_data->nodedef(),
node_data->op_reg_data()->op_def, std::vector<ShapeHandle>(num_inputs),
input_tensors_vector, {},
std::vector<std::unique_ptr<std::vector<ShapeAndType>>>());
for (int i = 0; i < num_inputs; ++i) {
const auto input_tensor_index = node_data->inputs().TfLiteIndex(i);
TfLiteTensor* tfl_tensor = &context->tensors[input_tensor_index];
if (IsConstantTensor(tfl_tensor)) {
input_tensors_vector[i] =
op_data_->shared_info.buffer_map->GetTensorPtr(input_tensor_index);
}
const auto dims_array = tfl_tensor->dims;
std::vector<DimensionHandle> dims(dims_array->size);
for (int j = 0; j < dims_array->size; ++j) {
dims[j] = c.MakeDim(dims_array->data[j]);
}
c.SetInput(i, c.MakeShape(dims));
}
c.set_input_tensors(input_tensors_vector);
tensorflow::Status status = c.construction_status();
if (!status.ok()) {
TFLITE_LOG(tflite::TFLITE_LOG_WARNING,
"Shape construction failed for op '%s'", op_name);
return kTfLiteError;
}
if (node_data->op_reg_data()->shape_inference_fn == nullptr) {
TFLITE_LOG(tflite::TFLITE_LOG_WARNING,
"No shape inference function exists for op '%s'", op_name);
return kTfLiteError;
}
status = c.Run(node_data->op_reg_data()->shape_inference_fn);
auto num_outputs = node_data->outputs().Size();
if (num_outputs != c.num_outputs()) {
TFLITE_LOG(tflite::TFLITE_LOG_WARNING,
"Number of output tensors are mismatched for op '%s' %d != %d",
op_name, num_outputs, c.num_outputs());
return kTfLiteError;
}
for (int i = 0; i < num_outputs; ++i) {
const auto output_tensor_index = node_data->outputs().TfLiteIndex(i);
TfLiteTensor* tfl_tensor = &context->tensors[output_tensor_index];
const std::string tfl_shape_string =
GetShapeDebugString(tfl_tensor->dims);
const std::string calculated_shape_string = c.DebugString(c.output(i));
if (tfl_shape_string != calculated_shape_string) {
if ((strcmp(op_name, kReadVariableOp) == 0) &&
(tfl_tensor->dims->size > 0)) {
continue;
}
TFLITE_LOG(tflite::TFLITE_LOG_WARNING,
"op '%s' output%d tensor#%d shape mismatch for %s != %s",
op_name, i, output_tensor_index, tfl_shape_string.c_str(),
calculated_shape_string.c_str());
return kTfLiteError;
}
}
}
return kTfLiteOk;
}
static tensorflow::CancellationManager* GetDefaultCancellationManager() {
static auto* const cancellation_manager = new tensorflow::CancellationManager;
return cancellation_manager;
}
TfLiteStatus DelegateKernel::Eval(TfLiteContext* context, TfLiteNode* node) {
BufferMap* buffer_map = op_data_->shared_info.buffer_map;
for (auto tensor_index : op_data_->subgraph_inputs) {
TfLiteTensor* tensor = &context->tensors[tensor_index];
if (!IsConstantTensor(tensor)) {
if (!tensor->data_is_stale || !buffer_map->HasTensor(tensor_index)) {
buffer_map->SetFromTfLite(
tensor_index, tensor,
!op_data_->disable_reusing_buffer_tensors.count(tensor_index));
}
}
}
auto& eager_context = *op_data_->eager_context;
{
tensorflow::tfrt_stub::OpKernelRunState run_state;
run_state.params.step_container = eager_context.StepContainer();
auto* device = eager_context.local_device_mgr()->HostCPU();
run_state.params.device = device;
run_state.params.resource_manager = device->resource_manager();
run_state.params.runner = eager_context.runner();
run_state.params.cancellation_manager =
op_data_->cancellation_manager ? op_data_->cancellation_manager
: GetDefaultCancellationManager();
for (auto& node_data : op_data_->nodes) {
TFLITE_SCOPED_DELEGATE_PROFILED_OPERATOR_PROFILE(
reinterpret_cast<Profiler*>(context->profiler),
node_data->name().c_str(), node_data->index());
if (op_data_->cancellation_manager != nullptr &&
op_data_->cancellation_manager->IsCancelled()) {
TF_LITE_KERNEL_LOG(
context, "Client requested cancel during DelegateKernel::Eval");
return kTfLiteError;
}
auto status = ExecuteOpKernelRunner(&run_state, context, node_data.get());
TF_LITE_ENSURE_OK(context, ConvertStatus(context, status));
}
}
for (auto tensor_index : op_data_->subgraph_outputs) {
if (op_data_->shared_info.already_transferred_outputs.count(tensor_index) !=
0) {
continue;
}
if (!buffer_map->HasTensor(tensor_index)) {
TF_LITE_KERNEL_LOG(context, "Cannot write to invalid tensor index %d",
tensor_index);
return kTfLiteError;
}
TfLiteTensor* tensor = &context->tensors[tensor_index];
const tensorflow::Tensor& tf_tensor = buffer_map->GetTensor(tensor_index);
if (tensor->allocation_type == kTfLiteDynamic) {
TF_LITE_ENSURE_OK(context, CopyShapeAndType(context, tf_tensor, tensor));
tensor->buffer_handle = tensor_index;
tensor->data_is_stale = true;
continue;
}
if (tf_tensor.NumElements() != NumElements(tensor) ||
tf_tensor.TotalBytes() != tensor->bytes) {
TF_LITE_KERNEL_LOG(context,
"FlexDelegate: Tensor %s(%d) buffer size mismatch "
"%zu(%lld) != %ld(%ld)",
tensor->name, tensor_index, tf_tensor.TotalBytes(),
tf_tensor.NumElements(), tensor->bytes,
NumElements(tensor));
return kTfLiteError;
}
tensorflow::StringPiece t_data = tf_tensor.tensor_data();
memcpy(tensor->data.raw, t_data.data(), t_data.size());
}
return kTfLiteOk;
}
const std::map<int, int>& DelegateKernel::GetTensorReleaseMap() const {
return *(op_data_->shared_info.tensor_release_map);
}
}
} | #include "tensorflow/lite/delegates/flex/kernel.h"
#include <functional>
#include <initializer_list>
#include <memory>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/flex/delegate.h"
#include "tensorflow/lite/delegates/flex/delegate_data.h"
#include "tensorflow/lite/delegates/flex/test_util.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace flex {
namespace testing {
using ::testing::ContainsRegex;
using ::testing::ElementsAre;
using ::testing::ElementsAreArray;
using ::testing::Pair;
using ::testing::UnorderedElementsAre;
class TestFlexDelegate : public FlexDelegate {
protected:
bool IsNodeSupportedByDelegate(const TfLiteRegistration* registration,
const TfLiteNode* node,
TfLiteContext* context) const override {
return true;
}
};
class KernelTest : public testing::FlexModelTest {
public:
static constexpr int kOnes = 1;
static constexpr int kTwos = 2;
static constexpr int kMaxTensors = 30;
KernelTest() {
interpreter_ = std::make_unique<Interpreter>(&error_reporter_);
}
void ApplyFlexDelegate(std::unique_ptr<FlexDelegate> delegate = nullptr) {
auto flex_delegate = FlexDelegate::Create(std::move(delegate));
delegate_data_ =
reinterpret_cast<FlexDelegate*>(flex_delegate->data_)->mutable_data();
CHECK(delegate_data_->Prepare(tensorflow::SessionOptions{}).ok());
CHECK(interpreter_->ModifyGraphWithDelegate(std::move(flex_delegate)) ==
kTfLiteOk);
}
const std::map<int, int>& GetTensorReleaseMap(DelegateKernel* kernel) {
return kernel->GetTensorReleaseMap();
}
protected:
tflite::flex::DelegateData* delegate_data_;
};
TEST_F(KernelTest, FullGraph) {
AddTensors(9, {0, 3}, {8}, kTfLiteFloat32, {3});
AddTfOp(testing::kUnpack, {0}, {1, 2});
AddTfOp(testing::kUnpack, {3}, {4, 5});
AddTfOp(testing::kAdd, {1, 4}, {6});
AddTfOp(testing::kAdd, {2, 5}, {7});
AddTfOp(testing::kMul, {6, 7}, {8});
ApplyFlexDelegate();
SetShape(0, {2, 2, 1});
SetValues(0, {1.1f, 2.2f, 3.3f, 4.4f});
SetShape(3, {2, 2, 1});
SetValues(3, {1.1f, 2.2f, 3.3f, 4.4f});
ASSERT_TRUE(Invoke());
ASSERT_THAT(GetShape(8), ElementsAre(2, 1));
ASSERT_THAT(GetValues(8), ElementsAre(14.52f, 38.72f));
SetShape(0, {2, 3, 1});
SetValues(0, {2.0f, 2.0f, 3.0f, 3.0f, 4.0f, 4.0f});
SetShape(3, {2, 3, 1});
SetValues(3, {2.0f, 2.0f, 3.0f, 3.0f, 4.0f, 4.0f});
ASSERT_TRUE(Invoke());
ASSERT_THAT(GetShape(8), ElementsAre(3, 1));
ASSERT_THAT(GetValues(8), ElementsAre(24.0f, 32.0f, 48.0f));
}
TEST_F(KernelTest, ValidateTensorReleaseMap) {
AddTensors(9, {0, 3}, {8}, kTfLiteFloat32, {3});
AddTfOp(testing::kUnpack, {0}, {1, 2});
AddTfOp(testing::kUnpack, {3}, {4, 5});
AddTfOp(testing::kAdd, {1, 4}, {6});
AddTfOp(testing::kAdd, {2, 5}, {7});
AddTfOp(testing::kMul, {6, 7}, {8});
ApplyFlexDelegate();
const int node_size = interpreter_->primary_subgraph().nodes_size();
const std::pair<TfLiteNode, TfLiteRegistration>* node_and_reg =
interpreter_->primary_subgraph().node_and_registration(node_size - 1);
DelegateKernel* delegate_kernel =
reinterpret_cast<DelegateKernel*>(node_and_reg->first.user_data);
const auto& tensor_release_map = GetTensorReleaseMap(delegate_kernel);
EXPECT_THAT(
tensor_release_map,
UnorderedElementsAre(Pair(0, 0), Pair(1, 2), Pair(2, 3), Pair(3, 1),
Pair(4, 2), Pair(5, 3), Pair(6, 4), Pair(7, 4)));
}
TEST_F(KernelTest, PersistEagerTensor) {
AddTensors(10, {0, 3}, {9}, kTfLiteFloat32, {3});
AddTfOp(testing::kUnpack, {0}, {1, 2});
AddTfOp(testing::kUnpack, {3}, {4, 5});
AddTfOp(testing::kAdd, {1, 4}, {6});
AddTfOp(testing::kAdd, {2, 5}, {7});
AddTfLiteMulOp({6, 7}, {8});
AddTfOp(testing::kAdd, {6, 8}, {9});
ApplyFlexDelegate();
SetShape(0, {2, 2, 1});
SetValues(0, {1.1f, 2.2f, 3.3f, 4.4f});
SetShape(3, {2, 2, 1});
SetValues(3, {1.1f, 2.2f, 3.3f, 4.4f});
ASSERT_TRUE(Invoke());
auto* buffer_map =
delegate_data_->GetBufferMap(interpreter_->primary_subgraph().context());
EXPECT_TRUE(buffer_map->HasTensor(6));
EXPECT_FALSE(buffer_map->HasTensor(7));
}
TEST_F(KernelTest, BadTensorFlowOp) {
AddTensors(2, {0}, {1}, kTfLiteFloat32, {3});
AddTfOp(testing::kNonExistent, {0}, {1});
ApplyFlexDelegate(std::unique_ptr<FlexDelegate>(new TestFlexDelegate()));
ASSERT_NE(interpreter_->AllocateTensors(), kTfLiteOk);
ASSERT_THAT(error_reporter().error_messages(),
ContainsRegex("Op type not registered 'NonExistentOp'"));
}
TEST_F(KernelTest, BadNumberOfOutputs) {
AddTensors(3, {0}, {1, 2}, kTfLiteFloat32, {3});
AddTfOp(testing::kIdentity, {0}, {1, 2});
ApplyFlexDelegate();
SetShape(0, {2, 2, 1});
SetValues(0, {1.1f, 2.2f, 3.3f, 4.4f});
ASSERT_FALSE(Invoke());
ASSERT_THAT(error_reporter().error_messages(),
ContainsRegex("Unexpected number of outputs"));
}
TEST_F(KernelTest, IncompatibleNodeDef) {
AddTensors(2, {0}, {1}, kTfLiteFloat32, {3});
AddTfOp(testing::kIncompatibleNodeDef, {0}, {1});
ApplyFlexDelegate();
ASSERT_NE(interpreter_->AllocateTensors(), kTfLiteOk);
ASSERT_THAT(error_reporter().error_messages(),
ContainsRegex("No attr named 'SrcT' in NodeDef"));
}
TEST_F(KernelTest, WrongSetOfNodes) {
AddTensors(4, {0}, {3}, kTfLiteFloat32, {3});
AddTfOp(testing::kUnpack, {0}, {1, 2});
AddTfLiteMulOp({1, 2}, {3});
ApplyFlexDelegate(std::unique_ptr<FlexDelegate>(new TestFlexDelegate()));
ASSERT_NE(interpreter_->AllocateTensors(), kTfLiteOk);
ASSERT_THAT(error_reporter().error_messages(),
ContainsRegex("Cannot convert empty data into a valid NodeDef"));
}
TEST_F(KernelTest, MixedGraph) {
AddTensors(9, {0, 3}, {8}, kTfLiteFloat32, {3});
AddTfOp(testing::kUnpack, {0}, {1, 2});
AddTfOp(testing::kUnpack, {3}, {4, 5});
AddTfOp(testing::kAdd, {1, 4}, {6});
AddTfOp(testing::kAdd, {2, 5}, {7});
AddTfLiteMulOp({6, 7}, {8});
ApplyFlexDelegate();
SetShape(0, {2, 2, 1});
SetValues(0, {1.1f, 2.2f, 3.3f, 4.4f});
SetShape(3, {2, 2, 1});
SetValues(3, {1.1f, 2.2f, 3.3f, 4.4f});
ASSERT_TRUE(Invoke());
ASSERT_THAT(GetShape(8), ElementsAre(2, 1));
ASSERT_THAT(GetValues(8), ElementsAre(14.52f, 38.72f));
}
TEST_F(KernelTest, SplitGraph) {
std::vector<float> a = {3.0f, 1.0f, 0.5f, -1.0f, 4.0f, -1.0f, -2.0f, 5.0f};
std::vector<float> b = {0.0f, 1.0f, 1.5f, 3.0f};
AddTensors(18, {0, 1}, {17}, kTfLiteFloat32, {3});
AddTfOp(testing::kUnpack, {0}, {2, 10});
AddTfOp(testing::kAdd, {1, 2}, {3});
AddTfOp(testing::kUnpack, {3}, {4, 5});
AddTfLiteMulOp({4, 5}, {6});
AddTfOp(testing::kUnpack, {6}, {7, 8});
AddTfOp(testing::kAdd, {7, 8}, {9});
AddTfOp(testing::kUnpack, {10}, {11, 12});
AddTfOp(testing::kAdd, {11, 12}, {13});
AddTfOp(testing::kUnpack, {13}, {14, 15});
AddTfOp(testing::kAdd, {14, 15}, {16});
AddTfOp(testing::kAdd, {9, 16}, {17});
ApplyFlexDelegate();
SetShape(0, {2, 2, 2, 1});
SetValues(0, a);
SetShape(1, {2, 2, 1});
SetValues(1, b);
ASSERT_TRUE(Invoke());
ASSERT_THAT(GetShape(17), ElementsAre(1));
ASSERT_THAT(GetValues(17), ElementsAre(16.0f));
SetShape(0, {2, 2, 2, 1});
SetValues(0, {4.0f, 1.0f, 1.5f, -2.0f, 2.0f, 0.0f, -2.0f, 3.0f});
SetShape(1, {2, 2, 1});
SetValues(1, {0.0f, 2.0f, 1.5f, 3.0f});
ASSERT_TRUE(Invoke());
ASSERT_THAT(GetShape(17), ElementsAre(1));
ASSERT_THAT(GetValues(17), ElementsAre(18.0f));
}
class MultipleSubgraphsTest : public KernelTest {
public:
static constexpr int kInput = 0;
void PrepareInterpreter(const std::vector<float>& input) {
ApplyFlexDelegate();
SetShape(kOnes, {3});
SetValues(kOnes, {1.0f, 1.0f, 1.0f});
SetShape(kTwos, {3});
SetValues(kTwos, {2.0f, 2.0f, 2.0f});
SetValues(kInput, input);
}
std::vector<float> Apply(const std::vector<float>& input,
std::function<float(float)> function) {
std::vector<float> result;
for (float f : input) {
result.push_back(function(f));
}
return result;
}
};
TEST_F(MultipleSubgraphsTest, ForwardabilityIsLocal) {
AddTensors(kMaxTensors, {kInput, kOnes, kTwos}, {12}, kTfLiteFloat32, {3});
AddTfOp(testing::kAdd, {0, kOnes}, {3});
AddTfOp(testing::kAdd, {0, kOnes}, {10});
AddTfLiteMulOp({3, kTwos}, {4});
AddTfOp(testing::kAdd, {10, 4}, {11});
AddTfOp(testing::kAdd, {11, 10}, {7});
AddTfLiteMulOp({10, 7}, {12});
auto input = {3.0f, 4.0f, 5.0f};
PrepareInterpreter(input);
ASSERT_TRUE(Invoke());
ASSERT_THAT(GetValues(12), ElementsAreArray(Apply(input, [](float in) {
return (4 * in + 4) * (in + 1);
})));
}
TEST_F(MultipleSubgraphsTest, DoNotRemoveInputTensors) {
AddTensors(kMaxTensors, {kInput, kOnes, kTwos}, {12}, kTfLiteFloat32, {3});
AddTfOp(testing::kAdd, {0, kOnes}, {3});
AddTfOp(testing::kAdd, {0, kOnes}, {10});
AddTfOp(testing::kAdd, {10, kOnes}, {15});
AddTfOp(testing::kAdd, {10, kOnes}, {16});
AddTfLiteMulOp({3, kTwos}, {4});
AddTfOp(testing::kAdd, {10, 4}, {11});
AddTfOp(testing::kAdd, {10, 11}, {7});
AddTfLiteMulOp({10, 7}, {12});
auto input = {3.0f, 4.0f, 5.0f};
PrepareInterpreter(input);
ASSERT_TRUE(Invoke());
ASSERT_THAT(GetValues(12), ElementsAreArray(Apply(input, [](float in) {
return (4 * in + 4) * (in + 1);
})));
}
TEST_F(MultipleSubgraphsTest, DoNotForwardInputTensors) {
AddTensors(kMaxTensors, {kInput, kOnes, kTwos}, {12}, kTfLiteFloat32, {3});
AddTfOp(testing::kAdd, {0, kOnes}, {3});
AddTfOp(testing::kAdd, {0, kOnes}, {10});
AddTfLiteMulOp({3, kTwos}, {4});
AddTfOp(testing::kAdd, {10, 4}, {11});
AddTfOp(testing::kAdd, {11, 4}, {7});
AddTfLiteMulOp({10, 7}, {12});
auto input = {3.0f, 4.0f, 5.0f};
PrepareInterpreter(input);
ASSERT_TRUE(Invoke());
ASSERT_THAT(GetValues(12), ElementsAreArray(Apply(input, [](float in) {
return (5 * in + 5) * (in + 1);
})));
}
tensorflow::OpDef MakeOpDef(int num_inputs, int num_outputs) {
tensorflow::OpRegistrationData op_reg_data;
tensorflow::OpDefBuilder b("dummy");
for (int i = 0; i < num_inputs; ++i) {
b.Input(tensorflow::strings::StrCat("i", i, ": float"));
}
for (int i = 0; i < num_outputs; ++i) {
b.Output(tensorflow::strings::StrCat("o", i, ": float"));
}
CHECK(b.Attr("foo:string").Finalize(&op_reg_data).ok());
return op_reg_data.op_def;
}
tensorflow::PartialTensorShape S(std::initializer_list<int64_t> dims) {
return tensorflow::PartialTensorShape(dims);
}
TEST(ValidateOutputTensorShapeConsistencyTest, ShapeHandleDebugString) {
tensorflow::OpDef op_def = MakeOpDef(4, 1);
tensorflow::NodeDef def;
tensorflow::shape_inference::InferenceContext c(
0, def, op_def, {S({1}), S({2, 3}), S({4, 5, 6}), {}}, {}, {}, {});
c.SetInput(3, c.UnknownShape());
std::vector<tensorflow::shape_inference::ShapeHandle> shapes;
EXPECT_EQ("[1]", c.DebugString(c.input(0)));
EXPECT_EQ("[2,3]", c.DebugString(c.input(1)));
EXPECT_EQ("[4,5,6]", c.DebugString(c.input(2)));
EXPECT_EQ("?", c.DebugString(c.input(3)));
}
TEST(ValidateOutputTensorShapeConsistencyTest, GetShapeDebugString) {
TfLiteIntArray* dims1 = TfLiteIntArrayCreate(1);
dims1->data[0] = 1;
EXPECT_EQ("[1]", GetShapeDebugString(dims1));
TfLiteIntArrayFree(dims1);
TfLiteIntArray* dims2 = TfLiteIntArrayCreate(2);
dims2->data[0] = 2;
dims2->data[1] = 3;
EXPECT_EQ("[2,3]", GetShapeDebugString(dims2));
TfLiteIntArrayFree(dims2);
TfLiteIntArray* dims3 = TfLiteIntArrayCreate(3);
dims3->data[0] = 4;
dims3->data[1] = 5;
dims3->data[2] = 6;
EXPECT_EQ("[4,5,6]", GetShapeDebugString(dims3));
TfLiteIntArrayFree(dims3);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/flex/kernel.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/flex/kernel_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
2653932b-4768-456b-9d5e-d90084229d4f | cpp | google/libaddressinput | null_storage | cpp/src/null_storage.cc | cpp/test/null_storage_test.cc | #include <libaddressinput/null_storage.h>
#include <cassert>
#include <cstddef>
#include <string>
namespace i18n {
namespace addressinput {
NullStorage::NullStorage() = default;
NullStorage::~NullStorage() = default;
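// Takes ownership of |data| and immediately discards it; nothing is stored.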
void NullStorage::Put(const std::string& key, std::string* data) {
assert(data != nullptr);
delete data;
}
void NullStorage::Get(const std::string& key,
const Callback& data_ready) const {
data_ready(false, key, nullptr);
}
}
} | #include <libaddressinput/null_storage.h>
#include <libaddressinput/callback.h>
#include <libaddressinput/storage.h>
#include <cstddef>
#include <memory>
#include <string>
#include <gtest/gtest.h>
namespace {
using i18n::addressinput::BuildCallback;
using i18n::addressinput::NullStorage;
using i18n::addressinput::Storage;
class NullStorageTest : public testing::Test {
public:
NullStorageTest(const NullStorageTest&) = delete;
NullStorageTest& operator=(const NullStorageTest&) = delete;
protected:
NullStorageTest()
: data_ready_(BuildCallback(this, &NullStorageTest::OnDataReady)) {}
NullStorage storage_;
bool success_;
std::string key_;
std::string data_;
const std::unique_ptr<const Storage::Callback> data_ready_;
static const char kKey[];
private:
void OnDataReady(bool success, const std::string& key, std::string* data) {
ASSERT_FALSE(success && data == nullptr);
success_ = success;
key_ = key;
if (data != nullptr) {
data_ = *data;
delete data;
}
}
};
const char NullStorageTest::kKey[] = "foo";
TEST_F(NullStorageTest, Put) {
storage_.Put(kKey, new std::string("bar"));
}
TEST_F(NullStorageTest, Get) {
storage_.Get(kKey, *data_ready_);
EXPECT_FALSE(success_);
EXPECT_EQ(kKey, key_);
EXPECT_TRUE(data_.empty());
}
} | https://github.com/google/libaddressinput/blob/2610f7b1043d6784ada41392fc9392d1ea09ea07/cpp/src/null_storage.cc | https://github.com/google/libaddressinput/blob/2610f7b1043d6784ada41392fc9392d1ea09ea07/cpp/test/null_storage_test.cc | 2610f7b1043d6784ada41392fc9392d1ea09ea07 |
1167bf89-fa47-4662-a7a7-1034deab377c | cpp | google/arolla | expr_stack_trace | arolla/expr/expr_stack_trace.cc | arolla/expr/expr_stack_trace_test.cc | #include "arolla/expr/expr_stack_trace.h"
#include <algorithm>
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/strings/str_cat.h"
#include "arolla/dense_array/dense_array.h"
#include "arolla/expr/expr_debug_string.h"
#include "arolla/expr/expr_node.h"
#include "arolla/expr/expr_visitor.h"
#include "arolla/util/fingerprint.h"
#include "arolla/util/text.h"
namespace arolla::expr {
void DetailedExprStackTrace::AddTrace(ExprNodePtr target_node,
ExprNodePtr source_node,
TransformationType t) {
if (!target_node->is_op()) {
return;
}
if (target_node->fingerprint() == source_node->fingerprint()) {
return;
}
traceback_.insert(
{target_node->fingerprint(), {source_node->fingerprint(), t}});
if (traceback_.find(source_node->fingerprint()) == traceback_.end()) {
repr_[source_node->fingerprint()] = source_node;
}
if (t != TransformationType::kUntraced) {
repr_[target_node->fingerprint()] = target_node;
}
}
std::optional<std::pair<Fingerprint, TransformationType>>
DetailedExprStackTrace::GetTrace(Fingerprint fp) const {
auto it = traceback_.find(fp);
if (it == traceback_.end()) {
return std::nullopt;
}
return it->second;
}
std::string DetailedExprStackTrace::GetRepr(Fingerprint fp) const {
if (auto it = repr_.find(fp); it != repr_.end()) {
return GetDebugSnippet(it->second);
} else {
return absl::StrCat("Could not find representation for node ",
fp.AsString());
}
}
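// Follows the traceback chain from `fp` back toward the original node,
// skipping untraced steps and stopping if a cycle is detected.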
std::vector<DetailedExprStackTrace::Transformation>
DetailedExprStackTrace::GetTransformations(Fingerprint fp) const {
auto current_fp = fp;
std::vector<Transformation> transformations;
absl::flat_hash_set<Fingerprint> visited;
visited.insert(current_fp);
auto nxt = GetTrace(current_fp);
while (nxt.has_value()) {
if (nxt->second != TransformationType::kUntraced) {
transformations.push_back({current_fp, nxt->first, nxt->second});
}
current_fp = nxt->first;
if (!visited.insert(current_fp).second) {
break;
}
nxt = GetTrace(current_fp);
}
std::reverse(transformations.begin(), transformations.end());
if (!transformations.empty()) {
transformations.begin()->source_fp = current_fp;
}
return transformations;
}
std::string DetailedExprStackTrace::FullTrace(Fingerprint fp) const {
auto transformations = GetTransformations(fp);
if (transformations.empty()) return "";
std::string stack_trace = absl::StrCat(
"ORIGINAL NODE: ", GetRepr(transformations.front().source_fp),
"\nCOMPILED NODE: ", GetRepr(transformations.back().target_fp));
if (transformations.size() == 1) return stack_trace;
stack_trace += absl::StrCat("\nDETAILED STACK TRACE:\n",
GetRepr(transformations.begin()->source_fp));
for (auto it = transformations.begin(); it != transformations.end(); ++it) {
stack_trace += absl::StrCat("\n ", TransformationString(it->type), "\n",
GetRepr(it->target_fp));
}
return stack_trace;
}
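// Lightweight variant: only records the mapping from each transformed node
// back to its original node, without intermediate transformation steps.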
void LightweightExprStackTrace::AddTrace(ExprNodePtr target_node,
ExprNodePtr source_node,
TransformationType t) {
if (!target_node->is_op()) {
return;
}
if (target_node->fingerprint() == source_node->fingerprint()) {
return;
}
auto original_it = original_node_mapping_.find(source_node->fingerprint());
bool source_node_is_original = (original_it == original_node_mapping_.end());
if (source_node_is_original) {
original_node_mapping_.insert(
{target_node->fingerprint(), source_node->fingerprint()});
} else {
DCHECK(!original_node_mapping_.contains(original_it->second));
original_node_mapping_.insert(
{target_node->fingerprint(), original_it->second});
}
}
void LightweightExprStackTrace::AddRepresentations(ExprNodePtr compiled_node,
ExprNodePtr original_node) {
auto compiled_post_order = PostOrder(compiled_node);
for (const auto& node : compiled_post_order.nodes()) {
repr_.insert({node->fingerprint(), node});
}
auto original_post_order = PostOrder(original_node);
for (const auto& node : original_post_order.nodes()) {
repr_.insert({node->fingerprint(), node});
}
}
std::string LightweightExprStackTrace::GetRepr(Fingerprint fp) const {
if (auto it = repr_.find(fp); it != repr_.end()) {
return GetDebugSnippet(it->second);
} else {
return "?";
}
}
std::string LightweightExprStackTrace::FullTrace(Fingerprint fp) const {
if (auto it = original_node_mapping_.find(fp);
it != original_node_mapping_.end()) {
return absl::StrCat("ORIGINAL NODE: ", GetRepr(it->second),
"\nCOMPILED NODE: ", GetRepr(fp));
} else {
return absl::StrCat("NODE: ", GetRepr(fp));
}
}
BoundExprStackTraceBuilder::BoundExprStackTraceBuilder(
std::shared_ptr<const ExprStackTrace> stack_trace)
: stack_trace_(stack_trace) {}
void BoundExprStackTraceBuilder::RegisterIp(int64_t ip,
const ExprNodePtr& node) {
ip_to_fingerprint_.insert({ip, node->fingerprint()});
}
DenseArray<Text> BoundExprStackTraceBuilder::Build(
int64_t num_operators) const {
DenseArrayBuilder<Text> traces_array_builder(num_operators);
for (int64_t i = 0; i < num_operators; ++i) {
if (auto it = ip_to_fingerprint_.find(i); it != ip_to_fingerprint_.end()) {
traces_array_builder.Add(i, Text{stack_trace_->FullTrace(it->second)});
}
}
return std::move(traces_array_builder).Build();
}
} | #include "arolla/expr/expr_stack_trace.h"
#include "gtest/gtest.h"
#include "arolla/util/fingerprint.h"
namespace arolla::expr {
namespace {
TEST(ExprStackTraceTest, ExprStackTraceSafeReturnsOnUnregisteredFingerprint) {
DetailedExprStackTrace stack_trace;
EXPECT_EQ(stack_trace.FullTrace(Fingerprint{0}), "");
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/expr/expr_stack_trace.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/expr/expr_stack_trace_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
071aecc4-70a3-4954-804b-28a2aced5282 | cpp | google/arolla | dynamic_compiled_operator | arolla/expr/eval/dynamic_compiled_operator.cc | arolla/expr/eval/dynamic_compiled_operator_test.cc | #include "arolla/expr/eval/dynamic_compiled_operator.h"
#include <cstddef>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "absl/types/span.h"
#include "arolla/expr/eval/dynamic_compiled_expr.h"
#include "arolla/expr/eval/eval.h"
#include "arolla/expr/eval/executable_builder.h"
#include "arolla/expr/expr.h"
#include "arolla/expr/expr_node.h"
#include "arolla/expr/expr_operator.h"
#include "arolla/qtype/qtype.h"
#include "arolla/util/fingerprint.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla::expr::eval_internal {
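// Moves `unique` into a unique_ptr<T> via dynamic_cast; if the cast fails,
// returns nullptr (and the object is destroyed along with `unique`).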
template <typename T, typename U>
std::unique_ptr<T> dynamic_unique_ptr_cast(std::unique_ptr<U> unique) {
T* casted = dynamic_cast<T*>(unique.get());
if (casted != nullptr) {
unique.release();
}
return std::unique_ptr<T>(casted);
}
absl::StatusOr<DynamicCompiledOperator> DynamicCompiledOperator::Build(
const DynamicEvaluationEngineOptions& options, const ExprOperatorPtr& op,
std::vector<QTypePtr> input_qtypes) {
std::vector<absl::StatusOr<ExprNodePtr>> inputs;
std::vector<std::string> input_arg_names;
absl::flat_hash_map<std::string, QTypePtr> input_qtypes_map;
inputs.reserve(input_qtypes.size());
input_qtypes_map.reserve(input_qtypes.size());
input_arg_names.reserve(input_qtypes.size());
for (size_t i = 0; i < input_qtypes.size(); ++i) {
std::string name = absl::StrFormat("_%d", i);
inputs.push_back(Leaf(name));
input_qtypes_map.emplace(name, input_qtypes[i]);
input_arg_names.emplace_back(std::move(name));
}
ASSIGN_OR_RETURN(auto expr, CallOp(op, inputs));
ASSIGN_OR_RETURN(auto compiled_expr, CompileForDynamicEvaluation(
options, expr, input_qtypes_map));
std::unique_ptr<const DynamicCompiledExpr> dynamic_compiled_expr =
dynamic_unique_ptr_cast<const DynamicCompiledExpr>(
std::move(compiled_expr));
DCHECK(dynamic_compiled_expr);
return DynamicCompiledOperator(
std::string(op->display_name()), std::move(input_qtypes),
std::move(dynamic_compiled_expr), std::move(input_arg_names),
FingerprintHasher("arolla::expr::eval_internal::DynamicCompiledOperator")
.Combine(op->fingerprint())
.CombineSpan(input_qtypes)
.Finish());
}
absl::Status DynamicCompiledOperator::BindTo(
ExecutableBuilder& executable_builder,
absl::Span<const TypedSlot> input_slots, TypedSlot output_slot) const {
if (input_slots.size() != input_arg_names_.size()) {
return absl::InternalError(absl::StrFormat(
"input count mismatch in DynamicCompiledOperator: expected %d, got %d",
input_arg_names_.size(), input_slots.size()));
}
absl::flat_hash_map<std::string, TypedSlot> input_slots_map;
input_slots_map.reserve(input_slots.size());
for (size_t i = 0; i < input_slots.size(); ++i) {
input_slots_map.emplace(input_arg_names_[i], input_slots[i]);
}
return compiled_expr_->BindToExecutableBuilder(executable_builder,
input_slots_map, output_slot);
}
DynamicCompiledOperator::DynamicCompiledOperator(
std::string display_name, std::vector<QTypePtr> input_qtypes,
std::unique_ptr<const DynamicCompiledExpr> compiled_expr,
std::vector<std::string> input_arg_names, Fingerprint fingerprint)
: display_name_(std::move(display_name)),
input_qtypes_(input_qtypes.begin(), input_qtypes.end()),
compiled_expr_(std::move(compiled_expr)),
input_arg_names_(std::move(input_arg_names)),
fingerprint_(fingerprint) {
DCHECK_EQ(input_qtypes_.size(), input_arg_names_.size());
}
} | #include "arolla/expr/eval/dynamic_compiled_operator.h"
#include <memory>
#include <utility>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/status/status.h"
#include "absl/status/status_matchers.h"
#include "arolla/expr/eval/eval.h"
#include "arolla/expr/eval/executable_builder.h"
#include "arolla/expr/eval/test_utils.h"
#include "arolla/expr/expr.h"
#include "arolla/expr/expr_operator_signature.h"
#include "arolla/expr/lambda_expr_operator.h"
#include "arolla/memory/frame.h"
#include "arolla/qexpr/evaluation_engine.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/qtype/typed_slot.h"
namespace arolla::expr::eval_internal {
namespace {
using ::absl_testing::StatusIs;
using ::testing::HasSubstr;
TEST(DynamicCompiledOperatorTest, DynamicCompiledOperator) {
ASSERT_OK_AND_ASSIGN(
auto lambda,
MakeLambdaOperator(
ExprOperatorSignature::Make("x, y"),
CallOp("math.add",
{CallOp("math.add", {Placeholder("x"), Placeholder("y")}),
Literal(1.)})));
ASSERT_OK_AND_ASSIGN(
DynamicCompiledOperator op,
DynamicCompiledOperator::Build(DynamicEvaluationEngineOptions{}, lambda,
{GetQType<float>(), GetQType<double>()}));
FrameLayout::Builder layout_builder;
auto x_slot = layout_builder.AddSlot<float>();
auto y_slot = layout_builder.AddSlot<double>();
auto output_slot = layout_builder.AddSlot<double>();
  ExecutableBuilder executable_builder(&layout_builder,
                                       /*collect_op_descriptions=*/true);
EXPECT_THAT(op.BindTo(executable_builder, {TypedSlot::FromSlot(x_slot)},
TypedSlot::FromSlot(output_slot)),
StatusIs(absl::StatusCode::kInternal,
"input count mismatch in DynamicCompiledOperator: "
"expected 2, got 1"));
EXPECT_THAT(
op.BindTo(executable_builder,
{TypedSlot::FromSlot(x_slot), TypedSlot::FromSlot(x_slot)},
TypedSlot::FromSlot(output_slot)),
StatusIs(absl::StatusCode::kFailedPrecondition,
HasSubstr("slot types mismatch")));
ASSERT_OK(
op.BindTo(executable_builder,
{TypedSlot::FromSlot(x_slot), TypedSlot::FromSlot(y_slot)},
TypedSlot::FromSlot(output_slot)));
std::unique_ptr<BoundExpr> executable_expr =
std::move(executable_builder)
.Build({{"x", TypedSlot::FromSlot(x_slot)},
{"y", TypedSlot::FromSlot(y_slot)}},
TypedSlot::FromSlot(output_slot));
EXPECT_THAT(
executable_expr,
AllOf(InitOperationsAre("FLOAT64 [0x28] = float64{1}"),
EvalOperationsAre(
"FLOAT64 [0x18] = core.to_float64(FLOAT32 [0x00])",
"FLOAT64 [0x20] = math.add(FLOAT64 [0x18], FLOAT64 [0x08])",
"FLOAT64 [0x10] = math.add(FLOAT64 [0x20], FLOAT64 [0x28])")));
}
TEST(DynamicCompiledOperatorTest, DynamicCompiledOperator_Literal) {
ASSERT_OK_AND_ASSIGN(
auto lambda, MakeLambdaOperator(ExprOperatorSignature{}, Literal(1.)));
ASSERT_OK_AND_ASSIGN(DynamicCompiledOperator op,
DynamicCompiledOperator::Build(
DynamicEvaluationEngineOptions{}, lambda, {}));
FrameLayout::Builder layout_builder;
auto output_slot = layout_builder.AddSlot<double>();
  ExecutableBuilder executable_builder(&layout_builder,
                                       /*collect_op_descriptions=*/true);
ASSERT_OK(
op.BindTo(executable_builder, {}, TypedSlot::FromSlot(output_slot)));
std::unique_ptr<BoundExpr> executable_expr =
std::move(executable_builder).Build({}, TypedSlot::FromSlot(output_slot));
EXPECT_THAT(
executable_expr,
AllOf(InitOperationsAre("FLOAT64 [0x08] = float64{1}"),
EvalOperationsAre("FLOAT64 [0x00] = core._copy(FLOAT64 [0x08])")));
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/expr/eval/dynamic_compiled_operator.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/expr/eval/dynamic_compiled_operator_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
0d018e54-5730-4a78-9799-48cd0f7ff152 | cpp | tensorflow/tensorflow | gpu_plugin | tensorflow/lite/core/acceleration/configuration/c/gpu_plugin.cc | tensorflow/lite/core/acceleration/configuration/c/gpu_plugin_test.cc | #include "tensorflow/lite/core/acceleration/configuration/c/gpu_plugin.h"
#include <memory>
#include "tensorflow/lite/acceleration/configuration/configuration_generated.h"
#include "tensorflow/lite/acceleration/configuration/gpu_plugin.h"
#include "tensorflow/lite/core/acceleration/configuration/c/delegate_plugin.h"
#include "tensorflow/lite/core/c/common.h"
#if TFLITE_SUPPORTS_GPU_DELEGATE
#include "tensorflow/lite/delegates/gpu/delegate.h"
#elif defined(REAL_IPHONE_DEVICE)
#include "tensorflow/lite/delegates/gpu/metal_delegate.h"
#endif
extern "C" {
static TfLiteDelegate* CreateDelegate(const void* settings) {
const ::tflite::TFLiteSettings* tflite_settings =
static_cast<const ::tflite::TFLiteSettings*>(settings);
tflite::delegates::GpuPlugin gpu_plugin(*tflite_settings);
#if TFLITE_SUPPORTS_GPU_DELEGATE
return TfLiteGpuDelegateV2Create(&gpu_plugin.Options());
#elif defined(REAL_IPHONE_DEVICE)
return TFLGpuDelegateCreate(&gpu_plugin.Options());
#else
return nullptr;
#endif
}
static void DestroyDelegate(TfLiteDelegate* delegate) {
#if TFLITE_SUPPORTS_GPU_DELEGATE
TfLiteGpuDelegateV2Delete(delegate);
#elif defined(REAL_IPHONE_DEVICE)
TFLGpuDelegateDelete(delegate);
#endif
}
static int DelegateErrno(TfLiteDelegate* from_delegate) { return 0; }
static constexpr TfLiteDelegatePlugin kPluginCApi{
CreateDelegate,
DestroyDelegate,
DelegateErrno,
};
const TfLiteDelegatePlugin* TfLiteGpuDelegatePluginCApi() {
return &kPluginCApi;
}
} | #include "tensorflow/lite/core/acceleration/configuration/c/gpu_plugin.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/acceleration/configuration/configuration_generated.h"
#include "tensorflow/lite/core/c/common.h"
namespace tflite {
class GpuTest : public testing::Test {
public:
void SetUp() override {
GPUSettingsBuilder gpu_settings_builder(flatbuffer_builder_);
flatbuffers::Offset<GPUSettings> gpu_settings =
gpu_settings_builder.Finish();
TFLiteSettingsBuilder tflite_settings_builder(flatbuffer_builder_);
tflite_settings_builder.add_gpu_settings(gpu_settings);
flatbuffers::Offset<TFLiteSettings> tflite_settings =
tflite_settings_builder.Finish();
flatbuffer_builder_.Finish(tflite_settings);
settings_ = flatbuffers::GetRoot<TFLiteSettings>(
flatbuffer_builder_.GetBufferPointer());
}
~GpuTest() override {}
protected:
flatbuffers::FlatBufferBuilder flatbuffer_builder_;
const TFLiteSettings *settings_;
};
TEST_F(GpuTest, CanCreateAndDestroyDelegate) {
TfLiteDelegate *delegate = TfLiteGpuDelegatePluginCApi()->create(settings_);
EXPECT_NE(delegate, nullptr);
TfLiteGpuDelegatePluginCApi()->destroy(delegate);
}
TEST_F(GpuTest, CanGetDelegateErrno) {
TfLiteDelegate *delegate = TfLiteGpuDelegatePluginCApi()->create(settings_);
int error_number =
TfLiteGpuDelegatePluginCApi()->get_delegate_errno(delegate);
EXPECT_EQ(error_number, 0);
TfLiteGpuDelegatePluginCApi()->destroy(delegate);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/core/acceleration/configuration/c/gpu_plugin.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/core/acceleration/configuration/c/gpu_plugin_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
c92b34de-4bee-4567-aaac-b8cb9c8886ee | cpp | google/quiche | hpack_decoder_adapter | quiche/http2/hpack/hpack_decoder_adapter.cc | quiche/http2/hpack/hpack_decoder_adapter_test.cc | #include "quiche/http2/hpack/hpack_decoder_adapter.h"
#include <cstddef>
#include <string>
#include "absl/strings/string_view.h"
#include "quiche/http2/core/spdy_headers_handler_interface.h"
#include "quiche/http2/decoder/decode_buffer.h"
#include "quiche/http2/hpack/decoder/hpack_decoding_error.h"
#include "quiche/common/http/http_header_block.h"
#include "quiche/common/platform/api/quiche_logging.h"
namespace spdy {
namespace {
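// Default limit on the size of a single HPACK fragment and on individual
// decoded strings; both can be adjusted via
// set_max_decode_buffer_size_bytes().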
const size_t kMaxDecodeBufferSizeBytes = 32 * 1024;
}
HpackDecoderAdapter::HpackDecoderAdapter()
: hpack_decoder_(&listener_adapter_, kMaxDecodeBufferSizeBytes),
max_decode_buffer_size_bytes_(kMaxDecodeBufferSizeBytes),
max_header_block_bytes_(0),
header_block_started_(false),
error_(http2::HpackDecodingError::kOk) {}
HpackDecoderAdapter::~HpackDecoderAdapter() = default;
void HpackDecoderAdapter::ApplyHeaderTableSizeSetting(size_t size_setting) {
QUICHE_DVLOG(2) << "HpackDecoderAdapter::ApplyHeaderTableSizeSetting";
hpack_decoder_.ApplyHeaderTableSizeSetting(size_setting);
}
size_t HpackDecoderAdapter::GetCurrentHeaderTableSizeSetting() const {
return hpack_decoder_.GetCurrentHeaderTableSizeSetting();
}
void HpackDecoderAdapter::HandleControlFrameHeadersStart(
SpdyHeadersHandlerInterface* handler) {
QUICHE_DVLOG(2) << "HpackDecoderAdapter::HandleControlFrameHeadersStart";
QUICHE_DCHECK(!header_block_started_);
listener_adapter_.set_handler(handler);
}
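// Decodes one fragment of a header block: lazily starts the block on the
// first fragment, enforces the per-fragment and cumulative size limits, and
// feeds the bytes to the underlying HTTP/2 HPACK decoder.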
bool HpackDecoderAdapter::HandleControlFrameHeadersData(
const char* headers_data, size_t headers_data_length) {
QUICHE_DVLOG(2) << "HpackDecoderAdapter::HandleControlFrameHeadersData: len="
<< headers_data_length;
if (!header_block_started_) {
header_block_started_ = true;
if (!hpack_decoder_.StartDecodingBlock()) {
header_block_started_ = false;
error_ = hpack_decoder_.error();
return false;
}
}
if (headers_data_length > 0) {
QUICHE_DCHECK_NE(headers_data, nullptr);
if (headers_data_length > max_decode_buffer_size_bytes_) {
QUICHE_DVLOG(1) << "max_decode_buffer_size_bytes_ < headers_data_length: "
<< max_decode_buffer_size_bytes_ << " < "
<< headers_data_length;
error_ = http2::HpackDecodingError::kFragmentTooLong;
return false;
}
listener_adapter_.AddToTotalHpackBytes(headers_data_length);
if (max_header_block_bytes_ != 0 &&
listener_adapter_.total_hpack_bytes() > max_header_block_bytes_) {
error_ = http2::HpackDecodingError::kCompressedHeaderSizeExceedsLimit;
return false;
}
http2::DecodeBuffer db(headers_data, headers_data_length);
bool ok = hpack_decoder_.DecodeFragment(&db);
QUICHE_DCHECK(!ok || db.Empty()) << "Remaining=" << db.Remaining();
if (!ok) {
error_ = hpack_decoder_.error();
}
return ok;
}
return true;
}
bool HpackDecoderAdapter::HandleControlFrameHeadersComplete() {
QUICHE_DVLOG(2) << "HpackDecoderAdapter::HandleControlFrameHeadersComplete";
if (!hpack_decoder_.EndDecodingBlock()) {
QUICHE_DVLOG(3) << "EndDecodingBlock returned false";
error_ = hpack_decoder_.error();
return false;
}
header_block_started_ = false;
return true;
}
void HpackDecoderAdapter::set_max_decode_buffer_size_bytes(
size_t max_decode_buffer_size_bytes) {
QUICHE_DVLOG(2) << "HpackDecoderAdapter::set_max_decode_buffer_size_bytes";
max_decode_buffer_size_bytes_ = max_decode_buffer_size_bytes;
hpack_decoder_.set_max_string_size_bytes(max_decode_buffer_size_bytes);
}
void HpackDecoderAdapter::set_max_header_block_bytes(
size_t max_header_block_bytes) {
max_header_block_bytes_ = max_header_block_bytes;
}
HpackDecoderAdapter::ListenerAdapter::ListenerAdapter()
: no_op_handler_(nullptr), handler_(&no_op_handler_) {}
HpackDecoderAdapter::ListenerAdapter::~ListenerAdapter() = default;
void HpackDecoderAdapter::ListenerAdapter::set_handler(
SpdyHeadersHandlerInterface* handler) {
QUICHE_CHECK_NE(handler, nullptr);
handler_ = handler;
}
void HpackDecoderAdapter::ListenerAdapter::OnHeaderListStart() {
QUICHE_DVLOG(2) << "HpackDecoderAdapter::ListenerAdapter::OnHeaderListStart";
total_hpack_bytes_ = 0;
total_uncompressed_bytes_ = 0;
handler_->OnHeaderBlockStart();
}
void HpackDecoderAdapter::ListenerAdapter::OnHeader(absl::string_view name,
absl::string_view value) {
QUICHE_DVLOG(2) << "HpackDecoderAdapter::ListenerAdapter::OnHeader:\n name: "
<< name << "\n value: " << value;
total_uncompressed_bytes_ += name.size() + value.size();
handler_->OnHeader(name, value);
}
void HpackDecoderAdapter::ListenerAdapter::OnHeaderListEnd() {
QUICHE_DVLOG(2) << "HpackDecoderAdapter::ListenerAdapter::OnHeaderListEnd";
handler_->OnHeaderBlockEnd(total_uncompressed_bytes_, total_hpack_bytes_);
handler_ = &no_op_handler_;
}
void HpackDecoderAdapter::ListenerAdapter::OnHeaderErrorDetected(
absl::string_view error_message) {
QUICHE_VLOG(1) << error_message;
}
} | #include "quiche/http2/hpack/hpack_decoder_adapter.h"
#include <stdint.h>
#include <cstddef>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/base/macros.h"
#include "absl/strings/escaping.h"
#include "absl/strings/string_view.h"
#include "quiche/http2/core/recording_headers_handler.h"
#include "quiche/http2/hpack/decoder/hpack_decoder.h"
#include "quiche/http2/hpack/decoder/hpack_decoder_state.h"
#include "quiche/http2/hpack/decoder/hpack_decoder_tables.h"
#include "quiche/http2/hpack/hpack_constants.h"
#include "quiche/http2/hpack/hpack_encoder.h"
#include "quiche/http2/hpack/hpack_output_stream.h"
#include "quiche/http2/hpack/http2_hpack_constants.h"
#include "quiche/http2/test_tools/hpack_block_builder.h"
#include "quiche/http2/test_tools/http2_random.h"
#include "quiche/common/http/http_header_block.h"
#include "quiche/common/platform/api/quiche_logging.h"
#include "quiche/common/platform/api/quiche_test.h"
#include "quiche/common/quiche_text_utils.h"
using ::http2::HpackEntryType;
using ::http2::HpackStringPair;
using ::http2::test::HpackBlockBuilder;
using ::http2::test::HpackDecoderPeer;
using ::testing::ElementsAre;
using ::testing::Pair;
namespace http2 {
namespace test {
class HpackDecoderStatePeer {
public:
static HpackDecoderTables* GetDecoderTables(HpackDecoderState* state) {
return &state->decoder_tables_;
}
};
class HpackDecoderPeer {
public:
static HpackDecoderState* GetDecoderState(HpackDecoder* decoder) {
return &decoder->decoder_state_;
}
static HpackDecoderTables* GetDecoderTables(HpackDecoder* decoder) {
return HpackDecoderStatePeer::GetDecoderTables(GetDecoderState(decoder));
}
};
}
}
namespace spdy {
namespace test {
class HpackDecoderAdapterPeer {
public:
explicit HpackDecoderAdapterPeer(HpackDecoderAdapter* decoder)
: decoder_(decoder) {}
void HandleHeaderRepresentation(const std::string& name,
const std::string& value) {
decoder_->listener_adapter_.OnHeader(name, value);
}
http2::HpackDecoderTables* GetDecoderTables() {
return HpackDecoderPeer::GetDecoderTables(&decoder_->hpack_decoder_);
}
const HpackStringPair* GetTableEntry(uint32_t index) {
return GetDecoderTables()->Lookup(index);
}
size_t current_header_table_size() {
return GetDecoderTables()->current_header_table_size();
}
size_t header_table_size_limit() {
return GetDecoderTables()->header_table_size_limit();
}
void set_header_table_size_limit(size_t size) {
return GetDecoderTables()->DynamicTableSizeUpdate(size);
}
private:
HpackDecoderAdapter* decoder_;
};
class HpackEncoderPeer {
public:
static void CookieToCrumbs(const HpackEncoder::Representation& cookie,
HpackEncoder::Representations* crumbs_out) {
HpackEncoder::CookieToCrumbs(cookie, crumbs_out);
}
};
namespace {
const bool kNoCheckDecodedSize = false;
const char* kCookieKey = "cookie";
class HpackDecoderAdapterTest : public quiche::test::QuicheTestWithParam<bool> {
protected:
HpackDecoderAdapterTest() : decoder_(), decoder_peer_(&decoder_) {}
void SetUp() override { randomly_split_input_buffer_ = GetParam(); }
void HandleControlFrameHeadersStart() {
bytes_passed_in_ = 0;
decoder_.HandleControlFrameHeadersStart(&handler_);
}
bool HandleControlFrameHeadersData(absl::string_view str) {
QUICHE_VLOG(3) << "HandleControlFrameHeadersData:\n"
<< quiche::QuicheTextUtils::HexDump(str);
bytes_passed_in_ += str.size();
return decoder_.HandleControlFrameHeadersData(str.data(), str.size());
}
bool HandleControlFrameHeadersComplete() {
bool rc = decoder_.HandleControlFrameHeadersComplete();
return rc;
}
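  // Decodes a serialized header block, optionally feeding it to the decoder
  // in randomly sized chunks (per the test parameter), then verifies the
  // reported compressed and (optionally) uncompressed byte counts.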
bool DecodeHeaderBlock(absl::string_view str,
bool check_decoded_size = true) {
EXPECT_FALSE(decode_has_failed_);
HandleControlFrameHeadersStart();
if (randomly_split_input_buffer_) {
do {
size_t bytes = str.size();
if (!str.empty()) {
bytes = random_.Uniform(str.size()) + 1;
}
EXPECT_LE(bytes, str.size());
if (!HandleControlFrameHeadersData(str.substr(0, bytes))) {
decode_has_failed_ = true;
return false;
}
str.remove_prefix(bytes);
} while (!str.empty());
} else if (!HandleControlFrameHeadersData(str)) {
decode_has_failed_ = true;
return false;
}
if (!HandleControlFrameHeadersComplete()) {
decode_has_failed_ = true;
return false;
}
EXPECT_EQ(handler_.compressed_header_bytes(), bytes_passed_in_);
if (check_decoded_size) {
EXPECT_EQ(handler_.uncompressed_header_bytes(),
SizeOfHeaders(decoded_block()));
}
return true;
}
bool EncodeAndDecodeDynamicTableSizeUpdates(size_t first, size_t second) {
HpackBlockBuilder hbb;
hbb.AppendDynamicTableSizeUpdate(first);
if (second != first) {
hbb.AppendDynamicTableSizeUpdate(second);
}
return DecodeHeaderBlock(hbb.buffer());
}
const quiche::HttpHeaderBlock& decoded_block() const {
return handler_.decoded_block();
}
static size_t SizeOfHeaders(const quiche::HttpHeaderBlock& headers) {
size_t size = 0;
for (const auto& kv : headers) {
if (kv.first == kCookieKey) {
HpackEncoder::Representations crumbs;
HpackEncoderPeer::CookieToCrumbs(kv, &crumbs);
for (const auto& crumb : crumbs) {
size += crumb.first.size() + crumb.second.size();
}
} else {
size += kv.first.size() + kv.second.size();
}
}
return size;
}
const quiche::HttpHeaderBlock& DecodeBlockExpectingSuccess(
absl::string_view str) {
EXPECT_TRUE(DecodeHeaderBlock(str));
return decoded_block();
}
void expectEntry(size_t index, size_t size, const std::string& name,
const std::string& value) {
const HpackStringPair* entry = decoder_peer_.GetTableEntry(index);
EXPECT_EQ(name, entry->name) << "index " << index;
EXPECT_EQ(value, entry->value);
EXPECT_EQ(size, entry->size());
}
quiche::HttpHeaderBlock MakeHeaderBlock(
const std::vector<std::pair<std::string, std::string>>& headers) {
quiche::HttpHeaderBlock result;
for (const auto& kv : headers) {
result.AppendValueOrAddHeader(kv.first, kv.second);
}
return result;
}
http2::test::Http2Random random_;
HpackDecoderAdapter decoder_;
test::HpackDecoderAdapterPeer decoder_peer_;
RecordingHeadersHandler handler_;
const quiche::HttpHeaderBlock dummy_block_;
bool randomly_split_input_buffer_;
bool decode_has_failed_ = false;
size_t bytes_passed_in_;
};
INSTANTIATE_TEST_SUITE_P(NoHandler, HpackDecoderAdapterTest, ::testing::Bool());
INSTANTIATE_TEST_SUITE_P(WithHandler, HpackDecoderAdapterTest,
::testing::Bool());
TEST_P(HpackDecoderAdapterTest, ApplyHeaderTableSizeSetting) {
EXPECT_EQ(4096u, decoder_.GetCurrentHeaderTableSizeSetting());
decoder_.ApplyHeaderTableSizeSetting(12 * 1024);
EXPECT_EQ(12288u, decoder_.GetCurrentHeaderTableSizeSetting());
}
TEST_P(HpackDecoderAdapterTest,
AddHeaderDataWithHandleControlFrameHeadersData) {
HandleControlFrameHeadersStart();
const size_t kMaxBufferSizeBytes = 50;
const std::string a_value = std::string(49, 'x');
decoder_.set_max_decode_buffer_size_bytes(kMaxBufferSizeBytes);
HpackBlockBuilder hbb;
hbb.AppendLiteralNameAndValue(HpackEntryType::kNeverIndexedLiteralHeader,
false, "a", false, a_value);
const std::string& s = hbb.buffer();
EXPECT_GT(s.size(), kMaxBufferSizeBytes);
EXPECT_TRUE(HandleControlFrameHeadersData(s.substr(0, s.size() / 2)));
EXPECT_TRUE(HandleControlFrameHeadersData(s.substr(s.size() / 2)));
EXPECT_FALSE(HandleControlFrameHeadersData(s));
quiche::HttpHeaderBlock expected_block = MakeHeaderBlock({{"a", a_value}});
EXPECT_EQ(expected_block, decoded_block());
}
TEST_P(HpackDecoderAdapterTest, NameTooLong) {
const size_t kMaxBufferSizeBytes = 50;
const std::string name = std::string(2 * kMaxBufferSizeBytes, 'x');
const std::string value = "abc";
decoder_.set_max_decode_buffer_size_bytes(kMaxBufferSizeBytes);
HpackBlockBuilder hbb;
hbb.AppendLiteralNameAndValue(HpackEntryType::kNeverIndexedLiteralHeader,
false, name, false, value);
const size_t fragment_size = (3 * kMaxBufferSizeBytes) / 2;
const std::string fragment = hbb.buffer().substr(0, fragment_size);
HandleControlFrameHeadersStart();
EXPECT_FALSE(HandleControlFrameHeadersData(fragment));
}
TEST_P(HpackDecoderAdapterTest, HeaderTooLongToBuffer) {
const std::string name = "some-key";
const std::string value = "some-value";
const size_t kMaxBufferSizeBytes = name.size() + value.size() - 2;
decoder_.set_max_decode_buffer_size_bytes(kMaxBufferSizeBytes);
HpackBlockBuilder hbb;
hbb.AppendLiteralNameAndValue(HpackEntryType::kNeverIndexedLiteralHeader,
false, name, false, value);
const size_t fragment_size = hbb.size() - 1;
const std::string fragment = hbb.buffer().substr(0, fragment_size);
HandleControlFrameHeadersStart();
EXPECT_FALSE(HandleControlFrameHeadersData(fragment));
}
TEST_P(HpackDecoderAdapterTest, HeaderBlockTooLong) {
const std::string name = "some-key";
const std::string value = "some-value";
const size_t kMaxBufferSizeBytes = 1024;
HpackBlockBuilder hbb;
hbb.AppendLiteralNameAndValue(HpackEntryType::kIndexedLiteralHeader, false,
name, false, value);
while (hbb.size() < kMaxBufferSizeBytes) {
hbb.AppendLiteralNameAndValue(HpackEntryType::kIndexedLiteralHeader, false,
"", false, "");
}
HandleControlFrameHeadersStart();
EXPECT_TRUE(HandleControlFrameHeadersData(hbb.buffer()));
EXPECT_TRUE(HandleControlFrameHeadersComplete());
decoder_.set_max_header_block_bytes(kMaxBufferSizeBytes);
HandleControlFrameHeadersStart();
EXPECT_FALSE(HandleControlFrameHeadersData(hbb.buffer()));
}
TEST_P(HpackDecoderAdapterTest, DecodeWithIncompleteData) {
HandleControlFrameHeadersStart();
EXPECT_TRUE(HandleControlFrameHeadersData("\x82\x85\x82"));
std::vector<std::pair<std::string, std::string>> expected_headers = {
{":method", "GET"}, {":path", "/index.html"}, {":method", "GET"}};
quiche::HttpHeaderBlock expected_block1 = MakeHeaderBlock(expected_headers);
EXPECT_EQ(expected_block1, decoded_block());
EXPECT_TRUE(
HandleControlFrameHeadersData("\x40\x03goo"
"\x03gar\xbe\x40\x04spam"));
expected_headers.push_back({"goo", "gar"});
expected_headers.push_back({"goo", "gar"});
quiche::HttpHeaderBlock expected_block2 = MakeHeaderBlock(expected_headers);
EXPECT_EQ(expected_block2, decoded_block());
EXPECT_TRUE(HandleControlFrameHeadersData("\x04gggs"));
EXPECT_TRUE(HandleControlFrameHeadersComplete());
expected_headers.push_back({"spam", "gggs"});
quiche::HttpHeaderBlock expected_block3 = MakeHeaderBlock(expected_headers);
EXPECT_EQ(expected_block3, decoded_block());
}
TEST_P(HpackDecoderAdapterTest, HandleHeaderRepresentation) {
HandleControlFrameHeadersStart();
HandleControlFrameHeadersData("");
decoder_peer_.HandleHeaderRepresentation("cookie", " part 1");
decoder_peer_.HandleHeaderRepresentation("cookie", "part 2 ");
decoder_peer_.HandleHeaderRepresentation("cookie", "part3");
decoder_peer_.HandleHeaderRepresentation("passed-through",
std::string("foo\0baz", 7));
decoder_peer_.HandleHeaderRepresentation("joined", "joined");
decoder_peer_.HandleHeaderRepresentation("joineD", "value 1");
decoder_peer_.HandleHeaderRepresentation("joineD", "value 2");
decoder_peer_.HandleHeaderRepresentation("empty", "");
decoder_peer_.HandleHeaderRepresentation("empty-joined", "");
decoder_peer_.HandleHeaderRepresentation("empty-joined", "foo");
decoder_peer_.HandleHeaderRepresentation("empty-joined", "");
decoder_peer_.HandleHeaderRepresentation("empty-joined", "");
decoder_peer_.HandleHeaderRepresentation("cookie", " fin!");
decoder_.HandleControlFrameHeadersComplete();
EXPECT_THAT(
decoded_block(),
ElementsAre(
Pair("cookie", " part 1; part 2 ; part3; fin!"),
Pair("passed-through", absl::string_view("foo\0baz", 7)),
Pair("joined", absl::string_view("joined\0value 1\0value 2", 22)),
Pair("empty", ""),
Pair("empty-joined", absl::string_view("\0foo\0\0", 6))));
}
TEST_P(HpackDecoderAdapterTest, IndexedHeaderStatic) {
const quiche::HttpHeaderBlock& header_set1 =
DecodeBlockExpectingSuccess("\x82\x85");
quiche::HttpHeaderBlock expected_header_set1;
expected_header_set1[":method"] = "GET";
expected_header_set1[":path"] = "/index.html";
EXPECT_EQ(expected_header_set1, header_set1);
const quiche::HttpHeaderBlock& header_set2 =
DecodeBlockExpectingSuccess("\x82");
quiche::HttpHeaderBlock expected_header_set2;
expected_header_set2[":method"] = "GET";
EXPECT_EQ(expected_header_set2, header_set2);
}
TEST_P(HpackDecoderAdapterTest, IndexedHeaderDynamic) {
const quiche::HttpHeaderBlock& header_set1 = DecodeBlockExpectingSuccess(
"\x40\x03"
"foo"
"\x03"
"bar");
quiche::HttpHeaderBlock expected_header_set1;
expected_header_set1["foo"] = "bar";
EXPECT_EQ(expected_header_set1, header_set1);
const quiche::HttpHeaderBlock& header_set2 = DecodeBlockExpectingSuccess(
"\xbe\x40\x04"
"spam"
"\x04"
"eggs");
quiche::HttpHeaderBlock expected_header_set2;
expected_header_set2["foo"] = "bar";
expected_header_set2["spam"] = "eggs";
EXPECT_EQ(expected_header_set2, header_set2);
const quiche::HttpHeaderBlock& header_set3 =
DecodeBlockExpectingSuccess("\xbe");
quiche::HttpHeaderBlock expected_header_set3;
expected_header_set3["spam"] = "eggs";
EXPECT_EQ(expected_header_set3, header_set3);
}
TEST_P(HpackDecoderAdapterTest, InvalidIndexedHeader) {
EXPECT_FALSE(DecodeHeaderBlock("\xbe"));
}
TEST_P(HpackDecoderAdapterTest, ContextUpdateMaximumSize) {
EXPECT_EQ(kDefaultHeaderTableSizeSetting,
decoder_peer_.header_table_size_limit());
std::string input;
{
HpackOutputStream output_stream;
output_stream.AppendPrefix(kHeaderTableSizeUpdateOpcode);
output_stream.AppendUint32(126);
input = output_stream.TakeString();
EXPECT_TRUE(DecodeHeaderBlock(input));
EXPECT_EQ(126u, decoder_peer_.header_table_size_limit());
}
{
HpackOutputStream output_stream;
output_stream.AppendPrefix(kHeaderTableSizeUpdateOpcode);
output_stream.AppendUint32(kDefaultHeaderTableSizeSetting);
input = output_stream.TakeString();
EXPECT_TRUE(DecodeHeaderBlock(input));
EXPECT_EQ(kDefaultHeaderTableSizeSetting,
decoder_peer_.header_table_size_limit());
}
{
HpackOutputStream output_stream;
output_stream.AppendPrefix(kHeaderTableSizeUpdateOpcode);
output_stream.AppendUint32(kDefaultHeaderTableSizeSetting + 1);
input = output_stream.TakeString();
EXPECT_FALSE(DecodeHeaderBlock(input));
EXPECT_EQ(kDefaultHeaderTableSizeSetting,
decoder_peer_.header_table_size_limit());
}
}
TEST_P(HpackDecoderAdapterTest, TwoTableSizeUpdates) {
std::string input;
{
HpackOutputStream output_stream;
output_stream.AppendPrefix(kHeaderTableSizeUpdateOpcode);
output_stream.AppendUint32(0);
output_stream.AppendPrefix(kHeaderTableSizeUpdateOpcode);
output_stream.AppendUint32(122);
input = output_stream.TakeString();
EXPECT_TRUE(DecodeHeaderBlock(input));
EXPECT_EQ(122u, decoder_peer_.header_table_size_limit());
}
}
TEST_P(HpackDecoderAdapterTest, ThreeTableSizeUpdatesError) {
std::string input;
{
HpackOutputStream output_stream;
output_stream.AppendPrefix(kHeaderTableSizeUpdateOpcode);
output_stream.AppendUint32(5);
output_stream.AppendPrefix(kHeaderTableSizeUpdateOpcode);
output_stream.AppendUint32(10);
output_stream.AppendPrefix(kHeaderTableSizeUpdateOpcode);
output_stream.AppendUint32(15);
input = output_stream.TakeString();
EXPECT_FALSE(DecodeHeaderBlock(input));
EXPECT_EQ(10u, decoder_peer_.header_table_size_limit());
}
}
TEST_P(HpackDecoderAdapterTest, TableSizeUpdateSecondError) {
std::string input;
{
HpackOutputStream output_stream;
output_stream.AppendBytes("\x82\x85");
output_stream.AppendPrefix(kHeaderTableSizeUpdateOpcode);
output_stream.AppendUint32(123);
input = output_stream.TakeString();
EXPECT_FALSE(DecodeHeaderBlock(input));
EXPECT_EQ(kDefaultHeaderTableSizeSetting,
decoder_peer_.header_table_size_limit());
}
}
TEST_P(HpackDecoderAdapterTest, TableSizeUpdateFirstThirdError) {
std::string input;
{
HpackOutputStream output_stream;
output_stream.AppendPrefix(kHeaderTableSizeUpdateOpcode);
output_stream.AppendUint32(60);
output_stream.AppendBytes("\x82\x85");
output_stream.AppendPrefix(kHeaderTableSizeUpdateOpcode);
output_stream.AppendUint32(125);
input = output_stream.TakeString();
EXPECT_FALSE(DecodeHeaderBlock(input));
EXPECT_EQ(60u, decoder_peer_.header_table_size_limit());
}
}
TEST_P(HpackDecoderAdapterTest, LiteralHeaderNoIndexing) {
const char input[] = "\x04\x0c/sample/path\x00\x06:path2\x0e/sample/path/2";
const quiche::HttpHeaderBlock& header_set = DecodeBlockExpectingSuccess(
absl::string_view(input, ABSL_ARRAYSIZE(input) - 1));
quiche::HttpHeaderBlock expected_header_set;
expected_header_set[":path"] = "/sample/path";
expected_header_set[":path2"] = "/sample/path/2";
EXPECT_EQ(expected_header_set, header_set);
}
TEST_P(HpackDecoderAdapterTest, LiteralHeaderIncrementalIndexing) {
const char input[] = "\x44\x0c/sample/path\x40\x06:path2\x0e/sample/path/2";
const quiche::HttpHeaderBlock& header_set = DecodeBlockExpectingSuccess(
absl::string_view(input, ABSL_ARRAYSIZE(input) - 1));
quiche::HttpHeaderBlock expected_header_set;
expected_header_set[":path"] = "/sample/path";
expected_header_set[":path2"] = "/sample/path/2";
EXPECT_EQ(expected_header_set, header_set);
}
TEST_P(HpackDecoderAdapterTest, LiteralHeaderWithIndexingInvalidNameIndex) {
decoder_.ApplyHeaderTableSizeSetting(0);
EXPECT_TRUE(EncodeAndDecodeDynamicTableSizeUpdates(0, 0));
EXPECT_TRUE(DecodeHeaderBlock(absl::string_view("\x7d\x03ooo")));
EXPECT_FALSE(DecodeHeaderBlock(absl::string_view("\x7e\x03ooo")));
}
TEST_P(HpackDecoderAdapterTest, LiteralHeaderNoIndexingInvalidNameIndex) {
EXPECT_TRUE(DecodeHeaderBlock(absl::string_view("\x0f\x2e\x03ooo")));
EXPECT_FALSE(DecodeHeaderBlock(absl::string_view("\x0f\x2f\x03ooo")));
}
TEST_P(HpackDecoderAdapterTest, LiteralHeaderNeverIndexedInvalidNameIndex) {
EXPECT_TRUE(DecodeHeaderBlock(absl::string_view("\x1f\x2e\x03ooo")));
EXPECT_FALSE(DecodeHeaderBlock(absl::string_view("\x1f\x2f\x03ooo")));
}
TEST_P(HpackDecoderAdapterTest, TruncatedIndex) {
EXPECT_FALSE(DecodeHeaderBlock("\xff"));
}
TEST_P(HpackDecoderAdapterTest, TruncatedHuffmanLiteral) {
std::string first;
ASSERT_TRUE(absl::HexStringToBytes("418cf1e3c2e5f23a6ba0ab90f4ff", &first));
EXPECT_TRUE(DecodeHeaderBlock(first));
first.pop_back();
EXPECT_FALSE(DecodeHeaderBlock(first));
}
TEST_P(HpackDecoderAdapterTest, HuffmanEOSError) {
std::string first;
ASSERT_TRUE(absl::HexStringToBytes("418cf1e3c2e5f23a6ba0ab90f4ff", &first));
EXPECT_TRUE(DecodeHeaderBlock(first));
ASSERT_TRUE(absl::HexStringToBytes("418df1e3c2e5f23a6ba0ab90f4ffff", &first));
EXPECT_FALSE(DecodeHeaderBlock(first));
}
TEST_P(HpackDecoderAdapterTest, BasicC31) {
HpackEncoder encoder;
quiche::HttpHeaderBlock expected_header_set;
expected_header_set[":method"] = "GET";
expected_header_set[":scheme"] = "http";
expected_header_set[":path"] = "/";
expected_header_set[":authority"] = "www.example.com";
std::string encoded_header_set =
encoder.EncodeHeaderBlock(expected_header_set);
EXPECT_TRUE(DecodeHeaderBlock(encoded_header_set));
EXPECT_EQ(expected_header_set, decoded_block());
}
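// Request examples from RFC 7541 Appendix C.4 (Huffman-coded literals);
// checks both the decoded headers and the dynamic table after each block.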
TEST_P(HpackDecoderAdapterTest, SectionC4RequestHuffmanExamples) {
std::string first;
ASSERT_TRUE(
absl::HexStringToBytes("828684418cf1e3c2e5f23a6ba0ab90f4ff", &first));
const quiche::HttpHeaderBlock& first_header_set =
DecodeBlockExpectingSuccess(first);
EXPECT_THAT(first_header_set,
ElementsAre(
Pair(":method", "GET"),
Pair(":scheme", "http"),
Pair(":path", "/"),
Pair(":authority", "www.example.com")));
expectEntry(62, 57, ":authority", "www.example.com");
EXPECT_EQ(57u, decoder_peer_.current_header_table_size());
std::string second;
ASSERT_TRUE(absl::HexStringToBytes("828684be5886a8eb10649cbf", &second));
const quiche::HttpHeaderBlock& second_header_set =
DecodeBlockExpectingSuccess(second);
EXPECT_THAT(second_header_set,
ElementsAre(
Pair(":method", "GET"),
Pair(":scheme", "http"),
Pair(":path", "/"),
Pair(":authority", "www.example.com"),
Pair("cache-control", "no-cache")));
expectEntry(62, 53, "cache-control", "no-cache");
expectEntry(63, 57, ":authority", "www.example.com");
EXPECT_EQ(110u, decoder_peer_.current_header_table_size());
std::string third;
ASSERT_TRUE(absl::HexStringToBytes(
"828785bf408825a849e95ba97d7f8925a849e95bb8e8b4bf", &third));
const quiche::HttpHeaderBlock& third_header_set =
DecodeBlockExpectingSuccess(third);
EXPECT_THAT(
third_header_set,
ElementsAre(
Pair(":method", "GET"),
Pair(":scheme", "https"),
Pair(":path", "/index.html"),
Pair(":authority", "www.example.com"),
Pair("custom-key", "custom-value")));
expectEntry(62, 54, "custom-key", "custom-value");
expectEntry(63, 53, "cache-control", "no-cache");
expectEntry(64, 57, ":authority", "www.example.com");
EXPECT_EQ(164u, decoder_peer_.current_header_table_size());
}
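// Response examples from RFC 7541 Appendix C.6, with the dynamic table
// limited to 256 bytes so entries are evicted across the three blocks.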
TEST_P(HpackDecoderAdapterTest, SectionC6ResponseHuffmanExamples) {
decoder_peer_.set_header_table_size_limit(256);
std::string first;
ASSERT_TRUE(absl::HexStringToBytes(
"488264025885aec3771a4b6196d07abe941054d444a8200595040b8166e082a62d1bff6e"
"919d29ad171863c78f0b97c8e9ae82ae43d3",
&first));
const quiche::HttpHeaderBlock& first_header_set =
DecodeBlockExpectingSuccess(first);
EXPECT_THAT(first_header_set,
ElementsAre(
Pair(":status", "302"),
Pair("cache-control", "private"),
Pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"),
Pair("location", "https:
expectEntry(62, 63, "location", "https:
expectEntry(63, 65, "date", "Mon, 21 Oct 2013 20:13:21 GMT");
expectEntry(64, 52, "cache-control", "private");
expectEntry(65, 42, ":status", "302");
EXPECT_EQ(222u, decoder_peer_.current_header_table_size());
std::string second;
ASSERT_TRUE(absl::HexStringToBytes("4883640effc1c0bf", &second));
const quiche::HttpHeaderBlock& second_header_set =
DecodeBlockExpectingSuccess(second);
EXPECT_THAT(second_header_set,
ElementsAre(
Pair(":status", "307"),
Pair("cache-control", "private"),
Pair("date", "Mon, 21 Oct 2013 20:13:21 GMT"),
Pair("location", "https:
expectEntry(62, 42, ":status", "307");
expectEntry(63, 63, "location", "https:
expectEntry(64, 65, "date", "Mon, 21 Oct 2013 20:13:21 GMT");
expectEntry(65, 52, "cache-control", "private");
EXPECT_EQ(222u, decoder_peer_.current_header_table_size());
std::string third;
ASSERT_TRUE(absl::HexStringToBytes(
"88c16196d07abe941054d444a8200595040b8166e084a62d1bffc05a839bd9ab77ad94e7"
"821dd7f2e6c7b335dfdfcd5b3960d5af27087f3672c1ab270fb5291f9587316065c003ed"
"4ee5b1063d5007",
&third));
const quiche::HttpHeaderBlock& third_header_set =
DecodeBlockExpectingSuccess(third);
EXPECT_THAT(third_header_set,
ElementsAre(
Pair(":status", "200"),
Pair("cache-control", "private"),
Pair("date", "Mon, 21 Oct 2013 20:13:22 GMT"),
Pair("location", "https:
Pair("content-encoding", "gzip"),
Pair("set-cookie", "foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU;"
" max-age=3600; version=1")));
expectEntry(62, 98, "set-cookie",
"foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU;"
" max-age=3600; version=1");
expectEntry(63, 52, "content-encoding", "gzip");
expectEntry(64, 65, "date", "Mon, 21 Oct 2013 20:13:22 GMT");
EXPECT_EQ(215u, decoder_peer_.current_header_table_size());
}
TEST_P(HpackDecoderAdapterTest, ReuseNameOfEvictedEntry) {
decoder_.ApplyHeaderTableSizeSetting(63);
HpackBlockBuilder hbb;
hbb.AppendDynamicTableSizeUpdate(0);
hbb.AppendDynamicTableSizeUpdate(63);
const absl::string_view name("some-name");
const absl::string_view value1("some-value");
const absl::string_view value2("another-value");
const absl::string_view value3("yet-another-value");
hbb.AppendLiteralNameAndValue(HpackEntryType::kIndexedLiteralHeader, false,
name, false, value1);
hbb.AppendIndexedHeader(62);
hbb.AppendNameIndexAndLiteralValue(HpackEntryType::kIndexedLiteralHeader, 62,
false, value2);
hbb.AppendIndexedHeader(62);
hbb.AppendNameIndexAndLiteralValue(HpackEntryType::kIndexedLiteralHeader, 62,
false, value3);
hbb.AppendIndexedHeader(62);
EXPECT_TRUE(DecodeHeaderBlock(hbb.buffer(), kNoCheckDecodedSize));
quiche::HttpHeaderBlock expected_header_set;
expected_header_set.AppendValueOrAddHeader(name, value1);
expected_header_set.AppendValueOrAddHeader(name, value1);
expected_header_set.AppendValueOrAddHeader(name, value2);
expected_header_set.AppendValueOrAddHeader(name, value2);
expected_header_set.AppendValueOrAddHeader(name, value3);
expected_header_set.AppendValueOrAddHeader(name, value3);
std::string joined_values = expected_header_set[name].as_string();
EXPECT_EQ(joined_values.size(),
2 * value1.size() + 2 * value2.size() + 2 * value3.size() + 5);
EXPECT_EQ(expected_header_set, decoded_block());
EXPECT_EQ(handler_.uncompressed_header_bytes(),
6 * name.size() + 2 * value1.size() + 2 * value2.size() +
2 * value3.size());
}
TEST_P(HpackDecoderAdapterTest, Cookies) {
quiche::HttpHeaderBlock expected_header_set;
expected_header_set["cookie"] = "foo; bar";
std::string encoded_block;
ASSERT_TRUE(absl::HexStringToBytes("608294e76003626172", &encoded_block));
EXPECT_TRUE(DecodeHeaderBlock(encoded_block));
EXPECT_EQ(expected_header_set, decoded_block());
}
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/http2/hpack/hpack_decoder_adapter.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/http2/hpack/hpack_decoder_adapter_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
31a835cc-caae-4afb-b0a3-525ca43c83f7 | cpp | tensorflow/tensorflow | pjrt_util | tensorflow/core/tfrt/common/pjrt_util.cc | tensorflow/core/tfrt/common/pjrt_util_test.cc | #include "tensorflow/core/tfrt/common/pjrt_util.h"
#include <memory>
#include <utility>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "xla/pjrt/pjrt_client.h"
#include "tensorflow/core/framework/resource_mgr.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/refcount.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/tfrt/common/global_state.h"
#include "tensorflow/core/tfrt/common/pjrt_state.h"
#include "tsl/platform/errors.h"
namespace tensorflow {
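// Stores the PJRT client for `device_type` in the process-wide PjRtState
// resource, creating that resource on first use; null clients are rejected.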
Status SetPjRtClientInTFGlobalResourceManager(
const DeviceType& device_type, std::unique_ptr<xla::PjRtClient> client) {
ResourceMgr* rmgr = tfrt_global::GetTFGlobalResourceMgr();
PjRtState* pjrt_state;
TF_RETURN_IF_ERROR(rmgr->LookupOrCreate<PjRtState>(
rmgr->default_container(), kPjRtStateResourceName, &pjrt_state,
[&](PjRtState** ret) {
*ret = PjRtState::Create();
return absl::OkStatus();
}));
core::ScopedUnref pjrt_state_ref(pjrt_state);
if (client == nullptr) {
return errors::InvalidArgument("PJRT client is nullptr.");
}
TF_RETURN_IF_ERROR(pjrt_state->SetPjRtClient(device_type, std::move(client)));
return absl::OkStatus();
}
absl::StatusOr<xla::PjRtClient*> GetPjRtClient(const DeviceType& device_type) {
ResourceMgr* rmgr = tfrt_global::GetTFGlobalResourceMgr();
PjRtState* pjrt_state;
TF_RETURN_IF_ERROR(rmgr->LookupOrCreate<PjRtState>(
rmgr->default_container(), kPjRtStateResourceName, &pjrt_state,
[&](PjRtState** ret) {
*ret = PjRtState::Create();
return absl::OkStatus();
}));
core::ScopedUnref pjrt_state_ref(pjrt_state);
return pjrt_state->GetPjRtClient(device_type);
}
absl::Status SetPjRtGpuClientCreationInfoInTFGlobalResourceManager(
std::unique_ptr<PjRtGpuClientCreationInfo> info) {
ResourceMgr* rmgr = tfrt_global::GetTFGlobalResourceMgr();
PjRtState* pjrt_state;
TF_RETURN_IF_ERROR(rmgr->LookupOrCreate<PjRtState>(
rmgr->default_container(), kPjRtStateResourceName, &pjrt_state,
[&](PjRtState** ret) {
*ret = PjRtState::Create();
return absl::OkStatus();
}));
core::ScopedUnref pjrt_state_ref(pjrt_state);
if (info == nullptr) {
return absl::InvalidArgumentError("PJRT client creation info is nullptr.");
}
TF_RETURN_IF_ERROR(pjrt_state->SetPjRtGpuClientCreationInfo(std::move(info)));
return absl::OkStatus();
}
absl::StatusOr<PjRtGpuClientCreationInfo*> GetPjRtGpuClientCreationInfo() {
ResourceMgr* rmgr = tfrt_global::GetTFGlobalResourceMgr();
PjRtState* pjrt_state;
TF_RETURN_IF_ERROR(rmgr->LookupOrCreate<PjRtState>(
rmgr->default_container(), kPjRtStateResourceName, &pjrt_state,
[&](PjRtState** ret) {
*ret = PjRtState::Create();
return absl::OkStatus();
}));
core::ScopedUnref pjrt_state_ref(pjrt_state);
return pjrt_state->GetPjRtGpuClientCreationInfo();
}
} | #include "tensorflow/core/tfrt/common/pjrt_util.h"
#include <memory>
#include <utility>
#include "xla/pjrt/cpu/cpu_client.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/tfrt/common/pjrt_state.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
#include "tsl/protobuf/error_codes.pb.h"
namespace tensorflow {
namespace {
using ::testing::ElementsAre;
using ::testing::HasSubstr;
using ::tsl::testing::StatusIs;
TEST(PjRtUtilTest, SetGetAndDeletePjRtClient) {
TF_ASSERT_OK(SetPjRtClientInTFGlobalResourceManager(
DEVICE_CPU,
xla::GetTfrtCpuClient(true, 1)
.value()));
TF_ASSERT_OK_AND_ASSIGN(auto pjrt_client, GetPjRtClient(DEVICE_CPU));
EXPECT_THAT(pjrt_client, ::testing::NotNull());
}
TEST(PjRtStateResourceManagerTest, SetNullPjRtClient) {
EXPECT_THAT(
SetPjRtClientInTFGlobalResourceManager(DEVICE_CPU, nullptr),
StatusIs(error::INVALID_ARGUMENT, HasSubstr("PJRT client is nullptr")));
}
TEST(PjRtGpuClientCreationInfoTest, SetAndGet) {
auto info = std::make_unique<PjRtGpuClientCreationInfo>();
info->allowed_devices.insert(123);
TF_ASSERT_OK(
SetPjRtGpuClientCreationInfoInTFGlobalResourceManager(std::move(info)));
TF_ASSERT_OK_AND_ASSIGN(PjRtGpuClientCreationInfo * retrieved_info,
GetPjRtGpuClientCreationInfo());
EXPECT_THAT(retrieved_info->allowed_devices, ElementsAre(123));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/common/pjrt_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/common/pjrt_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b06d8b0c-17ce-447c-92e5-73775c5dc0a2 | cpp | abseil/abseil-cpp | per_thread_sem | absl/synchronization/internal/per_thread_sem.cc | absl/synchronization/internal/per_thread_sem_test.cc | #include "absl/base/internal/low_level_alloc.h"
#ifndef ABSL_LOW_LEVEL_ALLOC_MISSING
#include "absl/synchronization/internal/per_thread_sem.h"
#include <atomic>
#include "absl/base/attributes.h"
#include "absl/base/internal/thread_identity.h"
#include "absl/synchronization/internal/waiter.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace synchronization_internal {
void PerThreadSem::SetThreadBlockedCounter(std::atomic<int> *counter) {
base_internal::ThreadIdentity *identity;
identity = GetOrCreateCurrentThreadIdentity();
identity->blocked_count_ptr = counter;
}
std::atomic<int> *PerThreadSem::GetThreadBlockedCounter() {
base_internal::ThreadIdentity *identity;
identity = GetOrCreateCurrentThreadIdentity();
return identity->blocked_count_ptr;
}
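// Called on each scheduler tick: once a thread has been waiting for more
// than Waiter::kIdlePeriods ticks without being marked idle, poke it so the
// waiter can transition to its idle state.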
void PerThreadSem::Tick(base_internal::ThreadIdentity *identity) {
const int ticker =
identity->ticker.fetch_add(1, std::memory_order_relaxed) + 1;
const int wait_start = identity->wait_start.load(std::memory_order_relaxed);
const bool is_idle = identity->is_idle.load(std::memory_order_relaxed);
if (wait_start && (ticker - wait_start > Waiter::kIdlePeriods) && !is_idle) {
ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemPoke)(identity);
}
}
}
ABSL_NAMESPACE_END
}
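// These entry points are defined with weak linkage, presumably so that an
// embedder can substitute alternative implementations; the defaults forward
// to the calling thread's Waiter.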
extern "C" {
ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemInit)(
absl::base_internal::ThreadIdentity *identity) {
new (absl::synchronization_internal::Waiter::GetWaiter(identity))
absl::synchronization_internal::Waiter();
}
ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemPost)(
absl::base_internal::ThreadIdentity *identity) {
absl::synchronization_internal::Waiter::GetWaiter(identity)->Post();
}
ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemPoke)(
absl::base_internal::ThreadIdentity *identity) {
absl::synchronization_internal::Waiter::GetWaiter(identity)->Poke();
}
ABSL_ATTRIBUTE_WEAK bool ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemWait)(
absl::synchronization_internal::KernelTimeout t) {
bool timeout = false;
absl::base_internal::ThreadIdentity *identity;
identity = absl::synchronization_internal::GetOrCreateCurrentThreadIdentity();
int ticker = identity->ticker.load(std::memory_order_relaxed);
identity->wait_start.store(ticker ? ticker : 1, std::memory_order_relaxed);
identity->is_idle.store(false, std::memory_order_relaxed);
if (identity->blocked_count_ptr != nullptr) {
identity->blocked_count_ptr->fetch_add(1, std::memory_order_relaxed);
}
timeout =
!absl::synchronization_internal::Waiter::GetWaiter(identity)->Wait(t);
if (identity->blocked_count_ptr != nullptr) {
identity->blocked_count_ptr->fetch_sub(1, std::memory_order_relaxed);
}
identity->is_idle.store(false, std::memory_order_relaxed);
identity->wait_start.store(0, std::memory_order_relaxed);
return !timeout;
}
}
#endif | #include "absl/synchronization/internal/per_thread_sem.h"
#include <atomic>
#include <condition_variable>
#include <functional>
#include <limits>
#include <mutex>
#include <string>
#include <thread>
#include "gtest/gtest.h"
#include "absl/base/config.h"
#include "absl/base/internal/cycleclock.h"
#include "absl/base/internal/thread_identity.h"
#include "absl/strings/str_cat.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace synchronization_internal {
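// Mutex/condition-variable based counting semaphore the tests use to
// synchronize with the partner thread without going through PerThreadSem.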
class SimpleSemaphore {
public:
SimpleSemaphore() : count_(0) {}
void Wait() {
std::unique_lock<std::mutex> lock(mu_);
cv_.wait(lock, [this]() { return count_ > 0; });
--count_;
cv_.notify_one();
}
void Post() {
std::lock_guard<std::mutex> lock(mu_);
++count_;
cv_.notify_one();
}
private:
std::mutex mu_;
std::condition_variable cv_;
int count_;
};
struct ThreadData {
int num_iterations;
SimpleSemaphore identity2_written;
base_internal::ThreadIdentity *identity1;
base_internal::ThreadIdentity *identity2;
KernelTimeout timeout;
};
class PerThreadSemTest : public testing::Test {
public:
static void TimingThread(ThreadData* t) {
t->identity2 = GetOrCreateCurrentThreadIdentity();
t->identity2_written.Post();
while (t->num_iterations--) {
Wait(t->timeout);
Post(t->identity1);
}
}
void TestTiming(const char *msg, bool timeout) {
static const int kNumIterations = 100;
ThreadData t;
t.num_iterations = kNumIterations;
t.timeout = timeout ?
KernelTimeout(absl::Now() + absl::Seconds(10000))
: KernelTimeout::Never();
t.identity1 = GetOrCreateCurrentThreadIdentity();
std::thread partner_thread(std::bind(TimingThread, &t));
t.identity2_written.Wait();
int64_t min_cycles = std::numeric_limits<int64_t>::max();
int64_t total_cycles = 0;
for (int i = 0; i < kNumIterations; ++i) {
absl::SleepFor(absl::Milliseconds(20));
int64_t cycles = base_internal::CycleClock::Now();
Post(t.identity2);
Wait(t.timeout);
cycles = base_internal::CycleClock::Now() - cycles;
min_cycles = std::min(min_cycles, cycles);
total_cycles += cycles;
}
std::string out = StrCat(
msg, "min cycle count=", min_cycles, " avg cycle count=",
absl::SixDigits(static_cast<double>(total_cycles) / kNumIterations));
printf("%s\n", out.c_str());
partner_thread.join();
}
protected:
static void Post(base_internal::ThreadIdentity *id) {
PerThreadSem::Post(id);
}
static bool Wait(KernelTimeout t) {
return PerThreadSem::Wait(t);
}
static bool Wait(absl::Time t) {
return Wait(KernelTimeout(t));
}
static void Tick(base_internal::ThreadIdentity *identity) {
PerThreadSem::Tick(identity);
}
};
namespace {
TEST_F(PerThreadSemTest, WithoutTimeout) {
PerThreadSemTest::TestTiming("Without timeout: ", false);
}
TEST_F(PerThreadSemTest, WithTimeout) {
PerThreadSemTest::TestTiming("With timeout: ", true);
}
TEST_F(PerThreadSemTest, Timeouts) {
const absl::Duration delay = absl::Milliseconds(50);
const absl::Time start = absl::Now();
EXPECT_FALSE(Wait(start + delay));
const absl::Duration elapsed = absl::Now() - start;
absl::Duration slop = absl::Milliseconds(1);
#ifdef _MSC_VER
slop = absl::Milliseconds(16);
#endif
EXPECT_LE(delay - slop, elapsed)
<< "Wait returned " << delay - elapsed
<< " early (with " << slop << " slop), start time was " << start;
absl::Time negative_timeout = absl::UnixEpoch() - absl::Milliseconds(100);
EXPECT_FALSE(Wait(negative_timeout));
EXPECT_LE(negative_timeout, absl::Now() + slop);
Post(GetOrCreateCurrentThreadIdentity());
EXPECT_TRUE(Wait(negative_timeout));
}
TEST_F(PerThreadSemTest, ThreadIdentityReuse) {
for (int i = 0; i < 10000; i++) {
std::thread t([]() { GetOrCreateCurrentThreadIdentity(); });
t.join();
}
}
}
}
ABSL_NAMESPACE_END
} | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/synchronization/internal/per_thread_sem.cc | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/synchronization/internal/per_thread_sem_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4 |
368e0049-e1af-47f0-8969-3f472eb1b158 | cpp | tensorflow/tensorflow | quantize | tensorflow/compiler/mlir/quantization/tensorflow/passes/quantize.cc | tensorflow/lite/delegates/hexagon/builders/tests/quantize_test.cc | #include <memory>
#include <string>
#include <utility>
#include "absl/container/flat_hash_set.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/Quant/IR/QuantTypes.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OpDefinition.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/OperationSupport.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/IR/Types.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Pass/PassRegistry.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#include "mlir/Support/TypeID.h"
#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
#include "tensorflow/compiler/mlir/lite/quantization/ir/QuantOps.h"
#include "tensorflow/compiler/mlir/lite/transforms/passes.h"
#include "tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_config.h"
#include "tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_utils.h"
#include "tensorflow/compiler/mlir/quantization/tensorflow/ops/tf_op_quant_spec.h"
#include "tensorflow/compiler/mlir/quantization/tensorflow/quantization_options.pb.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
#include "tensorflow/core/framework/types.pb.h"
namespace mlir {
namespace quant {
namespace {
using ::tensorflow::quantization::OpSet;
enum QuantizationTrait { kFullQuantization, kDynamicRangeQuantization };
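// CRTP base for the TF quantization rewrite patterns. QuantizationTrait
// selects full (static-range) versus dynamic-range quantization; under full
// quantization only "gather" composite functions may keep dynamic-range
// quantized operands and results.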
template <QuantizationTrait quantization_trait, typename ConcreteT,
typename RootOpT = quantfork::DequantizeCastOp>
struct TFQuantizationBase
: public QuantizationPattern<ConcreteT, quantfork::QuantizeCastOp,
quantfork::DequantizeCastOp,
void, RootOpT> {
explicit TFQuantizationBase(MLIRContext* ctx,
const QuantPassSpec& quant_params)
: QuantizationPattern<ConcreteT, quantfork::QuantizeCastOp,
quantfork::DequantizeCastOp,
void, RootOpT>(ctx, quant_params) {}
static bool IsQuantizableCustomOp(Operation* op,
const CustomMap& custom_op_map) {
return false;
}
static bool AllowDynamicRangeQuantizedOperand(
Operation* quantized_op, const CustomMap& custom_op_map) {
auto call_op = cast<TF::PartitionedCallOp>(quantized_op);
StringRef function_name =
call_op.getFAttr().cast<FlatSymbolRefAttr>().getValue();
const bool is_gather = function_name.contains("gather");
return quantization_trait != kFullQuantization || is_gather;
}
static bool AllowDynamicRangeQuantizedResult(Operation* quantized_op,
const CustomMap& custom_op_map) {
auto call_op = cast<TF::PartitionedCallOp>(quantized_op);
StringRef function_name =
call_op.getFAttr().cast<FlatSymbolRefAttr>().getValue();
bool is_gather = false;
if (function_name.contains("gather")) is_gather = true;
return quantization_trait != kFullQuantization ||
(quantization_trait == kFullQuantization && is_gather);
}
static bool IsWeightOnlyOp(Operation* quantized_op,
absl::flat_hash_set<std::string>& ops_blocklist,
bool weight_only_quantization,
const CustomMap& custom_op_map) {
return weight_only_quantization;
}
};
struct TFFullQuantization
: public TFQuantizationBase<kFullQuantization, TFFullQuantization> {
explicit TFFullQuantization(MLIRContext* ctx,
const QuantPassSpec& quant_params)
: TFQuantizationBase<kFullQuantization, TFFullQuantization>(
ctx, quant_params) {}
};
struct TFFullQuantizationReverse
: public TFQuantizationBase<kFullQuantization, TFFullQuantizationReverse,
quantfork::QuantizeCastOp> {
explicit TFFullQuantizationReverse(MLIRContext* ctx,
const QuantPassSpec& quant_params)
: TFQuantizationBase<kFullQuantization, TFFullQuantizationReverse,
quantfork::QuantizeCastOp>(ctx, quant_params) {}
};
struct TFDynamicRangeQuantization
: public TFQuantizationBase<kDynamicRangeQuantization,
TFDynamicRangeQuantization> {
explicit TFDynamicRangeQuantization(MLIRContext* ctx,
const quant::QuantPassSpec& quant_params)
: TFQuantizationBase<kDynamicRangeQuantization,
TFDynamicRangeQuantization>(ctx, quant_params) {}
};
class RemoveUnusedQdqPattern
: public OpRewritePattern<quantfork::DequantizeCastOp> {
public:
explicit RemoveUnusedQdqPattern(MLIRContext* context)
: OpRewritePattern<quantfork::DequantizeCastOp>(context) {}
LogicalResult matchAndRewrite(quantfork::DequantizeCastOp dq_op,
PatternRewriter& rewriter) const override {
auto q_op = dq_op.getArg().getDefiningOp<quantfork::QuantizeCastOp>();
if (!q_op) return failure();
dq_op.replaceAllUsesWith(q_op.getArg());
return success();
}
};
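// Propagates quantization through ops whose scale spec requires identical
// input and output scales: the surrounding quantize/dequantize casts are
// replaced with storage casts so the op runs on the quantized storage type.
// For the XLA op set this only applies when the op is connected to a
// quantized composite function.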
class QuantizeSameScaleOpsPattern
: public OpRewritePattern<quantfork::DequantizeCastOp> {
public:
explicit QuantizeSameScaleOpsPattern(
MLIRContext* context, OpQuantScaleSpecGetter op_quant_scale_spec_getter,
OpSet target_opset)
: OpRewritePattern<quantfork::DequantizeCastOp>(context, 200),
op_quant_scale_spec_getter_(op_quant_scale_spec_getter),
target_opset_(target_opset) {}
LogicalResult matchAndRewrite(quantfork::DequantizeCastOp op,
PatternRewriter& rewriter) const override {
SmallVector<Operation*, 4> quantizing_ops;
auto users = op.getResult().getUsers();
quantizing_ops.append(users.begin(), users.end());
bool changed = false;
for (Operation* quantizing_op : quantizing_ops) {
if (llvm::isa<quantfork::QuantizeCastOp, quantfork::DequantizeCastOp>(
quantizing_op)) {
return failure();
}
if (quantizing_op->hasTrait<OpTrait::IsTerminator>()) {
return failure();
}
if (!op_quant_scale_spec_getter_(quantizing_op)
->has_same_scale_requirement) {
continue;
}
if (target_opset_ == OpSet::XLA &&
          !IsConnectedWithCompositeFunction(quantizing_op)) {
continue;
}
if (target_opset_ == OpSet::UNIFORM_QUANTIZED) {
continue;
}
SmallVector<Value, 4> inputs;
inputs.reserve(quantizing_op->getNumOperands());
for (const auto& operand : quantizing_op->getOperands()) {
Type operand_type = operand.getType();
if (operand_type.isa<NoneType>()) {
inputs.push_back(operand);
continue;
}
Type elem_type = operand_type.cast<TensorType>().getElementType();
if (auto dq_op = dyn_cast_or_null<quantfork::DequantizeCastOp>(
operand.getDefiningOp())) {
auto dq_arg_type = dq_op.getArg().getType().cast<TensorType>();
auto qtype = dq_arg_type.getElementType().cast<QuantizedType>();
auto scast_op = rewriter.create<quantfork::StorageCastOp>(
dq_op->getLoc(), dq_arg_type.clone(qtype.getStorageType()),
dq_op.getArg());
inputs.push_back(scast_op.getResult());
} else if (!elem_type.isF32()) {
inputs.push_back(operand);
} else {
return failure();
}
}
llvm::SmallDenseMap<Value, int> outputs_replaced;
SmallVector<Type, 4> output_types;
output_types.reserve(quantizing_op->getNumResults());
for (const auto& enumerated_result :
llvm::enumerate(quantizing_op->getResults())) {
Value result = enumerated_result.value();
Type result_type = result.getType();
if (result_type.isa<NoneType>()) {
outputs_replaced.insert({result, enumerated_result.index()});
output_types.push_back(result_type);
continue;
}
auto result_tensor_type = result_type.cast<TensorType>();
if (result.hasOneUse() &&
llvm::isa<quantfork::QuantizeCastOp>(*result.user_begin())) {
auto user =
llvm::cast<quantfork::QuantizeCastOp>(*result.user_begin());
outputs_replaced.insert(
{user.getResult(), enumerated_result.index()});
auto qtype = user.getType()
.cast<TensorType>()
.getElementType()
.cast<QuantizedType>();
output_types.push_back(
result_tensor_type.clone(qtype.getStorageType()));
} else if (!result_tensor_type.getElementType().isF32()) {
outputs_replaced.insert({result, enumerated_result.index()});
output_types.push_back(result.getType());
} else {
return failure();
}
}
rewriter.setInsertionPointAfter(quantizing_op);
OperationState new_state(quantizing_op->getLoc(),
quantizing_op->getName().getStringRef(), inputs,
output_types, quantizing_op->getAttrs());
for (int i = 0; i < quantizing_op->getNumRegions(); ++i) {
new_state.addRegion();
}
Operation* quantized_op = rewriter.create(new_state);
if (quantizing_op->getNumRegions() != 0) {
for (const auto& indexed_regions :
llvm::enumerate(quantizing_op->getRegions())) {
IRMapping mapping;
indexed_regions.value().cloneInto(
&quantized_op->getRegion(indexed_regions.index()), mapping);
}
}
for (const auto& output_index_pair : outputs_replaced) {
Value output = output_index_pair.getFirst();
int output_index = output_index_pair.getSecond();
auto scast_op = rewriter.create<quantfork::StorageCastOp>(
output.getLoc(), output.getType(),
quantized_op->getResult(output_index));
output.replaceAllUsesWith(scast_op);
}
changed = true;
}
return success(changed);
}
private:
  bool IsConnectedWithCompositeFunction(Operation* same_scale_op) const {
for (const auto& operand : same_scale_op->getOperands()) {
auto dq_op = dyn_cast_or_null<quantfork::DequantizeCastOp>(
operand.getDefiningOp());
if (!dq_op) continue;
Operation* preceding_op = dq_op.getArg().getDefiningOp();
if (!preceding_op) continue;
if (llvm::isa<TF::PartitionedCallOp>(preceding_op)) {
auto call_op = llvm::cast<TF::PartitionedCallOp>(preceding_op);
if (!IsCompositeFunction(call_op)) continue;
return true;
}
if (llvm::isa<quantfork::StorageCastOp>(preceding_op)) {
auto sc_op = llvm::cast<quantfork::StorageCastOp>(preceding_op);
auto sc_arg_type = sc_op.getArg().getType().dyn_cast<TensorType>();
if (sc_arg_type.getElementType().isInteger(8)) {
return true;
}
}
}
for (const auto& result : same_scale_op->getResults()) {
if (!result.hasOneUse() ||
!llvm::isa<quantfork::QuantizeCastOp>(*result.user_begin())) {
continue;
}
auto q_op = llvm::cast<quantfork::QuantizeCastOp>(*result.user_begin());
for (auto following_op : q_op->getUsers()) {
if (llvm::isa<TF::PartitionedCallOp>(following_op)) {
auto call_op = llvm::cast<TF::PartitionedCallOp>(following_op);
if (!IsCompositeFunction(call_op)) continue;
return true;
}
if (llvm::isa<quantfork::StorageCastOp>(following_op)) {
auto sc_op = llvm::cast<quantfork::StorageCastOp>(following_op);
auto sc_arg_type = sc_op.getResult().getType().dyn_cast<TensorType>();
if (sc_arg_type.getElementType().isInteger(8)) {
return true;
}
}
}
}
return false;
}
bool IsCompositeFunction(TF::PartitionedCallOp call_op) const {
if (!call_op->hasAttr(kQuantTraitAttrName)) {
return false;
}
const auto f_attr = call_op.getFAttr().dyn_cast<FlatSymbolRefAttr>();
if (!f_attr || !f_attr.getValue().starts_with("composite_")) {
return false;
}
bool has_quantized_types = false;
for (Value input : call_op.getArgs()) {
if (auto type = input.getType().dyn_cast<TensorType>()) {
if (type.getElementType().isa<FloatType>()) {
return false;
}
if (type.getElementType().isa<QuantizedType>()) {
has_quantized_types = true;
}
}
}
for (Value output : call_op.getOutput()) {
if (auto type = output.getType().dyn_cast<TensorType>()) {
if (type.getElementType().isa<FloatType>()) {
return false;
}
if (type.getElementType().isa<QuantizedType>()) {
has_quantized_types = true;
}
}
}
return has_quantized_types;
}
OpQuantScaleSpecGetter op_quant_scale_spec_getter_;
OpSet target_opset_;
};
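// Rewrites a quantized tf.AvgPool (bracketed by StorageCastOps carrying the
// same quantized type) to compute in float: cast to f32, pool, round, then
// cast back to the quantized storage type.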
struct QuantizeAvgPoolOpPattern
: public OpRewritePattern<quantfork::StorageCastOp> {
explicit QuantizeAvgPoolOpPattern(MLIRContext* context)
: OpRewritePattern<quantfork::StorageCastOp>(context, 100) {}
LogicalResult matchAndRewrite(quantfork::StorageCastOp sc_op,
PatternRewriter& rewriter) const override {
auto avg_pool_op = sc_op.getArg().getDefiningOp<TF::AvgPoolOp>();
if (!avg_pool_op) return failure();
auto preceding_sc_op = dyn_cast_or_null<quantfork::StorageCastOp>(
avg_pool_op.getValue().getDefiningOp());
if (!preceding_sc_op) return failure();
auto dq_arg_type = preceding_sc_op.getArg().getType().cast<TensorType>();
auto qtype = dq_arg_type.getElementType().cast<QuantizedType>();
auto q_result_type = sc_op.getType().cast<TensorType>();
auto out_qtype = q_result_type.getElementType().cast<QuantizedType>();
if (qtype != out_qtype) {
avg_pool_op.emitError(
"The preceding StorageCastOp and the following "
"StorageCastOp must have the same quantized type");
return failure();
}
OpBuilder::InsertionGuard g(rewriter);
rewriter.setInsertionPointAfter(preceding_sc_op);
auto fcast_op = rewriter.create<TF::CastOp>(
preceding_sc_op->getLoc(), dq_arg_type.clone(rewriter.getF32Type()),
preceding_sc_op.getResult());
TF::AvgPoolOp float_avg_pool_op = rewriter.create<TF::AvgPoolOp>(
avg_pool_op->getLoc(),
avg_pool_op.getType().clone(rewriter.getF32Type()),
fcast_op.getResult(),
avg_pool_op->getAttrs());
auto round_val = rewriter.create<TF::RoundOp>(
sc_op.getLoc(), float_avg_pool_op.getOutput());
auto icast_op = rewriter.create<TF::CastOp>(
sc_op.getLoc(), q_result_type.clone(qtype.getStorageType()), round_val);
avg_pool_op.getResult().replaceAllUsesWith(icast_op.getResult());
return success();
}
};
class QuantizePass
: public PassWrapper<QuantizePass, OperationPass<func::FuncOp>> {
public:
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(QuantizePass)
explicit QuantizePass() {
quant_specs_.inference_type = tensorflow::DT_QINT8;
}
explicit QuantizePass(const QuantizationSpecs& quant_specs,
OpSet target_opset)
: quant_specs_(quant_specs) {
weight_quantization_ = quant_specs.weight_quantization;
target_opset_ = target_opset;
}
QuantizePass(const QuantizePass& other) : quant_specs_(other.quant_specs_) {
weight_quantization_ = other.weight_quantization_;
target_opset_ = other.target_opset_;
}
StringRef getArgument() const final {
return "quant-quantize";
}
StringRef getDescription() const final {
return "Apply quantization on models in TensorFlow dialect";
}
bool shouldKeepUnusedQdqPattern();
void runOnOperation() override;
private:
QuantizationSpecs quant_specs_;
Option<bool> weight_quantization_{
*this, "weight-quantization", llvm::cl::init(false),
llvm::cl::desc("Whether to enable weight quantization.")};
Option<OpSet> target_opset_{
*this, "target-opset", llvm::cl::init(OpSet::TF),
llvm::cl::desc("Choose target opset."),
llvm::cl::values(
clEnumValN(OpSet::TF, "TF",
"Uses TF ops that mimic quantization behavior"),
clEnumValN(OpSet::XLA, "XLA", "Uses TF XLA ops"),
clEnumValN(OpSet::UNIFORM_QUANTIZED, "UNIFORM_QUANTIZED",
"Uses TF Uniform Quantized ops"))};
};
bool QuantizePass::shouldKeepUnusedQdqPattern() {
return target_opset_ == OpSet::XLA &&
(quant_specs_.weight_only_quantization ||
quant_specs_.weight_quantization);
}
void QuantizePass::runOnOperation() {
RewritePatternSet patterns(&getContext());
auto func = getOperation();
auto* ctx = func.getContext();
quant_specs_.weight_quantization = weight_quantization_;
const QuantPassSpec quant_params = {
{quant_specs_.verify_numeric, 5.0f,
quant_specs_.whole_model_verify, false},
quant_specs_};
if (quant_specs_.weight_quantization) {
patterns.add<TFDynamicRangeQuantization>(ctx, quant_params);
} else {
patterns.add<TFFullQuantization, TFFullQuantizationReverse>(ctx,
quant_params);
patterns.add<QuantizeSameScaleOpsPattern>(ctx, GetTfQuantScaleSpec,
target_opset_);
patterns.add<QuantizeAvgPoolOpPattern>(ctx);
}
if (failed(applyPatternsAndFoldGreedily(func, std::move(patterns)))) {
func.emitWarning("Failed to converge pattern at QuantizePass.");
}
if (!shouldKeepUnusedQdqPattern()) {
RewritePatternSet patterns_2(&getContext());
patterns_2.add<RemoveUnusedQdqPattern>(ctx);
if (failed(applyPatternsAndFoldGreedily(func, std::move(patterns_2)))) {
signalPassFailure();
}
}
}
}
std::unique_ptr<OperationPass<func::FuncOp>> CreateQuantizePass() {
QuantizationSpecs quant_specs;
return std::make_unique<QuantizePass>(quant_specs, OpSet::TF);
}
std::unique_ptr<OperationPass<func::FuncOp>> CreateQuantizePass(
QuantizationSpecs quant_specs, OpSet target_opset) {
return std::make_unique<QuantizePass>(quant_specs, target_opset);
}
static PassRegistration<QuantizePass> pass;
}
} | #include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/hexagon/builders/tests/hexagon_delegate_op_model.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
using testing::ElementsAreArray;
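// Single-op model wrapping a QUANTIZE (requantize) op so its output under
// the Hexagon delegate can be compared against expected quantized values.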
class QuantizeOpModel : public SingleOpModelWithHexagon {
public:
explicit QuantizeOpModel(const TensorData& input, const TensorData& output) {
input_ = AddInput(input);
output_ = AddOutput(output);
SetBuiltinOp(BuiltinOperator_QUANTIZE, BuiltinOptions_QuantizeOptions,
CreateQuantizeOptions(builder_).Union());
BuildInterpreter({GetShape(input_)});
}
template <typename T>
void SetInput(const std::vector<float>& data) {
QuantizeAndPopulate<T>(input_, data);
}
template <typename T>
std::vector<T> GetOutput() {
return ExtractVector<T>(output_);
}
protected:
BuiltinOperator op_code_;
int input_;
int output_;
};
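// Quantization parameters follow from the (min, max) range of each tensor:
//   scale = (max - min) / 255, zero_point = qmin - min / scale.
// For {UINT8, ..., -63.5, 64}: scale = 0.5, zero_point = 127, so a real
// input of 1.0 quantizes to 1 / 0.5 + 127 = 129. That is why the SameScale
// tests feed {1, ..., 10} and expect {129, 131, ..., 147} back unchanged,
// while INT8 over the same range (zero_point = -1) yields {1, 3, ..., 19}.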
TEST(QuantizeOpTest, UInt8UInt8SameScale) {
QuantizeOpModel m({TensorType_UINT8, {1, 1, 2, 5}, -63.5, 64},
{TensorType_UINT8, {1, 1, 2, 5}, -63.5, 64});
m.SetInput<uint8_t>({1, 2, 3, 4, 5, 6, 7, 8, 9, 10});
m.ApplyDelegateAndInvoke();
EXPECT_THAT(
m.GetOutput<uint8_t>(),
ElementsAreArray({129, 131, 133, 135, 137, 139, 141, 143, 145, 147}));
}
TEST(QuantizeOpTest, Uint8Uint8LargerScale) {
QuantizeOpModel m({TensorType_UINT8, {1, 1, 2, 5}, -63.5, 64},
{TensorType_UINT8, {1, 1, 2, 5}, -127, 128});
m.SetInput<uint8_t>({1, 2, 3, 4, 5, 6, 7, 8, 9, 10});
m.ApplyDelegateAndInvoke();
EXPECT_THAT(
m.GetOutput<uint8_t>(),
ElementsAreArray({128, 129, 130, 131, 132, 133, 134, 135, 136, 137}));
}
TEST(QuantizeOpTest, Uint8Uint8SmallerScale) {
QuantizeOpModel m({TensorType_UINT8, {1, 1, 2, 5}, -127, 128},
{TensorType_UINT8, {1, 1, 2, 5}, -63.5, 64});
m.SetInput<uint8_t>({1, 2, 3, 4, 5, 6, 7, 8, 9, 10});
m.ApplyDelegateAndInvoke();
EXPECT_THAT(
m.GetOutput<uint8_t>(),
ElementsAreArray({129, 131, 133, 135, 137, 139, 141, 143, 145, 147}));
}
TEST(QuantizeOpTest, Int8Uint8SmallerScale) {
QuantizeOpModel m({TensorType_INT8, {1, 1, 2, 5}, -127, 128},
{TensorType_UINT8, {1, 1, 2, 5}, -63.5, 64});
m.SetInput<int8_t>({1, 2, 3, 4, 5, 6, 7, 8, 9, 10});
m.ApplyDelegateAndInvoke();
EXPECT_THAT(
m.GetOutput<uint8_t>(),
ElementsAreArray({129, 131, 133, 135, 137, 139, 141, 143, 145, 147}));
}
TEST(QuantizeOpTest, Int8Uint8LargerScale) {
QuantizeOpModel m({TensorType_INT8, {1, 1, 2, 5}, -127, 128},
{TensorType_UINT8, {1, 1, 2, 5}, -254, 256});
m.SetInput<int8_t>({1, 2, 3, 4, 5, 6, 7, 8, 9, 10});
m.ApplyDelegateAndInvoke();
EXPECT_THAT(
m.GetOutput<uint8_t>(),
ElementsAreArray({128, 128, 129, 129, 130, 130, 131, 131, 132, 132}));
}
TEST(QuantizeOpTest, UInt8Int8SameScale128Diff) {
QuantizeOpModel m({TensorType_UINT8, {1, 1, 2, 5}, -127, 128},
{TensorType_INT8, {1, 1, 2, 5}, -127, 128});
m.SetInput<uint8_t>({1, 2, 3, 4, 5, 6, 7, 8, 9, 10});
m.ApplyDelegateAndInvoke();
EXPECT_THAT(m.GetOutput<int8_t>(),
ElementsAreArray({0, 1, 2, 3, 4, 5, 6, 7, 8, 9}));
}
TEST(QuantizeOpTest, Int8Int8SameScale) {
QuantizeOpModel m({TensorType_INT8, {1, 1, 2, 5}, -63.5, 64},
{TensorType_INT8, {1, 1, 2, 5}, -63.5, 64});
m.SetInput<int8_t>({1, 2, 3, 4, 5, 6, 7, 8, 9, 10});
m.ApplyDelegateAndInvoke();
EXPECT_THAT(m.GetOutput<int8_t>(),
ElementsAreArray({1, 3, 5, 7, 9, 11, 13, 15, 17, 19}));
}
TEST(QuantizeOpTest, Int8Int8LargerScale) {
QuantizeOpModel m({TensorType_INT8, {1, 1, 2, 5}, -63.5, 64},
{TensorType_INT8, {1, 1, 2, 5}, -127, 128});
m.SetInput<int8_t>({1, 2, 3, 4, 5, 6, 7, 8, 9, 10});
m.ApplyDelegateAndInvoke();
EXPECT_THAT(m.GetOutput<int8_t>(),
ElementsAreArray({0, 1, 2, 3, 4, 5, 6, 7, 8, 9}));
}
TEST(QuantizeOpTest, Int8Int8SmallerScale) {
QuantizeOpModel m({TensorType_INT8, {1, 1, 2, 5}, -127, 128},
{TensorType_INT8, {1, 1, 2, 5}, -63.5, 64});
m.SetInput<int8_t>({1, 2, 3, 4, 5, 6, 7, 8, 9, 10});
m.ApplyDelegateAndInvoke();
EXPECT_THAT(m.GetOutput<int8_t>(),
ElementsAreArray({1, 3, 5, 7, 9, 11, 13, 15, 17, 19}));
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/tensorflow/passes/quantize.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/hexagon/builders/tests/quantize_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ab694cd8-38e1-4516-a33b-db2d3a7779e5 | cpp | tensorflow/tensorflow | memory_info | tensorflow/lite/profiling/memory_info.cc | tensorflow/lite/profiling/memory_info_test.cc | #include "tensorflow/lite/profiling/memory_info.h"
#include <stddef.h>
#include <ostream>
#ifdef __linux__
#include <malloc.h>
#include <sys/resource.h>
#include <sys/time.h>
#elif defined(__APPLE__)
#include <mach/mach.h>
#include <malloc/malloc.h>
#endif
namespace tflite {
namespace profiling {
namespace memory {
const size_t MemoryUsage::kValueNotSet = 0;
bool MemoryUsage::IsSupported() {
#if defined(__linux__) || defined(__APPLE__)
return true;
#endif
return false;
}
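// Linux: ru_maxrss from getrusage() is already in kilobytes. Heap stats use
// mallinfo2() on glibc >= 2.33 (where mallinfo() is deprecated and its int
// fields can overflow), the legacy mallinfo() otherwise, and sentinel -1
// values when __NO_MALLINFO__ is set. Apple: phys_footprint from
// task_info() is in bytes, hence the division by 1024; heap stats come
// from mstats().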
MemoryUsage GetMemoryUsage() {
MemoryUsage result;
#ifdef __linux__
rusage res;
if (getrusage(RUSAGE_SELF, &res) == 0) {
result.mem_footprint_kb = res.ru_maxrss;
}
#if defined(__NO_MALLINFO__)
result.total_allocated_bytes = -1;
result.in_use_allocated_bytes = -1;
#elif defined(__GLIBC__) && __GLIBC_MINOR__ >= 33
const auto mem = mallinfo2();
result.total_allocated_bytes = mem.arena;
result.in_use_allocated_bytes = mem.uordblks;
#else
const auto mem = mallinfo();
result.total_allocated_bytes = mem.arena;
result.in_use_allocated_bytes = mem.uordblks;
#endif
#elif defined(__APPLE__)
struct task_vm_info vm_info;
mach_msg_type_number_t count = TASK_VM_INFO_COUNT;
auto status = task_info(mach_task_self(), TASK_VM_INFO,
reinterpret_cast<task_info_t>(&vm_info), &count);
if (status == KERN_SUCCESS) {
result.mem_footprint_kb =
static_cast<int64_t>(vm_info.phys_footprint / 1024.0);
}
struct mstats stats = mstats();
result.total_allocated_bytes = stats.bytes_total;
result.in_use_allocated_bytes = stats.bytes_used;
#endif
return result;
}
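// Note the mixed units below: mem_footprint_kb is converted to MB with
// / 1000.0, while the byte counters are converted with / 1e6.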
void MemoryUsage::AllStatsToStream(std::ostream* stream) const {
*stream << "max resident set size/physical footprint = "
<< mem_footprint_kb / 1000.0 << " MB, total non-mmapped heap size = "
<< total_allocated_bytes / 1000.0 / 1000.0
<< " MB, in-use heap size = "
<< in_use_allocated_bytes / 1000.0 / 1000.0 << " MB";
}
}  // namespace memory
}  // namespace profiling
}  // namespace tflite
#include <memory>
#include <new>
#include <sstream>
#include <string>
#include <gtest/gtest.h>
namespace tflite {
namespace profiling {
namespace memory {
TEST(MemoryUsage, AddAndSub) {
MemoryUsage mem1, mem2;
mem1.mem_footprint_kb = 5;
mem1.total_allocated_bytes = 7000;
mem1.in_use_allocated_bytes = 2000;
mem2.mem_footprint_kb = 3;
mem2.total_allocated_bytes = 7000;
mem2.in_use_allocated_bytes = 4000;
const auto add_mem = mem1 + mem2;
EXPECT_EQ(8, add_mem.mem_footprint_kb);
EXPECT_EQ(14000, add_mem.total_allocated_bytes);
EXPECT_EQ(6000, add_mem.in_use_allocated_bytes);
const auto sub_mem = mem1 - mem2;
EXPECT_EQ(2, sub_mem.mem_footprint_kb);
EXPECT_EQ(0, sub_mem.total_allocated_bytes);
EXPECT_EQ(-2000, sub_mem.in_use_allocated_bytes);
}
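// Allocates and touches 10 MiB so the pages are resident, then checks that
// every counter reflects at least that much; reading the array back after
// the GetMemoryUsage() call keeps the allocation live across it.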
TEST(MemoryUsage, GetMemoryUsage) {
MemoryUsage result;
EXPECT_EQ(MemoryUsage::kValueNotSet, result.mem_footprint_kb);
EXPECT_EQ(MemoryUsage::kValueNotSet, result.total_allocated_bytes);
EXPECT_EQ(MemoryUsage::kValueNotSet, result.in_use_allocated_bytes);
#if defined(__linux__) || defined(__APPLE__)
constexpr int size = 10 * 1024 * 1024;
std::unique_ptr<unsigned char[]> byte_array(new unsigned char[size]);
for (int i = 0; i < size; ++i) {
byte_array[i] = i % 256;
}
result = GetMemoryUsage();
for (int i = 0; i < size; ++i) {
EXPECT_EQ(byte_array[i], i % 256);
}
EXPECT_GE(result.mem_footprint_kb, size / 1024);
EXPECT_GE(result.total_allocated_bytes, size);
EXPECT_GE(result.in_use_allocated_bytes, size);
#endif
}
TEST(MemoryUsage, OutputMemoryUsageToStream) {
MemoryUsage memory_usage = GetMemoryUsage();
std::stringstream stream;
stream << memory_usage;
std::string message = stream.str();
EXPECT_STRNE(message.c_str(), "");
}
TEST(MemoryUsage, IsSupported) {
#if defined(__linux__) || defined(__APPLE__)
EXPECT_TRUE(MemoryUsage::IsSupported());
#else
EXPECT_FALSE(MemoryUsage::IsSupported());
#endif
}
}  // namespace memory
}  // namespace profiling
}  // namespace tflite
ec5e5467-28e8-4e41-a333-1a91de739759 | cpp | tensorflow/tensorflow | cpu_backend_gemm | tensorflow/lite/kernels/cpu_backend_gemm.h | tensorflow/lite/kernels/cpu_backend_gemm_test.cc | #ifndef TENSORFLOW_LITE_KERNELS_CPU_BACKEND_GEMM_H_
#define TENSORFLOW_LITE_KERNELS_CPU_BACKEND_GEMM_H_
#include <cstdint>
#include "ruy/profiler/instrumentation.h"
#include "tensorflow/lite/kernels/cpu_backend_context.h"
#include "tensorflow/lite/kernels/cpu_backend_gemm_custom_gemv.h"
#include "tensorflow/lite/kernels/cpu_backend_gemm_params.h"
#include "tensorflow/lite/kernels/cpu_backend_gemm_ruy.h"
#ifndef TFLITE_WITH_RUY
#include "tensorflow/lite/kernels/cpu_backend_gemm_eigen.h"
#include "tensorflow/lite/kernels/cpu_backend_gemm_gemmlowp.h"
#include "tensorflow/lite/kernels/cpu_backend_gemm_x86.h"
#endif  // !defined(TFLITE_WITH_RUY)
namespace tflite {
namespace cpu_backend_gemm {
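// Compile-time dispatch: with TFLITE_WITH_RUY everything goes through ruy.
// Otherwise x86 builds route through GemmImplX86, and remaining non-ruy
// builds default to ruy with the gemmlowp (quantized) and Eigen (float)
// specializations carved out below.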
#if !defined(TFLITE_WITH_RUY) && defined(TFLITE_X86_PLATFORM)
template <typename LhsScalar, typename RhsScalar, typename AccumScalar,
typename DstScalar, QuantizationFlavor quantization_flavor>
struct GemmImpl : detail::GemmImplX86<LhsScalar, RhsScalar, AccumScalar,
DstScalar, quantization_flavor> {};
#else
template <typename LhsScalar, typename RhsScalar, typename AccumScalar,
typename DstScalar, QuantizationFlavor quantization_flavor>
struct GemmImpl : detail::GemmImplUsingRuy<LhsScalar, RhsScalar, AccumScalar,
DstScalar, quantization_flavor> {};
#if !defined(TFLITE_WITH_RUY)
template <typename SrcScalar, typename DstScalar,
QuantizationFlavor quantization_flavor>
struct GemmImpl<SrcScalar, SrcScalar, std::int32_t, DstScalar,
quantization_flavor>
: detail::GemmImplUsingGemmlowp<SrcScalar, SrcScalar, std::int32_t,
DstScalar, quantization_flavor> {};
#if !defined(GEMMLOWP_NEON)
template <typename SrcScalar, QuantizationFlavor quantization_flavor>
struct GemmImpl<SrcScalar, SrcScalar, std::int32_t, std::int8_t,
quantization_flavor>
: detail::GemmImplUsingRuy<SrcScalar, SrcScalar, std::int32_t, std::int8_t,
quantization_flavor> {};
template <typename DstScalar, QuantizationFlavor quantization_flavor>
struct GemmImpl<std::int8_t, std::int8_t, std::int32_t, DstScalar,
quantization_flavor>
: detail::GemmImplUsingRuy<std::int8_t, std::int8_t, std::int32_t,
DstScalar, quantization_flavor> {};
template <QuantizationFlavor quantization_flavor>
struct GemmImpl<std::int8_t, std::int8_t, std::int32_t, std::int8_t,
quantization_flavor>
: detail::GemmImplUsingRuy<std::int8_t, std::int8_t, std::int32_t,
std::int8_t, quantization_flavor> {};
#endif  // !defined(GEMMLOWP_NEON)
template <>
struct GemmImpl<float, float, float, float, QuantizationFlavor::kFloatingPoint>
: detail::GemmImplUsingEigen {};
#endif  // !defined(TFLITE_WITH_RUY)
#endif  // !defined(TFLITE_WITH_RUY) && defined(TFLITE_X86_PLATFORM)
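// Main entry point. Falls back to ruy whenever weight caching is enabled or
// the storage orders deviate from row-major LHS / col-major RHS and DST;
// single-column destinations first try the custom GEMV kernels. A minimal
// float call looks like (sketch; MatrixParams field setup elided):
//
//   MatrixParams<float> lhs, rhs, dst;   // set order/rows/cols for each
//   GemmParams<float, float> params;
//   Gemm(lhs, lhs_data, rhs, rhs_data, dst, dst_data, params, &context);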
template <typename LhsScalar, typename RhsScalar, typename AccumScalar,
typename DstScalar, QuantizationFlavor quantization_flavor>
void Gemm(const MatrixParams<LhsScalar>& lhs_params, const LhsScalar* lhs_data,
const MatrixParams<RhsScalar>& rhs_params, const RhsScalar* rhs_data,
const MatrixParams<DstScalar>& dst_params, DstScalar* dst_data,
const GemmParams<AccumScalar, DstScalar, quantization_flavor>& params,
CpuBackendContext* context) {
ruy::profiler::ScopeLabel label("cpu_backend_gemm::Gemm");
ValidateParams(lhs_params, rhs_params, dst_params, params);
if (!IsValidGemm(lhs_params, rhs_params, dst_params)) {
TFLITE_DCHECK(false);
return;
}
bool must_use_ruy = false;
if (context->use_caching()) {
must_use_ruy = true;
}
if (lhs_params.order != Order::kRowMajor ||
rhs_params.order != Order::kColMajor ||
dst_params.order != Order::kColMajor) {
must_use_ruy = true;
}
if (must_use_ruy) {
detail::GemmImplUsingRuy<LhsScalar, RhsScalar, AccumScalar, DstScalar,
quantization_flavor>::Run(lhs_params, lhs_data,
rhs_params, rhs_data,
dst_params, dst_data,
params, context);
return;
}
const bool try_custom_gemv = (dst_params.cols == 1);
if (try_custom_gemv) {
if (detail::CustomGemv(lhs_params, lhs_data, rhs_params, rhs_data,
dst_params, dst_data, params, context)) {
return;
}
}
GemmImpl<LhsScalar, RhsScalar, AccumScalar, DstScalar,
quantization_flavor>::Run(lhs_params, lhs_data, rhs_params, rhs_data,
dst_params, dst_data, params, context);
}
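// The int8 LHS x int16 RHS -> int16 combination has no gemmlowp/Eigen
// path, so this overload dispatches straight to the ruy implementation.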
template <QuantizationFlavor quantization_flavor>
void Gemm(const MatrixParams<int8_t>& lhs_params, const int8_t* lhs_data,
const MatrixParams<int16_t>& rhs_params, const int16_t* rhs_data,
const MatrixParams<int16_t>& dst_params, int16_t* dst_data,
const GemmParams<int32_t, int16_t, quantization_flavor>& params,
CpuBackendContext* context) {
ruy::profiler::ScopeLabel label("cpu_backend_gemm::Gemm");
ValidateParams(lhs_params, rhs_params, dst_params, params);
if (!IsValidGemm(lhs_params, rhs_params, dst_params)) {
TFLITE_DCHECK(false);
return;
}
detail::GemmImplUsingRuy<int8_t, int16_t, int32_t, int16_t,
quantization_flavor>::Run(lhs_params, lhs_data,
rhs_params, rhs_data,
dst_params, dst_data,
params, context);
}
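// Variant producing raw int32 accumulators, i.e. no requantization to a
// narrow destination type; also ruy-only.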
template <typename LhsScalar, typename RhsScalar,
QuantizationFlavor quantization_flavor>
void Gemm(const MatrixParams<LhsScalar>& lhs_params, const LhsScalar* lhs_data,
const MatrixParams<RhsScalar>& rhs_params, const RhsScalar* rhs_data,
const MatrixParams<int32_t>& dst_params, int32_t* dst_data,
const GemmParams<int32_t, int32_t, quantization_flavor>& params,
CpuBackendContext* context) {
ruy::profiler::ScopeLabel label("cpu_backend_gemm::Gemm");
ValidateParams(lhs_params, rhs_params, dst_params, params);
ruy::profiler::ScopeLabel label2("cpu_backend_gemm::Gemm: general GEMM");
detail::GemmImplUsingRuy<LhsScalar, RhsScalar, int32_t, int32_t,
quantization_flavor>::Run(lhs_params, lhs_data,
rhs_params, rhs_data,
dst_params, dst_data,
params, context);
}
}  // namespace cpu_backend_gemm
}  // namespace tflite
#endif  // TENSORFLOW_LITE_KERNELS_CPU_BACKEND_GEMM_H_
#include <math.h>
#include <stdint.h>
#include <stdlib.h>
#include <algorithm>
#include <iterator>
#include <limits>
#include <random>
#include <sstream>
#include <string>
#include <tuple>
#include <type_traits>
#include <vector>
#include <gtest/gtest.h>
#include "ruy/matrix.h"
#include "ruy/reference_mul.h"
#include "tensorflow/lite/kernels/cpu_backend_context.h"
#include "tensorflow/lite/kernels/cpu_backend_gemm_params.h"
#include "tensorflow/lite/kernels/cpu_backend_gemm_ruy.h"
namespace tflite {
namespace {
using cpu_backend_gemm::Gemm;
using cpu_backend_gemm::GemmParams;
using cpu_backend_gemm::MatrixParams;
using cpu_backend_gemm::QuantizationFlavor;
template <typename Scalar>
std::string ToString(const std::vector<Scalar>& vector) {
std::stringstream s;
if (vector.empty()) {
s << "{}";
} else {
s << "{ " << static_cast<double>(vector[0]);
for (int i = 1; i < vector.size(); i++) {
s << ", " << static_cast<double>(vector[i]);
}
s << "}";
}
return s.str();
}
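// Fills the vector from a default-seeded engine (hence deterministic),
// rescaled into [-1, 1] for floating-point types and into [-256, 256]
// (clamped to the representable range) for integer types.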
template <typename Scalar>
void MakeDeterministicPseudoRandomVector(int size,
std::vector<Scalar>* vector) {
std::default_random_engine random_engine;
(void)random_engine();
const double random_min = static_cast<double>(random_engine.min());
const double random_max = static_cast<double>(random_engine.max());
const double result_min =
std::is_floating_point<Scalar>::value
? -1.0
: std::max(-256., static_cast<double>(
std::numeric_limits<Scalar>::lowest()));
const double result_max =
std::is_floating_point<Scalar>::value
? 1.0
: std::min(256.,
static_cast<double>(std::numeric_limits<Scalar>::max()));
const double random_scale =
(result_max - result_min) / (random_max - random_min);
vector->resize(size);
for (int i = 0; i < size; i++) {
double val = random_scale * (random_engine() - random_min);
val = std::max(val,
static_cast<double>(std::numeric_limits<Scalar>::lowest()));
val =
std::min(val, static_cast<double>(std::numeric_limits<Scalar>::max()));
(*vector)[i] = static_cast<Scalar>(val);
}
}
template <typename Scalar>
void MakeVectorFilledWithConsecutiveInts(int size,
std::vector<Scalar>* vector) {
vector->resize(size);
EXPECT_LE(size, std::numeric_limits<Scalar>::max());
for (int i = 0; i < size; i++) {
(*vector)[i] = static_cast<Scalar>(i + 1);
}
}
template <typename Scalar>
Scalar Median(const std::vector<Scalar>& vector) {
EXPECT_GT(vector.size(), 0);
std::vector<Scalar> vector_copy = vector;
std::sort(std::begin(vector_copy), std::end(vector_copy));
return vector_copy[vector_copy.size() / 2];
}
template <typename Scalar>
double MedianAbs(const std::vector<Scalar>& vector) {
EXPECT_GT(vector.size(), 0);
std::vector<double> vector_abs;
vector_abs.resize(vector.size());
for (int i = 0; i < vector.size(); i++) {
vector_abs[i] = std::abs(static_cast<double>(vector[i]));
}
std::sort(std::begin(vector_abs), std::end(vector_abs));
return vector_abs[vector_abs.size() / 2];
}
template <typename Scalar>
void Clamp(const std::vector<Scalar>& src, Scalar clamp_min, Scalar clamp_max,
std::vector<Scalar>* dst) {
dst->resize(src.size());
for (int i = 0; i < src.size(); i++) {
(*dst)[i] = std::max(std::min(src[i], clamp_max), clamp_min);
}
}
template <typename AccumScalar, typename DstScalar,
QuantizationFlavor quantization_flavor>
void Clamp(const GemmParams<AccumScalar, DstScalar, quantization_flavor>& src,
DstScalar clamp_min, DstScalar clamp_max,
GemmParams<AccumScalar, DstScalar, quantization_flavor>* dst) {
*dst = src;
dst->clamp_min = clamp_min;
dst->clamp_max = clamp_max;
}
struct ErrorStats {
int size;
double scale_factor;
double max_abs_diff;
double mean_abs_diff;
double abs_mean_diff;
};
template <typename Scalar>
void ComputeErrorStats(const std::vector<Scalar>& actual,
const std::vector<Scalar>& expected,
ErrorStats* error_stats) {
double max_abs_diff = 0;
double sum_abs_diff = 0;
double sum_diff = 0;
double max_abs_expected = 0;
EXPECT_EQ(actual.size(), expected.size());
for (int i = 0; i < actual.size(); i++) {
double actual_val = static_cast<double>(actual[i]);
double expected_val = static_cast<double>(expected[i]);
double diff = actual_val - expected_val;
max_abs_expected = std::max(max_abs_expected, std::abs(expected_val));
sum_diff += diff;
sum_abs_diff += std::abs(diff);
max_abs_diff = std::max(max_abs_diff, std::abs(diff));
}
error_stats->scale_factor = max_abs_expected;
error_stats->max_abs_diff = max_abs_diff;
error_stats->mean_abs_diff = sum_abs_diff / actual.size();
error_stats->abs_mean_diff = std::abs(sum_diff / actual.size());
error_stats->size = actual.size();
}
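// All bounds below are relative, scaled by max |expected|: floating-point
// tolerates max error ~ depth * epsilon and mean error ~ sqrt(depth) *
// epsilon, while the integer paths use a loose per-element bound but tight
// statistical bounds on mean |diff| (~0.5 / sqrt(size)) and on the bias
// |mean diff| (~2 / size).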
template <typename AccumScalar, typename DstScalar>
bool CheckErrorStats(const ErrorStats& error_stats, int accumulation_depth) {
double tolerated_relative_max_abs_diff = 0;
double tolerated_relative_mean_abs_diff = 0;
double tolerated_relative_abs_mean_diff = 0;
double inverse_size = 1. / error_stats.size;
if (std::is_floating_point<AccumScalar>::value) {
tolerated_relative_max_abs_diff =
accumulation_depth * std::numeric_limits<DstScalar>::epsilon();
tolerated_relative_mean_abs_diff =
std::sqrt(static_cast<double>(accumulation_depth)) *
std::numeric_limits<DstScalar>::epsilon();
tolerated_relative_abs_mean_diff =
tolerated_relative_mean_abs_diff * std::sqrt(inverse_size);
} else {
tolerated_relative_max_abs_diff = 1;
tolerated_relative_mean_abs_diff = std::sqrt(inverse_size) * 0.5;
tolerated_relative_abs_mean_diff = inverse_size * 2.;
}
double tolerated_max_abs_diff =
tolerated_relative_max_abs_diff * error_stats.scale_factor;
double tolerated_mean_abs_diff =
tolerated_relative_mean_abs_diff * error_stats.scale_factor;
double tolerated_abs_mean_diff =
tolerated_relative_abs_mean_diff * error_stats.scale_factor;
EXPECT_LE(error_stats.max_abs_diff, tolerated_max_abs_diff);
EXPECT_LE(error_stats.mean_abs_diff, tolerated_mean_abs_diff);
EXPECT_LE(error_stats.abs_mean_diff, tolerated_abs_mean_diff);
return error_stats.max_abs_diff <= tolerated_max_abs_diff &&
error_stats.mean_abs_diff <= tolerated_mean_abs_diff &&
error_stats.abs_mean_diff <= tolerated_abs_mean_diff;
}
template <typename AccumScalar, typename DstScalar>
void CheckErrorForAccumulation(int accumulation_depth,
const std::vector<DstScalar>& actual,
const std::vector<DstScalar>& expected) {
ErrorStats error_stats;
ComputeErrorStats(actual, expected, &error_stats);
bool success =
CheckErrorStats<AccumScalar, DstScalar>(error_stats, accumulation_depth);
EXPECT_TRUE(success) << "Actual vector\n"
<< ToString(actual) << "\ndiffers from expected vector\n"
<< ToString(expected) << "\n";
}
template <typename LhsScalar, typename RhsScalar, typename AccumScalar,
typename DstScalar, QuantizationFlavor quantization_flavor>
void PerformGemmThenCompareResultsThenAgainWithClamping(
const MatrixParams<LhsScalar>& lhs_params,
const std::vector<LhsScalar>& lhs_data,
const MatrixParams<RhsScalar>& rhs_params,
const std::vector<RhsScalar>& rhs_data,
const MatrixParams<DstScalar>& dst_params, std::vector<DstScalar>* dst_data,
const GemmParams<AccumScalar, DstScalar, quantization_flavor>& params,
const std::vector<DstScalar>& expected,
CpuBackendContext* cpu_backend_context) {
const int accumulation_depth = lhs_params.cols;
Gemm(lhs_params, lhs_data.data(), rhs_params, rhs_data.data(), dst_params,
dst_data->data(), params, cpu_backend_context);
CheckErrorForAccumulation<AccumScalar>(accumulation_depth, *dst_data,
expected);
DstScalar expected_median = Median(expected);
std::vector<DstScalar> expected_with_clamp;
GemmParams<AccumScalar, DstScalar, quantization_flavor> params_with_clamp;
DstScalar clamp_min, clamp_max;
clamp_min = std::numeric_limits<DstScalar>::lowest();
clamp_max = expected_median;
Clamp(expected, clamp_min, clamp_max, &expected_with_clamp);
Clamp(params, clamp_min, clamp_max, ¶ms_with_clamp);
Gemm(lhs_params, lhs_data.data(), rhs_params, rhs_data.data(), dst_params,
dst_data->data(), params_with_clamp, cpu_backend_context);
CheckErrorForAccumulation<AccumScalar>(accumulation_depth, *dst_data,
expected_with_clamp);
clamp_min = expected_median;
clamp_max = std::numeric_limits<DstScalar>::max();
Clamp(expected, clamp_min, clamp_max, &expected_with_clamp);
Clamp(params, clamp_min, clamp_max, ¶ms_with_clamp);
Gemm(lhs_params, lhs_data.data(), rhs_params, rhs_data.data(), dst_params,
dst_data->data(), params_with_clamp, cpu_backend_context);
CheckErrorForAccumulation<AccumScalar>(accumulation_depth, *dst_data,
expected_with_clamp);
}
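// Binary search for a multiplier_exponent that puts the median |output|
// near a quarter of the clamp range, so that requantized results are
// neither saturated nor vanishingly small when compared to the reference.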
template <typename LhsScalar, typename RhsScalar, typename AccumScalar,
typename DstScalar>
int BisectReasonableMultiplierExponent(
int bisect_min, int bisect_max, const MatrixParams<LhsScalar>& lhs_params,
const std::vector<LhsScalar>& lhs_data,
const MatrixParams<RhsScalar>& rhs_params,
const std::vector<RhsScalar>& rhs_data,
const MatrixParams<DstScalar>& dst_params, std::vector<DstScalar>* dst_data,
const GemmParams<AccumScalar, DstScalar>& params,
CpuBackendContext* cpu_backend_context) {
if (bisect_min == bisect_max) {
return bisect_min;
}
int bisect_mid =
static_cast<int>(std::floor(0.5 * (bisect_min + bisect_max)));
GemmParams<AccumScalar, DstScalar> params_copy(params);
params_copy.multiplier_exponent = bisect_mid;
double clamp_abs = std::max(std::abs(static_cast<double>(params.clamp_min)),
std::abs(static_cast<double>(params.clamp_max)));
Gemm(lhs_params, lhs_data.data(), rhs_params, rhs_data.data(), dst_params,
dst_data->data(), params_copy, cpu_backend_context);
double median_abs = MedianAbs(*dst_data);
if (median_abs < 0.25 * clamp_abs) {
return BisectReasonableMultiplierExponent(
bisect_mid + 1, bisect_max, lhs_params, lhs_data, rhs_params, rhs_data,
dst_params, dst_data, params_copy, cpu_backend_context);
} else {
return BisectReasonableMultiplierExponent(
bisect_min, bisect_mid, lhs_params, lhs_data, rhs_params, rhs_data,
dst_params, dst_data, params_copy, cpu_backend_context);
}
}
template <typename LhsScalar, typename RhsScalar, typename AccumScalar,
typename DstScalar, QuantizationFlavor quantization_flavor>
void ReferenceGemm(
const MatrixParams<LhsScalar>& lhs_params, const LhsScalar* lhs_data,
const MatrixParams<RhsScalar>& rhs_params, const RhsScalar* rhs_data,
const MatrixParams<DstScalar>& dst_params, DstScalar* dst_data,
const GemmParams<AccumScalar, DstScalar, quantization_flavor>& params,
CpuBackendContext* context) {
ruy::Matrix<LhsScalar> ruy_lhs;
ruy::Matrix<RhsScalar> ruy_rhs;
ruy::Matrix<DstScalar> ruy_dst;
cpu_backend_gemm::detail::MakeRuyMatrix(lhs_params, lhs_data, &ruy_lhs);
cpu_backend_gemm::detail::MakeRuyMatrix(rhs_params, rhs_data, &ruy_rhs);
cpu_backend_gemm::detail::MakeRuyMatrix(dst_params, dst_data, &ruy_dst);
ruy::MulParams<AccumScalar, DstScalar> ruy_mul_params;
cpu_backend_gemm::detail::MakeRuyMulParams(params, &ruy_mul_params);
ruy::ReferenceMul(ruy_lhs, ruy_rhs, ruy_mul_params, &ruy_dst);
}
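// Runs one GEMM shape end-to-end. Golden-value tests use consecutive-int
// inputs with fixed row-major/col-major orders; random tests use
// pseudo-random data, randomized storage orders and zero points, compare
// against ruy::ReferenceMul, and then repeat the quantized case with
// per-channel multipliers.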
template <typename LhsScalar, typename RhsScalar, typename AccumScalar,
typename DstScalar>
void TestSomeGemm(int rows, int depth, int cols,
const std::vector<DstScalar>& golden) {
CpuBackendContext cpu_backend_context;
std::default_random_engine random_engine;
cpu_backend_context.SetMaxNumThreads(1 + (random_engine() % 8));
bool use_caching = static_cast<bool>(random_engine() % 2);
cpu_backend_context.SetUseCaching(use_caching);
const bool use_golden = !golden.empty();
std::vector<LhsScalar> lhs_data;
std::vector<RhsScalar> rhs_data;
std::vector<AccumScalar> bias_data;
std::vector<DstScalar> dst_data;
if (use_golden) {
MakeVectorFilledWithConsecutiveInts(rows * depth, &lhs_data);
MakeVectorFilledWithConsecutiveInts(depth * cols, &rhs_data);
MakeVectorFilledWithConsecutiveInts(rows, &bias_data);
} else {
MakeDeterministicPseudoRandomVector(rows * depth, &lhs_data);
MakeDeterministicPseudoRandomVector(depth * cols, &rhs_data);
MakeDeterministicPseudoRandomVector(rows, &bias_data);
}
MakeDeterministicPseudoRandomVector(rows * cols, &dst_data);
auto random_order = [&]() {
return random_engine() % 2 ? cpu_backend_gemm::Order::kRowMajor
: cpu_backend_gemm::Order::kColMajor;
};
MatrixParams<LhsScalar> lhs_params;
lhs_params.order =
use_golden ? cpu_backend_gemm::Order::kRowMajor : random_order();
lhs_params.rows = rows;
lhs_params.cols = depth;
if (!std::is_floating_point<LhsScalar>::value &&
(!std::is_same<LhsScalar, int8_t>::value &&
!std::is_same<RhsScalar, int16_t>::value)) {
lhs_params.zero_point = 1;
if (!use_golden) {
lhs_params.zero_point += random_engine() % 8;
}
}
MatrixParams<RhsScalar> rhs_params;
rhs_params.order =
use_golden ? cpu_backend_gemm::Order::kColMajor : random_order();
rhs_params.rows = depth;
rhs_params.cols = cols;
if (!std::is_floating_point<RhsScalar>::value &&
(!std::is_same<LhsScalar, int8_t>::value &&
!std::is_same<RhsScalar, int16_t>::value)) {
rhs_params.zero_point = 1;
if (!use_golden) {
rhs_params.zero_point += random_engine() % 8;
}
}
MatrixParams<DstScalar> dst_params;
dst_params.order =
use_golden ? cpu_backend_gemm::Order::kColMajor : random_order();
dst_params.rows = rows;
dst_params.cols = cols;
if (!std::is_floating_point<DstScalar>::value &&
(!std::is_same<LhsScalar, int8_t>::value &&
!std::is_same<RhsScalar, int16_t>::value)) {
dst_params.zero_point = 1;
if (!use_golden) {
dst_params.zero_point += random_engine() % 8;
}
}
GemmParams<AccumScalar, DstScalar> params;
if (use_golden || (random_engine() % 2)) {
params.bias = bias_data.data();
}
static constexpr std::int32_t kMultiplierFixedpointMin = 1234567890;
static constexpr std::int32_t kMultiplierFixedpointMax = 1987654321;
if (!std::is_floating_point<AccumScalar>::value) {
params.multiplier_fixedpoint = kMultiplierFixedpointMin;
int bisect_min = -8 * static_cast<int>(sizeof(AccumScalar));
int bisect_max = 0;
params.multiplier_exponent = BisectReasonableMultiplierExponent(
bisect_min, bisect_max, lhs_params, lhs_data, rhs_params, rhs_data,
dst_params, &dst_data, params, &cpu_backend_context);
}
std::vector<DstScalar> expected;
if (use_golden) {
EXPECT_EQ(golden.size(), dst_data.size());
expected = golden;
} else {
expected.resize(dst_data.size());
ReferenceGemm(lhs_params, lhs_data.data(), rhs_params, rhs_data.data(),
dst_params, expected.data(), params, &cpu_backend_context);
}
PerformGemmThenCompareResultsThenAgainWithClamping(
lhs_params, lhs_data, rhs_params, rhs_data, dst_params, &dst_data, params,
expected, &cpu_backend_context);
if (!use_golden && !std::is_floating_point<AccumScalar>::value) {
std::vector<AccumScalar> multiplier_fixedpoint_perchannel(rows);
std::vector<int> multiplier_exponent_perchannel(rows);
for (int i = 0; i < rows; i++) {
multiplier_fixedpoint_perchannel[i] =
kMultiplierFixedpointMin +
(random_engine() %
(kMultiplierFixedpointMax + 1 - kMultiplierFixedpointMin));
const int exponent_min = params.multiplier_exponent - 2;
const int exponent_max = params.multiplier_exponent + 2;
multiplier_exponent_perchannel[i] =
exponent_min + (random_engine() % (exponent_max + 1 - exponent_min));
}
static constexpr QuantizationFlavor perchannel_flavor =
std::is_floating_point<AccumScalar>::value
? QuantizationFlavor::kFloatingPoint
: QuantizationFlavor::kIntegerWithPerRowMultiplier;
GemmParams<AccumScalar, DstScalar, perchannel_flavor> params_perchannel;
params_perchannel.bias = params.bias;
params_perchannel.clamp_min = params.clamp_min;
params_perchannel.clamp_max = params.clamp_max;
params_perchannel.multiplier_fixedpoint_perchannel =
multiplier_fixedpoint_perchannel.data();
params_perchannel.multiplier_exponent_perchannel =
multiplier_exponent_perchannel.data();
ReferenceGemm(lhs_params, lhs_data.data(), rhs_params, rhs_data.data(),
dst_params, expected.data(), params_perchannel,
&cpu_backend_context);
PerformGemmThenCompareResultsThenAgainWithClamping(
lhs_params, lhs_data, rhs_params, rhs_data, dst_params, &dst_data,
params_perchannel, expected, &cpu_backend_context);
}
}
template <typename LhsScalar, typename RhsScalar, typename AccumScalar,
typename DstScalar>
void TestMaybeValidGemm(int lhs_rows, int lhs_cols, int rhs_rows, int rhs_cols,
int dst_rows, int dst_cols) {
CpuBackendContext cpu_backend_context;
std::default_random_engine random_engine;
cpu_backend_context.SetMaxNumThreads(1 + (random_engine() % 8));
bool use_caching = static_cast<bool>(random_engine() % 2);
cpu_backend_context.SetUseCaching(use_caching);
std::vector<LhsScalar> lhs_data;
std::vector<RhsScalar> rhs_data;
std::vector<AccumScalar> bias_data;
std::vector<DstScalar> dst_data;
MakeDeterministicPseudoRandomVector(lhs_rows * lhs_cols, &lhs_data);
MakeDeterministicPseudoRandomVector(rhs_rows * rhs_cols, &rhs_data);
MakeDeterministicPseudoRandomVector(dst_rows, &bias_data);
MakeDeterministicPseudoRandomVector(dst_rows * dst_cols, &dst_data);
MatrixParams<LhsScalar> lhs_params;
lhs_params.order = cpu_backend_gemm::Order::kRowMajor;
lhs_params.rows = lhs_rows;
lhs_params.cols = lhs_cols;
if (!std::is_floating_point<LhsScalar>::value &&
(!std::is_same<LhsScalar, int8_t>::value &&
!std::is_same<RhsScalar, int16_t>::value)) {
lhs_params.zero_point = 1;
}
MatrixParams<RhsScalar> rhs_params;
rhs_params.order = cpu_backend_gemm::Order::kColMajor;
rhs_params.rows = rhs_rows;
rhs_params.cols = rhs_cols;
if (!std::is_floating_point<RhsScalar>::value &&
(!std::is_same<LhsScalar, int8_t>::value &&
!std::is_same<RhsScalar, int16_t>::value)) {
rhs_params.zero_point = 1;
}
MatrixParams<DstScalar> dst_params;
dst_params.order = cpu_backend_gemm::Order::kColMajor;
dst_params.rows = dst_rows;
dst_params.cols = dst_cols;
if (!std::is_floating_point<DstScalar>::value &&
(!std::is_same<LhsScalar, int8_t>::value &&
!std::is_same<RhsScalar, int16_t>::value)) {
dst_params.zero_point = 1;
}
GemmParams<AccumScalar, DstScalar> params;
params.bias = bias_data.data();
static constexpr std::int32_t kMultiplierFixedpointMin = 1234567890;
if (!std::is_floating_point<AccumScalar>::value) {
params.multiplier_fixedpoint = kMultiplierFixedpointMin;
int bisect_min = -8 * static_cast<int>(sizeof(AccumScalar));
int bisect_max = 0;
params.multiplier_exponent = BisectReasonableMultiplierExponent(
bisect_min, bisect_max, lhs_params, lhs_data, rhs_params, rhs_data,
dst_params, &dst_data, params, &cpu_backend_context);
}
Gemm(lhs_params, lhs_data.data(), rhs_params, rhs_data.data(), dst_params,
dst_data.data(), params, &cpu_backend_context);
}
TEST(CpuBackendGemmSimpleTestAgainstGolden, Float) {
TestSomeGemm<float, float, float, float>(2, 3, 4,
{15, 34, 33, 79, 51, 124, 69, 169});
}
TEST(CpuBackendGemmSimpleTestAgainstGolden, Uint8) {
TestSomeGemm<std::uint8_t, std::uint8_t, std::int32_t, std::uint8_t>(
5, 2, 3, {2, 4, 6, 7, 9, 3, 10, 16, 22, 29, 4, 15, 26, 37, 48});
}
TEST(CpuBackendGemmSimpleTestAgainstGolden, Int8) {
TestSomeGemm<std::int8_t, std::int8_t, std::int32_t, std::int8_t>(
2, 6, 3, {13, 32, 31, 81, 50, 127});
}
TEST(CpuBackendGemmInvalidGemmTest, Float) {
TestMaybeValidGemm<float, float, float, float>(2, 3, 3, 4, 2, 4);
#if !defined(TARGET_IPHONE_SIMULATOR) && !defined(TARGET_OS_IPHONE)
ASSERT_DEBUG_DEATH(
(TestMaybeValidGemm<float, float, float, float>(2, 3, 3, 0, 2, 4)), "");
ASSERT_DEBUG_DEATH(
(TestMaybeValidGemm<float, float, float, float>(2, 3, 9, 4, 2, 4)), "");
#endif
}
TEST(CpuBackendGemmSimpleTestAgainstGolden, Int8Int16) {
TestSomeGemm<std::int8_t, std::int8_t, std::int32_t, std::int16_t>(
3, 5, 4, {19, 48, 77, 48, 149, 250, 76, 249, 422, 105, 350, 595});
}
template <typename tLhsScalar, typename tRhsScalar, typename tAccumScalar,
typename tDstScalar>
struct TypesTuple {
using LhsScalar = tLhsScalar;
using RhsScalar = tRhsScalar;
using AccumScalar = tAccumScalar;
using DstScalar = tDstScalar;
};
template <typename TypesTupleType>
void TestRandomGemms(const std::vector<std::tuple<int, int, int>>& shapes) {
using LhsScalar = typename TypesTupleType::LhsScalar;
using RhsScalar = typename TypesTupleType::RhsScalar;
using AccumScalar = typename TypesTupleType::AccumScalar;
using DstScalar = typename TypesTupleType::DstScalar;
for (const auto& shape : shapes) {
int rows = std::get<0>(shape);
int depth = std::get<1>(shape);
int cols = std::get<2>(shape);
TestSomeGemm<LhsScalar, RhsScalar, AccumScalar, DstScalar>(rows, depth,
cols, {});
}
}
template <typename TypesTupleType>
class CpuBackendGemmTest : public testing::Test {};
TYPED_TEST_SUITE_P(CpuBackendGemmTest);
typedef ::testing::Types<
TypesTuple<float, float, float, float>,
TypesTuple<std::uint8_t, std::uint8_t, std::int32_t, std::uint8_t>,
TypesTuple<std::int8_t, std::int8_t, std::int32_t, std::int8_t>,
TypesTuple<std::int8_t, std::int8_t, std::int32_t, std::int16_t>,
TypesTuple<std::int8_t, std::int16_t, std::int32_t, std::int16_t>,
TypesTuple<std::uint8_t, std::uint8_t, std::int32_t, std::int8_t>>
CpuBackendGemmTestInstantiations;
TYPED_TEST_SUITE(CpuBackendGemmTest, CpuBackendGemmTestInstantiations);
TYPED_TEST(CpuBackendGemmTest, Square) {
std::vector<std::tuple<int, int, int>> shapes;
for (int size = 1; size < 50; size++) {
shapes.push_back(std::make_tuple(size, size, size));
}
TestRandomGemms<TypeParam>(shapes);
}
TYPED_TEST(CpuBackendGemmTest, SquarePowerOfTwo) {
std::vector<std::tuple<int, int, int>> shapes;
for (int size = 64; size <= 128; size *= 2) {
shapes.push_back(std::make_tuple(size, size, size));
}
TestRandomGemms<TypeParam>(shapes);
}
TYPED_TEST(CpuBackendGemmTest, MatrixTimesVector) {
std::vector<std::tuple<int, int, int>> shapes;
for (int size = 1; size < 200; size++) {
shapes.push_back(std::make_tuple(size, size, 1));
}
TestRandomGemms<TypeParam>(shapes);
}
TYPED_TEST(CpuBackendGemmTest, VectorTimesMatrix) {
std::vector<std::tuple<int, int, int>> shapes;
for (int size = 1; size < 200; size++) {
shapes.push_back(std::make_tuple(1, size, size));
}
TestRandomGemms<TypeParam>(shapes);
}
TYPED_TEST(CpuBackendGemmTest, MatrixTimesNarrow) {
std::vector<std::tuple<int, int, int>> shapes;
for (int size = 1; size < 50; size++) {
shapes.push_back(std::make_tuple(size, size, 2));
shapes.push_back(std::make_tuple(size, size, 3));
shapes.push_back(std::make_tuple(size, size, 4));
shapes.push_back(std::make_tuple(size, size, 8));
}
TestRandomGemms<TypeParam>(shapes);
}
TYPED_TEST(CpuBackendGemmTest, Rectangular) {
std::vector<std::tuple<int, int, int>> shapes;
for (int size = 1; size < 50; size++) {
shapes.push_back(std::make_tuple(size, size + 5, size + 1));
shapes.push_back(std::make_tuple(size + 10, size + 2, size));
}
TestRandomGemms<TypeParam>(shapes);
}
TYPED_TEST(CpuBackendGemmTest, HighlyRectangular) {
std::vector<std::tuple<int, int, int>> shapes;
for (int size = 1; size <= 1000; size *= 10) {
shapes.push_back(std::make_tuple(size, 10, 10));
shapes.push_back(std::make_tuple(10, size, 10));
shapes.push_back(std::make_tuple(10, 10, size));
}
TestRandomGemms<TypeParam>(shapes);
}
TYPED_TEST(CpuBackendGemmTest, InnerProduct) {
std::vector<std::tuple<int, int, int>> shapes;
for (int size = 1; size < 200; size++) {
shapes.push_back(std::make_tuple(1, size, 1));
}
TestRandomGemms<TypeParam>(shapes);
}
TYPED_TEST(CpuBackendGemmTest, OuterProduct) {
std::vector<std::tuple<int, int, int>> shapes;
for (int size = 1; size < 100; size++) {
shapes.push_back(std::make_tuple(size, 1, size));
}
TestRandomGemms<TypeParam>(shapes);
}
}  // namespace
}  // namespace tflite
65d98835-6fe6-493b-8c8c-b0b35aa47eae | cpp | abseil/abseil-cpp | compare | absl/types/compare.h | absl/types/compare_test.cc | #ifndef ABSL_TYPES_COMPARE_H_
#define ABSL_TYPES_COMPARE_H_
#include "absl/base/config.h"
#ifdef ABSL_USES_STD_ORDERING
#include <compare>
#include <type_traits>
#include "absl/meta/type_traits.h"
#else
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <type_traits>
#include "absl/base/attributes.h"
#include "absl/base/macros.h"
#include "absl/meta/type_traits.h"
#endif  // ABSL_USES_STD_ORDERING
namespace absl {
ABSL_NAMESPACE_BEGIN
#ifdef ABSL_USES_STD_ORDERING
using std::partial_ordering;
using std::strong_ordering;
using std::weak_ordering;
#else
namespace compare_internal {
using value_type = int8_t;
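// A type whose only valid constructor argument is the literal `0`. With
// Clang's enable_if attribute the constraint is checked directly; the
// fallback overload relies on `0` (and only `0`) converting to a null
// pointer-to-member, while the catch-all template turns nullptr or other
// integral arguments into a static_assert failure.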
class OnlyLiteralZero {
public:
#if ABSL_HAVE_ATTRIBUTE(enable_if)
constexpr OnlyLiteralZero(int n)
__attribute__((enable_if(n == 0, "Only literal `0` is allowed."))) {}
#else
constexpr OnlyLiteralZero(int OnlyLiteralZero::*) noexcept {}
#endif  // ABSL_HAVE_ATTRIBUTE(enable_if)
template <typename T, typename = typename std::enable_if<
std::is_same<T, std::nullptr_t>::value ||
(std::is_integral<T>::value &&
!std::is_same<T, int>::value)>::type>
OnlyLiteralZero(T) {
static_assert(sizeof(T) < 0, "Only literal `0` is allowed.");
}
};
enum class eq : value_type {
equal = 0,
equivalent = equal,
nonequal = 1,
nonequivalent = nonequal,
};
enum class ord : value_type { less = -1, greater = 1 };
enum class ncmp : value_type { unordered = -127 };
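// Pre-C++17 workaround: without inline variables, the less/equivalent/...
// constants live as static members of a templated base class so that each
// ordering type still has exactly one definition across translation units.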
#ifdef __cpp_inline_variables
#define ABSL_COMPARE_INLINE_BASECLASS_DECL(name) static_assert(true, "")
#define ABSL_COMPARE_INLINE_SUBCLASS_DECL(type, name) \
static const type name
#define ABSL_COMPARE_INLINE_INIT(type, name, init) \
inline constexpr type type::name(init)
#else
#define ABSL_COMPARE_INLINE_BASECLASS_DECL(name) \
ABSL_CONST_INIT static const T name
#define ABSL_COMPARE_INLINE_SUBCLASS_DECL(type, name) static_assert(true, "")
#define ABSL_COMPARE_INLINE_INIT(type, name, init) \
template <typename T> \
const T compare_internal::type##_base<T>::name(init)
#endif  // __cpp_inline_variables
template <typename T>
struct partial_ordering_base {
ABSL_COMPARE_INLINE_BASECLASS_DECL(less);
ABSL_COMPARE_INLINE_BASECLASS_DECL(equivalent);
ABSL_COMPARE_INLINE_BASECLASS_DECL(greater);
ABSL_COMPARE_INLINE_BASECLASS_DECL(unordered);
};
template <typename T>
struct weak_ordering_base {
ABSL_COMPARE_INLINE_BASECLASS_DECL(less);
ABSL_COMPARE_INLINE_BASECLASS_DECL(equivalent);
ABSL_COMPARE_INLINE_BASECLASS_DECL(greater);
};
template <typename T>
struct strong_ordering_base {
ABSL_COMPARE_INLINE_BASECLASS_DECL(less);
ABSL_COMPARE_INLINE_BASECLASS_DECL(equal);
ABSL_COMPARE_INLINE_BASECLASS_DECL(equivalent);
ABSL_COMPARE_INLINE_BASECLASS_DECL(greater);
};
}  // namespace compare_internal
class partial_ordering
: public compare_internal::partial_ordering_base<partial_ordering> {
explicit constexpr partial_ordering(compare_internal::eq v) noexcept
: value_(static_cast<compare_internal::value_type>(v)) {}
explicit constexpr partial_ordering(compare_internal::ord v) noexcept
: value_(static_cast<compare_internal::value_type>(v)) {}
explicit constexpr partial_ordering(compare_internal::ncmp v) noexcept
: value_(static_cast<compare_internal::value_type>(v)) {}
friend struct compare_internal::partial_ordering_base<partial_ordering>;
constexpr bool is_ordered() const noexcept {
return value_ !=
compare_internal::value_type(compare_internal::ncmp::unordered);
}
public:
ABSL_COMPARE_INLINE_SUBCLASS_DECL(partial_ordering, less);
ABSL_COMPARE_INLINE_SUBCLASS_DECL(partial_ordering, equivalent);
ABSL_COMPARE_INLINE_SUBCLASS_DECL(partial_ordering, greater);
ABSL_COMPARE_INLINE_SUBCLASS_DECL(partial_ordering, unordered);
friend constexpr bool operator==(
partial_ordering v, compare_internal::OnlyLiteralZero) noexcept {
return v.is_ordered() && v.value_ == 0;
}
friend constexpr bool operator!=(
partial_ordering v, compare_internal::OnlyLiteralZero) noexcept {
return !v.is_ordered() || v.value_ != 0;
}
friend constexpr bool operator<(
partial_ordering v, compare_internal::OnlyLiteralZero) noexcept {
return v.is_ordered() && v.value_ < 0;
}
friend constexpr bool operator<=(
partial_ordering v, compare_internal::OnlyLiteralZero) noexcept {
return v.is_ordered() && v.value_ <= 0;
}
friend constexpr bool operator>(
partial_ordering v, compare_internal::OnlyLiteralZero) noexcept {
return v.is_ordered() && v.value_ > 0;
}
friend constexpr bool operator>=(
partial_ordering v, compare_internal::OnlyLiteralZero) noexcept {
return v.is_ordered() && v.value_ >= 0;
}
friend constexpr bool operator==(compare_internal::OnlyLiteralZero,
partial_ordering v) noexcept {
return v.is_ordered() && 0 == v.value_;
}
friend constexpr bool operator!=(compare_internal::OnlyLiteralZero,
partial_ordering v) noexcept {
return !v.is_ordered() || 0 != v.value_;
}
friend constexpr bool operator<(compare_internal::OnlyLiteralZero,
partial_ordering v) noexcept {
return v.is_ordered() && 0 < v.value_;
}
friend constexpr bool operator<=(compare_internal::OnlyLiteralZero,
partial_ordering v) noexcept {
return v.is_ordered() && 0 <= v.value_;
}
friend constexpr bool operator>(compare_internal::OnlyLiteralZero,
partial_ordering v) noexcept {
return v.is_ordered() && 0 > v.value_;
}
friend constexpr bool operator>=(compare_internal::OnlyLiteralZero,
partial_ordering v) noexcept {
return v.is_ordered() && 0 >= v.value_;
}
friend constexpr bool operator==(partial_ordering v1,
partial_ordering v2) noexcept {
return v1.value_ == v2.value_;
}
friend constexpr bool operator!=(partial_ordering v1,
partial_ordering v2) noexcept {
return v1.value_ != v2.value_;
}
private:
compare_internal::value_type value_;
};
ABSL_COMPARE_INLINE_INIT(partial_ordering, less, compare_internal::ord::less);
ABSL_COMPARE_INLINE_INIT(partial_ordering, equivalent,
compare_internal::eq::equivalent);
ABSL_COMPARE_INLINE_INIT(partial_ordering, greater,
compare_internal::ord::greater);
ABSL_COMPARE_INLINE_INIT(partial_ordering, unordered,
compare_internal::ncmp::unordered);
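// weak_ordering has no unordered state, so every comparison against the
// literal 0 is meaningful; it converts implicitly to partial_ordering.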
class weak_ordering
: public compare_internal::weak_ordering_base<weak_ordering> {
explicit constexpr weak_ordering(compare_internal::eq v) noexcept
: value_(static_cast<compare_internal::value_type>(v)) {}
explicit constexpr weak_ordering(compare_internal::ord v) noexcept
: value_(static_cast<compare_internal::value_type>(v)) {}
friend struct compare_internal::weak_ordering_base<weak_ordering>;
public:
ABSL_COMPARE_INLINE_SUBCLASS_DECL(weak_ordering, less);
ABSL_COMPARE_INLINE_SUBCLASS_DECL(weak_ordering, equivalent);
ABSL_COMPARE_INLINE_SUBCLASS_DECL(weak_ordering, greater);
constexpr operator partial_ordering() const noexcept {
return value_ == 0 ? partial_ordering::equivalent
: (value_ < 0 ? partial_ordering::less
: partial_ordering::greater);
}
friend constexpr bool operator==(
weak_ordering v, compare_internal::OnlyLiteralZero) noexcept {
return v.value_ == 0;
}
friend constexpr bool operator!=(
weak_ordering v, compare_internal::OnlyLiteralZero) noexcept {
return v.value_ != 0;
}
friend constexpr bool operator<(
weak_ordering v, compare_internal::OnlyLiteralZero) noexcept {
return v.value_ < 0;
}
friend constexpr bool operator<=(
weak_ordering v, compare_internal::OnlyLiteralZero) noexcept {
return v.value_ <= 0;
}
friend constexpr bool operator>(
weak_ordering v, compare_internal::OnlyLiteralZero) noexcept {
return v.value_ > 0;
}
friend constexpr bool operator>=(
weak_ordering v, compare_internal::OnlyLiteralZero) noexcept {
return v.value_ >= 0;
}
friend constexpr bool operator==(compare_internal::OnlyLiteralZero,
weak_ordering v) noexcept {
return 0 == v.value_;
}
friend constexpr bool operator!=(compare_internal::OnlyLiteralZero,
weak_ordering v) noexcept {
return 0 != v.value_;
}
friend constexpr bool operator<(compare_internal::OnlyLiteralZero,
weak_ordering v) noexcept {
return 0 < v.value_;
}
friend constexpr bool operator<=(compare_internal::OnlyLiteralZero,
weak_ordering v) noexcept {
return 0 <= v.value_;
}
friend constexpr bool operator>(compare_internal::OnlyLiteralZero,
weak_ordering v) noexcept {
return 0 > v.value_;
}
friend constexpr bool operator>=(compare_internal::OnlyLiteralZero,
weak_ordering v) noexcept {
return 0 >= v.value_;
}
friend constexpr bool operator==(weak_ordering v1,
weak_ordering v2) noexcept {
return v1.value_ == v2.value_;
}
friend constexpr bool operator!=(weak_ordering v1,
weak_ordering v2) noexcept {
return v1.value_ != v2.value_;
}
private:
compare_internal::value_type value_;
};
ABSL_COMPARE_INLINE_INIT(weak_ordering, less, compare_internal::ord::less);
ABSL_COMPARE_INLINE_INIT(weak_ordering, equivalent,
compare_internal::eq::equivalent);
ABSL_COMPARE_INLINE_INIT(weak_ordering, greater,
compare_internal::ord::greater);
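// strong_ordering adds `equal` as a synonym for `equivalent` (the two
// share a value above) and converts implicitly to both weaker orderings.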
class strong_ordering
: public compare_internal::strong_ordering_base<strong_ordering> {
explicit constexpr strong_ordering(compare_internal::eq v) noexcept
: value_(static_cast<compare_internal::value_type>(v)) {}
explicit constexpr strong_ordering(compare_internal::ord v) noexcept
: value_(static_cast<compare_internal::value_type>(v)) {}
friend struct compare_internal::strong_ordering_base<strong_ordering>;
public:
ABSL_COMPARE_INLINE_SUBCLASS_DECL(strong_ordering, less);
ABSL_COMPARE_INLINE_SUBCLASS_DECL(strong_ordering, equal);
ABSL_COMPARE_INLINE_SUBCLASS_DECL(strong_ordering, equivalent);
ABSL_COMPARE_INLINE_SUBCLASS_DECL(strong_ordering, greater);
constexpr operator partial_ordering() const noexcept {
return value_ == 0 ? partial_ordering::equivalent
: (value_ < 0 ? partial_ordering::less
: partial_ordering::greater);
}
constexpr operator weak_ordering() const noexcept {
return value_ == 0
? weak_ordering::equivalent
: (value_ < 0 ? weak_ordering::less : weak_ordering::greater);
}
friend constexpr bool operator==(
strong_ordering v, compare_internal::OnlyLiteralZero) noexcept {
return v.value_ == 0;
}
friend constexpr bool operator!=(
strong_ordering v, compare_internal::OnlyLiteralZero) noexcept {
return v.value_ != 0;
}
friend constexpr bool operator<(
strong_ordering v, compare_internal::OnlyLiteralZero) noexcept {
return v.value_ < 0;
}
friend constexpr bool operator<=(
strong_ordering v, compare_internal::OnlyLiteralZero) noexcept {
return v.value_ <= 0;
}
friend constexpr bool operator>(
strong_ordering v, compare_internal::OnlyLiteralZero) noexcept {
return v.value_ > 0;
}
friend constexpr bool operator>=(
strong_ordering v, compare_internal::OnlyLiteralZero) noexcept {
return v.value_ >= 0;
}
friend constexpr bool operator==(compare_internal::OnlyLiteralZero,
strong_ordering v) noexcept {
return 0 == v.value_;
}
friend constexpr bool operator!=(compare_internal::OnlyLiteralZero,
strong_ordering v) noexcept {
return 0 != v.value_;
}
friend constexpr bool operator<(compare_internal::OnlyLiteralZero,
strong_ordering v) noexcept {
return 0 < v.value_;
}
friend constexpr bool operator<=(compare_internal::OnlyLiteralZero,
strong_ordering v) noexcept {
return 0 <= v.value_;
}
friend constexpr bool operator>(compare_internal::OnlyLiteralZero,
strong_ordering v) noexcept {
return 0 > v.value_;
}
friend constexpr bool operator>=(compare_internal::OnlyLiteralZero,
strong_ordering v) noexcept {
return 0 >= v.value_;
}
friend constexpr bool operator==(strong_ordering v1,
strong_ordering v2) noexcept {
return v1.value_ == v2.value_;
}
friend constexpr bool operator!=(strong_ordering v1,
strong_ordering v2) noexcept {
return v1.value_ != v2.value_;
}
private:
compare_internal::value_type value_;
};
ABSL_COMPARE_INLINE_INIT(strong_ordering, less, compare_internal::ord::less);
ABSL_COMPARE_INLINE_INIT(strong_ordering, equal, compare_internal::eq::equal);
ABSL_COMPARE_INLINE_INIT(strong_ordering, equivalent,
compare_internal::eq::equivalent);
ABSL_COMPARE_INLINE_INIT(strong_ordering, greater,
compare_internal::ord::greater);
#undef ABSL_COMPARE_INLINE_BASECLASS_DECL
#undef ABSL_COMPARE_INLINE_SUBCLASS_DECL
#undef ABSL_COMPARE_INLINE_INIT
#endif  // ABSL_USES_STD_ORDERING
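// Adapters (used by containers such as absl's btree) that accept either a
// boolean less-than comparator or a three-way comparator returning
// weak_ordering: do_less_than_comparison() normalizes both to bool, and
// do_three_way_comparison() synthesizes an ordering from a boolean
// comparator by invoking it twice.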
namespace compare_internal {
template <typename BoolT,
absl::enable_if_t<std::is_same<bool, BoolT>::value, int> = 0>
constexpr bool compare_result_as_less_than(const BoolT r) { return r; }
constexpr bool compare_result_as_less_than(const absl::weak_ordering r) {
return r < 0;
}
template <typename Compare, typename K, typename LK>
constexpr bool do_less_than_comparison(const Compare &compare, const K &x,
const LK &y) {
return compare_result_as_less_than(compare(x, y));
}
template <typename Int,
absl::enable_if_t<std::is_same<int, Int>::value, int> = 0>
constexpr absl::weak_ordering compare_result_as_ordering(const Int c) {
return c < 0 ? absl::weak_ordering::less
: c == 0 ? absl::weak_ordering::equivalent
: absl::weak_ordering::greater;
}
constexpr absl::weak_ordering compare_result_as_ordering(
const absl::weak_ordering c) {
return c;
}
template <
typename Compare, typename K, typename LK,
absl::enable_if_t<!std::is_same<bool, absl::result_of_t<Compare(
const K &, const LK &)>>::value,
int> = 0>
constexpr absl::weak_ordering do_three_way_comparison(const Compare &compare,
const K &x, const LK &y) {
return compare_result_as_ordering(compare(x, y));
}
template <
typename Compare, typename K, typename LK,
absl::enable_if_t<std::is_same<bool, absl::result_of_t<Compare(
const K &, const LK &)>>::value,
int> = 0>
constexpr absl::weak_ordering do_three_way_comparison(const Compare &compare,
const K &x, const LK &y) {
return compare(x, y) ? absl::weak_ordering::less
: compare(y, x) ? absl::weak_ordering::greater
: absl::weak_ordering::equivalent;
}
}  // namespace compare_internal
ABSL_NAMESPACE_END
}  // namespace absl
#endif  // ABSL_TYPES_COMPARE_H_
#include "gtest/gtest.h"
#include "absl/base/casts.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace {
bool Identity(bool b) { return b; }
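// Launders a comparison result through a function call, presumably so the
// compiler evaluates the operator overloads under test instead of folding
// or warning on constant comparisons against the literal 0.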
TEST(Compare, PartialOrdering) {
EXPECT_TRUE(Identity(partial_ordering::less < 0));
EXPECT_TRUE(Identity(0 > partial_ordering::less));
EXPECT_TRUE(Identity(partial_ordering::less <= 0));
EXPECT_TRUE(Identity(0 >= partial_ordering::less));
EXPECT_TRUE(Identity(partial_ordering::equivalent == 0));
EXPECT_TRUE(Identity(0 == partial_ordering::equivalent));
EXPECT_TRUE(Identity(partial_ordering::greater > 0));
EXPECT_TRUE(Identity(0 < partial_ordering::greater));
EXPECT_TRUE(Identity(partial_ordering::greater >= 0));
EXPECT_TRUE(Identity(0 <= partial_ordering::greater));
EXPECT_TRUE(Identity(partial_ordering::unordered != 0));
EXPECT_TRUE(Identity(0 != partial_ordering::unordered));
EXPECT_FALSE(Identity(partial_ordering::unordered < 0));
EXPECT_FALSE(Identity(0 < partial_ordering::unordered));
EXPECT_FALSE(Identity(partial_ordering::unordered <= 0));
EXPECT_FALSE(Identity(0 <= partial_ordering::unordered));
EXPECT_FALSE(Identity(partial_ordering::unordered > 0));
EXPECT_FALSE(Identity(0 > partial_ordering::unordered));
EXPECT_FALSE(Identity(partial_ordering::unordered >= 0));
EXPECT_FALSE(Identity(0 >= partial_ordering::unordered));
const partial_ordering values[] = {
partial_ordering::less, partial_ordering::equivalent,
partial_ordering::greater, partial_ordering::unordered};
for (const auto& lhs : values) {
for (const auto& rhs : values) {
const bool are_equal = &lhs == &rhs;
EXPECT_EQ(lhs == rhs, are_equal);
EXPECT_EQ(lhs != rhs, !are_equal);
}
}
}
TEST(Compare, WeakOrdering) {
EXPECT_TRUE(Identity(weak_ordering::less < 0));
EXPECT_TRUE(Identity(0 > weak_ordering::less));
EXPECT_TRUE(Identity(weak_ordering::less <= 0));
EXPECT_TRUE(Identity(0 >= weak_ordering::less));
EXPECT_TRUE(Identity(weak_ordering::equivalent == 0));
EXPECT_TRUE(Identity(0 == weak_ordering::equivalent));
EXPECT_TRUE(Identity(weak_ordering::greater > 0));
EXPECT_TRUE(Identity(0 < weak_ordering::greater));
EXPECT_TRUE(Identity(weak_ordering::greater >= 0));
EXPECT_TRUE(Identity(0 <= weak_ordering::greater));
const weak_ordering values[] = {
weak_ordering::less, weak_ordering::equivalent, weak_ordering::greater};
for (const auto& lhs : values) {
for (const auto& rhs : values) {
const bool are_equal = &lhs == &rhs;
EXPECT_EQ(lhs == rhs, are_equal);
EXPECT_EQ(lhs != rhs, !are_equal);
}
}
}
TEST(Compare, StrongOrdering) {
EXPECT_TRUE(Identity(strong_ordering::less < 0));
EXPECT_TRUE(Identity(0 > strong_ordering::less));
EXPECT_TRUE(Identity(strong_ordering::less <= 0));
EXPECT_TRUE(Identity(0 >= strong_ordering::less));
EXPECT_TRUE(Identity(strong_ordering::equal == 0));
EXPECT_TRUE(Identity(0 == strong_ordering::equal));
EXPECT_TRUE(Identity(strong_ordering::equivalent == 0));
EXPECT_TRUE(Identity(0 == strong_ordering::equivalent));
EXPECT_TRUE(Identity(strong_ordering::greater > 0));
EXPECT_TRUE(Identity(0 < strong_ordering::greater));
EXPECT_TRUE(Identity(strong_ordering::greater >= 0));
EXPECT_TRUE(Identity(0 <= strong_ordering::greater));
const strong_ordering values[] = {
strong_ordering::less, strong_ordering::equal, strong_ordering::greater};
for (const auto& lhs : values) {
for (const auto& rhs : values) {
const bool are_equal = &lhs == &rhs;
EXPECT_EQ(lhs == rhs, are_equal);
EXPECT_EQ(lhs != rhs, !are_equal);
}
}
EXPECT_TRUE(Identity(strong_ordering::equivalent == strong_ordering::equal));
}
TEST(Compare, Conversions) {
EXPECT_TRUE(
Identity(implicit_cast<partial_ordering>(weak_ordering::less) != 0));
EXPECT_TRUE(
Identity(implicit_cast<partial_ordering>(weak_ordering::less) < 0));
EXPECT_TRUE(
Identity(implicit_cast<partial_ordering>(weak_ordering::less) <= 0));
EXPECT_TRUE(Identity(
implicit_cast<partial_ordering>(weak_ordering::equivalent) == 0));
EXPECT_TRUE(
Identity(implicit_cast<partial_ordering>(weak_ordering::greater) != 0));
EXPECT_TRUE(
Identity(implicit_cast<partial_ordering>(weak_ordering::greater) > 0));
EXPECT_TRUE(
Identity(implicit_cast<partial_ordering>(weak_ordering::greater) >= 0));
EXPECT_TRUE(
Identity(implicit_cast<partial_ordering>(strong_ordering::less) != 0));
EXPECT_TRUE(
Identity(implicit_cast<partial_ordering>(strong_ordering::less) < 0));
EXPECT_TRUE(
Identity(implicit_cast<partial_ordering>(strong_ordering::less) <= 0));
EXPECT_TRUE(
Identity(implicit_cast<partial_ordering>(strong_ordering::equal) == 0));
EXPECT_TRUE(Identity(
implicit_cast<partial_ordering>(strong_ordering::equivalent) == 0));
EXPECT_TRUE(
Identity(implicit_cast<partial_ordering>(strong_ordering::greater) != 0));
EXPECT_TRUE(
Identity(implicit_cast<partial_ordering>(strong_ordering::greater) > 0));
EXPECT_TRUE(
Identity(implicit_cast<partial_ordering>(strong_ordering::greater) >= 0));
EXPECT_TRUE(
Identity(implicit_cast<weak_ordering>(strong_ordering::less) != 0));
EXPECT_TRUE(
Identity(implicit_cast<weak_ordering>(strong_ordering::less) < 0));
EXPECT_TRUE(
Identity(implicit_cast<weak_ordering>(strong_ordering::less) <= 0));
EXPECT_TRUE(
Identity(implicit_cast<weak_ordering>(strong_ordering::equal) == 0));
EXPECT_TRUE(
Identity(implicit_cast<weak_ordering>(strong_ordering::equivalent) == 0));
EXPECT_TRUE(
Identity(implicit_cast<weak_ordering>(strong_ordering::greater) != 0));
EXPECT_TRUE(
Identity(implicit_cast<weak_ordering>(strong_ordering::greater) > 0));
EXPECT_TRUE(
Identity(implicit_cast<weak_ordering>(strong_ordering::greater) >= 0));
}
struct WeakOrderingLess {
template <typename T>
absl::weak_ordering operator()(const T& a, const T& b) const {
return a < b ? absl::weak_ordering::less
: a == b ? absl::weak_ordering::equivalent
: absl::weak_ordering::greater;
}
};
TEST(CompareResultAsLessThan, SanityTest) {
EXPECT_FALSE(absl::compare_internal::compare_result_as_less_than(false));
EXPECT_TRUE(absl::compare_internal::compare_result_as_less_than(true));
EXPECT_TRUE(
absl::compare_internal::compare_result_as_less_than(weak_ordering::less));
EXPECT_FALSE(absl::compare_internal::compare_result_as_less_than(
weak_ordering::equivalent));
EXPECT_FALSE(absl::compare_internal::compare_result_as_less_than(
weak_ordering::greater));
}
TEST(DoLessThanComparison, SanityTest) {
std::less<int> less;
WeakOrderingLess weak;
EXPECT_TRUE(absl::compare_internal::do_less_than_comparison(less, -1, 0));
EXPECT_TRUE(absl::compare_internal::do_less_than_comparison(weak, -1, 0));
EXPECT_FALSE(absl::compare_internal::do_less_than_comparison(less, 10, 10));
EXPECT_FALSE(absl::compare_internal::do_less_than_comparison(weak, 10, 10));
EXPECT_FALSE(absl::compare_internal::do_less_than_comparison(less, 10, 5));
EXPECT_FALSE(absl::compare_internal::do_less_than_comparison(weak, 10, 5));
}
TEST(CompareResultAsOrdering, SanityTest) {
EXPECT_TRUE(
Identity(absl::compare_internal::compare_result_as_ordering(-1) < 0));
EXPECT_FALSE(
Identity(absl::compare_internal::compare_result_as_ordering(-1) == 0));
EXPECT_FALSE(
Identity(absl::compare_internal::compare_result_as_ordering(-1) > 0));
EXPECT_TRUE(Identity(absl::compare_internal::compare_result_as_ordering(
weak_ordering::less) < 0));
EXPECT_FALSE(Identity(absl::compare_internal::compare_result_as_ordering(
weak_ordering::less) == 0));
EXPECT_FALSE(Identity(absl::compare_internal::compare_result_as_ordering(
weak_ordering::less) > 0));
EXPECT_FALSE(
Identity(absl::compare_internal::compare_result_as_ordering(0) < 0));
EXPECT_TRUE(
Identity(absl::compare_internal::compare_result_as_ordering(0) == 0));
EXPECT_FALSE(
Identity(absl::compare_internal::compare_result_as_ordering(0) > 0));
EXPECT_FALSE(Identity(absl::compare_internal::compare_result_as_ordering(
weak_ordering::equivalent) < 0));
EXPECT_TRUE(Identity(absl::compare_internal::compare_result_as_ordering(
weak_ordering::equivalent) == 0));
EXPECT_FALSE(Identity(absl::compare_internal::compare_result_as_ordering(
weak_ordering::equivalent) > 0));
EXPECT_FALSE(
Identity(absl::compare_internal::compare_result_as_ordering(1) < 0));
EXPECT_FALSE(
Identity(absl::compare_internal::compare_result_as_ordering(1) == 0));
EXPECT_TRUE(
Identity(absl::compare_internal::compare_result_as_ordering(1) > 0));
EXPECT_FALSE(Identity(absl::compare_internal::compare_result_as_ordering(
weak_ordering::greater) < 0));
EXPECT_FALSE(Identity(absl::compare_internal::compare_result_as_ordering(
weak_ordering::greater) == 0));
EXPECT_TRUE(Identity(absl::compare_internal::compare_result_as_ordering(
weak_ordering::greater) > 0));
}
TEST(DoThreeWayComparison, SanityTest) {
std::less<int> less;
WeakOrderingLess weak;
EXPECT_TRUE(Identity(
absl::compare_internal::do_three_way_comparison(less, -1, 0) < 0));
EXPECT_FALSE(Identity(
absl::compare_internal::do_three_way_comparison(less, -1, 0) == 0));
EXPECT_FALSE(Identity(
absl::compare_internal::do_three_way_comparison(less, -1, 0) > 0));
EXPECT_TRUE(Identity(
absl::compare_internal::do_three_way_comparison(weak, -1, 0) < 0));
EXPECT_FALSE(Identity(
absl::compare_internal::do_three_way_comparison(weak, -1, 0) == 0));
EXPECT_FALSE(Identity(
absl::compare_internal::do_three_way_comparison(weak, -1, 0) > 0));
EXPECT_FALSE(Identity(
absl::compare_internal::do_three_way_comparison(less, 10, 10) < 0));
EXPECT_TRUE(Identity(
absl::compare_internal::do_three_way_comparison(less, 10, 10) == 0));
EXPECT_FALSE(Identity(
absl::compare_internal::do_three_way_comparison(less, 10, 10) > 0));
EXPECT_FALSE(Identity(
absl::compare_internal::do_three_way_comparison(weak, 10, 10) < 0));
EXPECT_TRUE(Identity(
absl::compare_internal::do_three_way_comparison(weak, 10, 10) == 0));
EXPECT_FALSE(Identity(
absl::compare_internal::do_three_way_comparison(weak, 10, 10) > 0));
EXPECT_FALSE(Identity(
absl::compare_internal::do_three_way_comparison(less, 10, 5) < 0));
EXPECT_FALSE(Identity(
absl::compare_internal::do_three_way_comparison(less, 10, 5) == 0));
EXPECT_TRUE(Identity(
absl::compare_internal::do_three_way_comparison(less, 10, 5) > 0));
EXPECT_FALSE(Identity(
absl::compare_internal::do_three_way_comparison(weak, 10, 5) < 0));
EXPECT_FALSE(Identity(
absl::compare_internal::do_three_way_comparison(weak, 10, 5) == 0));
EXPECT_TRUE(Identity(
absl::compare_internal::do_three_way_comparison(weak, 10, 5) > 0));
}
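// Note (added; an assumption about the guard below): the static_asserts in the
// next test require the ordering constants to be usable in constant
// expressions, which is only guaranteed when inline variables are available,
// hence the __cpp_inline_variables feature-test guard.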
#ifdef __cpp_inline_variables
TEST(Compare, StaticAsserts) {
static_assert(partial_ordering::less < 0, "");
static_assert(partial_ordering::equivalent == 0, "");
static_assert(partial_ordering::greater > 0, "");
static_assert(partial_ordering::unordered != 0, "");
static_assert(weak_ordering::less < 0, "");
static_assert(weak_ordering::equivalent == 0, "");
static_assert(weak_ordering::greater > 0, "");
static_assert(strong_ordering::less < 0, "");
static_assert(strong_ordering::equal == 0, "");
static_assert(strong_ordering::equivalent == 0, "");
static_assert(strong_ordering::greater > 0, "");
}
#endif
}
ABSL_NAMESPACE_END
} | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/types/compare.h | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/types/compare_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4 |
18a5dc02-a8ff-4ba0-826b-14f5980d794c | cpp | tensorflow/tensorflow | resource_mgr | tensorflow/core/framework/resource_mgr.cc | tensorflow/core/framework/resource_mgr_test.cc | #include "tensorflow/core/framework/resource_mgr.h"
#include <atomic>
#include <memory>
#include <variant>
#include "tensorflow/core/framework/device_attributes.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/gtl/map_util.h"
#include "tensorflow/core/lib/strings/scanner.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/lib/strings/stringprintf.h"
#include "tensorflow/core/platform/demangle.h"
#include "tensorflow/core/platform/stacktrace.h"
namespace tensorflow {
ResourceHandle MakeResourceHandle(
const string& container, const string& name, const DeviceBase& device,
const TypeIndex& type_index,
const std::vector<DtypeAndPartialTensorShape>& dtypes_and_shapes,
const absl::optional<ManagedStackTrace>& definition_stack_trace) {
ResourceHandle result;
result.set_device(device.name());
result.set_container(container);
result.set_definition_stack_trace(definition_stack_trace);
if (name == ResourceHandle::ANONYMOUS_NAME) {
result.set_name(
strings::StrCat("_AnonymousVar", ResourceHandle::GenerateUniqueId()));
} else {
result.set_name(name);
}
result.set_hash_code(type_index.hash_code());
result.set_maybe_type_name(type_index.name());
result.set_dtypes_and_shapes(dtypes_and_shapes);
return result;
}
Status MakeResourceHandleToOutput(OpKernelContext* context, int output_index,
const string& container, const string& name,
const TypeIndex& type_index) {
Tensor* handle;
TF_RETURN_IF_ERROR(
context->allocate_output(output_index, TensorShape({}), &handle));
handle->scalar<ResourceHandle>()() =
MakeResourceHandle(container, name, *context->device(), type_index);
return absl::OkStatus();
}
namespace internal {
Status ValidateDevice(OpKernelContext* ctx, const ResourceHandle& p) {
if (ctx->device()->attributes().name() != p.device()) {
return errors::InvalidArgument(
"Trying to access resource ", p.name(), " located in device ",
p.device(), " from device ", ctx->device()->attributes().name());
}
return absl::OkStatus();
}
}
Status ResourceMgr::InsertDebugTypeName(uint64 hash_code,
const string& type_name) {
auto iter = debug_type_names_.emplace(hash_code, type_name);
if (iter.first->second != type_name) {
return errors::AlreadyExists("Duplicate hash code found for type ",
type_name);
}
return absl::OkStatus();
}
const char* ResourceMgr::DebugTypeName(uint64 hash_code) const {
auto type_name_iter = debug_type_names_.find(hash_code);
if (type_name_iter == debug_type_names_.end()) {
return "<unknown>";
} else {
return type_name_iter->second.c_str();
}
}
ResourceMgr::ResourceAndName::ResourceAndName() : name(nullptr) {}
ResourceMgr::ResourceAndName::ResourceAndName(const string& name)
: name(std::make_unique<string>(name)) {}
core::RefCountPtr<ResourceBase> ResourceMgr::ResourceAndName::GetResource()
const {
if (std::holds_alternative<core::RefCountPtr<ResourceBase>>(resource)) {
ResourceBase* ptr =
std::get<core::RefCountPtr<ResourceBase>>(resource).get();
ptr->Ref();
return core::RefCountPtr<ResourceBase>(ptr);
} else if (std::holds_alternative<core::WeakPtr<ResourceBase>>(resource)) {
return std::get<core::WeakPtr<ResourceBase>>(resource).GetNewRef();
} else {
return nullptr;
}
}
ResourceMgr::ResourceAndName::ResourceAndName(
ResourceAndName&& other) noexcept {
name = std::move(other.name);
resource = std::move(other.resource);
}
ResourceMgr::ResourceAndName::~ResourceAndName() {}
ResourceMgr::ResourceAndName& ResourceMgr::ResourceAndName::operator=(
ResourceAndName&& other) noexcept {
name = std::move(other.name);
resource = std::move(other.resource);
return *this;
}
ResourceMgr::ResourceMgr() : default_container_("localhost") {}
ResourceMgr::ResourceMgr(const string& default_container)
: default_container_(default_container) {}
ResourceMgr::~ResourceMgr() { Clear(); }
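// Clear() moves the container map out while holding mu_ and deletes the
// containers only after releasing the lock, so resource destructors never run
// under the ResourceMgr mutex.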
void ResourceMgr::Clear() {
absl::flat_hash_map<string, Container*> tmp_containers;
{
mutex_lock l(mu_);
tmp_containers = std::move(containers_);
containers_.clear();
}
for (const auto& p : tmp_containers) {
delete p.second;
}
}
string ResourceMgr::DebugString() const {
mutex_lock l(mu_);
struct Line {
const string* container;
const string type;
const string* resource;
const string detail;
};
std::vector<Line> lines;
for (const auto& p : containers_) {
const string& container = p.first;
for (const auto& q : *p.second) {
const Key& key = q.first;
const char* type = DebugTypeName(key.first);
const core::RefCountPtr<ResourceBase> resource = q.second.GetResource();
Line l{&container, port::Demangle(type), q.second.name.get(),
resource ? resource->DebugString() : "<nullptr>"};
lines.push_back(l);
}
}
std::vector<string> text;
text.reserve(lines.size());
for (const Line& line : lines) {
text.push_back(strings::Printf(
"%-20s | %-40s | %-40s | %-s", line.container->c_str(),
line.type.c_str(), line.resource->c_str(), line.detail.c_str()));
}
std::sort(text.begin(), text.end());
return absl::StrJoin(text, "\n");
}
Status ResourceMgr::DoCreate(const string& container_name, TypeIndex type,
const string& name, ResourceBase* resource,
bool owns_resource) {
Container* container = [&]() TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
Container** ptr = &containers_[container_name];
if (*ptr == nullptr) {
*ptr = new Container;
}
return *ptr;
}();
ResourceAndName resource_and_name(name);
StringPiece borrowed_name(*resource_and_name.name);
if (owns_resource) {
resource_and_name.resource = core::RefCountPtr<ResourceBase>(resource);
} else {
auto cleanup_fn = [this, container, type, borrowed_name]() {
mutex_lock l(mu_);
auto iter = container->find({type.hash_code(), borrowed_name});
if (iter != container->end()) {
container->erase(iter);
}
};
resource_and_name.resource =
core::WeakPtr<ResourceBase>(resource, cleanup_fn);
}
Container::value_type key_and_value(Key(type.hash_code(), borrowed_name),
std::move(resource_and_name));
auto st = container->insert(std::move(key_and_value));
if (st.second) {
TF_RETURN_IF_ERROR(InsertDebugTypeName(type.hash_code(), type.name()));
return absl::OkStatus();
}
return errors::AlreadyExists("Resource ", container_name, "/", name, "/",
type.name());
}
Status ResourceMgr::Lookup(const ResourceHandle& handle,
ResourceBase** resource) const {
tf_shared_lock l(mu_);
  return DoLookup(handle.container(), handle.hash_code(),
                  /*type_name=*/"ResourceBase", handle.name(), resource);
}
Status ResourceMgr::DoLookup(const string& container, TypeIndex type,
const string& name,
ResourceBase** resource) const {
return DoLookup(container, type.hash_code(), type.name(), name, resource);
}
Status ResourceMgr::DoLookup(const string& container, uint64 type_hash_code,
const string& type_name,
const string& resource_name,
ResourceBase** resource) const {
const Container* b = gtl::FindPtrOrNull(containers_, container);
if (b == nullptr) {
return errors::NotFound("Container ", container,
" does not exist. (Could not find resource: ",
container, "/", resource_name, ")");
}
auto iter = b->find({type_hash_code, resource_name});
if (iter == b->end()) {
return errors::NotFound("Resource ", container, "/", resource_name, "/",
type_name, " does not exist.");
}
ResourceBase* ptr = iter->second.GetResource().release();
if (ptr == nullptr) {
return errors::NotFound("Resource ", container, "/", resource_name, "/",
type_name, " has been destroyed.");
}
*resource = ptr;
return absl::OkStatus();
}
Status ResourceMgr::PopResourceAndName(const string& container,
uint64 type_hash_code,
const string& resource_name,
const string& type_name,
ResourceAndName& resource_and_name) {
mutex_lock l(mu_);
Container* b = gtl::FindPtrOrNull(containers_, container);
if (b == nullptr) {
return errors::NotFound("Container ", container, " does not exist.");
}
auto iter = b->find({type_hash_code, resource_name});
if (iter == b->end()) {
return errors::NotFound("Resource ", container, "/", resource_name, "/",
type_name, " does not exist.");
}
std::swap(resource_and_name, iter->second);
b->erase(iter);
return absl::OkStatus();
}
Status ResourceMgr::DoDelete(const string& container, uint64 type_hash_code,
const string& resource_name,
const string& type_name) {
ResourceAndName resource_and_name;
TF_RETURN_IF_ERROR(PopResourceAndName(
container, type_hash_code, resource_name, type_name, resource_and_name));
if (std::holds_alternative<core::WeakPtr<ResourceBase>>(
resource_and_name.resource)) {
return errors::Internal(
"Cannot delete an unowned Resource ", container, "/", resource_name,
"/", type_name, " from ResourceMgr. ",
"This indicates ref-counting ResourceHandle is exposed to weak "
"ResourceHandle code paths.");
}
return absl::OkStatus();
}
Status ResourceMgr::DoDelete(const string& container, TypeIndex type,
const string& resource_name) {
return DoDelete(container, type.hash_code(), resource_name, type.name());
}
Status ResourceMgr::Delete(const ResourceHandle& handle) {
return DoDelete(handle.container(), handle.hash_code(), handle.name(),
"<unknown>");
}
Status ResourceMgr::Cleanup(const string& container) {
{
tf_shared_lock l(mu_);
if (!gtl::FindOrNull(containers_, container)) {
return absl::OkStatus();
}
}
Container* b = nullptr;
{
mutex_lock l(mu_);
auto iter = containers_.find(container);
if (iter == containers_.end()) {
return absl::OkStatus();
}
b = iter->second;
containers_.erase(iter);
}
CHECK(b != nullptr);
delete b;
return absl::OkStatus();
}
static bool IsValidContainerName(StringPiece s) {
using ::tensorflow::strings::Scanner;
return Scanner(s)
.One(Scanner::LETTER_DIGIT_DOT)
.Any(Scanner::LETTER_DIGIT_DASH_DOT_SLASH)
.Eos()
.GetResult();
}
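// Examples (cf. the ContainerInfo unit tests): "cat.0-dog" and ".cat" are
// valid container names, while "-cat" and "12$%" are rejected because the
// first character must be a letter, digit, or dot.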
Status ContainerInfo::Init(ResourceMgr* rmgr, const NodeDef& ndef,
bool use_node_name_as_default) {
CHECK(rmgr);
rmgr_ = rmgr;
string attr_container;
TF_RETURN_IF_ERROR(GetNodeAttr(ndef, "container", &attr_container));
if (!attr_container.empty() && !IsValidContainerName(attr_container)) {
return errors::InvalidArgument("container contains invalid characters: ",
attr_container);
}
string attr_shared_name;
TF_RETURN_IF_ERROR(GetNodeAttr(ndef, "shared_name", &attr_shared_name));
if (!attr_shared_name.empty() && (attr_shared_name[0] == '_')) {
return errors::InvalidArgument("shared_name cannot start with '_':",
attr_shared_name);
}
if (!attr_container.empty()) {
container_ = attr_container;
} else {
container_ = rmgr_->default_container();
}
if (!attr_shared_name.empty()) {
name_ = attr_shared_name;
} else if (use_node_name_as_default) {
name_ = ndef.name();
} else {
resource_is_private_to_kernel_ = true;
static std::atomic<int64_t> counter(0);
name_ = strings::StrCat("_", counter.fetch_add(1), "_", ndef.name());
}
return absl::OkStatus();
}
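// Illustration of the resulting policy (cf. the ContainerInfo.Basic test):
//   container="", shared_name="", use_node_name_as_default=false
//       -> [localhost,_<counter>_foo,private]
//   container="", shared_name="", use_node_name_as_default=true
//       -> [localhost,foo,public]
//   container="cat", shared_name="bar" -> [cat,bar,public]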
string ContainerInfo::DebugString() const {
return strings::StrCat("[", container(), ",", name(), ",",
resource_is_private_to_kernel() ? "private" : "public",
"]");
}
const ResourceHandle& HandleFromInput(OpKernelContext* ctx, int input) {
return ctx->input(input).flat<ResourceHandle>()(0);
}
Status HandleFromInput(OpKernelContext* ctx, int input,
ResourceHandle* handle) {
TF_ASSIGN_OR_RETURN(const Tensor* tensor, ctx->get_input(input));
if (tensor->NumElements() == 0) {
return absl::InvalidArgumentError("Empty resource handle");
}
*handle = tensor->flat<ResourceHandle>()(0);
return absl::OkStatus();
}
Status HandleFromInput(OpKernelContext* ctx, StringPiece input,
ResourceHandle* handle) {
const Tensor* tensor;
TF_RETURN_IF_ERROR(ctx->input(input, &tensor));
if (tensor->NumElements() == 0) {
return absl::InvalidArgumentError("Empty resource handle");
}
*handle = tensor->flat<ResourceHandle>()(0);
return absl::OkStatus();
}
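// For ref-counting handles the resource lives on the handle itself, so the
// lookup and delete helpers below short-circuit without consulting the
// ResourceMgr; only name-based ("weak") handles go through the manager.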
Status LookupResource(OpKernelContext* ctx, const ResourceHandle& p,
ResourceBase** value) {
TF_RETURN_IF_ERROR(internal::ValidateDevice(ctx, p));
if (p.IsRefCounting()) {
TF_ASSIGN_OR_RETURN(*value, p.GetResource<ResourceBase>());
(*value)->Ref();
return absl::OkStatus();
}
return ctx->resource_manager()->Lookup(p, value);
}
Status DeleteResource(OpKernelContext* ctx, const ResourceHandle& p) {
TF_RETURN_IF_ERROR(internal::ValidateDevice(ctx, p));
if (p.IsRefCounting()) {
return absl::OkStatus();
}
return ctx->resource_manager()->Delete(p);
}
} | #include "tensorflow/core/framework/resource_mgr.h"
#include <memory>
#include <vector>
#include "tensorflow/core/framework/device_attributes.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/resource_handle.pb.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/refcount.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/core/threadpool.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/refcount.h"
#include "tensorflow/core/platform/regexp.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
namespace tensorflow {
class Resource : public ResourceBase {
public:
explicit Resource(const string& label) : label_(label) {}
~Resource() override {}
string DebugString() const override { return strings::StrCat("R/", label_); }
private:
string label_;
};
class Other : public ResourceBase {
public:
explicit Other(const string& label) : label_(label) {}
~Other() override {}
string DebugString() const override { return strings::StrCat("O/", label_); }
private:
string label_;
};
template <typename T>
string Find(const ResourceMgr& rm, const string& container,
const string& name) {
T* r;
TF_CHECK_OK(rm.Lookup(container, name, &r));
const string ret = r->DebugString();
r->Unref();
return ret;
}
template <typename T>
string LookupOrCreate(ResourceMgr* rm, const string& container,
const string& name, const string& label) {
T* r;
TF_CHECK_OK(rm->LookupOrCreate<T>(container, name, &r, [&label](T** ret) {
*ret = new T(label);
return absl::OkStatus();
}));
const string ret = r->DebugString();
r->Unref();
return ret;
}
static void HasError(const Status& s, const error::Code code,
const string& substr) {
EXPECT_EQ(s.code(), code);
EXPECT_TRUE(absl::StrContains(s.message(), substr))
<< s << ", expected substring " << substr;
}
template <typename T>
Status FindErr(const ResourceMgr& rm, const string& container,
const string& name) {
T* r;
Status s = rm.Lookup(container, name, &r);
CHECK(!s.ok());
return s;
}
TEST(ResourceMgrTest, Basic) {
ResourceMgr rm;
TF_CHECK_OK(rm.Create("foo", "bar", new Resource("cat")));
TF_CHECK_OK(rm.Create("foo", "baz", new Resource("dog")));
TF_CHECK_OK(rm.Create("foo", "bar", new Other("tiger")));
HasError(rm.Create("foo", "bar", new Resource("kitty")),
error::ALREADY_EXISTS, "Resource foo/bar");
EXPECT_EQ("R/cat", Find<Resource>(rm, "foo", "bar"));
EXPECT_EQ("R/dog", Find<Resource>(rm, "foo", "baz"));
EXPECT_EQ("O/tiger", Find<Other>(rm, "foo", "bar"));
HasError(FindErr<Resource>(rm, "bar", "foo"), error::NOT_FOUND,
"Container bar");
HasError(FindErr<Resource>(rm, "foo", "xxx"), error::NOT_FOUND,
"Resource foo/xxx");
HasError(FindErr<Other>(rm, "foo", "baz"), error::NOT_FOUND,
"Resource foo/baz");
TF_CHECK_OK(rm.Delete<Resource>("foo", "bar"));
HasError(FindErr<Resource>(rm, "foo", "bar"), error::NOT_FOUND,
"Resource foo/bar");
HasError(rm.Delete<Resource>("foo", "bar"), error::NOT_FOUND,
"Resource foo/bar");
TF_CHECK_OK(rm.Create("foo", "bar", new Resource("kitty")));
EXPECT_EQ("R/kitty", Find<Resource>(rm, "foo", "bar"));
TF_CHECK_OK(rm.Cleanup("foo"));
HasError(FindErr<Resource>(rm, "foo", "bar"), error::NOT_FOUND,
"Container foo");
TF_CHECK_OK(rm.Cleanup("foo"));
HasError(FindErr<Resource>(rm, "foo", "bar"), error::NOT_FOUND,
"Container foo");
TF_CHECK_OK(rm.Cleanup("bar"));
}
TEST(ResourceMgrTest, CreateUnowned) {
core::RefCountPtr<Resource> cat{new Resource("cat")};
core::RefCountPtr<Resource> kitty{new Resource("kitty")};
ASSERT_TRUE(cat->RefCountIsOne());
ASSERT_TRUE(kitty->RefCountIsOne());
ResourceMgr rm;
TF_CHECK_OK(rm.CreateUnowned("foo", "bar", cat.get()));
EXPECT_TRUE(cat->RefCountIsOne());
HasError(rm.CreateUnowned("foo", "bar", kitty.get()), error::ALREADY_EXISTS,
"Resource foo/bar");
EXPECT_TRUE(kitty->RefCountIsOne());
EXPECT_EQ("R/cat", Find<Resource>(rm, "foo", "bar"));
HasError(FindErr<Resource>(rm, "bar", "foo"), error::NOT_FOUND,
"Container bar");
HasError(FindErr<Resource>(rm, "foo", "xxx"), error::NOT_FOUND,
"Resource foo/xxx");
HasError(rm.Delete<Resource>("foo", "bar"), error::INTERNAL,
"Cannot delete an unowned Resource foo/bar");
TF_CHECK_OK(rm.CreateUnowned("foo", "bar", kitty.get()));
EXPECT_TRUE(kitty->RefCountIsOne());
EXPECT_EQ("R/kitty", Find<Resource>(rm, "foo", "bar"));
{
core::RefCountPtr<Resource> dog{new Resource("dog")};
TF_CHECK_OK(rm.CreateUnowned("foo", "bark", dog.get()));
EXPECT_EQ("R/dog", Find<Resource>(rm, "foo", "bark"));
EXPECT_EQ(1, dog->WeakRefCount());
{
ResourceMgr rm1;
TF_CHECK_OK(rm1.CreateUnowned("foo", "bark", dog.get()));
EXPECT_EQ("R/dog", Find<Resource>(rm1, "foo", "bark"));
EXPECT_EQ(2, dog->WeakRefCount());
}
EXPECT_EQ(1, dog->WeakRefCount());
}
HasError(FindErr<Resource>(rm, "foo", "bark"), error::NOT_FOUND,
"Resource foo/bark");
TF_CHECK_OK(rm.Cleanup("foo"));
HasError(FindErr<Resource>(rm, "foo", "bar"), error::NOT_FOUND,
"Container foo");
TF_CHECK_OK(rm.Cleanup("foo"));
HasError(FindErr<Resource>(rm, "foo", "bar"), error::NOT_FOUND,
"Container foo");
TF_CHECK_OK(rm.Cleanup("bar"));
EXPECT_TRUE(cat->RefCountIsOne());
EXPECT_TRUE(kitty->RefCountIsOne());
}
TEST(ResourceMgrTest, CreateOrLookup) {
ResourceMgr rm;
EXPECT_EQ("R/cat", LookupOrCreate<Resource>(&rm, "foo", "bar", "cat"));
EXPECT_EQ("R/cat", LookupOrCreate<Resource>(&rm, "foo", "bar", "dog"));
EXPECT_EQ("R/cat", Find<Resource>(rm, "foo", "bar"));
EXPECT_EQ("O/tiger", LookupOrCreate<Other>(&rm, "foo", "bar", "tiger"));
EXPECT_EQ("O/tiger", LookupOrCreate<Other>(&rm, "foo", "bar", "lion"));
TF_CHECK_OK(rm.Delete<Other>("foo", "bar"));
HasError(FindErr<Other>(rm, "foo", "bar"), error::NOT_FOUND,
"Resource foo/bar");
}
TEST(ResourceMgrTest, CreateOrLookupRaceCondition) {
ResourceMgr rm;
std::atomic<int> atomic_int(0);
{
thread::ThreadPool threads(Env::Default(), "racing_creates", 2);
for (int i = 0; i < 2; i++) {
threads.Schedule([&rm, &atomic_int] {
Resource* r;
TF_CHECK_OK(rm.LookupOrCreate<Resource>(
"container", "resource-name", &r, [&atomic_int](Resource** ret) {
Env::Default()->SleepForMicroseconds(1 * 1000 * 1000);
atomic_int += 1;
*ret = new Resource("label");
return absl::OkStatus();
}));
r->Unref();
});
}
}
EXPECT_EQ(1, atomic_int);
}
Status ComputePolicy(const string& attr_container,
const string& attr_shared_name,
bool use_node_name_as_default, string* result) {
ContainerInfo cinfo;
ResourceMgr rmgr;
NodeDef ndef;
ndef.set_name("foo");
if (attr_container != "none") {
AddNodeAttr("container", attr_container, &ndef);
}
if (attr_shared_name != "none") {
AddNodeAttr("shared_name", attr_shared_name, &ndef);
}
TF_RETURN_IF_ERROR(cinfo.Init(&rmgr, ndef, use_node_name_as_default));
*result = cinfo.DebugString();
return absl::OkStatus();
}
string Policy(const string& attr_container, const string& attr_shared_name,
bool use_node_name_as_default) {
string ret;
TF_CHECK_OK(ComputePolicy(attr_container, attr_shared_name,
use_node_name_as_default, &ret));
return ret;
}
TEST(ContainerInfo, Basic) {
EXPECT_TRUE(RE2::FullMatch(Policy("", "", false),
"\\[localhost,_\\d+_foo,private\\]"));
EXPECT_EQ(Policy("", "", true), "[localhost,foo,public]");
EXPECT_EQ(Policy("", "bar", false), "[localhost,bar,public]");
EXPECT_EQ(Policy("", "bar", true), "[localhost,bar,public]");
EXPECT_TRUE(
RE2::FullMatch(Policy("cat", "", false), "\\[cat,_\\d+_foo,private\\]"));
EXPECT_EQ(Policy("cat", "", true), "[cat,foo,public]");
EXPECT_EQ(Policy("cat", "bar", false), "[cat,bar,public]");
EXPECT_EQ(Policy("cat", "bar", true), "[cat,bar,public]");
EXPECT_EQ(Policy("cat.0-dog", "bar", true), "[cat.0-dog,bar,public]");
EXPECT_EQ(Policy(".cat", "bar", true), "[.cat,bar,public]");
}
Status WrongPolicy(const string& attr_container, const string& attr_shared_name,
bool use_node_name_as_default) {
string dbg;
auto s = ComputePolicy(attr_container, attr_shared_name,
use_node_name_as_default, &dbg);
CHECK(!s.ok());
return s;
}
TEST(ContainerInfo, Error) {
HasError(WrongPolicy("none", "", false), error::NOT_FOUND, "No attr");
HasError(WrongPolicy("", "none", false), error::NOT_FOUND, "No attr");
HasError(WrongPolicy("none", "none", false), error::NOT_FOUND, "No attr");
HasError(WrongPolicy("12$%", "", false), error::INVALID_ARGUMENT,
"container contains invalid char");
HasError(WrongPolicy("-cat", "", false), error::INVALID_ARGUMENT,
"container contains invalid char");
HasError(WrongPolicy("", "_foo", false), error::INVALID_ARGUMENT,
"shared_name cannot start with '_'");
}
class StubDevice : public DeviceBase {
public:
explicit StubDevice(const string& name) : DeviceBase(nullptr) {
attr_.set_name(name);
}
Allocator* GetAllocator(AllocatorAttributes) override {
return cpu_allocator();
}
const DeviceAttributes& attributes() const override { return attr_; }
const string& name() const override { return attr_.name(); }
private:
DeviceAttributes attr_;
};
class StubResource : public ResourceBase {
public:
string DebugString() const override { return ""; }
int value_{0};
};
TEST(ResourceHandleTest, CRUD) {
ResourceMgr resource_mgr("");
OpKernelContext::Params params;
params.resource_manager = &resource_mgr;
StubDevice device("device_name");
params.device = &device;
OpKernelContext ctx(¶ms, 0);
ResourceHandle p =
MakeResourceHandle<StubResource>(&ctx, "container", "name");
{
auto* r = new StubResource();
r->value_ = 42;
TF_EXPECT_OK(CreateResource(&ctx, p, r));
}
{
core::RefCountPtr<StubResource> r;
TF_ASSERT_OK(LookupResource(&ctx, p, &r));
ASSERT_TRUE(r != nullptr);
EXPECT_EQ(r->value_, 42);
}
{
TF_EXPECT_OK(DeleteResource<StubResource>(&ctx, p));
core::RefCountPtr<StubResource> unused;
EXPECT_FALSE(LookupResource(&ctx, p, &unused).ok());
}
}
TEST(ResourceHandleTest, ResourceFromValidIntInput) {
ResourceMgr resource_mgr("");
OpKernelContext::Params params;
params.resource_manager = &resource_mgr;
StubDevice device("device_name");
params.device = &device;
OpKernelContext ctx(¶ms, 1);
ResourceHandleProto proto;
proto.set_device("cpu:0");
proto.set_container("test_container");
proto.set_name("test_var");
auto handle = std::make_unique<ResourceHandle>(proto);
auto expected_summary =
"ResourceHandle(name=\"test_var\", device=\"cpu:0\", "
"container=\"test_container\", type=\"\", dtype and shapes : \"[ ]\")";
EXPECT_EQ(handle->SummarizeValue(), expected_summary);
Tensor arg0(DT_RESOURCE, TensorShape({2}));
arg0.flat<ResourceHandle>()(0) = *handle;
std::vector<tensorflow::TensorValue> inputs{TensorValue(new Tensor(arg0))};
params.inputs = inputs;
ResourceHandle get_int_handle;
TF_ASSERT_OK(HandleFromInput(&ctx, 0, &get_int_handle));
EXPECT_EQ(get_int_handle.SummarizeValue(), expected_summary);
delete inputs.at(0).tensor;
}
TEST(ResourceHandleTest, ResourceFromInvalidIntInput) {
ResourceMgr resource_mgr("");
OpKernelContext::Params params;
params.resource_manager = &resource_mgr;
StubDevice device("device_name");
params.device = &device;
OpKernelContext ctx(¶ms, 0);
ResourceHandle get_int_handle;
EXPECT_FALSE(HandleFromInput(&ctx, 0, &get_int_handle).ok());
}
TEST(ResourceHandleTest, ResourceFromIntInputWithoutResource) {
ResourceMgr resource_mgr("");
OpKernelContext::Params params;
params.resource_manager = &resource_mgr;
StubDevice device("device_name");
params.device = &device;
OpKernelContext ctx(¶ms, 1);
std::vector<tensorflow::TensorValue> inputs{TensorValue(new Tensor())};
params.inputs = inputs;
ResourceHandle get_int_handle;
EXPECT_FALSE(HandleFromInput(&ctx, 0, &get_int_handle).ok());
delete inputs.at(0).tensor;
}
TEST(ResourceHandleTest, LookupDeleteGenericResource) {
ResourceMgr resource_mgr("");
OpKernelContext::Params params;
params.resource_manager = &resource_mgr;
StubDevice device("device_name");
params.device = &device;
OpKernelContext ctx(¶ms, 0);
ResourceHandle p =
MakeResourceHandle<StubResource>(&ctx, "container", "name");
{
auto* r = new StubResource();
r->value_ = 42;
TF_EXPECT_OK(CreateResource(&ctx, p, r));
}
{
ResourceBase* r;
TF_ASSERT_OK(LookupResource(&ctx, p, &r));
ASSERT_TRUE(r != nullptr);
core::ScopedUnref unref(r);
EXPECT_EQ(static_cast<StubResource*>(r)->value_, 42);
}
{
TF_EXPECT_OK(DeleteResource(&ctx, p));
ResourceBase* unused;
EXPECT_FALSE(LookupResource(&ctx, p, &unused).ok());
}
}
TEST(ResourceHandleTest, DifferentDevice) {
ResourceMgr resource_mgr("");
OpKernelContext::Params params;
params.resource_manager = &resource_mgr;
StubDevice device("device_name");
params.device = &device;
OpKernelContext ctx(¶ms, 0);
ResourceHandle p =
MakeResourceHandle<StubResource>(&ctx, "container", "name");
ResourceMgr other_resource_mgr("");
OpKernelContext::Params other_params;
other_params.resource_manager = &other_resource_mgr;
StubDevice other_device("other_device_name");
other_params.device = &other_device;
OpKernelContext other_ctx(&other_params, 0);
auto* r = new StubResource();
ASSERT_FALSE(CreateResource(&other_ctx, p, r).ok());
r->Unref();
}
class OtherStubResource : public ResourceBase {
public:
string DebugString() const override { return ""; }
};
TEST(ResourceHandleTest, DifferentType) {
ResourceMgr resource_mgr("");
OpKernelContext::Params params;
params.resource_manager = &resource_mgr;
StubDevice device("device_name");
params.device = &device;
OpKernelContext ctx(¶ms, 0);
ResourceHandle p =
MakeResourceHandle<StubResource>(&ctx, "container", "name");
auto* r = new OtherStubResource;
ASSERT_FALSE(CreateResource(&ctx, p, r).ok());
r->Unref();
}
TEST(ResourceHandleTest, DeleteUsingResourceHandle) {
ResourceMgr resource_mgr("");
OpKernelContext::Params params;
params.resource_manager = &resource_mgr;
StubDevice device("device_name");
params.device = &device;
OpKernelContext ctx(¶ms, 0);
ResourceHandle p =
MakeResourceHandle<StubResource>(&ctx, "container", "name");
StubResource* r = new StubResource;
TF_EXPECT_OK(CreateResource(&ctx, p, r));
core::RefCountPtr<StubResource> lookup_r;
TF_EXPECT_OK(LookupResource<StubResource>(&ctx, p, &lookup_r));
EXPECT_EQ(lookup_r.get(), r);
TF_EXPECT_OK(DeleteResource(&ctx, p));
  EXPECT_FALSE(LookupResource<StubResource>(&ctx, p, &lookup_r).ok());
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/resource_mgr.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/resource_mgr_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
a807a51e-59a8-4aa7-8b7c-9ed30daf3d07 | cpp | google/quiche | transport_parameters | quiche/quic/core/crypto/transport_parameters.cc | quiche/quic/core/crypto/transport_parameters_test.cc | #include "quiche/quic/core/crypto/transport_parameters.h"
#include <algorithm>
#include <cstdint>
#include <cstring>
#include <forward_list>
#include <memory>
#include <ostream>
#include <string>
#include <utility>
#include <vector>
#include "absl/strings/escaping.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "openssl/digest.h"
#include "openssl/sha.h"
#include "quiche/quic/core/quic_connection_id.h"
#include "quiche/quic/core/quic_data_reader.h"
#include "quiche/quic/core/quic_data_writer.h"
#include "quiche/quic/core/quic_types.h"
#include "quiche/quic/core/quic_utils.h"
#include "quiche/quic/core/quic_versions.h"
#include "quiche/quic/platform/api/quic_bug_tracker.h"
#include "quiche/quic/platform/api/quic_flag_utils.h"
#include "quiche/quic/platform/api/quic_ip_address.h"
namespace quic {
enum TransportParameters::TransportParameterId : uint64_t {
kOriginalDestinationConnectionId = 0,
kMaxIdleTimeout = 1,
kStatelessResetToken = 2,
kMaxPacketSize = 3,
kInitialMaxData = 4,
kInitialMaxStreamDataBidiLocal = 5,
kInitialMaxStreamDataBidiRemote = 6,
kInitialMaxStreamDataUni = 7,
kInitialMaxStreamsBidi = 8,
kInitialMaxStreamsUni = 9,
kAckDelayExponent = 0xa,
kMaxAckDelay = 0xb,
kDisableActiveMigration = 0xc,
kPreferredAddress = 0xd,
kActiveConnectionIdLimit = 0xe,
kInitialSourceConnectionId = 0xf,
kRetrySourceConnectionId = 0x10,
kMaxDatagramFrameSize = 0x20,
  kGoogleHandshakeMessage = 0x26ab,
  kInitialRoundTripTime = 0x3127,
  kGoogleConnectionOptions = 0x3128,
  kGoogleQuicVersion =
      0x4752,  // Used to transmit version and supported_versions.
  kMinAckDelay = 0xDE1A,           // draft-iyengar-quic-delayed-ack.
  kVersionInformation = 0xFF73DB,  // draft-ietf-quic-version-negotiation.
  kReliableStreamReset = 0x17F7586D2CB571,  // draft-ietf-quic-reliable-stream-reset.
};
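// Sketch, not part of the original file: RFC 9000 Section 18.1 reserves
// transport parameter IDs of the form 31 * N + 27 for GREASE.
// SerializeTransportParameters below both injects one such ID and refuses
// custom parameters that collide with the pattern (kv.first % 31 == 27).
static_assert((31 * 3 + 27) % 31 == 27,
              "IDs of the form 31 * N + 27 satisfy id % 31 == 27");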
namespace {
constexpr QuicVersionLabel kReservedVersionMask = 0x0f0f0f0f;
constexpr QuicVersionLabel kReservedVersionBits = 0x0a0a0a0a;
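// Sketch, not part of the original file: a version label is "reserved for
// GREASE" (RFC 9000 Section 15) when the low nibble of every byte is 0xa,
// which the two constants above test for:
static_assert((0x1a2a3a4a & kReservedVersionMask) == kReservedVersionBits,
              "0x1a2a3a4a matches the reserved 0x?a?a?a?a pattern");
static_assert((0x00000001 & kReservedVersionMask) != kReservedVersionBits,
              "QUIC v1's label 0x00000001 is not reserved");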
constexpr uint64_t kMinMaxPacketSizeTransportParam = 1200;
constexpr uint64_t kMaxAckDelayExponentTransportParam = 20;
constexpr uint64_t kDefaultAckDelayExponentTransportParam = 3;
constexpr uint64_t kMaxMaxAckDelayTransportParam = 16383;
constexpr uint64_t kDefaultMaxAckDelayTransportParam = 25;
constexpr uint64_t kMinActiveConnectionIdLimitTransportParam = 2;
constexpr uint64_t kDefaultActiveConnectionIdLimitTransportParam = 2;
std::string TransportParameterIdToString(
TransportParameters::TransportParameterId param_id) {
switch (param_id) {
case TransportParameters::kOriginalDestinationConnectionId:
return "original_destination_connection_id";
case TransportParameters::kMaxIdleTimeout:
return "max_idle_timeout";
case TransportParameters::kStatelessResetToken:
return "stateless_reset_token";
case TransportParameters::kMaxPacketSize:
return "max_udp_payload_size";
case TransportParameters::kInitialMaxData:
return "initial_max_data";
case TransportParameters::kInitialMaxStreamDataBidiLocal:
return "initial_max_stream_data_bidi_local";
case TransportParameters::kInitialMaxStreamDataBidiRemote:
return "initial_max_stream_data_bidi_remote";
case TransportParameters::kInitialMaxStreamDataUni:
return "initial_max_stream_data_uni";
case TransportParameters::kInitialMaxStreamsBidi:
return "initial_max_streams_bidi";
case TransportParameters::kInitialMaxStreamsUni:
return "initial_max_streams_uni";
case TransportParameters::kAckDelayExponent:
return "ack_delay_exponent";
case TransportParameters::kMaxAckDelay:
return "max_ack_delay";
case TransportParameters::kDisableActiveMigration:
return "disable_active_migration";
case TransportParameters::kPreferredAddress:
return "preferred_address";
case TransportParameters::kActiveConnectionIdLimit:
return "active_connection_id_limit";
case TransportParameters::kInitialSourceConnectionId:
return "initial_source_connection_id";
case TransportParameters::kRetrySourceConnectionId:
return "retry_source_connection_id";
case TransportParameters::kMaxDatagramFrameSize:
return "max_datagram_frame_size";
case TransportParameters::kGoogleHandshakeMessage:
return "google_handshake_message";
case TransportParameters::kInitialRoundTripTime:
return "initial_round_trip_time";
case TransportParameters::kGoogleConnectionOptions:
return "google_connection_options";
case TransportParameters::kGoogleQuicVersion:
return "google-version";
case TransportParameters::kMinAckDelay:
return "min_ack_delay_us";
case TransportParameters::kVersionInformation:
return "version_information";
case TransportParameters::kReliableStreamReset:
return "reliable_stream_reset";
}
return absl::StrCat("Unknown(", param_id, ")");
}
bool TransportParameterIdIsKnown(
TransportParameters::TransportParameterId param_id) {
switch (param_id) {
case TransportParameters::kOriginalDestinationConnectionId:
case TransportParameters::kMaxIdleTimeout:
case TransportParameters::kStatelessResetToken:
case TransportParameters::kMaxPacketSize:
case TransportParameters::kInitialMaxData:
case TransportParameters::kInitialMaxStreamDataBidiLocal:
case TransportParameters::kInitialMaxStreamDataBidiRemote:
case TransportParameters::kInitialMaxStreamDataUni:
case TransportParameters::kInitialMaxStreamsBidi:
case TransportParameters::kInitialMaxStreamsUni:
case TransportParameters::kAckDelayExponent:
case TransportParameters::kMaxAckDelay:
case TransportParameters::kDisableActiveMigration:
case TransportParameters::kPreferredAddress:
case TransportParameters::kActiveConnectionIdLimit:
case TransportParameters::kInitialSourceConnectionId:
case TransportParameters::kRetrySourceConnectionId:
case TransportParameters::kMaxDatagramFrameSize:
case TransportParameters::kGoogleHandshakeMessage:
case TransportParameters::kInitialRoundTripTime:
case TransportParameters::kGoogleConnectionOptions:
case TransportParameters::kGoogleQuicVersion:
case TransportParameters::kMinAckDelay:
case TransportParameters::kVersionInformation:
case TransportParameters::kReliableStreamReset:
return true;
}
return false;
}
}
TransportParameters::IntegerParameter::IntegerParameter(
TransportParameters::TransportParameterId param_id, uint64_t default_value,
uint64_t min_value, uint64_t max_value)
: param_id_(param_id),
value_(default_value),
default_value_(default_value),
min_value_(min_value),
max_value_(max_value),
has_been_read_(false) {
QUICHE_DCHECK_LE(min_value, default_value);
QUICHE_DCHECK_LE(default_value, max_value);
QUICHE_DCHECK_LE(max_value, quiche::kVarInt62MaxValue);
}
TransportParameters::IntegerParameter::IntegerParameter(
TransportParameters::TransportParameterId param_id)
: TransportParameters::IntegerParameter::IntegerParameter(
param_id, 0, 0, quiche::kVarInt62MaxValue) {}
void TransportParameters::IntegerParameter::set_value(uint64_t value) {
value_ = value;
}
uint64_t TransportParameters::IntegerParameter::value() const { return value_; }
bool TransportParameters::IntegerParameter::IsValid() const {
return min_value_ <= value_ && value_ <= max_value_;
}
bool TransportParameters::IntegerParameter::Write(
QuicDataWriter* writer) const {
QUICHE_DCHECK(IsValid());
if (value_ == default_value_) {
return true;
}
if (!writer->WriteVarInt62(param_id_)) {
QUIC_BUG(quic_bug_10743_1) << "Failed to write param_id for " << *this;
return false;
}
const quiche::QuicheVariableLengthIntegerLength value_length =
QuicDataWriter::GetVarInt62Len(value_);
if (!writer->WriteVarInt62(value_length)) {
QUIC_BUG(quic_bug_10743_2) << "Failed to write value_length for " << *this;
return false;
}
if (!writer->WriteVarInt62WithForcedLength(value_, value_length)) {
QUIC_BUG(quic_bug_10743_3) << "Failed to write value for " << *this;
return false;
}
return true;
}
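// Wire-format example (per the Write() logic above): an integer parameter is
// encoded as three variable-length integers -- id, value length, value -- and
// is skipped entirely while it still holds its default. So max_ack_delay=25
// (the default) emits nothing, while max_ack_delay=40 emits the three bytes
// 0x0b 0x01 0x28.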
bool TransportParameters::IntegerParameter::Read(QuicDataReader* reader,
std::string* error_details) {
if (has_been_read_) {
*error_details =
"Received a second " + TransportParameterIdToString(param_id_);
return false;
}
has_been_read_ = true;
if (!reader->ReadVarInt62(&value_)) {
*error_details =
"Failed to parse value for " + TransportParameterIdToString(param_id_);
return false;
}
if (!reader->IsDoneReading()) {
*error_details =
absl::StrCat("Received unexpected ", reader->BytesRemaining(),
" bytes after parsing ", this->ToString(false));
return false;
}
return true;
}
std::string TransportParameters::IntegerParameter::ToString(
bool for_use_in_list) const {
if (for_use_in_list && value_ == default_value_) {
return "";
}
std::string rv = for_use_in_list ? " " : "";
absl::StrAppend(&rv, TransportParameterIdToString(param_id_), " ", value_);
if (!IsValid()) {
rv += " (Invalid)";
}
return rv;
}
std::ostream& operator<<(std::ostream& os,
const TransportParameters::IntegerParameter& param) {
os << param.ToString(false);
return os;
}
TransportParameters::PreferredAddress::PreferredAddress()
: ipv4_socket_address(QuicIpAddress::Any4(), 0),
ipv6_socket_address(QuicIpAddress::Any6(), 0),
connection_id(EmptyQuicConnectionId()),
stateless_reset_token(kStatelessResetTokenLength, 0) {}
TransportParameters::PreferredAddress::~PreferredAddress() {}
bool TransportParameters::PreferredAddress::operator==(
const PreferredAddress& rhs) const {
return ipv4_socket_address == rhs.ipv4_socket_address &&
ipv6_socket_address == rhs.ipv6_socket_address &&
connection_id == rhs.connection_id &&
stateless_reset_token == rhs.stateless_reset_token;
}
bool TransportParameters::PreferredAddress::operator!=(
const PreferredAddress& rhs) const {
return !(*this == rhs);
}
std::ostream& operator<<(
std::ostream& os,
const TransportParameters::PreferredAddress& preferred_address) {
os << preferred_address.ToString();
return os;
}
std::string TransportParameters::PreferredAddress::ToString() const {
return "[" + ipv4_socket_address.ToString() + " " +
ipv6_socket_address.ToString() + " connection_id " +
connection_id.ToString() + " stateless_reset_token " +
absl::BytesToHexString(absl::string_view(
reinterpret_cast<const char*>(stateless_reset_token.data()),
stateless_reset_token.size())) +
"]";
}
TransportParameters::LegacyVersionInformation::LegacyVersionInformation()
: version(0) {}
bool TransportParameters::LegacyVersionInformation::operator==(
const LegacyVersionInformation& rhs) const {
return version == rhs.version && supported_versions == rhs.supported_versions;
}
bool TransportParameters::LegacyVersionInformation::operator!=(
const LegacyVersionInformation& rhs) const {
return !(*this == rhs);
}
std::string TransportParameters::LegacyVersionInformation::ToString() const {
std::string rv =
absl::StrCat("legacy[version ", QuicVersionLabelToString(version));
if (!supported_versions.empty()) {
absl::StrAppend(&rv,
" supported_versions " +
QuicVersionLabelVectorToString(supported_versions));
}
absl::StrAppend(&rv, "]");
return rv;
}
std::ostream& operator<<(std::ostream& os,
const TransportParameters::LegacyVersionInformation&
legacy_version_information) {
os << legacy_version_information.ToString();
return os;
}
TransportParameters::VersionInformation::VersionInformation()
: chosen_version(0) {}
bool TransportParameters::VersionInformation::operator==(
const VersionInformation& rhs) const {
return chosen_version == rhs.chosen_version &&
other_versions == rhs.other_versions;
}
bool TransportParameters::VersionInformation::operator!=(
const VersionInformation& rhs) const {
return !(*this == rhs);
}
std::string TransportParameters::VersionInformation::ToString() const {
std::string rv = absl::StrCat("[chosen_version ",
QuicVersionLabelToString(chosen_version));
if (!other_versions.empty()) {
absl::StrAppend(&rv, " other_versions " +
QuicVersionLabelVectorToString(other_versions));
}
absl::StrAppend(&rv, "]");
return rv;
}
std::ostream& operator<<(
std::ostream& os,
const TransportParameters::VersionInformation& version_information) {
os << version_information.ToString();
return os;
}
std::ostream& operator<<(std::ostream& os, const TransportParameters& params) {
os << params.ToString();
return os;
}
std::string TransportParameters::ToString() const {
std::string rv = "[";
if (perspective == Perspective::IS_SERVER) {
rv += "Server";
} else {
rv += "Client";
}
if (legacy_version_information.has_value()) {
rv += " " + legacy_version_information->ToString();
}
if (version_information.has_value()) {
rv += " " + version_information->ToString();
}
if (original_destination_connection_id.has_value()) {
rv += " " + TransportParameterIdToString(kOriginalDestinationConnectionId) +
" " + original_destination_connection_id->ToString();
}
rv += max_idle_timeout_ms.ToString(true);
if (!stateless_reset_token.empty()) {
rv += " " + TransportParameterIdToString(kStatelessResetToken) + " " +
absl::BytesToHexString(absl::string_view(
reinterpret_cast<const char*>(stateless_reset_token.data()),
stateless_reset_token.size()));
}
rv += max_udp_payload_size.ToString(true);
rv += initial_max_data.ToString(true);
rv += initial_max_stream_data_bidi_local.ToString(true);
rv += initial_max_stream_data_bidi_remote.ToString(true);
rv += initial_max_stream_data_uni.ToString(true);
rv += initial_max_streams_bidi.ToString(true);
rv += initial_max_streams_uni.ToString(true);
rv += ack_delay_exponent.ToString(true);
rv += max_ack_delay.ToString(true);
rv += min_ack_delay_us.ToString(true);
if (disable_active_migration) {
rv += " " + TransportParameterIdToString(kDisableActiveMigration);
}
if (reliable_stream_reset) {
rv += " " + TransportParameterIdToString(kReliableStreamReset);
}
if (preferred_address) {
rv += " " + TransportParameterIdToString(kPreferredAddress) + " " +
preferred_address->ToString();
}
rv += active_connection_id_limit.ToString(true);
if (initial_source_connection_id.has_value()) {
rv += " " + TransportParameterIdToString(kInitialSourceConnectionId) + " " +
initial_source_connection_id->ToString();
}
if (retry_source_connection_id.has_value()) {
rv += " " + TransportParameterIdToString(kRetrySourceConnectionId) + " " +
retry_source_connection_id->ToString();
}
rv += max_datagram_frame_size.ToString(true);
if (google_handshake_message.has_value()) {
absl::StrAppend(&rv, " ",
TransportParameterIdToString(kGoogleHandshakeMessage),
" length: ", google_handshake_message->length());
}
rv += initial_round_trip_time_us.ToString(true);
if (google_connection_options.has_value()) {
rv += " " + TransportParameterIdToString(kGoogleConnectionOptions) + " ";
bool first = true;
for (const QuicTag& connection_option : *google_connection_options) {
if (first) {
first = false;
} else {
rv += ",";
}
rv += QuicTagToString(connection_option);
}
}
for (const auto& kv : custom_parameters) {
absl::StrAppend(&rv, " 0x", absl::Hex(static_cast<uint32_t>(kv.first)),
"=");
static constexpr size_t kMaxPrintableLength = 32;
if (kv.second.length() <= kMaxPrintableLength) {
rv += absl::BytesToHexString(kv.second);
} else {
absl::string_view truncated(kv.second.data(), kMaxPrintableLength);
rv += absl::StrCat(absl::BytesToHexString(truncated), "...(length ",
kv.second.length(), ")");
}
}
rv += "]";
return rv;
}
TransportParameters::TransportParameters()
: max_idle_timeout_ms(kMaxIdleTimeout),
max_udp_payload_size(kMaxPacketSize, kDefaultMaxPacketSizeTransportParam,
kMinMaxPacketSizeTransportParam,
quiche::kVarInt62MaxValue),
initial_max_data(kInitialMaxData),
initial_max_stream_data_bidi_local(kInitialMaxStreamDataBidiLocal),
initial_max_stream_data_bidi_remote(kInitialMaxStreamDataBidiRemote),
initial_max_stream_data_uni(kInitialMaxStreamDataUni),
initial_max_streams_bidi(kInitialMaxStreamsBidi),
initial_max_streams_uni(kInitialMaxStreamsUni),
ack_delay_exponent(kAckDelayExponent,
kDefaultAckDelayExponentTransportParam, 0,
kMaxAckDelayExponentTransportParam),
max_ack_delay(kMaxAckDelay, kDefaultMaxAckDelayTransportParam, 0,
kMaxMaxAckDelayTransportParam),
min_ack_delay_us(kMinAckDelay, 0, 0,
kMaxMaxAckDelayTransportParam * kNumMicrosPerMilli),
disable_active_migration(false),
active_connection_id_limit(kActiveConnectionIdLimit,
kDefaultActiveConnectionIdLimitTransportParam,
kMinActiveConnectionIdLimitTransportParam,
quiche::kVarInt62MaxValue),
max_datagram_frame_size(kMaxDatagramFrameSize),
reliable_stream_reset(false),
      initial_round_trip_time_us(kInitialRoundTripTime) {}
TransportParameters::TransportParameters(const TransportParameters& other)
: perspective(other.perspective),
legacy_version_information(other.legacy_version_information),
version_information(other.version_information),
original_destination_connection_id(
other.original_destination_connection_id),
max_idle_timeout_ms(other.max_idle_timeout_ms),
stateless_reset_token(other.stateless_reset_token),
max_udp_payload_size(other.max_udp_payload_size),
initial_max_data(other.initial_max_data),
initial_max_stream_data_bidi_local(
other.initial_max_stream_data_bidi_local),
initial_max_stream_data_bidi_remote(
other.initial_max_stream_data_bidi_remote),
initial_max_stream_data_uni(other.initial_max_stream_data_uni),
initial_max_streams_bidi(other.initial_max_streams_bidi),
initial_max_streams_uni(other.initial_max_streams_uni),
ack_delay_exponent(other.ack_delay_exponent),
max_ack_delay(other.max_ack_delay),
min_ack_delay_us(other.min_ack_delay_us),
disable_active_migration(other.disable_active_migration),
active_connection_id_limit(other.active_connection_id_limit),
initial_source_connection_id(other.initial_source_connection_id),
retry_source_connection_id(other.retry_source_connection_id),
max_datagram_frame_size(other.max_datagram_frame_size),
reliable_stream_reset(other.reliable_stream_reset),
initial_round_trip_time_us(other.initial_round_trip_time_us),
google_handshake_message(other.google_handshake_message),
google_connection_options(other.google_connection_options),
custom_parameters(other.custom_parameters) {
if (other.preferred_address) {
preferred_address = std::make_unique<TransportParameters::PreferredAddress>(
*other.preferred_address);
}
}
bool TransportParameters::operator==(const TransportParameters& rhs) const {
if (!(perspective == rhs.perspective &&
legacy_version_information == rhs.legacy_version_information &&
version_information == rhs.version_information &&
original_destination_connection_id ==
rhs.original_destination_connection_id &&
max_idle_timeout_ms.value() == rhs.max_idle_timeout_ms.value() &&
stateless_reset_token == rhs.stateless_reset_token &&
max_udp_payload_size.value() == rhs.max_udp_payload_size.value() &&
initial_max_data.value() == rhs.initial_max_data.value() &&
initial_max_stream_data_bidi_local.value() ==
rhs.initial_max_stream_data_bidi_local.value() &&
initial_max_stream_data_bidi_remote.value() ==
rhs.initial_max_stream_data_bidi_remote.value() &&
initial_max_stream_data_uni.value() ==
rhs.initial_max_stream_data_uni.value() &&
initial_max_streams_bidi.value() ==
rhs.initial_max_streams_bidi.value() &&
initial_max_streams_uni.value() ==
rhs.initial_max_streams_uni.value() &&
ack_delay_exponent.value() == rhs.ack_delay_exponent.value() &&
max_ack_delay.value() == rhs.max_ack_delay.value() &&
min_ack_delay_us.value() == rhs.min_ack_delay_us.value() &&
disable_active_migration == rhs.disable_active_migration &&
active_connection_id_limit.value() ==
rhs.active_connection_id_limit.value() &&
initial_source_connection_id == rhs.initial_source_connection_id &&
retry_source_connection_id == rhs.retry_source_connection_id &&
max_datagram_frame_size.value() ==
rhs.max_datagram_frame_size.value() &&
reliable_stream_reset == rhs.reliable_stream_reset &&
initial_round_trip_time_us.value() ==
rhs.initial_round_trip_time_us.value() &&
google_handshake_message == rhs.google_handshake_message &&
google_connection_options == rhs.google_connection_options &&
custom_parameters == rhs.custom_parameters)) {
return false;
}
if ((!preferred_address && rhs.preferred_address) ||
(preferred_address && !rhs.preferred_address)) {
return false;
}
if (preferred_address && rhs.preferred_address &&
*preferred_address != *rhs.preferred_address) {
return false;
}
return true;
}
bool TransportParameters::operator!=(const TransportParameters& rhs) const {
return !(*this == rhs);
}
bool TransportParameters::AreValid(std::string* error_details) const {
QUICHE_DCHECK(perspective == Perspective::IS_CLIENT ||
perspective == Perspective::IS_SERVER);
if (perspective == Perspective::IS_CLIENT && !stateless_reset_token.empty()) {
*error_details = "Client cannot send stateless reset token";
return false;
}
if (perspective == Perspective::IS_CLIENT &&
original_destination_connection_id.has_value()) {
*error_details = "Client cannot send original_destination_connection_id";
return false;
}
if (!stateless_reset_token.empty() &&
stateless_reset_token.size() != kStatelessResetTokenLength) {
*error_details = absl::StrCat("Stateless reset token has bad length ",
stateless_reset_token.size());
return false;
}
if (perspective == Perspective::IS_CLIENT && preferred_address) {
*error_details = "Client cannot send preferred address";
return false;
}
if (preferred_address && preferred_address->stateless_reset_token.size() !=
kStatelessResetTokenLength) {
*error_details =
absl::StrCat("Preferred address stateless reset token has bad length ",
preferred_address->stateless_reset_token.size());
return false;
}
if (preferred_address &&
(!preferred_address->ipv4_socket_address.host().IsIPv4() ||
!preferred_address->ipv6_socket_address.host().IsIPv6())) {
QUIC_BUG(quic_bug_10743_4) << "Preferred address family failure";
*error_details = "Internal preferred address family failure";
return false;
}
if (perspective == Perspective::IS_CLIENT &&
retry_source_connection_id.has_value()) {
*error_details = "Client cannot send retry_source_connection_id";
return false;
}
for (const auto& kv : custom_parameters) {
if (TransportParameterIdIsKnown(kv.first)) {
*error_details = absl::StrCat("Using custom_parameters with known ID ",
TransportParameterIdToString(kv.first),
" is not allowed");
return false;
}
}
if (perspective == Perspective::IS_SERVER &&
google_handshake_message.has_value()) {
*error_details = "Server cannot send google_handshake_message";
return false;
}
if (perspective == Perspective::IS_SERVER &&
initial_round_trip_time_us.value() > 0) {
*error_details = "Server cannot send initial round trip time";
return false;
}
if (version_information.has_value()) {
const QuicVersionLabel& chosen_version =
version_information->chosen_version;
const QuicVersionLabelVector& other_versions =
version_information->other_versions;
if (chosen_version == 0) {
*error_details = "Invalid chosen version";
return false;
}
if (perspective == Perspective::IS_CLIENT &&
std::find(other_versions.begin(), other_versions.end(),
chosen_version) == other_versions.end()) {
*error_details = "Client chosen version not in other versions";
return false;
}
}
const bool ok =
max_idle_timeout_ms.IsValid() && max_udp_payload_size.IsValid() &&
initial_max_data.IsValid() &&
initial_max_stream_data_bidi_local.IsValid() &&
initial_max_stream_data_bidi_remote.IsValid() &&
initial_max_stream_data_uni.IsValid() &&
initial_max_streams_bidi.IsValid() && initial_max_streams_uni.IsValid() &&
ack_delay_exponent.IsValid() && max_ack_delay.IsValid() &&
min_ack_delay_us.IsValid() && active_connection_id_limit.IsValid() &&
max_datagram_frame_size.IsValid() && initial_round_trip_time_us.IsValid();
if (!ok) {
*error_details = "Invalid transport parameters " + this->ToString();
}
return ok;
}
TransportParameters::~TransportParameters() = default;
bool SerializeTransportParameters(const TransportParameters& in,
std::vector<uint8_t>* out) {
std::string error_details;
if (!in.AreValid(&error_details)) {
QUIC_BUG(invalid transport parameters)
<< "Not serializing invalid transport parameters: " << error_details;
return false;
}
if (!in.legacy_version_information.has_value() ||
in.legacy_version_information->version == 0 ||
(in.perspective == Perspective::IS_SERVER &&
in.legacy_version_information->supported_versions.empty())) {
QUIC_BUG(missing versions) << "Refusing to serialize without versions";
return false;
}
TransportParameters::ParameterMap custom_parameters = in.custom_parameters;
for (const auto& kv : custom_parameters) {
if (kv.first % 31 == 27) {
QUIC_BUG(custom_parameters with GREASE)
<< "Serializing custom_parameters with GREASE ID " << kv.first
<< " is not allowed";
return false;
}
}
  // Upper bound on the length of the GREASE parameter value injected below.
  static constexpr size_t kMaxGreaseLength = 16;
  // Every parameter is prefixed by its ID and the length of its value, each
  // encoded as a variable-length integer of at most 8 bytes.
  static constexpr size_t kTypeAndValueLength = 2 * sizeof(uint64_t);
  static constexpr size_t kIntegerParameterLength =
      kTypeAndValueLength + sizeof(uint64_t);
  static constexpr size_t kStatelessResetParameterLength =
      kTypeAndValueLength + 16 /* stateless reset token length */;
  static constexpr size_t kConnectionIdParameterLength =
      kTypeAndValueLength + 255 /* maximum connection ID length */;
  static constexpr size_t kPreferredAddressParameterLength =
      kTypeAndValueLength + 4 /* IPv4 address */ + 2 /* IPv4 port */ +
      16 /* IPv6 address */ + 1 /* connection ID length */ +
      255 /* maximum connection ID length */ + 16 /* stateless reset token */;
  // Upper bound on the encoded size of the known transport parameters, one
  // term per parameter. The labels below follow the TransportParameterId enum
  // declaration order (with min_ack_delay grouped next to max_ack_delay); the
  // original per-term comments were stripped from this dump and are restored
  // here on a best-effort basis.
  static constexpr size_t kKnownTransportParamLength =
      kConnectionIdParameterLength +      // original_destination_connection_id
      kIntegerParameterLength +           // max_idle_timeout
      kStatelessResetParameterLength +    // stateless_reset_token
      kIntegerParameterLength +           // max_udp_payload_size
      kIntegerParameterLength +           // initial_max_data
      kIntegerParameterLength +           // initial_max_stream_data_bidi_local
      kIntegerParameterLength +           // initial_max_stream_data_bidi_remote
      kIntegerParameterLength +           // initial_max_stream_data_uni
      kIntegerParameterLength +           // initial_max_streams_bidi
      kIntegerParameterLength +           // initial_max_streams_uni
      kIntegerParameterLength +           // ack_delay_exponent
      kIntegerParameterLength +           // max_ack_delay
      kIntegerParameterLength +           // min_ack_delay_us
      kTypeAndValueLength +               // disable_active_migration
      kPreferredAddressParameterLength +  // preferred_address
      kIntegerParameterLength +           // active_connection_id_limit
      kConnectionIdParameterLength +      // initial_source_connection_id
      kConnectionIdParameterLength +      // retry_source_connection_id
      kIntegerParameterLength +           // max_datagram_frame_size
      kTypeAndValueLength +               // google_handshake_message
      kIntegerParameterLength +           // initial_round_trip_time_us
      kTypeAndValueLength +               // google_connection_options
      kTypeAndValueLength +               // google-version
      kTypeAndValueLength;                // version_information
std::vector<TransportParameters::TransportParameterId> parameter_ids = {
TransportParameters::kOriginalDestinationConnectionId,
TransportParameters::kMaxIdleTimeout,
TransportParameters::kStatelessResetToken,
TransportParameters::kMaxPacketSize,
TransportParameters::kInitialMaxData,
TransportParameters::kInitialMaxStreamDataBidiLocal,
TransportParameters::kInitialMaxStreamDataBidiRemote,
TransportParameters::kInitialMaxStreamDataUni,
TransportParameters::kInitialMaxStreamsBidi,
TransportParameters::kInitialMaxStreamsUni,
TransportParameters::kAckDelayExponent,
TransportParameters::kMaxAckDelay,
TransportParameters::kMinAckDelay,
TransportParameters::kActiveConnectionIdLimit,
TransportParameters::kMaxDatagramFrameSize,
TransportParameters::kReliableStreamReset,
TransportParameters::kGoogleHandshakeMessage,
TransportParameters::kInitialRoundTripTime,
TransportParameters::kDisableActiveMigration,
TransportParameters::kPreferredAddress,
TransportParameters::kInitialSourceConnectionId,
TransportParameters::kRetrySourceConnectionId,
TransportParameters::kGoogleConnectionOptions,
TransportParameters::kGoogleQuicVersion,
TransportParameters::kVersionInformation,
};
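  // Upper bound on the serialized length; the variable-length extensions
  // below grow this estimate before the output buffer is allocated.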
size_t max_transport_param_length = kKnownTransportParamLength;
if (in.google_connection_options.has_value()) {
max_transport_param_length +=
in.google_connection_options->size() * sizeof(QuicTag);
}
if (in.legacy_version_information.has_value()) {
max_transport_param_length +=
sizeof(in.legacy_version_information->version) +
        1 /* supported versions length */ +
in.legacy_version_information->supported_versions.size() *
sizeof(QuicVersionLabel);
}
if (in.version_information.has_value()) {
max_transport_param_length +=
sizeof(in.version_information->chosen_version) +
        // Count one extra version label for the GREASE version inserted below.
        (in.version_information->other_versions.size() + 1) *
sizeof(QuicVersionLabel);
}
if (in.google_handshake_message.has_value()) {
max_transport_param_length += in.google_handshake_message->length();
}
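  // Add a random GREASE transport parameter, as permitted by the "Reserved
  // Transport Parameters" section of RFC 9000: identifiers of the form
  // 31 * N + 27 are reserved, and sending one exercises peers' obligation to
  // ignore unknown parameters. Transport parameter IDs are 62-bit varints,
  // hence the modulus below; the value is up to 15 random bytes.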
QuicRandom* random = QuicRandom::GetInstance();
uint64_t grease_id64 = random->RandUint64() % ((1ULL << 62) - 31);
grease_id64 = (grease_id64 / 31) * 31 + 27;
TransportParameters::TransportParameterId grease_id =
static_cast<TransportParameters::TransportParameterId>(grease_id64);
const size_t grease_length = random->RandUint64() % kMaxGreaseLength;
QUICHE_DCHECK_GE(kMaxGreaseLength, grease_length);
char grease_contents[kMaxGreaseLength];
random->RandBytes(grease_contents, grease_length);
custom_parameters[grease_id] = std::string(grease_contents, grease_length);
for (const auto& kv : custom_parameters) {
max_transport_param_length += kTypeAndValueLength + kv.second.length();
parameter_ids.push_back(kv.first);
}
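  // Randomize the order in which parameters are sent (a Fisher-Yates shuffle
  // seeded from an insecure RNG, since ordering carries no security value)
  // to discourage peers from depending on any particular ordering.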
for (size_t i = parameter_ids.size() - 1; i > 0; i--) {
std::swap(parameter_ids[i],
parameter_ids[random->InsecureRandUint64() % (i + 1)]);
}
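  // Serialize into a buffer sized for the worst case; it is trimmed to the
  // number of bytes actually written once serialization completes.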
out->resize(max_transport_param_length);
QuicDataWriter writer(out->size(), reinterpret_cast<char*>(out->data()));
for (TransportParameters::TransportParameterId parameter_id : parameter_ids) {
switch (parameter_id) {
case TransportParameters::kOriginalDestinationConnectionId: {
if (in.original_destination_connection_id.has_value()) {
QUICHE_DCHECK_EQ(Perspective::IS_SERVER, in.perspective);
QuicConnectionId original_destination_connection_id =
*in.original_destination_connection_id;
if (!writer.WriteVarInt62(
TransportParameters::kOriginalDestinationConnectionId) ||
!writer.WriteStringPieceVarInt62(absl::string_view(
original_destination_connection_id.data(),
original_destination_connection_id.length()))) {
QUIC_BUG(Failed to write original_destination_connection_id)
<< "Failed to write original_destination_connection_id "
<< original_destination_connection_id << " for " << in;
return false;
}
}
} break;
case TransportParameters::kMaxIdleTimeout: {
if (!in.max_idle_timeout_ms.Write(&writer)) {
QUIC_BUG(Failed to write idle_timeout)
<< "Failed to write idle_timeout for " << in;
return false;
}
} break;
case TransportParameters::kStatelessResetToken: {
if (!in.stateless_reset_token.empty()) {
QUICHE_DCHECK_EQ(kStatelessResetTokenLength,
in.stateless_reset_token.size());
QUICHE_DCHECK_EQ(Perspective::IS_SERVER, in.perspective);
if (!writer.WriteVarInt62(
TransportParameters::kStatelessResetToken) ||
!writer.WriteStringPieceVarInt62(
absl::string_view(reinterpret_cast<const char*>(
in.stateless_reset_token.data()),
in.stateless_reset_token.size()))) {
QUIC_BUG(Failed to write stateless_reset_token)
<< "Failed to write stateless_reset_token of length "
<< in.stateless_reset_token.size() << " for " << in;
return false;
}
}
} break;
case TransportParameters::kMaxPacketSize: {
if (!in.max_udp_payload_size.Write(&writer)) {
QUIC_BUG(Failed to write max_udp_payload_size)
<< "Failed to write max_udp_payload_size for " << in;
return false;
}
} break;
case TransportParameters::kInitialMaxData: {
if (!in.initial_max_data.Write(&writer)) {
QUIC_BUG(Failed to write initial_max_data)
<< "Failed to write initial_max_data for " << in;
return false;
}
} break;
case TransportParameters::kInitialMaxStreamDataBidiLocal: {
if (!in.initial_max_stream_data_bidi_local.Write(&writer)) {
QUIC_BUG(Failed to write initial_max_stream_data_bidi_local)
<< "Failed to write initial_max_stream_data_bidi_local for "
<< in;
return false;
}
} break;
case TransportParameters::kInitialMaxStreamDataBidiRemote: {
if (!in.initial_max_stream_data_bidi_remote.Write(&writer)) {
QUIC_BUG(Failed to write initial_max_stream_data_bidi_remote)
<< "Failed to write initial_max_stream_data_bidi_remote for "
<< in;
return false;
}
} break;
case TransportParameters::kInitialMaxStreamDataUni: {
if (!in.initial_max_stream_data_uni.Write(&writer)) {
QUIC_BUG(Failed to write initial_max_stream_data_uni)
<< "Failed to write initial_max_stream_data_uni for " << in;
return false;
}
} break;
case TransportParameters::kInitialMaxStreamsBidi: {
if (!in.initial_max_streams_bidi.Write(&writer)) {
QUIC_BUG(Failed to write initial_max_streams_bidi)
<< "Failed to write initial_max_streams_bidi for " << in;
return false;
}
} break;
case TransportParameters::kInitialMaxStreamsUni: {
if (!in.initial_max_streams_uni.Write(&writer)) {
QUIC_BUG(Failed to write initial_max_streams_uni)
<< "Failed to write initial_max_streams_uni for " << in;
return false;
}
} break;
case TransportParameters::kAckDelayExponent: {
if (!in.ack_delay_exponent.Write(&writer)) {
QUIC_BUG(Failed to write ack_delay_exponent)
<< "Failed to write ack_delay_exponent for " << in;
return false;
}
} break;
case TransportParameters::kMaxAckDelay: {
if (!in.max_ack_delay.Write(&writer)) {
QUIC_BUG(Failed to write max_ack_delay)
<< "Failed to write max_ack_delay for " << in;
return false;
}
} break;
case TransportParameters::kMinAckDelay: {
if (!in.min_ack_delay_us.Write(&writer)) {
QUIC_BUG(Failed to write min_ack_delay_us)
<< "Failed to write min_ack_delay_us for " << in;
return false;
}
} break;
case TransportParameters::kActiveConnectionIdLimit: {
if (!in.active_connection_id_limit.Write(&writer)) {
QUIC_BUG(Failed to write active_connection_id_limit)
<< "Failed to write active_connection_id_limit for " << in;
return false;
}
} break;
case TransportParameters::kMaxDatagramFrameSize: {
if (!in.max_datagram_frame_size.Write(&writer)) {
QUIC_BUG(Failed to write max_datagram_frame_size)
<< "Failed to write max_datagram_frame_size for " << in;
return false;
}
} break;
case TransportParameters::kGoogleHandshakeMessage: {
if (in.google_handshake_message.has_value()) {
if (!writer.WriteVarInt62(
TransportParameters::kGoogleHandshakeMessage) ||
!writer.WriteStringPieceVarInt62(*in.google_handshake_message)) {
QUIC_BUG(Failed to write google_handshake_message)
<< "Failed to write google_handshake_message: "
<< *in.google_handshake_message << " for " << in;
return false;
}
}
} break;
case TransportParameters::kInitialRoundTripTime: {
if (!in.initial_round_trip_time_us.Write(&writer)) {
QUIC_BUG(Failed to write initial_round_trip_time_us)
<< "Failed to write initial_round_trip_time_us for " << in;
return false;
}
} break;
case TransportParameters::kDisableActiveMigration: {
if (in.disable_active_migration) {
if (!writer.WriteVarInt62(
TransportParameters::kDisableActiveMigration) ||
            !writer.WriteVarInt62(/* transport parameter length */ 0)) {
QUIC_BUG(Failed to write disable_active_migration)
<< "Failed to write disable_active_migration for " << in;
return false;
}
}
} break;
case TransportParameters::kReliableStreamReset: {
if (in.reliable_stream_reset) {
if (!writer.WriteVarInt62(
TransportParameters::kReliableStreamReset) ||
            !writer.WriteVarInt62(/* transport parameter length */ 0)) {
QUIC_BUG(Failed to write reliable_stream_reset)
<< "Failed to write reliable_stream_reset for " << in;
return false;
}
}
} break;
case TransportParameters::kPreferredAddress: {
if (in.preferred_address) {
std::string v4_address_bytes =
in.preferred_address->ipv4_socket_address.host().ToPackedString();
std::string v6_address_bytes =
in.preferred_address->ipv6_socket_address.host().ToPackedString();
if (v4_address_bytes.length() != 4 ||
v6_address_bytes.length() != 16 ||
in.preferred_address->stateless_reset_token.size() !=
kStatelessResetTokenLength) {
QUIC_BUG(quic_bug_10743_12)
<< "Bad lengths " << *in.preferred_address;
return false;
}
const uint64_t preferred_address_length =
v4_address_bytes.length() + sizeof(uint16_t) +
v6_address_bytes.length() + sizeof(uint16_t) +
sizeof(uint8_t) +
in.preferred_address->connection_id.length() +
in.preferred_address->stateless_reset_token.size();
if (!writer.WriteVarInt62(TransportParameters::kPreferredAddress) ||
            !writer.WriteVarInt62(/* transport parameter length */
                                  preferred_address_length) ||
!writer.WriteStringPiece(v4_address_bytes) ||
!writer.WriteUInt16(
in.preferred_address->ipv4_socket_address.port()) ||
!writer.WriteStringPiece(v6_address_bytes) ||
!writer.WriteUInt16(
in.preferred_address->ipv6_socket_address.port()) ||
!writer.WriteUInt8(
in.preferred_address->connection_id.length()) ||
!writer.WriteBytes(
in.preferred_address->connection_id.data(),
in.preferred_address->connection_id.length()) ||
!writer.WriteBytes(
in.preferred_address->stateless_reset_token.data(),
in.preferred_address->stateless_reset_token.size())) {
QUIC_BUG(Failed to write preferred_address)
<< "Failed to write preferred_address for " << in;
return false;
}
}
} break;
case TransportParameters::kInitialSourceConnectionId: {
if (in.initial_source_connection_id.has_value()) {
QuicConnectionId initial_source_connection_id =
*in.initial_source_connection_id;
if (!writer.WriteVarInt62(
TransportParameters::kInitialSourceConnectionId) ||
!writer.WriteStringPieceVarInt62(
absl::string_view(initial_source_connection_id.data(),
initial_source_connection_id.length()))) {
QUIC_BUG(Failed to write initial_source_connection_id)
<< "Failed to write initial_source_connection_id "
<< initial_source_connection_id << " for " << in;
return false;
}
}
} break;
case TransportParameters::kRetrySourceConnectionId: {
if (in.retry_source_connection_id.has_value()) {
QUICHE_DCHECK_EQ(Perspective::IS_SERVER, in.perspective);
QuicConnectionId retry_source_connection_id =
*in.retry_source_connection_id;
if (!writer.WriteVarInt62(
TransportParameters::kRetrySourceConnectionId) ||
!writer.WriteStringPieceVarInt62(
absl::string_view(retry_source_connection_id.data(),
retry_source_connection_id.length()))) {
QUIC_BUG(Failed to write retry_source_connection_id)
<< "Failed to write retry_source_connection_id "
<< retry_source_connection_id << " for " << in;
return false;
}
}
} break;
case TransportParameters::kGoogleConnectionOptions: {
if (in.google_connection_options.has_value()) {
static_assert(sizeof(in.google_connection_options->front()) == 4,
"bad size");
uint64_t connection_options_length =
in.google_connection_options->size() * 4;
if (!writer.WriteVarInt62(
TransportParameters::kGoogleConnectionOptions) ||
            !writer.WriteVarInt62(/* transport parameter length */
                                  connection_options_length)) {
QUIC_BUG(Failed to write google_connection_options)
<< "Failed to write google_connection_options of length "
<< connection_options_length << " for " << in;
return false;
}
for (const QuicTag& connection_option :
*in.google_connection_options) {
if (!writer.WriteTag(connection_option)) {
QUIC_BUG(Failed to write google_connection_option)
<< "Failed to write google_connection_option "
<< QuicTagToString(connection_option) << " for " << in;
return false;
}
}
}
} break;
case TransportParameters::kGoogleQuicVersion: {
if (!in.legacy_version_information.has_value()) {
break;
}
static_assert(sizeof(QuicVersionLabel) == sizeof(uint32_t),
"bad length");
uint64_t google_version_length =
sizeof(in.legacy_version_information->version);
if (in.perspective == Perspective::IS_SERVER) {
google_version_length +=
sizeof(uint8_t) +
sizeof(QuicVersionLabel) *
in.legacy_version_information->supported_versions.size();
}
if (!writer.WriteVarInt62(TransportParameters::kGoogleQuicVersion) ||
          !writer.WriteVarInt62(/* transport parameter length */
                                google_version_length) ||
!writer.WriteUInt32(in.legacy_version_information->version)) {
QUIC_BUG(Failed to write Google version extension)
<< "Failed to write Google version extension for " << in;
return false;
}
if (in.perspective == Perspective::IS_SERVER) {
if (!writer.WriteUInt8(
sizeof(QuicVersionLabel) *
in.legacy_version_information->supported_versions.size())) {
QUIC_BUG(Failed to write versions length)
<< "Failed to write versions length for " << in;
return false;
}
for (QuicVersionLabel version_label :
in.legacy_version_information->supported_versions) {
if (!writer.WriteUInt32(version_label)) {
QUIC_BUG(Failed to write supported version)
<< "Failed to write supported version for " << in;
return false;
}
}
}
} break;
case TransportParameters::kVersionInformation: {
if (!in.version_information.has_value()) {
break;
}
static_assert(sizeof(QuicVersionLabel) == sizeof(uint32_t),
"bad length");
QuicVersionLabelVector other_versions =
in.version_information->other_versions;
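        // Insert one reserved (GREASE) version label at a random position so
        // peers cannot assume the advertised version list is fixed.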
const size_t grease_index =
random->InsecureRandUint64() % (other_versions.size() + 1);
other_versions.insert(
other_versions.begin() + grease_index,
CreateQuicVersionLabel(QuicVersionReservedForNegotiation()));
const uint64_t version_information_length =
sizeof(in.version_information->chosen_version) +
sizeof(QuicVersionLabel) * other_versions.size();
if (!writer.WriteVarInt62(TransportParameters::kVersionInformation) ||
          !writer.WriteVarInt62(/* transport parameter length */
                                version_information_length) ||
!writer.WriteUInt32(in.version_information->chosen_version)) {
QUIC_BUG(Failed to write chosen version)
<< "Failed to write chosen version for " << in;
return false;
}
for (QuicVersionLabel version_label : other_versions) {
if (!writer.WriteUInt32(version_label)) {
QUIC_BUG(Failed to write other version)
<< "Failed to write other version for " << in;
return false;
}
}
} break;
default: {
auto it = custom_parameters.find(parameter_id);
if (it == custom_parameters.end()) {
QUIC_BUG(Unknown parameter) << "Unknown parameter " << parameter_id;
return false;
}
if (!writer.WriteVarInt62(parameter_id) ||
!writer.WriteStringPieceVarInt62(it->second)) {
QUIC_BUG(Failed to write custom parameter)
<< "Failed to write custom parameter " << parameter_id;
return false;
}
} break;
}
}
out->resize(writer.length());
QUIC_DLOG(INFO) << "Serialized " << in << " as " << writer.length()
<< " bytes";
return true;
}
bool ParseTransportParameters(ParsedQuicVersion version,
Perspective perspective, const uint8_t* in,
size_t in_len, TransportParameters* out,
std::string* error_details) {
out->perspective = perspective;
QuicDataReader reader(reinterpret_cast<const char*>(in), in_len);
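  // Transport parameters are encoded as a sequence of (id, length, value)
  // tuples, with the id and length carried as 62-bit varints; read tuples
  // until the input is exhausted.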
while (!reader.IsDoneReading()) {
uint64_t param_id64;
if (!reader.ReadVarInt62(¶m_id64)) {
*error_details = "Failed to parse transport parameter ID";
return false;
}
TransportParameters::TransportParameterId param_id =
static_cast<TransportParameters::TransportParameterId>(param_id64);
absl::string_view value;
if (!reader.ReadStringPieceVarInt62(&value)) {
*error_details =
"Failed to read length and value of transport parameter " +
TransportParameterIdToString(param_id);
return false;
}
QuicDataReader value_reader(value);
bool parse_success = true;
switch (param_id) {
case TransportParameters::kOriginalDestinationConnectionId: {
if (out->original_destination_connection_id.has_value()) {
*error_details =
"Received a second original_destination_connection_id";
return false;
}
const size_t connection_id_length = value_reader.BytesRemaining();
if (!QuicUtils::IsConnectionIdLengthValidForVersion(
connection_id_length, version.transport_version)) {
*error_details = absl::StrCat(
"Received original_destination_connection_id of invalid length ",
connection_id_length);
return false;
}
QuicConnectionId original_destination_connection_id;
if (!value_reader.ReadConnectionId(&original_destination_connection_id,
connection_id_length)) {
*error_details = "Failed to read original_destination_connection_id";
return false;
}
out->original_destination_connection_id =
original_destination_connection_id;
} break;
case TransportParameters::kMaxIdleTimeout:
parse_success =
out->max_idle_timeout_ms.Read(&value_reader, error_details);
break;
case TransportParameters::kStatelessResetToken: {
if (!out->stateless_reset_token.empty()) {
*error_details = "Received a second stateless_reset_token";
return false;
}
absl::string_view stateless_reset_token =
value_reader.ReadRemainingPayload();
if (stateless_reset_token.length() != kStatelessResetTokenLength) {
*error_details =
absl::StrCat("Received stateless_reset_token of invalid length ",
stateless_reset_token.length());
return false;
}
out->stateless_reset_token.assign(
stateless_reset_token.data(),
stateless_reset_token.data() + stateless_reset_token.length());
} break;
case TransportParameters::kMaxPacketSize:
parse_success =
out->max_udp_payload_size.Read(&value_reader, error_details);
break;
case TransportParameters::kInitialMaxData:
parse_success =
out->initial_max_data.Read(&value_reader, error_details);
break;
case TransportParameters::kInitialMaxStreamDataBidiLocal:
parse_success = out->initial_max_stream_data_bidi_local.Read(
&value_reader, error_details);
break;
case TransportParameters::kInitialMaxStreamDataBidiRemote:
parse_success = out->initial_max_stream_data_bidi_remote.Read(
&value_reader, error_details);
break;
case TransportParameters::kInitialMaxStreamDataUni:
parse_success =
out->initial_max_stream_data_uni.Read(&value_reader, error_details);
break;
case TransportParameters::kInitialMaxStreamsBidi:
parse_success =
out->initial_max_streams_bidi.Read(&value_reader, error_details);
break;
case TransportParameters::kInitialMaxStreamsUni:
parse_success =
out->initial_max_streams_uni.Read(&value_reader, error_details);
break;
case TransportParameters::kAckDelayExponent:
parse_success =
out->ack_delay_exponent.Read(&value_reader, error_details);
break;
case TransportParameters::kMaxAckDelay:
parse_success = out->max_ack_delay.Read(&value_reader, error_details);
break;
case TransportParameters::kDisableActiveMigration:
if (out->disable_active_migration) {
*error_details = "Received a second disable_active_migration";
return false;
}
out->disable_active_migration = true;
break;
case TransportParameters::kPreferredAddress: {
TransportParameters::PreferredAddress preferred_address;
uint16_t ipv4_port, ipv6_port;
in_addr ipv4_address;
in6_addr ipv6_address;
preferred_address.stateless_reset_token.resize(
kStatelessResetTokenLength);
if (!value_reader.ReadBytes(&ipv4_address, sizeof(ipv4_address)) ||
!value_reader.ReadUInt16(&ipv4_port) ||
!value_reader.ReadBytes(&ipv6_address, sizeof(ipv6_address)) ||
!value_reader.ReadUInt16(&ipv6_port) ||
!value_reader.ReadLengthPrefixedConnectionId(
&preferred_address.connection_id) ||
!value_reader.ReadBytes(&preferred_address.stateless_reset_token[0],
kStatelessResetTokenLength)) {
*error_details = "Failed to read preferred_address";
return false;
}
preferred_address.ipv4_socket_address =
QuicSocketAddress(QuicIpAddress(ipv4_address), ipv4_port);
preferred_address.ipv6_socket_address =
QuicSocketAddress(QuicIpAddress(ipv6_address), ipv6_port);
if (!preferred_address.ipv4_socket_address.host().IsIPv4() ||
!preferred_address.ipv6_socket_address.host().IsIPv6()) {
*error_details = "Received preferred_address of bad families " +
preferred_address.ToString();
return false;
}
if (!QuicUtils::IsConnectionIdValidForVersion(
preferred_address.connection_id, version.transport_version)) {
*error_details = "Received invalid preferred_address connection ID " +
preferred_address.ToString();
return false;
}
out->preferred_address =
std::make_unique<TransportParameters::PreferredAddress>(
preferred_address);
} break;
case TransportParameters::kActiveConnectionIdLimit:
parse_success =
out->active_connection_id_limit.Read(&value_reader, error_details);
break;
case TransportParameters::kInitialSourceConnectionId: {
if (out->initial_source_connection_id.has_value()) {
*error_details = "Received a second initial_source_connection_id";
return false;
}
const size_t connection_id_length = value_reader.BytesRemaining();
if (!QuicUtils::IsConnectionIdLengthValidForVersion(
connection_id_length, version.transport_version)) {
*error_details = absl::StrCat(
"Received initial_source_connection_id of invalid length ",
connection_id_length);
return false;
}
QuicConnectionId initial_source_connection_id;
if (!value_reader.ReadConnectionId(&initial_source_connection_id,
connection_id_length)) {
*error_details = "Failed to read initial_source_connection_id";
return false;
}
out->initial_source_connection_id = initial_source_connection_id;
} break;
case TransportParameters::kRetrySourceConnectionId: {
if (out->retry_source_connection_id.has_value()) {
*error_details = "Received a second retry_source_connection_id";
return false;
}
const size_t connection_id_length = value_reader.BytesRemaining();
if (!QuicUtils::IsConnectionIdLengthValidForVersion(
connection_id_length, version.transport_version)) {
*error_details = absl::StrCat(
"Received retry_source_connection_id of invalid length ",
connection_id_length);
return false;
}
QuicConnectionId retry_source_connection_id;
if (!value_reader.ReadConnectionId(&retry_source_connection_id,
connection_id_length)) {
*error_details = "Failed to read retry_source_connection_id";
return false;
}
out->retry_source_connection_id = retry_source_connection_id;
} break;
case TransportParameters::kMaxDatagramFrameSize:
parse_success =
out->max_datagram_frame_size.Read(&value_reader, error_details);
break;
case TransportParameters::kGoogleHandshakeMessage:
if (out->google_handshake_message.has_value()) {
*error_details = "Received a second google_handshake_message";
return false;
}
out->google_handshake_message =
std::string(value_reader.ReadRemainingPayload());
break;
case TransportParameters::kInitialRoundTripTime:
parse_success =
out->initial_round_trip_time_us.Read(&value_reader, error_details);
break;
case TransportParameters::kReliableStreamReset:
if (out->reliable_stream_reset) {
*error_details = "Received a second reliable_stream_reset";
return false;
}
out->reliable_stream_reset = true;
break;
case TransportParameters::kGoogleConnectionOptions: {
if (out->google_connection_options.has_value()) {
*error_details = "Received a second google_connection_options";
return false;
}
out->google_connection_options = QuicTagVector{};
while (!value_reader.IsDoneReading()) {
QuicTag connection_option;
if (!value_reader.ReadTag(&connection_option)) {
*error_details = "Failed to read a google_connection_options";
return false;
}
out->google_connection_options->push_back(connection_option);
}
} break;
case TransportParameters::kGoogleQuicVersion: {
if (!out->legacy_version_information.has_value()) {
out->legacy_version_information =
TransportParameters::LegacyVersionInformation();
}
if (!value_reader.ReadUInt32(
&out->legacy_version_information->version)) {
*error_details = "Failed to read Google version extension version";
return false;
}
if (perspective == Perspective::IS_SERVER) {
uint8_t versions_length;
if (!value_reader.ReadUInt8(&versions_length)) {
*error_details = "Failed to parse Google supported versions length";
return false;
}
const uint8_t num_versions = versions_length / sizeof(uint32_t);
for (uint8_t i = 0; i < num_versions; ++i) {
QuicVersionLabel parsed_version;
if (!value_reader.ReadUInt32(&parsed_version)) {
*error_details = "Failed to parse Google supported version";
return false;
}
out->legacy_version_information->supported_versions.push_back(
parsed_version);
}
}
} break;
case TransportParameters::kVersionInformation: {
if (out->version_information.has_value()) {
*error_details = "Received a second version_information";
return false;
}
out->version_information = TransportParameters::VersionInformation();
if (!value_reader.ReadUInt32(
&out->version_information->chosen_version)) {
*error_details = "Failed to read chosen version";
return false;
}
while (!value_reader.IsDoneReading()) {
QuicVersionLabel other_version;
if (!value_reader.ReadUInt32(&other_version)) {
*error_details = "Failed to parse other version";
return false;
}
out->version_information->other_versions.push_back(other_version);
}
} break;
case TransportParameters::kMinAckDelay:
parse_success =
out->min_ack_delay_us.Read(&value_reader, error_details);
break;
default:
if (out->custom_parameters.find(param_id) !=
out->custom_parameters.end()) {
*error_details = "Received a second unknown parameter" +
TransportParameterIdToString(param_id);
return false;
}
out->custom_parameters[param_id] =
std::string(value_reader.ReadRemainingPayload());
break;
}
if (!parse_success) {
QUICHE_DCHECK(!error_details->empty());
return false;
}
if (!value_reader.IsDoneReading()) {
*error_details = absl::StrCat(
"Received unexpected ", value_reader.BytesRemaining(),
" bytes after parsing ", TransportParameterIdToString(param_id));
return false;
}
}
if (!out->AreValid(error_details)) {
QUICHE_DCHECK(!error_details->empty());
return false;
}
QUIC_DLOG(INFO) << "Parsed transport parameters " << *out << " from "
<< in_len << " bytes";
return true;
}
namespace {
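// Hashes the raw uint64 value of an integer transport parameter into the
// digest used for session ticket validation.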
bool DigestUpdateIntegerParam(
EVP_MD_CTX* hash_ctx, const TransportParameters::IntegerParameter& param) {
uint64_t value = param.value();
return EVP_DigestUpdate(hash_ctx, &value, sizeof(value));
}
}
bool SerializeTransportParametersForTicket(
const TransportParameters& in, const std::vector<uint8_t>& application_data,
std::vector<uint8_t>* out) {
std::string error_details;
if (!in.AreValid(&error_details)) {
QUIC_BUG(quic_bug_10743_26)
<< "Not serializing invalid transport parameters: " << error_details;
return false;
}
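  // Output layout: one serialization-version byte followed by a SHA-256
  // digest over the application data and the transport parameters consulted
  // when deciding whether 0-RTT can be accepted.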
out->resize(SHA256_DIGEST_LENGTH + 1);
const uint8_t serialization_version = 0;
(*out)[0] = serialization_version;
bssl::ScopedEVP_MD_CTX hash_ctx;
uint64_t app_data_len = application_data.size();
const uint64_t parameter_version = 0;
if (!EVP_DigestInit(hash_ctx.get(), EVP_sha256()) ||
!EVP_DigestUpdate(hash_ctx.get(), &app_data_len, sizeof(app_data_len)) ||
!EVP_DigestUpdate(hash_ctx.get(), application_data.data(),
application_data.size()) ||
!EVP_DigestUpdate(hash_ctx.get(), ¶meter_version,
sizeof(parameter_version))) {
QUIC_BUG(quic_bug_10743_27)
<< "Unexpected failure of EVP_Digest functions when hashing "
"Transport Parameters for ticket";
return false;
}
if (!DigestUpdateIntegerParam(hash_ctx.get(), in.initial_max_data) ||
!DigestUpdateIntegerParam(hash_ctx.get(),
in.initial_max_stream_data_bidi_local) ||
!DigestUpdateIntegerParam(hash_ctx.get(),
in.initial_max_stream_data_bidi_remote) ||
!DigestUpdateIntegerParam(hash_ctx.get(),
in.initial_max_stream_data_uni) ||
!DigestUpdateIntegerParam(hash_ctx.get(), in.initial_max_streams_bidi) ||
!DigestUpdateIntegerParam(hash_ctx.get(), in.initial_max_streams_uni) ||
!DigestUpdateIntegerParam(hash_ctx.get(),
in.active_connection_id_limit)) {
QUIC_BUG(quic_bug_10743_28)
<< "Unexpected failure of EVP_Digest functions when hashing "
"Transport Parameters for ticket";
return false;
}
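  // Hash the boolean parameters. The "ResetStreamAt" marker is mixed in only
  // when reliable_stream_reset is set, which keeps digests unchanged for
  // parameters that do not use that extension.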
uint8_t disable_active_migration = in.disable_active_migration ? 1 : 0;
uint8_t reliable_stream_reset = in.reliable_stream_reset ? 1 : 0;
if (!EVP_DigestUpdate(hash_ctx.get(), &disable_active_migration,
sizeof(disable_active_migration)) ||
(reliable_stream_reset &&
!EVP_DigestUpdate(hash_ctx.get(), "ResetStreamAt", 13)) ||
!EVP_DigestFinal(hash_ctx.get(), out->data() + 1, nullptr)) {
QUIC_BUG(quic_bug_10743_29)
<< "Unexpected failure of EVP_Digest functions when hashing "
"Transport Parameters for ticket";
return false;
}
return true;
}
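// Removes GREASE artifacts added by SerializeTransportParameters: the
// reserved custom parameter (id % 31 == 27) and any reserved version labels
// in version_information.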
void DegreaseTransportParameters(TransportParameters& parameters) {
for (auto it = parameters.custom_parameters.begin();
it != parameters.custom_parameters.end();
) {
if (it->first % 31 == 27) {
parameters.custom_parameters.erase(it++);
} else {
++it;
}
}
if (parameters.version_information.has_value()) {
QuicVersionLabelVector clean_versions;
for (QuicVersionLabel version :
parameters.version_information->other_versions) {
if ((version & kReservedVersionMask) != kReservedVersionBits) {
clean_versions.push_back(version);
}
}
parameters.version_information->other_versions = std::move(clean_versions);
}
}
} | #include "quiche/quic/core/crypto/transport_parameters.h"
#include <cstring>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/base/macros.h"
#include "absl/strings/escaping.h"
#include "absl/strings/string_view.h"
#include "quiche/quic/core/crypto/crypto_protocol.h"
#include "quiche/quic/core/quic_connection_id.h"
#include "quiche/quic/core/quic_tag.h"
#include "quiche/quic/core/quic_types.h"
#include "quiche/quic/core/quic_versions.h"
#include "quiche/quic/platform/api/quic_expect_bug.h"
#include "quiche/quic/platform/api/quic_ip_address.h"
#include "quiche/quic/platform/api/quic_test.h"
#include "quiche/quic/test_tools/quic_test_utils.h"
namespace quic {
namespace test {
namespace {
const QuicVersionLabel kFakeVersionLabel = 0x01234567;
const QuicVersionLabel kFakeVersionLabel2 = 0x89ABCDEF;
const uint64_t kFakeIdleTimeoutMilliseconds = 12012;
const uint64_t kFakeInitialMaxData = 101;
const uint64_t kFakeInitialMaxStreamDataBidiLocal = 2001;
const uint64_t kFakeInitialMaxStreamDataBidiRemote = 2002;
const uint64_t kFakeInitialMaxStreamDataUni = 3000;
const uint64_t kFakeInitialMaxStreamsBidi = 21;
const uint64_t kFakeInitialMaxStreamsUni = 22;
const bool kFakeDisableMigration = true;
const bool kFakeReliableStreamReset = true;
const uint64_t kFakeInitialRoundTripTime = 53;
const uint8_t kFakePreferredStatelessResetTokenData[16] = {
0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
0x88, 0x89, 0x8A, 0x8B, 0x8C, 0x8D, 0x8E, 0x8F};
const auto kCustomParameter1 =
static_cast<TransportParameters::TransportParameterId>(0xffcd);
const char* kCustomParameter1Value = "foo";
const auto kCustomParameter2 =
static_cast<TransportParameters::TransportParameterId>(0xff34);
const char* kCustomParameter2Value = "bar";
const char kFakeGoogleHandshakeMessage[] =
"01000106030392655f5230270d4964a4f99b15bbad220736d972aea97bf9ac494ead62e6";
QuicConnectionId CreateFakeOriginalDestinationConnectionId() {
return TestConnectionId(0x1337);
}
QuicConnectionId CreateFakeInitialSourceConnectionId() {
return TestConnectionId(0x2345);
}
QuicConnectionId CreateFakeRetrySourceConnectionId() {
return TestConnectionId(0x9876);
}
QuicConnectionId CreateFakePreferredConnectionId() {
return TestConnectionId(0xBEEF);
}
std::vector<uint8_t> CreateFakePreferredStatelessResetToken() {
return std::vector<uint8_t>(
kFakePreferredStatelessResetTokenData,
kFakePreferredStatelessResetTokenData +
sizeof(kFakePreferredStatelessResetTokenData));
}
QuicSocketAddress CreateFakeV4SocketAddress() {
QuicIpAddress ipv4_address;
if (!ipv4_address.FromString("65.66.67.68")) {
QUIC_LOG(FATAL) << "Failed to create IPv4 address";
return QuicSocketAddress();
}
return QuicSocketAddress(ipv4_address, 0x4884);
}
QuicSocketAddress CreateFakeV6SocketAddress() {
QuicIpAddress ipv6_address;
if (!ipv6_address.FromString("6061:6263:6465:6667:6869:6A6B:6C6D:6E6F")) {
QUIC_LOG(FATAL) << "Failed to create IPv6 address";
return QuicSocketAddress();
}
return QuicSocketAddress(ipv6_address, 0x6336);
}
std::unique_ptr<TransportParameters::PreferredAddress>
CreateFakePreferredAddress() {
TransportParameters::PreferredAddress preferred_address;
preferred_address.ipv4_socket_address = CreateFakeV4SocketAddress();
preferred_address.ipv6_socket_address = CreateFakeV6SocketAddress();
preferred_address.connection_id = CreateFakePreferredConnectionId();
preferred_address.stateless_reset_token =
CreateFakePreferredStatelessResetToken();
return std::make_unique<TransportParameters::PreferredAddress>(
preferred_address);
}
TransportParameters::LegacyVersionInformation
CreateFakeLegacyVersionInformationClient() {
TransportParameters::LegacyVersionInformation legacy_version_information;
legacy_version_information.version = kFakeVersionLabel;
return legacy_version_information;
}
TransportParameters::LegacyVersionInformation
CreateFakeLegacyVersionInformationServer() {
TransportParameters::LegacyVersionInformation legacy_version_information =
CreateFakeLegacyVersionInformationClient();
legacy_version_information.supported_versions.push_back(kFakeVersionLabel);
legacy_version_information.supported_versions.push_back(kFakeVersionLabel2);
return legacy_version_information;
}
TransportParameters::VersionInformation CreateFakeVersionInformation() {
TransportParameters::VersionInformation version_information;
version_information.chosen_version = kFakeVersionLabel;
version_information.other_versions.push_back(kFakeVersionLabel);
version_information.other_versions.push_back(kFakeVersionLabel2);
return version_information;
}
QuicTagVector CreateFakeGoogleConnectionOptions() {
return {kALPN, MakeQuicTag('E', 'F', 'G', 0x00),
MakeQuicTag('H', 'I', 'J', 0xff)};
}
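// Strips the GREASE entries that SerializeTransportParameters inserts (one
// reserved custom parameter and one reserved version label) so that
// round-trip tests can compare against the original parameters.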
void RemoveGreaseParameters(TransportParameters* params) {
std::vector<TransportParameters::TransportParameterId> grease_params;
for (const auto& kv : params->custom_parameters) {
if (kv.first % 31 == 27) {
grease_params.push_back(kv.first);
}
}
EXPECT_EQ(grease_params.size(), 1u);
for (TransportParameters::TransportParameterId param_id : grease_params) {
params->custom_parameters.erase(param_id);
}
if (params->version_information.has_value()) {
QuicVersionLabelVector& other_versions =
params->version_information.value().other_versions;
for (auto it = other_versions.begin(); it != other_versions.end();) {
if ((*it & 0x0f0f0f0f) == 0x0a0a0a0a) {
it = other_versions.erase(it);
} else {
++it;
}
}
}
}
}
class TransportParametersTest : public QuicTestWithParam<ParsedQuicVersion> {
protected:
TransportParametersTest() : version_(GetParam()) {}
ParsedQuicVersion version_;
};
INSTANTIATE_TEST_SUITE_P(TransportParametersTests, TransportParametersTest,
::testing::ValuesIn(AllSupportedVersionsWithTls()),
::testing::PrintToStringParamName());
TEST_P(TransportParametersTest, Comparator) {
TransportParameters orig_params;
TransportParameters new_params;
orig_params.perspective = Perspective::IS_CLIENT;
new_params.perspective = Perspective::IS_SERVER;
EXPECT_NE(orig_params, new_params);
EXPECT_FALSE(orig_params == new_params);
EXPECT_TRUE(orig_params != new_params);
new_params.perspective = Perspective::IS_CLIENT;
orig_params.legacy_version_information =
CreateFakeLegacyVersionInformationClient();
new_params.legacy_version_information =
CreateFakeLegacyVersionInformationClient();
orig_params.version_information = CreateFakeVersionInformation();
new_params.version_information = CreateFakeVersionInformation();
orig_params.disable_active_migration = true;
new_params.disable_active_migration = true;
orig_params.reliable_stream_reset = true;
new_params.reliable_stream_reset = true;
EXPECT_EQ(orig_params, new_params);
EXPECT_TRUE(orig_params == new_params);
EXPECT_FALSE(orig_params != new_params);
orig_params.legacy_version_information.value().supported_versions.push_back(
kFakeVersionLabel);
new_params.legacy_version_information.value().supported_versions.push_back(
kFakeVersionLabel2);
EXPECT_NE(orig_params, new_params);
EXPECT_FALSE(orig_params == new_params);
EXPECT_TRUE(orig_params != new_params);
new_params.legacy_version_information.value().supported_versions.pop_back();
new_params.legacy_version_information.value().supported_versions.push_back(
kFakeVersionLabel);
orig_params.stateless_reset_token = CreateStatelessResetTokenForTest();
new_params.stateless_reset_token = CreateStatelessResetTokenForTest();
EXPECT_EQ(orig_params, new_params);
EXPECT_TRUE(orig_params == new_params);
EXPECT_FALSE(orig_params != new_params);
orig_params.max_udp_payload_size.set_value(kMaxPacketSizeForTest);
new_params.max_udp_payload_size.set_value(kMaxPacketSizeForTest + 1);
EXPECT_NE(orig_params, new_params);
EXPECT_FALSE(orig_params == new_params);
EXPECT_TRUE(orig_params != new_params);
new_params.max_udp_payload_size.set_value(kMaxPacketSizeForTest);
EXPECT_EQ(orig_params, new_params);
EXPECT_TRUE(orig_params == new_params);
EXPECT_FALSE(orig_params != new_params);
orig_params.preferred_address = CreateFakePreferredAddress();
EXPECT_NE(orig_params, new_params);
EXPECT_FALSE(orig_params == new_params);
EXPECT_TRUE(orig_params != new_params);
new_params.preferred_address = CreateFakePreferredAddress();
EXPECT_EQ(orig_params, new_params);
EXPECT_TRUE(orig_params == new_params);
EXPECT_FALSE(orig_params != new_params);
orig_params.custom_parameters[kCustomParameter1] = kCustomParameter1Value;
orig_params.custom_parameters[kCustomParameter2] = kCustomParameter2Value;
new_params.custom_parameters[kCustomParameter2] = kCustomParameter2Value;
new_params.custom_parameters[kCustomParameter1] = kCustomParameter1Value;
EXPECT_EQ(orig_params, new_params);
EXPECT_TRUE(orig_params == new_params);
EXPECT_FALSE(orig_params != new_params);
orig_params.initial_source_connection_id =
CreateFakeInitialSourceConnectionId();
new_params.initial_source_connection_id = std::nullopt;
EXPECT_NE(orig_params, new_params);
EXPECT_FALSE(orig_params == new_params);
EXPECT_TRUE(orig_params != new_params);
new_params.initial_source_connection_id = TestConnectionId(0xbadbad);
EXPECT_NE(orig_params, new_params);
EXPECT_FALSE(orig_params == new_params);
EXPECT_TRUE(orig_params != new_params);
new_params.initial_source_connection_id =
CreateFakeInitialSourceConnectionId();
EXPECT_EQ(orig_params, new_params);
EXPECT_TRUE(orig_params == new_params);
EXPECT_FALSE(orig_params != new_params);
}
TEST_P(TransportParametersTest, CopyConstructor) {
TransportParameters orig_params;
orig_params.perspective = Perspective::IS_CLIENT;
orig_params.legacy_version_information =
CreateFakeLegacyVersionInformationClient();
orig_params.version_information = CreateFakeVersionInformation();
orig_params.original_destination_connection_id =
CreateFakeOriginalDestinationConnectionId();
orig_params.max_idle_timeout_ms.set_value(kFakeIdleTimeoutMilliseconds);
orig_params.stateless_reset_token = CreateStatelessResetTokenForTest();
orig_params.max_udp_payload_size.set_value(kMaxPacketSizeForTest);
orig_params.initial_max_data.set_value(kFakeInitialMaxData);
orig_params.initial_max_stream_data_bidi_local.set_value(
kFakeInitialMaxStreamDataBidiLocal);
orig_params.initial_max_stream_data_bidi_remote.set_value(
kFakeInitialMaxStreamDataBidiRemote);
orig_params.initial_max_stream_data_uni.set_value(
kFakeInitialMaxStreamDataUni);
orig_params.initial_max_streams_bidi.set_value(kFakeInitialMaxStreamsBidi);
orig_params.initial_max_streams_uni.set_value(kFakeInitialMaxStreamsUni);
orig_params.ack_delay_exponent.set_value(kAckDelayExponentForTest);
orig_params.max_ack_delay.set_value(kMaxAckDelayForTest);
orig_params.min_ack_delay_us.set_value(kMinAckDelayUsForTest);
orig_params.disable_active_migration = kFakeDisableMigration;
orig_params.reliable_stream_reset = kFakeReliableStreamReset;
orig_params.preferred_address = CreateFakePreferredAddress();
orig_params.active_connection_id_limit.set_value(
kActiveConnectionIdLimitForTest);
orig_params.initial_source_connection_id =
CreateFakeInitialSourceConnectionId();
orig_params.retry_source_connection_id = CreateFakeRetrySourceConnectionId();
orig_params.initial_round_trip_time_us.set_value(kFakeInitialRoundTripTime);
std::string google_handshake_message;
ASSERT_TRUE(absl::HexStringToBytes(kFakeGoogleHandshakeMessage,
&google_handshake_message));
orig_params.google_handshake_message = std::move(google_handshake_message);
orig_params.google_connection_options = CreateFakeGoogleConnectionOptions();
orig_params.custom_parameters[kCustomParameter1] = kCustomParameter1Value;
orig_params.custom_parameters[kCustomParameter2] = kCustomParameter2Value;
TransportParameters new_params(orig_params);
EXPECT_EQ(new_params, orig_params);
}
TEST_P(TransportParametersTest, RoundTripClient) {
TransportParameters orig_params;
orig_params.perspective = Perspective::IS_CLIENT;
orig_params.legacy_version_information =
CreateFakeLegacyVersionInformationClient();
orig_params.version_information = CreateFakeVersionInformation();
orig_params.max_idle_timeout_ms.set_value(kFakeIdleTimeoutMilliseconds);
orig_params.max_udp_payload_size.set_value(kMaxPacketSizeForTest);
orig_params.initial_max_data.set_value(kFakeInitialMaxData);
orig_params.initial_max_stream_data_bidi_local.set_value(
kFakeInitialMaxStreamDataBidiLocal);
orig_params.initial_max_stream_data_bidi_remote.set_value(
kFakeInitialMaxStreamDataBidiRemote);
orig_params.initial_max_stream_data_uni.set_value(
kFakeInitialMaxStreamDataUni);
orig_params.initial_max_streams_bidi.set_value(kFakeInitialMaxStreamsBidi);
orig_params.initial_max_streams_uni.set_value(kFakeInitialMaxStreamsUni);
orig_params.ack_delay_exponent.set_value(kAckDelayExponentForTest);
orig_params.max_ack_delay.set_value(kMaxAckDelayForTest);
orig_params.min_ack_delay_us.set_value(kMinAckDelayUsForTest);
orig_params.disable_active_migration = kFakeDisableMigration;
orig_params.reliable_stream_reset = kFakeReliableStreamReset;
orig_params.active_connection_id_limit.set_value(
kActiveConnectionIdLimitForTest);
orig_params.initial_source_connection_id =
CreateFakeInitialSourceConnectionId();
orig_params.initial_round_trip_time_us.set_value(kFakeInitialRoundTripTime);
std::string google_handshake_message;
ASSERT_TRUE(absl::HexStringToBytes(kFakeGoogleHandshakeMessage,
&google_handshake_message));
orig_params.google_handshake_message = std::move(google_handshake_message);
orig_params.google_connection_options = CreateFakeGoogleConnectionOptions();
orig_params.custom_parameters[kCustomParameter1] = kCustomParameter1Value;
orig_params.custom_parameters[kCustomParameter2] = kCustomParameter2Value;
std::vector<uint8_t> serialized;
ASSERT_TRUE(SerializeTransportParameters(orig_params, &serialized));
TransportParameters new_params;
std::string error_details;
ASSERT_TRUE(ParseTransportParameters(version_, Perspective::IS_CLIENT,
serialized.data(), serialized.size(),
&new_params, &error_details))
<< error_details;
EXPECT_TRUE(error_details.empty());
RemoveGreaseParameters(&new_params);
EXPECT_EQ(new_params, orig_params);
}
TEST_P(TransportParametersTest, RoundTripServer) {
TransportParameters orig_params;
orig_params.perspective = Perspective::IS_SERVER;
orig_params.legacy_version_information =
CreateFakeLegacyVersionInformationServer();
orig_params.version_information = CreateFakeVersionInformation();
orig_params.original_destination_connection_id =
CreateFakeOriginalDestinationConnectionId();
orig_params.max_idle_timeout_ms.set_value(kFakeIdleTimeoutMilliseconds);
orig_params.stateless_reset_token = CreateStatelessResetTokenForTest();
orig_params.max_udp_payload_size.set_value(kMaxPacketSizeForTest);
orig_params.initial_max_data.set_value(kFakeInitialMaxData);
orig_params.initial_max_stream_data_bidi_local.set_value(
kFakeInitialMaxStreamDataBidiLocal);
orig_params.initial_max_stream_data_bidi_remote.set_value(
kFakeInitialMaxStreamDataBidiRemote);
orig_params.initial_max_stream_data_uni.set_value(
kFakeInitialMaxStreamDataUni);
orig_params.initial_max_streams_bidi.set_value(kFakeInitialMaxStreamsBidi);
orig_params.initial_max_streams_uni.set_value(kFakeInitialMaxStreamsUni);
orig_params.ack_delay_exponent.set_value(kAckDelayExponentForTest);
orig_params.max_ack_delay.set_value(kMaxAckDelayForTest);
orig_params.min_ack_delay_us.set_value(kMinAckDelayUsForTest);
orig_params.disable_active_migration = kFakeDisableMigration;
orig_params.reliable_stream_reset = kFakeReliableStreamReset;
orig_params.preferred_address = CreateFakePreferredAddress();
orig_params.active_connection_id_limit.set_value(
kActiveConnectionIdLimitForTest);
orig_params.initial_source_connection_id =
CreateFakeInitialSourceConnectionId();
orig_params.retry_source_connection_id = CreateFakeRetrySourceConnectionId();
orig_params.google_connection_options = CreateFakeGoogleConnectionOptions();
std::vector<uint8_t> serialized;
ASSERT_TRUE(SerializeTransportParameters(orig_params, &serialized));
TransportParameters new_params;
std::string error_details;
ASSERT_TRUE(ParseTransportParameters(version_, Perspective::IS_SERVER,
serialized.data(), serialized.size(),
&new_params, &error_details))
<< error_details;
EXPECT_TRUE(error_details.empty());
RemoveGreaseParameters(&new_params);
EXPECT_EQ(new_params, orig_params);
}
TEST_P(TransportParametersTest, AreValid) {
{
TransportParameters params;
std::string error_details;
params.perspective = Perspective::IS_CLIENT;
EXPECT_TRUE(params.AreValid(&error_details));
EXPECT_TRUE(error_details.empty());
}
{
TransportParameters params;
std::string error_details;
params.perspective = Perspective::IS_CLIENT;
EXPECT_TRUE(params.AreValid(&error_details));
EXPECT_TRUE(error_details.empty());
params.max_idle_timeout_ms.set_value(kFakeIdleTimeoutMilliseconds);
EXPECT_TRUE(params.AreValid(&error_details));
EXPECT_TRUE(error_details.empty());
params.max_idle_timeout_ms.set_value(601000);
EXPECT_TRUE(params.AreValid(&error_details));
EXPECT_TRUE(error_details.empty());
}
{
TransportParameters params;
std::string error_details;
params.perspective = Perspective::IS_CLIENT;
EXPECT_TRUE(params.AreValid(&error_details));
EXPECT_TRUE(error_details.empty());
params.max_udp_payload_size.set_value(1200);
EXPECT_TRUE(params.AreValid(&error_details));
EXPECT_TRUE(error_details.empty());
params.max_udp_payload_size.set_value(65535);
EXPECT_TRUE(params.AreValid(&error_details));
EXPECT_TRUE(error_details.empty());
params.max_udp_payload_size.set_value(9999999);
EXPECT_TRUE(params.AreValid(&error_details));
EXPECT_TRUE(error_details.empty());
params.max_udp_payload_size.set_value(0);
error_details = "";
EXPECT_FALSE(params.AreValid(&error_details));
EXPECT_EQ(error_details,
"Invalid transport parameters [Client max_udp_payload_size 0 "
"(Invalid)]");
params.max_udp_payload_size.set_value(1199);
error_details = "";
EXPECT_FALSE(params.AreValid(&error_details));
EXPECT_EQ(error_details,
"Invalid transport parameters [Client max_udp_payload_size 1199 "
"(Invalid)]");
}
{
TransportParameters params;
std::string error_details;
params.perspective = Perspective::IS_CLIENT;
EXPECT_TRUE(params.AreValid(&error_details));
EXPECT_TRUE(error_details.empty());
params.ack_delay_exponent.set_value(0);
EXPECT_TRUE(params.AreValid(&error_details));
EXPECT_TRUE(error_details.empty());
params.ack_delay_exponent.set_value(20);
EXPECT_TRUE(params.AreValid(&error_details));
EXPECT_TRUE(error_details.empty());
params.ack_delay_exponent.set_value(21);
EXPECT_FALSE(params.AreValid(&error_details));
EXPECT_EQ(error_details,
"Invalid transport parameters [Client ack_delay_exponent 21 "
"(Invalid)]");
}
{
TransportParameters params;
std::string error_details;
params.perspective = Perspective::IS_CLIENT;
EXPECT_TRUE(params.AreValid(&error_details));
EXPECT_TRUE(error_details.empty());
params.active_connection_id_limit.set_value(2);
EXPECT_TRUE(params.AreValid(&error_details));
EXPECT_TRUE(error_details.empty());
params.active_connection_id_limit.set_value(999999);
EXPECT_TRUE(params.AreValid(&error_details));
EXPECT_TRUE(error_details.empty());
params.active_connection_id_limit.set_value(1);
EXPECT_FALSE(params.AreValid(&error_details));
EXPECT_EQ(error_details,
"Invalid transport parameters [Client active_connection_id_limit"
" 1 (Invalid)]");
params.active_connection_id_limit.set_value(0);
EXPECT_FALSE(params.AreValid(&error_details));
EXPECT_EQ(error_details,
"Invalid transport parameters [Client active_connection_id_limit"
" 0 (Invalid)]");
}
}
TEST_P(TransportParametersTest, NoClientParamsWithStatelessResetToken) {
TransportParameters orig_params;
orig_params.perspective = Perspective::IS_CLIENT;
orig_params.legacy_version_information =
CreateFakeLegacyVersionInformationClient();
orig_params.max_idle_timeout_ms.set_value(kFakeIdleTimeoutMilliseconds);
orig_params.stateless_reset_token = CreateStatelessResetTokenForTest();
orig_params.max_udp_payload_size.set_value(kMaxPacketSizeForTest);
std::vector<uint8_t> out;
EXPECT_QUIC_BUG(
EXPECT_FALSE(SerializeTransportParameters(orig_params, &out)),
"Not serializing invalid transport parameters: Client cannot send "
"stateless reset token");
}
TEST_P(TransportParametersTest, ParseClientParams) {
  const uint8_t kClientParams[] = {
      // max_idle_timeout
      0x01,        // parameter id
      0x02,        // length
      0x6e, 0xec,  // value (varint 12012)
      // max_udp_payload_size
      0x03,        // parameter id
      0x02,        // length
      0x63, 0x29,  // value (varint 9001)
      // initial_max_data
      0x04,        // parameter id
      0x02,        // length
      0x40, 0x65,  // value (varint 101)
      // initial_max_stream_data_bidi_local
      0x05,        // parameter id
      0x02,        // length
      0x47, 0xD1,  // value (varint 2001)
      // initial_max_stream_data_bidi_remote
      0x06,        // parameter id
      0x02,        // length
      0x47, 0xD2,  // value (varint 2002)
      // initial_max_stream_data_uni
      0x07,        // parameter id
      0x02,        // length
      0x4B, 0xB8,  // value (varint 3000)
      // initial_max_streams_bidi
      0x08,  // parameter id
      0x01,  // length
      0x15,  // value (21)
      // initial_max_streams_uni
      0x09,  // parameter id
      0x01,  // length
      0x16,  // value (22)
      // ack_delay_exponent
      0x0a,  // parameter id
      0x01,  // length
      0x0a,  // value (10)
      // max_ack_delay
      0x0b,  // parameter id
      0x01,  // length
      0x33,  // value (51)
      // min_ack_delay_us
      0x80, 0x00, 0xde, 0x1a,  // parameter id (varint 0xde1a)
      0x02,                    // length
      0x43, 0xe8,              // value (varint 1000)
      // disable_active_migration
      0x0c,  // parameter id
      0x00,  // length
      // reliable_stream_reset
      0xc0, 0x17, 0xf7, 0x58, 0x6d, 0x2c, 0xb5, 0x71,  // parameter id
      0x00,                                            // length
      // active_connection_id_limit
      0x0e,  // parameter id
      0x01,  // length
      0x34,  // value (52)
      // initial_source_connection_id
      0x0f,  // parameter id
      0x08,  // length
      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x23, 0x45,  // value
      // google_handshake_message
      0x66, 0xab,  // parameter id (varint 0x26ab)
      0x24,        // length (36)
      0x01, 0x00, 0x01, 0x06, 0x03, 0x03, 0x92, 0x65, 0x5f, 0x52, 0x30, 0x27,
      0x0d, 0x49, 0x64, 0xa4, 0xf9, 0x9b, 0x15, 0xbb, 0xad, 0x22, 0x07, 0x36,
      0xd9, 0x72, 0xae, 0xa9, 0x7b, 0xf9, 0xac, 0x49, 0x4e, 0xad, 0x62, 0xe6,
      // initial_round_trip_time_us
      0x71, 0x27,  // parameter id (varint 0x3127)
      0x01,        // length
      0x35,        // value (53)
      // google_connection_options
      0x71, 0x28,           // parameter id (varint 0x3128)
      0x0c,                 // length
      'A', 'L', 'P', 'N',   // tag
      'E', 'F', 'G', 0x00,  // tag
      'H', 'I', 'J', 0xff,  // tag
      // Google version extension
      0x80, 0x00, 0x47, 0x52,  // parameter id (varint 0x4752)
      0x04,                    // length
      0x01, 0x23, 0x45, 0x67,  // version
      // version_information
      0x80, 0xFF, 0x73, 0xDB,  // parameter id (varint 0xff73db)
      0x0C,                    // length
      0x01, 0x23, 0x45, 0x67,  // chosen version
      0x01, 0x23, 0x45, 0x67,  // other version 1
      0x89, 0xab, 0xcd, 0xef,  // other version 2
  };
const uint8_t* client_params =
reinterpret_cast<const uint8_t*>(kClientParams);
size_t client_params_length = ABSL_ARRAYSIZE(kClientParams);
TransportParameters new_params;
std::string error_details;
ASSERT_TRUE(ParseTransportParameters(version_, Perspective::IS_CLIENT,
client_params, client_params_length,
&new_params, &error_details))
<< error_details;
EXPECT_TRUE(error_details.empty());
EXPECT_EQ(Perspective::IS_CLIENT, new_params.perspective);
ASSERT_TRUE(new_params.legacy_version_information.has_value());
EXPECT_EQ(kFakeVersionLabel,
new_params.legacy_version_information.value().version);
EXPECT_TRUE(
new_params.legacy_version_information.value().supported_versions.empty());
ASSERT_TRUE(new_params.version_information.has_value());
EXPECT_EQ(new_params.version_information.value(),
CreateFakeVersionInformation());
EXPECT_FALSE(new_params.original_destination_connection_id.has_value());
EXPECT_EQ(kFakeIdleTimeoutMilliseconds,
new_params.max_idle_timeout_ms.value());
EXPECT_TRUE(new_params.stateless_reset_token.empty());
EXPECT_EQ(kMaxPacketSizeForTest, new_params.max_udp_payload_size.value());
EXPECT_EQ(kFakeInitialMaxData, new_params.initial_max_data.value());
EXPECT_EQ(kFakeInitialMaxStreamDataBidiLocal,
new_params.initial_max_stream_data_bidi_local.value());
EXPECT_EQ(kFakeInitialMaxStreamDataBidiRemote,
new_params.initial_max_stream_data_bidi_remote.value());
EXPECT_EQ(kFakeInitialMaxStreamDataUni,
new_params.initial_max_stream_data_uni.value());
EXPECT_EQ(kFakeInitialMaxStreamsBidi,
new_params.initial_max_streams_bidi.value());
EXPECT_EQ(kFakeInitialMaxStreamsUni,
new_params.initial_max_streams_uni.value());
EXPECT_EQ(kAckDelayExponentForTest, new_params.ack_delay_exponent.value());
EXPECT_EQ(kMaxAckDelayForTest, new_params.max_ack_delay.value());
EXPECT_EQ(kMinAckDelayUsForTest, new_params.min_ack_delay_us.value());
EXPECT_EQ(kFakeDisableMigration, new_params.disable_active_migration);
EXPECT_EQ(kFakeReliableStreamReset, new_params.reliable_stream_reset);
EXPECT_EQ(kActiveConnectionIdLimitForTest,
new_params.active_connection_id_limit.value());
ASSERT_TRUE(new_params.initial_source_connection_id.has_value());
EXPECT_EQ(CreateFakeInitialSourceConnectionId(),
new_params.initial_source_connection_id.value());
EXPECT_FALSE(new_params.retry_source_connection_id.has_value());
EXPECT_EQ(kFakeInitialRoundTripTime,
new_params.initial_round_trip_time_us.value());
ASSERT_TRUE(new_params.google_connection_options.has_value());
EXPECT_EQ(CreateFakeGoogleConnectionOptions(),
new_params.google_connection_options.value());
std::string expected_google_handshake_message;
ASSERT_TRUE(absl::HexStringToBytes(kFakeGoogleHandshakeMessage,
&expected_google_handshake_message));
EXPECT_EQ(expected_google_handshake_message,
new_params.google_handshake_message);
}
TEST_P(TransportParametersTest,
ParseClientParamsFailsWithFullStatelessResetToken) {
  const uint8_t kClientParamsWithFullToken[] = {
      // max_idle_timeout
      0x01,        // parameter id
      0x02,        // length
      0x6e, 0xec,  // value
      // stateless_reset_token (not allowed from clients)
      0x02,  // parameter id
      0x10,  // length (16)
      0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
      0x98, 0x99, 0x9A, 0x9B, 0x9C, 0x9D, 0x9E, 0x9F,
      // max_udp_payload_size
      0x03,        // parameter id
      0x02,        // length
      0x63, 0x29,  // value
      // initial_max_data
      0x04,        // parameter id
      0x02,        // length
      0x40, 0x65,  // value
  };
const uint8_t* client_params =
reinterpret_cast<const uint8_t*>(kClientParamsWithFullToken);
size_t client_params_length = ABSL_ARRAYSIZE(kClientParamsWithFullToken);
TransportParameters out_params;
std::string error_details;
EXPECT_FALSE(ParseTransportParameters(version_, Perspective::IS_CLIENT,
client_params, client_params_length,
&out_params, &error_details));
EXPECT_EQ(error_details, "Client cannot send stateless reset token");
}
TEST_P(TransportParametersTest,
ParseClientParamsFailsWithEmptyStatelessResetToken) {
  const uint8_t kClientParamsWithEmptyToken[] = {
      // max_idle_timeout
      0x01,        // parameter id
      0x02,        // length
      0x6e, 0xec,  // value
      // stateless_reset_token (invalid: empty)
      0x02,  // parameter id
      0x00,  // length (0)
      // max_udp_payload_size
      0x03,        // parameter id
      0x02,        // length
      0x63, 0x29,  // value
      // initial_max_data
      0x04,        // parameter id
      0x02,        // length
      0x40, 0x65,  // value
  };
const uint8_t* client_params =
reinterpret_cast<const uint8_t*>(kClientParamsWithEmptyToken);
size_t client_params_length = ABSL_ARRAYSIZE(kClientParamsWithEmptyToken);
TransportParameters out_params;
std::string error_details;
EXPECT_FALSE(ParseTransportParameters(version_, Perspective::IS_CLIENT,
client_params, client_params_length,
&out_params, &error_details));
EXPECT_EQ(error_details,
"Received stateless_reset_token of invalid length 0");
}
TEST_P(TransportParametersTest, ParseClientParametersRepeated) {
  const uint8_t kClientParamsRepeated[] = {
      // max_idle_timeout
      0x01,        // parameter id
      0x02,        // length
      0x6e, 0xec,  // value
      // max_udp_payload_size
      0x03,        // parameter id
      0x02,        // length
      0x63, 0x29,  // value
      // max_idle_timeout (repeated, which is invalid)
      0x01,        // parameter id
      0x02,        // length
      0x6e, 0xec,  // value
  };
const uint8_t* client_params =
reinterpret_cast<const uint8_t*>(kClientParamsRepeated);
size_t client_params_length = ABSL_ARRAYSIZE(kClientParamsRepeated);
TransportParameters out_params;
std::string error_details;
EXPECT_FALSE(ParseTransportParameters(version_, Perspective::IS_CLIENT,
client_params, client_params_length,
&out_params, &error_details));
EXPECT_EQ(error_details, "Received a second max_idle_timeout");
}
TEST_P(TransportParametersTest, ParseServerParams) {
  const uint8_t kServerParams[] = {
      // original_destination_connection_id
      0x00,  // parameter id
      0x08,  // length
      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x13, 0x37,  // value
      // max_idle_timeout
      0x01,        // parameter id
      0x02,        // length
      0x6e, 0xec,  // value (varint 12012)
      // stateless_reset_token
      0x02,  // parameter id
      0x10,  // length (16)
      0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
      0x98, 0x99, 0x9A, 0x9B, 0x9C, 0x9D, 0x9E, 0x9F,
      // max_udp_payload_size
      0x03,        // parameter id
      0x02,        // length
      0x63, 0x29,  // value (varint 9001)
      // initial_max_data
      0x04,        // parameter id
      0x02,        // length
      0x40, 0x65,  // value (varint 101)
      // initial_max_stream_data_bidi_local
      0x05,        // parameter id
      0x02,        // length
      0x47, 0xD1,  // value (varint 2001)
      // initial_max_stream_data_bidi_remote
      0x06,        // parameter id
      0x02,        // length
      0x47, 0xD2,  // value (varint 2002)
      // initial_max_stream_data_uni
      0x07,        // parameter id
      0x02,        // length
      0x4B, 0xB8,  // value (varint 3000)
      // initial_max_streams_bidi
      0x08,  // parameter id
      0x01,  // length
      0x15,  // value (21)
      // initial_max_streams_uni
      0x09,  // parameter id
      0x01,  // length
      0x16,  // value (22)
      // ack_delay_exponent
      0x0a,  // parameter id
      0x01,  // length
      0x0a,  // value (10)
      // max_ack_delay
      0x0b,  // parameter id
      0x01,  // length
      0x33,  // value (51)
      // min_ack_delay_us
      0x80, 0x00, 0xde, 0x1a,  // parameter id (varint 0xde1a)
      0x02,                    // length
      0x43, 0xe8,              // value (varint 1000)
      // disable_active_migration
      0x0c,  // parameter id
      0x00,  // length
      // reliable_stream_reset
      0xc0, 0x17, 0xf7, 0x58, 0x6d, 0x2c, 0xb5, 0x71,  // parameter id
      0x00,                                            // length
      // preferred_address
      0x0d,  // parameter id
      0x31,  // length (49)
      0x41, 0x42, 0x43, 0x44,  // IPv4 address 65.66.67.68
      0x48, 0x84,              // IPv4 port
      0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,  // IPv6 address
      0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f,
      0x63, 0x36,  // IPv6 port
      0x08,        // connection ID length
      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xBE, 0xEF,  // connection ID
      0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,  // stateless reset token
      0x88, 0x89, 0x8A, 0x8B, 0x8C, 0x8D, 0x8E, 0x8F,
      // active_connection_id_limit
      0x0e,  // parameter id
      0x01,  // length
      0x34,  // value (52)
      // initial_source_connection_id
      0x0f,  // parameter id
      0x08,  // length
      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x23, 0x45,  // value
      // retry_source_connection_id
      0x10,  // parameter id
      0x08,  // length
      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x98, 0x76,  // value
      // google_connection_options
      0x71, 0x28,           // parameter id (varint 0x3128)
      0x0c,                 // length
      'A', 'L', 'P', 'N',   // tag
      'E', 'F', 'G', 0x00,  // tag
      'H', 'I', 'J', 0xff,  // tag
      // Google version extension
      0x80, 0x00, 0x47, 0x52,  // parameter id (varint 0x4752)
      0x0d,                    // length (13)
      0x01, 0x23, 0x45, 0x67,  // version
      0x08,                    // supported versions length in bytes
      0x01, 0x23, 0x45, 0x67,  // supported version 1
      0x89, 0xab, 0xcd, 0xef,  // supported version 2
      // version_information
      0x80, 0xFF, 0x73, 0xDB,  // parameter id (varint 0xff73db)
      0x0C,                    // length
      0x01, 0x23, 0x45, 0x67,  // chosen version
      0x01, 0x23, 0x45, 0x67,  // other version 1
      0x89, 0xab, 0xcd, 0xef,  // other version 2
  };
const uint8_t* server_params =
reinterpret_cast<const uint8_t*>(kServerParams);
size_t server_params_length = ABSL_ARRAYSIZE(kServerParams);
TransportParameters new_params;
std::string error_details;
ASSERT_TRUE(ParseTransportParameters(version_, Perspective::IS_SERVER,
server_params, server_params_length,
&new_params, &error_details))
<< error_details;
EXPECT_TRUE(error_details.empty());
EXPECT_EQ(Perspective::IS_SERVER, new_params.perspective);
ASSERT_TRUE(new_params.legacy_version_information.has_value());
EXPECT_EQ(kFakeVersionLabel,
new_params.legacy_version_information.value().version);
ASSERT_EQ(
2u,
new_params.legacy_version_information.value().supported_versions.size());
EXPECT_EQ(
kFakeVersionLabel,
new_params.legacy_version_information.value().supported_versions[0]);
EXPECT_EQ(
kFakeVersionLabel2,
new_params.legacy_version_information.value().supported_versions[1]);
ASSERT_TRUE(new_params.version_information.has_value());
EXPECT_EQ(new_params.version_information.value(),
CreateFakeVersionInformation());
ASSERT_TRUE(new_params.original_destination_connection_id.has_value());
EXPECT_EQ(CreateFakeOriginalDestinationConnectionId(),
new_params.original_destination_connection_id.value());
EXPECT_EQ(kFakeIdleTimeoutMilliseconds,
new_params.max_idle_timeout_ms.value());
EXPECT_EQ(CreateStatelessResetTokenForTest(),
new_params.stateless_reset_token);
EXPECT_EQ(kMaxPacketSizeForTest, new_params.max_udp_payload_size.value());
EXPECT_EQ(kFakeInitialMaxData, new_params.initial_max_data.value());
EXPECT_EQ(kFakeInitialMaxStreamDataBidiLocal,
new_params.initial_max_stream_data_bidi_local.value());
EXPECT_EQ(kFakeInitialMaxStreamDataBidiRemote,
new_params.initial_max_stream_data_bidi_remote.value());
EXPECT_EQ(kFakeInitialMaxStreamDataUni,
new_params.initial_max_stream_data_uni.value());
EXPECT_EQ(kFakeInitialMaxStreamsBidi,
new_params.initial_max_streams_bidi.value());
EXPECT_EQ(kFakeInitialMaxStreamsUni,
new_params.initial_max_streams_uni.value());
EXPECT_EQ(kAckDelayExponentForTest, new_params.ack_delay_exponent.value());
EXPECT_EQ(kMaxAckDelayForTest, new_params.max_ack_delay.value());
EXPECT_EQ(kMinAckDelayUsForTest, new_params.min_ack_delay_us.value());
EXPECT_EQ(kFakeDisableMigration, new_params.disable_active_migration);
EXPECT_EQ(kFakeReliableStreamReset, new_params.reliable_stream_reset);
ASSERT_NE(nullptr, new_params.preferred_address.get());
EXPECT_EQ(CreateFakeV4SocketAddress(),
new_params.preferred_address->ipv4_socket_address);
EXPECT_EQ(CreateFakeV6SocketAddress(),
new_params.preferred_address->ipv6_socket_address);
EXPECT_EQ(CreateFakePreferredConnectionId(),
new_params.preferred_address->connection_id);
EXPECT_EQ(CreateFakePreferredStatelessResetToken(),
new_params.preferred_address->stateless_reset_token);
EXPECT_EQ(kActiveConnectionIdLimitForTest,
new_params.active_connection_id_limit.value());
ASSERT_TRUE(new_params.initial_source_connection_id.has_value());
EXPECT_EQ(CreateFakeInitialSourceConnectionId(),
new_params.initial_source_connection_id.value());
ASSERT_TRUE(new_params.retry_source_connection_id.has_value());
EXPECT_EQ(CreateFakeRetrySourceConnectionId(),
new_params.retry_source_connection_id.value());
ASSERT_TRUE(new_params.google_connection_options.has_value());
EXPECT_EQ(CreateFakeGoogleConnectionOptions(),
new_params.google_connection_options.value());
}
TEST_P(TransportParametersTest, ParseServerParametersRepeated) {
  const uint8_t kServerParamsRepeated[] = {
      0x00,  // parameter id: original_destination_connection_id
      0x08,  // length
      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x13, 0x37,  // connection ID
      0x01,        // parameter id: max_idle_timeout
      0x02,        // length
      0x6e, 0xec,  // value
      0x02,  // parameter id: stateless_reset_token
      0x10,  // length
      0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
      0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10,  // token
      0x01,        // parameter id: max_idle_timeout (repeated, must be rejected)
      0x02,        // length
      0x6e, 0xec,  // value
  };
const uint8_t* server_params =
reinterpret_cast<const uint8_t*>(kServerParamsRepeated);
size_t server_params_length = ABSL_ARRAYSIZE(kServerParamsRepeated);
TransportParameters out_params;
std::string error_details;
EXPECT_FALSE(ParseTransportParameters(version_, Perspective::IS_SERVER,
server_params, server_params_length,
&out_params, &error_details));
EXPECT_EQ(error_details, "Received a second max_idle_timeout");
}
TEST_P(TransportParametersTest,
ParseServerParametersEmptyOriginalConnectionId) {
  const uint8_t kServerParamsEmptyOriginalConnectionId[] = {
      0x00,  // parameter id: original_destination_connection_id
      0x00,  // length: an empty connection ID, which a server may send
      0x01,        // parameter id: max_idle_timeout
      0x02,        // length
      0x6e, 0xec,  // value
      0x02,  // parameter id: stateless_reset_token
      0x10,  // length
      0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
      0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10,  // token
  };
const uint8_t* server_params =
reinterpret_cast<const uint8_t*>(kServerParamsEmptyOriginalConnectionId);
size_t server_params_length =
ABSL_ARRAYSIZE(kServerParamsEmptyOriginalConnectionId);
TransportParameters out_params;
std::string error_details;
ASSERT_TRUE(ParseTransportParameters(version_, Perspective::IS_SERVER,
server_params, server_params_length,
&out_params, &error_details))
<< error_details;
ASSERT_TRUE(out_params.original_destination_connection_id.has_value());
EXPECT_EQ(out_params.original_destination_connection_id.value(),
EmptyQuicConnectionId());
}
TEST_P(TransportParametersTest, VeryLongCustomParameter) {
std::string custom_value(70000, '?');
TransportParameters orig_params;
orig_params.perspective = Perspective::IS_CLIENT;
orig_params.legacy_version_information =
CreateFakeLegacyVersionInformationClient();
orig_params.custom_parameters[kCustomParameter1] = custom_value;
std::vector<uint8_t> serialized;
ASSERT_TRUE(SerializeTransportParameters(orig_params, &serialized));
TransportParameters new_params;
std::string error_details;
ASSERT_TRUE(ParseTransportParameters(version_, Perspective::IS_CLIENT,
serialized.data(), serialized.size(),
&new_params, &error_details))
<< error_details;
EXPECT_TRUE(error_details.empty());
RemoveGreaseParameters(&new_params);
EXPECT_EQ(new_params, orig_params);
}
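// Serialization randomizes the order of the transport parameters, so
// repeatedly serializing the same parameters should eventually produce a byte
// stream that differs from the first one.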
TEST_P(TransportParametersTest, SerializationOrderIsRandom) {
TransportParameters orig_params;
orig_params.perspective = Perspective::IS_CLIENT;
orig_params.legacy_version_information =
CreateFakeLegacyVersionInformationClient();
orig_params.max_idle_timeout_ms.set_value(kFakeIdleTimeoutMilliseconds);
orig_params.max_udp_payload_size.set_value(kMaxPacketSizeForTest);
orig_params.initial_max_data.set_value(kFakeInitialMaxData);
orig_params.initial_max_stream_data_bidi_local.set_value(
kFakeInitialMaxStreamDataBidiLocal);
orig_params.initial_max_stream_data_bidi_remote.set_value(
kFakeInitialMaxStreamDataBidiRemote);
orig_params.initial_max_stream_data_uni.set_value(
kFakeInitialMaxStreamDataUni);
orig_params.initial_max_streams_bidi.set_value(kFakeInitialMaxStreamsBidi);
orig_params.initial_max_streams_uni.set_value(kFakeInitialMaxStreamsUni);
orig_params.ack_delay_exponent.set_value(kAckDelayExponentForTest);
orig_params.max_ack_delay.set_value(kMaxAckDelayForTest);
orig_params.min_ack_delay_us.set_value(kMinAckDelayUsForTest);
orig_params.disable_active_migration = kFakeDisableMigration;
orig_params.reliable_stream_reset = kFakeReliableStreamReset;
orig_params.active_connection_id_limit.set_value(
kActiveConnectionIdLimitForTest);
orig_params.initial_source_connection_id =
CreateFakeInitialSourceConnectionId();
orig_params.initial_round_trip_time_us.set_value(kFakeInitialRoundTripTime);
orig_params.google_connection_options = CreateFakeGoogleConnectionOptions();
orig_params.custom_parameters[kCustomParameter1] = kCustomParameter1Value;
orig_params.custom_parameters[kCustomParameter2] = kCustomParameter2Value;
std::vector<uint8_t> first_serialized;
ASSERT_TRUE(SerializeTransportParameters(orig_params, &first_serialized));
for (int i = 0; i < 1000; i++) {
std::vector<uint8_t> serialized;
ASSERT_TRUE(SerializeTransportParameters(orig_params, &serialized));
if (serialized != first_serialized) {
return;
}
}
}
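// Serialization also injects GREASE (reserved) parameters to exercise peer
// tolerance; DegreaseTransportParameters strips them so the parsed parameters
// compare equal to the originals.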
TEST_P(TransportParametersTest, Degrease) {
TransportParameters orig_params;
orig_params.perspective = Perspective::IS_CLIENT;
orig_params.legacy_version_information =
CreateFakeLegacyVersionInformationClient();
orig_params.version_information = CreateFakeVersionInformation();
orig_params.max_idle_timeout_ms.set_value(kFakeIdleTimeoutMilliseconds);
orig_params.max_udp_payload_size.set_value(kMaxPacketSizeForTest);
orig_params.initial_max_data.set_value(kFakeInitialMaxData);
orig_params.initial_max_stream_data_bidi_local.set_value(
kFakeInitialMaxStreamDataBidiLocal);
orig_params.initial_max_stream_data_bidi_remote.set_value(
kFakeInitialMaxStreamDataBidiRemote);
orig_params.initial_max_stream_data_uni.set_value(
kFakeInitialMaxStreamDataUni);
orig_params.initial_max_streams_bidi.set_value(kFakeInitialMaxStreamsBidi);
orig_params.initial_max_streams_uni.set_value(kFakeInitialMaxStreamsUni);
orig_params.ack_delay_exponent.set_value(kAckDelayExponentForTest);
orig_params.max_ack_delay.set_value(kMaxAckDelayForTest);
orig_params.min_ack_delay_us.set_value(kMinAckDelayUsForTest);
orig_params.disable_active_migration = kFakeDisableMigration;
orig_params.reliable_stream_reset = kFakeReliableStreamReset;
orig_params.active_connection_id_limit.set_value(
kActiveConnectionIdLimitForTest);
orig_params.initial_source_connection_id =
CreateFakeInitialSourceConnectionId();
orig_params.initial_round_trip_time_us.set_value(kFakeInitialRoundTripTime);
std::string google_handshake_message;
ASSERT_TRUE(absl::HexStringToBytes(kFakeGoogleHandshakeMessage,
&google_handshake_message));
orig_params.google_handshake_message = std::move(google_handshake_message);
orig_params.google_connection_options = CreateFakeGoogleConnectionOptions();
orig_params.custom_parameters[kCustomParameter1] = kCustomParameter1Value;
orig_params.custom_parameters[kCustomParameter2] = kCustomParameter2Value;
std::vector<uint8_t> serialized;
ASSERT_TRUE(SerializeTransportParameters(orig_params, &serialized));
TransportParameters new_params;
std::string error_details;
ASSERT_TRUE(ParseTransportParameters(version_, Perspective::IS_CLIENT,
serialized.data(), serialized.size(),
&new_params, &error_details))
<< error_details;
EXPECT_TRUE(error_details.empty());
EXPECT_NE(new_params, orig_params);
DegreaseTransportParameters(new_params);
EXPECT_EQ(new_params, orig_params);
}
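// The tests below check which fields feed into ticket serialization: values
// that may legitimately differ across resumed connections (stateless reset
// token, connection IDs) must not change the output, while flow-control
// limits and the application state must.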
class TransportParametersTicketSerializationTest : public QuicTest {
protected:
void SetUp() override {
original_params_.perspective = Perspective::IS_SERVER;
original_params_.legacy_version_information =
CreateFakeLegacyVersionInformationServer();
original_params_.original_destination_connection_id =
CreateFakeOriginalDestinationConnectionId();
original_params_.max_idle_timeout_ms.set_value(
kFakeIdleTimeoutMilliseconds);
original_params_.stateless_reset_token = CreateStatelessResetTokenForTest();
original_params_.max_udp_payload_size.set_value(kMaxPacketSizeForTest);
original_params_.initial_max_data.set_value(kFakeInitialMaxData);
original_params_.initial_max_stream_data_bidi_local.set_value(
kFakeInitialMaxStreamDataBidiLocal);
original_params_.initial_max_stream_data_bidi_remote.set_value(
kFakeInitialMaxStreamDataBidiRemote);
original_params_.initial_max_stream_data_uni.set_value(
kFakeInitialMaxStreamDataUni);
original_params_.initial_max_streams_bidi.set_value(
kFakeInitialMaxStreamsBidi);
original_params_.initial_max_streams_uni.set_value(
kFakeInitialMaxStreamsUni);
original_params_.ack_delay_exponent.set_value(kAckDelayExponentForTest);
original_params_.max_ack_delay.set_value(kMaxAckDelayForTest);
original_params_.min_ack_delay_us.set_value(kMinAckDelayUsForTest);
original_params_.disable_active_migration = kFakeDisableMigration;
original_params_.reliable_stream_reset = kFakeReliableStreamReset;
original_params_.preferred_address = CreateFakePreferredAddress();
original_params_.active_connection_id_limit.set_value(
kActiveConnectionIdLimitForTest);
original_params_.initial_source_connection_id =
CreateFakeInitialSourceConnectionId();
original_params_.retry_source_connection_id =
CreateFakeRetrySourceConnectionId();
original_params_.google_connection_options =
CreateFakeGoogleConnectionOptions();
ASSERT_TRUE(SerializeTransportParametersForTicket(
original_params_, application_state_, &original_serialized_params_));
}
TransportParameters original_params_;
std::vector<uint8_t> application_state_ = {0, 1};
std::vector<uint8_t> original_serialized_params_;
};
TEST_F(TransportParametersTicketSerializationTest,
StatelessResetTokenDoesntChangeOutput) {
TransportParameters new_params = original_params_;
new_params.stateless_reset_token = CreateFakePreferredStatelessResetToken();
EXPECT_NE(new_params, original_params_);
std::vector<uint8_t> serialized;
ASSERT_TRUE(SerializeTransportParametersForTicket(
new_params, application_state_, &serialized));
EXPECT_EQ(original_serialized_params_, serialized);
}
TEST_F(TransportParametersTicketSerializationTest,
ConnectionIDDoesntChangeOutput) {
TransportParameters new_params = original_params_;
new_params.original_destination_connection_id = TestConnectionId(0xCAFE);
EXPECT_NE(new_params, original_params_);
std::vector<uint8_t> serialized;
ASSERT_TRUE(SerializeTransportParametersForTicket(
new_params, application_state_, &serialized));
EXPECT_EQ(original_serialized_params_, serialized);
}
TEST_F(TransportParametersTicketSerializationTest, StreamLimitChangesOutput) {
TransportParameters new_params = original_params_;
new_params.initial_max_stream_data_bidi_local.set_value(
kFakeInitialMaxStreamDataBidiLocal + 1);
EXPECT_NE(new_params, original_params_);
std::vector<uint8_t> serialized;
ASSERT_TRUE(SerializeTransportParametersForTicket(
new_params, application_state_, &serialized));
EXPECT_NE(original_serialized_params_, serialized);
}
TEST_F(TransportParametersTicketSerializationTest,
ApplicationStateChangesOutput) {
std::vector<uint8_t> new_application_state = {0};
EXPECT_NE(new_application_state, application_state_);
std::vector<uint8_t> serialized;
ASSERT_TRUE(SerializeTransportParametersForTicket(
original_params_, new_application_state, &serialized));
EXPECT_NE(original_serialized_params_, serialized);
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/crypto/transport_parameters.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/crypto/transport_parameters_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
64411ee4-506a-45b4-acc7-4283142cc2d5 | cpp | tensorflow/tensorflow | xla_expression | tensorflow/compiler/tf2xla/xla_expression.cc | tensorflow/compiler/tf2xla/xla_expression_test.cc | #include "tensorflow/compiler/tf2xla/xla_expression.h"
#include <numeric>
#include <optional>
#include <vector>

#include "tensorflow/compiler/tf2xla/literal_util.h"
#include "tensorflow/compiler/tf2xla/shape_util.h"
#include "xla/hlo/builder/value_inference.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/lib/core/errors.h"
namespace tensorflow {
XlaExpression::XlaExpression() = default;
XlaExpression XlaExpression::Invalid() {
XlaExpression e;
e.kind_ = Kind::kInvalid;
return e;
}
XlaExpression XlaExpression::Constant(Tensor value) {
XlaExpression e;
e.kind_ = Kind::kConstant;
e.dtype_ = value.dtype();
e.constant_value_ = value;
return e;
}
XlaExpression XlaExpression::ConstantResource(Tensor value,
XlaResource* resource) {
XlaExpression e;
e.kind_ = Kind::kResource;
e.dtype_ = DT_RESOURCE;
e.resource_ = resource;
e.constant_value_ = value;
return e;
}
XlaExpression XlaExpression::XlaOp(xla::XlaOp value, DataType dtype) {
XlaExpression e;
e.kind_ = Kind::kXlaOp;
e.dtype_ = dtype;
e.handle_ = value;
return e;
}
XlaExpression XlaExpression::TensorList(xla::XlaOp tensor_list) {
XlaExpression e;
e.kind_ = Kind::kTensorList;
e.dtype_ = DT_VARIANT;
e.handle_ = tensor_list;
return e;
}
XlaExpression XlaExpression::Resource(XlaResource* resource) {
XlaExpression e;
e.kind_ = Kind::kResource;
e.dtype_ = DT_RESOURCE;
e.resource_ = resource;
return e;
}
string XlaExpression::HumanString() const {
switch (kind_) {
case Kind::kInvalid:
return "invalid";
case Kind::kConstant:
return "constant";
case Kind::kXlaOp:
return "xla_op";
case Kind::kResource:
return "resource";
case Kind::kTensorList:
return "tensor_list";
}
}
xla::XlaOp XlaExpression::AsXlaOp(xla::XlaBuilder* builder) const {
return builder->ReportErrorOrReturn([&]() -> absl::StatusOr<xla::XlaOp> {
switch (kind_) {
case Kind::kConstant: {
xla::BorrowingLiteral literal;
TF_RETURN_IF_ERROR(
HostTensorToBorrowingLiteral(*constant_value_, &literal));
return xla::ConstantLiteral(builder, literal);
}
case Kind::kTensorList:
TF_FALLTHROUGH_INTENDED;
case Kind::kXlaOp:
if (builder != handle_.builder()) {
return errors::InvalidArgument(
"Mismatched builders in XlaExpression::AsXlaOp");
}
return handle_;
default:
return errors::InvalidArgument("AsXlaOp called on XlaExpression: ",
HumanString());
}
});
}
absl::StatusOr<Tensor> XlaExpression::ResolveDynamism() const {
switch (kind()) {
case Kind::kConstant: {
Tensor constant_false(DT_BOOL, constant_value()->shape());
auto flat = constant_false.flat<bool>();
for (int64_t i = 0; i < flat.size(); ++i) flat(i) = false;
return constant_false;
}
case Kind::kXlaOp:
break;
case Kind::kTensorList:
TF_FALLTHROUGH_INTENDED;
case Kind::kResource:
TF_FALLTHROUGH_INTENDED;
case Kind::kInvalid:
return errors::InvalidArgument(
"ResolveDynamism called on unsupported XlaExpression: ",
HumanString());
}
TF_ASSIGN_OR_RETURN(TensorShape shape, GetShape());
std::vector<int64_t> layout_indices(shape.dims());
std::iota(layout_indices.rbegin(), layout_indices.rend(), 0);
xla::ValueInference value_inference(handle().builder());
TF_ASSIGN_OR_RETURN(xla::LiteralSlice literal,
value_inference.AnalyzeIsDynamic(handle()));
Tensor tensor(DT_BOOL);
TF_RETURN_IF_ERROR(LiteralToHostTensor(literal, DT_BOOL, &tensor));
return tensor;
}
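// Attempts to resolve this expression to a host-side constant Tensor:
// constants and constant-backed resources are returned directly, while XLA
// ops are evaluated through value inference or a constant subgraph.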
absl::StatusOr<std::optional<Tensor>> XlaExpression::ResolveConstant(
xla::Client* client, bool dynamic_dimension_is_minus_one,
xla::ValueInferenceMode mode) const {
switch (kind()) {
case Kind::kConstant:
case Kind::kResource:
return constant_value();
case Kind::kXlaOp:
break;
case Kind::kTensorList:
TF_FALLTHROUGH_INTENDED;
case Kind::kInvalid:
return errors::InvalidArgument(
"ResolveConstant called on XlaExpression: ", HumanString());
}
TF_ASSIGN_OR_RETURN(TensorShape shape, GetShape());
std::vector<int64_t> layout_indices(shape.dims());
std::iota(layout_indices.rbegin(), layout_indices.rend(), 0);
xla::Layout layout = xla::LayoutUtil::MakeLayout(layout_indices);
if (mode == xla::ValueInferenceMode::kLowerBound ||
mode == xla::ValueInferenceMode::kUpperBound ||
mode == xla::ValueInferenceMode::kValue) {
std::vector<int64_t> layout_indices(shape.dims());
std::iota(layout_indices.rbegin(), layout_indices.rend(), 0);
xla::ValueInference value_inference(handle().builder());
TF_ASSIGN_OR_RETURN(xla::OptionalLiteral literal,
value_inference.AnalyzeConstant(handle(), mode));
if (!literal.GetValue().has_value()) {
return {std::nullopt};
}
Tensor tensor;
TF_RETURN_IF_ERROR(LiteralToHostTensor(
literal.GetValue().value().Relayout(layout), dtype(), &tensor));
return {tensor};
}
TF_ASSIGN_OR_RETURN(bool is_constant,
handle().builder()->IsConstant(handle()));
if (!is_constant) {
return {std::nullopt};
}
if (!client)
return errors::InvalidArgument("client is required to resolve constant");
TF_ASSIGN_OR_RETURN(xla::XlaComputation constant_graph,
handle().builder()->BuildConstantSubGraph(
handle(), dynamic_dimension_is_minus_one));
TF_ASSIGN_OR_RETURN(xla::Literal literal,
client->ComputeConstant(constant_graph, &layout));
Tensor tensor;
TF_RETURN_IF_ERROR(LiteralToHostTensor(literal, dtype(), &tensor));
return {tensor};
}
absl::StatusOr<TensorShape> XlaExpression::GetShape() const {
switch (kind_) {
case Kind::kConstant:
return constant_value()->shape();
case Kind::kResource:
if (constant_value()) {
return constant_value()->shape();
}
return TensorShape({});
case Kind::kXlaOp: {
TF_ASSIGN_OR_RETURN(xla::Shape xla_shape,
handle().builder()->GetShape(handle()));
TensorShape shape;
TF_RETURN_IF_ERROR(XLAShapeToTensorShape(xla_shape, &shape));
return shape;
}
case Kind::kTensorList:
return TensorShape({});
case Kind::kInvalid:
return errors::InvalidArgument(
"GetShape() called on invalid XlaExpression");
}
}
absl::StatusOr<xla::Shape> XlaExpression::GetXlaShape() const {
if (kind_ == Kind::kXlaOp) {
return handle().builder()->GetShape(handle());
}
TF_ASSIGN_OR_RETURN(TensorShape shape, GetShape());
return TensorShapeToXLAShape(dtype_, shape);
}
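// During tf2xla compilation, XlaExpressions are stored directly inside a
// Tensor's data buffer, so these two helpers reinterpret that buffer as an
// XlaExpression rather than copying anything.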
const XlaExpression* XlaExpression::CastExpressionFromTensor(
const Tensor& tensor) {
const XlaExpression* expression =
reinterpret_cast<const XlaExpression*>(tensor.tensor_data().data());
CHECK(expression->kind() != XlaExpression::Kind::kInvalid)
<< expression->HumanString();
return expression;
}
void XlaExpression::AssignExpressionToTensor(const XlaExpression& value,
Tensor* tensor) {
const XlaExpression* expression =
reinterpret_cast<const XlaExpression*>(tensor->tensor_data().data());
CHECK(expression->kind() == XlaExpression::Kind::kInvalid)
<< expression->HumanString();
*const_cast<XlaExpression*>(expression) = value;
}
} | #include "tensorflow/compiler/tf2xla/xla_expression.h"
#include <memory>
#include <optional>
#include <set>
#include <string>
#include "absl/memory/memory.h"
#include "tensorflow/compiler/tf2xla/xla_resource.h"
#include "xla/client/client_library.h"
#include "xla/client/local_client.h"
#include "xla/hlo/builder/xla_builder.h"
#include "xla/literal.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/tests/literal_test_util.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
class XlaExpressionTest : public ::testing::Test {
protected:
void SetUp() override {
client_ = xla::ClientLibrary::LocalClientOrDie();
builder_ = std::make_unique<xla::XlaBuilder>("acomputation");
constant_ = test::AsScalar<int32>(42);
op_ = xla::ConstantR0<int32>(builder_.get(), 7);
non_constant_op_ = xla::Parameter(
builder_.get(), 0, xla::ShapeUtil::MakeShape(xla::F32, {}), "x");
resource_ = std::make_unique<XlaResource>(
XlaResource::kVariable, 0, string("avariable"),
DT_INT32, TensorShape({17, 3}), op_, -1,
std::set<string>(),
false);
}
xla::Client* client_;
std::unique_ptr<xla::XlaBuilder> builder_;
Tensor constant_;
xla::XlaOp op_;
xla::XlaOp non_constant_op_;
std::unique_ptr<XlaResource> resource_;
};
TEST_F(XlaExpressionTest, Kind) {
EXPECT_TRUE(XlaExpression::Kind::kInvalid == XlaExpression().kind());
EXPECT_TRUE(XlaExpression::Kind::kInvalid == XlaExpression::Invalid().kind());
EXPECT_TRUE(XlaExpression::Kind::kConstant ==
XlaExpression::Constant(constant_).kind());
EXPECT_TRUE(XlaExpression::Kind::kXlaOp ==
XlaExpression::XlaOp(op_, DT_INT32).kind());
EXPECT_TRUE(XlaExpression::Kind::kResource ==
XlaExpression::Resource(resource_.get()).kind());
}
TEST_F(XlaExpressionTest, HumanString) {
EXPECT_EQ("invalid", XlaExpression().HumanString());
EXPECT_EQ("invalid", XlaExpression::Invalid().HumanString());
EXPECT_EQ("constant", XlaExpression::Constant(constant_).HumanString());
EXPECT_EQ("xla_op", XlaExpression::XlaOp(op_, DT_INT32).HumanString());
EXPECT_EQ("resource", XlaExpression::Resource(resource_.get()).HumanString());
}
TEST_F(XlaExpressionTest, AsXlaOp) {
xla::XlaOp op_as_op =
XlaExpression::XlaOp(op_, DT_INT32).AsXlaOp(builder_.get());
EXPECT_TRUE(op_.IsIdenticalTo(op_as_op));
xla::XlaOp const_as_op =
XlaExpression::Constant(constant_).AsXlaOp(builder_.get());
TF_ASSERT_OK_AND_ASSIGN(xla::XlaComputation computation,
builder_->BuildConstantSubGraph(const_as_op));
TF_ASSERT_OK_AND_ASSIGN(xla::Literal value,
client_->ComputeConstant(computation));
EXPECT_TRUE(xla::LiteralTestUtil::Equal(xla::LiteralUtil::CreateR0<int32>(42),
value));
}
TEST_F(XlaExpressionTest, GetShape) {
EXPECT_FALSE(XlaExpression().GetShape().ok());
EXPECT_FALSE(XlaExpression::Invalid().GetShape().ok());
TF_ASSERT_OK_AND_ASSIGN(TensorShape resource_shape,
XlaExpression::Resource(resource_.get()).GetShape());
EXPECT_EQ(TensorShape({}), resource_shape);
TF_ASSERT_OK_AND_ASSIGN(TensorShape op_shape,
XlaExpression::XlaOp(op_, DT_INT32).GetShape());
EXPECT_EQ(TensorShape({}), op_shape);
TF_ASSERT_OK_AND_ASSIGN(TensorShape constant_shape,
XlaExpression::Constant(constant_).GetShape());
EXPECT_EQ(TensorShape({}), constant_shape);
}
TEST_F(XlaExpressionTest, ResolveConstant) {
EXPECT_FALSE(XlaExpression().ResolveConstant(client_).ok());
EXPECT_FALSE(XlaExpression::Invalid().ResolveConstant(client_).ok());
EXPECT_FALSE(XlaExpression::Resource(resource_.get())
.ResolveConstant(client_)
->has_value());
TF_ASSERT_OK_AND_ASSIGN(
std::optional<Tensor> op_constant,
XlaExpression::XlaOp(op_, DT_INT32).ResolveConstant(client_));
ASSERT_TRUE(op_constant.has_value());
test::ExpectTensorEqual<int32>(test::AsScalar<int32>(7), *op_constant);
TF_ASSERT_OK_AND_ASSIGN(std::optional<Tensor> op_nonconstant,
XlaExpression::XlaOp(non_constant_op_, DT_FLOAT)
.ResolveConstant(client_));
EXPECT_FALSE(op_nonconstant.has_value());
TF_ASSERT_OK_AND_ASSIGN(
std::optional<Tensor> constant_constant,
XlaExpression::Constant(constant_).ResolveConstant(client_));
ASSERT_TRUE(constant_constant.has_value());
test::ExpectTensorEqual<int32>(constant_, *constant_constant);
}
TEST_F(XlaExpressionTest, ResolveConstantOnResource) {
XlaExpression constant_resource =
XlaExpression::ConstantResource(constant_, resource_.get());
EXPECT_TRUE(constant_resource.ResolveConstant(client_).ok());
EXPECT_TRUE(resource_->SetZeroValue(builder_.get()).ok());
LOG(ERROR) << "Resource is overwritten: " << resource_->IsOverwritten();
absl::StatusOr<std::optional<Tensor>> resolved_constant =
constant_resource.ResolveConstant(client_);
EXPECT_TRUE(resolved_constant.ok());
EXPECT_FALSE(resolved_constant->has_value());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/xla_expression.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/xla_expression_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
f83be770-40eb-47f8-bdc5-9088f094734f | cpp | abseil/abseil-cpp | container_memory | absl/container/internal/container_memory.h | absl/container/internal/container_memory_test.cc | #ifndef ABSL_CONTAINER_INTERNAL_CONTAINER_MEMORY_H_
#define ABSL_CONTAINER_INTERNAL_CONTAINER_MEMORY_H_
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <memory>
#include <new>
#include <tuple>
#include <type_traits>
#include <utility>
#include "absl/base/config.h"
#include "absl/memory/memory.h"
#include "absl/meta/type_traits.h"
#include "absl/utility/utility.h"
#ifdef ABSL_HAVE_ADDRESS_SANITIZER
#include <sanitizer/asan_interface.h>
#endif
#ifdef ABSL_HAVE_MEMORY_SANITIZER
#include <sanitizer/msan_interface.h>
#endif
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace container_internal {
template <size_t Alignment>
struct alignas(Alignment) AlignedType {};
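// Allocates at least n bytes aligned to the requested Alignment by rebinding
// the allocator to an AlignedType of that alignment; the result must be freed
// with Deallocate<Alignment> using a compatible allocator.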
template <size_t Alignment, class Alloc>
void* Allocate(Alloc* alloc, size_t n) {
static_assert(Alignment > 0, "");
assert(n && "n must be positive");
using M = AlignedType<Alignment>;
using A = typename absl::allocator_traits<Alloc>::template rebind_alloc<M>;
using AT = typename absl::allocator_traits<Alloc>::template rebind_traits<M>;
A my_mem_alloc(*alloc);
void* p = AT::allocate(my_mem_alloc, (n + sizeof(M) - 1) / sizeof(M));
assert(reinterpret_cast<uintptr_t>(p) % Alignment == 0 &&
"allocator does not respect alignment");
return p;
}
template <class Allocator, class ValueType>
constexpr auto IsDestructionTrivial() {
constexpr bool result =
std::is_trivially_destructible<ValueType>::value &&
std::is_same<typename absl::allocator_traits<
Allocator>::template rebind_alloc<char>,
std::allocator<char>>::value;
return std::integral_constant<bool, result>();
}
template <size_t Alignment, class Alloc>
void Deallocate(Alloc* alloc, void* p, size_t n) {
static_assert(Alignment > 0, "");
assert(n && "n must be positive");
using M = AlignedType<Alignment>;
using A = typename absl::allocator_traits<Alloc>::template rebind_alloc<M>;
using AT = typename absl::allocator_traits<Alloc>::template rebind_traits<M>;
A my_mem_alloc(*alloc);
AT::deallocate(my_mem_alloc, static_cast<M*>(p),
(n + sizeof(M) - 1) / sizeof(M));
}
namespace memory_internal {
template <class Alloc, class T, class Tuple, size_t... I>
void ConstructFromTupleImpl(Alloc* alloc, T* ptr, Tuple&& t,
absl::index_sequence<I...>) {
absl::allocator_traits<Alloc>::construct(
*alloc, ptr, std::get<I>(std::forward<Tuple>(t))...);
}
template <class T, class F>
struct WithConstructedImplF {
template <class... Args>
decltype(std::declval<F>()(std::declval<T>())) operator()(
Args&&... args) const {
return std::forward<F>(f)(T(std::forward<Args>(args)...));
}
F&& f;
};
template <class T, class Tuple, size_t... Is, class F>
decltype(std::declval<F>()(std::declval<T>())) WithConstructedImpl(
Tuple&& t, absl::index_sequence<Is...>, F&& f) {
return WithConstructedImplF<T, F>{std::forward<F>(f)}(
std::get<Is>(std::forward<Tuple>(t))...);
}
template <class T, size_t... Is>
auto TupleRefImpl(T&& t, absl::index_sequence<Is...>)
-> decltype(std::forward_as_tuple(std::get<Is>(std::forward<T>(t))...)) {
return std::forward_as_tuple(std::get<Is>(std::forward<T>(t))...);
}
template <class T>
auto TupleRef(T&& t) -> decltype(TupleRefImpl(
std::forward<T>(t),
absl::make_index_sequence<
std::tuple_size<typename std::decay<T>::type>::value>())) {
return TupleRefImpl(
std::forward<T>(t),
absl::make_index_sequence<
std::tuple_size<typename std::decay<T>::type>::value>());
}
template <class F, class K, class V>
decltype(std::declval<F>()(std::declval<const K&>(), std::piecewise_construct,
std::declval<std::tuple<K>>(), std::declval<V>()))
DecomposePairImpl(F&& f, std::pair<std::tuple<K>, V> p) {
const auto& key = std::get<0>(p.first);
return std::forward<F>(f)(key, std::piecewise_construct, std::move(p.first),
std::move(p.second));
}
}
template <class Alloc, class T, class Tuple>
void ConstructFromTuple(Alloc* alloc, T* ptr, Tuple&& t) {
memory_internal::ConstructFromTupleImpl(
alloc, ptr, std::forward<Tuple>(t),
absl::make_index_sequence<
std::tuple_size<typename std::decay<Tuple>::type>::value>());
}
template <class T, class Tuple, class F>
decltype(std::declval<F>()(std::declval<T>())) WithConstructed(Tuple&& t,
F&& f) {
return memory_internal::WithConstructedImpl<T>(
std::forward<Tuple>(t),
absl::make_index_sequence<
std::tuple_size<typename std::decay<Tuple>::type>::value>(),
std::forward<F>(f));
}
inline std::pair<std::tuple<>, std::tuple<>> PairArgs() { return {}; }
template <class F, class S>
std::pair<std::tuple<F&&>, std::tuple<S&&>> PairArgs(F&& f, S&& s) {
return {std::piecewise_construct, std::forward_as_tuple(std::forward<F>(f)),
std::forward_as_tuple(std::forward<S>(s))};
}
template <class F, class S>
std::pair<std::tuple<const F&>, std::tuple<const S&>> PairArgs(
const std::pair<F, S>& p) {
return PairArgs(p.first, p.second);
}
template <class F, class S>
std::pair<std::tuple<F&&>, std::tuple<S&&>> PairArgs(std::pair<F, S>&& p) {
return PairArgs(std::forward<F>(p.first), std::forward<S>(p.second));
}
template <class F, class S>
auto PairArgs(std::piecewise_construct_t, F&& f, S&& s)
-> decltype(std::make_pair(memory_internal::TupleRef(std::forward<F>(f)),
memory_internal::TupleRef(std::forward<S>(s)))) {
return std::make_pair(memory_internal::TupleRef(std::forward<F>(f)),
memory_internal::TupleRef(std::forward<S>(s)));
}
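// If the given arguments can be interpreted as a map key/value pair, invokes
// f with the key, std::piecewise_construct, and the key/value argument
// tuples; otherwise this overload is removed from overload resolution.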
template <class F, class... Args>
auto DecomposePair(F&& f, Args&&... args)
-> decltype(memory_internal::DecomposePairImpl(
std::forward<F>(f), PairArgs(std::forward<Args>(args)...))) {
return memory_internal::DecomposePairImpl(
std::forward<F>(f), PairArgs(std::forward<Args>(args)...));
}
template <class F, class Arg>
decltype(std::declval<F>()(std::declval<const Arg&>(), std::declval<Arg>()))
DecomposeValue(F&& f, Arg&& arg) {
const auto& key = arg;
return std::forward<F>(f)(key, std::forward<Arg>(arg));
}
inline void SanitizerPoisonMemoryRegion(const void* m, size_t s) {
#ifdef ABSL_HAVE_ADDRESS_SANITIZER
ASAN_POISON_MEMORY_REGION(m, s);
#endif
#ifdef ABSL_HAVE_MEMORY_SANITIZER
__msan_poison(m, s);
#endif
(void)m;
(void)s;
}
inline void SanitizerUnpoisonMemoryRegion(const void* m, size_t s) {
#ifdef ABSL_HAVE_ADDRESS_SANITIZER
ASAN_UNPOISON_MEMORY_REGION(m, s);
#endif
#ifdef ABSL_HAVE_MEMORY_SANITIZER
__msan_unpoison(m, s);
#endif
(void)m;
(void)s;
}
template <typename T>
inline void SanitizerPoisonObject(const T* object) {
SanitizerPoisonMemoryRegion(object, sizeof(T));
}
template <typename T>
inline void SanitizerUnpoisonObject(const T* object) {
SanitizerUnpoisonMemoryRegion(object, sizeof(T));
}
namespace memory_internal {
template <class Pair, class = std::true_type>
struct OffsetOf {
static constexpr size_t kFirst = static_cast<size_t>(-1);
static constexpr size_t kSecond = static_cast<size_t>(-1);
};
template <class Pair>
struct OffsetOf<Pair, typename std::is_standard_layout<Pair>::type> {
static constexpr size_t kFirst = offsetof(Pair, first);
static constexpr size_t kSecond = offsetof(Pair, second);
};
template <class K, class V>
struct IsLayoutCompatible {
private:
struct Pair {
K first;
V second;
};
template <class P>
static constexpr bool LayoutCompatible() {
return std::is_standard_layout<P>() && sizeof(P) == sizeof(Pair) &&
alignof(P) == alignof(Pair) &&
memory_internal::OffsetOf<P>::kFirst ==
memory_internal::OffsetOf<Pair>::kFirst &&
memory_internal::OffsetOf<P>::kSecond ==
memory_internal::OffsetOf<Pair>::kSecond;
}
public:
static constexpr bool value = std::is_standard_layout<K>() &&
std::is_standard_layout<Pair>() &&
memory_internal::OffsetOf<Pair>::kFirst == 0 &&
LayoutCompatible<std::pair<K, V>>() &&
LayoutCompatible<std::pair<const K, V>>();
};
}
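// Storage slot for map-like containers. When the key/value layouts are
// compatible (see IsLayoutCompatible above), the union lets the container
// move or mutate the key through `mutable_value`/`key` without casting away
// the constness of `value.first`.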
template <class K, class V>
union map_slot_type {
map_slot_type() {}
~map_slot_type() = delete;
using value_type = std::pair<const K, V>;
using mutable_value_type =
std::pair<absl::remove_const_t<K>, absl::remove_const_t<V>>;
value_type value;
mutable_value_type mutable_value;
absl::remove_const_t<K> key;
};
template <class K, class V>
struct map_slot_policy {
using slot_type = map_slot_type<K, V>;
using value_type = std::pair<const K, V>;
using mutable_value_type =
std::pair<absl::remove_const_t<K>, absl::remove_const_t<V>>;
private:
static void emplace(slot_type* slot) {
new (slot) slot_type;
}
using kMutableKeys = memory_internal::IsLayoutCompatible<K, V>;
public:
static value_type& element(slot_type* slot) { return slot->value; }
static const value_type& element(const slot_type* slot) {
return slot->value;
}
#if defined(__cpp_lib_launder) && __cpp_lib_launder >= 201606
static K& mutable_key(slot_type* slot) {
return kMutableKeys::value ? slot->key
: *std::launder(const_cast<K*>(
std::addressof(slot->value.first)));
}
#else
static const K& mutable_key(slot_type* slot) { return key(slot); }
#endif
static const K& key(const slot_type* slot) {
return kMutableKeys::value ? slot->key : slot->value.first;
}
template <class Allocator, class... Args>
static void construct(Allocator* alloc, slot_type* slot, Args&&... args) {
emplace(slot);
if (kMutableKeys::value) {
absl::allocator_traits<Allocator>::construct(*alloc, &slot->mutable_value,
std::forward<Args>(args)...);
} else {
absl::allocator_traits<Allocator>::construct(*alloc, &slot->value,
std::forward<Args>(args)...);
}
}
template <class Allocator>
static void construct(Allocator* alloc, slot_type* slot, slot_type* other) {
emplace(slot);
if (kMutableKeys::value) {
absl::allocator_traits<Allocator>::construct(
*alloc, &slot->mutable_value, std::move(other->mutable_value));
} else {
absl::allocator_traits<Allocator>::construct(*alloc, &slot->value,
std::move(other->value));
}
}
template <class Allocator>
static void construct(Allocator* alloc, slot_type* slot,
const slot_type* other) {
emplace(slot);
absl::allocator_traits<Allocator>::construct(*alloc, &slot->value,
other->value);
}
template <class Allocator>
static auto destroy(Allocator* alloc, slot_type* slot) {
if (kMutableKeys::value) {
absl::allocator_traits<Allocator>::destroy(*alloc, &slot->mutable_value);
} else {
absl::allocator_traits<Allocator>::destroy(*alloc, &slot->value);
}
return IsDestructionTrivial<Allocator, value_type>();
}
template <class Allocator>
static auto transfer(Allocator* alloc, slot_type* new_slot,
slot_type* old_slot) {
auto is_relocatable =
typename absl::is_trivially_relocatable<value_type>::type();
emplace(new_slot);
#if defined(__cpp_lib_launder) && __cpp_lib_launder >= 201606
if (is_relocatable) {
std::memcpy(static_cast<void*>(std::launder(&new_slot->value)),
static_cast<const void*>(&old_slot->value),
sizeof(value_type));
return is_relocatable;
}
#endif
if (kMutableKeys::value) {
absl::allocator_traits<Allocator>::construct(
*alloc, &new_slot->mutable_value, std::move(old_slot->mutable_value));
} else {
absl::allocator_traits<Allocator>::construct(*alloc, &new_slot->value,
std::move(old_slot->value));
}
destroy(alloc, old_slot);
return is_relocatable;
}
};
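// Type-erased entry points used to hash a slot: both cast `fn` back to the
// concrete hash functor, with the second variant first dereferencing a
// pointer stored in the slot.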
using HashSlotFn = size_t (*)(const void* hash_fn, void* slot);
template <class Fn, class T>
size_t TypeErasedApplyToSlotFn(const void* fn, void* slot) {
const auto* f = static_cast<const Fn*>(fn);
return (*f)(*static_cast<const T*>(slot));
}
template <class Fn, class T>
size_t TypeErasedDerefAndApplyToSlotFn(const void* fn, void* slot_ptr) {
const auto* f = static_cast<const Fn*>(fn);
const T* slot = *static_cast<const T**>(slot_ptr);
return (*f)(*slot);
}
}
ABSL_NAMESPACE_END
}
#endif | #include "absl/container/internal/container_memory.h"
#include <cstddef>
#include <cstdint>
#include <map>
#include <memory>
#include <string>
#include <tuple>
#include <type_traits>
#include <typeindex>
#include <typeinfo>
#include <utility>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/base/no_destructor.h"
#include "absl/container/internal/test_instance_tracker.h"
#include "absl/meta/type_traits.h"
#include "absl/strings/string_view.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace container_internal {
namespace {
using ::absl::test_internal::CopyableMovableInstance;
using ::absl::test_internal::InstanceTracker;
using ::testing::_;
using ::testing::ElementsAre;
using ::testing::Gt;
using ::testing::Pair;
TEST(Memory, AlignmentLargerThanBase) {
std::allocator<int8_t> alloc;
void* mem = Allocate<2>(&alloc, 3);
EXPECT_EQ(0, reinterpret_cast<uintptr_t>(mem) % 2);
memcpy(mem, "abc", 3);
Deallocate<2>(&alloc, mem, 3);
}
TEST(Memory, AlignmentSmallerThanBase) {
std::allocator<int64_t> alloc;
void* mem = Allocate<2>(&alloc, 3);
EXPECT_EQ(0, reinterpret_cast<uintptr_t>(mem) % 2);
memcpy(mem, "abc", 3);
Deallocate<2>(&alloc, mem, 3);
}
std::map<std::type_index, int>& AllocationMap() {
static absl::NoDestructor<std::map<std::type_index, int>> map;
return *map;
}
template <typename T>
struct TypeCountingAllocator {
TypeCountingAllocator() = default;
template <typename U>
TypeCountingAllocator(const TypeCountingAllocator<U>&) {}
using value_type = T;
T* allocate(size_t n, const void* = nullptr) {
AllocationMap()[typeid(T)] += n;
return std::allocator<T>().allocate(n);
}
void deallocate(T* p, std::size_t n) {
AllocationMap()[typeid(T)] -= n;
return std::allocator<T>().deallocate(p, n);
}
};
TEST(Memory, AllocateDeallocateMatchType) {
TypeCountingAllocator<int> alloc;
void* mem = Allocate<1>(&alloc, 1);
EXPECT_THAT(AllocationMap(), ElementsAre(Pair(_, Gt(0))));
Deallocate<1>(&alloc, mem, 1);
EXPECT_THAT(AllocationMap(), ElementsAre(Pair(_, 0)));
}
class Fixture : public ::testing::Test {
using Alloc = std::allocator<std::string>;
public:
Fixture() { ptr_ = std::allocator_traits<Alloc>::allocate(*alloc(), 1); }
~Fixture() override {
std::allocator_traits<Alloc>::destroy(*alloc(), ptr_);
std::allocator_traits<Alloc>::deallocate(*alloc(), ptr_, 1);
}
std::string* ptr() { return ptr_; }
Alloc* alloc() { return &alloc_; }
private:
Alloc alloc_;
std::string* ptr_;
};
TEST_F(Fixture, ConstructNoArgs) {
ConstructFromTuple(alloc(), ptr(), std::forward_as_tuple());
EXPECT_EQ(*ptr(), "");
}
TEST_F(Fixture, ConstructOneArg) {
ConstructFromTuple(alloc(), ptr(), std::forward_as_tuple("abcde"));
EXPECT_EQ(*ptr(), "abcde");
}
TEST_F(Fixture, ConstructTwoArg) {
ConstructFromTuple(alloc(), ptr(), std::forward_as_tuple(5, 'a'));
EXPECT_EQ(*ptr(), "aaaaa");
}
TEST(PairArgs, NoArgs) {
EXPECT_THAT(PairArgs(),
Pair(std::forward_as_tuple(), std::forward_as_tuple()));
}
TEST(PairArgs, TwoArgs) {
EXPECT_EQ(
std::make_pair(std::forward_as_tuple(1), std::forward_as_tuple('A')),
PairArgs(1, 'A'));
}
TEST(PairArgs, Pair) {
EXPECT_EQ(
std::make_pair(std::forward_as_tuple(1), std::forward_as_tuple('A')),
PairArgs(std::make_pair(1, 'A')));
}
TEST(PairArgs, Piecewise) {
EXPECT_EQ(
std::make_pair(std::forward_as_tuple(1), std::forward_as_tuple('A')),
PairArgs(std::piecewise_construct, std::forward_as_tuple(1),
std::forward_as_tuple('A')));
}
TEST(WithConstructed, Simple) {
EXPECT_EQ(1, WithConstructed<absl::string_view>(
std::make_tuple(std::string("a")),
[](absl::string_view str) { return str.size(); }));
}
template <class F, class Arg>
decltype(DecomposeValue(std::declval<F>(), std::declval<Arg>()))
DecomposeValueImpl(int, F&& f, Arg&& arg) {
return DecomposeValue(std::forward<F>(f), std::forward<Arg>(arg));
}
template <class F, class Arg>
const char* DecomposeValueImpl(char, F&& f, Arg&& arg) {
return "not decomposable";
}
template <class F, class Arg>
decltype(DecomposeValueImpl(0, std::declval<F>(), std::declval<Arg>()))
TryDecomposeValue(F&& f, Arg&& arg) {
return DecomposeValueImpl(0, std::forward<F>(f), std::forward<Arg>(arg));
}
TEST(DecomposeValue, Decomposable) {
auto f = [](const int& x, int&& y) {
EXPECT_EQ(&x, &y);
EXPECT_EQ(42, x);
return 'A';
};
EXPECT_EQ('A', TryDecomposeValue(f, 42));
}
TEST(DecomposeValue, NotDecomposable) {
auto f = [](void*) {
ADD_FAILURE() << "Must not be called";
return 'A';
};
EXPECT_STREQ("not decomposable", TryDecomposeValue(f, 42));
}
template <class F, class... Args>
decltype(DecomposePair(std::declval<F>(), std::declval<Args>()...))
DecomposePairImpl(int, F&& f, Args&&... args) {
return DecomposePair(std::forward<F>(f), std::forward<Args>(args)...);
}
template <class F, class... Args>
const char* DecomposePairImpl(char, F&& f, Args&&... args) {
return "not decomposable";
}
template <class F, class... Args>
decltype(DecomposePairImpl(0, std::declval<F>(), std::declval<Args>()...))
TryDecomposePair(F&& f, Args&&... args) {
return DecomposePairImpl(0, std::forward<F>(f), std::forward<Args>(args)...);
}
TEST(DecomposePair, Decomposable) {
auto f = [](const int& x,
std::piecewise_construct_t, std::tuple<int&&> k,
std::tuple<double>&& v) {
EXPECT_EQ(&x, &std::get<0>(k));
EXPECT_EQ(42, x);
EXPECT_EQ(0.5, std::get<0>(v));
return 'A';
};
EXPECT_EQ('A', TryDecomposePair(f, 42, 0.5));
EXPECT_EQ('A', TryDecomposePair(f, std::make_pair(42, 0.5)));
EXPECT_EQ('A', TryDecomposePair(f, std::piecewise_construct,
std::make_tuple(42), std::make_tuple(0.5)));
}
TEST(DecomposePair, NotDecomposable) {
auto f = [](...) {
ADD_FAILURE() << "Must not be called";
return 'A';
};
EXPECT_STREQ("not decomposable", TryDecomposePair(f));
EXPECT_STREQ("not decomposable",
TryDecomposePair(f, std::piecewise_construct, std::make_tuple(),
std::make_tuple(0.5)));
}
TEST(MapSlotPolicy, ConstKeyAndValue) {
using slot_policy = map_slot_policy<const CopyableMovableInstance,
const CopyableMovableInstance>;
using slot_type = typename slot_policy::slot_type;
union Slots {
Slots() {}
~Slots() {}
slot_type slots[100];
} slots;
std::allocator<
std::pair<const CopyableMovableInstance, const CopyableMovableInstance>>
alloc;
InstanceTracker tracker;
slot_policy::construct(&alloc, &slots.slots[0], CopyableMovableInstance(1),
CopyableMovableInstance(1));
for (int i = 0; i < 99; ++i) {
slot_policy::transfer(&alloc, &slots.slots[i + 1], &slots.slots[i]);
}
slot_policy::destroy(&alloc, &slots.slots[99]);
EXPECT_EQ(tracker.copies(), 0);
}
TEST(MapSlotPolicy, TransferReturnsTrue) {
{
using slot_policy = map_slot_policy<int, float>;
EXPECT_TRUE(
(std::is_same<decltype(slot_policy::transfer<std::allocator<char>>(
nullptr, nullptr, nullptr)),
std::true_type>::value));
}
{
struct NonRelocatable {
NonRelocatable() = default;
NonRelocatable(NonRelocatable&&) {}
NonRelocatable& operator=(NonRelocatable&&) { return *this; }
void* self = nullptr;
};
EXPECT_FALSE(absl::is_trivially_relocatable<NonRelocatable>::value);
using slot_policy = map_slot_policy<int, NonRelocatable>;
EXPECT_TRUE(
(std::is_same<decltype(slot_policy::transfer<std::allocator<char>>(
nullptr, nullptr, nullptr)),
std::false_type>::value));
}
}
TEST(MapSlotPolicy, DestroyReturnsTrue) {
{
using slot_policy = map_slot_policy<int, float>;
EXPECT_TRUE(
(std::is_same<decltype(slot_policy::destroy<std::allocator<char>>(
nullptr, nullptr)),
std::true_type>::value));
}
{
EXPECT_FALSE(std::is_trivially_destructible<std::unique_ptr<int>>::value);
using slot_policy = map_slot_policy<int, std::unique_ptr<int>>;
EXPECT_TRUE(
(std::is_same<decltype(slot_policy::destroy<std::allocator<char>>(
nullptr, nullptr)),
std::false_type>::value));
}
}
TEST(ApplyTest, TypeErasedApplyToSlotFn) {
size_t x = 7;
auto fn = [](size_t v) { return v * 2; };
EXPECT_EQ((TypeErasedApplyToSlotFn<decltype(fn), size_t>(&fn, &x)), 14);
}
TEST(ApplyTest, TypeErasedDerefAndApplyToSlotFn) {
size_t x = 7;
auto fn = [](size_t v) { return v * 2; };
size_t* x_ptr = &x;
EXPECT_EQ(
(TypeErasedDerefAndApplyToSlotFn<decltype(fn), size_t>(&fn, &x_ptr)), 14);
}
}
}
ABSL_NAMESPACE_END
} | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/container/internal/container_memory.h | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/container/internal/container_memory_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4 |
eb5aeb38-bfcb-4297-bbdf-da5dfa1107c5 | cpp | tensorflow/tensorflow | memory | third_party/xla/xla/python/ifrt/memory.cc | third_party/xla/xla/python/ifrt/memory_test.cc | #include "xla/python/ifrt/memory.h"
#include <optional>
#include <string>
#include <utility>
#include "absl/base/thread_annotations.h"
#include "absl/container/node_hash_set.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
#include "xla/python/ifrt/device.h"
namespace xla {
namespace ifrt {
namespace {
struct MemoryKindsSet {
absl::Mutex mu;
absl::node_hash_set<std::string> memory_kinds_set ABSL_GUARDED_BY(mu);
};
}
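// Interns the memory kind string in a process-wide set so that the
// string_view held by MemoryKind always points at stable storage, regardless
// of the lifetime of the caller's string.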
MemoryKind::MemoryKind(std::optional<absl::string_view> memory_kind) {
static auto* const global_set = new MemoryKindsSet();
if (!memory_kind.has_value()) {
return;
}
absl::MutexLock lock(&global_set->mu);
auto it = global_set->memory_kinds_set.find(*memory_kind);
if (it == global_set->memory_kinds_set.end()) {
memory_kind_ =
*global_set->memory_kinds_set.insert(std::string(*memory_kind)).first;
} else {
memory_kind_ = *it;
}
}
std::string MemoryKind::ToString() const {
if (memory_kind_.has_value()) {
return std::string(*memory_kind_);
}
return "(default)";
}
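// Replaces an unspecified memory kind with the device's default memory kind
// when the device exposes one; otherwise leaves it unspecified.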
MemoryKind CanonicalizeMemoryKind(MemoryKind memory_kind, Device* device) {
if (memory_kind.memory_kind().has_value()) {
return memory_kind;
}
auto default_memory = device->DefaultMemory();
if (default_memory.ok()) {
return (*default_memory)->Kind();
}
return MemoryKind();
}
char Memory::ID = 0;
}
} | #include "xla/python/ifrt/memory.h"
#include <memory>
#include <optional>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
using ::testing::Optional;
namespace xla {
namespace ifrt {
namespace {
TEST(MemoryKindTest, EqualityForUnspecified) {
MemoryKind memory_kind1;
MemoryKind memory_kind2;
EXPECT_EQ(memory_kind1, memory_kind2);
}
TEST(MemoryKindTest, EqualityForSameString) {
MemoryKind memory_kind1("abc");
MemoryKind memory_kind2("abc");
EXPECT_EQ(memory_kind1, memory_kind2);
}
TEST(MemoryKindTest, EqualityForSameStringContent) {
MemoryKind memory_kind1("abc");
MemoryKind memory_kind2(absl::StrCat("ab", "c"));
EXPECT_EQ(memory_kind1, memory_kind2);
}
TEST(MemoryKindTest, InequalityForDifferentStringContent) {
MemoryKind memory_kind1("abc");
MemoryKind memory_kind2("def");
EXPECT_NE(memory_kind1, memory_kind2);
}
TEST(MemoryKindTest, InequalityBetweenSpecifiedAndUnspecified) {
{
MemoryKind memory_kind1("abc");
MemoryKind memory_kind2;
EXPECT_NE(memory_kind1, memory_kind2);
}
{
MemoryKind memory_kind1;
MemoryKind memory_kind2("abc");
EXPECT_NE(memory_kind1, memory_kind2);
}
}
TEST(MemoryKindTest, MemorySafety) {
auto memory_kind_str = std::make_unique<std::string>("abc");
MemoryKind memory_kind(*memory_kind_str);
memory_kind_str.reset();
EXPECT_THAT(memory_kind.memory_kind(), Optional(absl::string_view("abc")));
}
TEST(MemoryKindTest, EqualityForUnspecifiedAndNullopt) {
MemoryKind memory_kind1;
MemoryKind memory_kind2(std::nullopt);
EXPECT_EQ(memory_kind1, memory_kind2);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/python/ifrt/memory.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/python/ifrt/memory_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
65b81fe1-98d0-44ae-8c4a-5403dc3066f5 | cpp | tensorflow/tensorflow | mean | tensorflow/lite/delegates/gpu/gl/kernels/mean.cc | tensorflow/lite/delegates/xnnpack/mean_test.cc | #include "tensorflow/lite/delegates/gpu/gl/kernels/mean.h"
#include <algorithm>
#include <any>
#include <cstdint>
#include <cstring>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/memory/memory.h"
#include "absl/status/status.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/types.h"
#include "tensorflow/lite/delegates/gpu/common/util.h"
namespace tflite {
namespace gpu {
namespace gl {
namespace {
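// The subgroup-based implementation needs Vulkan 1.1+ for the subgroup
// queries, a subgroup size of at least 32, and subgroup arithmetic support.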
bool UseSubgroupBasedImpl(const GpuInfo& gpu_info) {
return gpu_info.IsApiVulkan() &&
(gpu_info.vulkan_info.api_version_major > 1 ||
gpu_info.vulkan_info.api_version_minor >= 1) &&
gpu_info.vulkan_info.subgroup_size >= 32 &&
gpu_info.vulkan_info.supports_subgroup_arithmetic;
}
void GenerateSubgroupBasedMean(const NodeShader::GenerationContext& ctx,
GeneratedCode* generated_code) {
int height = ctx.input_shapes[0][1];
int width = ctx.input_shapes[0][2];
int depth = ctx.input_shapes[0][3];
std::vector<Variable> parameters = {
{"input_data_0_h", height},
{"input_data_0_w", width},
{"output_data_0_h", 1},
{"output_data_0_w", 1},
};
std::string source = R"(
const uint columns_per_invocation =
($input_data_0_w$ + (gl_WorkGroupSize.x - 1))/gl_WorkGroupSize.x;
const uint rows_per_invocation =
($input_data_0_h$ + (gl_WorkGroupSize.y - 1))/gl_WorkGroupSize.y;
const uint first_row = gl_GlobalInvocationID.y*rows_per_invocation;
const uint first_col = gl_GlobalInvocationID.x*columns_per_invocation;
const uint last_row_exclusive =
min(first_row+rows_per_invocation, $input_data_0_h$);
const uint last_column_exclusive =
min(first_col+columns_per_invocation, $input_data_0_w$);
vec4 value = vec4(0);
for (uint h = first_row; h < last_row_exclusive; ++h) {
for (uint w = first_col; w < last_column_exclusive; ++w) {
value += $input_data_0[w, h, gid.z]$;
}
}
highp vec4 subgroup_sum = subgroupAdd(value);
if(subgroupElect()) {
subgroup_sums[gl_SubgroupID] = subgroup_sum;
}
memoryBarrierShared();
barrier();
if(gl_SubgroupID == 0) {
highp vec4 subtotal = vec4(0);
if (gl_SubgroupInvocationID < gl_NumSubgroups) {
subtotal = subgroup_sums[gl_SubgroupInvocationID];
}
highp vec4 grand_total = subgroupAdd(subtotal);
if(subgroupElect()) {
highp vec4 result = grand_total / $input_data_0_w$ / $input_data_0_h$;
$output_data_0[0, 0, gid.z] = result$;
}
}
)";
const uint32_t subgroup_size = ctx.gpu_info->vulkan_info.subgroup_size;
const uint32_t max_wg_size_x = ctx.gpu_info->GetMaxWorkGroupSizeForX();
const uint32_t max_wg_size_y = ctx.gpu_info->GetMaxWorkGroupSizeForY();
const uint32_t max_wg_size =
std::min(static_cast<uint32_t>(ctx.gpu_info->GetMaxWorkGroupTotalSize()),
subgroup_size * subgroup_size);
const uint32_t max_number_of_subgroups = max_wg_size / subgroup_size;
uint32_t wg_size_x = 0;
uint32_t wg_size_y = 0;
if (width * height <= max_wg_size && width <= max_wg_size_x &&
height <= max_wg_size_y) {
wg_size_x = width;
wg_size_y = height;
} else {
wg_size_x = std::min({static_cast<uint32_t>(std::sqrt(max_wg_size)),
max_wg_size_x, static_cast<uint32_t>(width)});
wg_size_y = std::min({max_wg_size / wg_size_x, max_wg_size_y,
static_cast<uint32_t>(height)});
}
std::vector<Variable> shared_variables = {
{"subgroup_sums", std::vector<float4>(max_number_of_subgroups)},
};
*generated_code = {
std::move(parameters),
{},
{std::move(shared_variables)},
uint3(wg_size_x, wg_size_y, uint32_t(DivideRoundUp(depth, 4))),
uint3(wg_size_x, wg_size_y, 1u),
std::move(source),
IOStructure::ONLY_DEFINITIONS,
IOStructure::ONLY_DEFINITIONS,
};
}
void GenerateTrivialMean(const NodeShader::GenerationContext& ctx,
GeneratedCode* generated_code) {
std::vector<Variable> parameters = {
{"input_data_0_h", static_cast<int>(ctx.input_shapes[0][1])},
{"input_data_0_w", static_cast<int>(ctx.input_shapes[0][2])}};
std::string source = R"(
highp vec4 sum = vec4(0.0);
highp float size = float($input_data_0_w$ * $input_data_0_h$);
for (int w = 0; w < $input_data_0_w$; w++) {
for (int h = 0; h < $input_data_0_h$; h++) {
sum += $input_data_0[w, h, gid.z]$;
}
}
value_0 = sum / size;
)";
*generated_code = {
std::move(parameters),
{},
{},
uint3(),
uint3(1, 1, 4),
std::move(source),
IOStructure::ONLY_DEFINITIONS,
IOStructure::AUTO,
};
}
constexpr uint3 kTileSize = {8, 8, 1};
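// The tiled implementation keeps one float4 partial sum per tile and slice in
// shared memory, so it is only usable when the tensor tiles evenly and those
// partial sums fit within the minimum guaranteed shared memory size (32 KiB).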
inline bool UseTiledImpl(const NodeShader::GenerationContext& ctx) {
const int h = ctx.input_shapes[0][1];
const int w = ctx.input_shapes[0][2];
const int c = ctx.input_shapes[0][3];
return h % kTileSize.y == 0 && w % kTileSize.x == 0 && c % 4 == 0 &&
(h / kTileSize.y) * (w / kTileSize.x) * c * sizeof(float) <=
32768;
}
void GenerateTiledMean(const NodeShader::GenerationContext& ctx,
GeneratedCode* generated_code) {
const int h = ctx.input_shapes[0][1];
const int w = ctx.input_shapes[0][2];
const int s = DivideRoundUp(ctx.input_shapes[0][3], 4);
std::vector<Variable> parameters = {
{"input_data_0_h", h},
{"input_data_0_w", w},
{"tile_size_h", kTileSize.y},
{"tile_size_w", kTileSize.x},
};
std::vector<Variable> shared_variables = {
{"tile_sum",
std::vector<float4>((w / kTileSize.x) * (h / kTileSize.y) * s)}};
std::string source = R"(
ivec2 tile_size = ivec2($tile_size_w$, $tile_size_h$);
ivec2 num_tiles = ivec2($input_data_0_w$, $input_data_0_h$) / tile_size;
highp vec4 partial_sum = vec4(0.0);
for (int x = gid.x * tile_size.x; x < (gid.x + 1) * tile_size.x; ++x) {
for (int y = gid.y * tile_size.y; y < (gid.y + 1) * tile_size.y; ++y) {
partial_sum += $input_data_0[x, y, gid.z]$;
}
}
$tile_sum$[num_tiles.x * num_tiles.y * gid.z + num_tiles.x * gid.y + gid.x] = partial_sum;
memoryBarrierShared(); barrier();
if (gid.x == 0 && gid.y == 0) {
highp vec4 sum = vec4(0.0);
for (int i = 0; i < num_tiles.x * num_tiles.y; ++i) {
sum += $tile_sum$[num_tiles.x * num_tiles.y * gid.z + i];
}
highp vec4 mean = sum / float($input_data_0_w$ * $input_data_0_h$);
$output_data_0[0, 0, gid.z] = mean$;
}
)";
*generated_code = {
std::move(parameters),
{},
std::move(shared_variables),
uint3(kTileSize.x, kTileSize.y, static_cast<uint32_t>(s)),
kTileSize,
std::move(source),
IOStructure::ONLY_DEFINITIONS,
IOStructure::ONLY_DEFINITIONS,
};
}
class Mean : public NodeShader {
public:
absl::Status GenerateCode(const GenerationContext& ctx,
GeneratedCode* generated_code) const final {
const auto& attr = std::any_cast<const MeanAttributes&>(ctx.op_attr);
if (attr.dims != std::set<Axis>({Axis::HEIGHT, Axis::WIDTH})) {
return absl::InvalidArgumentError(
"Mean calculation is supported only for height and width.");
}
if (!(ctx.input_shapes.size() == 1 && ctx.output_shapes.size() == 1 &&
ctx.output_shapes[0][1] == 1 && ctx.output_shapes[0][2] == 1 &&
ctx.output_shapes[0][3] == ctx.input_shapes[0][3])) {
return absl::InvalidArgumentError(
"Mean calculation is supported for one input and one 1x1 output with "
"the same channel count.");
}
if (UseSubgroupBasedImpl(*ctx.gpu_info)) {
GenerateSubgroupBasedMean(ctx, generated_code);
} else if (UseTiledImpl(ctx)) {
GenerateTiledMean(ctx, generated_code);
} else {
GenerateTrivialMean(ctx, generated_code);
}
return absl::OkStatus();
}
};
}
std::unique_ptr<NodeShader> NewMeanNodeShader() {
return std::make_unique<Mean>();
}
}
}
} | #include <cstdint>
#include <functional>
#include <memory>
#include <random>
#include <gtest/gtest.h>
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/delegates/xnnpack/reduce_tester.h"
#include "tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace xnnpack {
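// Each test builds a MEAN model with randomized shapes via ReduceTester and
// runs it through the XNNPACK delegate, covering various reduction axes with
// keep_dims both set and unset.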
TEST(Mean, 4DReduceBatchSqueezeDims) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
ReduceTester()
.InputShape({batch, height, width, channels})
.Axes({0})
.KeepDims(false)
.Test(BuiltinOperator_MEAN, xnnpack_delegate.get());
}
TEST(Mean, 4DReduceBatchKeepDims) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
ReduceTester()
.InputShape({batch, height, width, channels})
.Axes({0})
.KeepDims(true)
.Test(BuiltinOperator_MEAN, xnnpack_delegate.get());
}
TEST(Mean, 4DReduceHeightSqueezeDims) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
ReduceTester()
.InputShape({batch, height, width, channels})
.Axes({1})
.KeepDims(false)
.Test(BuiltinOperator_MEAN, xnnpack_delegate.get());
}
TEST(Mean, 4DReduceHeightKeepDims) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
ReduceTester()
.InputShape({batch, height, width, channels})
.Axes({1})
.KeepDims(true)
.Test(BuiltinOperator_MEAN, xnnpack_delegate.get());
}
TEST(Mean, 4DReduceWidthSqueezeDims) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
ReduceTester()
.InputShape({batch, height, width, channels})
.Axes({2})
.KeepDims(false)
.Test(BuiltinOperator_MEAN, xnnpack_delegate.get());
}
TEST(Mean, 4DReduceWidthKeepDims) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
ReduceTester()
.InputShape({batch, height, width, channels})
.Axes({2})
.KeepDims(true)
.Test(BuiltinOperator_MEAN, xnnpack_delegate.get());
}
TEST(Mean, 4DReduceHeightWidthSqueezeDims) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
ReduceTester()
.InputShape({batch, height, width, channels})
.Axes({1, 2})
.KeepDims(false)
.Test(BuiltinOperator_MEAN, xnnpack_delegate.get());
ReduceTester()
.InputShape({batch, height, width, channels})
.Axes({2, 1})
.KeepDims(false)
.Test(BuiltinOperator_MEAN, xnnpack_delegate.get());
}
TEST(Mean, 4DReduceHeightWidthKeepDims) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
ReduceTester()
.InputShape({batch, height, width, channels})
.Axes({1, 2})
.KeepDims(true)
.Test(BuiltinOperator_MEAN, xnnpack_delegate.get());
ReduceTester()
.InputShape({batch, height, width, channels})
.Axes({2, 1})
.KeepDims(true)
.Test(BuiltinOperator_MEAN, xnnpack_delegate.get());
}
TEST(Mean, 4DReduceChannelsSqueezeDims) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
ReduceTester()
.InputShape({batch, height, width, channels})
.Axes({3})
.KeepDims(false)
.Test(BuiltinOperator_MEAN, xnnpack_delegate.get());
}
TEST(Mean, 4DReduceChannelsKeepDims) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
ReduceTester()
.InputShape({batch, height, width, channels})
.Axes({3})
.KeepDims(true)
.Test(BuiltinOperator_MEAN, xnnpack_delegate.get());
}
TEST(Mean, 3DReduceBatchSqueezeDims) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
ReduceTester()
.InputShape({batch, width, channels})
.Axes({0})
.KeepDims(false)
.Test(BuiltinOperator_MEAN, xnnpack_delegate.get());
}
TEST(Mean, 3DReduceBatchKeepDims) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
ReduceTester()
.InputShape({batch, width, channels})
.Axes({0})
.KeepDims(true)
.Test(BuiltinOperator_MEAN, xnnpack_delegate.get());
}
TEST(Mean, 3DReduceWidthSqueezeDims) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
ReduceTester()
.InputShape({batch, width, channels})
.Axes({1})
.KeepDims(false)
.Test(BuiltinOperator_MEAN, xnnpack_delegate.get());
}
TEST(Mean, 3DReduceWidthKeepDims) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
ReduceTester()
.InputShape({batch, width, channels})
.Axes({1})
.KeepDims(true)
.Test(BuiltinOperator_MEAN, xnnpack_delegate.get());
}
TEST(Mean, 3DReduceChannelsSqueezeDims) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
ReduceTester()
.InputShape({batch, width, channels})
.Axes({2})
.KeepDims(false)
.Test(BuiltinOperator_MEAN, xnnpack_delegate.get());
}
TEST(Mean, 3DReduceChannelsKeepDims) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
ReduceTester()
.InputShape({batch, width, channels})
.Axes({2})
.KeepDims(true)
.Test(BuiltinOperator_MEAN, xnnpack_delegate.get());
}
TEST(Mean, 2DReduceBatchSqueezeDims) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto channels = shape_rng();
ReduceTester()
.InputShape({batch, channels})
.Axes({0})
.KeepDims(false)
.Test(BuiltinOperator_MEAN, xnnpack_delegate.get());
}
TEST(Mean, 2DReduceBatchKeepDims) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto channels = shape_rng();
ReduceTester()
.InputShape({batch, channels})
.Axes({0})
.KeepDims(true)
.Test(BuiltinOperator_MEAN, xnnpack_delegate.get());
}
TEST(Mean, 2DReduceChannelsSqueezeDims) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto channels = shape_rng();
ReduceTester()
.InputShape({batch, channels})
.Axes({1})
.KeepDims(false)
.Test(BuiltinOperator_MEAN, xnnpack_delegate.get());
}
TEST(Mean, 2DReduceChannelsKeepDims) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto channels = shape_rng();
ReduceTester()
.InputShape({batch, channels})
.Axes({1})
.KeepDims(true)
.Test(BuiltinOperator_MEAN, xnnpack_delegate.get());
}
TEST(Mean, 1DSqueezeDims) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
ReduceTester().InputShape({batch}).Axes({0}).KeepDims(false).Test(
BuiltinOperator_MEAN, xnnpack_delegate.get());
}
TEST(Mean, 1DKeepDims) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
ReduceTester().InputShape({batch}).Axes({0}).KeepDims(true).Test(
BuiltinOperator_MEAN, xnnpack_delegate.get());
}
TEST(Mean, MultiThreading) {
TfLiteXNNPackDelegateOptions delegate_options =
TfLiteXNNPackDelegateOptionsDefault();
delegate_options.num_threads = 2;
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(&delegate_options),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
ReduceTester()
.InputShape({batch, height, width, channels})
.Axes({1, 2})
.KeepDims(true)
.Test(BuiltinOperator_MEAN, xnnpack_delegate.get());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/gl/kernels/mean.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/xnnpack/mean_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
4590b1c7-332b-4543-a56c-801e115dd511 | cpp | abseil/abseil-cpp | barrier | absl/synchronization/barrier.cc | absl/synchronization/barrier_test.cc | #include "absl/synchronization/barrier.h"
#include "absl/base/internal/raw_logging.h"
#include "absl/synchronization/mutex.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
static bool IsZero(void *arg) {
return 0 == *reinterpret_cast<int *>(arg);
}
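// Predicate for Mutex::Await: unblocks every waiter once the last thread
// has decremented num_to_block_ to zero.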
bool Barrier::Block() {
MutexLock l(&this->lock_);
this->num_to_block_--;
if (this->num_to_block_ < 0) {
ABSL_RAW_LOG(
FATAL,
"Block() called too many times. num_to_block_=%d out of total=%d",
this->num_to_block_, this->num_to_exit_);
}
this->lock_.Await(Condition(IsZero, &this->num_to_block_));
this->num_to_exit_--;
ABSL_RAW_CHECK(this->num_to_exit_ >= 0, "barrier underflow");
return this->num_to_exit_ == 0;
}
ABSL_NAMESPACE_END
} | #include "absl/synchronization/barrier.h"
#include <thread>
#include <vector>
#include "gtest/gtest.h"
#include "absl/synchronization/mutex.h"
#include "absl/time/clock.h"
TEST(Barrier, SanityTest) {
constexpr int kNumThreads = 10;
absl::Barrier* barrier = new absl::Barrier(kNumThreads);
absl::Mutex mutex;
int counter = 0;
auto thread_func = [&] {
if (barrier->Block()) {
delete barrier;
}
absl::MutexLock lock(&mutex);
++counter;
};
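  // Exactly one Block() call returns true, and that caller deletes the
  // barrier; the sleep below verifies that no thread gets past the barrier
  // until the final thread arrives.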
std::vector<std::thread> threads;
for (int i = 0; i < kNumThreads - 1; ++i) {
threads.push_back(std::thread(thread_func));
}
absl::SleepFor(absl::Seconds(1));
{
absl::MutexLock lock(&mutex);
EXPECT_EQ(counter, 0);
}
threads.push_back(std::thread(thread_func));
for (auto& thread : threads) {
thread.join();
}
absl::MutexLock lock(&mutex);
EXPECT_EQ(counter, kNumThreads);
} | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/synchronization/barrier.cc | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/synchronization/barrier_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4 |
75346150-76eb-46bf-af45-eced291a9d7e | cpp | tensorflow/tensorflow | cpu_layout_assignment | third_party/xla/xla/service/cpu/cpu_layout_assignment.cc | third_party/xla/xla/service/cpu/cpu_layout_assignment_test.cc | #include "xla/service/cpu/cpu_layout_assignment.h"
#include <cstdint>
#include <numeric>
#include <optional>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/map_util.h"
#include "xla/service/cpu/dot_op_emitter.h"
#include "xla/service/cpu/ir_emission_utils.h"
#include "xla/shape_util.h"
#include "tsl/platform/errors.h"
namespace xla {
namespace cpu {
namespace {
using std::nullopt;
using std::optional;
using ShouldMakeOperandColMajorCache =
absl::flat_hash_map<const HloInstruction*, bool>;
}
static bool ShouldMakeAllUsersColMajor(const HloInstruction* instruction) {
for (auto* user : instruction->users()) {
optional<int64_t> operand_idx =
ProfitableToMakeDotOperandColumnMajor(*user);
if (!operand_idx || user->operand(*operand_idx) != instruction ||
absl::c_count(user->operands(), instruction) != 1) {
return false;
}
}
return true;
}
static optional<int64_t> ShouldMakeOperandColumnMajor(
ShouldMakeOperandColMajorCache* cache, const HloInstruction& instruction) {
optional<int64_t> operand_idx =
ProfitableToMakeDotOperandColumnMajor(instruction);
if (!operand_idx) {
return nullopt;
}
const HloInstruction* operand = instruction.operand(*operand_idx);
if (operand->opcode() != HloOpcode::kConstant) {
return nullopt;
}
auto it = cache->find(operand);
if (it == cache->end()) {
auto insert_result =
cache->insert({operand, ShouldMakeAllUsersColMajor(operand)});
CHECK(insert_result.second);
it = insert_result.first;
}
return it->second ? operand_idx : nullopt;
}
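// Layout helpers: row-major lists dimensions from most-major to most-minor
// ({rank-1, ..., 0}); column-major is the reverse ({0, ..., rank-1}).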
static Shape RowMajorShape(Shape shape) {
ShapeUtil::ForEachMutableSubshape(
&shape, [](Shape* subshape, const ShapeIndex& index) {
if (!subshape->IsArray()) {
return;
}
std::vector<int64_t> dimension_order(subshape->dimensions_size());
std::iota(dimension_order.rbegin(), dimension_order.rend(), 0);
*subshape->mutable_layout() = LayoutUtil::MakeLayout(dimension_order);
});
return shape;
}
static Shape ColMajorShape(const Shape& old_shape) {
Shape new_shape(old_shape);
std::vector<int64_t> dimension_order(new_shape.dimensions_size());
std::iota(dimension_order.begin(), dimension_order.end(), 0);
*new_shape.mutable_layout() = LayoutUtil::MakeLayout(dimension_order);
return new_shape;
}
static bool OperandsAndResultMustHaveRowMajorLayout(
const HloInstruction& instr,
const TargetMachineFeatures& target_machine_features) {
if (instr.opcode() == HloOpcode::kConvolution) {
return PotentiallyImplementedAsEigenConvolution(instr,
target_machine_features);
} else if (instr.opcode() == HloOpcode::kDot) {
return DotOperandsAndResultMustHaveRowMajorLayout(instr,
target_machine_features);
} else if (instr.opcode() == HloOpcode::kCustomCall) {
return instr.custom_call_target() == "TopK";
}
return false;
}
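// Walks every instruction and pins layouts: convolutions and dots that may
// lower to Eigen (and the TopK custom call) get row-major everywhere;
// profitable constant dot operands get column-major; reduce-scatter and
// all-gather move their scatter/gather dimension to the major position;
// remaining array operands default to row-major.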
absl::Status CpuLayoutAssignment::AddBackendConstraints(
LayoutConstraints* constraints) {
ShouldMakeOperandColMajorCache cache;
const HloComputation* computation = constraints->computation();
for (auto* instruction : computation->instructions()) {
if (OperandsAndResultMustHaveRowMajorLayout(*instruction,
target_machine_features_)) {
TF_RETURN_IF_ERROR(SetInstructionLayout(
RowMajorShape(instruction->shape()), instruction));
for (int i = 0; i < instruction->operand_count(); i++) {
TF_RETURN_IF_ERROR(SetOperandLayout(
RowMajorShape(instruction->operand(i)->shape()), instruction, i));
}
} else if (optional<int64_t> op_idx =
ShouldMakeOperandColumnMajor(&cache, *instruction)) {
const HloInstruction* op = instruction->operand(*op_idx);
TF_RETURN_IF_ERROR(
SetOperandLayout(ColMajorShape(op->shape()), instruction, *op_idx));
} else if (instruction->opcode() == HloOpcode::kReduceScatter) {
auto ars = Cast<HloReduceScatterInstruction>(instruction);
TF_RETURN_IF_ERROR(SetInstructionLayout(
ShapeUtil::MoveDimToMajor(ars->shape(), ars->scatter_dimension()),
ars));
} else if (instruction->opcode() == HloOpcode::kAllGather) {
auto ag = Cast<HloAllGatherInstruction>(instruction);
TF_RETURN_IF_ERROR(SetInstructionLayout(
ShapeUtil::MoveDimToMajor(ag->shape(), ag->all_gather_dimension()),
ag));
} else {
for (int64_t operand_no = 0; operand_no < instruction->operand_count();
++operand_no) {
if (constraints->OperandLayout(instruction, operand_no) != nullptr) {
continue;
}
if (AnyOperandBufferForwarded(instruction, operand_no)) {
continue;
}
if (!instruction->operand(operand_no)->shape().IsArray()) {
continue;
}
Shape operand_shape(
RowMajorShape(instruction->operand(operand_no)->shape()));
TF_RETURN_IF_ERROR(
SetOperandLayout(operand_shape, instruction, operand_no));
}
if (computation->parent()->entry_computation() == computation &&
computation->root_instruction() == instruction) {
continue;
}
if (!instruction->shape().IsArray()) {
continue;
}
}
}
return absl::OkStatus();
}
}
} | #include "xla/service/cpu/cpu_layout_assignment.h"
#include <initializer_list>
#include <memory>
#include <utility>
#include <vector>
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/layout_util.h"
#include "xla/literal.h"
#include "xla/service/algebraic_simplifier.h"
#include "xla/service/computation_layout.h"
#include "xla/service/cpu/target_machine_features_fake.h"
#include "xla/shape_layout.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/test_helpers.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/test_utils.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/status.h"
namespace op = xla::testing::opcode_matchers;
namespace xla {
namespace {
class CpuLayoutAssignmentTest : public HloTestBase {
protected:
void AssignLayouts(HloModule* module,
ComputationLayout* entry_computation_layout) {
cpu::TargetMachineFeaturesWithFakeAlignmentLogic target_machine_features(
[](int64_t shape_size) {
return cpu::TargetMachineFeatures::kEigenExpectedTensorAlignment;
});
cpu::CpuLayoutAssignment layout_assignment(entry_computation_layout,
&target_machine_features);
EXPECT_IS_OK(layout_assignment.Run(module).status());
}
};
TEST_F(CpuLayoutAssignmentTest, DotWithConstantRhsTensor) {
auto builder = HloComputation::Builder(TestName());
Shape lhs_shape = ShapeUtil::MakeShapeWithDenseLayout(F32, {12}, {0});
Shape rhs_shape = ShapeUtil::MakeShape(F32, {12, 24});
Shape result_shape = ShapeUtil::MakeShapeWithDenseLayout(F32, {24}, {0});
auto dot_lhs = builder.AddInstruction(
HloInstruction::CreateParameter(0, lhs_shape, "param0"));
auto dot_rhs = builder.AddInstruction(
HloInstruction::CreateConstant(Literal::CreateFromShape(rhs_shape)));
auto result = builder.AddInstruction(
CreateCanonicalDot(result_shape, dot_lhs, dot_rhs));
auto module = CreateNewVerifiedModule();
HloComputation* computation = module->AddEntryComputation(builder.Build());
ComputationLayout computation_layout(computation->ComputeProgramShape());
*computation_layout.mutable_parameter_layout(0) =
ShapeLayout(LayoutUtil::GetWithDefaultLayout(lhs_shape));
*computation_layout.mutable_result_layout() =
ShapeLayout(LayoutUtil::GetWithDefaultLayout(result_shape));
AssignLayouts(module.get(), &computation_layout);
EXPECT_TRUE(LayoutUtil::Equal(LayoutUtil::MakeLayout({0}),
dot_lhs->shape().layout()));
EXPECT_TRUE(LayoutUtil::Equal(LayoutUtil::MakeLayout({0, 1}),
dot_rhs->shape().layout()));
EXPECT_TRUE(
LayoutUtil::Equal(LayoutUtil::MakeLayout({0}), result->shape().layout()));
for (const auto& instruction : computation->instructions()) {
EXPECT_NE(instruction->opcode(), HloOpcode::kCopy);
}
}
TEST_F(CpuLayoutAssignmentTest, MultipleDotsWithSameConstantRhsTensor0) {
auto builder = HloComputation::Builder(TestName());
Shape lhs_shape = ShapeUtil::MakeShapeWithDenseLayout(F32, {12}, {0});
Shape rhs_shape = ShapeUtil::MakeShape(F32, {12, 24});
Shape result_shape = ShapeUtil::MakeShapeWithDenseLayout(F32, {24}, {0});
auto dot_a_lhs = builder.AddInstruction(
HloInstruction::CreateParameter(0, lhs_shape, "param0"));
auto dot_b_lhs = builder.AddInstruction(
HloInstruction::CreateParameter(1, lhs_shape, "param1"));
auto dot_rhs = builder.AddInstruction(
HloInstruction::CreateConstant(Literal::CreateFromShape(rhs_shape)));
auto dot_a_result = builder.AddInstruction(
CreateCanonicalDot(result_shape, dot_a_lhs, dot_rhs));
auto dot_b_result = builder.AddInstruction(
CreateCanonicalDot(result_shape, dot_b_lhs, dot_rhs));
builder.AddInstruction(HloInstruction::CreateBinary(
result_shape, HloOpcode::kAdd, dot_a_result, dot_b_result));
auto module = CreateNewVerifiedModule();
HloComputation* computation = module->AddEntryComputation(builder.Build());
ComputationLayout computation_layout(computation->ComputeProgramShape());
*computation_layout.mutable_parameter_layout(0) =
ShapeLayout(LayoutUtil::GetWithDefaultLayout(lhs_shape));
*computation_layout.mutable_result_layout() =
ShapeLayout(LayoutUtil::GetWithDefaultLayout(result_shape));
AssignLayouts(module.get(), &computation_layout);
EXPECT_TRUE(LayoutUtil::Equal(LayoutUtil::MakeLayout({0, 1}),
dot_rhs->shape().layout()));
for (HloInstruction* instruction :
{dot_a_lhs, dot_b_lhs, dot_a_result, dot_b_result}) {
EXPECT_TRUE(LayoutUtil::Equal(LayoutUtil::MakeLayout({0}),
instruction->shape().layout()));
}
for (const auto& instruction : computation->instructions()) {
EXPECT_NE(instruction->opcode(), HloOpcode::kCopy);
}
}
TEST_F(CpuLayoutAssignmentTest, MultipleDotsWithSameConstantRhsTensor1) {
auto builder = HloComputation::Builder(TestName());
Shape lhs_a_shape = ShapeUtil::MakeShapeWithDenseLayout(F32, {1, 12}, {0, 1});
Shape lhs_b_shape = ShapeUtil::MakeShapeWithDenseLayout(F32, {2, 12}, {0, 1});
Shape rhs_shape = ShapeUtil::MakeShapeWithDenseLayout(F32, {12, 24}, {0, 1});
Shape result_a_shape =
ShapeUtil::MakeShapeWithDenseLayout(F32, {1, 24}, {0, 1});
Shape result_b_shape =
ShapeUtil::MakeShapeWithDenseLayout(F32, {2, 24}, {0, 1});
auto dot_a_lhs = builder.AddInstruction(
HloInstruction::CreateParameter(0, lhs_a_shape, "param0"));
auto dot_b_lhs = builder.AddInstruction(
HloInstruction::CreateParameter(1, lhs_b_shape, "param1"));
auto dot_rhs = builder.AddInstruction(
HloInstruction::CreateConstant(Literal::CreateFromShape(rhs_shape)));
auto dot_a_result = builder.AddInstruction(
CreateCanonicalDot(result_a_shape, dot_a_lhs, dot_rhs));
auto dot_b_result = builder.AddInstruction(
CreateCanonicalDot(result_b_shape, dot_b_lhs, dot_rhs));
auto tuple_result = builder.AddInstruction(
HloInstruction::CreateTuple({dot_a_result, dot_b_result}));
auto module = CreateNewVerifiedModule();
HloComputation* computation = module->AddEntryComputation(builder.Build());
ComputationLayout computation_layout(computation->ComputeProgramShape());
*computation_layout.mutable_parameter_layout(0) =
ShapeLayout(LayoutUtil::GetWithDefaultLayout(lhs_a_shape));
*computation_layout.mutable_parameter_layout(1) =
ShapeLayout(LayoutUtil::GetWithDefaultLayout(lhs_b_shape));
*computation_layout.mutable_result_layout() =
ShapeLayout(LayoutUtil::GetWithDefaultLayout(tuple_result->shape()));
AssignLayouts(module.get(), &computation_layout);
for (HloInstruction* instruction :
{dot_rhs, dot_a_lhs, dot_b_lhs, dot_a_result, dot_b_result}) {
EXPECT_TRUE(LayoutUtil::Equal(LayoutUtil::MakeLayout({1, 0}),
instruction->shape().layout()));
}
for (const auto& instruction : computation->instructions()) {
EXPECT_NE(instruction->opcode(), HloOpcode::kCopy);
}
}
TEST_F(CpuLayoutAssignmentTest, DotWithConstantLhsTensor) {
auto builder = HloComputation::Builder(TestName());
Shape lhs_shape = ShapeUtil::MakeShapeWithDenseLayout(F32, {1, 12}, {0, 1});
Shape rhs_shape = ShapeUtil::MakeShapeWithDenseLayout(F32, {12, 24}, {0, 1});
Shape result_shape =
ShapeUtil::MakeShapeWithDenseLayout(F32, {1, 24}, {0, 1});
auto dot_lhs = builder.AddInstruction(
HloInstruction::CreateConstant(Literal::CreateFromShape(lhs_shape)));
auto dot_rhs = builder.AddInstruction(
HloInstruction::CreateParameter(0, rhs_shape, "param0"));
auto dot_result = builder.AddInstruction(
CreateCanonicalDot(result_shape, dot_lhs, dot_rhs));
auto module = CreateNewVerifiedModule();
HloComputation* computation = module->AddEntryComputation(builder.Build());
ComputationLayout computation_layout(computation->ComputeProgramShape());
*computation_layout.mutable_parameter_layout(0) =
ShapeLayout(LayoutUtil::GetWithDefaultLayout(rhs_shape));
*computation_layout.mutable_result_layout() =
ShapeLayout(LayoutUtil::GetWithDefaultLayout(result_shape));
AssignLayouts(module.get(), &computation_layout);
for (HloInstruction* instruction : {dot_lhs, dot_rhs, dot_result}) {
EXPECT_TRUE(LayoutUtil::Equal(LayoutUtil::MakeLayout({1, 0}),
instruction->shape().layout()));
}
for (const auto& instruction : computation->instructions()) {
EXPECT_NE(instruction->opcode(), HloOpcode::kCopy);
}
}
TEST_F(CpuLayoutAssignmentTest, DotWithConstantRhsTensorThroughGTE) {
auto builder = HloComputation::Builder(TestName());
Shape lhs_shape = ShapeUtil::MakeShapeWithDenseLayout(F32, {1, 12}, {0, 1});
Shape rhs_shape = ShapeUtil::MakeShapeWithDenseLayout(F32, {12, 24}, {0, 1});
Shape other_shape =
ShapeUtil::MakeShapeWithDenseLayout(F32, {100, 24}, {0, 1});
auto constant_shape = ShapeUtil::MakeTupleShape({other_shape, rhs_shape});
auto constant = builder.AddInstruction(
HloInstruction::CreateConstant(Literal::CreateFromShape(constant_shape)));
Shape result_shape = ShapeUtil::MakeShape(F32, {1, 24});
auto dot_lhs = builder.AddInstruction(
HloInstruction::CreateParameter(0, lhs_shape, "param0"));
auto dot_rhs = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(rhs_shape, constant, 1));
auto dot_result = builder.AddInstruction(
CreateCanonicalDot(result_shape, dot_lhs, dot_rhs));
auto module = CreateNewVerifiedModule();
HloComputation* computation = module->AddEntryComputation(builder.Build());
ComputationLayout computation_layout(computation->ComputeProgramShape());
*computation_layout.mutable_parameter_layout(0) =
ShapeLayout(LayoutUtil::GetWithDefaultLayout(lhs_shape));
*computation_layout.mutable_result_layout() =
ShapeLayout(LayoutUtil::GetWithDefaultLayout(result_shape));
AssignLayouts(module.get(), &computation_layout);
for (HloInstruction* instruction : {dot_lhs, dot_rhs, dot_result}) {
EXPECT_TRUE(LayoutUtil::Equal(LayoutUtil::MakeLayout({1, 0}),
instruction->shape().layout()));
}
for (const auto& instruction : computation->instructions()) {
EXPECT_NE(instruction->opcode(), HloOpcode::kCopy);
}
}
struct DotOutputFusionLayoutAssignmentResult {
bool layout_assignment_changed_something;
const HloInstruction* dot_lhs_fusion_param;
const HloInstruction* dot_rhs_fusion_param;
const HloInstruction* addend_fusion_param;
};
static absl::StatusOr<DotOutputFusionLayoutAssignmentResult> RunDotOutputFusion(
HloModule* module, const std::string& test_name, int m, int k, int n,
const int64_t dot_operand_idx_in_add) {
DotOutputFusionLayoutAssignmentResult result;
CHECK(dot_operand_idx_in_add == 0 || dot_operand_idx_in_add == 1);
auto builder = HloComputation::Builder(test_name);
Shape dot_lhs_shape = ShapeUtil::MakeShape(F32, {m, k});
Shape dot_rhs_shape = ShapeUtil::MakeShape(F32, {k, n});
Shape dot_shape = ShapeUtil::MakeShape(F32, {m, n});
if (m == 1) {
dot_lhs_shape = ShapeUtil::MakeShape(F32, {k});
dot_shape = ShapeUtil::MakeShape(F32, {n});
} else if (n == 1) {
dot_rhs_shape = ShapeUtil::MakeShape(F32, {k});
dot_shape = ShapeUtil::MakeShape(F32, {m});
}
HloInstruction* dot_lhs = builder.AddInstruction(
HloInstruction::CreateParameter(0, dot_lhs_shape, "param0"));
HloInstruction* addend = builder.AddInstruction(
HloInstruction::CreateParameter(1, dot_shape, "param1"));
HloInstruction* dot_rhs = builder.AddInstruction(
HloInstruction::CreateConstant(Literal::CreateFromShape(dot_rhs_shape)));
HloInstruction* dot_result =
builder.AddInstruction(CreateCanonicalDot(dot_shape, dot_lhs, dot_rhs));
HloInstruction* add_result;
if (dot_operand_idx_in_add == 0) {
add_result = builder.AddInstruction(HloInstruction::CreateBinary(
dot_shape, HloOpcode::kAdd, dot_result, addend));
} else {
add_result = builder.AddInstruction(HloInstruction::CreateBinary(
dot_shape, HloOpcode::kAdd, addend, dot_result));
}
HloComputation* computation = module->AddEntryComputation(builder.Build());
HloInstruction* fusion_instruction =
module->entry_computation()->AddInstruction(HloInstruction::CreateFusion(
dot_shape, HloInstruction::FusionKind::kOutput, add_result));
TF_RETURN_IF_ERROR(
computation->ReplaceInstruction(add_result, fusion_instruction));
HloInstruction* fused_add =
fusion_instruction->fused_instructions_computation()->root_instruction();
HloInstruction* fused_dot = fusion_instruction->FuseInstruction(dot_result);
TF_RETURN_IF_ERROR(
computation->RemoveInstructionAndUnusedOperands(dot_result));
ComputationLayout computation_layout(computation->ComputeProgramShape());
*computation_layout.mutable_parameter_layout(0) =
ShapeLayout(LayoutUtil::GetWithDefaultLayout(dot_lhs_shape));
*computation_layout.mutable_parameter_layout(1) =
ShapeLayout(LayoutUtil::GetWithDefaultLayout(dot_shape));
*computation_layout.mutable_result_layout() =
ShapeLayout(LayoutUtil::GetWithDefaultLayout(dot_shape));
result.dot_lhs_fusion_param =
fusion_instruction->operand(fused_dot->operand(0)->parameter_number());
result.dot_rhs_fusion_param =
fusion_instruction->operand(fused_dot->operand(1)->parameter_number());
result.addend_fusion_param = fusion_instruction->operand(
fused_add->operand(1 - dot_operand_idx_in_add)->parameter_number());
cpu::TargetMachineFeaturesWithFakeAlignmentLogic target_machine_features(
[](int64_t shape_size) {
return cpu::TargetMachineFeatures::kEigenExpectedTensorAlignment;
});
cpu::CpuLayoutAssignment layout_assignment(&computation_layout,
&target_machine_features);
TF_ASSIGN_OR_RETURN(result.layout_assignment_changed_something,
layout_assignment.Run(module));
return result;
}
static void AssertCorrectLayoutForDotOutputFusion(
const HloComputation* computation,
const DotOutputFusionLayoutAssignmentResult& layout_assignment_result,
bool expect_col_major_dot_rhs) {
Layout expected_dot_rhs_layout = expect_col_major_dot_rhs
? LayoutUtil::MakeLayout({0, 1})
: LayoutUtil::MakeLayout({1, 0});
if (layout_assignment_result.dot_rhs_fusion_param->shape().rank() == 1) {
expected_dot_rhs_layout = LayoutUtil::MakeLayout({0});
}
EXPECT_TRUE(LayoutUtil::Equal(
expected_dot_rhs_layout,
layout_assignment_result.dot_rhs_fusion_param->shape().layout()));
EXPECT_TRUE(LayoutUtil::Equal(
LayoutUtil::MakeDescendingLayout(
layout_assignment_result.dot_lhs_fusion_param->shape().rank()),
layout_assignment_result.dot_lhs_fusion_param->shape().layout()));
EXPECT_TRUE(LayoutUtil::Equal(
LayoutUtil::MakeDescendingLayout(
layout_assignment_result.addend_fusion_param->shape().rank()),
layout_assignment_result.addend_fusion_param->shape().layout()));
EXPECT_THAT(computation->instructions(), Each(Not(op::Copy())));
}
TEST_F(CpuLayoutAssignmentTest, DotOutputFusion_1x50x19_dot_idx_0) {
std::unique_ptr<HloModule> module = CreateNewVerifiedModule();
TF_ASSERT_OK_AND_ASSIGN(
DotOutputFusionLayoutAssignmentResult layout_assignment_result,
      RunDotOutputFusion(module.get(), TestName(), 1, 50, 19,
                         /*dot_operand_idx_in_add=*/0));
ASSERT_TRUE(layout_assignment_result.layout_assignment_changed_something);
AssertCorrectLayoutForDotOutputFusion(module->entry_computation(),
layout_assignment_result,
                                        /*expect_col_major_dot_rhs=*/true);
}
TEST_F(CpuLayoutAssignmentTest, DotOutputFusion_1x50x19_dot_idx_1) {
std::unique_ptr<HloModule> module = CreateNewVerifiedModule();
TF_ASSERT_OK_AND_ASSIGN(
DotOutputFusionLayoutAssignmentResult layout_assignment_result,
      RunDotOutputFusion(module.get(), TestName(), 1, 50, 19,
                         /*dot_operand_idx_in_add=*/1));
ASSERT_TRUE(layout_assignment_result.layout_assignment_changed_something);
AssertCorrectLayoutForDotOutputFusion(module->entry_computation(),
layout_assignment_result,
                                        /*expect_col_major_dot_rhs=*/true);
}
TEST_F(CpuLayoutAssignmentTest, DotOutputFusion_19x50x1_dot_idx_0) {
std::unique_ptr<HloModule> module = CreateNewVerifiedModule();
TF_ASSERT_OK_AND_ASSIGN(
DotOutputFusionLayoutAssignmentResult layout_assignment_result,
      RunDotOutputFusion(module.get(), TestName(), 19, 50, 1,
                         /*dot_operand_idx_in_add=*/0));
ASSERT_TRUE(layout_assignment_result.layout_assignment_changed_something);
AssertCorrectLayoutForDotOutputFusion(module->entry_computation(),
layout_assignment_result,
                                        /*expect_col_major_dot_rhs=*/false);
}
TEST_F(CpuLayoutAssignmentTest, DotOutputFusion_19x50x1_dot_idx_1) {
std::unique_ptr<HloModule> module = CreateNewVerifiedModule();
TF_ASSERT_OK_AND_ASSIGN(
DotOutputFusionLayoutAssignmentResult layout_assignment_result,
      RunDotOutputFusion(module.get(), TestName(), 19, 50, 1,
                         /*dot_operand_idx_in_add=*/1));
ASSERT_TRUE(layout_assignment_result.layout_assignment_changed_something);
AssertCorrectLayoutForDotOutputFusion(module->entry_computation(),
layout_assignment_result,
                                        /*expect_col_major_dot_rhs=*/false);
}
TEST_F(CpuLayoutAssignmentTest, DotOutputFusion_19x50x19_dot_idx_0) {
std::unique_ptr<HloModule> module = CreateNewVerifiedModule();
TF_ASSERT_OK_AND_ASSIGN(
DotOutputFusionLayoutAssignmentResult layout_assignment_result,
      RunDotOutputFusion(module.get(), TestName(), 19, 50, 19,
                         /*dot_operand_idx_in_add=*/0));
ASSERT_TRUE(layout_assignment_result.layout_assignment_changed_something);
AssertCorrectLayoutForDotOutputFusion(module->entry_computation(),
layout_assignment_result,
                                        /*expect_col_major_dot_rhs=*/false);
}
TEST_F(CpuLayoutAssignmentTest, DotOutputFusion_19x50x19_dot_idx_1) {
std::unique_ptr<HloModule> module = CreateNewVerifiedModule();
TF_ASSERT_OK_AND_ASSIGN(
DotOutputFusionLayoutAssignmentResult layout_assignment_result,
      RunDotOutputFusion(module.get(), TestName(), 19, 50, 19,
                         /*dot_operand_idx_in_add=*/1));
ASSERT_TRUE(layout_assignment_result.layout_assignment_changed_something);
AssertCorrectLayoutForDotOutputFusion(module->entry_computation(),
layout_assignment_result,
                                        /*expect_col_major_dot_rhs=*/false);
}
TEST_F(CpuLayoutAssignmentTest, BatchDotLayoutMustBeRowMajor) {
const char* hlo_string = R"(
HloModule BatchDotLayoutMustBeRowMajor
ENTRY BatchDotLayoutMustBeRowMajor {
p0 = f32[10,1,10] parameter(0)
p1 = f32[10,10,1] parameter(1)
ROOT dot = f32[10,1,1] dot(p0, p1), lhs_batch_dims={0},
lhs_contracting_dims={2},
rhs_batch_dims={0},
rhs_contracting_dims={1}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
HloComputation* computation = module->entry_computation();
ComputationLayout computation_layout(computation->ComputeProgramShape());
*computation_layout.mutable_parameter_layout(0) = ShapeLayout(
ShapeUtil::MakeShapeWithDenseLayout(F32, {10, 1, 10}, {2, 1, 0}));
*computation_layout.mutable_parameter_layout(1) = ShapeLayout(
ShapeUtil::MakeShapeWithDenseLayout(F32, {10, 10, 1}, {2, 1, 0}));
*computation_layout.mutable_result_layout() = ShapeLayout(
ShapeUtil::MakeShapeWithDenseLayout(F32, {10, 1, 1}, {1, 2, 0}));
AssignLayouts(module.get(), &computation_layout);
Shape expected_shape =
ShapeUtil::MakeShapeWithDenseLayout(F32, {10, 1, 1}, {2, 1, 0});
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Copy(op::ShapeWithLayout(expected_shape)));
EXPECT_THAT(
module->entry_computation()->root_instruction(),
op::Copy(op::Dot(
op::ShapeWithLayout(computation_layout.parameter_layout(0).shape()),
op::ShapeWithLayout(
computation_layout.parameter_layout(1).shape()))));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/cpu/cpu_layout_assignment.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/cpu/cpu_layout_assignment_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
34ca573c-4dda-4c6b-af92-9ccafd413f77 | cpp | google/quiche | quiche_simple_arena | quiche/common/quiche_simple_arena.cc | quiche/common/quiche_simple_arena_test.cc | #include "quiche/common/quiche_simple_arena.h"
#include <algorithm>
#include <cstring>
#include <utility>
#include "quiche/common/platform/api/quiche_logging.h"
namespace quiche {
QuicheSimpleArena::QuicheSimpleArena(size_t block_size)
: block_size_(block_size) {}
QuicheSimpleArena::~QuicheSimpleArena() = default;
QuicheSimpleArena::QuicheSimpleArena(QuicheSimpleArena&& other) = default;
QuicheSimpleArena& QuicheSimpleArena::operator=(QuicheSimpleArena&& other) =
default;
char* QuicheSimpleArena::Alloc(size_t size) {
Reserve(size);
Block& b = blocks_.back();
QUICHE_DCHECK_GE(b.size, b.used + size);
char* out = b.data.get() + b.used;
b.used += size;
return out;
}
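// Realloc grows in place only when `original` is the most recent allocation
// in the last block and that block still has room; otherwise it allocates
// fresh space and copies the old bytes over.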
char* QuicheSimpleArena::Realloc(char* original, size_t oldsize,
size_t newsize) {
QUICHE_DCHECK(!blocks_.empty());
Block& last = blocks_.back();
if (last.data.get() <= original && original < last.data.get() + last.size) {
QUICHE_DCHECK_GE(last.data.get() + last.used, original + oldsize);
if (original + oldsize == last.data.get() + last.used) {
if (original + newsize < last.data.get() + last.size) {
last.used += newsize - oldsize;
return original;
}
}
}
char* out = Alloc(newsize);
memcpy(out, original, oldsize);
return out;
}
char* QuicheSimpleArena::Memdup(const char* data, size_t size) {
char* out = Alloc(size);
memcpy(out, data, size);
return out;
}
void QuicheSimpleArena::Free(char* data, size_t size) {
if (blocks_.empty()) {
return;
}
Block& b = blocks_.back();
if (size <= b.used && data + size == b.data.get() + b.used) {
b.used -= size;
}
}
void QuicheSimpleArena::Reset() {
blocks_.clear();
status_.bytes_allocated_ = 0;
}
void QuicheSimpleArena::Reserve(size_t additional_space) {
if (blocks_.empty()) {
AllocBlock(std::max(additional_space, block_size_));
} else {
const Block& last = blocks_.back();
if (last.size < last.used + additional_space) {
AllocBlock(std::max(additional_space, block_size_));
}
}
}
void QuicheSimpleArena::AllocBlock(size_t size) {
blocks_.push_back(Block(size));
status_.bytes_allocated_ += size;
}
QuicheSimpleArena::Block::Block(size_t s)
: data(new char[s]), size(s), used(0) {}
QuicheSimpleArena::Block::~Block() = default;
QuicheSimpleArena::Block::Block(QuicheSimpleArena::Block&& other)
: size(other.size), used(other.used) {
data = std::move(other.data);
}
QuicheSimpleArena::Block& QuicheSimpleArena::Block::operator=(
QuicheSimpleArena::Block&& other) {
size = other.size;
used = other.used;
data = std::move(other.data);
return *this;
}
} | #include "quiche/common/quiche_simple_arena.h"
#include <cstring>
#include <string>
#include <vector>
#include "absl/strings/string_view.h"
#include "quiche/common/platform/api/quiche_test.h"
namespace quiche {
namespace {
size_t kDefaultBlockSize = 2048;
const char kTestString[] = "This is a decently long test string.";
TEST(QuicheSimpleArenaTest, NoAllocationOnConstruction) {
QuicheSimpleArena arena(kDefaultBlockSize);
EXPECT_EQ(0u, arena.status().bytes_allocated());
}
TEST(QuicheSimpleArenaTest, Memdup) {
QuicheSimpleArena arena(kDefaultBlockSize);
const size_t length = strlen(kTestString);
char* c = arena.Memdup(kTestString, length);
EXPECT_NE(nullptr, c);
EXPECT_NE(c, kTestString);
EXPECT_EQ(absl::string_view(c, length), kTestString);
}
TEST(QuicheSimpleArenaTest, MemdupLargeString) {
  QuicheSimpleArena arena(/*block_size=*/10);
const size_t length = strlen(kTestString);
char* c = arena.Memdup(kTestString, length);
EXPECT_NE(nullptr, c);
EXPECT_NE(c, kTestString);
EXPECT_EQ(absl::string_view(c, length), kTestString);
}
TEST(QuicheSimpleArenaTest, MultipleBlocks) {
  QuicheSimpleArena arena(/*block_size=*/40);
std::vector<std::string> strings = {
"One decently long string.", "Another string.",
"A third string that will surely go in a different block."};
std::vector<absl::string_view> copies;
for (const std::string& s : strings) {
absl::string_view sp(arena.Memdup(s.data(), s.size()), s.size());
copies.push_back(sp);
}
EXPECT_EQ(strings.size(), copies.size());
for (size_t i = 0; i < strings.size(); ++i) {
EXPECT_EQ(copies[i], strings[i]);
}
}
TEST(QuicheSimpleArenaTest, UseAfterReset) {
QuicheSimpleArena arena(kDefaultBlockSize);
const size_t length = strlen(kTestString);
char* c = arena.Memdup(kTestString, length);
arena.Reset();
c = arena.Memdup(kTestString, length);
EXPECT_NE(nullptr, c);
EXPECT_NE(c, kTestString);
EXPECT_EQ(absl::string_view(c, length), kTestString);
}
TEST(QuicheSimpleArenaTest, Free) {
QuicheSimpleArena arena(kDefaultBlockSize);
const size_t length = strlen(kTestString);
arena.Free(const_cast<char*>(kTestString), length);
char* c1 = arena.Memdup("Foo", 3);
char* c2 = arena.Memdup(kTestString, length);
arena.Free(const_cast<char*>(kTestString), length);
char* c3 = arena.Memdup("Bar", 3);
char* c4 = arena.Memdup(kTestString, length);
EXPECT_NE(c1, c2);
EXPECT_NE(c1, c3);
EXPECT_NE(c1, c4);
EXPECT_NE(c2, c3);
EXPECT_NE(c2, c4);
EXPECT_NE(c3, c4);
arena.Free(c4, length);
arena.Free(c2, length);
char* c5 = arena.Memdup("Baz", 3);
EXPECT_EQ(c4, c5);
}
TEST(QuicheSimpleArenaTest, Alloc) {
QuicheSimpleArena arena(kDefaultBlockSize);
const size_t length = strlen(kTestString);
char* c1 = arena.Alloc(length);
char* c2 = arena.Alloc(2 * length);
char* c3 = arena.Alloc(3 * length);
char* c4 = arena.Memdup(kTestString, length);
EXPECT_EQ(c1 + length, c2);
EXPECT_EQ(c2 + 2 * length, c3);
EXPECT_EQ(c3 + 3 * length, c4);
EXPECT_EQ(absl::string_view(c4, length), kTestString);
}
TEST(QuicheSimpleArenaTest, Realloc) {
QuicheSimpleArena arena(kDefaultBlockSize);
const size_t length = strlen(kTestString);
char* c1 = arena.Memdup(kTestString, length);
char* c2 = arena.Realloc(c1, length, 2 * length);
EXPECT_TRUE(c1);
EXPECT_EQ(c1, c2);
EXPECT_EQ(absl::string_view(c1, length), kTestString);
char* c3 = arena.Memdup(kTestString, length);
EXPECT_EQ(c2 + 2 * length, c3);
EXPECT_EQ(absl::string_view(c3, length), kTestString);
char* c4 = arena.Realloc(c3, length, 2 * length);
EXPECT_EQ(c3, c4);
EXPECT_EQ(absl::string_view(c4, length), kTestString);
char* c5 = arena.Realloc(c4, 2 * length, 3 * length);
EXPECT_EQ(c4, c5);
EXPECT_EQ(absl::string_view(c5, length), kTestString);
char* c6 = arena.Memdup(kTestString, length);
EXPECT_EQ(c5 + 3 * length, c6);
EXPECT_EQ(absl::string_view(c6, length), kTestString);
char* c7 = arena.Realloc(c6, length, kDefaultBlockSize);
EXPECT_EQ(absl::string_view(c7, length), kTestString);
arena.Free(c7, kDefaultBlockSize);
char* c8 = arena.Memdup(kTestString, length);
EXPECT_NE(c6, c7);
EXPECT_EQ(c7, c8);
EXPECT_EQ(absl::string_view(c8, length), kTestString);
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/common/quiche_simple_arena.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/common/quiche_simple_arena_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
6907e617-e772-409b-9aa1-787ccb9693f8 | cpp | tensorflow/tensorflow | cumsum | tensorflow/lite/delegates/gpu/common/tasks/cumsum.cc | tensorflow/lite/delegates/gpu/cl/kernels/cumsum_test.cc | #include "tensorflow/lite/delegates/gpu/common/tasks/cumsum.h"
#include <string>
#include <utility>
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/shape.h"
namespace tflite {
namespace gpu {
void Cumsum::GetCumsumCode(const OperationDef& op_def) {
AddSrcTensor("src_tensor", op_def.src_tensors[0]);
AddDstTensor("dst_tensor", op_def.dst_tensors[0]);
std::map<Axis, std::string> task_sizes = {
{Axis::WIDTH, "args.src_tensor.Width()"},
{Axis::HEIGHT, "args.src_tensor.Height()"},
{Axis::DEPTH, "args.src_tensor.Depth()"},
{Axis::CHANNELS, "args.src_tensor.Slices()"},
{Axis::BATCH, "args.src_tensor.Batch()"},
};
std::string limit = task_sizes[axis_];
task_sizes[axis_] = "1";
std::map<Axis, std::string> index_name = {
{Axis::WIDTH, "X"}, {Axis::HEIGHT, "Y"}, {Axis::DEPTH, "Z"},
{Axis::CHANNELS, "S"}, {Axis::BATCH, "B"},
};
std::string indexes = "X, Y";
std::string c;
c += "MAIN_FUNCTION($0) {\n";
if (definition_.dst_tensors[0].HasAxis(Axis::DEPTH)) {
indexes += ", Z";
c += " int linear_id = GLOBAL_ID_1;\n";
c += " int Y = linear_id % " + task_sizes[Axis::HEIGHT] + ";\n";
c += " int D = linear_id / " + task_sizes[Axis::HEIGHT] + ";\n";
c += " if (D >= " + task_sizes[Axis::DEPTH] + ") return;\n";
} else {
c += " int Y = GLOBAL_ID_1;\n";
c += " if (Y >= " + task_sizes[Axis::HEIGHT] + ") return;\n";
}
indexes += ", S";
if (op_def.dst_tensors[0].HasAxis(Axis::BATCH)) {
indexes += ", B";
c += " int linear_id = GLOBAL_ID_0;\n";
c += " int X = linear_id / " + task_sizes[Axis::BATCH] + ";\n";
c += " int B = linear_id % " + task_sizes[Axis::BATCH] + ";\n";
c += " if (X >= " + task_sizes[Axis::WIDTH] + ") return;\n";
} else {
c += " int X = GLOBAL_ID_0;\n";
c += " if (X >= " + task_sizes[Axis::WIDTH] + ") return;\n";
}
c += " int S = GLOBAL_ID_2;\n";
c += " if (S >= " + task_sizes[Axis::CHANNELS] + ") return;\n";
c += " args.src_tensor::type res = args.src_tensor::zero_value;\n";
c += " for (; " + index_name[axis_] + " < " + limit + "; " +
index_name[axis_] + "++) {\n";
c += " args.src_tensor::type curr = args.src_tensor.Read(" + indexes +
");\n";
if (axis_ == Axis::CHANNELS) {
c += " res.x = res.w + curr.x;\n";
c += " res.y = res.x + curr.y;\n";
c += " res.z = res.y + curr.z;\n";
c += " res.w = res.z + curr.w;\n";
} else {
c += " res += curr;\n";
}
c += " args.dst_tensor.Write(res, " + indexes + ");\n";
c += " }\n";
c += "}\n";
code_ = c;
}
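// The grid collapses the cumsum axis to extent 1: a single invocation scans
// that axis serially while all other axes stay parallel.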
int3 Cumsum::GetGridSize() const {
const int width = axis_ == Axis::WIDTH ? 1 : src_[0]->Width();
const int height = axis_ == Axis::HEIGHT ? 1 : src_[0]->Height();
const int depth = axis_ == Axis::DEPTH ? 1 : src_[0]->Depth();
const int batch = axis_ == Axis::BATCH ? 1 : src_[0]->Batch();
const int slices = axis_ == Axis::CHANNELS ? 1 : src_[0]->Slices();
const int grid_x = width * batch;
const int grid_y = height * depth;
const int grid_z = slices;
return int3(grid_x, grid_y, grid_z);
}
Cumsum::Cumsum(Cumsum&& operation)
: GPUOperation(std::move(operation)), axis_(operation.axis_) {}
Cumsum& Cumsum::operator=(Cumsum&& operation) {
if (this != &operation) {
axis_ = operation.axis_;
GPUOperation::operator=(std::move(operation));
}
return *this;
}
Cumsum CreateCumsum(const OperationDef& definition,
const CumsumAttributes& attr) {
Cumsum op(definition, attr.axis);
op.GetCumsumCode(definition);
return op;
}
}
} | #include <gtest/gtest.h>
#include "tensorflow/lite/delegates/gpu/cl/kernels/cl_test.h"
#include "tensorflow/lite/delegates/gpu/common/tasks/cumsum_test_util.h"
namespace tflite {
namespace gpu {
namespace cl {
namespace {
TEST_F(OpenCLOperationTest, CumsumHWCTest) {
absl::Status status = CumsumHWCTest(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, CumsumBHWCTest) {
absl::Status status = CumsumBHWCTest(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/common/tasks/cumsum.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/cl/kernels/cumsum_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
990e754e-57b8-469c-adf1-c1785a3f9738 | cpp | tensorflow/tensorflow | list_from_tensor | tensorflow/lite/kernels/variants/list_kernels/list_from_tensor.cc | tensorflow/lite/kernels/variants/list_kernels/list_from_tensor_test.cc | #include <cstring>
#include <utility>
#include "tensorflow/lite/array.h"
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/kernels/variants/list_ops_lib.h"
#include "tensorflow/lite/kernels/variants/list_ops_util.h"
#include "tensorflow/lite/kernels/variants/tensor_array.h"
#include "tensorflow/lite/util.h"
namespace tflite {
namespace variants {
namespace ops {
namespace {
constexpr int kTensorInput = 0;
constexpr int kElementShapeInput = 1;
constexpr int kListOut = 0;
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
const TfLiteTensor* element_shape;
TF_LITE_ENSURE_OK(
context, GetInputSafe(context, node, kElementShapeInput, &element_shape));
TF_LITE_ENSURE(context, element_shape->type == kTfLiteInt32);
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, kListOut, &output));
TF_LITE_ENSURE_TYPES_EQ(context, output->type, kTfLiteVariant);
output->allocation_type = kTfLiteVariantObject;
return kTfLiteOk;
}
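// Eval splits the input along its leading dimension: each of the
// dims->data[0] slices is copied into a freshly allocated element tensor of
// shape dims[1:] and stored in the output TensorArray.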
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* tensor_input;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kTensorInput, &tensor_input));
const int rank = tensor_input->dims->size;
TF_LITE_ENSURE(context, rank > 0);
const int list_len = tensor_input->dims->data[0];
IntArrayUniquePtr element_shape_for_tensors =
BuildTfLiteArray(rank - 1, tensor_input->dims->data + 1);
const TfLiteTensor* element_shape_tensor;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kElementShapeInput,
&element_shape_tensor));
TF_LITE_ENSURE(context, (element_shape_tensor->dims->size == 1 &&
element_shape_tensor->dims->data[0] == rank - 1) ||
element_shape_tensor->dims->size == 0);
IntArrayUniquePtr element_shape_for_list =
TensorAsShape(*element_shape_tensor);
if (element_shape_for_list->size > 0) {
TF_LITE_ENSURE_EQ(context, element_shape_for_list->size,
element_shape_for_tensors->size);
for (int i = 0; i < element_shape_for_tensors->size; ++i) {
const int lhs = element_shape_for_list->data[i];
const int rhs = element_shape_for_tensors->data[i];
TF_LITE_ENSURE(context, lhs == -1 || rhs == -1 || lhs == rhs);
}
}
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, kListOut, &output));
TF_LITE_ENSURE_OK(context, TfLiteTensorVariantRealloc<TensorArray>(
output, tensor_input->type,
BuildTfLiteArray(*element_shape_for_list)));
TensorArray* arr =
static_cast<TensorArray*>(static_cast<VariantData*>(output->data.data));
arr->Resize(list_len);
size_t data_offset = 0;
for (int i = 0; i < list_len; ++i) {
TensorUniquePtr tensor_to_set = BuildTfLiteTensor(
tensor_input->type, BuildTfLiteArray(*element_shape_for_tensors),
kTfLiteDynamic);
memcpy(tensor_to_set->data.raw, tensor_input->data.raw + data_offset,
tensor_to_set->bytes);
data_offset += tensor_to_set->bytes;
TF_LITE_ENSURE(context, arr->Set(i, std::move(tensor_to_set)));
}
return kTfLiteOk;
}
}
TfLiteRegistration* Register_LIST_FROM_TENSOR() {
static TfLiteRegistration r = {nullptr, nullptr, Prepare, Eval};
return &r;
}
}
}
} | #include <tuple>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/kernels/variants/list_ops_lib.h"
#include "tensorflow/lite/kernels/variants/tensor_array.h"
#include "tensorflow/lite/schema/schema_generated.h"
using ::testing::ElementsAre;
namespace tflite {
namespace variants {
namespace ops {
namespace {
class ListFromTensorModel : public SingleOpModel {
public:
ListFromTensorModel(TensorData tensor_data, TensorData shape_data) {
tensor_id_ = AddInput(tensor_data);
shape_id_ = AddInput(shape_data);
list_id_ = AddOutput({TensorType_VARIANT, {1}});
SetCustomOp("TensorListFromTensor", {},
Register_LIST_FROM_TENSOR);
BuildInterpreter({tensor_data.shape, shape_data.shape});
}
const TensorArray* GetOutputTensorArray(int tensor_id) {
TfLiteTensor* tensor = interpreter_->tensor(tensor_id);
TFLITE_CHECK(tensor != nullptr && tensor->type == kTfLiteVariant &&
tensor->allocation_type == kTfLiteVariantObject);
return static_cast<const TensorArray*>(
static_cast<const VariantData*>(tensor->data.data));
}
int tensor_id_;
int shape_id_;
int list_id_;
};
TEST(ListFromTensorTest, MatrixInput_ReturnsListWithVectorElements) {
ListFromTensorModel m({TensorType_INT32, {2, 2}}, {TensorType_INT32, {1}});
m.PopulateTensor<int>(m.tensor_id_, {1, 2, 3, 4});
m.PopulateTensor<int>(m.shape_id_, {2});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
const TensorArray* arr = m.GetOutputTensorArray(m.list_id_);
ASSERT_EQ(arr->NumElements(), 2);
ASSERT_THAT(arr->ElementShape(), DimsAre({2}));
ASSERT_EQ(arr->ElementType(), kTfLiteInt32);
{
const TfLiteTensor* element = arr->At(0);
ASSERT_THAT(element, DimsAre({2}));
EXPECT_THAT(std::make_tuple(GetTensorData<int>(element), 2),
ElementsAre(1, 2));
}
{
const TfLiteTensor* element = arr->At(1);
ASSERT_THAT(element, DimsAre({2}));
EXPECT_THAT(std::make_tuple(GetTensorData<int>(element), 2),
ElementsAre(3, 4));
}
}
TEST(ListFromTensorTest, VectorInput_ReturnsListWithScalarElements) {
ListFromTensorModel m({TensorType_INT32, {2}}, {TensorType_INT32, {0}});
m.PopulateTensor<int>(m.tensor_id_, {1, 2});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
const TensorArray* arr = m.GetOutputTensorArray(m.list_id_);
ASSERT_EQ(arr->NumElements(), 2);
ASSERT_THAT(arr->ElementShape(), DimsAre({}));
ASSERT_EQ(arr->ElementType(), kTfLiteInt32);
{
const TfLiteTensor* element = arr->At(0);
ASSERT_THAT(element, DimsAre({}));
EXPECT_THAT(std::make_tuple(GetTensorData<int>(element), 1),
ElementsAre(1));
}
{
const TfLiteTensor* element = arr->At(1);
ASSERT_THAT(element, DimsAre({}));
EXPECT_THAT(std::make_tuple(GetTensorData<int>(element), 1),
ElementsAre(2));
}
}
TEST(ListFromTensorTest, 3DInput_ReturnsListWithMatrixElements) {
ListFromTensorModel m({TensorType_INT32, {2, 2, 2}}, {TensorType_INT32, {2}});
m.PopulateTensor<int>(m.tensor_id_, {1, 2, 3, 4, 5, 6, 7, 8});
m.PopulateTensor<int>(m.shape_id_, {2, 2});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
const TensorArray* arr = m.GetOutputTensorArray(m.list_id_);
ASSERT_EQ(arr->NumElements(), 2);
ASSERT_THAT(arr->ElementShape(), DimsAre({2, 2}));
ASSERT_EQ(arr->ElementType(), kTfLiteInt32);
{
const TfLiteTensor* element = arr->At(0);
ASSERT_THAT(element, DimsAre({2, 2}));
EXPECT_THAT(std::make_tuple(GetTensorData<int>(element), 4),
ElementsAre(1, 2, 3, 4));
}
{
const TfLiteTensor* element = arr->At(1);
ASSERT_THAT(element, DimsAre({2, 2}));
EXPECT_THAT(std::make_tuple(GetTensorData<int>(element), 4),
ElementsAre(5, 6, 7, 8));
}
}
TEST(ListFromTensorTest, MismatchedShapeInputTensorShape_Fails) {
ListFromTensorModel m({TensorType_INT32, {2, 2, 2}}, {TensorType_INT32, {2}});
m.PopulateTensor<int>(m.shape_id_, {2, 3});
ASSERT_EQ(m.Invoke(), kTfLiteError);
}
TEST(ListFromTensorTest, ScalarInput_Fails) {
ListFromTensorModel m({TensorType_INT32, {}}, {TensorType_INT32, {}});
ASSERT_EQ(m.Invoke(), kTfLiteError);
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/variants/list_kernels/list_from_tensor.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/variants/list_kernels/list_from_tensor_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
924450a7-b081-4c29-b3f6-2bb71a1b42d4 | cpp | tensorflow/tensorflow | xnnpack_delegate_provider | tensorflow/lite/tools/delegates/xnnpack_delegate_provider.cc | tensorflow/lite/tools/delegates/xnnpack_delegate_provider_test.cc | #include <string>
#include <utility>
#include "tensorflow/lite/tools/delegates/delegate_provider.h"
#include "tensorflow/lite/tools/evaluation/utils.h"
namespace tflite {
namespace tools {
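// Delegate provider that surfaces the XNNPACK delegate's tuning knobs
// (use_xnnpack, xnnpack_force_fp16, xnnpack_weight_cache_file_path) to the
// TFLite tools flag machinery.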
class XnnpackDelegateProvider : public DelegateProvider {
public:
XnnpackDelegateProvider() {
default_params_.AddParam("use_xnnpack", ToolParam::Create<bool>(false));
default_params_.AddParam("xnnpack_force_fp16",
ToolParam::Create<bool>(false));
default_params_.AddParam("xnnpack_weight_cache_file_path",
ToolParam::Create<std::string>(""));
}
std::vector<Flag> CreateFlags(ToolParams* params) const final;
void LogParams(const ToolParams& params, bool verbose) const final;
TfLiteDelegatePtr CreateTfLiteDelegate(const ToolParams& params) const final;
std::pair<TfLiteDelegatePtr, int> CreateRankedTfLiteDelegate(
const ToolParams& params) const final;
std::string GetName() const final { return "XNNPACK"; }
};
REGISTER_DELEGATE_PROVIDER(XnnpackDelegateProvider);
std::vector<Flag> XnnpackDelegateProvider::CreateFlags(
ToolParams* params) const {
std::vector<Flag> flags = {
CreateFlag<bool>("use_xnnpack", params,
"explicitly apply the XNNPACK delegate. Note the "
"XNNPACK delegate could "
"be implicitly applied by the TF Lite runtime "
"regardless the value of "
"this parameter. To disable this implicit application, "
"set the value to "
"false explicitly."),
CreateFlag<bool>("xnnpack_force_fp16", params,
"enforce float16 inference."),
CreateFlag<std::string>("xnnpack_weight_cache_file_path", params,
"enable file-backed weight caching."),
};
return flags;
}
void XnnpackDelegateProvider::LogParams(const ToolParams& params,
bool verbose) const {
LOG_TOOL_PARAM(params, bool, "use_xnnpack", "Use xnnpack", verbose);
LOG_TOOL_PARAM(params, bool, "xnnpack_force_fp16", "xnnpack_force_fp16",
verbose);
LOG_TOOL_PARAM(params, std::string, "xnnpack_weight_cache_file_path",
"xnnpack_weight_cache_file_path", verbose);
}
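// Only creates an XNNPACK delegate when use_xnnpack is set; otherwise
// returns a null delegate so the runtime's default behavior is preserved.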
TfLiteDelegatePtr XnnpackDelegateProvider::CreateTfLiteDelegate(
const ToolParams& params) const {
if (params.Get<bool>("use_xnnpack")) {
return evaluation::CreateXNNPACKDelegate(
params.Get<int32_t>("num_threads"),
params.Get<bool>("xnnpack_force_fp16"),
params.Get<std::string>("xnnpack_weight_cache_file_path").c_str());
}
return CreateNullDelegate();
}
std::pair<TfLiteDelegatePtr, int>
XnnpackDelegateProvider::CreateRankedTfLiteDelegate(
const ToolParams& params) const {
auto ptr = CreateTfLiteDelegate(params);
return std::make_pair(std::move(ptr),
params.GetPosition<bool>("use_xnnpack"));
}
}
} | #include <cstdint>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h"
#include "tensorflow/lite/tools/delegates/delegate_provider.h"
#include "tensorflow/lite/tools/tool_params.h"
namespace tflite {
namespace tools {
namespace {
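// Checks end to end that flag values set on the provider reach the options
// reported by TfLiteXNNPackDelegateGetOptions().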
TEST(XNNPackDelegateProviderTest, Test) {
const std::string kFakeCacheParam =
testing::TempDir() + "/XNNPackDelegateProviderTest.xnnpack_cache";
const auto& providers = GetRegisteredDelegateProviders();
ASSERT_EQ(providers.size(), 1);
ToolParams params;
const auto& xnnpack_provider = providers[0];
ASSERT_NE(xnnpack_provider, nullptr);
params.Merge(xnnpack_provider->DefaultParams());
params.AddParam("num_threads", ToolParam::Create<int32_t>(-1));
EXPECT_TRUE(params.HasParam("use_xnnpack"));
EXPECT_FALSE(params.HasValueSet<bool>("use_xnnpack"));
ASSERT_NE(params.GetParam("use_xnnpack"), nullptr);
EXPECT_TRUE(params.HasParam("xnnpack_force_fp16"));
EXPECT_FALSE(params.HasValueSet<bool>("xnnpack_force_fp16"));
ASSERT_NE(params.GetParam("xnnpack_force_fp16"), nullptr);
EXPECT_TRUE(params.HasParam("xnnpack_weight_cache_file_path"));
EXPECT_FALSE(
params.HasValueSet<std::string>("xnnpack_weight_cache_file_path"));
ASSERT_NE(params.GetParam("xnnpack_weight_cache_file_path"), nullptr);
params.Set<bool>("use_xnnpack", true, 0);
{
TfLiteDelegatePtr delegate = xnnpack_provider->CreateTfLiteDelegate(params);
const TfLiteXNNPackDelegateOptions* options =
TfLiteXNNPackDelegateGetOptions(delegate.get());
ASSERT_NE(options, nullptr);
EXPECT_EQ(options->weight_cache_file_path, nullptr);
}
params.Set<bool>("xnnpack_force_fp16", true, 1);
params.Set<std::string>("xnnpack_weight_cache_file_path", kFakeCacheParam,
2);
{
TfLiteDelegatePtr delegate = xnnpack_provider->CreateTfLiteDelegate(params);
const TfLiteXNNPackDelegateOptions* options =
TfLiteXNNPackDelegateGetOptions(delegate.get());
ASSERT_NE(options, nullptr);
EXPECT_THAT(options->weight_cache_file_path,
testing::StrEq(kFakeCacheParam));
EXPECT_TRUE(options->flags & TFLITE_XNNPACK_DELEGATE_FLAG_FORCE_FP16);
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/tools/delegates/xnnpack_delegate_provider.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/tools/delegates/xnnpack_delegate_provider_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
8d045eae-f78b-47ac-9a3d-4ba595aa190d | cpp | google/arolla | strings | arolla/qexpr/operators/strings/strings.cc | arolla/qexpr/operators/strings/strings_test.cc | #include "arolla/qexpr/operators/strings/strings.h"
#include <cstddef>
#include <cstdint>
#include <limits>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include "absl/base/no_destructor.h"
#include "absl/base/nullability.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/escaping.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "icu4c/source/common/unicode/bytestream.h"
#include "icu4c/source/common/unicode/casemap.h"
#include "icu4c/source/common/unicode/errorcode.h"
#include "icu4c/source/common/unicode/stringoptions.h"
#include "icu4c/source/common/unicode/umachine.h"
#include "icu4c/source/common/unicode/utf8.h"
#include "double-conversion/double-to-string.h"
#include "double-conversion/utils.h"
#include "arolla/memory/optional_value.h"
#include "arolla/qtype/strings/regex.h"
#include "arolla/util/bytes.h"
#include "arolla/util/text.h"
#include "arolla/util/unit.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla {
namespace {
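// Returns an InvalidArgumentError identifying the byte position of the first
// malformed sequence if `bytes` is not well-formed UTF-8.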
absl::Status ValidateUtf8(absl::string_view bytes) {
if (bytes.size() > size_t{std::numeric_limits<int32_t>::max()}) {
return absl::UnimplementedError("string is too long to convert to UTF-8");
}
  // The size check above guarantees the length fits in int32_t, matching the
  // signed offsets that the ICU U8_NEXT macro expects.
  const int32_t length = static_cast<int32_t>(bytes.size());
  int32_t offset = 0;
  while (offset < length) {
    UChar32 character;
    int32_t previous_offset = offset;
    U8_NEXT(bytes.data(), offset, length, character);
if (character < 0) {
return absl::InvalidArgumentError(absl::StrFormat(
"invalid UTF-8 sequence at position %d", previous_offset));
}
}
return absl::OkStatus();
}
}
absl::StatusOr<Text> LowerOp::operator()(
absl::string_view in, std::optional<absl::string_view> locale) const {
std::string result;
icu::StringByteSink<std::string> sink(&result);
icu::ErrorCode error_code;
const char* locale_ptr = locale.has_value() ? locale->data() : nullptr;
icu::CaseMap::utf8ToLower(locale_ptr, U_FOLD_CASE_DEFAULT,
absl::string_view(in), sink,
nullptr, error_code);
if (error_code.isFailure()) {
return absl::InvalidArgumentError(absl::StrFormat(
"utf8ToLower failed with error: %s", error_code.errorName()));
}
return Text(std::move(result));
}
absl::StatusOr<Text> UpperOp::operator()(
absl::string_view in, std::optional<absl::string_view> locale) const {
std::string result;
icu::StringByteSink<std::string> sink(&result);
icu::ErrorCode error_code;
const char* locale_ptr = locale.has_value() ? locale->data() : nullptr;
icu::CaseMap::utf8ToUpper(locale_ptr, U_FOLD_CASE_DEFAULT,
absl::string_view(in), sink,
nullptr, error_code);
if (error_code.isFailure()) {
return absl::InvalidArgumentError(absl::StrFormat(
"utf8ToUpper failed with error: %s", error_code.errorName()));
}
return Text(std::move(result));
}
absl::StatusOr<Text> DecodeOp::operator()(absl::string_view s) const {
RETURN_IF_ERROR(ValidateUtf8(s));
return Text(s);
}
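// Replaces occurrences of `old_sub` with `new_sub`, capped at `max_subs`
// substitutions when that value is present and non-negative. An empty
// `old_sub` matches every position, so `new_sub` is inserted before each
// character and after the last one.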
absl::StatusOr<std::string> ReplaceOp::operator()(
absl::string_view s, absl::string_view old_sub, absl::string_view new_sub,
OptionalValue<int32_t> max_subs) const {
size_t count = std::numeric_limits<size_t>::max();
if (max_subs.present) {
if (max_subs.value == 0) {
return std::string(s);
}
count = max_subs.value;
}
std::string res;
size_t offset = 0;
if (old_sub.empty()) {
absl::StrAppend(&res, new_sub);
while ((--count > 0) && (offset < s.length())) {
absl::StrAppend(&res, s.substr(offset, 1), new_sub);
++offset;
}
} else {
while (count-- > 0) {
const size_t start = s.find(old_sub, offset);
if (start == std::string::npos) break;
absl::StrAppend(&res, s.substr(offset, start - offset), new_sub);
offset = start + old_sub.size();
}
}
res.append(s.begin() + offset, s.end());
return res;
}
absl::StatusOr<absl::Nonnull<RegexPtr>> CompileRegexOp::operator()(
absl::string_view pattern) const {
return CompileRegex(pattern);
}
OptionalUnit ContainsRegexOp::operator()(absl::string_view text,
const RegexPtr& regex) const {
return OptionalUnit(regex != nullptr && regex->PartialMatch(text));
}
OptionalUnit ContainsRegexOp::operator()(OptionalValue<absl::string_view> text,
const RegexPtr& regex) const {
return OptionalUnit(text.present && regex != nullptr &&
regex->PartialMatch(text.value));
}
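// Requires a pattern with exactly one capturing group; yields the captured
// substring of the first partial match, or missing if there is none.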
absl::StatusOr<OptionalValue<Text>> ExtractRegexOp::operator()(
const Text& text, const RegexPtr& regex) const {
if (regex == nullptr) {
return std::nullopt;
}
if (regex->NumberOfCapturingGroups() != 1) {
return absl::InvalidArgumentError(
absl::StrFormat("ExtractRegexOp expected regular expression with "
"exactly one capturing group; got `%s` which "
"contains %d capturing groups",
regex->pattern(), regex->NumberOfCapturingGroups()));
}
std::string match;
if (regex->PartialMatch(text.view(), &match)) {
return Text(std::move(match));
}
return std::nullopt;
}
absl::StatusOr<OptionalValue<Text>> ExtractRegexOp::operator()(
const OptionalValue<Text>& text, const RegexPtr& regex) const {
if (text.present) {
return (*this)(text.value, regex);
}
return std::nullopt;
}
namespace {
template <class T>
Text SignedIntegerToText(T x) {
return Text(absl::StrFormat("%d", x));
}
}
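// Bytes are rendered as a Python-style literal, e.g. b'\x00ab', with
// unsafe characters hex-escaped.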
Text AsTextOp::operator()(absl::string_view s) const {
return Text(absl::StrFormat("b'%s'", absl::Utf8SafeCHexEscape(s)));
}
Text AsTextOp::operator()(const Bytes& x) const {
return operator()(absl::string_view(x));
}
Text AsTextOp::operator()(Unit) const { return Text("present"); }
Text AsTextOp::operator()(int32_t x) const { return SignedIntegerToText(x); }
Text AsTextOp::operator()(int64_t x) const { return SignedIntegerToText(x); }
Text AsTextOp::operator()(uint64_t x) const {
return Text(absl::StrFormat("%d", x));
}
Text AsTextOp::operator()(bool x) const {
return x ? Text("true") : Text("false");
}
Text AsTextOp::operator()(float x) const {
  static const absl::NoDestructor<double_conversion::DoubleToStringConverter>
      converter(double_conversion::DoubleToStringConverter::NO_FLAGS, "inf",
                "nan",
                /*exponent_character=*/'e',
                /*decimal_in_shortest_low=*/-6,
                /*decimal_in_shortest_high=*/21,
                /*max_leading_padding_zeroes_in_precision_mode=*/6,
                /*max_trailing_padding_zeroes_in_precision_mode=*/0);
char buf[128];
double_conversion::StringBuilder builder(buf, sizeof(buf));
converter->ToShortestSingle(x, &builder);
return Text(builder.Finalize());
}
Text AsTextOp::operator()(double x) const {
  static const absl::NoDestructor<double_conversion::DoubleToStringConverter>
      converter(double_conversion::DoubleToStringConverter::NO_FLAGS, "inf",
                "nan",
                /*exponent_character=*/'e',
                /*decimal_in_shortest_low=*/-6,
                /*decimal_in_shortest_high=*/21,
                /*max_leading_padding_zeroes_in_precision_mode=*/6,
                /*max_trailing_padding_zeroes_in_precision_mode=*/0);
char buf[128];
double_conversion::StringBuilder builder(buf, sizeof(buf));
converter->ToShortest(x, &builder);
return Text(builder.Finalize());
}
Text TextAsTextOp::operator()(absl::string_view s) const { return Text(s); }
Text TextAsTextOp::operator()(const Text& s) const { return s; }
} | #include <cstdint>
#include <string>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/status/status.h"
#include "absl/status/status_matchers.h"
#include "arolla/dense_array/dense_array.h"
#include "arolla/dense_array/qtype/types.h"
#include "arolla/memory/optional_value.h"
#include "arolla/qexpr/operators.h"
#include "arolla/qtype/base_types.h"
#include "arolla/util/bytes.h"
#include "arolla/util/text.h"
#include "arolla/util/unit.h"
namespace arolla {
namespace {
using ::absl_testing::IsOkAndHolds;
using ::absl_testing::StatusIs;
using ::testing::ElementsAre;
using ::testing::HasSubstr;
TEST(StringsTest, AsText) {
EXPECT_THAT(InvokeOperator<Text>("strings.as_text", kUnit),
IsOkAndHolds(Text("present")));
EXPECT_THAT(InvokeOperator<Text>("strings.as_text", Text("text")),
IsOkAndHolds(Text("text")));
EXPECT_THAT(InvokeOperator<Text>("strings.as_text",
Bytes(std::string({0, 'b', '\'', 'e', 1}))),
IsOkAndHolds(Text("b'\\x00\\x62\\'e\\x01'")));
EXPECT_THAT(InvokeOperator<Text>("strings.as_text", false),
IsOkAndHolds(Text("false")));
EXPECT_THAT(InvokeOperator<Text>("strings.as_text", int32_t{1}),
IsOkAndHolds(Text("1")));
EXPECT_THAT(InvokeOperator<Text>("strings.as_text", int64_t{1}),
IsOkAndHolds(Text("1")));
EXPECT_THAT(InvokeOperator<Text>("strings.as_text", 2.3f),
IsOkAndHolds(Text("2.3")));
EXPECT_THAT(InvokeOperator<Text>("strings.as_text", 2.3),
IsOkAndHolds(Text("2.3")));
EXPECT_THAT(InvokeOperator<Text>("strings.as_text", 14.137167f),
IsOkAndHolds(Text("14.137167")));
EXPECT_THAT(InvokeOperator<Text>("strings.as_text", 14.137167),
IsOkAndHolds(Text("14.137167")));
EXPECT_THAT(
InvokeOperator<DenseArray<Text>>(
"strings.as_text", CreateDenseArray<Bytes>(
{Bytes(std::string({0, 'b', '\'', 'e', 1}))})),
IsOkAndHolds(ElementsAre(Text("b'\\x00\\x62\\'e\\x01'"))));
}
TEST(StringsTest, Decode) {
EXPECT_THAT(InvokeOperator<Text>("strings.decode", Bytes("text")),
IsOkAndHolds(Text("text")));
EXPECT_THAT(InvokeOperator<Text>("strings.decode", Bytes("te\0xt")),
IsOkAndHolds(Text("te\0xt")));
EXPECT_THAT(InvokeOperator<Text>("strings.decode", Bytes("\xEF\xBF\xBD")),
IsOkAndHolds(Text("\xEF\xBF\xBD")));
EXPECT_THAT(InvokeOperator<Text>("strings.decode", Bytes("\xA0text")),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("invalid UTF-8 sequence at position 0")));
EXPECT_THAT(InvokeOperator<Text>("strings.decode", Bytes("te\xC0\0xt")),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("invalid UTF-8 sequence at position 2")));
EXPECT_THAT(InvokeOperator<Text>("strings.decode", Bytes("text\x80")),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("invalid UTF-8 sequence at position 4")));
}
TEST(StringsTest, Lower) {
Text input("Hello World.");
Text expected_output("hello world.");
EXPECT_THAT(InvokeOperator<Text>("strings.lower", input),
IsOkAndHolds(expected_output));
}
TEST(StringsTest, LowerOptional) {
OptionalValue<Text> input(Text("Hello World."));
OptionalValue<Text> expected_output(Text("hello world."));
EXPECT_THAT(InvokeOperator<OptionalValue<Text>>("strings.lower", input),
IsOkAndHolds(expected_output));
}
TEST(StringsTest, LowerWithLocale) {
Text input("TITLE");
Text locale("TR_tr");
EXPECT_THAT(InvokeOperator<Text>("strings.lower", input, locale),
IsOkAndHolds(Text("tıtle")));
}
TEST(StringsTest, Upper) {
Text input("Hello World.");
EXPECT_THAT(InvokeOperator<Text>("strings.upper", input),
IsOkAndHolds(Text("HELLO WORLD.")));
}
TEST(StringsTest, UpperWithLocale) {
Text input("istanbul");
Text locale("TR_tr");
EXPECT_THAT(InvokeOperator<Text>("strings.upper", input, locale),
IsOkAndHolds(Text("İSTANBUL")));
}
TEST(StringsTest, BytesLength) {
EXPECT_THAT(InvokeOperator<int32_t>("strings.length",
Bytes("古池や蛙飛び込む水の音")),
IsOkAndHolds(33));
}
TEST(StringsTest, TextLength) {
EXPECT_THAT(
InvokeOperator<int32_t>("strings.length", Text("古池や蛙飛び込む水の音")),
IsOkAndHolds(11));
}
TEST(StringsTest, Replace) {
Text input("Hello ello foo.");
Text old_sub("ell");
Text new_sub("XX");
EXPECT_THAT(InvokeOperator<Text>("strings.replace", input, old_sub, new_sub,
OptionalValue<int32_t>(-1)),
IsOkAndHolds(Text("HXXo XXo foo.")));
EXPECT_THAT(InvokeOperator<Text>("strings.replace", input, old_sub, new_sub,
OptionalValue<int32_t>(0)),
IsOkAndHolds(Text("Hello ello foo.")));
EXPECT_THAT(InvokeOperator<Text>("strings.replace", input, old_sub, new_sub,
OptionalValue<int32_t>(1)),
IsOkAndHolds(Text("HXXo ello foo.")));
EXPECT_THAT(InvokeOperator<Text>("strings.replace", input, old_sub, new_sub,
OptionalValue<int32_t>(2)),
IsOkAndHolds(Text("HXXo XXo foo.")));
EXPECT_THAT(InvokeOperator<Text>("strings.replace", input, old_sub, new_sub,
OptionalValue<int32_t>()),
IsOkAndHolds(Text("HXXo XXo foo.")));
EXPECT_THAT(
InvokeOperator<Text>("strings.replace", input, Text(), new_sub,
OptionalValue<int32_t>(-1)),
IsOkAndHolds(Text("XXHXXeXXlXXlXXoXX XXeXXlXXlXXoXX XXfXXoXXoXX.XX")));
EXPECT_THAT(
InvokeOperator<Text>("strings.replace", input, Text(), new_sub,
OptionalValue<int32_t>()),
IsOkAndHolds(Text("XXHXXeXXlXXlXXoXX XXeXXlXXlXXoXX XXfXXoXXoXX.XX")));
EXPECT_THAT(InvokeOperator<Text>("strings.replace", input, Text(), new_sub,
OptionalValue<int32_t>(3)),
IsOkAndHolds(Text("XXHXXeXXllo ello foo.")));
EXPECT_THAT(InvokeOperator<Text>("strings.replace", Text(), Text(), new_sub,
OptionalValue<int32_t>()),
IsOkAndHolds(Text("XX")));
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/qexpr/operators/strings/strings.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/qexpr/operators/strings/strings_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
71175d7d-af7a-41c7-8175-be39824b744d | cpp | tensorflow/tensorflow | tpu_rewrite_device_util | tensorflow/compiler/mlir/tensorflow/utils/tpu_rewrite_device_util.cc | tensorflow/compiler/mlir/tensorflow/utils/tpu_rewrite_device_util_test.cc | #include "tensorflow/compiler/mlir/tensorflow/utils/tpu_rewrite_device_util.h"
#include <algorithm>
#include <cstdint>
#include <iterator>
#include <string>
#include <utility>
#include <vector>
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Support/FormatVariadic.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/TypeUtilities.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#include "tensorflow/compiler/jit/flags.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_device.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_structs.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_types.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/device_util.h"
#include "tensorflow/compiler/mlir/utils/string_container_utils.h"
#include "xla/array4d.h"
#include "xla/service/computation_placer.h"
#include "xla/xla_data.pb.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/protobuf/tpu/topology.pb.h"
#include "tensorflow/core/util/device_name_utils.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
constexpr int kTPUTopologyRank = 4;
constexpr char kDeviceTPUSystem[] = "TPU_SYSTEM";
constexpr char kDeviceTPU[] = "TPU";
constexpr char kTPUReplicatedCore[] = "TPU_REPLICATED_CORE";
constexpr char kTPUReplicatedHost[] = "TPU_REPLICATED_HOST";
constexpr char kBadIntArrayElementMsg[] =
"bad '{0}' attribute at index {1}, not an int";
using ParsedDevice = DeviceNameUtils::ParsedName;
using ParsedDevices = llvm::ArrayRef<DeviceNameUtils::ParsedName>;
namespace {
llvm::SmallVector<ParsedDevice, 8> FindMatchingDevices(
ParsedDevices devices, const ParsedDevice& spec) {
llvm::SmallVector<ParsedDevice, 8> matching_devices;
for (const auto& device : devices) {
if (DeviceNameUtils::IsCompleteSpecification(spec, device)) {
matching_devices.push_back(device);
}
}
return matching_devices;
}
template <typename T>
absl::Status MismatchedTPUSystemAttributeErr(absl::string_view attribute, T a,
T b) {
return absl::InvalidArgumentError(
absl::StrCat("found ", kDeviceTPUSystem, " devices with conflicting ",
attribute, "s '", a, "' and '", b, "'"));
}
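// Finds all TPU_SYSTEM:0 devices, verifies they agree on job and replica,
// and returns them sorted by task id.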
absl::StatusOr<llvm::SmallVector<ParsedDevice, 8>> GetTPUSystemDevices(
ParsedDevices devices) {
ParsedDevice spec;
spec.type = kDeviceTPUSystem;
spec.has_type = true;
spec.id = 0;
spec.has_id = true;
llvm::SmallVector<ParsedDevice, 8> system_devices =
FindMatchingDevices(devices, spec);
if (system_devices.empty())
return absl::InvalidArgumentError(
absl::StrCat("no ", kDeviceTPUSystem, " devices found"));
const auto& job = system_devices[0].job;
auto replica = system_devices[0].replica;
for (const auto& device : llvm::make_range(std::next(system_devices.begin()),
system_devices.end())) {
if (device.job != job)
return MismatchedTPUSystemAttributeErr("job", job, device.job);
if (device.replica != replica)
return MismatchedTPUSystemAttributeErr("replica", replica,
device.replica);
}
std::sort(system_devices.begin(), system_devices.end(),
[](const ParsedDevice& a, const ParsedDevice& b) {
return a.task < b.task;
});
return system_devices;
}
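// For each TPU_SYSTEM device's host, collects its TPU devices sorted by id,
// checking that every host exposes the same number of TPUs.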
absl::StatusOr<llvm::SmallVector<llvm::SmallVector<ParsedDevice, 8>, 8>>
GetTPUDevices(ParsedDevices devices,
llvm::ArrayRef<ParsedDevice> system_devices) {
llvm::SmallVector<llvm::SmallVector<ParsedDevice, 8>, 8> tpu_devices;
tpu_devices.reserve(system_devices.size());
auto lookup = [&devices](ParsedDevice device_spec) {
device_spec.has_type = true;
device_spec.type = kDeviceTPU;
device_spec.has_id = false;
llvm::SmallVector<ParsedDevice, 8> host_tpu_devices =
FindMatchingDevices(devices, device_spec);
std::sort(host_tpu_devices.begin(), host_tpu_devices.end(),
[](const ParsedDevice& i, const ParsedDevice& j) {
return i.id < j.id;
});
return host_tpu_devices;
};
int num_tpus_per_host = 0;
{
const auto& device = system_devices[0];
auto host_tpu_devices = lookup(device);
num_tpus_per_host = host_tpu_devices.size();
tpu_devices.push_back(std::move(host_tpu_devices));
}
for (const auto& device_spec : llvm::make_range(
std::next(system_devices.begin()), system_devices.end())) {
auto host_tpu_devices = lookup(device_spec);
const int64_t host_tpu_devices_size = host_tpu_devices.size();
if (num_tpus_per_host != host_tpu_devices_size)
return absl::InvalidArgumentError(
absl::StrCat("expected the number of TPU devices per host to be ",
num_tpus_per_host, ", got ", host_tpu_devices.size()));
tpu_devices.push_back(std::move(host_tpu_devices));
}
return tpu_devices;
}
std::string GetTPUCompilationDevice(ParsedDevice system_device) {
system_device.type = tensorflow::DEVICE_CPU;
return DeviceNameUtils::ParsedNameToString(system_device);
}
absl::StatusOr<std::string> GetCPUHostDeviceForTPUDevice(
ParsedDevice tpu_device, ParsedDevices devices) {
tpu_device.type = DEVICE_CPU;
bool enable_multiple_local_cpu_devices =
tensorflow::GetMlirCommonFlags()
->tf_mlir_enable_multiple_local_cpu_devices;
if (!enable_multiple_local_cpu_devices) {
tpu_device.id = 0;
}
if (FindMatchingDevices(devices, tpu_device).empty()) {
return absl::InvalidArgumentError(absl::StrCat(
"Can't find device: ", DeviceNameUtils::ParsedNameToString(tpu_device),
" in the devices list."));
}
return DeviceNameUtils::ParsedNameToString(tpu_device);
}
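// Full-mesh assignment: replica i runs a single core on the i-th TPU in
// task-major order.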
absl::StatusOr<TPUDevicesAndHosts> GetFullMeshTPUExecutionDeviceAssignment(
int num_replicas, int num_cores_per_replica,
llvm::ArrayRef<llvm::SmallVector<ParsedDevice, 8>> tpu_devices,
ParsedDevices devices) {
const int num_tasks = tpu_devices.size();
const int num_tpus_per_task = tpu_devices[0].size();
const int num_tpu_devices = num_tasks * num_tpus_per_task;
if (num_replicas != 1 && num_replicas != num_tpu_devices)
return absl::InvalidArgumentError(
absl::StrCat("'num_replicas' must be equal to 1 or ", num_tpu_devices,
", got ", num_replicas));
if (num_cores_per_replica != 1)
return absl::InvalidArgumentError(
absl::StrCat("'num_cores_per_replica' must be equal to 1, got ",
num_cores_per_replica));
TPUDevicesAndHosts devices_and_hosts;
devices_and_hosts.reserve(num_replicas);
for (int i = 0; i < num_replicas; ++i) {
const int task = i / num_tpus_per_task;
const int device = i % num_tpus_per_task;
const auto& tpu_device = tpu_devices[task][device];
devices_and_hosts.push_back({TPUDeviceAndHost(
tensorflow::DeviceNameUtils::ParsedNameToString(tpu_device),
*GetCPUHostDeviceForTPUDevice(tpu_device, devices))});
}
return devices_and_hosts;
}
struct TaskAndDevice {
TaskAndDevice() = default;
TaskAndDevice(int task, int device) : task(task), device(device) {}
int task = -1;
int device = -1;
};
bool DeviceCoordinateOutOfBound(int x, int y, int z, int core, int bound_x,
int bound_y, int bound_z, int bound_core) {
return x < 0 || x >= bound_x || y < 0 || y >= bound_y || z < 0 ||
z >= bound_z || core < 0 || core >= bound_core;
}
absl::Status DeviceCoordinateErrorMsg(absl::string_view attribute, int x, int y,
int z, int core, int bound_x, int bound_y,
int bound_z, int bound_core) {
return absl::InvalidArgumentError(
absl::StrCat("device coordinate (", x, ", ", y, ", ", z, ", ", core,
") in '", attribute, "' is outside of mesh shape (", bound_x,
", ", bound_y, ", ", bound_z, ", ", bound_core, ")"));
}
absl::Status DuplicateCoordinateErrorMsg(absl::string_view attribute, int x,
int y, int z, int core) {
return absl::InvalidArgumentError(
absl::StrCat("'", attribute, "' has duplicate device coordinate (", x,
", ", y, ", ", z, ", ", core, ")"));
}
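// Parses `topology_attr` into a TopologyProto and builds a 4D lookup table
// from (x, y, z, core) mesh coordinates to (task, device) pairs, validating
// bounds and rejecting duplicates.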
absl::StatusOr<xla::Array4D<TaskAndDevice>> ParseTopologyAttr(
llvm::StringRef topology_attr, int num_tasks, int num_tpus_per_task) {
tpu::TopologyProto topology_proto;
if (!topology_proto.ParseFromString(topology_attr.str()))
return absl::InvalidArgumentError(absl::StrCat(
"failed to parse '", kTopologyAttr, "' attribute to TopologyProto"));
if (topology_proto.mesh_shape_size() != kTPUTopologyRank)
return absl::InvalidArgumentError(absl::StrCat(
"'", kTopologyAttr, "' 'mesh_shape' must be rank ", kTPUTopologyRank,
", got rank ", topology_proto.mesh_shape_size()));
for (auto mesh_shape_dim : llvm::enumerate(topology_proto.mesh_shape()))
if (mesh_shape_dim.value() <= 0)
return absl::InvalidArgumentError(
absl::StrCat("'", kTopologyAttr, "' 'mesh_shape' dimension ",
mesh_shape_dim.index(), " must be positive, got ",
mesh_shape_dim.value()));
if (topology_proto.num_tasks() != num_tasks)
return absl::InvalidArgumentError(absl::StrCat(
"number of tasks from available TPU devices must be 'num_tasks' in '",
kTopologyAttr, "' (", topology_proto.num_tasks(), "), got ",
num_tasks));
if (topology_proto.num_tpu_devices_per_task() != num_tpus_per_task)
return absl::InvalidArgumentError(absl::StrCat(
"number of TPU devices available per task must be "
"'num_tpu_devices_per_task' in '",
kTopologyAttr, "' (", topology_proto.num_tpu_devices_per_task(),
"), got ", num_tpus_per_task));
const int expected_device_coordinates_size =
num_tasks * num_tpus_per_task * kTPUTopologyRank;
if (topology_proto.device_coordinates_size() !=
expected_device_coordinates_size)
return absl::InvalidArgumentError(absl::StrCat(
"length of 'device_coordinates' in '", kTopologyAttr,
"' must be 'num_tasks' * 'num_tpus_per_task' * ", kTPUTopologyRank,
" (", num_tasks, " * ", num_tpus_per_task, " * ", kTPUTopologyRank,
"), got ", topology_proto.device_coordinates_size()));
const int bound_x = topology_proto.mesh_shape(0);
const int bound_y = topology_proto.mesh_shape(1);
const int bound_z = topology_proto.mesh_shape(2);
const int bound_core = topology_proto.mesh_shape(3);
xla::Array4D<TaskAndDevice> topology(bound_x, bound_y, bound_z, bound_core);
int pos = 0;
for (int task = 0; task < num_tasks; ++task) {
for (int device = 0; device < num_tpus_per_task; ++device) {
int x = topology_proto.device_coordinates(pos++);
int y = topology_proto.device_coordinates(pos++);
int z = topology_proto.device_coordinates(pos++);
int core = topology_proto.device_coordinates(pos++);
if (DeviceCoordinateOutOfBound(x, y, z, core, bound_x, bound_y, bound_z,
bound_core))
return DeviceCoordinateErrorMsg(kTopologyAttr, x, y, z, core, bound_x,
bound_y, bound_z, bound_core);
auto& task_and_device = topology(x, y, z, core);
if (task_and_device.task != -1)
return DuplicateCoordinateErrorMsg(kTopologyAttr, x, y, z, core);
task_and_device = {task, device};
}
}
return topology;
}
absl::StatusOr<std::pair<TPUDevicesAndHosts, xla::DeviceAssignmentProto>>
GetGeneralTPUExecutionDeviceAssignment(
int num_replicas, int num_cores_per_replica,
llvm::ArrayRef<llvm::SmallVector<ParsedDevice, 8>> tpu_devices,
ParsedDevices devices, llvm::StringRef topology_attr,
llvm::ArrayRef<int64_t> device_assignment_attr) {
const int num_tasks = tpu_devices.size();
const int num_tpus_per_task = tpu_devices[0].size();
TF_ASSIGN_OR_RETURN(auto topology, ParseTopologyAttr(topology_attr, num_tasks,
num_tpus_per_task));
const int expected_device_assignment_size =
num_replicas * num_cores_per_replica * kTPUTopologyRank;
const int device_assignment_attr_size = device_assignment_attr.size();
if (device_assignment_attr_size != expected_device_assignment_size)
return absl::InvalidArgumentError(absl::StrCat(
"length of '", kDeviceAssignmentAttr,
"' must be 'num_replicas' * 'num_cores_per_replica' * ",
kTPUTopologyRank, " (", num_replicas, " * ", num_cores_per_replica,
" * ", kTPUTopologyRank, "), got ", device_assignment_attr.size()));
const int bound_x = topology.n1();
const int bound_y = topology.n2();
const int bound_z = topology.n3();
const int bound_core = topology.n4();
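  // Linearizes an (x, y, z, core) mesh coordinate into a flat device id.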
auto location_to_id = [&](int x, int y, int z, int core) {
return (x + bound_x * (y + bound_y * z)) * bound_core + core;
};
std::vector<bool> used_device_ids(bound_x * bound_y * bound_z * bound_core,
false);
TPUDevicesAndHosts devices_and_hosts(
num_replicas, llvm::SmallVector<TPUDeviceAndHost, 8>(
num_cores_per_replica, TPUDeviceAndHost()));
xla::DeviceAssignment device_assignment(num_replicas, num_cores_per_replica);
int pos = 0;
for (int replica = 0; replica < num_replicas; ++replica) {
for (int logical_core = 0; logical_core < num_cores_per_replica;
++logical_core) {
int x = device_assignment_attr[pos++];
int y = device_assignment_attr[pos++];
int z = device_assignment_attr[pos++];
int core = device_assignment_attr[pos++];
if (DeviceCoordinateOutOfBound(x, y, z, core, bound_x, bound_y, bound_z,
bound_core))
return DeviceCoordinateErrorMsg(kDeviceAssignmentAttr, x, y, z, core,
bound_x, bound_y, bound_z, bound_core);
TaskAndDevice task_and_device = topology(x, y, z, core);
const int task = task_and_device.task;
const int device = task_and_device.device;
if (task == -1 || device == -1)
return absl::InvalidArgumentError(absl::StrCat(
"no TPU device found for '", kDeviceAssignmentAttr,
"' device coordinate (", x, ", ", y, ", ", z, ", ", core, ")"));
const int device_id = location_to_id(x, y, z, core);
if (used_device_ids[device_id])
return DuplicateCoordinateErrorMsg(kDeviceAssignmentAttr, x, y, z,
core);
used_device_ids[device_id] = true;
device_assignment(replica, logical_core) = device_id;
auto& device_and_host = devices_and_hosts[replica][logical_core];
const auto& tpu_device = tpu_devices[task][device];
device_and_host.device = DeviceNameUtils::ParsedNameToString(tpu_device);
device_and_host.host = *GetCPUHostDeviceForTPUDevice(tpu_device, devices);
}
}
xla::DeviceAssignmentProto device_assignment_proto;
device_assignment.Serialize(&device_assignment_proto);
return std::pair<TPUDevicesAndHosts, xla::DeviceAssignmentProto>(
std::move(devices_and_hosts), std::move(device_assignment_proto));
}
mlir::LogicalResult GetTopology(mlir::tf_device::ClusterOp cluster,
std::string& topology) {
mlir::StringAttr topology_attr =
cluster->getAttrOfType<mlir::StringAttr>(tensorflow::kTopologyAttr);
if (topology_attr) {
topology = topology_attr.getValue();
return mlir::success();
} else {
return cluster.emitOpError(
llvm::formatv("requires attribute '{0}'", tensorflow::kTopologyAttr)
.str());
}
}
mlir::LogicalResult GetDeviceAssignmentCoordinates(
mlir::tf_device::ClusterOp cluster,
llvm::SmallVector<int64_t, 8>& device_coordinates) {
mlir::ArrayAttr device_assignment_attr =
cluster->getAttrOfType<mlir::ArrayAttr>(
tensorflow::kDeviceAssignmentAttr);
if (!device_assignment_attr)
return cluster.emitOpError(llvm::formatv("requires attribute '{0}'",
tensorflow::kDeviceAssignmentAttr)
.str());
if (absl::StatusOr<llvm::SmallVector<int64_t, 8>> fetched_device_coordinates =
tensorflow::GetDeviceCoordinates(device_assignment_attr);
fetched_device_coordinates.ok()) {
device_coordinates = *fetched_device_coordinates;
return mlir::success();
} else {
return cluster.emitError() << "error in fetching tpu device coordinates: "
<< fetched_device_coordinates.status().message();
}
}
int GetNumCoresPerReplica(mlir::tf_device::ClusterOp cluster) {
mlir::IntegerAttr num_cores_per_replica_attr =
cluster->getAttrOfType<mlir::IntegerAttr>(kNumCoresPerReplicaAttr);
if (num_cores_per_replica_attr) {
return num_cores_per_replica_attr.getInt();
} else {
return 1;
}
}
mlir::LogicalResult GetTPUDevicesAndHostsNotReplicated(
mlir::TF::RuntimeDevices devices, mlir::tf_device::ClusterOp cluster,
tensorflow::TPUDevicesAndHosts& devices_and_hosts) {
std::string topology;
if (failed(GetTopology(cluster, topology))) {
return mlir::failure();
}
llvm::SmallVector<int64_t, 8> device_coordinates;
if (failed(GetDeviceAssignmentCoordinates(cluster, device_coordinates))) {
return mlir::failure();
}
if (absl::StatusOr<TPUDeviceAssignment> tpu_device_assignment =
tensorflow::GetTPUCompilationAndExecutionDevices(
devices.device_names(), 1,
GetNumCoresPerReplica(cluster), topology, device_coordinates);
tpu_device_assignment.ok()) {
devices_and_hosts = tpu_device_assignment->tpu_devices;
return mlir::success();
} else {
return cluster.emitError()
<< "error in fetching TPU compilation/execution devices: "
<< tpu_device_assignment.status().message();
}
}
mlir::LogicalResult GetHostDeviceOCInTPUPipeline(
mlir::TF::RuntimeDevices devices, mlir::tf_device::ClusterOp cluster,
std::string& host_device) {
mlir::tf_device::ReplicateOp replicate =
cluster->getParentOfType<mlir::tf_device::ReplicateOp>();
if (replicate) {
host_device = GetDeviceAliasForHostOfLogicalCore(0);
return mlir::success();
}
tensorflow::TPUDevicesAndHosts devices_and_hosts;
if (failed(GetTPUDevicesAndHostsNotReplicated(devices, cluster,
devices_and_hosts))) {
return mlir::failure();
} else {
host_device = devices_and_hosts[0][0].host;
return mlir::success();
}
}
llvm::SmallVector<std::string, 8> GetTPUToHostMapReplicated(
mlir::tf_device::ClusterOp cluster) {
int num_cores_per_replica = GetNumCoresPerReplica(cluster);
llvm::SmallVector<std::string, 8> core_to_host;
core_to_host.reserve(num_cores_per_replica);
for (int core = 0; core < num_cores_per_replica; ++core) {
core_to_host.push_back(GetDeviceAliasForHostOfLogicalCore(core));
}
return core_to_host;
}
mlir::LogicalResult GetTPUToHostMapNotReplicated(
mlir::TF::RuntimeDevices devices, mlir::tf_device::ClusterOp cluster,
llvm::SmallVector<std::string, 8>& core_to_host) {
tensorflow::TPUDevicesAndHosts devices_and_hosts;
if (failed(GetTPUDevicesAndHostsNotReplicated(devices, cluster,
devices_and_hosts))) {
return mlir::failure();
}
core_to_host.reserve(GetNumCoresPerReplica(cluster));
for (const auto& device_and_host : devices_and_hosts[0]) {
core_to_host.push_back(device_and_host.host);
}
return mlir::success();
}
mlir::LogicalResult GetTPUToHostMap(
mlir::TF::RuntimeDevices devices, mlir::tf_device::ClusterOp cluster,
llvm::SmallVector<std::string, 8>& core_to_host) {
if (cluster->getParentOfType<mlir::tf_device::ReplicateOp>()) {
core_to_host = GetTPUToHostMapReplicated(cluster);
return mlir::success();
}
return GetTPUToHostMapNotReplicated(devices, cluster, core_to_host);
}
}
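// Converts the `device_assignment` ArrayAttr into flat int64 coordinates,
// rejecting any element that is not an integer attribute.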
absl::StatusOr<llvm::SmallVector<int64_t, 8>> GetDeviceCoordinates(
mlir::ArrayAttr device_assignment_attr) {
llvm::SmallVector<int64_t, 8> device_coordinates;
device_coordinates.reserve(device_assignment_attr.size());
for (auto device_coordinate_and_idx :
llvm::enumerate(device_assignment_attr)) {
auto device_coordinate =
mlir::dyn_cast<mlir::IntegerAttr>(device_coordinate_and_idx.value());
if (!device_coordinate)
return absl::InvalidArgumentError(
llvm::formatv(kBadIntArrayElementMsg, kDeviceAssignmentAttr,
device_coordinate_and_idx.index())
.str());
device_coordinates.push_back(device_coordinate.getInt());
}
return device_coordinates;
}
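// Validates `device_assignment_attr` against the topology's mesh shape and
// serializes the resulting xla::DeviceAssignment, without resolving physical
// device names.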
absl::StatusOr<xla::DeviceAssignmentProto> GetXlaDeviceAssignmentProto(
llvm::StringRef topology_attr, int num_replicas, int num_cores_per_replica,
llvm::ArrayRef<int64_t> device_assignment_attr) {
tpu::TopologyProto topology_proto;
if (!topology_proto.ParseFromString(topology_attr.str()))
return absl::InvalidArgumentError(absl::StrCat(
"failed to parse '", kTopologyAttr, "' attribute to TopologyProto"));
if (topology_proto.mesh_shape_size() < 4) {
return absl::InvalidArgumentError(absl::StrCat(
"The size of mesh_shape must be larger than or equal to 4, but got ",
topology_proto.mesh_shape_size()));
}
const int bound_x = topology_proto.mesh_shape(0);
const int bound_y = topology_proto.mesh_shape(1);
const int bound_z = topology_proto.mesh_shape(2);
const int bound_core = topology_proto.mesh_shape(3);
const int expected_device_assignment_size =
num_replicas * num_cores_per_replica * kTPUTopologyRank;
const int device_assignment_attr_size = device_assignment_attr.size();
if (device_assignment_attr_size != expected_device_assignment_size)
return absl::InvalidArgumentError(absl::StrCat(
"length of '", kDeviceAssignmentAttr,
"' must be 'num_replicas' * 'num_cores_per_replica' * ",
kTPUTopologyRank, " (", num_replicas, " * ", num_cores_per_replica,
" * ", kTPUTopologyRank, "), got ", device_assignment_attr.size()));
auto location_to_id = [&](int x, int y, int z, int core) {
return (x + bound_x * (y + bound_y * z)) * bound_core + core;
};
std::vector<bool> used_device_ids(bound_x * bound_y * bound_z * bound_core,
false);
xla::DeviceAssignment device_assignment(num_replicas, num_cores_per_replica);
int pos = 0;
for (int replica = 0; replica < num_replicas; ++replica) {
for (int logical_core = 0; logical_core < num_cores_per_replica;
++logical_core) {
int x = device_assignment_attr[pos++];
int y = device_assignment_attr[pos++];
int z = device_assignment_attr[pos++];
int core = device_assignment_attr[pos++];
if (DeviceCoordinateOutOfBound(x, y, z, core, bound_x, bound_y, bound_z,
bound_core))
return DeviceCoordinateErrorMsg(kDeviceAssignmentAttr, x, y, z, core,
bound_x, bound_y, bound_z, bound_core);
const int device_id = location_to_id(x, y, z, core);
if (used_device_ids[device_id])
return DuplicateCoordinateErrorMsg(kDeviceAssignmentAttr, x, y, z,
core);
used_device_ids[device_id] = true;
device_assignment(replica, logical_core) = device_id;
}
}
xla::DeviceAssignmentProto device_assignment_proto;
device_assignment.Serialize(&device_assignment_proto);
return device_assignment_proto;
}
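// Dispatches between the full-mesh and topology-driven assignment paths
// depending on whether the `topology` attribute is set.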
absl::StatusOr<TPUDeviceAssignment> GetTPUCompilationAndExecutionDevices(
ParsedDevices devices, int num_replicas, int num_cores_per_replica,
llvm::StringRef topology_attr,
llvm::ArrayRef<int64_t> device_assignment_attr) {
TF_ASSIGN_OR_RETURN(auto system_devices, GetTPUSystemDevices(devices));
TF_ASSIGN_OR_RETURN(auto tpu_devices, GetTPUDevices(devices, system_devices));
std::string compilation_device = GetTPUCompilationDevice(system_devices[0]);
if (topology_attr.empty()) {
if (!device_assignment_attr.empty())
return absl::InvalidArgumentError(
absl::StrCat("'", kDeviceAssignmentAttr, "' must not be set when '",
kTopologyAttr, "' is not set"));
TF_ASSIGN_OR_RETURN(
auto execution_devices,
GetFullMeshTPUExecutionDeviceAssignment(
num_replicas, num_cores_per_replica, tpu_devices, devices));
return TPUDeviceAssignment(compilation_device,
std::move(execution_devices));
}
TF_ASSIGN_OR_RETURN(auto devices_and_ids,
GetGeneralTPUExecutionDeviceAssignment(
num_replicas, num_cores_per_replica, tpu_devices,
devices, topology_attr, device_assignment_attr));
return TPUDeviceAssignment(compilation_device,
std::move(devices_and_ids.first),
std::move(devices_and_ids.second));
}
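// Builds the virtual device alias (e.g. "TPU_REPLICATED_CORE_0") that stands
// in for a logical core inside a tf_device.replicate device map.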
std::string GetDeviceAliasForLogicalCore(const int core_index) {
return llvm::formatv("{0}_{1}", kTPUReplicatedCore, core_index).str();
}
std::string GetDeviceAliasForHostOfLogicalCore(const int core_index) {
return llvm::formatv("{0}_{1}", kTPUReplicatedHost, core_index).str();
}
bool HasModelParallelism(mlir::tf_device::ClusterOp cluster) {
mlir::IntegerAttr num_cores_per_replica_attr =
cluster->getAttrOfType<mlir::IntegerAttr>(
tensorflow::kNumCoresPerReplicaAttr);
if (!num_cores_per_replica_attr) return false;
return num_cores_per_replica_attr.getInt() != 1;
}
bool HasTPUDevice(const mlir::TF::RuntimeDevices& devices) {
for (const auto& device : devices.device_names()) {
if (device.has_type && device.type == "TPU") return true;
}
return false;
}
mlir::LogicalResult GetHostDeviceOutsideCompilationInGenericPipeline(
mlir::TF::RuntimeDevices devices, std::string* host_device) {
for (const auto& device : devices.device_names()) {
if (device.has_type && device.type == "CPU" && device.id == 0) {
if (!host_device->empty()) {
LOG(WARNING) << "Found multiple CPU:0 host devices";
if (device.job == "chief")
*host_device =
tensorflow::DeviceNameUtils::ParsedNameToString(device);
continue;
}
*host_device = tensorflow::DeviceNameUtils::ParsedNameToString(device);
}
}
if (host_device->empty()) {
LOG(ERROR) << "Did not find any CPU:0 host devices";
return mlir::failure();
}
return mlir::success();
}
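// Uses the TPU-aware host lookup when TPU devices exist or the cluster is
// replicated; otherwise falls back to picking a CPU:0 host device.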
mlir::LogicalResult GetHostDeviceOutsideComputation(
mlir::TF::RuntimeDevices devices, mlir::tf_device::ClusterOp cluster,
std::string* host_device) {
if (HasTPUDevice(devices) ||
cluster->getParentOfType<mlir::tf_device::ReplicateOp>()) {
return GetHostDeviceOCInTPUPipeline(devices, cluster, *host_device);
} else {
return GetHostDeviceOutsideCompilationInGenericPipeline(devices,
host_device);
}
}
bool IsTPUDevice(llvm::StringRef device) {
ParsedDevice parsed_device;
if (!DeviceNameUtils::ParseFullName(mlir::StringRefToView(device),
&parsed_device))
return false;
return parsed_device.has_type && parsed_device.type == kDeviceTPU;
}
bool IsTPUReplicatedCore(llvm::StringRef device) {
ParsedDevice parsed_device;
if (!DeviceNameUtils::ParseFullName(mlir::StringRefToView(device),
&parsed_device))
return false;
return parsed_device.has_type && parsed_device.type == kTPUReplicatedCore;
}
bool TypeValidForXLA(const mlir::Type& type) {
const mlir::Type elem = getElementTypeOrSelf(type);
return !mlir::isa<mlir::TF::ResourceType>(elem) &&
!mlir::isa<mlir::TF::StringType>(elem);
}
mlir::LogicalResult GetDeviceToHostMap(
mlir::tf_device::ClusterOp cluster,
llvm::SmallVector<std::string, 8>& core_to_host) {
mlir::TF::RuntimeDevices devices;
if (failed(tensorflow::GetDevicesFromOp(
cluster->getParentOfType<mlir::ModuleOp>(), &devices))) {
return mlir::failure();
}
if (tensorflow::HasTPUDevice(devices) ||
cluster->getParentOfType<mlir::tf_device::ReplicateOp>()) {
return GetTPUToHostMap(devices, cluster, core_to_host);
}
std::string host_device;
if (failed(tensorflow::GetHostDeviceOutsideCompilationInGenericPipeline(
devices, &host_device))) {
return mlir::failure();
} else {
core_to_host.push_back(host_device);
return mlir::success();
}
}
mlir::LogicalResult GetNonReplicatedTPU0(mlir::Operation* op,
std::string* tpu0_device) {
mlir::ModuleOp moduleOp = op->getParentOfType<mlir::ModuleOp>();
mlir::TF::RuntimeDevices devices;
if (failed(tensorflow::GetDevicesFromOp(moduleOp, &devices)))
return moduleOp.emitOpError() << "No available devices.";
llvm::ArrayRef<tensorflow::DeviceNameUtils::ParsedName> device_names =
devices.device_names();
auto status_or_system_devices = GetTPUSystemDevices(device_names);
if (!status_or_system_devices.ok())
return moduleOp.emitOpError()
<< "error in fetching TPU_SYSTEM devices: "
<< status_or_system_devices.status().message();
auto status_or_tpu_devices =
GetTPUDevices(device_names, status_or_system_devices.value());
if (!status_or_tpu_devices.ok())
return moduleOp.emitOpError() << "error in fetching TPU devices: "
<< status_or_tpu_devices.status().message();
*tpu0_device =
DeviceNameUtils::ParsedNameToString(status_or_tpu_devices.value()[0][0]);
return mlir::success();
}
mlir::LogicalResult GetNonReplicatedCPU0(mlir::Operation* op,
std::string* cpu0_device) {
std::string tpu0_device;
if (failed(tensorflow::GetNonReplicatedTPU0(op, &tpu0_device)))
return mlir::failure();
auto status = tensorflow::DeviceNameUtils::DeviceNameToCpuDeviceName(
tpu0_device, cpu0_device);
if (!status.ok())
return op->emitError()
<< "error in converting TPU0 to CPU0. The TPU device is "
<< tpu0_device;
return mlir::success();
}
} | #include "tensorflow/compiler/mlir/tensorflow/utils/tpu_rewrite_device_util.h"
#include <cstdint>
#include <optional>
#include <string>
#include <tuple>
#include <vector>
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/FormatVariadic.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/MLIRContext.h"
#include "tensorflow/compiler/mlir/tensorflow/dialect_registration.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/device_util.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/serialize_mlir_module_utils.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/status_matchers.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/tpu/topology.pb.h"
#include "tensorflow/core/util/device_name_utils.h"
namespace tensorflow {
namespace {
absl::StatusOr<mlir::OwningOpRef<mlir::ModuleOp>> GetMlirModuleFromString(
llvm::StringRef string, mlir::MLIRContext* context) {
mlir::DialectRegistry mlir_registry;
RegisterAllTensorFlowDialects(mlir_registry);
context->appendDialectRegistry(mlir_registry);
mlir::OwningOpRef<mlir::ModuleOp> mlir_module;
auto status =
tensorflow::DeserializeMlirModule(string, context, &mlir_module);
if (!status.ok()) {
return status;
}
return mlir_module;
}
using Device = DeviceNameUtils::ParsedName;
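// Parses fully qualified device names into ParsedName structs, returning
// false as soon as one fails to parse.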
bool DeviceNamesToParsedNames(llvm::ArrayRef<std::string> device_names,
llvm::SmallVectorImpl<Device>* parsed_devices) {
parsed_devices->reserve(device_names.size());
for (const auto& device_name : device_names) {
Device parsed_name;
if (!DeviceNameUtils::ParseFullName(device_name, &parsed_name))
return false;
parsed_devices->push_back(parsed_name);
}
return true;
}
using DeviceNames = llvm::SmallVector<std::string, 8>;
struct ParameterizedDeviceSetTest
: ::testing::TestWithParam<std::tuple<DeviceNames, std::string>> {};
TEST_P(ParameterizedDeviceSetTest, BadDeviceSet) {
llvm::SmallVector<Device, 8> devices;
ASSERT_TRUE(DeviceNamesToParsedNames(std::get<0>(GetParam()), &devices));
std::string topology_attr;
std::vector<int64_t> device_assignment_attr;
auto status_or = GetTPUCompilationAndExecutionDevices(
devices, 1, 1, topology_attr,
device_assignment_attr);
ASSERT_FALSE(status_or.ok());
EXPECT_EQ(status_or.status().message(), std::get<1>(GetParam()));
}
INSTANTIATE_TEST_SUITE_P(
BadDeviceSet, ParameterizedDeviceSetTest,
::testing::Values(
std::make_tuple<DeviceNames, std::string>(
{"/job:localhost/replica:0/task:0/device:CPU:0"},
"no TPU_SYSTEM devices found"),
std::make_tuple<DeviceNames, std::string>(
{"/job:localhost/replica:0/task:0/device:TPU_SYSTEM:0",
"/job:worker/replica:0/task:0/device:TPU_SYSTEM:0"},
"found TPU_SYSTEM devices with conflicting jobs 'localhost' and "
"'worker'"),
std::make_tuple<DeviceNames, std::string>(
{"/job:localhost/replica:0/task:0/device:TPU_SYSTEM:0",
"/job:localhost/replica:1/task:0/device:TPU_SYSTEM:0"},
"found TPU_SYSTEM devices with conflicting replicas '0' and '1'"),
std::make_tuple<DeviceNames, std::string>(
{"/job:localhost/replica:0/task:0/device:TPU_SYSTEM:0",
"/job:localhost/replica:0/task:0/device:TPU:0",
"/job:localhost/replica:0/task:0/device:TPU:1",
"/job:localhost/replica:0/task:1/device:TPU_SYSTEM:0",
"/job:localhost/replica:0/task:1/device:TPU:0"},
"expected the number of TPU devices per host to be 2, got 1")));
struct ParameterizedMetadataTest
: ::testing::TestWithParam<std::tuple<int, int, std::string,
std::vector<int64_t>, std::string>> {
};
TEST_P(ParameterizedMetadataTest, BadMetadata) {
llvm::SmallVector<Device, 8> devices;
ASSERT_TRUE(DeviceNamesToParsedNames(
{"/job:worker/replica:0/task:0/device:TPU_SYSTEM:0",
"/job:worker/replica:0/task:0/device:TPU:0",
"/job:worker/replica:0/task:0/device:CPU:0",
"/job:worker/replica:0/task:1/device:TPU_SYSTEM:0",
"/job:worker/replica:0/task:1/device:TPU:0",
"/job:worker/replica:0/task:1/device:CPU:0"},
&devices));
std::string compilation_device;
llvm::SmallVector<llvm::SmallVector<std::string, 8>, 8> execution_devices;
std::optional<xla::DeviceAssignmentProto> xla_device_assignment;
auto status_or = GetTPUCompilationAndExecutionDevices(
devices, std::get<0>(GetParam()), std::get<1>(GetParam()),
std::get<2>(GetParam()), std::get<3>(GetParam()));
ASSERT_FALSE(status_or.ok());
EXPECT_EQ(status_or.status().message(), std::get<4>(GetParam()));
}
std::string TopologyWithMeshShape(llvm::ArrayRef<int> mesh_shape) {
tpu::TopologyProto topology_proto;
for (int mesh_dim : mesh_shape) topology_proto.add_mesh_shape(mesh_dim);
return topology_proto.SerializeAsString();
}
std::string TopologyWithMeshShapeAndTasks(llvm::ArrayRef<int> mesh_shape,
int num_tasks,
int num_tpu_devices_per_task) {
tpu::TopologyProto topology_proto;
for (int mesh_dim : mesh_shape) topology_proto.add_mesh_shape(mesh_dim);
topology_proto.set_num_tasks(num_tasks);
topology_proto.set_num_tpu_devices_per_task(num_tpu_devices_per_task);
return topology_proto.SerializeAsString();
}
std::string TopologyWithDeviceCoordinates(
llvm::ArrayRef<int> device_coordinates) {
tpu::TopologyProto topology_proto;
topology_proto.add_mesh_shape(2);
topology_proto.add_mesh_shape(1);
topology_proto.add_mesh_shape(1);
topology_proto.add_mesh_shape(1);
topology_proto.set_num_tasks(2);
topology_proto.set_num_tpu_devices_per_task(1);
for (int device_coordinate : device_coordinates)
topology_proto.add_device_coordinates(device_coordinate);
return topology_proto.SerializeAsString();
}
INSTANTIATE_TEST_SUITE_P(
BadFullMeshMetadata, ParameterizedMetadataTest,
::testing::Values(
std::make_tuple(
2, 1, "", std::vector<int64_t>{0},
"'device_assignment' must not be set when 'topology' is not set"),
std::make_tuple(8, 1, "", std::vector<int64_t>(),
"'num_replicas' must be equal to 1 or 2, got 8"),
std::make_tuple(2, 2, "", std::vector<int64_t>(),
"'num_cores_per_replica' must be equal to 1, got 2")));
INSTANTIATE_TEST_SUITE_P(
BadGeneralTopologyMetadata, ParameterizedMetadataTest,
::testing::Values(
std::make_tuple(
2, 1, "BAD_TOPOLOGY", std::vector<int64_t>(),
"failed to parse 'topology' attribute to TopologyProto"),
std::make_tuple(4, 2, TopologyWithMeshShape({0}),
std::vector<int64_t>(),
"'topology' 'mesh_shape' must be rank 4, got rank 1"),
std::make_tuple(
2, 1, TopologyWithMeshShape({2, 0, 1, 2}), std::vector<int64_t>(),
"'topology' 'mesh_shape' dimension 1 must be positive, got 0"),
std::make_tuple(2, 1, TopologyWithMeshShapeAndTasks({1, 1, 1, 1}, 1, 1),
std::vector<int64_t>(),
"number of tasks from available TPU devices must be "
"'num_tasks' in 'topology' (1), got 2"),
std::make_tuple(2, 1, TopologyWithMeshShapeAndTasks({1, 1, 1, 1}, 2, 2),
std::vector<int64_t>(),
"number of TPU devices available per task must be "
"'num_tpu_devices_per_task' in 'topology' (2), got 1"),
std::make_tuple(
2, 1, TopologyWithDeviceCoordinates({}), std::vector<int64_t>(),
"length of 'device_coordinates' in 'topology' must be 'num_tasks' "
"* 'num_tpus_per_task' * 4 (2 * 1 * 4), got 0"),
std::make_tuple(
2, 1, TopologyWithDeviceCoordinates({-1, 0, 0, 0, 1, 0, 0, 0}),
std::vector<int64_t>(),
"device coordinate (-1, 0, 0, 0) in 'topology' is outside "
"of mesh shape (2, 1, 1, 1)"),
std::make_tuple(
2, 1, TopologyWithDeviceCoordinates({2, 0, 0, 0, 1, 0, 0, 0}),
std::vector<int64_t>(),
"device coordinate (2, 0, 0, 0) in 'topology' is outside "
"of mesh shape (2, 1, 1, 1)"),
std::make_tuple(
2, 1, TopologyWithDeviceCoordinates({0, -1, 0, 0, 1, 0, 0, 0}),
std::vector<int64_t>(),
"device coordinate (0, -1, 0, 0) in 'topology' is outside "
"of mesh shape (2, 1, 1, 1)"),
std::make_tuple(
2, 1, TopologyWithDeviceCoordinates({0, 1, 0, 0, 1, 0, 0, 0}),
std::vector<int64_t>(),
"device coordinate (0, 1, 0, 0) in 'topology' is outside "
"of mesh shape (2, 1, 1, 1)"),
std::make_tuple(
2, 1, TopologyWithDeviceCoordinates({0, 0, 0, -1, 1, 0, 0, 0}),
std::vector<int64_t>(),
"device coordinate (0, 0, 0, -1) in 'topology' is outside "
"of mesh shape (2, 1, 1, 1)"),
std::make_tuple(
2, 1, TopologyWithDeviceCoordinates({0, 0, 0, 1, 1, 0, 0, 0}),
std::vector<int64_t>(),
"device coordinate (0, 0, 0, 1) in 'topology' is outside "
"of mesh shape (2, 1, 1, 1)"),
std::make_tuple(
2, 1, TopologyWithDeviceCoordinates({0, 0, 0, 0, 0, 0, 0, 0}),
std::vector<int64_t>(),
"'topology' has duplicate device coordinate (0, 0, 0, 0)")));
INSTANTIATE_TEST_SUITE_P(
BadGeneralDeviceAssignmentMetadata, ParameterizedMetadataTest,
::testing::Values(
std::make_tuple(2, 1,
TopologyWithDeviceCoordinates({0, 0, 0, 0, 1, 0, 0, 0}),
std::vector<int64_t>(),
"length of 'device_assignment' must be 'num_replicas' "
"* 'num_cores_per_replica' * 4 (2 * 1 * 4), got 0"),
std::make_tuple(
2, 1, TopologyWithDeviceCoordinates({0, 0, 0, 0, 1, 0, 0, 0}),
std::vector<int64_t>{-1, 0, 0, 0, 0, 0, 0, 0},
"device coordinate (-1, 0, 0, 0) in 'device_assignment' "
"is outside of mesh shape (2, 1, 1, 1)"),
std::make_tuple(
2, 1, TopologyWithDeviceCoordinates({0, 0, 0, 0, 1, 0, 0, 0}),
std::vector<int64_t>{2, 0, 0, 0, 0, 0, 0, 0},
"device coordinate (2, 0, 0, 0) in 'device_assignment' is "
"outside of mesh shape (2, 1, 1, 1)"),
std::make_tuple(
2, 1, TopologyWithDeviceCoordinates({0, 0, 0, 0, 1, 0, 0, 0}),
std::vector<int64_t>{0, -1, 0, 0, 0, 0, 0, 0},
"device coordinate (0, -1, 0, 0) in 'device_assignment' "
"is outside of mesh shape (2, 1, 1, 1)"),
std::make_tuple(
2, 1, TopologyWithDeviceCoordinates({0, 0, 0, 0, 1, 0, 0, 0}),
std::vector<int64_t>{0, 1, 0, 0, 0, 0, 0, 0},
"device coordinate (0, 1, 0, 0) in 'device_assignment' is "
"outside of mesh shape (2, 1, 1, 1)"),
std::make_tuple(
2, 1, TopologyWithDeviceCoordinates({0, 0, 0, 0, 1, 0, 0, 0}),
std::vector<int64_t>{0, 0, 0, -1, 0, 0, 0, 0},
"device coordinate (0, 0, 0, -1) in 'device_assignment' "
"is outside of mesh shape (2, 1, 1, 1)"),
std::make_tuple(
2, 1, TopologyWithDeviceCoordinates({0, 0, 0, 0, 1, 0, 0, 0}),
std::vector<int64_t>{0, 0, 0, 1, 0, 0, 0, 0},
"device coordinate (0, 0, 0, 1) in 'device_assignment' is "
"outside of mesh shape (2, 1, 1, 1)"),
std::make_tuple(2, 1,
TopologyWithDeviceCoordinates({0, 0, 0, 0, 1, 0, 0, 0}),
std::vector<int64_t>{0, 0, 0, 0, 0, 0, 0, 0},
"'device_assignment' has duplicate device coordinate "
"(0, 0, 0, 0)")));
std::vector<std::string> MakeDeviceSet(int num_tasks,
int num_devices_per_task) {
std::vector<std::string> devices{
"/job:localhost/replica:0/task:0/device:CPU:0"};
devices.reserve(num_tasks * num_devices_per_task + num_tasks + 1);
for (int task = 0; task < num_tasks; ++task) {
devices.push_back(
llvm::formatv("/job:worker/replica:0/task:{0}/device:CPU:0", task)
.str());
devices.push_back(
llvm::formatv("/job:worker/replica:0/task:{0}/device:TPU_SYSTEM:0",
task)
.str());
for (int device = 0; device < num_devices_per_task; ++device)
devices.push_back(
llvm::formatv("/job:worker/replica:0/task:{0}/device:TPU:{1}", task,
device)
.str());
}
return devices;
}
TEST(TPURewriteDeviceUtilTest,
BadGeneralDeviceAssignmentMetadataMissingDevice) {
tpu::TopologyProto topology_proto;
{
topology_proto.add_mesh_shape(2);
topology_proto.add_mesh_shape(1);
topology_proto.add_mesh_shape(1);
topology_proto.add_mesh_shape(1);
topology_proto.set_num_tasks(1);
topology_proto.set_num_tpu_devices_per_task(1);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(0);
}
std::string topology_attr = topology_proto.SerializeAsString();
std::vector<int64_t> device_assignment_attr{1, 0, 0, 0};
llvm::SmallVector<Device, 8> devices;
std::vector<std::string> device_names =
      MakeDeviceSet(/*num_tasks=*/1, /*num_devices_per_task=*/1);
ASSERT_TRUE(DeviceNamesToParsedNames(device_names, &devices));
auto status_or = GetTPUCompilationAndExecutionDevices(
      devices, /*num_replicas=*/1, /*num_cores_per_replica=*/1, topology_attr,
device_assignment_attr);
ASSERT_FALSE(status_or.ok());
EXPECT_EQ(status_or.status().message(),
"no TPU device found for 'device_assignment' device coordinate (1, "
"0, 0, 0)");
}
TEST(TPURewriteDeviceUtilTest, ValidFullMeshDeviceAssignment) {
llvm::SmallVector<Device, 8> devices;
std::vector<std::string> device_names =
      MakeDeviceSet(/*num_tasks=*/2, /*num_devices_per_task=*/4);
ASSERT_TRUE(DeviceNamesToParsedNames(device_names, &devices));
std::string topology_attr;
std::vector<int64_t> device_assignment_attr;
auto status_or = GetTPUCompilationAndExecutionDevices(
      devices, /*num_replicas=*/8, /*num_cores_per_replica=*/1, topology_attr,
device_assignment_attr);
TF_ASSERT_OK(status_or.status());
const auto& tpu_device_assignment = status_or.value();
EXPECT_EQ(tpu_device_assignment.compilation_device,
"/job:worker/replica:0/task:0/device:CPU:0");
const auto& tpu_devices = tpu_device_assignment.tpu_devices;
ASSERT_EQ(tpu_devices.size(), 8);
for (const auto& replica_tpu_devices : tpu_devices)
ASSERT_EQ(replica_tpu_devices.size(), 1);
EXPECT_EQ(tpu_devices[0][0].device,
"/job:worker/replica:0/task:0/device:TPU:0");
EXPECT_EQ(tpu_devices[0][0].host,
"/job:worker/replica:0/task:0/device:CPU:0");
EXPECT_EQ(tpu_devices[1][0].device,
"/job:worker/replica:0/task:0/device:TPU:1");
EXPECT_EQ(tpu_devices[1][0].host,
"/job:worker/replica:0/task:0/device:CPU:0");
EXPECT_EQ(tpu_devices[2][0].device,
"/job:worker/replica:0/task:0/device:TPU:2");
EXPECT_EQ(tpu_devices[2][0].host,
"/job:worker/replica:0/task:0/device:CPU:0");
EXPECT_EQ(tpu_devices[3][0].device,
"/job:worker/replica:0/task:0/device:TPU:3");
EXPECT_EQ(tpu_devices[3][0].host,
"/job:worker/replica:0/task:0/device:CPU:0");
EXPECT_EQ(tpu_devices[4][0].device,
"/job:worker/replica:0/task:1/device:TPU:0");
EXPECT_EQ(tpu_devices[4][0].host,
"/job:worker/replica:0/task:1/device:CPU:0");
EXPECT_EQ(tpu_devices[5][0].device,
"/job:worker/replica:0/task:1/device:TPU:1");
EXPECT_EQ(tpu_devices[5][0].host,
"/job:worker/replica:0/task:1/device:CPU:0");
EXPECT_EQ(tpu_devices[6][0].device,
"/job:worker/replica:0/task:1/device:TPU:2");
EXPECT_EQ(tpu_devices[6][0].host,
"/job:worker/replica:0/task:1/device:CPU:0");
EXPECT_EQ(tpu_devices[7][0].device,
"/job:worker/replica:0/task:1/device:TPU:3");
EXPECT_EQ(tpu_devices[7][0].host,
"/job:worker/replica:0/task:1/device:CPU:0");
EXPECT_FALSE(tpu_device_assignment.xla_device_assignment.has_value());
}
TEST(TPURewriteDeviceUtilTest, ValidGeneralDeviceAssignmentMesh2x2x2) {
tpu::TopologyProto topology_proto;
{
topology_proto.add_mesh_shape(2);
topology_proto.add_mesh_shape(2);
topology_proto.add_mesh_shape(1);
topology_proto.add_mesh_shape(2);
topology_proto.set_num_tasks(2);
topology_proto.set_num_tpu_devices_per_task(4);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(1);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(1);
topology_proto.add_device_coordinates(1);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(1);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(1);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(1);
topology_proto.add_device_coordinates(1);
topology_proto.add_device_coordinates(1);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(1);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(1);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(1);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(1);
}
std::string topology_attr = topology_proto.SerializeAsString();
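  // 'device_assignment' packs four coordinate values per (replica, logical
  // core) pair, which is why its required length is 'num_replicas' *
  // 'num_cores_per_replica' * 4 in the error messages above.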
std::vector<int64_t> device_assignment_attr{0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0,
0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0,
0, 1, 1, 1, 0, 0, 1, 1, 0, 1};
llvm::SmallVector<Device, 8> devices;
std::vector<std::string> device_names =
      MakeDeviceSet(/*num_tasks=*/2, /*num_devices_per_task=*/4);
ASSERT_TRUE(DeviceNamesToParsedNames(device_names, &devices));
auto status_or = GetTPUCompilationAndExecutionDevices(
      devices, /*num_replicas=*/4, /*num_cores_per_replica=*/2, topology_attr,
device_assignment_attr);
TF_ASSERT_OK(status_or.status());
const auto& tpu_device_assignment = status_or.value();
EXPECT_EQ(tpu_device_assignment.compilation_device,
"/job:worker/replica:0/task:0/device:CPU:0");
const auto& tpu_devices = tpu_device_assignment.tpu_devices;
ASSERT_EQ(tpu_devices.size(), 4);
for (const auto& replica_tpu_devices : tpu_devices)
ASSERT_EQ(replica_tpu_devices.size(), 2);
EXPECT_EQ(tpu_devices[0][0].device,
"/job:worker/replica:0/task:0/device:TPU:0");
EXPECT_EQ(tpu_devices[0][0].host,
"/job:worker/replica:0/task:0/device:CPU:0");
EXPECT_EQ(tpu_devices[0][1].device,
"/job:worker/replica:0/task:1/device:TPU:3");
EXPECT_EQ(tpu_devices[0][1].host,
"/job:worker/replica:0/task:1/device:CPU:0");
EXPECT_EQ(tpu_devices[1][0].device,
"/job:worker/replica:0/task:0/device:TPU:1");
EXPECT_EQ(tpu_devices[1][0].host,
"/job:worker/replica:0/task:0/device:CPU:0");
EXPECT_EQ(tpu_devices[1][1].device,
"/job:worker/replica:0/task:1/device:TPU:2");
EXPECT_EQ(tpu_devices[1][1].host,
"/job:worker/replica:0/task:1/device:CPU:0");
EXPECT_EQ(tpu_devices[2][0].device,
"/job:worker/replica:0/task:0/device:TPU:3");
EXPECT_EQ(tpu_devices[2][0].host,
"/job:worker/replica:0/task:0/device:CPU:0");
EXPECT_EQ(tpu_devices[2][1].device,
"/job:worker/replica:0/task:1/device:TPU:0");
EXPECT_EQ(tpu_devices[2][1].host,
"/job:worker/replica:0/task:1/device:CPU:0");
EXPECT_EQ(tpu_devices[3][0].device,
"/job:worker/replica:0/task:0/device:TPU:2");
EXPECT_EQ(tpu_devices[3][0].host,
"/job:worker/replica:0/task:0/device:CPU:0");
EXPECT_EQ(tpu_devices[3][1].device,
"/job:worker/replica:0/task:1/device:TPU:1");
EXPECT_EQ(tpu_devices[3][1].host,
"/job:worker/replica:0/task:1/device:CPU:0");
auto& xla_device_assignment = tpu_device_assignment.xla_device_assignment;
ASSERT_TRUE(xla_device_assignment.has_value());
EXPECT_EQ(xla_device_assignment->replica_count(), 4);
EXPECT_EQ(xla_device_assignment->computation_count(), 2);
ASSERT_EQ(xla_device_assignment->computation_devices_size(), 2);
const auto& computation_device_0 =
xla_device_assignment->computation_devices(0);
ASSERT_EQ(computation_device_0.replica_device_ids_size(), 4);
const auto& computation_device_1 =
xla_device_assignment->computation_devices(1);
ASSERT_EQ(computation_device_1.replica_device_ids_size(), 4);
EXPECT_EQ(computation_device_0.replica_device_ids(0), 0);
EXPECT_EQ(computation_device_0.replica_device_ids(1), 4);
EXPECT_EQ(computation_device_0.replica_device_ids(2), 2);
EXPECT_EQ(computation_device_0.replica_device_ids(3), 6);
EXPECT_EQ(computation_device_1.replica_device_ids(0), 1);
EXPECT_EQ(computation_device_1.replica_device_ids(1), 5);
EXPECT_EQ(computation_device_1.replica_device_ids(2), 3);
EXPECT_EQ(computation_device_1.replica_device_ids(3), 7);
}
TEST(TPURewriteDeviceUtilTest, ValidXLADeviceAssignmentMesh1x2x1x3) {
tpu::TopologyProto topology_proto;
{
topology_proto.add_mesh_shape(1);
topology_proto.add_mesh_shape(2);
topology_proto.add_mesh_shape(1);
topology_proto.add_mesh_shape(3);
topology_proto.set_num_tasks(3);
topology_proto.set_num_tpu_devices_per_task(2);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(1);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(1);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(1);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(1);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(2);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(1);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(2);
}
std::string topology_attr = topology_proto.SerializeAsString();
std::vector<int64_t> device_assignment_attr{
0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 2, 0, 1, 0, 2, 0, 0, 0, 0, 0, 1, 0, 0};
llvm::SmallVector<Device, 8> devices;
std::vector<std::string> device_names =
      MakeDeviceSet(/*num_tasks=*/3, /*num_devices_per_task=*/2);
ASSERT_TRUE(DeviceNamesToParsedNames(device_names, &devices));
auto xla_device_assignment = GetXlaDeviceAssignmentProto(
      topology_attr, /*num_replicas=*/2, /*num_cores_per_replica=*/3,
device_assignment_attr);
TF_ASSERT_OK(xla_device_assignment.status());
EXPECT_EQ(xla_device_assignment->replica_count(), 2);
EXPECT_EQ(xla_device_assignment->computation_count(), 3);
ASSERT_EQ(xla_device_assignment->computation_devices_size(), 3);
const auto& computation_device_0 =
xla_device_assignment->computation_devices(0);
ASSERT_EQ(computation_device_0.replica_device_ids_size(), 2);
const auto& computation_device_1 =
xla_device_assignment->computation_devices(1);
ASSERT_EQ(computation_device_1.replica_device_ids_size(), 2);
const auto& computation_device_2 =
xla_device_assignment->computation_devices(2);
ASSERT_EQ(computation_device_2.replica_device_ids_size(), 2);
EXPECT_EQ(computation_device_0.replica_device_ids(0), 1);
EXPECT_EQ(computation_device_0.replica_device_ids(1), 5);
EXPECT_EQ(computation_device_1.replica_device_ids(0), 4);
EXPECT_EQ(computation_device_1.replica_device_ids(1), 0);
EXPECT_EQ(computation_device_2.replica_device_ids(0), 2);
EXPECT_EQ(computation_device_2.replica_device_ids(1), 3);
}
TEST(TPURewriteDeviceUtilTest, InvalidXLADeviceAssignmentMesh1x2x1x3) {
tpu::TopologyProto topology_proto;
{
topology_proto.add_mesh_shape(1);
topology_proto.add_mesh_shape(2);
topology_proto.add_mesh_shape(1);
topology_proto.add_mesh_shape(3);
topology_proto.set_num_tasks(3);
topology_proto.set_num_tpu_devices_per_task(2);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(1);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(1);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(1);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(1);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(2);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(1);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(2);
}
std::string topology_attr = topology_proto.SerializeAsString();
std::vector<int64_t> device_assignment_attr{
0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 2, 0, 1, 0, 2, 0, 0, 0, 0, 0, 1, 0, 0};
llvm::SmallVector<Device, 8> devices;
std::vector<std::string> device_names =
      MakeDeviceSet(/*num_tasks=*/3, /*num_devices_per_task=*/2);
ASSERT_TRUE(DeviceNamesToParsedNames(device_names, &devices));
auto xla_device_assignment = GetXlaDeviceAssignmentProto(
      topology_attr, /*num_replicas=*/2, /*num_cores_per_replica=*/2,
device_assignment_attr);
EXPECT_THAT(xla_device_assignment,
testing::StatusIs(
absl::StatusCode::kInvalidArgument,
::testing::HasSubstr(
"must be 'num_replicas' * 'num_cores_per_replica' * ")));
}
TEST(TPURewriteDeviceUtilTest, ValidGeneralDeviceAssignmentMesh1x2x1x3) {
tpu::TopologyProto topology_proto;
{
topology_proto.add_mesh_shape(1);
topology_proto.add_mesh_shape(2);
topology_proto.add_mesh_shape(1);
topology_proto.add_mesh_shape(3);
topology_proto.set_num_tasks(3);
topology_proto.set_num_tpu_devices_per_task(2);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(1);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(1);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(1);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(1);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(2);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(1);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(2);
}
std::string topology_attr = topology_proto.SerializeAsString();
std::vector<int64_t> device_assignment_attr{
0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 2, 0, 1, 0, 2, 0, 0, 0, 0, 0, 1, 0, 0};
llvm::SmallVector<Device, 8> devices;
std::vector<std::string> device_names =
      MakeDeviceSet(/*num_tasks=*/3, /*num_devices_per_task=*/2);
ASSERT_TRUE(DeviceNamesToParsedNames(device_names, &devices));
auto status_or = GetTPUCompilationAndExecutionDevices(
      devices, /*num_replicas=*/2, /*num_cores_per_replica=*/3, topology_attr,
device_assignment_attr);
TF_ASSERT_OK(status_or.status());
auto& tpu_device_assignment = status_or.value();
EXPECT_EQ(tpu_device_assignment.compilation_device,
"/job:worker/replica:0/task:0/device:CPU:0");
auto& tpu_devices = tpu_device_assignment.tpu_devices;
ASSERT_EQ(tpu_devices.size(), 2);
for (const auto& replica_tpu_devices : tpu_devices)
ASSERT_EQ(replica_tpu_devices.size(), 3);
EXPECT_EQ(tpu_devices[0][0].device,
"/job:worker/replica:0/task:1/device:TPU:1");
EXPECT_EQ(tpu_devices[0][0].host,
"/job:worker/replica:0/task:1/device:CPU:0");
EXPECT_EQ(tpu_devices[0][1].device,
"/job:worker/replica:0/task:1/device:TPU:0");
EXPECT_EQ(tpu_devices[0][1].host,
"/job:worker/replica:0/task:1/device:CPU:0");
EXPECT_EQ(tpu_devices[0][2].device,
"/job:worker/replica:0/task:2/device:TPU:0");
EXPECT_EQ(tpu_devices[0][2].host,
"/job:worker/replica:0/task:2/device:CPU:0");
EXPECT_EQ(tpu_devices[1][0].device,
"/job:worker/replica:0/task:2/device:TPU:1");
EXPECT_EQ(tpu_devices[1][0].host,
"/job:worker/replica:0/task:2/device:CPU:0");
EXPECT_EQ(tpu_devices[1][1].device,
"/job:worker/replica:0/task:0/device:TPU:0");
EXPECT_EQ(tpu_devices[1][1].host,
"/job:worker/replica:0/task:0/device:CPU:0");
EXPECT_EQ(tpu_devices[1][2].device,
"/job:worker/replica:0/task:0/device:TPU:1");
EXPECT_EQ(tpu_devices[1][2].host,
"/job:worker/replica:0/task:0/device:CPU:0");
auto& xla_device_assignment = tpu_device_assignment.xla_device_assignment;
ASSERT_TRUE(xla_device_assignment.has_value());
EXPECT_EQ(xla_device_assignment->replica_count(), 2);
EXPECT_EQ(xla_device_assignment->computation_count(), 3);
ASSERT_EQ(xla_device_assignment->computation_devices_size(), 3);
const auto& computation_device_0 =
xla_device_assignment->computation_devices(0);
ASSERT_EQ(computation_device_0.replica_device_ids_size(), 2);
const auto& computation_device_1 =
xla_device_assignment->computation_devices(1);
ASSERT_EQ(computation_device_1.replica_device_ids_size(), 2);
const auto& computation_device_2 =
xla_device_assignment->computation_devices(2);
ASSERT_EQ(computation_device_2.replica_device_ids_size(), 2);
EXPECT_EQ(computation_device_0.replica_device_ids(0), 1);
EXPECT_EQ(computation_device_0.replica_device_ids(1), 5);
EXPECT_EQ(computation_device_1.replica_device_ids(0), 4);
EXPECT_EQ(computation_device_1.replica_device_ids(1), 0);
EXPECT_EQ(computation_device_2.replica_device_ids(0), 2);
EXPECT_EQ(computation_device_2.replica_device_ids(1), 3);
}
TEST(TPURewriteDeviceUtilTest, TestGetDeviceCoordinates) {
mlir::MLIRContext context;
mlir::Builder builder(&context);
auto device_assignment_attr = builder.getI64ArrayAttr({1, 2, 3});
  auto status_or_device_coordinates =
      GetDeviceCoordinates(device_assignment_attr);
  ASSERT_TRUE(status_or_device_coordinates.ok());
  auto device_coordinates = status_or_device_coordinates.value();
EXPECT_EQ(device_coordinates[0], 1);
EXPECT_EQ(device_coordinates[1], 2);
EXPECT_EQ(device_coordinates[2], 3);
}
TEST(TPURewriteDeviceUtilTest, TestInvalidAttrForDeviceAssignmentDisallowed) {
mlir::MLIRContext context;
mlir::Builder builder(&context);
auto device_assignment_attr = builder.getF32ArrayAttr({1.0, 2.0, 3.0});
  auto status_or_device_coordinates =
      GetDeviceCoordinates(device_assignment_attr);
  ASSERT_TRUE(!status_or_device_coordinates.ok());
  EXPECT_EQ(status_or_device_coordinates.status().message(),
"bad 'device_assignment' attribute at index 0, not an int");
}
TEST(TPURewriteDeviceUtilTest, TestHasModelParallelismFalse) {
mlir::MLIRContext context;
context.loadDialect<mlir::tf_device::TensorFlowDeviceDialect>();
mlir::OwningOpRef<mlir::ModuleOp> module_ref =
mlir::ModuleOp::create(mlir::UnknownLoc::get(&context));
mlir::OpBuilder builder(module_ref->getBodyRegion());
llvm::SmallVector<mlir::Type, 8> result_types;
auto cluster = builder.create<mlir::tf_device::ClusterOp>(
mlir::UnknownLoc::get(&context), result_types);
cluster->setAttr(kNumCoresPerReplicaAttr,
builder.getIntegerAttr(builder.getIntegerType(64), 1));
cluster->setAttr(kTopologyAttr, builder.getStringAttr(""));
cluster->setAttr(kDeviceAssignmentAttr, builder.getArrayAttr({}));
EXPECT_FALSE(HasModelParallelism(cluster));
}
TEST(TPURewriteDeviceUtilTest, TestHasModelParallelismTrue) {
mlir::MLIRContext context;
context.loadDialect<mlir::tf_device::TensorFlowDeviceDialect>();
mlir::OwningOpRef<mlir::ModuleOp> module_ref =
mlir::ModuleOp::create(mlir::UnknownLoc::get(&context));
mlir::OpBuilder builder(module_ref->getBodyRegion());
llvm::SmallVector<mlir::Type, 8> result_types;
auto cluster = builder.create<mlir::tf_device::ClusterOp>(
mlir::UnknownLoc::get(&context), result_types);
cluster->setAttr(kNumCoresPerReplicaAttr,
builder.getIntegerAttr(builder.getIntegerType(64), 5));
cluster->setAttr(kTopologyAttr, builder.getStringAttr(""));
cluster->setAttr(kDeviceAssignmentAttr, builder.getArrayAttr({}));
EXPECT_TRUE(HasModelParallelism(cluster));
}
TEST(TPURewriteDeviceUtilTest,
TestHasModelParallelismFalseMissingCoresPerReplicaAttr) {
mlir::MLIRContext context;
context.loadDialect<mlir::tf_device::TensorFlowDeviceDialect>();
mlir::OwningOpRef<mlir::ModuleOp> module_ref =
mlir::ModuleOp::create(mlir::UnknownLoc::get(&context));
mlir::OpBuilder builder(module_ref->getBodyRegion());
llvm::SmallVector<mlir::Type, 8> result_types;
auto cluster = builder.create<mlir::tf_device::ClusterOp>(
mlir::UnknownLoc::get(&context), result_types);
cluster->setAttr(kNumCoresPerReplicaAttr,
builder.getIntegerAttr(builder.getIntegerType(64), 1));
cluster->setAttr(kTopologyAttr, builder.getStringAttr(""));
cluster->setAttr(kDeviceAssignmentAttr, builder.getArrayAttr({}));
EXPECT_FALSE(HasModelParallelism(cluster));
}
TEST(TPURewriteDeviceUtilTest,
TestGetHostFailNumCoresPerReplicaMissingAttributes) {
mlir::MLIRContext context;
context.loadDialect<mlir::tf_device::TensorFlowDeviceDialect>();
mlir::OwningOpRef<mlir::ModuleOp> module_ref =
mlir::ModuleOp::create(mlir::UnknownLoc::get(&context));
mlir::OpBuilder builder(module_ref->getBodyRegion());
llvm::SmallVector<mlir::Type, 8> result_types;
auto cluster = builder.create<mlir::tf_device::ClusterOp>(
mlir::UnknownLoc::get(&context), result_types);
cluster->setAttr(kDeviceAssignmentAttr, builder.getArrayAttr({}));
mlir::TF::RuntimeDevices devices;
std::string host_device;
EXPECT_TRUE(mlir::failed(
GetHostDeviceOutsideComputation(devices, cluster, &host_device)));
}
TEST(TPURewriteDeviceUtilTest, TestGetHostFailDeviceMissingAttributes) {
mlir::MLIRContext context;
context.loadDialect<mlir::tf_device::TensorFlowDeviceDialect>();
mlir::OwningOpRef<mlir::ModuleOp> module_ref =
mlir::ModuleOp::create(mlir::UnknownLoc::get(&context));
mlir::OpBuilder builder(module_ref->getBodyRegion());
llvm::SmallVector<mlir::Type, 8> result_types;
auto cluster = builder.create<mlir::tf_device::ClusterOp>(
mlir::UnknownLoc::get(&context), result_types);
cluster->setAttr(kNumCoresPerReplicaAttr,
builder.getIntegerAttr(builder.getIntegerType(64), 1));
mlir::TF::RuntimeDevices devices;
std::string host_device;
EXPECT_TRUE(mlir::failed(
GetHostDeviceOutsideComputation(devices, cluster, &host_device)));
}
TEST(TPURewriteDeviceUtilTest, TestGetHostDeviceFailMissingTopology) {
mlir::MLIRContext context;
context.loadDialect<mlir::tf_device::TensorFlowDeviceDialect>();
mlir::OwningOpRef<mlir::ModuleOp> module_ref =
mlir::ModuleOp::create(mlir::UnknownLoc::get(&context));
mlir::OpBuilder builder(module_ref->getBodyRegion());
llvm::SmallVector<mlir::Type, 8> result_types;
auto cluster = builder.create<mlir::tf_device::ClusterOp>(
mlir::UnknownLoc::get(&context), result_types);
cluster->setAttr(kNumCoresPerReplicaAttr,
builder.getIntegerAttr(builder.getIntegerType(64), 1));
cluster->setAttr(kDeviceAssignmentAttr, builder.getArrayAttr({}));
mlir::TF::RuntimeDevices runtime_devices;
std::string host_device;
EXPECT_TRUE(mlir::failed(
GetHostDeviceOutsideComputation(runtime_devices, cluster, &host_device)));
}
TEST(TPURewriteDeviceUtilTest, TestGetHostDeviceFailMissingDeviceAssignment) {
mlir::MLIRContext context;
context.loadDialect<mlir::tf_device::TensorFlowDeviceDialect>();
mlir::OwningOpRef<mlir::ModuleOp> module_ref =
mlir::ModuleOp::create(mlir::UnknownLoc::get(&context));
mlir::OpBuilder builder(module_ref->getBodyRegion());
llvm::SmallVector<mlir::Type, 8> result_types;
auto cluster = builder.create<mlir::tf_device::ClusterOp>(
mlir::UnknownLoc::get(&context), result_types);
cluster->setAttr(kNumCoresPerReplicaAttr,
builder.getIntegerAttr(builder.getIntegerType(64), 1));
cluster->setAttr(kTopologyAttr, builder.getStringAttr(""));
mlir::TF::RuntimeDevices runtime_devices;
std::string host_device;
EXPECT_TRUE(mlir::failed(
GetHostDeviceOutsideComputation(runtime_devices, cluster, &host_device)));
}
TEST(TPURewriteDeviceUtilTest, TestGetHostDeviceFailBadDeviceAssignment) {
mlir::MLIRContext context;
context.loadDialect<mlir::tf_device::TensorFlowDeviceDialect>();
mlir::OwningOpRef<mlir::ModuleOp> module_ref =
mlir::ModuleOp::create(mlir::UnknownLoc::get(&context));
mlir::OpBuilder builder(module_ref->getBodyRegion());
llvm::SmallVector<mlir::Type, 8> result_types;
auto cluster = builder.create<mlir::tf_device::ClusterOp>(
mlir::UnknownLoc::get(&context), result_types);
cluster->setAttr(kNumCoresPerReplicaAttr,
builder.getIntegerAttr(builder.getIntegerType(64), 1));
cluster->setAttr(kTopologyAttr, builder.getStringAttr(""));
  cluster->setAttr(kDeviceAssignmentAttr,
                   builder.getStrArrayAttr(llvm::ArrayRef<llvm::StringRef>(
                       {"bad_device_assignment"})));
mlir::TF::RuntimeDevices runtime_devices;
std::string host_device;
EXPECT_TRUE(mlir::failed(
GetHostDeviceOutsideComputation(runtime_devices, cluster, &host_device)));
}
TEST(TPURewriteDeviceUtilTest, TestGetHostDeviceFailBadDeviceName) {
mlir::MLIRContext context;
context.loadDialect<mlir::tf_device::TensorFlowDeviceDialect>();
mlir::OwningOpRef<mlir::ModuleOp> module_ref =
mlir::ModuleOp::create(mlir::UnknownLoc::get(&context));
mlir::OpBuilder builder(module_ref->getBodyRegion());
(*module_ref)
->setAttr("tf.devices",
builder.getStrArrayAttr(
llvm::ArrayRef<llvm::StringRef>({"bad_device_name"})));
llvm::SmallVector<mlir::Type, 8> result_types;
auto cluster = builder.create<mlir::tf_device::ClusterOp>(
mlir::UnknownLoc::get(&context), result_types);
cluster->setAttr(kNumCoresPerReplicaAttr,
builder.getIntegerAttr(builder.getIntegerType(64), 1));
cluster->setAttr(kTopologyAttr, builder.getStringAttr(""));
cluster->setAttr(kDeviceAssignmentAttr, builder.getArrayAttr({}));
mlir::TF::RuntimeDevices runtime_devices;
(void)GetDevicesFromOp(*module_ref, &runtime_devices);
std::string host_device;
EXPECT_TRUE(mlir::failed(
GetHostDeviceOutsideComputation(runtime_devices, cluster, &host_device)));
}
TEST(TPURewriteDeviceUtilTest, TestGetHostDeviceTPUReplicate) {
mlir::MLIRContext context;
context.loadDialect<mlir::tf_device::TensorFlowDeviceDialect>();
mlir::OwningOpRef<mlir::ModuleOp> module_ref =
mlir::ModuleOp::create(mlir::UnknownLoc::get(&context));
mlir::OpBuilder builder(module_ref->getBodyRegion());
llvm::SmallDenseMap<llvm::StringRef, llvm::SmallVector<llvm::StringRef, 4>>
devices;
auto replicate = builder.create<mlir::tf_device::ReplicateOp>(
mlir::UnknownLoc::get(&context), 2, devices,
llvm::ArrayRef<std::pair<mlir::ValueRange, mlir::Type>>{},
mlir::ValueRange{}, mlir::TypeRange{});
builder.setInsertionPoint(&replicate.getBody().front(),
replicate.getBody().front().begin());
llvm::SmallVector<mlir::Type, 8> result_types;
auto cluster = builder.create<mlir::tf_device::ClusterOp>(
mlir::UnknownLoc::get(&context), result_types);
mlir::TF::RuntimeDevices runtime_devices;
std::string host_device;
EXPECT_TRUE(mlir::succeeded(
GetHostDeviceOutsideComputation(runtime_devices, cluster, &host_device)));
EXPECT_EQ(host_device, GetDeviceAliasForHostOfLogicalCore(0));
}
TEST(TPURewriteDeviceUtilTest, TestGetHostDeviceNotReplicated) {
mlir::MLIRContext context;
context.loadDialect<mlir::tf_device::TensorFlowDeviceDialect>();
mlir::OwningOpRef<mlir::ModuleOp> module_ref =
mlir::ModuleOp::create(mlir::UnknownLoc::get(&context));
mlir::OpBuilder builder(module_ref->getBodyRegion());
(*module_ref)
->setAttr("tf.devices",
builder.getStrArrayAttr(llvm::ArrayRef<llvm::StringRef>(
{"/job:localhost/replica:0/task:0/device:TPU_SYSTEM:0",
"/job:localhost/replica:0/task:0/device:TPU:0",
"/job:localhost/replica:0/task:0/device:CPU:0",
"/job:worker/replica:0/task:0/device:CPU:0"})));
llvm::SmallVector<mlir::Type, 8> result_types;
auto cluster = builder.create<mlir::tf_device::ClusterOp>(
mlir::UnknownLoc::get(&context), result_types);
cluster->setAttr(kNumCoresPerReplicaAttr,
builder.getIntegerAttr(builder.getIntegerType(64), 1));
cluster->setAttr(kTopologyAttr, builder.getStringAttr(""));
cluster->setAttr(kDeviceAssignmentAttr, builder.getArrayAttr({}));
mlir::TF::RuntimeDevices runtime_devices;
(void)GetDevicesFromOp(*module_ref, &runtime_devices);
std::string host_device;
EXPECT_TRUE(mlir::succeeded(
GetHostDeviceOutsideComputation(runtime_devices, cluster, &host_device)));
EXPECT_EQ(host_device, "/job:localhost/replica:0/task:0/device:CPU:0");
}
TEST(TPURewriteDeviceUtilTest, TestGetHostDeviceInGenericPipeline) {
mlir::MLIRContext context;
context.loadDialect<mlir::tf_device::TensorFlowDeviceDialect>();
mlir::OwningOpRef<mlir::ModuleOp> module_ref =
mlir::ModuleOp::create(mlir::UnknownLoc::get(&context));
mlir::OpBuilder builder(module_ref->getBodyRegion());
(*module_ref)
->setAttr("tf.devices",
builder.getStrArrayAttr(llvm::ArrayRef<llvm::StringRef>(
{"/job:localhost/replica:0/task:0/device:CPU:0"})));
llvm::SmallVector<mlir::Type, 8> result_types;
auto cluster = builder.create<mlir::tf_device::ClusterOp>(
mlir::UnknownLoc::get(&context), result_types);
mlir::TF::RuntimeDevices runtime_devices;
(void)GetDevicesFromOp(*module_ref, &runtime_devices);
std::string host_device;
EXPECT_TRUE(mlir::succeeded(
GetHostDeviceOutsideComputation(runtime_devices, cluster, &host_device)));
EXPECT_EQ(host_device, "/job:localhost/replica:0/task:0/device:CPU:0");
}
TEST(TPURewriteDeviceUtilTest, TestGetHostDeviceInGenericPipelineMultiCPUs) {
mlir::MLIRContext context;
context.loadDialect<mlir::tf_device::TensorFlowDeviceDialect>();
mlir::OwningOpRef<mlir::ModuleOp> module_ref =
mlir::ModuleOp::create(mlir::UnknownLoc::get(&context));
mlir::OpBuilder builder(module_ref->getBodyRegion());
(*module_ref)
->setAttr("tf.devices",
builder.getStrArrayAttr(llvm::ArrayRef<llvm::StringRef>(
{"/job:chief/replica:0/task:0/device:CPU:0",
"/job:ps/replica:0/task:0/device:CPU:0",
"/job:ps/replica:0/task:1/device:CPU:0",
"/job:worker/replica:0/task:2/device:CPU:0"})));
llvm::SmallVector<mlir::Type, 8> result_types;
auto cluster = builder.create<mlir::tf_device::ClusterOp>(
mlir::UnknownLoc::get(&context), result_types);
mlir::TF::RuntimeDevices runtime_devices;
(void)GetDevicesFromOp(*module_ref, &runtime_devices);
std::string host_device;
EXPECT_TRUE(mlir::succeeded(
GetHostDeviceOutsideComputation(runtime_devices, cluster, &host_device)));
EXPECT_EQ(host_device, "/job:chief/replica:0/task:0/device:CPU:0");
}
TEST(TPURewriteDeviceUtilTest, TestIsTPUDevice) {
EXPECT_TRUE(IsTPUDevice("/job:localhost/replica:0/task:0/device:TPU:0"));
EXPECT_FALSE(IsTPUDevice("/job:localhost/replica:0/task:0/device:CPU:0"));
EXPECT_FALSE(IsTPUDevice("INVALID_DEVICE"));
}
TEST(TPURewriteDeviceUtilTest, TestDeviceToHostMapBadTopology) {
static const char* const module_str =
R"(
module attributes {tf.devices = {"/job:localhost/replica:0/task:0/device:CPU:0", "/job:localhost/replica:0/task:0/device:TPU:0", "/job:localhost/replica:0/task:0/device:TPU:1", "/job:localhost/replica:0/task:0/device:TPU_SYSTEM:0"}} {
func.func @main() -> () {
"tf_device.cluster"() ({
tf_device.return
}) {device_assignment = [0, 0, 0, 0, 0, 0, 0, 1], num_cores_per_replica = 2 : i64} : () -> ()
func.return
}
})";
mlir::MLIRContext context;
TF_ASSERT_OK_AND_ASSIGN(mlir::OwningOpRef<mlir::ModuleOp> module,
GetMlirModuleFromString(module_str, &context));
mlir::tf_device::ClusterOp cluster;
module->walk(
[&](mlir::tf_device::ClusterOp descendant) { cluster = descendant; });
llvm::SmallVector<std::string, 8> core_to_host;
EXPECT_TRUE(mlir::failed(GetDeviceToHostMap(cluster, core_to_host)));
}
TEST(TPURewriteDeviceUtilTest, TestDeviceToHostMapBadDeviceAssignment) {
static const char* const module_str =
R"(
module attributes {tf.devices = {"/job:localhost/replica:0/task:0/device:CPU:0", "/job:localhost/replica:0/task:0/device:TPU:0", "/job:localhost/replica:0/task:0/device:TPU:1", "/job:localhost/replica:0/task:0/device:TPU_SYSTEM:0"}} {
func.func @main() -> () {
"tf_device.cluster"() ({
tf_device.return
}) {num_cores_per_replica = 2 : i64, topology = "\0A\04\01\01\01\02\10\01\18\02\22\08\00\00\00\00\00\00\00\01*\02\08\01"} : () -> ()
func.return
}
})";
mlir::MLIRContext context;
TF_ASSERT_OK_AND_ASSIGN(mlir::OwningOpRef<mlir::ModuleOp> module,
GetMlirModuleFromString(module_str, &context));
mlir::tf_device::ClusterOp cluster;
module->walk(
[&](mlir::tf_device::ClusterOp descendant) { cluster = descendant; });
llvm::SmallVector<std::string, 8> core_to_host;
EXPECT_TRUE(mlir::failed(GetDeviceToHostMap(cluster, core_to_host)));
}
TEST(TPURewriteDeviceUtilTest, TestDeviceToHostMapNotReplicated) {
static const char* const module_str =
R"(
module attributes {tf.devices = {"/job:localhost/replica:0/task:0/device:CPU:0", "/job:localhost/replica:0/task:0/device:TPU:0", "/job:localhost/replica:0/task:0/device:TPU:1", "/job:localhost/replica:0/task:0/device:TPU_SYSTEM:0"}} {
func.func @main() -> () {
"tf_device.cluster"() ({
tf_device.return
}) {device_assignment = [0, 0, 0, 0, 0, 0, 0, 1], num_cores_per_replica = 2 : i64, topology = "\0A\04\01\01\01\02\10\01\18\02\22\08\00\00\00\00\00\00\00\01*\02\08\01"} : () -> ()
func.return
}
})";
mlir::MLIRContext context;
TF_ASSERT_OK_AND_ASSIGN(mlir::OwningOpRef<mlir::ModuleOp> module,
GetMlirModuleFromString(module_str, &context));
mlir::tf_device::ClusterOp cluster;
module->walk(
[&](mlir::tf_device::ClusterOp descendant) { cluster = descendant; });
llvm::SmallVector<std::string, 8> core_to_host;
EXPECT_TRUE(mlir::succeeded(GetDeviceToHostMap(cluster, core_to_host)));
EXPECT_EQ(core_to_host.size(), 2);
EXPECT_EQ(core_to_host[0], "/job:localhost/replica:0/task:0/device:CPU:0");
EXPECT_EQ(core_to_host[1], "/job:localhost/replica:0/task:0/device:CPU:0");
}
TEST(TPURewriteDeviceUtilTest, TestDeviceToHostMapReplicated) {
static const char* const module_str =
R"(
module attributes {tf.devices = {"/job:localhost/replica:0/task:0/device:CPU:0", "/job:localhost/replica:0/task:0/device:TPU:0", "/job:localhost/replica:0/task:0/device:TPU:1", "/job:localhost/replica:0/task:0/device:TPU:2", "/job:localhost/replica:0/task:0/device:TPU:3", "/job:localhost/replica:0/task:0/device:TPU:4", "/job:localhost/replica:0/task:0/device:TPU:5", "/job:localhost/replica:0/task:0/device:TPU:6", "/job:localhost/replica:0/task:0/device:TPU:7", "/job:localhost/replica:0/task:0/device:TPU_SYSTEM:0"}} {
func.func @main() -> () {
tf_device.replicate() {n = 4 : i32} {
"tf_device.cluster"() ({
tf_device.return
}) {device_assignment = [0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1], num_cores_per_replica = 2 : i64, topology = "\0A\04\02\02\01\02\10\01\18\08\22 \00\00\00\00\00\00\00\01\01\00\00\00\01\00\00\01\00\01\00\00\00\01\00\01\01\01\00\00\01\01\00\01*\02\08\01"} : () -> ()
tf_device.return
}
func.return
}
})";
mlir::MLIRContext context;
TF_ASSERT_OK_AND_ASSIGN(mlir::OwningOpRef<mlir::ModuleOp> module,
GetMlirModuleFromString(module_str, &context));
mlir::tf_device::ClusterOp cluster;
module->walk(
[&](mlir::tf_device::ClusterOp descendant) { cluster = descendant; });
llvm::SmallVector<std::string, 8> core_to_host;
EXPECT_TRUE(mlir::succeeded(GetDeviceToHostMap(cluster, core_to_host)));
EXPECT_EQ(core_to_host.size(), 2);
EXPECT_EQ(core_to_host[0], "TPU_REPLICATED_HOST_0");
EXPECT_EQ(core_to_host[1], "TPU_REPLICATED_HOST_1");
}
TEST(TPURewriteDeviceUtilTest, TestDeviceToHostMapCPU) {
static const char* const module_str =
R"(
module attributes {tf.devices = {"/job:localhost/replica:0/task:0/device:CPU:0"}} {
func.func @main() -> () {
"tf_device.cluster"() ({
tf_device.return
}) {} : () -> ()
func.return
}
})";
mlir::MLIRContext context;
TF_ASSERT_OK_AND_ASSIGN(mlir::OwningOpRef<mlir::ModuleOp> module,
GetMlirModuleFromString(module_str, &context));
mlir::tf_device::ClusterOp cluster;
module->walk(
[&](mlir::tf_device::ClusterOp descendant) { cluster = descendant; });
llvm::SmallVector<std::string, 8> core_to_host;
EXPECT_TRUE(mlir::succeeded(GetDeviceToHostMap(cluster, core_to_host)));
EXPECT_EQ(core_to_host.size(), 1);
EXPECT_EQ(core_to_host[0], "/job:localhost/replica:0/task:0/device:CPU:0");
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tensorflow/utils/tpu_rewrite_device_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tensorflow/utils/tpu_rewrite_device_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
85a652e9-c99d-43a7-9418-8569ec44a1db | cpp | tensorflow/tensorflow | rotate | tensorflow/lite/experimental/ml_adjacent/algo/rotate.cc | tensorflow/lite/experimental/ml_adjacent/algo/rotate_test.cc | #include <cmath>
#include <cstring>
#include "tensorflow/lite/experimental/ml_adjacent/lib.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
namespace ml_adj {
namespace rotate {
namespace {
using ::ml_adj::algo::Algo;
using ::ml_adj::algo::InputPack;
using ::ml_adj::algo::OutputPack;
using ::ml_adj::data::DataRef;
using ::ml_adj::data::MutableDataRef;
inline float DegreesToRadians(int angle) { return angle * M_PI / 180; }
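// Computes the rotated image bounds: multiples of 90 degrees keep or swap the
// source dimensions, while other angles grow the output to the bounding box of
// the rotated corners (e.g. the 8x8 input rotated by -45 degrees in the
// accompanying test comes out 12x12).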
void ComputeNewSize(dim_t src_width, dim_t src_height, int angle,
dim_t& dst_width, dim_t& dst_height) {
dst_width = src_width;
dst_height = src_height;
if (angle % 90 == 0) {
if (angle == 90 || angle == 270) {
dst_width = src_height;
dst_height = src_width;
}
} else {
const float angle_rad = DegreesToRadians(angle);
const float cos_angle = std::cos(angle_rad);
const float sin_angle = std::sin(angle_rad);
const int edge_x = src_width / 2;
const int edge_y = src_height / 2;
for (int y : {-edge_y, edge_y}) {
for (int x : {-edge_x, edge_x}) {
const int x_transformed =
static_cast<int>(std::floor(cos_angle * x + sin_angle * y));
const int y_transformed =
static_cast<int>(std::floor(-sin_angle * x + cos_angle * y));
if (std::abs(x_transformed) > dst_width / 2)
dst_width = 2 * std::abs(x_transformed);
if (std::abs(y_transformed) > dst_height / 2)
dst_height = 2 * std::abs(y_transformed);
}
}
}
}
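// Fast path for a 90-degree rotation (clockwise, as fixed by the accompanying
// test): source pixel (y, x) lands at destination (x, output_width - y - 1).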
void Rotate90(int batches, int input_height, int input_width, int depth,
int output_height, int output_width, const float* input_data,
float* output_data) {
TFLITE_DCHECK(input_data != nullptr);
TFLITE_DCHECK(output_data != nullptr);
const int pixel_stride = depth;
const int src_row_stride = input_width * depth;
const int dst_row_stride = output_width * depth;
const int src_batch_stride = src_row_stride * input_height;
const int dst_batch_stride = dst_row_stride * output_height;
for (int b = 0; b < batches; ++b) {
    const float* src_data_ptr = input_data + b * src_batch_stride;
    float* dst_data_ptr = output_data + b * dst_batch_stride;
for (int y = 0; y < input_height; ++y) {
      const float* src_ptr_row = src_data_ptr + y * src_row_stride;
for (int x = 0; x < input_width; ++x) {
        float* dst_ptr_row = dst_data_ptr + x * dst_row_stride;
const float* src_ptr_pixel = src_ptr_row + x * pixel_stride;
float* dst_pixel_ptr =
dst_ptr_row + (output_width - y - 1) * pixel_stride;
for (int c = 0; c < depth; ++c) {
*dst_pixel_ptr++ = *src_ptr_pixel++;
}
}
}
}
}
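// Fast path for a 180-degree rotation: rows and columns are both reversed by
// walking the destination row pointer backwards.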
void Rotate180(int batches, int input_height, int input_width, int depth,
int output_height, int output_width, const float* input_data,
float* output_data) {
TFLITE_DCHECK(input_data != nullptr);
TFLITE_DCHECK(output_data != nullptr);
const int dst_pixel_stride = depth;
const int src_row_stride = input_width * depth;
const int dst_row_stride = output_width * depth;
const int src_batch_stride = src_row_stride * input_height;
const int dst_batch_stride = dst_row_stride * output_height;
for (int b = 0; b < batches; ++b) {
    const float* src_data_ptr = input_data + b * src_batch_stride;
    float* dst_data_ptr = output_data + b * dst_batch_stride;
for (int y = 0; y < input_height; ++y) {
      const float* src_ptr_row = src_data_ptr + y * src_row_stride;
      float* dst_ptr_row = dst_data_ptr +
(output_height - y - 1) * dst_row_stride +
(output_width - 1) * dst_pixel_stride;
for (int x = 0; x < input_width; ++x) {
for (int c = 0; c < depth; ++c) {
dst_ptr_row[c] = src_ptr_row[c];
}
dst_ptr_row -= depth;
src_ptr_row += depth;
}
}
}
}
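// Fast path for a 270-degree rotation: source pixel (y, x) lands at
// destination (output_height - x - 1, y).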
void Rotate270(int batches, int input_height, int input_width, int depth,
int output_height, int output_width, const float* input_data,
float* output_data) {
TFLITE_DCHECK(input_data != nullptr);
TFLITE_DCHECK(output_data != nullptr);
const int pixel_stride = depth;
const int src_row_stride = input_width * depth;
const int dst_row_stride = output_width * depth;
const int src_batch_stride = src_row_stride * input_height;
const int dst_batch_stride = dst_row_stride * output_height;
for (int b = 0; b < batches; ++b) {
    const float* src_data_ptr = input_data + b * src_batch_stride;
    float* dst_data_ptr = output_data + b * dst_batch_stride;
for (int y = 0; y < input_height; ++y) {
      const float* src_ptr_row = src_data_ptr + y * src_row_stride;
for (int x = 0; x < input_width; ++x) {
float* dst_ptr_row =
            dst_data_ptr + (output_height - x - 1) * dst_row_stride;
const float* src_ptr_pixel = src_ptr_row + x * pixel_stride;
float* dst_pixel_ptr = dst_ptr_row + y * pixel_stride;
for (int c = 0; c < depth; ++c) {
*dst_pixel_ptr++ = *src_ptr_pixel++;
}
}
}
}
}
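// Generic path for arbitrary angles: each destination pixel, taken relative to
// the output center, is inverse-rotated into source coordinates and bilinearly
// interpolated from its four integer neighbours; destination pixels whose
// source footprint falls outside the input keep the zero fill from the initial
// memset.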
void RotateGeneric(int batches, int input_height, int input_width, int depth,
int output_height, int output_width, int angle,
const float* input_data, float* output_data) {
TFLITE_DCHECK(input_data != nullptr);
TFLITE_DCHECK(output_data != nullptr);
const int pixel_stride = depth;
const int src_row_stride = input_width * depth;
const int dst_row_stride = output_width * depth;
const int src_batch_stride = src_row_stride * input_height;
const int dst_batch_stride = dst_row_stride * output_height;
memset(
output_data, 0,
batches * output_width * output_height * depth * sizeof(output_data[0]));
const float angle_rad = DegreesToRadians(angle);
const float cos_angle = std::cos(angle_rad);
const float sin_angle = std::sin(angle_rad);
for (int b = 0; b < batches; ++b) {
    const float* src_data_ptr = input_data + b * src_batch_stride;
    float* dst_data_ptr = output_data + b * dst_batch_stride;
for (int y = -output_height / 2; y < output_height / 2; ++y) {
for (int x = -output_width / 2; x < output_width / 2; ++x) {
const float x_transformed = cos_angle * x + sin_angle * y;
const float y_transformed = -sin_angle * x + cos_angle * y;
const int x_transformed_integer =
static_cast<int>(std::floor(x_transformed));
const int y_transformed_integer =
static_cast<int>(std::floor(y_transformed));
const int x_src_integer = x_transformed_integer + input_width / 2;
const int y_src_integer = y_transformed_integer + input_height / 2;
const int x0 = x_src_integer;
const int x1 = x_src_integer + 1;
const int y0 = y_src_integer;
const int y1 = y_src_integer + 1;
if (x0 < 0 || x0 >= input_width) continue;
if (x1 < 0 || x1 >= input_width) continue;
if (y0 < 0 || y0 >= input_height) continue;
if (y1 < 0 || y1 >= input_height) continue;
const float x_dist = x_transformed - x_transformed_integer;
const float y_dist = y_transformed - y_transformed_integer;
const float one_minus_x_dist = 1 - x_dist;
const float one_minus_y_dist = 1 - y_dist;
        const float* src_ptr_row0 = src_data_ptr + y0 * src_row_stride;
        const float* src_ptr_row1 = src_data_ptr + y1 * src_row_stride;
float* dst_row_ptr =
            dst_data_ptr + (y + output_height / 2) * dst_row_stride;
const float* src_ptr_pixel00 = src_ptr_row0 + x0 * pixel_stride;
const float* src_ptr_pixel10 = src_ptr_row0 + x1 * pixel_stride;
const float* src_ptr_pixel01 = src_ptr_row1 + x0 * pixel_stride;
const float* src_ptr_pixel11 = src_ptr_row1 + x1 * pixel_stride;
float* dst_pixel_ptr =
dst_row_ptr + (x + output_width / 2) * pixel_stride;
for (int c = 0; c < depth; ++c) {
const float v00 = *src_ptr_pixel00++;
const float v01 = *src_ptr_pixel01++;
const float v10 = *src_ptr_pixel10++;
const float v11 = *src_ptr_pixel11++;
*dst_pixel_ptr++ =
(v10 * one_minus_y_dist + v11 * y_dist) * x_dist +
(v00 * one_minus_y_dist + v01 * y_dist) * one_minus_x_dist;
}
}
}
}
}
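// Reads the NHWC float image (inputs[0]) and the integer rotation angle in
// degrees (inputs[1]), resizes the output to the bounds from ComputeNewSize,
// and dispatches to the 90/180/270 fast paths or the generic bilinear path.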
void ComputeRotate(const InputPack& inputs, const OutputPack& outputs) {
TFLITE_DCHECK(inputs.size() == 2);
TFLITE_DCHECK(outputs.size() == 1);
const DataRef* img = inputs[0];
const float* img_data = reinterpret_cast<const float*>(img->Data());
const dim_t img_num_batches = img->Dims()[0];
const dim_t img_height = img->Dims()[1];
const dim_t img_width = img->Dims()[2];
const dim_t img_num_channels = img->Dims()[3];
const DataRef* angle = inputs[1];
const int angle_data = *reinterpret_cast<const int*>(angle->Data());
MutableDataRef* output = outputs[0];
dim_t new_width = 0;
dim_t new_height = 0;
ComputeNewSize(img_width, img_height, angle_data, new_width, new_height);
output->Resize({img_num_batches, new_height, new_width, img_num_channels});
float* output_data = reinterpret_cast<float*>(output->Data());
if (angle_data == 90) {
Rotate90(img_num_batches, img_height, img_width, img_num_channels,
new_height, new_width, img_data, output_data);
return;
}
if (angle_data == 180) {
Rotate180(img_num_batches, img_height, img_width, img_num_channels,
new_height, new_width, img_data, output_data);
return;
}
if (angle_data == 270) {
Rotate270(img_num_batches, img_height, img_width, img_num_channels,
new_height, new_width, img_data, output_data);
return;
}
RotateGeneric(img_num_batches, img_height, img_width, img_num_channels,
new_height, new_width, angle_data, img_data, output_data);
}
}
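// A minimal usage sketch, mirroring the accompanying unit test:
//   OwningVectorRef img(etype_t::f32);    // NHWC float image data
//   OwningVectorRef angle(etype_t::i32);  // rotation angle in degrees
//   OwningVectorRef output(etype_t::f32);
//   Impl_Rotate()->process({&img, &angle}, {&output});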
const Algo* Impl_Rotate() {
static const Algo rotate = {&ComputeRotate, nullptr};
return &rotate;
}
}
} | #include "tensorflow/lite/experimental/ml_adjacent/algo/rotate.h"
#include <cstring>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/lite/experimental/ml_adjacent/data/owning_vector_ref.h"
#include "tensorflow/lite/experimental/ml_adjacent/lib.h"
using ::ml_adj::algo::Algo;
using ::ml_adj::data::OwningVectorRef;
namespace ml_adj {
namespace rotate {
namespace {
struct RotateTestParams {
const std::vector<dim_t> img_dims;
const std::vector<float> img_data;
const int angle;
const std::vector<float> expected_data;
const std::vector<dim_t> expected_shape;
};
class RotateTest : public ::testing::TestWithParam<RotateTestParams> {};
TEST_P(RotateTest, FloatPixelType) {
constexpr float kAbsError = 0.01f;
const RotateTestParams& params = GetParam();
OwningVectorRef img(etype_t::f32);
img.Resize(dims_t(params.img_dims));
ASSERT_EQ(img.Bytes(), params.img_data.size() * sizeof(float));
std::memcpy(img.Data(), params.img_data.data(), img.Bytes());
OwningVectorRef angle(etype_t::i32);
angle.Resize({1});
ASSERT_EQ(angle.Bytes(), sizeof(int));
  std::memcpy(angle.Data(), &params.angle, angle.Bytes());
OwningVectorRef output(etype_t::f32);
const Algo* rotate = Impl_Rotate();
rotate->process({&img, &angle}, {&output});
ASSERT_EQ(output.Bytes(), params.expected_data.size() * sizeof(float));
ASSERT_EQ(output.Dims(), params.expected_shape);
const float* out_data = reinterpret_cast<float*>(output.Data());
for (int i = 0; i < output.NumElements(); ++i) {
EXPECT_NEAR(out_data[i], params.expected_data[i], kAbsError)
<< "out_data[" << i << "] = " << out_data[i] << ", expected_data[" << i
<< "] = " << params.expected_data[i];
}
}
INSTANTIATE_TEST_SUITE_P(
RotateTests, RotateTest,
testing::ValuesIn({
RotateTestParams{{1, 3, 3, 1},
{11, 12, 13,
21, 22, 23,
31, 32, 33},
90,
{31, 21, 11,
32, 22, 12,
33, 23, 13},
{1, 3, 3, 1}},
RotateTestParams{{1, 3, 3, 1},
{11, 12, 13,
21, 22, 23,
31, 32, 33},
180,
{33, 32, 31,
23, 22, 21,
13, 12, 11},
{1, 3, 3, 1}},
RotateTestParams{{1, 3, 3, 1},
{11, 12, 13,
21, 22, 23,
31, 32, 33},
270,
{13, 23, 33,
12, 22, 32,
11, 21, 31},
{1, 3, 3, 1}},
RotateTestParams{{1, 8, 8, 1},
{1, 1, 1, 1, 1, 1, 1, 1,
1, 0, 0, 0, 0, 0, 0, 1,
1, 0, 0, 0, 0, 0, 0, 1,
1, 0, 0, 0, 0, 0, 0, 1,
1, 0, 0, 0, 0, 0, 0, 1,
1, 0, 0, 0, 0, 0, 0, 1,
1, 0, 0, 0, 0, 0, 0, 1,
1, 1, 1, 1, 1, 1, 1, 1},
-45,
{
0.00f, 0.00f, 0.00f, 0.00f, 0.00f, 0.00f,
0.00f, 0.00f, 0.00f, 0.00f, 0.00f, 0.00f,
0.00f, 0.00f, 0.00f, 0.00f, 0.00f, 0.00f,
0.00f, 0.00f, 0.00f, 0.00f, 0.00f, 0.00f,
0.00f, 0.00f, 0.00f, 0.00f, 0.00f, 0.59f,
0.83f, 0.00f, 0.00f, 0.00f, 0.00f, 0.00f,
0.00f, 0.00f, 0.00f, 0.00f, 0.54f, 0.00f,
0.12f, 0.83f, 0.00f, 0.00f, 0.00f, 0.00f,
0.00f, 0.00f, 0.00f, 0.54f, 0.00f, 0.00f,
0.00f, 0.12f, 0.83f, 0.00f, 0.00f, 0.00f,
0.00f, 0.00f, 0.54f, 0.00f, 0.00f, 0.00f,
0.00f, 0.00f, 0.12f, 0.83f, 0.00f, 0.00f,
0.00f, 0.78f, 0.00f, 0.00f, 0.00f, 0.00f,
0.00f, 0.00f, 0.00f, 0.23f, 0.97f, 0.00f,
0.00f, 0.00f, 0.54f, 0.00f, 0.00f, 0.00f,
0.00f, 0.00f, 0.12f, 0.83f, 0.00f, 0.00f,
0.00f, 0.00f, 0.00f, 0.54f, 0.00f, 0.00f,
0.00f, 0.12f, 0.83f, 0.00f, 0.00f, 0.00f,
0.00f, 0.00f, 0.00f, 0.00f, 0.54f, 0.00f,
0.12f, 0.83f, 0.00f, 0.00f, 0.00f, 0.00f,
0.00f, 0.00f, 0.00f, 0.00f, 0.00f, 0.59f,
0.83f, 0.00f, 0.00f, 0.00f, 0.00f, 0.00f,
0.00f, 0.00f, 0.00f, 0.00f, 0.00f, 0.00f,
0.00f, 0.00f, 0.00f, 0.00f, 0.00f, 0.00f,
},
{1, 12, 12, 1}},
}));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/ml_adjacent/algo/rotate.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/ml_adjacent/algo/rotate_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b31f75b4-d54b-467f-9dff-33d3a865e33d | cpp | google/quiche | quic_tag | quiche/quic/core/quic_tag.cc | quiche/quic/core/quic_tag_test.cc | #include "quiche/quic/core/quic_tag.h"
#include <algorithm>
#include <string>
#include <vector>
#include "absl/base/macros.h"
#include "absl/strings/ascii.h"
#include "absl/strings/escaping.h"
#include "absl/strings/str_split.h"
#include "quiche/quic/platform/api/quic_flag_utils.h"
#include "quiche/quic/platform/api/quic_flags.h"
#include "quiche/common/quiche_text_utils.h"
namespace quic {
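// Returns the first tag from `our_tags` (scanned in our preference order) that
// also appears in `their_tags`; when found, optionally reports its index
// within `their_tags` via `out_index`.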
bool FindMutualQuicTag(const QuicTagVector& our_tags,
const QuicTagVector& their_tags, QuicTag* out_result,
size_t* out_index) {
const size_t num_our_tags = our_tags.size();
const size_t num_their_tags = their_tags.size();
for (size_t i = 0; i < num_our_tags; i++) {
for (size_t j = 0; j < num_their_tags; j++) {
if (our_tags[i] == their_tags[j]) {
*out_result = our_tags[i];
if (out_index != nullptr) {
*out_index = j;
}
return true;
}
}
}
return false;
}
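// Renders a tag as four ASCII characters when they are all printable, showing
// a trailing 0x00 or 0xff byte as a space (e.g. kServerNonceTag -> "SNO "),
// and otherwise falls back to lowercase hex such as "43484c1f".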
std::string QuicTagToString(QuicTag tag) {
if (tag == 0) {
return "0";
}
char chars[sizeof tag];
bool ascii = true;
const QuicTag orig_tag = tag;
for (size_t i = 0; i < ABSL_ARRAYSIZE(chars); i++) {
chars[i] = static_cast<char>(tag);
if ((chars[i] == 0 || chars[i] == '\xff') &&
i == ABSL_ARRAYSIZE(chars) - 1) {
chars[i] = ' ';
}
if (!absl::ascii_isprint(static_cast<unsigned char>(chars[i]))) {
ascii = false;
break;
}
tag >>= 8;
}
if (ascii) {
return std::string(chars, sizeof(chars));
}
return absl::BytesToHexString(absl::string_view(
reinterpret_cast<const char*>(&orig_tag), sizeof(orig_tag)));
}
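// Packs four bytes with `a` as the low-order byte, so on a little-endian host
// a memcpy of the tag yields {a, b, c, d} in memory order.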
uint32_t MakeQuicTag(uint8_t a, uint8_t b, uint8_t c, uint8_t d) {
return static_cast<uint32_t>(a) | static_cast<uint32_t>(b) << 8 |
static_cast<uint32_t>(c) << 16 | static_cast<uint32_t>(d) << 24;
}
bool ContainsQuicTag(const QuicTagVector& tag_vector, QuicTag tag) {
return std::find(tag_vector.begin(), tag_vector.end(), tag) !=
tag_vector.end();
}
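// Parses a human-readable tag: after trimming whitespace, an 8-character
// string is decoded as hex (e.g. "4d4e4fff"); anything else is packed
// byte-by-byte, with characters beyond the first four shifted out, so
// ParseQuicTag("ABCDE") == ParseQuicTag("ABCD") per the accompanying test.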
QuicTag ParseQuicTag(absl::string_view tag_string) {
quiche::QuicheTextUtils::RemoveLeadingAndTrailingWhitespace(&tag_string);
std::string tag_bytes;
if (tag_string.length() == 8) {
tag_bytes = absl::HexStringToBytes(tag_string);
tag_string = tag_bytes;
}
QuicTag tag = 0;
for (auto it = tag_string.rbegin(); it != tag_string.rend(); ++it) {
unsigned char token_char = static_cast<unsigned char>(*it);
tag <<= 8;
tag |= token_char;
}
return tag;
}
QuicTagVector ParseQuicTagVector(absl::string_view tags_string) {
QuicTagVector tag_vector;
quiche::QuicheTextUtils::RemoveLeadingAndTrailingWhitespace(&tags_string);
if (!tags_string.empty()) {
std::vector<absl::string_view> tag_strings =
absl::StrSplit(tags_string, ',');
for (absl::string_view tag_string : tag_strings) {
tag_vector.push_back(ParseQuicTag(tag_string));
}
}
return tag_vector;
}
} | #include "quiche/quic/core/quic_tag.h"
#include "quiche/quic/core/crypto/crypto_protocol.h"
#include "quiche/quic/platform/api/quic_flags.h"
#include "quiche/quic/platform/api/quic_test.h"
namespace quic {
namespace test {
namespace {
class QuicTagTest : public QuicTest {};
TEST_F(QuicTagTest, TagToString) {
EXPECT_EQ("SCFG", QuicTagToString(kSCFG));
EXPECT_EQ("SNO ", QuicTagToString(kServerNonceTag));
EXPECT_EQ("CRT ", QuicTagToString(kCertificateTag));
EXPECT_EQ("CHLO", QuicTagToString(MakeQuicTag('C', 'H', 'L', 'O')));
EXPECT_EQ("43484c1f", QuicTagToString(MakeQuicTag('C', 'H', 'L', '\x1f')));
}
TEST_F(QuicTagTest, MakeQuicTag) {
QuicTag tag = MakeQuicTag('A', 'B', 'C', 'D');
char bytes[4];
memcpy(bytes, &tag, 4);
EXPECT_EQ('A', bytes[0]);
EXPECT_EQ('B', bytes[1]);
EXPECT_EQ('C', bytes[2]);
EXPECT_EQ('D', bytes[3]);
}
TEST_F(QuicTagTest, ParseQuicTag) {
QuicTag tag_abcd = MakeQuicTag('A', 'B', 'C', 'D');
EXPECT_EQ(ParseQuicTag("ABCD"), tag_abcd);
EXPECT_EQ(ParseQuicTag("ABCDE"), tag_abcd);
QuicTag tag_efgh = MakeQuicTag('E', 'F', 'G', 'H');
EXPECT_EQ(ParseQuicTag("EFGH"), tag_efgh);
QuicTag tag_ijk = MakeQuicTag('I', 'J', 'K', 0);
EXPECT_EQ(ParseQuicTag("IJK"), tag_ijk);
QuicTag tag_l = MakeQuicTag('L', 0, 0, 0);
EXPECT_EQ(ParseQuicTag("L"), tag_l);
QuicTag tag_hex = MakeQuicTag('M', 'N', 'O', static_cast<char>(255));
EXPECT_EQ(ParseQuicTag("4d4e4fff"), tag_hex);
EXPECT_EQ(ParseQuicTag("4D4E4FFF"), tag_hex);
QuicTag tag_with_numbers = MakeQuicTag('P', 'Q', '1', '2');
EXPECT_EQ(ParseQuicTag("PQ12"), tag_with_numbers);
QuicTag tag_with_custom_chars = MakeQuicTag('r', '$', '_', '7');
EXPECT_EQ(ParseQuicTag("r$_7"), tag_with_custom_chars);
QuicTag tag_zero = 0;
EXPECT_EQ(ParseQuicTag(""), tag_zero);
QuicTagVector tag_vector;
EXPECT_EQ(ParseQuicTagVector(""), tag_vector);
EXPECT_EQ(ParseQuicTagVector(" "), tag_vector);
tag_vector.push_back(tag_abcd);
EXPECT_EQ(ParseQuicTagVector("ABCD"), tag_vector);
tag_vector.push_back(tag_efgh);
EXPECT_EQ(ParseQuicTagVector("ABCD,EFGH"), tag_vector);
tag_vector.push_back(tag_ijk);
EXPECT_EQ(ParseQuicTagVector("ABCD,EFGH,IJK"), tag_vector);
tag_vector.push_back(tag_l);
EXPECT_EQ(ParseQuicTagVector("ABCD,EFGH,IJK,L"), tag_vector);
tag_vector.push_back(tag_hex);
EXPECT_EQ(ParseQuicTagVector("ABCD,EFGH,IJK,L,4d4e4fff"), tag_vector);
tag_vector.push_back(tag_with_numbers);
EXPECT_EQ(ParseQuicTagVector("ABCD,EFGH,IJK,L,4d4e4fff,PQ12"), tag_vector);
tag_vector.push_back(tag_with_custom_chars);
EXPECT_EQ(ParseQuicTagVector("ABCD,EFGH,IJK,L,4d4e4fff,PQ12,r$_7"),
tag_vector);
tag_vector.push_back(tag_zero);
EXPECT_EQ(ParseQuicTagVector("ABCD,EFGH,IJK,L,4d4e4fff,PQ12,r$_7,"),
tag_vector);
}
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/quic_tag.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/quic_tag_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
096bf5ca-4d46-4bb3-b4a0-0aa0eeb250b7 | cpp | tensorflow/tensorflow | list_flex_ops | tensorflow/lite/tools/list_flex_ops.cc | tensorflow/lite/tools/list_flex_ops_test.cc | #include "tensorflow/lite/tools/list_flex_ops.h"
#include <cstring>
#include <fstream>
#include <sstream>
#include <string>
#include <vector>
#include "flatbuffers/flexbuffers.h"
#include "json/json.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/util/device_name_utils.h"
#include "tensorflow/lite/schema/schema_utils.h"
#include "tensorflow/lite/util.h"
namespace tflite {
namespace flex {
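// Serializes the collected (op name, kernel class) pairs as a JSON array of
// two-element arrays, e.g.
// [["AddV2","BinaryOp<CPUDevice, functor::add<float>>"]]; Json::FastWriter
// appends a trailing newline.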
std::string OpListToJSONString(const OpKernelSet& flex_ops) {
Json::Value result(Json::arrayValue);
for (const OpKernel& op : flex_ops) {
Json::Value op_kernel(Json::arrayValue);
op_kernel.append(Json::Value(op.op_name));
op_kernel.append(Json::Value(op.kernel_name));
result.append(op_kernel);
}
return Json::FastWriter().write(result);
}
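// Looks up the TensorFlow kernel class registered for `node_def` on the device
// named inside it (AddFlexOpsFromModel pins this to "/CPU:0"); any lookup
// failure is fatal.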
string FindTensorflowKernelClass(tensorflow::NodeDef* node_def) {
if (!node_def || node_def->op().empty()) {
LOG(FATAL) << "Invalid NodeDef";
}
const tensorflow::OpRegistrationData* op_reg_data;
auto status =
tensorflow::OpRegistry::Global()->LookUp(node_def->op(), &op_reg_data);
if (!status.ok()) {
LOG(FATAL) << "Op " << node_def->op() << " not found: " << status;
}
AddDefaultsToNodeDef(op_reg_data->op_def, node_def);
tensorflow::DeviceNameUtils::ParsedName parsed_name;
if (!tensorflow::DeviceNameUtils::ParseFullName(node_def->device(),
&parsed_name)) {
LOG(FATAL) << "Failed to parse device from node_def: "
<< node_def->ShortDebugString();
}
string class_name;
if (!tensorflow::FindKernelDef(
tensorflow::DeviceType(parsed_name.type.c_str()), *node_def,
nullptr , &class_name)
.ok()) {
LOG(FATAL) << "Failed to find kernel class for op: " << node_def->op();
}
return class_name;
}
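// Walks every operator in every subgraph, selects CUSTOM ops whose custom code
// carries the flex prefix, parses the TensorFlow NodeDef embedded in the
// flexbuffer custom options, and records the TF op name together with its CPU
// kernel class.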
void AddFlexOpsFromModel(const tflite::Model* model, OpKernelSet* flex_ops) {
auto* subgraphs = model->subgraphs();
if (!subgraphs) return;
for (int subgraph_index = 0; subgraph_index < subgraphs->size();
++subgraph_index) {
const tflite::SubGraph* subgraph = subgraphs->Get(subgraph_index);
auto* operators = subgraph->operators();
auto* opcodes = model->operator_codes();
if (!operators || !opcodes) continue;
for (int i = 0; i < operators->size(); ++i) {
const tflite::Operator* op = operators->Get(i);
const tflite::OperatorCode* opcode = opcodes->Get(op->opcode_index());
if (tflite::GetBuiltinCode(opcode) != tflite::BuiltinOperator_CUSTOM ||
!tflite::IsFlexOp(opcode->custom_code()->c_str())) {
continue;
}
std::string flex_op_name(opcode->custom_code()->c_str());
std::string tf_op_name =
flex_op_name.substr(strlen(tflite::kFlexCustomCodePrefix));
if (op->custom_options_format() !=
tflite::CustomOptionsFormat_FLEXBUFFERS) {
LOG(FATAL) << "Invalid CustomOptionsFormat";
}
const flatbuffers::Vector<uint8_t>* custom_opt_bytes =
op->custom_options();
if (custom_opt_bytes && custom_opt_bytes->size()) {
const flexbuffers::Vector& v =
flexbuffers::GetRoot(custom_opt_bytes->data(),
custom_opt_bytes->size())
.AsVector();
std::string nodedef_str = v[1].AsString().str();
tensorflow::NodeDef nodedef;
if (nodedef_str.empty() || !nodedef.ParseFromString(nodedef_str)) {
LOG(FATAL) << "Failed to parse data into a valid NodeDef";
}
*nodedef.mutable_device() = "/CPU:0";
std::string kernel_class = FindTensorflowKernelClass(&nodedef);
flex_ops->insert({tf_op_name, kernel_class});
}
}
}
}
}
} | #include "tensorflow/lite/tools/list_flex_ops.h"
#include <cstdint>
#include <memory>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "flatbuffers/flexbuffers.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/resource_loader.h"
#include "tensorflow/lite/kernels/test_util.h"
namespace tflite {
namespace flex {
class FlexOpsListTest : public ::testing::Test {
protected:
FlexOpsListTest() {}
void ReadOps(const string& path) {
std::string full_path = tensorflow::GetDataDependencyFilepath(path);
auto model = FlatBufferModel::BuildFromFile(full_path.data());
AddFlexOpsFromModel(model->GetModel(), &flex_ops_);
output_text_ = OpListToJSONString(flex_ops_);
}
void ReadOps(const tflite::Model* model) {
AddFlexOpsFromModel(model, &flex_ops_);
output_text_ = OpListToJSONString(flex_ops_);
}
std::string output_text_;
OpKernelSet flex_ops_;
};
TfLiteRegistration* Register_TEST() {
static TfLiteRegistration r = {nullptr, nullptr, nullptr, nullptr};
return &r;
}
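// Builds the flexbuffer custom-options payload used by Flex ops: a vector
// holding the op name followed by the serialized NodeDef.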
std::vector<uint8_t> CreateFlexCustomOptions(std::string nodedef_raw_string) {
tensorflow::NodeDef node_def;
tensorflow::protobuf::TextFormat::ParseFromString(nodedef_raw_string,
&node_def);
std::string node_def_str = node_def.SerializeAsString();
auto flex_builder = std::make_unique<flexbuffers::Builder>();
flex_builder->Vector([&]() {
flex_builder->String(node_def.op());
flex_builder->String(node_def_str);
});
flex_builder->Finish();
return flex_builder->GetBuffer();
}
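// Minimal two-input model wrapping a single custom (Flex) op.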
class FlexOpModel : public SingleOpModel {
public:
FlexOpModel(const std::string& op_name, const TensorData& input1,
const TensorData& input2, const TensorType& output,
const std::vector<uint8_t>& custom_options) {
input1_ = AddInput(input1);
input2_ = AddInput(input2);
output_ = AddOutput(output);
SetCustomOp(op_name, custom_options, Register_TEST);
BuildInterpreter({GetShape(input1_), GetShape(input2_)});
}
protected:
int input1_;
int input2_;
int output_;
};
TEST_F(FlexOpsListTest, TestModelsNoFlex) {
ReadOps("tensorflow/lite/testdata/test_model.bin");
EXPECT_EQ(output_text_, "[]\n");
}
TEST_F(FlexOpsListTest, TestBrokenModel) {
EXPECT_DEATH_IF_SUPPORTED(
ReadOps("tensorflow/lite/testdata/test_model_broken.bin"), "");
}
TEST_F(FlexOpsListTest, TestZeroSubgraphs) {
ReadOps("tensorflow/lite/testdata/0_subgraphs.bin");
EXPECT_EQ(output_text_, "[]\n");
}
TEST_F(FlexOpsListTest, TestFlexAdd) {
ReadOps("tensorflow/lite/testdata/multi_add_flex.bin");
EXPECT_EQ(output_text_,
"[[\"AddV2\",\"BinaryOp<CPUDevice, functor::add<float>>\"]]\n");
}
TEST_F(FlexOpsListTest, TestTwoModel) {
ReadOps("tensorflow/lite/testdata/multi_add_flex.bin");
ReadOps("tensorflow/lite/testdata/softplus_flex.bin");
EXPECT_EQ(output_text_,
"[[\"AddV2\",\"BinaryOp<CPUDevice, "
"functor::add<float>>\"],[\"Softplus\",\"SoftplusOp<CPUDevice, "
"float>\"]]\n");
}
TEST_F(FlexOpsListTest, TestDuplicatedOp) {
ReadOps("tensorflow/lite/testdata/multi_add_flex.bin");
ReadOps("tensorflow/lite/testdata/multi_add_flex.bin");
EXPECT_EQ(output_text_,
"[[\"AddV2\",\"BinaryOp<CPUDevice, functor::add<float>>\"]]\n");
}
TEST_F(FlexOpsListTest, TestInvalidCustomOptions) {
std::vector<uint8_t> random_custom_options(20);
FlexOpModel max_model("FlexAdd", {TensorType_FLOAT32, {3, 1, 2, 2}},
{TensorType_FLOAT32, {3, 1, 2, 1}}, TensorType_FLOAT32,
random_custom_options);
EXPECT_DEATH_IF_SUPPORTED(
ReadOps(tflite::GetModel(max_model.GetModelBuffer())),
"Failed to parse data into a valid NodeDef");
}
TEST_F(FlexOpsListTest, TestOpNameEmpty) {
std::string nodedef_raw_str =
"name: \"node_1\""
"op: \"\""
"input: [ \"b\", \"c\" ]"
"attr: { key: \"T\" value: { type: DT_FLOAT } }";
std::string random_fieldname = "random string";
FlexOpModel max_model("FlexAdd", {TensorType_FLOAT32, {3, 1, 2, 2}},
{TensorType_FLOAT32, {3, 1, 2, 1}}, TensorType_FLOAT32,
CreateFlexCustomOptions(nodedef_raw_str));
EXPECT_DEATH_IF_SUPPORTED(
ReadOps(tflite::GetModel(max_model.GetModelBuffer())), "Invalid NodeDef");
}
TEST_F(FlexOpsListTest, TestOpNotFound) {
std::string nodedef_raw_str =
"name: \"node_1\""
"op: \"FlexInvalidOp\""
"input: [ \"b\", \"c\" ]"
"attr: { key: \"T\" value: { type: DT_FLOAT } }";
FlexOpModel max_model("FlexAdd", {TensorType_FLOAT32, {3, 1, 2, 2}},
{TensorType_FLOAT32, {3, 1, 2, 1}}, TensorType_FLOAT32,
CreateFlexCustomOptions(nodedef_raw_str));
EXPECT_DEATH_IF_SUPPORTED(
ReadOps(tflite::GetModel(max_model.GetModelBuffer())),
"Op FlexInvalidOp not found");
}
TEST_F(FlexOpsListTest, TestKernelNotFound) {
std::string nodedef_raw_str =
"name: \"node_1\""
"op: \"Add\""
"input: [ \"b\", \"c\" ]"
"attr: { key: \"T\" value: { type: DT_BOOL } }";
FlexOpModel max_model("FlexAdd", {TensorType_FLOAT32, {3, 1, 2, 2}},
{TensorType_FLOAT32, {3, 1, 2, 1}}, TensorType_FLOAT32,
CreateFlexCustomOptions(nodedef_raw_str));
EXPECT_DEATH_IF_SUPPORTED(
ReadOps(tflite::GetModel(max_model.GetModelBuffer())),
"Failed to find kernel class for op: Add");
}
TEST_F(FlexOpsListTest, TestFlexAddWithSingleOpModel) {
std::string nodedef_raw_str =
"name: \"node_1\""
"op: \"Add\""
"input: [ \"b\", \"c\" ]"
"attr: { key: \"T\" value: { type: DT_FLOAT } }";
FlexOpModel max_model("FlexAdd", {TensorType_FLOAT32, {3, 1, 2, 2}},
{TensorType_FLOAT32, {3, 1, 2, 1}}, TensorType_FLOAT32,
CreateFlexCustomOptions(nodedef_raw_str));
ReadOps(tflite::GetModel(max_model.GetModelBuffer()));
EXPECT_EQ(output_text_,
"[[\"Add\",\"BinaryOp<CPUDevice, functor::add<float>>\"]]\n");
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/tools/list_flex_ops.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/tools/list_flex_ops_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
d6ca0ddf-27db-4719-896a-96d263713763 | cpp | abseil/abseil-cpp | ascii | absl/strings/ascii.cc | absl/strings/ascii_test.cc | #include "absl/strings/ascii.h"
#include <climits>
#include <cstddef>
#include <cstring>
#include <string>
#include "absl/base/attributes.h"
#include "absl/base/config.h"
#include "absl/base/nullability.h"
#include "absl/base/optimization.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace ascii_internal {
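// Table of character property bits, indexed by unsigned char value. Bit
// meanings, consistent with the entries below: 0x01 alpha, 0x04 alnum,
// 0x08 space, 0x10 punct, 0x20 blank, 0x40 cntrl, 0x80 xdigit. Only the
// first 128 entries are spelled out; 0x80-0xff are zero-initialized.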
ABSL_DLL const unsigned char kPropertyBits[256] = {
0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
0x40, 0x68, 0x48, 0x48, 0x48, 0x48, 0x40, 0x40,
0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
0x28, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10,
0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10,
0x84, 0x84, 0x84, 0x84, 0x84, 0x84, 0x84, 0x84,
0x84, 0x84, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10,
0x10, 0x85, 0x85, 0x85, 0x85, 0x85, 0x85, 0x05,
0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05,
0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05,
0x05, 0x05, 0x05, 0x10, 0x10, 0x10, 0x10, 0x10,
0x10, 0x85, 0x85, 0x85, 0x85, 0x85, 0x85, 0x05,
0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05,
0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05,
0x05, 0x05, 0x05, 0x10, 0x10, 0x10, 0x10, 0x40,
};
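// Maps each char value to its ASCII-lowercased counterpart; everything
// outside 'A'-'Z' maps to itself.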
ABSL_DLL const char kToLower[256] = {
'\x00', '\x01', '\x02', '\x03', '\x04', '\x05', '\x06', '\x07',
'\x08', '\x09', '\x0a', '\x0b', '\x0c', '\x0d', '\x0e', '\x0f',
'\x10', '\x11', '\x12', '\x13', '\x14', '\x15', '\x16', '\x17',
'\x18', '\x19', '\x1a', '\x1b', '\x1c', '\x1d', '\x1e', '\x1f',
'\x20', '\x21', '\x22', '\x23', '\x24', '\x25', '\x26', '\x27',
'\x28', '\x29', '\x2a', '\x2b', '\x2c', '\x2d', '\x2e', '\x2f',
'\x30', '\x31', '\x32', '\x33', '\x34', '\x35', '\x36', '\x37',
'\x38', '\x39', '\x3a', '\x3b', '\x3c', '\x3d', '\x3e', '\x3f',
'\x40', 'a', 'b', 'c', 'd', 'e', 'f', 'g',
'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o',
'p', 'q', 'r', 's', 't', 'u', 'v', 'w',
'x', 'y', 'z', '\x5b', '\x5c', '\x5d', '\x5e', '\x5f',
'\x60', '\x61', '\x62', '\x63', '\x64', '\x65', '\x66', '\x67',
'\x68', '\x69', '\x6a', '\x6b', '\x6c', '\x6d', '\x6e', '\x6f',
'\x70', '\x71', '\x72', '\x73', '\x74', '\x75', '\x76', '\x77',
'\x78', '\x79', '\x7a', '\x7b', '\x7c', '\x7d', '\x7e', '\x7f',
'\x80', '\x81', '\x82', '\x83', '\x84', '\x85', '\x86', '\x87',
'\x88', '\x89', '\x8a', '\x8b', '\x8c', '\x8d', '\x8e', '\x8f',
'\x90', '\x91', '\x92', '\x93', '\x94', '\x95', '\x96', '\x97',
'\x98', '\x99', '\x9a', '\x9b', '\x9c', '\x9d', '\x9e', '\x9f',
'\xa0', '\xa1', '\xa2', '\xa3', '\xa4', '\xa5', '\xa6', '\xa7',
'\xa8', '\xa9', '\xaa', '\xab', '\xac', '\xad', '\xae', '\xaf',
'\xb0', '\xb1', '\xb2', '\xb3', '\xb4', '\xb5', '\xb6', '\xb7',
'\xb8', '\xb9', '\xba', '\xbb', '\xbc', '\xbd', '\xbe', '\xbf',
'\xc0', '\xc1', '\xc2', '\xc3', '\xc4', '\xc5', '\xc6', '\xc7',
'\xc8', '\xc9', '\xca', '\xcb', '\xcc', '\xcd', '\xce', '\xcf',
'\xd0', '\xd1', '\xd2', '\xd3', '\xd4', '\xd5', '\xd6', '\xd7',
'\xd8', '\xd9', '\xda', '\xdb', '\xdc', '\xdd', '\xde', '\xdf',
'\xe0', '\xe1', '\xe2', '\xe3', '\xe4', '\xe5', '\xe6', '\xe7',
'\xe8', '\xe9', '\xea', '\xeb', '\xec', '\xed', '\xee', '\xef',
'\xf0', '\xf1', '\xf2', '\xf3', '\xf4', '\xf5', '\xf6', '\xf7',
'\xf8', '\xf9', '\xfa', '\xfb', '\xfc', '\xfd', '\xfe', '\xff',
};
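// Maps each char value to its ASCII-uppercased counterpart; everything
// outside 'a'-'z' maps to itself.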
ABSL_DLL const char kToUpper[256] = {
'\x00', '\x01', '\x02', '\x03', '\x04', '\x05', '\x06', '\x07',
'\x08', '\x09', '\x0a', '\x0b', '\x0c', '\x0d', '\x0e', '\x0f',
'\x10', '\x11', '\x12', '\x13', '\x14', '\x15', '\x16', '\x17',
'\x18', '\x19', '\x1a', '\x1b', '\x1c', '\x1d', '\x1e', '\x1f',
'\x20', '\x21', '\x22', '\x23', '\x24', '\x25', '\x26', '\x27',
'\x28', '\x29', '\x2a', '\x2b', '\x2c', '\x2d', '\x2e', '\x2f',
'\x30', '\x31', '\x32', '\x33', '\x34', '\x35', '\x36', '\x37',
'\x38', '\x39', '\x3a', '\x3b', '\x3c', '\x3d', '\x3e', '\x3f',
'\x40', '\x41', '\x42', '\x43', '\x44', '\x45', '\x46', '\x47',
'\x48', '\x49', '\x4a', '\x4b', '\x4c', '\x4d', '\x4e', '\x4f',
'\x50', '\x51', '\x52', '\x53', '\x54', '\x55', '\x56', '\x57',
'\x58', '\x59', '\x5a', '\x5b', '\x5c', '\x5d', '\x5e', '\x5f',
'\x60', 'A', 'B', 'C', 'D', 'E', 'F', 'G',
'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O',
'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W',
'X', 'Y', 'Z', '\x7b', '\x7c', '\x7d', '\x7e', '\x7f',
'\x80', '\x81', '\x82', '\x83', '\x84', '\x85', '\x86', '\x87',
'\x88', '\x89', '\x8a', '\x8b', '\x8c', '\x8d', '\x8e', '\x8f',
'\x90', '\x91', '\x92', '\x93', '\x94', '\x95', '\x96', '\x97',
'\x98', '\x99', '\x9a', '\x9b', '\x9c', '\x9d', '\x9e', '\x9f',
'\xa0', '\xa1', '\xa2', '\xa3', '\xa4', '\xa5', '\xa6', '\xa7',
'\xa8', '\xa9', '\xaa', '\xab', '\xac', '\xad', '\xae', '\xaf',
'\xb0', '\xb1', '\xb2', '\xb3', '\xb4', '\xb5', '\xb6', '\xb7',
'\xb8', '\xb9', '\xba', '\xbb', '\xbc', '\xbd', '\xbe', '\xbf',
'\xc0', '\xc1', '\xc2', '\xc3', '\xc4', '\xc5', '\xc6', '\xc7',
'\xc8', '\xc9', '\xca', '\xcb', '\xcc', '\xcd', '\xce', '\xcf',
'\xd0', '\xd1', '\xd2', '\xd3', '\xd4', '\xd5', '\xd6', '\xd7',
'\xd8', '\xd9', '\xda', '\xdb', '\xdc', '\xdd', '\xde', '\xdf',
'\xe0', '\xe1', '\xe2', '\xe3', '\xe4', '\xe5', '\xe6', '\xe7',
'\xe8', '\xe9', '\xea', '\xeb', '\xec', '\xed', '\xee', '\xef',
'\xf0', '\xf1', '\xf2', '\xf3', '\xf4', '\xf5', '\xf6', '\xf7',
'\xf8', '\xf9', '\xfa', '\xfb', '\xfc', '\xfd', '\xfe', '\xff',
};
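// Returns true iff `c` falls in 'a'-'z' (when ToUpper) or 'A'-'Z'
// (when !ToUpper). Works branchlessly: the unsigned subtraction shifts the
// 26-letter range down to start at SCHAR_MIN, so a single signed
// comparison against SCHAR_MIN + 26 tests membership.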
template <bool ToUpper>
constexpr bool AsciiInAZRange(unsigned char c) {
constexpr unsigned char sub = (ToUpper ? 'a' : 'A') - SCHAR_MIN;
constexpr signed char threshold = SCHAR_MIN + 26;
unsigned char u = c - sub;
return static_cast<signed char>(u) < threshold;
}
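// Case-folds `size` bytes from `src` into `dst` (the two may alias).
// Letters in the target range get the ASCII case bit toggled by XOR with
// 'a' ^ 'A' (0x20); every other byte passes through unchanged.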
template <bool ToUpper>
ABSL_ATTRIBUTE_ALWAYS_INLINE inline constexpr void AsciiStrCaseFoldImpl(
absl::Nonnull<char*> dst, absl::Nullable<const char*> src, size_t size) {
constexpr unsigned char kAsciiCaseBitFlip = 'a' ^ 'A';
for (size_t i = 0; i < size; ++i) {
unsigned char v = static_cast<unsigned char>(src[i]);
v ^= AsciiInAZRange<ToUpper>(v) ? kAsciiCaseBitFlip : 0;
dst[i] = static_cast<char>(v);
}
}
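// Inputs shorter than this threshold run the folding loop inline; longer
// inputs are dispatched to an outlined copy where ABSL_ASSUME(size >=
// kCaseFoldThreshold) can help the compiler emit a vectorized loop.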
constexpr size_t kCaseFoldThreshold = 16;
template <bool ToUpper>
ABSL_ATTRIBUTE_NOINLINE constexpr void AsciiStrCaseFoldLong(
absl::Nonnull<char*> dst, absl::Nullable<const char*> src, size_t size) {
ABSL_ASSUME(size >= kCaseFoldThreshold);
AsciiStrCaseFoldImpl<ToUpper>(dst, src, size);
}
template <bool ToUpper>
constexpr void AsciiStrCaseFold(absl::Nonnull<char*> dst,
absl::Nullable<const char*> src, size_t size) {
size < kCaseFoldThreshold ? AsciiStrCaseFoldImpl<ToUpper>(dst, src, size)
: AsciiStrCaseFoldLong<ToUpper>(dst, src, size);
}
void AsciiStrToLower(absl::Nonnull<char*> dst, absl::Nullable<const char*> src,
size_t n) {
return AsciiStrCaseFold<false>(dst, src, n);
}
void AsciiStrToUpper(absl::Nonnull<char*> dst, absl::Nullable<const char*> src,
size_t n) {
return AsciiStrCaseFold<true>(dst, src, n);
}
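// Compile-time self-check: case-folds every char value in both directions
// and returns the first index whose result disagrees with a plain
// range-based conversion (0 when everything matches; a mismatch at index 0
// itself is reported as num_chars so it stays distinguishable).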
static constexpr size_t ValidateAsciiCasefold() {
constexpr size_t num_chars = 1 + CHAR_MAX - CHAR_MIN;
size_t incorrect_index = 0;
char lowered[num_chars] = {};
char uppered[num_chars] = {};
for (unsigned int i = 0; i < num_chars; ++i) {
uppered[i] = lowered[i] = static_cast<char>(i);
}
AsciiStrCaseFold<false>(&lowered[0], &lowered[0], num_chars);
AsciiStrCaseFold<true>(&uppered[0], &uppered[0], num_chars);
for (size_t i = 0; i < num_chars; ++i) {
const char ch = static_cast<char>(i),
ch_upper = ('a' <= ch && ch <= 'z' ? 'A' + (ch - 'a') : ch),
ch_lower = ('A' <= ch && ch <= 'Z' ? 'a' + (ch - 'A') : ch);
if (uppered[i] != ch_upper || lowered[i] != ch_lower) {
incorrect_index = i > 0 ? i : num_chars;
break;
}
}
return incorrect_index;
}
static_assert(ValidateAsciiCasefold() == 0, "error in case conversion");
}
void AsciiStrToLower(absl::Nonnull<std::string*> s) {
char* p = &(*s)[0];
return ascii_internal::AsciiStrCaseFold<false>(p, p, s->size());
}
void AsciiStrToUpper(absl::Nonnull<std::string*> s) {
char* p = &(*s)[0];
return ascii_internal::AsciiStrCaseFold<true>(p, p, s->size());
}
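// Strips leading and trailing ASCII whitespace and collapses each interior
// run of whitespace to a single character, rewriting `*str` in place.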
void RemoveExtraAsciiWhitespace(absl::Nonnull<std::string*> str) {
auto stripped = StripAsciiWhitespace(*str);
if (stripped.empty()) {
str->clear();
return;
}
auto input_it = stripped.begin();
auto input_end = stripped.end();
auto output_it = &(*str)[0];
bool is_ws = false;
for (; input_it < input_end; ++input_it) {
if (is_ws) {
is_ws = absl::ascii_isspace(static_cast<unsigned char>(*input_it));
if (is_ws) --output_it;
} else {
is_ws = absl::ascii_isspace(static_cast<unsigned char>(*input_it));
}
*output_it = *input_it;
++output_it;
}
str->erase(static_cast<size_t>(output_it - &(*str)[0]));
}
ABSL_NAMESPACE_END
} | #include "absl/strings/ascii.h"
#include <algorithm>
#include <cctype>
#include <clocale>
#include <cstring>
#include <string>
#include "gtest/gtest.h"
#include "absl/base/macros.h"
#include "absl/strings/string_view.h"
namespace {
TEST(AsciiIsFoo, All) {
for (int i = 0; i < 256; i++) {
const auto c = static_cast<unsigned char>(i);
if ((c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z'))
EXPECT_TRUE(absl::ascii_isalpha(c)) << ": failed on " << c;
else
EXPECT_TRUE(!absl::ascii_isalpha(c)) << ": failed on " << c;
}
for (int i = 0; i < 256; i++) {
const auto c = static_cast<unsigned char>(i);
if ((c >= '0' && c <= '9'))
EXPECT_TRUE(absl::ascii_isdigit(c)) << ": failed on " << c;
else
EXPECT_TRUE(!absl::ascii_isdigit(c)) << ": failed on " << c;
}
for (int i = 0; i < 256; i++) {
const auto c = static_cast<unsigned char>(i);
if (absl::ascii_isalpha(c) || absl::ascii_isdigit(c))
EXPECT_TRUE(absl::ascii_isalnum(c)) << ": failed on " << c;
else
EXPECT_TRUE(!absl::ascii_isalnum(c)) << ": failed on " << c;
}
for (int i = 0; i < 256; i++) {
const auto c = static_cast<unsigned char>(i);
if (i != '\0' && strchr(" \r\n\t\v\f", i))
EXPECT_TRUE(absl::ascii_isspace(c)) << ": failed on " << c;
else
EXPECT_TRUE(!absl::ascii_isspace(c)) << ": failed on " << c;
}
for (int i = 0; i < 256; i++) {
const auto c = static_cast<unsigned char>(i);
if (i >= 32 && i < 127)
EXPECT_TRUE(absl::ascii_isprint(c)) << ": failed on " << c;
else
EXPECT_TRUE(!absl::ascii_isprint(c)) << ": failed on " << c;
}
for (int i = 0; i < 256; i++) {
const auto c = static_cast<unsigned char>(i);
if (absl::ascii_isprint(c) && !absl::ascii_isspace(c) &&
!absl::ascii_isalnum(c)) {
EXPECT_TRUE(absl::ascii_ispunct(c)) << ": failed on " << c;
} else {
EXPECT_TRUE(!absl::ascii_ispunct(c)) << ": failed on " << c;
}
}
for (int i = 0; i < 256; i++) {
const auto c = static_cast<unsigned char>(i);
if (i == ' ' || i == '\t')
EXPECT_TRUE(absl::ascii_isblank(c)) << ": failed on " << c;
else
EXPECT_TRUE(!absl::ascii_isblank(c)) << ": failed on " << c;
}
for (int i = 0; i < 256; i++) {
const auto c = static_cast<unsigned char>(i);
if (i < 32 || i == 127)
EXPECT_TRUE(absl::ascii_iscntrl(c)) << ": failed on " << c;
else
EXPECT_TRUE(!absl::ascii_iscntrl(c)) << ": failed on " << c;
}
for (int i = 0; i < 256; i++) {
const auto c = static_cast<unsigned char>(i);
if (absl::ascii_isdigit(c) || (i >= 'A' && i <= 'F') ||
(i >= 'a' && i <= 'f')) {
EXPECT_TRUE(absl::ascii_isxdigit(c)) << ": failed on " << c;
} else {
EXPECT_TRUE(!absl::ascii_isxdigit(c)) << ": failed on " << c;
}
}
for (int i = 0; i < 256; i++) {
const auto c = static_cast<unsigned char>(i);
if (i > 32 && i < 127)
EXPECT_TRUE(absl::ascii_isgraph(c)) << ": failed on " << c;
else
EXPECT_TRUE(!absl::ascii_isgraph(c)) << ": failed on " << c;
}
for (int i = 0; i < 256; i++) {
const auto c = static_cast<unsigned char>(i);
if (i >= 'A' && i <= 'Z')
EXPECT_TRUE(absl::ascii_isupper(c)) << ": failed on " << c;
else
EXPECT_TRUE(!absl::ascii_isupper(c)) << ": failed on " << c;
}
for (int i = 0; i < 256; i++) {
const auto c = static_cast<unsigned char>(i);
if (i >= 'a' && i <= 'z')
EXPECT_TRUE(absl::ascii_islower(c)) << ": failed on " << c;
else
EXPECT_TRUE(!absl::ascii_islower(c)) << ": failed on " << c;
}
for (unsigned char c = 0; c < 128; c++) {
EXPECT_TRUE(absl::ascii_isascii(c)) << ": failed on " << c;
}
for (int i = 128; i < 256; i++) {
const auto c = static_cast<unsigned char>(i);
EXPECT_TRUE(!absl::ascii_isascii(c)) << ": failed on " << c;
}
}
TEST(AsciiIsFoo, SameAsIsFoo) {
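  // Pin the "C" locale so the <cctype> reference functions use plain ASCII
  // rules; skipped on Android.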
#ifndef __ANDROID__
const char* old_locale = setlocale(LC_CTYPE, "C");
ASSERT_TRUE(old_locale != nullptr);
#endif
for (int i = 0; i < 256; i++) {
const auto c = static_cast<unsigned char>(i);
EXPECT_EQ(isalpha(c) != 0, absl::ascii_isalpha(c)) << c;
EXPECT_EQ(isdigit(c) != 0, absl::ascii_isdigit(c)) << c;
EXPECT_EQ(isalnum(c) != 0, absl::ascii_isalnum(c)) << c;
EXPECT_EQ(isspace(c) != 0, absl::ascii_isspace(c)) << c;
EXPECT_EQ(ispunct(c) != 0, absl::ascii_ispunct(c)) << c;
EXPECT_EQ(isblank(c) != 0, absl::ascii_isblank(c)) << c;
EXPECT_EQ(iscntrl(c) != 0, absl::ascii_iscntrl(c)) << c;
EXPECT_EQ(isxdigit(c) != 0, absl::ascii_isxdigit(c)) << c;
EXPECT_EQ(isprint(c) != 0, absl::ascii_isprint(c)) << c;
EXPECT_EQ(isgraph(c) != 0, absl::ascii_isgraph(c)) << c;
EXPECT_EQ(isupper(c) != 0, absl::ascii_isupper(c)) << c;
EXPECT_EQ(islower(c) != 0, absl::ascii_islower(c)) << c;
EXPECT_EQ(isascii(c) != 0, absl::ascii_isascii(c)) << c;
}
#ifndef __ANDROID__
ASSERT_TRUE(setlocale(LC_CTYPE, old_locale));
#endif
}
TEST(AsciiToFoo, All) {
#ifndef __ANDROID__
const char* old_locale = setlocale(LC_CTYPE, "C");
ASSERT_TRUE(old_locale != nullptr);
#endif
for (int i = 0; i < 256; i++) {
const auto c = static_cast<unsigned char>(i);
if (absl::ascii_islower(c))
EXPECT_EQ(absl::ascii_toupper(c), 'A' + (i - 'a')) << c;
else
EXPECT_EQ(absl::ascii_toupper(c), static_cast<char>(i)) << c;
if (absl::ascii_isupper(c))
EXPECT_EQ(absl::ascii_tolower(c), 'a' + (i - 'A')) << c;
else
EXPECT_EQ(absl::ascii_tolower(c), static_cast<char>(i)) << c;
EXPECT_EQ(static_cast<char>(tolower(i)), absl::ascii_tolower(c)) << c;
EXPECT_EQ(static_cast<char>(toupper(i)), absl::ascii_toupper(c)) << c;
}
#ifndef __ANDROID__
ASSERT_TRUE(setlocale(LC_CTYPE, old_locale));
#endif
}
TEST(AsciiStrTo, Lower) {
const char buf[] = "ABCDEF";
const std::string str("GHIJKL");
const std::string str2("MNOPQR");
const absl::string_view sp(str2);
const std::string long_str("ABCDEFGHIJKLMNOPQRSTUVWXYZ1!a");
std::string mutable_str("_`?@[{AMNOPQRSTUVWXYZ");
auto fun = []() -> std::string { return "PQRSTU"; };
EXPECT_EQ("abcdef", absl::AsciiStrToLower(buf));
EXPECT_EQ("ghijkl", absl::AsciiStrToLower(str));
EXPECT_EQ("mnopqr", absl::AsciiStrToLower(sp));
EXPECT_EQ("abcdefghijklmnopqrstuvwxyz1!a", absl::AsciiStrToLower(long_str));
EXPECT_EQ("pqrstu", absl::AsciiStrToLower(fun()));
EXPECT_EQ("", absl::AsciiStrToLower(absl::string_view()));
absl::AsciiStrToLower(&mutable_str);
EXPECT_EQ("_`?@[{amnopqrstuvwxyz", mutable_str);
char mutable_buf[] = "Mutable";
std::transform(mutable_buf, mutable_buf + strlen(mutable_buf),
mutable_buf, absl::ascii_tolower);
EXPECT_STREQ("mutable", mutable_buf);
}
TEST(AsciiStrTo, Upper) {
const char buf[] = "abcdef";
const std::string str("ghijkl");
const std::string str2("_`?@[{amnopqrstuvwxyz");
const absl::string_view sp(str2);
const std::string long_str("abcdefghijklmnopqrstuvwxyz1!A");
auto fun = []() -> std::string { return "pqrstu"; };
EXPECT_EQ("ABCDEF", absl::AsciiStrToUpper(buf));
EXPECT_EQ("GHIJKL", absl::AsciiStrToUpper(str));
EXPECT_EQ("_`?@[{AMNOPQRSTUVWXYZ", absl::AsciiStrToUpper(sp));
EXPECT_EQ("ABCDEFGHIJKLMNOPQRSTUVWXYZ1!A", absl::AsciiStrToUpper(long_str));
EXPECT_EQ("PQRSTU", absl::AsciiStrToUpper(fun()));
EXPECT_EQ("", absl::AsciiStrToUpper(absl::string_view()));
char mutable_buf[] = "Mutable";
std::transform(mutable_buf, mutable_buf + strlen(mutable_buf),
mutable_buf, absl::ascii_toupper);
EXPECT_STREQ("MUTABLE", mutable_buf);
}
TEST(StripLeadingAsciiWhitespace, FromStringView) {
EXPECT_EQ(absl::string_view{},
absl::StripLeadingAsciiWhitespace(absl::string_view{}));
EXPECT_EQ("foo", absl::StripLeadingAsciiWhitespace({"foo"}));
EXPECT_EQ("foo", absl::StripLeadingAsciiWhitespace({"\t \n\f\r\n\vfoo"}));
EXPECT_EQ("foo foo\n ",
absl::StripLeadingAsciiWhitespace({"\t \n\f\r\n\vfoo foo\n "}));
EXPECT_EQ(absl::string_view{}, absl::StripLeadingAsciiWhitespace(
{"\t \n\f\r\v\n\t \n\f\r\v\n"}));
}
TEST(StripLeadingAsciiWhitespace, InPlace) {
std::string str;
absl::StripLeadingAsciiWhitespace(&str);
EXPECT_EQ("", str);
str = "foo";
absl::StripLeadingAsciiWhitespace(&str);
EXPECT_EQ("foo", str);
str = "\t \n\f\r\n\vfoo";
absl::StripLeadingAsciiWhitespace(&str);
EXPECT_EQ("foo", str);
str = "\t \n\f\r\n\vfoo foo\n ";
absl::StripLeadingAsciiWhitespace(&str);
EXPECT_EQ("foo foo\n ", str);
str = "\t \n\f\r\v\n\t \n\f\r\v\n";
absl::StripLeadingAsciiWhitespace(&str);
EXPECT_EQ(absl::string_view{}, str);
}
TEST(StripTrailingAsciiWhitespace, FromStringView) {
EXPECT_EQ(absl::string_view{},
absl::StripTrailingAsciiWhitespace(absl::string_view{}));
EXPECT_EQ("foo", absl::StripTrailingAsciiWhitespace({"foo"}));
EXPECT_EQ("foo", absl::StripTrailingAsciiWhitespace({"foo\t \n\f\r\n\v"}));
EXPECT_EQ(" \nfoo foo",
absl::StripTrailingAsciiWhitespace({" \nfoo foo\t \n\f\r\n\v"}));
EXPECT_EQ(absl::string_view{}, absl::StripTrailingAsciiWhitespace(
{"\t \n\f\r\v\n\t \n\f\r\v\n"}));
}
TEST(StripTrailingAsciiWhitespace, InPlace) {
std::string str;
absl::StripTrailingAsciiWhitespace(&str);
EXPECT_EQ("", str);
str = "foo";
absl::StripTrailingAsciiWhitespace(&str);
EXPECT_EQ("foo", str);
str = "foo\t \n\f\r\n\v";
absl::StripTrailingAsciiWhitespace(&str);
EXPECT_EQ("foo", str);
str = " \nfoo foo\t \n\f\r\n\v";
absl::StripTrailingAsciiWhitespace(&str);
EXPECT_EQ(" \nfoo foo", str);
str = "\t \n\f\r\v\n\t \n\f\r\v\n";
absl::StripTrailingAsciiWhitespace(&str);
EXPECT_EQ(absl::string_view{}, str);
}
TEST(StripAsciiWhitespace, FromStringView) {
EXPECT_EQ(absl::string_view{},
absl::StripAsciiWhitespace(absl::string_view{}));
EXPECT_EQ("foo", absl::StripAsciiWhitespace({"foo"}));
EXPECT_EQ("foo",
absl::StripAsciiWhitespace({"\t \n\f\r\n\vfoo\t \n\f\r\n\v"}));
EXPECT_EQ("foo foo", absl::StripAsciiWhitespace(
{"\t \n\f\r\n\vfoo foo\t \n\f\r\n\v"}));
EXPECT_EQ(absl::string_view{},
absl::StripAsciiWhitespace({"\t \n\f\r\v\n\t \n\f\r\v\n"}));
}
TEST(StripAsciiWhitespace, InPlace) {
std::string str;
absl::StripAsciiWhitespace(&str);
EXPECT_EQ("", str);
str = "foo";
absl::StripAsciiWhitespace(&str);
EXPECT_EQ("foo", str);
str = "\t \n\f\r\n\vfoo\t \n\f\r\n\v";
absl::StripAsciiWhitespace(&str);
EXPECT_EQ("foo", str);
str = "\t \n\f\r\n\vfoo foo\t \n\f\r\n\v";
absl::StripAsciiWhitespace(&str);
EXPECT_EQ("foo foo", str);
str = "\t \n\f\r\v\n\t \n\f\r\v\n";
absl::StripAsciiWhitespace(&str);
EXPECT_EQ(absl::string_view{}, str);
}
TEST(RemoveExtraAsciiWhitespace, InPlace) {
const char* inputs[] = {"No extra space",
" Leading whitespace",
"Trailing whitespace ",
" Leading and trailing ",
" Whitespace \t in\v middle ",
"'Eeeeep! \n Newlines!\n",
"nospaces",
"",
"\n\t a\t\n\nb \t\n"};
const char* outputs[] = {
"No extra space",
"Leading whitespace",
"Trailing whitespace",
"Leading and trailing",
"Whitespace in middle",
"'Eeeeep! Newlines!",
"nospaces",
"",
"a\nb",
};
const int NUM_TESTS = ABSL_ARRAYSIZE(inputs);
for (int i = 0; i < NUM_TESTS; i++) {
std::string s(inputs[i]);
absl::RemoveExtraAsciiWhitespace(&s);
EXPECT_EQ(outputs[i], s);
}
}
} | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/strings/ascii.cc | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/strings/ascii_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4 |
baaeddd6-ac64-4c73-9a7a-ec433d59b970 | cpp | tensorflow/tensorflow | ctstring | tensorflow/core/platform/ctstring.h | third_party/xla/third_party/tsl/tsl/platform/ctstring_test.cc | #ifndef TENSORFLOW_CORE_PLATFORM_CTSTRING_H_
#define TENSORFLOW_CORE_PLATFORM_CTSTRING_H_
#include "tsl/platform/ctstring.h"
#endif | #include "tsl/platform/ctstring.h"
#include <cstring>
#include <memory>
#include <string>
#include "tsl/platform/ctstring_internal.h"
#include "tsl/platform/test.h"
static const char kLongString[] =
"abcdefghij"
"klmnopqrst"
"uvwxyz0123"
"456789ABCD"
"EFGHIKLMNO";
const size_t kLongStringLen = sizeof(kLongString) / sizeof(char) - sizeof(char);
TEST(TF_CTStringTest, InitAssignMoveDealloc) {
EXPECT_GT(::strlen(kLongString), TF_TString_SmallCapacity);
{
TF_TString s10, s11, s12;
TF_TString_Init(&s10);
TF_TString_Init(&s11);
TF_TString_Init(&s12);
EXPECT_EQ(0, TF_TString_GetSize(&s10));
EXPECT_EQ(TF_TSTR_SMALL, TF_TString_GetType(&s10));
EXPECT_STREQ("", TF_TString_GetDataPointer(&s10));
EXPECT_STREQ("", TF_TString_GetMutableDataPointer(&s10));
TF_TString_Assign(&s11, &s10);
EXPECT_EQ(0, TF_TString_GetSize(&s11));
EXPECT_EQ(TF_TSTR_SMALL, TF_TString_GetType(&s10));
EXPECT_STREQ("", TF_TString_GetDataPointer(&s11));
EXPECT_STREQ("", TF_TString_GetMutableDataPointer(&s11));
TF_TString_Move(&s12, &s11);
EXPECT_EQ(0, TF_TString_GetSize(&s11));
EXPECT_EQ(TF_TSTR_SMALL, TF_TString_GetType(&s10));
EXPECT_STREQ("", TF_TString_GetDataPointer(&s11));
EXPECT_STREQ("", TF_TString_GetMutableDataPointer(&s11));
EXPECT_EQ(0, TF_TString_GetSize(&s12));
EXPECT_EQ(TF_TSTR_SMALL, TF_TString_GetType(&s10));
EXPECT_STREQ("", TF_TString_GetDataPointer(&s12));
EXPECT_STREQ("", TF_TString_GetMutableDataPointer(&s12));
TF_TString_Dealloc(&s10);
TF_TString_Dealloc(&s11);
TF_TString_Dealloc(&s12);
}
{
TF_TString s20, s21, s22;
TF_TString_Init(&s20);
TF_TString_Init(&s21);
TF_TString_Init(&s22);
TF_TString_Copy(&s20, "a", 1);
EXPECT_EQ(1, TF_TString_GetSize(&s20));
EXPECT_EQ(TF_TSTR_SMALL, TF_TString_GetType(&s20));
EXPECT_STREQ("a", TF_TString_GetDataPointer(&s20));
EXPECT_STREQ("a", TF_TString_GetMutableDataPointer(&s20));
EXPECT_EQ(TF_TString_SmallCapacity, TF_TString_GetCapacity(&s20));
TF_TString_Assign(&s21, &s20);
EXPECT_EQ(1, TF_TString_GetSize(&s21));
EXPECT_EQ(TF_TSTR_SMALL, TF_TString_GetType(&s21));
EXPECT_STREQ("a", TF_TString_GetDataPointer(&s21));
EXPECT_STREQ("a", TF_TString_GetMutableDataPointer(&s21));
EXPECT_EQ(TF_TString_SmallCapacity, TF_TString_GetCapacity(&s21));
TF_TString_Move(&s22, &s21);
EXPECT_EQ(1, TF_TString_GetSize(&s22));
EXPECT_EQ(TF_TSTR_SMALL, TF_TString_GetType(&s22));
EXPECT_STREQ("a", TF_TString_GetDataPointer(&s22));
EXPECT_STREQ("a", TF_TString_GetMutableDataPointer(&s22));
EXPECT_EQ(TF_TString_SmallCapacity, TF_TString_GetCapacity(&s22));
TF_TString_Dealloc(&s20);
TF_TString_Dealloc(&s21);
TF_TString_Dealloc(&s22);
}
{
TF_TString s30, s31;
TF_TString_Init(&s30);
TF_TString_Init(&s31);
size_t s = TF_TString_SmallCapacity - 1;
EXPECT_EQ(TF_TString_SmallCapacity, TF_TString_GetCapacity(&s30));
TF_TString_Copy(&s30, kLongString, s);
EXPECT_STREQ(std::string(kLongString, s).data(),
TF_TString_GetDataPointer(&s30));
EXPECT_EQ(TF_TSTR_SMALL, TF_TString_GetType(&s30));
EXPECT_GT(TF_TString_SmallCapacity, TF_TString_GetSize(&s30));
EXPECT_EQ(TF_TString_SmallCapacity, TF_TString_GetCapacity(&s30));
TF_TString_AppendN(&s30, &kLongString[s++], 1);
EXPECT_STREQ(std::string(kLongString, s).data(),
TF_TString_GetDataPointer(&s30));
EXPECT_EQ(TF_TSTR_SMALL, TF_TString_GetType(&s30));
EXPECT_EQ(TF_TString_SmallCapacity, TF_TString_GetSize(&s30));
EXPECT_EQ(TF_TString_SmallCapacity, TF_TString_GetCapacity(&s30));
TF_TString_AppendN(&s30, &kLongString[s++], 1);
EXPECT_STREQ(std::string(kLongString, s).data(),
TF_TString_GetDataPointer(&s30));
EXPECT_STREQ(std::string(kLongString, s).data(),
TF_TString_GetMutableDataPointer(&s30));
EXPECT_EQ(TF_TSTR_LARGE, TF_TString_GetType(&s30));
EXPECT_EQ(s, TF_TString_GetSize(&s30));
EXPECT_LT(TF_TString_SmallCapacity, TF_TString_GetSize(&s30));
EXPECT_LT(TF_TString_SmallCapacity, TF_TString_GetCapacity(&s30));
TF_TString_Move(&s31, &s30);
EXPECT_STREQ("", TF_TString_GetDataPointer(&s30));
EXPECT_STREQ("", TF_TString_GetMutableDataPointer(&s30));
EXPECT_EQ(TF_TSTR_SMALL, TF_TString_GetType(&s30));
EXPECT_EQ(0, TF_TString_GetSize(&s30));
EXPECT_STREQ(std::string(kLongString, s).data(),
TF_TString_GetDataPointer(&s31));
EXPECT_STREQ(std::string(kLongString, s).data(),
TF_TString_GetMutableDataPointer(&s31));
EXPECT_EQ(TF_TSTR_LARGE, TF_TString_GetType(&s31));
EXPECT_EQ(s, TF_TString_GetSize(&s31));
EXPECT_LT(TF_TString_SmallCapacity, TF_TString_GetCapacity(&s31));
TF_TString_Dealloc(&s30);
TF_TString_Dealloc(&s31);
}
{
const char kStr[] = "abcdef";
const char kStrLen = sizeof(kStr) / sizeof(char) - sizeof(char);
TF_TString s40, s41;
TF_TString_Init(&s40);
TF_TString_Init(&s41);
TF_TString_Copy(&s40, kLongString, kLongStringLen);
EXPECT_EQ(kLongStringLen, TF_TString_GetSize(&s40));
TF_TString_Assign(&s41, &s40);
EXPECT_STREQ(kLongString, TF_TString_GetDataPointer(&s40));
EXPECT_STREQ(kLongString, TF_TString_GetMutableDataPointer(&s40));
EXPECT_EQ(kLongStringLen, TF_TString_GetSize(&s41));
TF_TString_AppendN(&s40, kLongString, kLongStringLen);
TF_TString_Append(&s40, &s41);
std::string longerString(kLongString);
longerString += kLongString;
longerString += kLongString;
EXPECT_STREQ(longerString.data(), TF_TString_GetDataPointer(&s40));
EXPECT_STREQ(longerString.data(), TF_TString_GetMutableDataPointer(&s40));
EXPECT_EQ(longerString.size(), TF_TString_GetSize(&s40));
TF_TString_AssignView(&s40, kStr, kStrLen);
EXPECT_EQ(TF_TSTR_VIEW, TF_TString_GetType(&s40));
EXPECT_EQ(kStr, TF_TString_GetDataPointer(&s40));
EXPECT_EQ(6, TF_TString_GetSize(&s40));
EXPECT_EQ(0, TF_TString_GetCapacity(&s40));
EXPECT_NE(kStr, TF_TString_GetMutableDataPointer(&s40));
EXPECT_STREQ(kStr, TF_TString_GetMutableDataPointer(&s40));
EXPECT_EQ(TF_TSTR_SMALL, TF_TString_GetType(&s40));
EXPECT_EQ(6, TF_TString_GetSize(&s40));
EXPECT_EQ(TF_TString_SmallCapacity, TF_TString_GetCapacity(&s40));
TF_TString_Dealloc(&s40);
TF_TString_Dealloc(&s41);
}
{
TF_TString s50;
TF_TString_Init(&s50);
TF_TString_Copy(&s50, "a", 1);
EXPECT_STREQ("a", TF_TString_GetDataPointer(&s50));
EXPECT_STREQ("a", TF_TString_GetMutableDataPointer(&s50));
EXPECT_EQ(1, TF_TString_GetSize(&s50));
TF_TString_Copy(&s50, kLongString, kLongStringLen);
EXPECT_STREQ(kLongString, TF_TString_GetDataPointer(&s50));
EXPECT_STREQ(kLongString, TF_TString_GetMutableDataPointer(&s50));
EXPECT_EQ(kLongStringLen, TF_TString_GetSize(&s50));
size_t cap1 = TF_TString_GetCapacity(&s50);
TF_TString_Copy(&s50, kLongString, TF_TString_SmallCapacity + 1);
size_t cap2 = TF_TString_GetCapacity(&s50);
EXPECT_STREQ(std::string(kLongString, TF_TString_SmallCapacity + 1).data(),
TF_TString_GetMutableDataPointer(&s50));
EXPECT_EQ(TF_TSTR_LARGE, TF_TString_GetType(&s50));
EXPECT_GT(cap1, cap2);
TF_TString_Copy(&s50, "c", 1);
EXPECT_STREQ("c", TF_TString_GetDataPointer(&s50));
EXPECT_STREQ("c", TF_TString_GetMutableDataPointer(&s50));
EXPECT_EQ(1, TF_TString_GetSize(&s50));
EXPECT_EQ(TF_TSTR_SMALL, TF_TString_GetType(&s50));
TF_TString_Dealloc(&s50);
}
}
TEST(TF_CTStringTest, ResizeReserve) {
{
TF_TString s60;
TF_TString_Init(&s60);
TF_TString_Resize(&s60, 2, 'a');
EXPECT_EQ(0, ::memcmp("aa", TF_TString_GetDataPointer(&s60), 2));
TF_TString_Resize(&s60, 4, '\0');
EXPECT_EQ(0, ::memcmp("aa\0\0", TF_TString_GetDataPointer(&s60), 4));
TF_TString_Resize(&s60, 6, 'b');
EXPECT_EQ(0, ::memcmp("aa\0\0bb", TF_TString_GetDataPointer(&s60), 6));
TF_TString_Resize(&s60, 2, 'c');
EXPECT_EQ(0, ::memcmp("aa", TF_TString_GetDataPointer(&s60), 2));
TF_TString_Dealloc(&s60);
}
{
TF_TString s70;
TF_TString_Init(&s70);
TF_TString_Reserve(&s70, TF_TString_SmallCapacity - 1);
EXPECT_EQ(TF_TString_SmallCapacity, TF_TString_GetCapacity(&s70));
EXPECT_EQ(0, TF_TString_GetSize(&s70));
EXPECT_EQ(TF_TSTR_SMALL, TF_TString_GetType(&s70));
TF_TString_Reserve(&s70, TF_TString_SmallCapacity);
EXPECT_EQ(TF_TString_SmallCapacity, TF_TString_GetCapacity(&s70));
EXPECT_EQ(0, TF_TString_GetSize(&s70));
EXPECT_EQ(TF_TSTR_SMALL, TF_TString_GetType(&s70));
TF_TString_Copy(&s70, "hello", 5);
EXPECT_EQ(5, TF_TString_GetSize(&s70));
EXPECT_EQ(TF_TString_SmallCapacity, TF_TString_GetCapacity(&s70));
EXPECT_EQ(TF_TSTR_SMALL, TF_TString_GetType(&s70));
TF_TString_Reserve(&s70, 100);
EXPECT_EQ(111, TF_TString_GetCapacity(&s70));
EXPECT_EQ(5, TF_TString_GetSize(&s70));
EXPECT_EQ(TF_TSTR_LARGE, TF_TString_GetType(&s70));
TF_TString_AssignView(&s70, kLongString, kLongStringLen);
TF_TString_Reserve(&s70, 10);
EXPECT_EQ(TF_TSTR_VIEW, TF_TString_GetType(&s70));
EXPECT_EQ(0, TF_TString_GetCapacity(&s70));
TF_TString_Reserve(&s70, 100);
EXPECT_EQ(TF_TSTR_LARGE, TF_TString_GetType(&s70));
EXPECT_EQ(111, TF_TString_GetCapacity(&s70));
TF_TString_Reserve(&s70, 200);
EXPECT_EQ(TF_TSTR_LARGE, TF_TString_GetType(&s70));
EXPECT_EQ(207, TF_TString_GetCapacity(&s70));
TF_TString_Dealloc(&s70);
}
{
TF_TString s70;
TF_TString_Init(&s70);
TF_TString_ReserveAmortized(&s70, TF_TString_SmallCapacity - 1);
EXPECT_EQ(TF_TString_SmallCapacity, TF_TString_GetCapacity(&s70));
EXPECT_EQ(0, TF_TString_GetSize(&s70));
EXPECT_EQ(TF_TSTR_SMALL, TF_TString_GetType(&s70));
TF_TString_ReserveAmortized(&s70, TF_TString_SmallCapacity);
EXPECT_EQ(TF_TString_SmallCapacity, TF_TString_GetCapacity(&s70));
EXPECT_EQ(0, TF_TString_GetSize(&s70));
EXPECT_EQ(TF_TSTR_SMALL, TF_TString_GetType(&s70));
TF_TString_Copy(&s70, "hello", 5);
EXPECT_EQ(5, TF_TString_GetSize(&s70));
EXPECT_EQ(TF_TString_SmallCapacity, TF_TString_GetCapacity(&s70));
EXPECT_EQ(TF_TSTR_SMALL, TF_TString_GetType(&s70));
TF_TString_ReserveAmortized(&s70, 100);
EXPECT_EQ(111, TF_TString_GetCapacity(&s70));
EXPECT_EQ(5, TF_TString_GetSize(&s70));
EXPECT_EQ(TF_TSTR_LARGE, TF_TString_GetType(&s70));
TF_TString_AssignView(&s70, kLongString, kLongStringLen);
TF_TString_ReserveAmortized(&s70, 10);
EXPECT_EQ(TF_TSTR_VIEW, TF_TString_GetType(&s70));
EXPECT_EQ(0, TF_TString_GetCapacity(&s70));
TF_TString_ReserveAmortized(&s70, 100);
EXPECT_EQ(TF_TSTR_LARGE, TF_TString_GetType(&s70));
EXPECT_EQ(111, TF_TString_GetCapacity(&s70));
TF_TString_ReserveAmortized(&s70, 200);
EXPECT_EQ(TF_TSTR_LARGE, TF_TString_GetType(&s70));
EXPECT_EQ(223, TF_TString_GetCapacity(&s70));
TF_TString_Dealloc(&s70);
}
}
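// Exercises the offset representation: the string payload is laid out
// directly after the TF_TString header and referenced by a byte offset
// from the start of the struct.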
TEST(TF_CTStringTest, OffsetType) {
{
uint8_t str[] = "test";
constexpr size_t str_size = sizeof(str) / sizeof(str[0]);
uint8_t buf[sizeof(TF_TString) + str_size];
memcpy(buf + sizeof(TF_TString), str, str_size);
TF_TString *offsets = (TF_TString *)buf;
TF_TString_Init(offsets);
offsets[0].u.offset.size = TF_le32toh(str_size << 2 | TF_TSTR_OFFSET);
offsets[0].u.offset.offset = TF_le32toh(sizeof(TF_TString));
offsets[0].u.offset.count = TF_le32toh(1);
EXPECT_EQ(str_size, TF_TString_GetSize(offsets));
EXPECT_EQ(TF_TSTR_OFFSET, TF_TString_GetType(offsets));
EXPECT_EQ(0, ::memcmp(str, TF_TString_GetDataPointer(offsets), str_size));
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/platform/ctstring.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/ctstring_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
f347f78b-eaa6-4792-ac4e-ae5992680692 | cpp | tensorflow/tensorflow | memory_space_assignment | third_party/xla/xla/service/memory_space_assignment/memory_space_assignment.cc | third_party/xla/xla/service/memory_space_assignment/memory_space_assignment_test.cc | #include "xla/service/memory_space_assignment/memory_space_assignment.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <functional>
#include <iterator>
#include <map>
#include <memory>
#include <optional>
#include <string>
#include <string_view>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_schedule.h"
#include "xla/hlo/utils/hlo_live_range.h"
#include "xla/service/buffer_value.h"
#include "xla/service/heap_simulator/heap_simulator.h"
#include "xla/service/hlo.pb.h"
#include "xla/service/hlo_alias_analysis.h"
#include "xla/service/hlo_buffer.h"
#include "xla/service/hlo_dataflow_analysis.h"
#include "xla/service/hlo_value.h"
#include "xla/service/memory_space_assignment/algorithm.h"
#include "xla/service/memory_space_assignment/allocation.h"
#include "xla/service/memory_space_assignment/memory_space_assignment.pb.h"
#include "xla/service/memory_space_assignment/options.h"
#include "xla/service/memory_space_assignment/simulator.h"
#include "xla/service/memory_space_assignment/slice.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/casts.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace memory_space_assignment {
namespace {
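// Forward declaration; together the two functions below recursively insert
// an instruction into the new schedule, inserting all of its operands
// first so the sequence stays topologically ordered.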
absl::Status InsertInstructionAndEnsureOperandsInserted(
HloInstruction* new_instruction, HloInstructionSequence* new_sequence,
absl::flat_hash_set<HloInstruction*>* inserted_instructions);
absl::Status EnsureInstructionAndOperandsInserted(
HloInstruction* new_instruction, HloInstructionSequence* new_sequence,
absl::flat_hash_set<HloInstruction*>* inserted_instructions) {
if (inserted_instructions->contains(new_instruction)) {
return absl::OkStatus();
}
return InsertInstructionAndEnsureOperandsInserted(
new_instruction, new_sequence, inserted_instructions);
}
absl::Status InsertInstructionAndEnsureOperandsInserted(
HloInstruction* new_instruction, HloInstructionSequence* new_sequence,
absl::flat_hash_set<HloInstruction*>* inserted_instructions) {
for (HloInstruction* operand : new_instruction->operands()) {
TF_RETURN_IF_ERROR(EnsureInstructionAndOperandsInserted(
operand, new_sequence, inserted_instructions));
}
VLOG(4) << "inserting: " << new_instruction->ToShortString();
new_sequence->push_back(new_instruction);
TF_RET_CHECK(inserted_instructions->insert(new_instruction).second);
return absl::OkStatus();
}
std::string InstructionScheduleToString(const HloLiveRange& hlo_live_range) {
const absl::flat_hash_map<const HloInstruction*, HloLiveRange::LogicalTime>&
instruction_schedule = hlo_live_range.instruction_schedule();
std::vector<std::pair<int64_t, const HloInstruction*>> instructions;
instructions.reserve(instruction_schedule.size());
for (const auto& instruction : instruction_schedule) {
instructions.push_back({instruction.second, instruction.first});
}
std::string instruction_schedule_str = "\n";
absl::c_sort(instructions);
for (auto& instruction : instructions) {
absl::StrAppend(&instruction_schedule_str,
"LogicalTime: ", instruction.first, " ",
instruction.second->ToString(), "\n");
}
return instruction_schedule_str;
}
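// Extends the copy's parent allocation so the source value stays live
// until the copy completes; if the parent is itself a copy, its completion
// is pulled early enough to feed this one.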
void EnsureParentAllocationIsAvailableForCopy(CopyAllocation* copy_allocation) {
Allocation& parent_allocation = copy_allocation->mutable_prev_allocation();
parent_allocation.Extend(copy_allocation->copy_done_schedule_before());
if (parent_allocation.is_copy_allocation()) {
auto parent_copy_allocation =
tensorflow::down_cast<CopyAllocation*>(&parent_allocation);
parent_copy_allocation->set_copy_done_schedule_before(
std::min(parent_copy_allocation->copy_done_schedule_before(),
copy_allocation->start_time()));
parent_copy_allocation->set_copy_start_schedule_after(
std::min(parent_copy_allocation->copy_start_schedule_after(),
parent_copy_allocation->copy_done_schedule_before() - 1));
}
}
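// Reschedules the copy "just in time" for a single use: it starts right
// before the use and must complete at the use.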
void MakeCopyAllocationJitForSingleUse(CopyAllocation* copy_allocation,
int64_t use_time) {
copy_allocation->set_start_time(use_time - 1);
copy_allocation->set_copy_start_schedule_after(use_time - 1);
copy_allocation->set_end_time(use_time);
copy_allocation->set_copy_done_schedule_before(use_time);
EnsureParentAllocationIsAvailableForCopy(copy_allocation);
}
int64_t GetUseTime(const HloUse& use, const HloLiveRange& hlo_live_range) {
return hlo_live_range.instruction_schedule().at(use.instruction);
}
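// Splits every multi-use prefetch into one just-in-time prefetch per use,
// so each value is copied into alternate memory immediately before it is
// consumed.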
void ProcessPrefetchesToAlternateMemory(AllocationSequence& allocations,
const HloLiveRange& hlo_live_range) {
std::vector<Allocation*> allocations_in_raw_pointers =
GetAllocationSequenceInRawPointers(allocations);
for (auto allocation : allocations_in_raw_pointers) {
if (allocation->is_copy_allocation() && allocation->is_in_alternate_mem() &&
!allocation->uses().empty()) {
CopyAllocation* prefetch =
tensorflow::down_cast<CopyAllocation*>(allocation);
std::vector<HloUse> uses = prefetch->uses();
prefetch->clear_uses();
prefetch->AddUse(uses[0]);
MakeCopyAllocationJitForSingleUse(prefetch,
GetUseTime(uses[0], hlo_live_range));
for (size_t use_index = 1; use_index < uses.size(); ++use_index) {
const HloUse& use = uses[use_index];
int64_t use_time = GetUseTime(use, hlo_live_range);
auto jit_single_use_prefetch = std::make_unique<CopyAllocation>(
prefetch->mutable_prev_allocation(), MemorySpace::kAlternate,
prefetch->chunk(), use_time - 1, use_time, use_time);
jit_single_use_prefetch->set_copy_start_schedule_after(use_time - 1);
jit_single_use_prefetch->AddUse(use);
EnsureParentAllocationIsAvailableForCopy(jit_single_use_prefetch.get());
allocations.push_back(std::move(jit_single_use_prefetch));
}
}
}
}
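// Moves the eviction so the value is copied back to default memory
// immediately after the evicted buffer is defined.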
void MakeEvictionImmediate(CopyAllocation* eviction) {
const Allocation& parent_allocation = eviction->prev_allocation();
eviction->set_start_time(parent_allocation.start_time());
eviction->set_copy_start_schedule_after(parent_allocation.start_time());
eviction->set_copy_done_schedule_before(parent_allocation.start_time() + 1);
eviction->Extend(parent_allocation.start_time() + 1);
}
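// Maps each evicted (non-copy) allocation to the eviction that copies it
// back to default memory.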
absl::flat_hash_map<Allocation*, CopyAllocation*> GetEvictionsMap(
std::vector<Allocation*>& allocations) {
absl::flat_hash_map<Allocation*, CopyAllocation*> evictions_map;
for (auto& allocation : allocations) {
if (allocation->is_copy_allocation() && allocation->is_in_default_mem()) {
auto eviction = tensorflow::down_cast<CopyAllocation*>(allocation);
Allocation& parent_allocation = eviction->mutable_prev_allocation();
if (!parent_allocation.is_copy_allocation()) {
evictions_map[&parent_allocation] = eviction;
}
}
}
return evictions_map;
}
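// For values produced directly in alternate memory: evict them right after
// the defining instruction (creating the eviction if needed) and prefetch
// just in time for every use that does not immediately follow the
// definition.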
void ProcessBuffersProducedInAlternateMemory(
AllocationSequence& allocations, const HloLiveRange& hlo_live_range) {
std::vector<Allocation*> allocations_in_raw_pointers =
GetAllocationSequenceInRawPointers(allocations);
absl::flat_hash_map<Allocation*, CopyAllocation*> evictions_map =
GetEvictionsMap(allocations_in_raw_pointers);
for (auto& [_, eviction] : evictions_map) {
MakeEvictionImmediate(eviction);
}
if (VLOG_IS_ON(2)) {
LOG(INFO) << "AllocationSequence after making spills immediate spills\n";
XLA_LOG_LINES(INFO, AllocationSequenceToString(allocations, true));
}
for (auto allocation : allocations_in_raw_pointers) {
if (!allocation->is_copy_allocation() &&
allocation->is_in_alternate_mem()) {
std::vector<HloUse> uses = allocation->uses();
allocation->clear_uses();
allocation->set_end_time(allocation->start_time() + 1);
for (const HloUse& use : uses) {
int64_t use_time = GetUseTime(use, hlo_live_range);
if (allocation->start_time() + 1 == use_time) {
allocation->AddUse(use);
continue;
}
if (!evictions_map.contains(allocation)) {
auto eviction_unique_ptr = std::make_unique<CopyAllocation>(
*allocation, MemorySpace::kDefault, std::nullopt,
allocation->start_time(), allocation->start_time() + 1,
allocation->start_time() + 1);
eviction_unique_ptr->set_copy_start_schedule_after(
allocation->start_time());
evictions_map[allocation] = eviction_unique_ptr.get();
allocations.push_back(std::move(eviction_unique_ptr));
}
CopyAllocation* eviction = evictions_map[allocation];
auto jit_single_use_prefetch = std::make_unique<CopyAllocation>(
*eviction, MemorySpace::kAlternate, allocation->chunk(),
use_time - 1, use_time, use_time);
jit_single_use_prefetch->set_copy_start_schedule_after(use_time - 1);
jit_single_use_prefetch->AddUse(use);
EnsureParentAllocationIsAvailableForCopy(jit_single_use_prefetch.get());
allocations.push_back(std::move(jit_single_use_prefetch));
}
}
}
}
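// Top-level "always spill" transformation: demote prefetches to
// just-in-time copies and force alternate-memory values through default
// memory between definition and use.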
void TransformAllocationSequenceToSpill(AllocationSequence& allocations,
const HloLiveRange& hlo_live_range) {
if (VLOG_IS_ON(2)) {
LOG(INFO) << "InstructionSchedule before transform\n";
XLA_LOG_LINES(INFO, InstructionScheduleToString(hlo_live_range));
LOG(INFO) << "AllocationSequence before transform\n";
XLA_LOG_LINES(INFO, AllocationSequenceToString(allocations, true));
}
ProcessPrefetchesToAlternateMemory(allocations, hlo_live_range);
if (VLOG_IS_ON(2)) {
LOG(INFO) << "AllocationSequence after processing prefetches\n";
XLA_LOG_LINES(INFO, AllocationSequenceToString(allocations, true));
}
ProcessBuffersProducedInAlternateMemory(allocations, hlo_live_range);
if (VLOG_IS_ON(2)) {
VLOG(2) << "AllocationSequence after processing buffers produced in kAlt\n";
XLA_LOG_LINES(INFO, AllocationSequenceToString(allocations, true));
}
SortAllocationSequence(allocations);
}
}
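// Walks all non-fusion computations, tracking async copies and slices in
// flight and classifying each completed one as a prefetch into alternate
// memory or an eviction back to default memory; sliced prefetches are
// counted via their concat-bitcast custom calls.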
absl::StatusOr<MemorySpaceAssignment::AsyncCopyStats>
MemorySpaceAssignment::CalculateAsyncCopyStats() const {
AsyncCopyStats stats;
int64_t current_copies = 0;
TF_ASSIGN_OR_RETURN(std::unique_ptr<HloDataflowAnalysis> dataflow_analysis,
HloDataflowAnalysis::Run(*module_));
for (const HloComputation* computation :
module_->MakeNonfusionComputations()) {
for (HloInstruction* instruction : computation->instructions()) {
if (instruction->opcode() == HloOpcode::kCopyStart ||
(instruction->opcode() == HloOpcode::kAsyncStart &&
instruction->async_wrapped_instruction()->opcode() ==
HloOpcode::kSlice)) {
current_copies++;
} else if (instruction->opcode() == HloOpcode::kCopyDone ||
(instruction->opcode() == HloOpcode::kAsyncDone &&
instruction->async_wrapped_instruction()->opcode() ==
HloOpcode::kSlice)) {
current_copies--;
int64_t size =
options_.size_fn(dataflow_analysis->GetUniqueValueAt(instruction));
if (instruction->shape().layout().memory_space() ==
options_.alternate_memory_space) {
++stats.num_prefetches;
stats.prefetch_bytes += size;
if (instruction->opcode() == HloOpcode::kAsyncDone &&
instruction->async_wrapped_instruction()->opcode() ==
HloOpcode::kSlice) {
++stats.num_sliced_prefetch_slices;
}
} else {
++stats.num_evictions;
stats.eviction_bytes += size;
}
} else if (instruction->IsCustomCall(kConcatBitcastCustomCall)) {
++stats.num_sliced_prefetches;
}
stats.max_outstanding_async_copies =
std::max(stats.max_outstanding_async_copies, current_copies);
}
}
return stats;
}
absl::StatusOr<std::unique_ptr<PresetAssignments>>
MemorySpaceAssignment::Run(HloModule* module,
const HloLiveRange& hlo_live_range,
const HloAliasAnalysis& alias_analysis,
const Options& options) {
CHECK(module->has_schedule());
if (VLOG_IS_ON(3)) {
LOG(INFO) << "Module before memory space assignment: ";
XLA_LOG_LINES(INFO, module->ToString());
LOG(INFO) << "Schedule: " << module->schedule().ToString();
}
MemorySpaceAssignment memory_space_assignment(module, options,
hlo_live_range);
return memory_space_assignment.RunMemorySpaceAssignment(hlo_live_range,
alias_analysis);
}
absl::StatusOr<std::unique_ptr<PresetAssignments>>
MemorySpaceAssignment::RunMemorySpaceAssignment(
const HloLiveRange& hlo_live_range,
const HloAliasAnalysis& alias_analysis) {
TF_RETURN_IF_ERROR(FindAllocationSequence(hlo_live_range, alias_analysis));
std::optional<RuntimeSimulator> runtime_simulator = std::nullopt;
if (options_.cost_analysis) {
runtime_simulator.emplace(options_.cost_analysis,
options_.alternate_memory_space);
float estimated_time =
runtime_simulator->SimulateElapsedTimeWithoutAsyncCopyLikes(
hlo_live_range, allocations_);
VLOG(1) << "Estimated elapsed time without async copies (sec): "
<< estimated_time;
}
TF_RETURN_IF_ERROR(Process(hlo_live_range));
ScheduleAsynchronousCopies();
TF_RETURN_IF_ERROR(SimplifyGraph());
TF_RETURN_IF_ERROR(FixSchedule());
TF_RETURN_IF_ERROR(ExportAndColorBuffers());
if (runtime_simulator.has_value()) {
float estimated_time =
runtime_simulator->SimulateElapsedTime(module_, allocations_);
VLOG(1) << "Estimated elapsed time with async copies (sec): "
<< estimated_time;
}
if (VLOG_IS_ON(3)) {
LOG(INFO) << "Module after memory space assignment: ";
XLA_LOG_LINES(INFO, module_->ToString());
}
TF_CHECK_OK(module_->schedule().Verify());
TF_ASSIGN_OR_RETURN(AsyncCopyStats stats, CalculateAsyncCopyStats());
VLOG(1) << "Maximum number of outstanding async copies/slices: "
<< stats.max_outstanding_async_copies;
VLOG(1) << "Number of prefetches: " << stats.num_prefetches
<< ", in bytes: " << stats.prefetch_bytes;
VLOG(1) << "Number of sliced prefetches: " << stats.num_sliced_prefetches
<< ", consuming number of slices: "
<< stats.num_sliced_prefetch_slices;
VLOG(1) << "Number of evictions: " << stats.num_evictions
<< ", in bytes: " << stats.eviction_bytes;
TF_RETURN_IF_ERROR(VerifyAndExportHeapSimulatorTrace());
return std::move(preset_assignments_);
}
absl::Status MemorySpaceAssignment::FindAllocationSequence(
const HloLiveRange& hlo_live_range,
const HloAliasAnalysis& alias_analysis) {
auto algorithm = std::make_unique<MsaAlgorithm>(
&allocations_, options_, alias_analysis, hlo_live_range);
HeapSimulator::Options heap_simulator_options;
heap_simulator_options.may_reuse_operand_buffers = false;
heap_simulator_options.alloc_constants = true;
TF_RETURN_IF_ERROR(HeapSimulator::Run(std::move(algorithm), *module_,
module_->schedule(), alias_analysis,
options_.size_fn,
heap_simulator_options)
.status());
return absl::OkStatus();
}
absl::Status MemorySpaceAssignment::Process(
const HloLiveRange& hlo_live_range) {
VLOG(1) << "Processing assigned buffers...";
absl::flat_hash_set<const Allocation*> needed_allocations;
if (options_.always_spill_to_default_memory) {
TransformAllocationSequenceToSpill(allocations_, hlo_live_range);
}
for (auto& allocation : allocations_) {
allocation->MarkIfNeeded(needed_allocations);
}
for (auto& allocation : allocations_) {
VLOG(3) << "Processing: " << allocation->ToString();
if (!needed_allocations.contains(allocation.get())) {
VLOG(3) << "Allocation not needed.";
continue;
}
TF_RETURN_IF_ERROR(allocation->Process());
if (allocation->is_scoped_allocation()) {
CHECK(allocation->memory_space() == MemorySpace::kAlternate);
scoped_memory_assignments_.emplace_back(
allocation->defining_position().instruction, allocation->chunk());
alternate_memory_size_ =
std::max(alternate_memory_size_, allocation->chunk().chunk_end());
} else if (allocation->memory_space() == MemorySpace::kAlternate) {
if (allocation->is_sliced_copy_allocation()) {
const SlicedCopyAllocation& sliced_copy_allocation =
*static_cast<const SlicedCopyAllocation*>(allocation.get());
for (const SlicedCopyAllocation::SliceDetail& details :
sliced_copy_allocation.slice_details_sorted_by_start_time()) {
alternate_memory_assignments_.push_back(
{{details.copy_done, {}}, details.slice_decision.chunk});
alternate_memory_size_ = std::max(
alternate_memory_size_, details.slice_decision.chunk.chunk_end());
}
CHECK(
!sliced_copy_allocation.cross_program_prefetch_index().has_value());
}
alternate_memory_assignments_.emplace_back(
allocation->defining_position(), allocation->chunk());
alternate_memory_size_ =
std::max(alternate_memory_size_, allocation->chunk().chunk_end());
if (allocation->cross_program_prefetch_index().has_value()) {
TF_RETURN_IF_ERROR(module_->SetCrossProgramPrefetchOffset(
*allocation->cross_program_prefetch_index(),
allocation->chunk().offset));
}
}
}
absl::flat_hash_set<HloPosition> seen_pinned_positions;
for (auto& allocation : allocations_) {
if (needed_allocations.contains(allocation.get())) {
VLOG(3) << "Post-Processing: " << allocation->ToString();
TF_RETURN_IF_ERROR(allocation->PostProcess());
if (allocation->is_pinned_allocation() &&
!allocation->is_scoped_allocation()) {
auto [it, inserted] =
seen_pinned_positions.insert(allocation->defining_position());
TF_RET_CHECK(inserted)
<< "Multiple pinned allocations defined for position "
<< allocation->defining_position().ToString();
}
}
}
return absl::OkStatus();
}
absl::Status MemorySpaceAssignment::ExportAndColorBuffers() {
VLOG(1) << "Exporting buffers...";
TF_ASSIGN_OR_RETURN(auto alias_analysis, HloAliasAnalysis::Run(module_));
absl::flat_hash_map<int64_t, int64_t> seen_buffer_offsets;
VLOG(3) << "Exported alternate memory allocations:";
for (const auto& position_and_chunk : alternate_memory_assignments_) {
const HloPosition& defining_position = position_and_chunk.first;
const HeapSimulator::Chunk& chunk = position_and_chunk.second;
const HloBuffer& buffer = alias_analysis->GetUniqueBufferAt(
defining_position.instruction, defining_position.index);
auto seen_buffer_offset_it = seen_buffer_offsets.find(buffer.id());
if (seen_buffer_offset_it != seen_buffer_offsets.end()) {
CHECK_EQ(chunk.offset, seen_buffer_offset_it->second)
<< "Mismatch in offset for positions that map to the same value: "
<< buffer.ToString() << ", pos: " << defining_position.ToString();
} else {
VLOG(3) << " [" << chunk.offset << ", " << chunk.size
<< "] : " << defining_position.ToString() << " ("
<< buffer.ToString() << ")";
preset_assignments_->add_chunk(defining_position, chunk);
seen_buffer_offsets[buffer.id()] = chunk.offset;
}
}
VLOG(3) << "Exported scoped allocations in alternate memory:";
for (const auto& instruction_and_chunk : scoped_memory_assignments_) {
HloInstruction* instruction = instruction_and_chunk.first;
const HeapSimulator::Chunk& chunk = instruction_and_chunk.second;
VLOG(3) << " [" << chunk.offset << ", " << chunk.size
<< "] : " << instruction->name();
preset_assignments_->add_scoped_allocation_chunk(instruction, chunk);
}
if (!preset_assignments_->chunks().empty() ||
!preset_assignments_->scoped_allocation_chunks().empty()) {
preset_assignments_
->assignment_information_for_space(options_.alternate_memory_space)
->size = alternate_memory_size_;
}
VLOG(3) << "Exported alternate memory sizes:";
for (auto& pair : preset_assignments_->assignment_informations()) {
VLOG(3) << " space: " << pair.first << ", size: " << pair.second.size;
}
VLOG(1) << "Coloring buffers...";
for (const auto& defining_position_and_chunk :
preset_assignments_->chunks()) {
const HloPosition& defining_position = defining_position_and_chunk.first;
for (auto& buffer : alias_analysis->ComputeBuffersAt(
defining_position.instruction, defining_position.index)) {
for (auto& value : buffer->values()) {
for (auto& position : value->positions()) {
VLOG(4) << "Coloring " << position.ToString();
Shape* shape = ShapeUtil::GetMutableSubshape(
position.instruction->mutable_shape(), position.index);
CHECK(shape->IsArray()) << "Coloring a shape that is not an array: "
<< position.ToString();
shape->mutable_layout()->set_memory_space(
options_.alternate_memory_space);
}
}
}
}
return absl::OkStatus();
}
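// Drops every alternate-memory assignment whose defining position belongs to
// `instruction`, overwriting each erased entry with the last element so the
// scan stays linear.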
void MemorySpaceAssignment::RemoveAssignmentForInstruction(
const HloInstruction* instruction) {
auto it = alternate_memory_assignments_.begin();
auto end = alternate_memory_assignments_.end();
while (it != end) {
const HloPosition& position = it->first;
if (position.instruction == instruction) {
VLOG(3) << "Removing instruction from alternate memory assignments.";
if (std::next(it) == end) {
alternate_memory_assignments_.pop_back();
break;
} else {
*it = alternate_memory_assignments_.back();
alternate_memory_assignments_.pop_back();
end = alternate_memory_assignments_.end();
}
} else {
++it;
}
}
}
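// Post-assignment cleanup: strips control dependencies in scheduled
// computations, deletes dead instructions (but never CopyStart/CopyDone),
// and forwards trivial get-tuple-element/tuple round trips until the
// computation reaches a fixed point.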
absl::Status MemorySpaceAssignment::SimplifyGraph() {
VLOG(1) << "Simplifying graph...";
for (HloComputation* computation : module_->MakeNonfusionComputations()) {
if (!computations_in_schedule_.contains(computation)) {
VLOG(4) << "Not simplifying " << computation->name()
<< " because it's not in the schedule.";
continue;
}
for (HloInstruction* instruction :
computation->MakeInstructionPostOrder()) {
TF_RETURN_IF_ERROR(instruction->DropAllControlDeps());
}
bool computation_modified = true;
while (computation_modified) {
computation_modified = false;
VLOG(4) << "Running simplify graph loop over " << computation->name();
for (HloInstruction* instruction :
computation->MakeInstructionPostOrder()) {
if (computation->IsSafelyRemovable(instruction) &&
instruction->IsDead() && !instruction->HasSideEffect() &&
instruction->opcode() != HloOpcode::kCopyStart &&
instruction->opcode() != HloOpcode::kCopyDone) {
VLOG(4) << "Instruction removed: " << instruction->ToString();
RemoveAssignmentForInstruction(instruction);
auto instruction_it =
absl::c_find(flattened_instructions_, instruction);
if (instruction_it != flattened_instructions_.end()) {
*instruction_it = nullptr;
}
TF_RETURN_IF_ERROR(computation->RemoveInstruction(instruction));
computation_modified = true;
} else if (instruction->opcode() == HloOpcode::kGetTupleElement) {
HloInstruction* operand = instruction->mutable_operand(0);
if (operand->opcode() == HloOpcode::kTuple) {
HloInstruction* forwarded_instruction =
operand->mutable_operand(instruction->tuple_index());
VLOG(4) << "Replacing uses of " << instruction->ToString()
<< " with " << forwarded_instruction->ToString();
TF_RETURN_IF_ERROR(
instruction->ReplaceAllUsesWith(forwarded_instruction));
computation_modified = true;
}
} else if (instruction->opcode() == HloOpcode::kTuple) {
bool can_replace =
instruction->operand_count() > 0 &&
instruction->operand(0)->opcode() ==
HloOpcode::kGetTupleElement &&
instruction->operand(0)
->operand(0)
->shape()
.tuple_shapes_size() == instruction->operand_count();
for (int operand_number = 0;
operand_number < instruction->operand_count();
++operand_number) {
const HloInstruction* operand =
instruction->operand(operand_number);
if (operand->opcode() != HloOpcode::kGetTupleElement ||
operand->tuple_index() != operand_number ||
operand->operand(0) != instruction->operand(0)->operand(0)) {
can_replace = false;
break;
}
}
if (can_replace) {
HloInstruction* forwarded_instruction =
instruction->mutable_operand(0)->mutable_operand(0);
VLOG(4) << "Replacing uses of " << instruction->ToString()
<< " with " << forwarded_instruction->ToString();
TF_RETURN_IF_ERROR(
instruction->ReplaceAllUsesWith(forwarded_instruction));
computation_modified = true;
}
}
}
}
}
return absl::OkStatus();
}
namespace {
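// A schedulable piece of an asynchronous copy: an optional start phase (the
// copy-start) plus a mandatory done phase (the copy-done or slice concat).
// Steps order primarily by done time, then by start time, so copies that
// must finish earlier get scheduled first.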
class AsyncCopyStep {
public:
struct StartPhase {
int64_t schedule_after_time;
HloInstruction* instruction;
};
struct DonePhase {
int64_t schedule_before_time;
HloInstruction* instruction;
};
virtual ~AsyncCopyStep() = default;
bool operator<(const AsyncCopyStep& rhs) const {
std::optional<StartPhase> lhs_start_phase = start_phase();
auto lhs_tuple = std::make_tuple(
done_phase().schedule_before_time,
(lhs_start_phase.has_value() ? lhs_start_phase->schedule_after_time
: done_phase().schedule_before_time));
std::optional<StartPhase> rhs_start_phase = rhs.start_phase();
auto rhs_tuple = std::make_tuple(
rhs.done_phase().schedule_before_time,
(rhs_start_phase.has_value() ? rhs_start_phase->schedule_after_time
: rhs.done_phase().schedule_before_time));
return lhs_tuple < rhs_tuple;
}
virtual HloPosition defining_position() const = 0;
virtual std::optional<StartPhase> start_phase() const = 0;
virtual void set_start_phase_schedule_after_time(int64_t schedule_after) = 0;
virtual DonePhase done_phase() const = 0;
protected:
AsyncCopyStep() = default;
};
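// Step for a monolithic CopyAllocation: start is the copy-start instruction,
// done is the matching copy-done.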
class AsyncCopyStepForCopyAllocation : public AsyncCopyStep {
public:
explicit AsyncCopyStepForCopyAllocation(CopyAllocation* copy_allocation)
: AsyncCopyStep(), copy_allocation_(copy_allocation) {}
~AsyncCopyStepForCopyAllocation() override = default;
HloPosition defining_position() const override {
return copy_allocation_->defining_position();
}
std::optional<StartPhase> start_phase() const override {
StartPhase phase{copy_allocation_->copy_start_schedule_after(),
copy_allocation_->copy_start()};
return phase;
}
void set_start_phase_schedule_after_time(int64_t schedule_after) override {
copy_allocation_->set_copy_start_schedule_after(schedule_after);
}
DonePhase done_phase() const override {
return {copy_allocation_->copy_done_schedule_before(),
copy_allocation_->copy_done()};
}
private:
CopyAllocation* copy_allocation_ = nullptr;
};
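// Step for a single slice of a SlicedCopyAllocation, identified by its index
// into the start-time-sorted slice details.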
class AsyncCopyStepForSlice : public AsyncCopyStep {
public:
AsyncCopyStepForSlice(SlicedCopyAllocation* sliced_copy_allocation,
size_t slice_index)
: AsyncCopyStep(),
sliced_copy_allocation_(sliced_copy_allocation),
slice_index_(slice_index) {}
~AsyncCopyStepForSlice() override = default;
HloPosition defining_position() const override {
return sliced_copy_allocation_->defining_position();
}
std::optional<StartPhase> start_phase() const override {
const SlicedCopyAllocation::SliceDetail& slice_details =
sliced_copy_allocation_
->slice_details_sorted_by_start_time()[slice_index_];
StartPhase phase{slice_details.copy_start_after_time,
slice_details.copy_start};
return phase;
}
void set_start_phase_schedule_after_time(int64_t schedule_after) override {
sliced_copy_allocation_
->mutable_slice_details_sorted_by_start_time()[slice_index_]
.copy_start_after_time = schedule_after;
}
DonePhase done_phase() const override {
const SlicedCopyAllocation::SliceDetail& slice_details =
sliced_copy_allocation_
->slice_details_sorted_by_start_time()[slice_index_];
DonePhase phase{slice_details.copy_done_before_time,
slice_details.copy_done};
return phase;
}
private:
SlicedCopyAllocation* sliced_copy_allocation_ = nullptr;
size_t slice_index_;
};
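// Step for the concat that stitches the slices back together; it has no
// start phase of its own.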
class AsyncCopyStepForSliceConcat : public AsyncCopyStep {
public:
explicit AsyncCopyStepForSliceConcat(
SlicedCopyAllocation* sliced_copy_allocation)
: AsyncCopyStep(), sliced_copy_allocation_(sliced_copy_allocation) {}
~AsyncCopyStepForSliceConcat() override = default;
HloPosition defining_position() const override {
return sliced_copy_allocation_->defining_position();
}
std::optional<StartPhase> start_phase() const override {
return std::nullopt;
}
void set_start_phase_schedule_after_time(int64_t schedule_after) override {}
DonePhase done_phase() const override {
return {sliced_copy_allocation_->earliest_available_time(),
sliced_copy_allocation_->concat()};
}
private:
SlicedCopyAllocation* sliced_copy_allocation_ = nullptr;
};
}  // namespace
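// Collects an AsyncCopyStep for every copy and sliced-copy allocation in each
// memory space, sorts the steps, and records their start/done instructions in
// schedule_after_/schedule_before_ for FixSchedule to splice in. A copy-start
// is delayed until it lands in the same computation as its defining position.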
void MemorySpaceAssignment::ScheduleAsynchronousCopies() {
VLOG(1) << "Scheduling asynchronous copies...";
for (MemorySpace memory_space :
{MemorySpace::kDefault, MemorySpace::kAlternate}) {
std::vector<std::unique_ptr<AsyncCopyStep>> async_copy_steps;
for (auto& allocation : allocations_) {
if (allocation->memory_space() != memory_space) {
continue;
}
if (allocation->is_copy_allocation()) {
auto copy_allocation = static_cast<CopyAllocation*>(allocation.get());
async_copy_steps.push_back(
std::make_unique<AsyncCopyStepForCopyAllocation>(copy_allocation));
} else if (allocation->is_sliced_copy_allocation()) {
auto sliced_copy_allocation =
static_cast<SlicedCopyAllocation*>(allocation.get());
for (int i = 0; i < sliced_copy_allocation
->mutable_slice_details_sorted_by_start_time()
.size();
++i) {
async_copy_steps.push_back(std::make_unique<AsyncCopyStepForSlice>(
sliced_copy_allocation, i));
}
async_copy_steps.push_back(
std::make_unique<AsyncCopyStepForSliceConcat>(
sliced_copy_allocation));
}
}
absl::c_stable_sort(
async_copy_steps,
[](const std::unique_ptr<AsyncCopyStep>& lhs,
const std::unique_ptr<AsyncCopyStep>& rhs) { return *lhs < *rhs; });
for (std::unique_ptr<AsyncCopyStep>& async_copy_step : async_copy_steps) {
std::optional<AsyncCopyStep::StartPhase> start_phase =
async_copy_step->start_phase();
if (start_phase.has_value()) {
int64_t copy_start_schedule_after = start_phase->schedule_after_time;
while (
async_copy_step->defining_position().instruction->parent() !=
flattened_instructions_[
std::max<int64_t>(0, copy_start_schedule_after)]
->parent()) {
VLOG(4) << "Delaying CopyStart (" << copy_start_schedule_after
<< " to " << (copy_start_schedule_after + 1) << ") for "
<< start_phase->instruction->ToString()
<< " because it is not in the correct computation.";
async_copy_step->set_start_phase_schedule_after_time(
++copy_start_schedule_after);
}
start_phase = async_copy_step->start_phase();
schedule_after_[start_phase->schedule_after_time].push_back(
start_phase->instruction);
}
AsyncCopyStep::DonePhase done_phase = async_copy_step->done_phase();
schedule_before_[done_phase.schedule_before_time].push_back(
done_phase.instruction);
}
}
}
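// Rebuilds the instruction sequence of every scheduled computation, splicing
// in the new asynchronous copy instructions at the positions recorded in
// schedule_before_ and schedule_after_, then updates the module schedule.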
absl::Status MemorySpaceAssignment::FixSchedule() {
VLOG(1) << "Fixing schedule...";
TF_RET_CHECK(module_->has_schedule());
HloSchedule& schedule = module_->schedule();
for (const HloComputation* computation :
module_->MakeNonfusionComputations()) {
if (!computations_in_schedule_.contains(computation)) {
if (computation->IsAsyncComputation()) {
VLOG(4) << "Created a dummy schedule for async computation "
<< computation->name();
schedule.GetOrCreateSequence(computation);
continue;
}
VLOG(4) << "Not scheduling " << computation->name()
<< " because it's not in the schedule.";
continue;
}
TF_RET_CHECK(schedule.is_computation_scheduled(computation));
HloInstructionSequence new_sequence;
absl::flat_hash_set<HloInstruction*> inserted_instructions;
VLOG(4) << "Scheduling: " << computation->ToString();
for (int64_t instruction_index = -1;; ++instruction_index) {
auto insts_before_iter = schedule_before_.find(instruction_index);
if (insts_before_iter != schedule_before_.end()) {
for (HloInstruction* new_instruction : insts_before_iter->second) {
if (new_instruction->parent() == computation) {
VLOG(4) << "before " << instruction_index << ": "
<< new_instruction->name();
TF_RETURN_IF_ERROR(InsertInstructionAndEnsureOperandsInserted(
new_instruction, &new_sequence, &inserted_instructions));
}
}
}
if (instruction_index != -1) {
if (instruction_index >= flattened_instructions_.size()) {
break;
}
HloInstruction* instruction =
flattened_instructions_[instruction_index];
if (instruction != nullptr && instruction->parent() == computation &&
instruction->opcode() != HloOpcode::kBitcast &&
instruction->opcode() != HloOpcode::kTuple &&
!inserted_instructions.contains(instruction)) {
VLOG(4) << "inst " << instruction_index << ": "
<< instruction->name();
TF_RETURN_IF_ERROR(InsertInstructionAndEnsureOperandsInserted(
instruction, &new_sequence, &inserted_instructions));
}
}
auto insts_after_iter = schedule_after_.find(instruction_index);
if (insts_after_iter != schedule_after_.end()) {
for (HloInstruction* new_instruction : insts_after_iter->second) {
if (new_instruction->parent() == computation) {
VLOG(4) << "after " << instruction_index << ": "
<< new_instruction->name();
TF_RETURN_IF_ERROR(InsertInstructionAndEnsureOperandsInserted(
new_instruction, &new_sequence, &inserted_instructions));
}
}
}
}
TF_RETURN_IF_ERROR(EnsureInstructionAndOperandsInserted(
computation->root_instruction(), &new_sequence,
&inserted_instructions));
CHECK_EQ(new_sequence.size(), computation->instruction_count())
<< "New sequence for computation " << computation->name() << " has "
<< new_sequence.size() << " instructions, expects "
<< computation->instruction_count() << ".";
schedule.set_sequence(computation, new_sequence);
}
TF_RETURN_IF_ERROR(schedule.Update());
return absl::OkStatus();
}
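// Replays all exported chunks against a buffer interval tree to detect live
// ranges that overlap in both time and offset, splitting live ranges that
// cross conditional branches, and exports the resulting heap simulator trace
// together with the peak memory usage.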
absl::Status MemorySpaceAssignment::VerifyAndExportHeapSimulatorTrace() {
VLOG(1) << "Verifying...";
TF_ASSIGN_OR_RETURN(std::unique_ptr<HloAliasAnalysis> alias_analysis,
HloAliasAnalysis::Run(module_));
TF_ASSIGN_OR_RETURN(std::unique_ptr<HloLiveRange> hlo_live_range,
HloLiveRange::Run(module_->schedule(), *alias_analysis,
module_->entry_computation()));
BufferIntervalTree interval_tree;
absl::flat_hash_set<int64_t> seen_buffers;
std::map<std::tuple<int64_t, bool, int64_t>,
std::tuple<const HloValue*, HeapSimulator::Chunk,
HeapSimulatorTrace::Event::Kind>>
events;
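// Records the ALLOC/FREE event pair for `value` and fails if its chunk
// overlaps a previously registered chunk during the same time interval.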
auto add_allocation_and_verify = [&](int64_t start_time, int64_t end_time,
const HeapSimulator::Chunk& chunk,
const HloValue* value) -> absl::Status {
events[std::make_tuple(start_time, false, value->id())] =
std::make_tuple(value, chunk, HeapSimulatorTrace::Event::ALLOC);
events[std::make_tuple(end_time, true, value->id())] =
std::make_tuple(value, chunk, HeapSimulatorTrace::Event::FREE);
for (const HeapSimulator::Chunk& overlapping_chunk :
interval_tree.ChunksOverlappingInTime(start_time, end_time - 1)) {
if (chunk.OverlapsWith(overlapping_chunk)) {
return Internal(
("Value %s (%d, %d) off: %d size: %d overlaps with another chunk"
" off: %d size: %d"),
value->ToShortString(), start_time, end_time, chunk.offset,
chunk.size, overlapping_chunk.offset, overlapping_chunk.size);
}
}
interval_tree.Add(start_time, end_time - 1, chunk);
return absl::OkStatus();
};
for (const HloComputation* computation :
module_->MakeNonfusionComputations()) {
for (const HloInstruction* instruction : computation->instructions()) {
if (instruction->opcode() == HloOpcode::kCopyStart) {
int64_t from_memory_space =
ShapeUtil::GetSubshape(instruction->shape(), {1})
.layout()
.memory_space();
int64_t to_memory_space =
ShapeUtil::GetSubshape(instruction->shape(), {0})
.layout()
.memory_space();
CHECK_NE(from_memory_space, to_memory_space)
<< "Asynchronous copy to the same memory space: "
<< instruction->ToString();
}
}
}
for (const auto& position_and_chunk : preset_assignments_->chunks()) {
const HloPosition& position = position_and_chunk.first;
const HeapSimulator::Chunk& chunk = position_and_chunk.second;
const HloBuffer& buffer =
alias_analysis->GetUniqueBufferAt(position.instruction, position.index);
CHECK(!seen_buffers.contains(buffer.id()))
<< "Multiple preset assignments for the same buffer: "
<< buffer.ToString() << ", pos: " << position.ToString()
<< ", off: " << chunk.offset << ", size: " << chunk.size;
seen_buffers.insert(buffer.id());
for (const HloValue* value : buffer.values()) {
const HloLiveRange::TimeBound& time_bound =
hlo_live_range->buffer_live_ranges().at(value);
const HloInstruction* last_use_instruction = nullptr;
int64_t last_use_time = time_bound.start;
for (const HloUse& use : value->GetUses()) {
int64_t use_time =
hlo_live_range->instruction_schedule().at(use.instruction);
if (use_time > last_use_time) {
last_use_time = use_time;
last_use_instruction = use.instruction;
}
}
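// A value last used by a conditional needs its live range split: one
// segment per called computation (up to the last use inside it, recursing
// for nested conditionals) plus the segment before the earliest called
// computation starts.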
std::function<absl::Status(const HloInstruction*, int64_t, int64_t,
absl::string_view)>
split_conditional_buffer;
split_conditional_buffer = [&](const HloInstruction* use_instruction,
int64_t start_time, int64_t end_time,
absl::string_view indent_string) {
VLOG(3) << indent_string
<< "Splitting conditional buffer: " << buffer.ToString()
<< " value: " << value->ToShortString() << ": (" << start_time
<< ", " << end_time << ") off: " << chunk.offset
<< ", size: " << chunk.size;
int64_t earliest_computation_start_time = end_time;
for (const HloComputation* called_computation :
use_instruction->called_computations()) {
int64_t computation_start_time =
hlo_live_range->computation_span_times()
.at(called_computation)
.start;
earliest_computation_start_time =
std::min(earliest_computation_start_time, computation_start_time);
int64_t last_use_time = -1;
const HloInstruction* last_use_instruction = nullptr;
for (const HloUse& use : value->GetUses()) {
int64_t use_time =
hlo_live_range->instruction_schedule().at(use.instruction);
if (use.instruction->parent() == called_computation &&
use_time > last_use_time) {
last_use_time = use_time;
last_use_instruction = use.instruction;
}
}
if (last_use_time != -1) {
VLOG(3) << indent_string
<< " computation: " << called_computation->name() << ": ("
<< computation_start_time << ", " << last_use_time << ")";
CHECK(last_use_instruction);
last_use_time = std::min(last_use_time, end_time);
if (last_use_instruction->opcode() == HloOpcode::kConditional) {
TF_RETURN_IF_ERROR(split_conditional_buffer(
last_use_instruction, computation_start_time, last_use_time,
absl::StrCat(indent_string, " ")));
} else {
TF_RETURN_IF_ERROR(add_allocation_and_verify(
computation_start_time, last_use_time, chunk, value));
}
}
}
VLOG(3) << indent_string << " from beginning until first computation: ("
<< start_time << ", " << (earliest_computation_start_time - 1)
<< ")";
TF_RETURN_IF_ERROR(add_allocation_and_verify(
start_time, earliest_computation_start_time - 1, chunk, value));
return absl::OkStatus();
};
if (last_use_instruction &&
last_use_instruction->opcode() == HloOpcode::kConditional) {
TF_RETURN_IF_ERROR(split_conditional_buffer(
last_use_instruction, time_bound.start, time_bound.end, " "));
} else if (!value->GetUses().empty()) {
last_use_time = std::min(last_use_time, time_bound.end);
VLOG(3) << " buffer: " << buffer.ToString()
<< " value: " << value->ToShortString() << ": ("
<< time_bound.start << ", " << last_use_time
<< ") off: " << chunk.offset << ", size: " << chunk.size;
TF_RETURN_IF_ERROR(add_allocation_and_verify(
time_bound.start, last_use_time, chunk, value));
}
}
}
HeapSimulatorTrace* heap_trace =
&preset_assignments_
->assignment_information_for_space(options_.alternate_memory_space)
->heap_simulator_trace;
int64_t memory_usage = 0;
int64_t max_memory_usage = 0;
int64_t prev_time = 0;
int64_t prev_memory_usage = 0;
for (const auto& event : events) {
int64_t time;
bool is_free;
int64_t buffer_id;
std::tie(time, is_free, buffer_id) = event.first;
const HloValue* value;
HeapSimulator::Chunk chunk;
HeapSimulatorTrace::Event::Kind kind;
std::tie(value, chunk, kind) = event.second;
HeapSimulatorTrace::Event* heap_trace_event = heap_trace->add_events();
heap_trace_event->set_kind(kind);
heap_trace_event->set_buffer_id(buffer_id);
*heap_trace_event->mutable_instruction_name() =
std::string(value->instruction()->name());
*heap_trace_event->mutable_computation_name() =
std::string(value->instruction()->parent()->name());
if (prev_time != time) {
VLOG(2) << "Memory usage: " << std::max(memory_usage, prev_memory_usage)
<< " at time: " << prev_time << " ("
<< hlo_live_range->flattened_instruction_sequence()
.instructions()
.at(prev_time)
->name()
<< ")";
prev_time = time;
prev_memory_usage = memory_usage;
}
if (kind == HeapSimulatorTrace::Event::ALLOC) {
memory_usage += chunk.size;
} else {
CHECK_EQ(kind, HeapSimulatorTrace::Event::FREE);
memory_usage -= chunk.size;
}
prev_memory_usage = std::max(prev_memory_usage, memory_usage);
max_memory_usage = std::max(max_memory_usage, memory_usage);
VLOG(4) << "Memory usage: " << memory_usage << " at time: " << time;
}
VLOG(1) << "Max memory usage ignoring fragmentation: " << max_memory_usage;
return absl::OkStatus();
}
}
} | #include "xla/service/memory_space_assignment/memory_space_assignment.h"
#include <algorithm>
#include <cstdint>
#include <functional>
#include <limits>
#include <memory>
#include <numeric>
#include <optional>
#include <ostream>
#include <string>
#include <string_view>
#include <tuple>
#include <utility>
#include <variant>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/strings/str_replace.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/comparison_util.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_schedule.h"
#include "xla/hlo/utils/hlo_live_range.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/layout_util.h"
#include "xla/literal_util.h"
#include "xla/service/buffer_value.h"
#include "xla/service/heap_simulator/allocation_block.h"
#include "xla/service/heap_simulator/heap_simulator.h"
#include "xla/service/hlo_alias_analysis.h"
#include "xla/service/hlo_buffer.h"
#include "xla/service/hlo_cost_analysis.h"
#include "xla/service/hlo_dataflow_analysis.h"
#include "xla/service/hlo_value.h"
#include "xla/service/instruction_hoister.h"
#include "xla/service/memory_space_assignment/algorithm.h"
#include "xla/service/memory_space_assignment/allocation.h"
#include "xla/service/memory_space_assignment/buffer_interval_comparator.h"
#include "xla/service/memory_space_assignment/cost_analysis.h"
#include "xla/service/memory_space_assignment/memory_space_assignment.pb.h"
#include "xla/service/memory_space_assignment/options.h"
#include "xla/service/memory_space_assignment/prefetch_interval_picker.h"
#include "xla/service/memory_space_assignment/repacking.h"
#include "xla/service/memory_space_assignment/slice.h"
#include "xla/service/memory_space_assignment/testing_utils.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/test_utils.h"
#include "xla/tests/verified_hlo_module.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/protobuf.h"
#include "tsl/platform/status.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla {
namespace memory_space_assignment {
namespace {
namespace op = xla::testing::opcode_matchers;
using Chunk = HeapSimulator::Chunk;
using ::testing::_;
using ::testing::Return;
using ::testing::UnorderedElementsAre;
constexpr int64_t kPointerSize = 8;
constexpr float kAsyncCopyBandwidth = 100;
constexpr float kAlternateMemBandwidth = 1000;
constexpr float kBytesPerSecond = 100;
constexpr float kFlopsPerSecond = 1000;
constexpr float kTranscendentalsPerSecond = 10;
int64_t ShapeSize(const Shape& shape) {
return ShapeUtil::ByteSizeOf(shape, kPointerSize);
}
int64_t SizeFunction(const BufferValue& value) {
return ShapeSize(value.shape());
}
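// A no-op reserved-scoped-memory callback: reserves zero bytes for every
// instruction.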
int64_t ReservedScopedMemoryFn(
const HloInstruction* instruction,
const absl::flat_hash_set<std::pair<int, ShapeIndex>>&
operands_in_alternate_memory,
const absl::flat_hash_set<ShapeIndex>& outputs_in_alternate_memory) {
return 0;
}
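// Adapts a raw MsaBufferIntervalCompare function to the
// BufferIntervalComparator interface so tests can plug arbitrary orderings
// into the options.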
class TestBufferIntervalComparator : public BufferIntervalComparator {
public:
explicit TestBufferIntervalComparator(MsaBufferIntervalCompare compare_method)
: BufferIntervalComparator(), compare_method_(compare_method) {}
~TestBufferIntervalComparator() override = default;
std::string DescribeComparisonCriteria() const override {
return "internal to test";
}
std::string CriteriaToString(
const MsaBufferInterval& buffer_interval) override {
return "internal to test";
}
bool LessThan(const MsaBufferInterval& lhs,
const MsaBufferInterval& rhs) override {
return compare_method_(lhs, rhs);
}
private:
MsaBufferIntervalCompare compare_method_;
};
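// Test fixture providing default MSA, cost-analysis, and HLO cost options,
// plus helpers that run memory space assignment and sanity-check the result
// (parameters and root in default memory, exported chunks in alternate
// memory).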
class MemorySpaceAssignmentTestBase : public HloTestBase {
protected:
const int64_t kDefaultMemorySpace = 0;
const int64_t kAlternateMemorySpace = 1;
HloCostAnalysis::Options DefaultHloCostAnalysisOptions() {
HloCostAnalysis::Options options;
options.shape_size = ShapeSize;
options.set_flops_per_second(kFlopsPerSecond);
options.set_bytes_per_second(kBytesPerSecond);
options.set_transcendentals_per_second(kTranscendentalsPerSecond);
return options;
}
Options DefaultMemorySpaceOptions() {
Options options;
options.max_size_in_bytes = 128;
options.alignment_in_bytes = 8;
options.verify = true;
options.alternate_memory_space = kAlternateMemorySpace;
options.max_outstanding_prefetches = -1;
options.max_outstanding_evictions = -1;
return options;
}
CostAnalysisOptions DefaultCostAnalysisOptions() {
CostAnalysisOptions options;
options.async_copy_bandwidth_bytes_per_second = kAsyncCopyBandwidth;
options.alternate_mem_bandwidth_bytes_per_second = kAlternateMemBandwidth;
return options;
}
Options UpdateMaxAsyncCopies(Options options, int64_t max_async_copies) {
options.max_outstanding_prefetches = max_async_copies;
options.max_outstanding_evictions = max_async_copies;
return options;
}
std::unique_ptr<PresetAssignments> AssignMemorySpaceUsingCostAnalysis(
HloModule* module,
std::optional<Options> memory_space_options_override = std::nullopt,
std::optional<CostAnalysisOptions> cost_analysis_options_override =
std::nullopt,
std::optional<HloCostAnalysis::Options> hlo_cost_options_override =
std::nullopt,
std::optional<MsaSortOrderOverrides> optional_msa_sort_order_overrides =
std::nullopt) {
HloCostAnalysis::Options hlo_cost_options = DefaultHloCostAnalysisOptions();
if (hlo_cost_options_override) {
hlo_cost_options = *hlo_cost_options_override;
}
HloCostAnalysis hlo_cost_analysis(hlo_cost_options);
for (HloComputation* computation : module->MakeNonfusionComputations()) {
TF_CHECK_OK(computation->Accept(&hlo_cost_analysis));
}
auto alias_analysis = HloAliasAnalysis::Run(module).value();
Options memory_space_options = DefaultMemorySpaceOptions();
if (memory_space_options_override) {
memory_space_options = *memory_space_options_override;
}
CostAnalysisOptions cost_analysis_options = DefaultCostAnalysisOptions();
if (cost_analysis_options_override) {
cost_analysis_options = *cost_analysis_options_override;
}
HloCostAnalysisCosts hlo_cost_analysis_costs(hlo_cost_analysis);
auto cost_analysis = CostAnalysis::Create(hlo_cost_analysis_costs,
cost_analysis_options, *module)
.value();
memory_space_options.cost_analysis = cost_analysis.get();
CostAnalysisPrefetchIntervalPicker prefetch_interval_picker(
CostAnalysisPrefetchIntervalPicker(
*cost_analysis, /*min_overlap_to_async_copy_ratio=*/0.8,
/*preferred_overlap_to_async_copy_ratio=*/1.5,
/*max_overlap_to_mem_size_async_copy_ratio=*/10.0,
/*mem_size_bytes=*/memory_space_options.max_size_in_bytes));
MsaSortOrderOverrides msa_sort_order_overrides;
if (optional_msa_sort_order_overrides.has_value()) {
msa_sort_order_overrides = optional_msa_sort_order_overrides.value();
}
MemoryBoundednessBufferIntervalComparator comparator(
*cost_analysis, &cache_, msa_sort_order_overrides);
return AssignMemorySpace(
module, memory_space_options,
[&comparator](const MsaBufferInterval& lhs,
const MsaBufferInterval& rhs) {
return comparator.LessThan(lhs, rhs);
},
&prefetch_interval_picker);
}
std::unique_ptr<PresetAssignments> AssignMemorySpace(
HloModule* module, std::optional<Options> options_override = std::nullopt,
int64_t max_prefetch_interval = 10, int64_t min_prefetch_interval = 2) {
InstructionHoister instruction_hoister;
TF_CHECK_OK(instruction_hoister.Run(module).status());
InstructionCountPrefetchIntervalPicker prefetch_interval_picker(
min_prefetch_interval, max_prefetch_interval);
return AssignMemorySpace(module, options_override,
/*buffer_interval_compare=*/{},
&prefetch_interval_picker);
}
std::unique_ptr<PresetAssignments> AssignMemorySpace(
HloModule* module, std::optional<Options> options_override,
std::optional<MsaBufferIntervalCompare> buffer_interval_compare,
PrefetchIntervalPicker* prefetch_interval_picker) {
auto status_or = AssignMemorySpaceAndReturnStatus(module, options_override,
buffer_interval_compare,
prefetch_interval_picker);
TF_EXPECT_OK(status_or.status());
return std::move(status_or.value());
}
absl::StatusOr<std::unique_ptr<PresetAssignments>>
AssignMemorySpaceAndReturnStatus(
HloModule* module, std::optional<Options> options_override,
std::optional<MsaBufferIntervalCompare> buffer_interval_compare,
PrefetchIntervalPicker* prefetch_interval_picker) {
auto size_fn = [](const BufferValue& buffer) {
return ShapeUtil::ByteSizeOf(buffer.shape(), 8);
};
auto is_allowed_in_alternate_mem = [](const HloValue& value) {
HloInstruction* instruction = value.instruction();
HloComputation* computation = instruction->parent();
bool in_entry_computation =
(computation == computation->parent()->entry_computation());
if (in_entry_computation &&
instruction->opcode() == HloOpcode::kParameter) {
return false;
}
return true;
};
bool check_parameters_in_default_memory = true;
for (const HloInstruction* parameter :
module->entry_computation()->parameter_instructions()) {
ShapeUtil::ForEachSubshape(
parameter->shape(),
[&](const Shape& subshape, const ShapeIndex& ) {
if (subshape.has_layout() &&
subshape.layout().memory_space() == kAlternateMemorySpace) {
check_parameters_in_default_memory = false;
}
});
}
Options options = DefaultMemorySpaceOptions();
if (options_override) {
options = *options_override;
}
std::unique_ptr<TestBufferIntervalComparator> test_comparator;
if (buffer_interval_compare.has_value()) {
test_comparator = std::make_unique<TestBufferIntervalComparator>(
*buffer_interval_compare);
options.buffer_interval_comparator = test_comparator.get();
}
options.prefetch_interval_picker = prefetch_interval_picker;
options.size_fn = size_fn;
if (options.is_allowed_in_alternate_mem_fn == nullptr) {
options.is_allowed_in_alternate_mem_fn = is_allowed_in_alternate_mem;
}
TF_ASSIGN_OR_RETURN(auto alias_analysis, HloAliasAnalysis::Run(module));
TF_ASSIGN_OR_RETURN(std::unique_ptr<HloLiveRange> hlo_live_range,
HloLiveRange::Run(module->schedule(), *alias_analysis,
module->entry_computation()));
TF_ASSIGN_OR_RETURN(std::unique_ptr<PresetAssignments> preset_assignments,
MemorySpaceAssignment::Run(module, *hlo_live_range,
*alias_analysis, options));
if (check_parameters_in_default_memory) {
CheckParametersInDefaultMemory(module);
}
CheckRootInDefaultMemory(module);
CheckPresetAssignments(preset_assignments.get());
return preset_assignments;
}
void CheckPresetAssignments(const PresetAssignments* preset_assignments) {
std::set<HloPosition> positions_in_preset_assignments;
for (auto& position_and_chunk : preset_assignments->chunks()) {
HloPosition position = position_and_chunk.first;
EXPECT_EQ(positions_in_preset_assignments.find(position),
positions_in_preset_assignments.end());
positions_in_preset_assignments.insert(position);
const Shape& subshape =
ShapeUtil::GetSubshape(position.instruction->shape(), position.index);
EXPECT_EQ(subshape.layout().memory_space(), kAlternateMemorySpace)
<< "Exported position is not in alternate mem: "
<< position.ToString();
}
}
void CheckParametersInDefaultMemory(const HloModule* module) {
const HloComputation* entry_computation = module->entry_computation();
for (const HloInstruction* parameter :
entry_computation->parameter_instructions()) {
ShapeUtil::ForEachSubshape(
parameter->shape(),
[&](const Shape& subshape, const ShapeIndex& ) {
if (subshape.has_layout()) {
EXPECT_NE(subshape.layout().memory_space(), kAlternateMemorySpace)
<< "Parameter not in default memory: "
<< parameter->ToString();
}
});
}
}
void CheckRootInDefaultMemory(const HloModule* module) {
const HloInstruction* root =
module->entry_computation()->root_instruction();
if (root->shape().IsArray()) {
EXPECT_EQ(root->shape().layout().memory_space(), kDefaultMemorySpace);
}
}
struct OutstandingAsyncCopies {
int64_t max_copies;
int64_t max_prefetches;
int64_t max_evictions;
};
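// Walks the entry computation's schedule and tracks how many asynchronous
// copies are in flight at once, classifying each CopyStart/CopyDone pair as a
// prefetch or an eviction based on the destination memory space.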
OutstandingAsyncCopies CountMaximumOutstandingAsyncCopies(
const HloModule& module) {
OutstandingAsyncCopies copies{0, 0, 0};
int64_t current_copies = 0;
int64_t current_prefetches = 0;
int64_t current_evictions = 0;
for (HloInstruction* instruction : module.schedule()
.sequence(module.entry_computation())
.instructions()) {
if (instruction->opcode() == HloOpcode::kCopyStart) {
current_copies++;
if (ShapeUtil::GetSubshape(instruction->shape(), {0})
.layout()
.memory_space() == kAlternateMemorySpace) {
current_prefetches++;
} else {
current_evictions++;
}
} else if (instruction->opcode() == HloOpcode::kCopyDone) {
current_copies--;
if (instruction->shape().layout().memory_space() ==
kAlternateMemorySpace) {
current_prefetches--;
} else {
current_evictions--;
}
}
copies.max_copies = std::max(copies.max_copies, current_copies);
copies.max_prefetches =
std::max(copies.max_prefetches, current_prefetches);
copies.max_evictions = std::max(copies.max_evictions, current_evictions);
}
return copies;
}
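// Returns the alternate-memory offset assigned to the buffer defined at
// (instruction, index), or -1 if no chunk was assigned to it.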
int64_t GetAlternateMemoryOffset(const PresetAssignments& preset_assignments,
const HloInstruction* instruction,
const ShapeIndex& index = {}) const {
const HloModule* module = instruction->GetModule();
auto alias_analysis = HloAliasAnalysis::Run(module).value();
HloBuffer& buffer = alias_analysis->GetUniqueBufferAt(instruction, index);
for (auto& pos_and_chunk : preset_assignments.chunks()) {
for (auto& value : buffer.values()) {
if (pos_and_chunk.first == value->defining_position()) {
return pos_and_chunk.second.offset;
}
}
}
return -1;
}
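// Builds a module intended to force an eviction and prefetch: tanh's value
// stays live across a long chain of intermediate products that exhaust the
// alternate memory, until the final add consumes it again.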
std::unique_ptr<HloModule> CreateEvictAndPrefetchModule() {
HloComputation::Builder builder(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {2, 3});
HloInstruction* p0 =
builder.AddInstruction(HloInstruction::CreateParameter(0, shape, "p0"));
HloInstruction* p1 =
builder.AddInstruction(HloInstruction::CreateParameter(1, shape, "p1"));
HloInstruction* tanh = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kTanh, p0));
HloInstruction* a = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, p0, tanh));
HloInstruction* b = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kSubtract, p0, p1));
HloInstruction* c = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kMultiply, p0, p1));
HloInstruction* d = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kSubtract, p0, p1));
HloInstruction* e = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kMultiply, a, b));
HloInstruction* f = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kMultiply, a, c));
HloInstruction* g = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kMultiply, a, d));
HloInstruction* h = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kMultiply, b, c));
HloInstruction* i = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kMultiply, b, d));
HloInstruction* j = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kMultiply, c, d));
HloInstruction* k = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, e, f));
HloInstruction* l = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, g, h));
HloInstruction* m = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, i, j));
HloInstruction* n = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, k, l));
HloInstruction* o = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, n, m));
HloInstruction* add = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, o, tanh));
auto module = CreateNewVerifiedModule();
HloComputation* computation = module->AddEntryComputation(builder.Build());
HloSchedule schedule(module.get());
schedule.set_sequence(computation, {p0, p1, tanh, a, b, c, d, e, f, g, h, i,
j, k, l, m, n, o, add});
TF_CHECK_OK(module->set_schedule(schedule));
return module;
}
CostAnalysis::Cache cache_;
};
using MemorySpaceAssignmentTest = MemorySpaceAssignmentTestBase;
TEST_F(MemorySpaceAssignmentTest, ParameterOnly) {
HloComputation::Builder builder(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {2, 3});
HloInstruction* p0 =
builder.AddInstruction(HloInstruction::CreateParameter(0, shape, "p0"));
auto module = CreateNewVerifiedModule();
HloComputation* computation = module->AddEntryComputation(builder.Build());
HloSchedule schedule(module.get());
schedule.set_sequence(computation, {p0});
TF_CHECK_OK(module->set_schedule(schedule));
AssignMemorySpace(module.get());
EXPECT_THAT(p0, op::ShapeWithLayout(shape));
}
TEST_F(MemorySpaceAssignmentTest, Simple) {
HloComputation::Builder builder(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {2, 3});
HloInstruction* p0 =
builder.AddInstruction(HloInstruction::CreateParameter(0, shape, "p0"));
HloInstruction* p1 =
builder.AddInstruction(HloInstruction::CreateParameter(1, shape, "p1"));
HloInstruction* add = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, p0, p1));
HloInstruction* sub = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kSubtract, p0, p1));
HloInstruction* mul = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kMultiply, add, sub));
auto module = CreateNewVerifiedModule();
HloComputation* computation = module->AddEntryComputation(builder.Build());
HloSchedule schedule(module.get());
schedule.set_sequence(computation, {p0, p1, add, sub, mul});
TF_CHECK_OK(module->set_schedule(schedule));
auto preset_assignments = AssignMemorySpace(module.get());
Shape shape_in_alternate_mem = ShapeUtil::MakeShapeWithDenseLayout(
F32, {2, 3},
/*minor_to_major=*/{1, 0},
/*tiles=*/{},
/*tail_padding_alignment_in_elements=*/1,
/*element_size_in_bits=*/0, kAlternateMemorySpace);
EXPECT_THAT(p0, op::ShapeWithLayout(shape));
EXPECT_THAT(p1, op::ShapeWithLayout(shape));
EXPECT_THAT(mul, op::ShapeWithLayout(shape));
EXPECT_THAT(add, op::ShapeWithLayout(shape_in_alternate_mem));
EXPECT_THAT(sub, op::ShapeWithLayout(shape_in_alternate_mem));
EXPECT_EQ(preset_assignments->chunks().size(), 3);
EXPECT_EQ(preset_assignments->assignment_informations().size(), 1);
EXPECT_NE(preset_assignments->chunks()[0].second.offset,
preset_assignments->chunks()[1].second.offset);
}
TEST_F(MemorySpaceAssignmentTest, NegateChain) {
HloComputation::Builder builder(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {2, 3});
HloInstruction* p0 =
builder.AddInstruction(HloInstruction::CreateParameter(0, shape, "p0"));
HloInstruction* p1 =
builder.AddInstruction(HloInstruction::CreateParameter(1, shape, "p1"));
HloInstruction* negate0 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, p0));
HloInstruction* negate1 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate0));
HloInstruction* negate2 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate1));
HloInstruction* negate3 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate2));
HloInstruction* negate4 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate3));
HloInstruction* negate5 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate4));
HloInstruction* negate6 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate5));
HloInstruction* add = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, negate6, p1));
auto module = CreateNewVerifiedModule();
HloComputation* computation = module->AddEntryComputation(builder.Build());
HloSchedule schedule(module.get());
schedule.set_sequence(computation, {p0, p1, negate0, negate1, negate2,
negate3, negate4, negate5, negate6, add});
TF_CHECK_OK(module->set_schedule(schedule));
AssignMemorySpace(module.get());
EXPECT_THAT(add, op::Add(op::Negate(), op::AsyncCopy(kAlternateMemorySpace,
kDefaultMemorySpace,
op::Parameter(1))));
EXPECT_THAT(p0, op::ShapeWithLayout(shape));
EXPECT_THAT(p1, op::ShapeWithLayout(shape));
Shape shape_in_alternate_mem = ShapeUtil::MakeShapeWithDenseLayout(
F32, {2, 3},
/*minor_to_major=*/{1, 0},
/*tiles=*/{},
/*tail_padding_alignment_in_elements=*/1,
/*element_size_in_bits=*/0, kAlternateMemorySpace);
EXPECT_THAT(negate0, op::ShapeWithLayout(shape_in_alternate_mem));
EXPECT_THAT(negate1, op::ShapeWithLayout(shape_in_alternate_mem));
EXPECT_THAT(negate2, op::ShapeWithLayout(shape_in_alternate_mem));
EXPECT_THAT(negate3, op::ShapeWithLayout(shape_in_alternate_mem));
EXPECT_THAT(negate4, op::ShapeWithLayout(shape_in_alternate_mem));
EXPECT_THAT(negate5, op::ShapeWithLayout(shape_in_alternate_mem));
EXPECT_THAT(negate6, op::ShapeWithLayout(shape_in_alternate_mem));
const HloInstructionSequence& sequence =
module->schedule().sequence(computation);
EXPECT_THAT(sequence.instructions()[0], op::Parameter(0));
EXPECT_THAT(sequence.instructions()[1], op::Parameter(1));
EXPECT_THAT(sequence.instructions()[2], op::CopyStart());
EXPECT_THAT(sequence.instructions()[10], op::CopyDone());
}
TEST_F(MemorySpaceAssignmentTest,
SyncCopyReplacementRedundantCopyAfterPrefetch) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
ENTRY entry {
p0 = f32[2,3]{1,0} parameter(0)
p1 = f32[2,3]{1,0} parameter(1)
negate0 = f32[2,3]{1,0} negate(p1)
negate1 = f32[2,3]{1,0} negate(negate0)
negate2 = f32[2,3]{1,0} negate(negate1)
negate3 = f32[2,3]{1,0} negate(negate2)
negate4 = f32[2,3]{1,0} negate(negate3)
negate5 = f32[2,3]{1,0} negate(negate4)
negate6 = f32[2,3]{1,0} negate(negate5)
negate7 = f32[2,3]{1,0} negate(negate6)
p0_copy = f32[2,3]{1,0} copy(p0)
ROOT add0 = f32[2,3]{1,0} add(p0_copy, negate7)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
Options options = DefaultMemorySpaceOptions();
options.enable_sync_copy_replacement = true;
AssignMemorySpace(module.get(), options);
HloInstruction* add0 = FindInstruction(module.get(), "add0");
ASSERT_NE(add0, nullptr);
HloInstruction* p0 = FindInstruction(module.get(), "p0");
ASSERT_NE(p0, nullptr);
EXPECT_THAT(add0->operand(0),
op::AsyncCopy(kAlternateMemorySpace, kDefaultMemorySpace, p0));
}
TEST_F(MemorySpaceAssignmentTest,
SyncCopyReplacementWouldNeedMoreThanOneAsyncCopy) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
ENTRY entry {
p0 = f32[2,3]{1,0} parameter(0)
p1 = f32[2,3]{1,0} parameter(1)
negate0 = f32[2,3]{1,0} negate(p1)
negate1 = f32[2,3]{1,0} negate(negate0)
negate2 = f32[2,3]{1,0} negate(negate1)
negate3 = f32[2,3]{1,0} negate(negate2)
negate4 = f32[2,3]{1,0} negate(negate3)
negate5 = f32[2,3]{1,0} negate(negate4)
negate6 = f32[2,3]{1,0} negate(negate5)
negate7 = f32[2,3]{1,0} negate(negate6)
p0_copy = f32[2,3]{1,0} copy(p0)
ROOT tuple0 = tuple(negate7, p0, p0_copy)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
Options options = DefaultMemorySpaceOptions();
options.enable_sync_copy_replacement = true;
AssignMemorySpace(module.get(), options);
HloInstruction* tuple0 = FindInstruction(module.get(), "tuple0");
ASSERT_NE(tuple0->operand(1), tuple0->operand(2));
}
TEST_F(MemorySpaceAssignmentTest, SyncCopyReplacementOperandHasMultipleUses) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
ENTRY entry {
p0 = f32[2,3]{1,0} parameter(0)
p1 = f32[2,3]{1,0} parameter(1)
negate0 = f32[2,3]{1,0} negate(p1)
negate1 = f32[2,3]{1,0} negate(negate0)
negate2 = f32[2,3]{1,0} negate(negate1)
negate3 = f32[2,3]{1,0} negate(negate2)
negate4 = f32[2,3]{1,0} negate(negate3)
negate5 = f32[2,3]{1,0} negate(negate4)
negate6 = f32[2,3]{1,0} negate(negate5)
negate7 = f32[2,3]{1,0} negate(negate6)
p0_copy = f32[2,3]{1,0} copy(p0)
add0 = add(p0_copy, p0)
ROOT tuple = tuple(negate7, add0)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
Options options = DefaultMemorySpaceOptions();
options.enable_sync_copy_replacement = true;
AssignMemorySpace(module.get(), options);
HloInstruction* add0 = FindInstruction(module.get(), "add0");
ASSERT_EQ(add0->operand(0), add0->operand(1));
}
TEST_F(MemorySpaceAssignmentTest, AlwaysSpillJitPrefetchTest) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
ENTRY entry {
p0 = f32[2,3]{1,0} parameter(0)
p1 = f32[2,3]{1,0} parameter(1)
negate0 = f32[2,3]{1,0} negate(p0)
negate1 = f32[2,3]{1,0} negate(negate0)
negate2 = f32[2,3]{1,0} negate(negate1)
negate3 = f32[2,3]{1,0} negate(negate2)
negate4 = f32[2,3]{1,0} negate(negate3)
negate5 = f32[2,3]{1,0} negate(negate4)
negate6 = f32[2,3]{1,0} negate(negate5)
ROOT add = f32[2,3]{1,0} add(negate6, p1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
Options options = DefaultMemorySpaceOptions();
options.always_spill_to_default_memory = true;
AssignMemorySpace(module.get(), options);
const HloInstructionSequence& sequence =
module->schedule().sequence(module->entry_computation());
for (int i = 0; i < sequence.instructions().size(); ++i) {
VLOG(2) << i << " " << sequence.instructions()[i]->ToString();
}
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloAliasAnalysis> alias_analysis,
HloAliasAnalysis::Run(module.get()));
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloLiveRange> live_range,
HloLiveRange::Run(module->schedule(), *alias_analysis,
module->entry_computation()));
const HloInstruction* add = FindInstruction(module.get(), "add");
const HloInstruction* cd = add->operand(1);
EXPECT_THAT(cd, op::CopyDone());
EXPECT_EQ(live_range->instruction_schedule().at(add),
live_range->instruction_schedule().at(cd) + 1);
const HloInstruction* cs = cd->operand(0);
EXPECT_THAT(cs, op::CopyStart());
EXPECT_EQ(live_range->instruction_schedule().at(add),
live_range->instruction_schedule().at(cs) + 2);
EXPECT_THAT(add, op::Add(op::Negate(), op::AsyncCopy(kAlternateMemorySpace,
kDefaultMemorySpace,
op::Parameter(1))));
}
TEST_F(MemorySpaceAssignmentTest, AlwaysSpillPrefetchForSecondUseTest) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
ENTRY entry {
p0 = f32[2,3]{1,0} parameter(0)
p1 = f32[2,3]{1,0} parameter(1)
negate0 = f32[2,3]{1,0} negate(p0)
negate1 = f32[2,3]{1,0} negate(negate0)
negate2 = f32[2,3]{1,0} negate(negate1)
negate3 = f32[2,3]{1,0} negate(negate2)
negate4 = f32[2,3]{1,0} negate(negate3)
negate5 = f32[2,3]{1,0} negate(negate4)
add0 = f32[2,3]{1,0} add(negate5, negate0)
ROOT add1 = f32[2,3]{1,0} add(add0, p1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
Options options = DefaultMemorySpaceOptions();
options.always_spill_to_default_memory = true;
AssignMemorySpace(module.get(), options);
const HloInstructionSequence& sequence =
module->schedule().sequence(module->entry_computation());
for (int i = 0; i < sequence.instructions().size(); ++i) {
VLOG(2) << i << " " << sequence.instructions()[i]->ToString();
}
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloAliasAnalysis> alias_analysis,
HloAliasAnalysis::Run(module.get()));
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloLiveRange> live_range,
HloLiveRange::Run(module->schedule(), *alias_analysis,
module->entry_computation()));
const HloInstruction* add1 = FindInstruction(module.get(), "add1");
const HloInstruction* cd1 = add1->operand(1);
EXPECT_THAT(cd1, op::CopyDone());
EXPECT_EQ(live_range->instruction_schedule().at(add1),
live_range->instruction_schedule().at(cd1) + 1);
const HloInstruction* cs1 = cd1->operand(0);
EXPECT_THAT(cs1, op::CopyStart());
EXPECT_EQ(live_range->instruction_schedule().at(add1),
live_range->instruction_schedule().at(cs1) + 2);
EXPECT_EQ(cd1->shape().layout().memory_space(), kAlternateMemorySpace);
const HloInstruction* add0 = FindInstruction(module.get(), "add0");
const HloInstruction* cd0 = add0->operand(1);
EXPECT_THAT(cd0, op::CopyDone());
EXPECT_EQ(live_range->instruction_schedule().at(add0),
live_range->instruction_schedule().at(cd0) + 1);
const HloInstruction* cs0 = cd0->operand(0);
EXPECT_THAT(cs0, op::CopyStart());
EXPECT_EQ(live_range->instruction_schedule().at(add0),
live_range->instruction_schedule().at(cs0) + 2);
EXPECT_EQ(cd0->shape().layout().memory_space(), kAlternateMemorySpace);
const HloInstruction* eviction_done = cs0->operand(0);
EXPECT_EQ(eviction_done->shape().layout().memory_space(),
kDefaultMemorySpace);
const HloInstruction* eviction_start = eviction_done->operand(0);
const HloInstruction* negate0 = eviction_start->operand(0);
EXPECT_EQ(live_range->instruction_schedule().at(eviction_start),
live_range->instruction_schedule().at(negate0) + 1);
EXPECT_EQ(live_range->instruction_schedule().at(eviction_done),
live_range->instruction_schedule().at(negate0) + 2);
EXPECT_EQ(negate0->name(), "negate0");
}
TEST_F(MemorySpaceAssignmentTest, AlwaysSpillEvictionTest) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
ENTRY entry {
p0 = f32[4,3]{1,0} parameter(0)
tanh0 = f32[4,3]{1,0} tanh(p0)
add0 = f32[4,3]{1,0} add(p0, p0)
add1 = f32[4,3]{1,0} add(add0, p0)
add2 = f32[4,3]{1,0} add(add1, p0)
add3 = f32[4,3]{1,0} add(add2, p0)
add4 = f32[4,3]{1,0} add(add3, p0)
add5 = f32[4,3]{1,0} add(add4, tanh0)
negate0 = f32[4,3]{1,0} negate(add5)
tanh1 = f32[4,3]{1,0} tanh(negate0)
negate1 = f32[4,3]{1,0} negate(negate0)
tanh2 = f32[4,3]{1,0} tanh(tanh1)
negate2 = f32[4,3]{1,0} negate(negate1)
ROOT tuple = tuple(tanh0, tanh2, negate2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
Options options = DefaultMemorySpaceOptions();
options.always_spill_to_default_memory = true;
AssignMemorySpace(module.get(), options);
const HloInstructionSequence& sequence =
module->schedule().sequence(module->entry_computation());
for (int i = 0; i < sequence.instructions().size(); ++i) {
VLOG(2) << i << " " << sequence.instructions()[i]->ToString();
}
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloAliasAnalysis> alias_analysis,
HloAliasAnalysis::Run(module.get()));
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloLiveRange> live_range,
HloLiveRange::Run(module->schedule(), *alias_analysis,
module->entry_computation()));
const HloInstruction* tuple = FindInstruction(module.get(), "tuple");
const HloInstruction* tanh0_eviction_done = tuple->operand(0);
const HloInstruction* tanh0_eviction_start = tanh0_eviction_done->operand(0);
const HloInstruction* tanh0 = tanh0_eviction_start->operand(0);
EXPECT_EQ(tanh0->name(), "tanh0");
EXPECT_EQ(tanh0_eviction_done->shape().layout().memory_space(),
kDefaultMemorySpace);
EXPECT_EQ(live_range->instruction_schedule().at(tanh0_eviction_start),
live_range->instruction_schedule().at(tanh0) + 1);
EXPECT_EQ(live_range->instruction_schedule().at(tanh0_eviction_done),
live_range->instruction_schedule().at(tanh0) + 2);
const HloInstruction* add5 = FindInstruction(module.get(), "add5");
const HloInstruction* tanh0_prefetch_done = add5->operand(1);
const HloInstruction* tanh0_prefetch_start = tanh0_prefetch_done->operand(0);
EXPECT_EQ(tanh0_prefetch_done->shape().layout().memory_space(),
kAlternateMemorySpace);
EXPECT_EQ(live_range->instruction_schedule().at(add5),
live_range->instruction_schedule().at(tanh0_prefetch_done) + 1);
EXPECT_EQ(live_range->instruction_schedule().at(add5),
live_range->instruction_schedule().at(tanh0_prefetch_start) + 2);
EXPECT_EQ(tanh0_eviction_done, tanh0_prefetch_start->operand(0));
}
TEST_F(MemorySpaceAssignmentTest, FilterUpdatePreferredPrefetchTest) {
HloComputation::Builder builder(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {2, 3});
HloInstruction* p0 =
builder.AddInstruction(HloInstruction::CreateParameter(0, shape, "p0"));
HloInstruction* p1 =
builder.AddInstruction(HloInstruction::CreateParameter(1, shape, "p1"));
HloInstruction* negate0 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, p0));
HloInstruction* negate1 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate0));
HloInstruction* negate2 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate1));
HloInstruction* negate3 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate2));
HloInstruction* negate4 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate3));
HloInstruction* negate5 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate4));
HloInstruction* negate6 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate5));
HloInstruction* add = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, negate6, p1));
auto module = CreateNewVerifiedModule();
HloComputation* computation = module->AddEntryComputation(builder.Build());
HloSchedule schedule(module.get());
schedule.set_sequence(computation, {p0, p1, negate0, negate1, negate2,
negate3, negate4, negate5, negate6, add});
TF_CHECK_OK(module->set_schedule(schedule));
Options options = DefaultMemorySpaceOptions();
const std::string text_proto = R"pb(
overrides {
hlo_operand_filter { size_lte: 24 size_gte: 24 }
override_options { prefetch_eagerness: 0.5 }
})pb";
TF_ASSERT_OK_AND_ASSIGN(
options.preferred_prefetch_overrides,
ParseTextProto<PreferredPrefetchOverrides>(text_proto));
AssignMemorySpace(module.get(), options);
EXPECT_THAT(add, op::Add(op::Negate(), op::AsyncCopy(kAlternateMemorySpace,
kDefaultMemorySpace,
op::Parameter(1))));
EXPECT_THAT(p0, op::ShapeWithLayout(shape));
EXPECT_THAT(p1, op::ShapeWithLayout(shape));
Shape shape_in_alternate_mem = ShapeUtil::MakeShapeWithDenseLayout(
F32, {2, 3},
/*minor_to_major=*/{1, 0},
/*tiles=*/{},
/*tail_padding_alignment_in_elements=*/1,
/*element_size_in_bits=*/0, kAlternateMemorySpace);
EXPECT_THAT(negate0, op::ShapeWithLayout(shape_in_alternate_mem));
EXPECT_THAT(negate1, op::ShapeWithLayout(shape_in_alternate_mem));
EXPECT_THAT(negate2, op::ShapeWithLayout(shape_in_alternate_mem));
EXPECT_THAT(negate3, op::ShapeWithLayout(shape_in_alternate_mem));
EXPECT_THAT(negate4, op::ShapeWithLayout(shape_in_alternate_mem));
EXPECT_THAT(negate5, op::ShapeWithLayout(shape_in_alternate_mem));
EXPECT_THAT(negate6, op::ShapeWithLayout(shape_in_alternate_mem));
const HloInstructionSequence& sequence =
module->schedule().sequence(computation);
EXPECT_THAT(sequence.instructions()[0], op::Parameter(0));
EXPECT_THAT(sequence.instructions()[1], op::Parameter(1));
EXPECT_THAT(sequence.instructions()[6], op::CopyStart());
EXPECT_THAT(sequence.instructions()[10], op::CopyDone());
}
TEST_F(MemorySpaceAssignmentTest, FilterUpdateConfigExactMatchBeforeTest) {
HloComputation::Builder builder(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {2, 3});
HloInstruction* p0 =
builder.AddInstruction(HloInstruction::CreateParameter(0, shape, "p0"));
HloInstruction* p1 =
builder.AddInstruction(HloInstruction::CreateParameter(1, shape, "p1"));
HloInstruction* negate0 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, p0));
HloInstruction* negate1 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate0));
HloInstruction* negate2 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate1));
HloInstruction* negate3 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate2));
HloInstruction* negate4 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate3));
HloInstruction* negate5 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate4));
HloInstruction* negate6 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate5));
HloInstruction* add = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, negate6, p1));
auto module = CreateNewVerifiedModule();
HloComputation* computation = module->AddEntryComputation(builder.Build());
HloSchedule schedule(module.get());
schedule.set_sequence(computation, {p0, p1, negate0, negate1, negate2,
negate3, negate4, negate5, negate6, add});
TF_CHECK_OK(module->set_schedule(schedule));
Options options = DefaultMemorySpaceOptions();
const std::string text_proto = R"pb(
overrides {
hlo_operand_filter { instruction_name_regex: "add" operand_number: 1 }
override_options { before_instruction_name: "negate.3" }
})pb";
TF_ASSERT_OK_AND_ASSIGN(
options.preferred_prefetch_overrides,
ParseTextProto<PreferredPrefetchOverrides>(text_proto));
AssignMemorySpace(module.get(), options);
EXPECT_THAT(add, op::Add(op::Negate(), op::AsyncCopy(kAlternateMemorySpace,
kDefaultMemorySpace,
op::Parameter(1))));
EXPECT_THAT(p0, op::ShapeWithLayout(shape));
EXPECT_THAT(p1, op::ShapeWithLayout(shape));
Shape shape_in_alternate_mem = ShapeUtil::MakeShapeWithDenseLayout(
F32, {2, 3},
/*minor_to_major=*/{1, 0},
/*tiles=*/{},
/*tail_padding_alignment_in_elements=*/1,
/*element_size_in_bits=*/0, kAlternateMemorySpace);
EXPECT_THAT(negate0, op::ShapeWithLayout(shape_in_alternate_mem));
EXPECT_THAT(negate1, op::ShapeWithLayout(shape_in_alternate_mem));
EXPECT_THAT(negate2, op::ShapeWithLayout(shape_in_alternate_mem));
EXPECT_THAT(negate3, op::ShapeWithLayout(shape_in_alternate_mem));
EXPECT_THAT(negate4, op::ShapeWithLayout(shape_in_alternate_mem));
EXPECT_THAT(negate5, op::ShapeWithLayout(shape_in_alternate_mem));
EXPECT_THAT(negate6, op::ShapeWithLayout(shape_in_alternate_mem));
const HloInstructionSequence& sequence =
module->schedule().sequence(computation);
EXPECT_THAT(sequence.instructions()[0], op::Parameter(0));
EXPECT_THAT(sequence.instructions()[1], op::Parameter(1));
EXPECT_THAT(sequence.instructions()[5], op::CopyStart());
EXPECT_THAT(sequence.instructions()[10], op::CopyDone());
}
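// Tests that a preferred prefetch override with after_instruction_name places
// the prefetch's CopyStart immediately after the named instruction (negate.1).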
TEST_F(MemorySpaceAssignmentTest, FilterUpdateConfigExactMatchAfterTest) {
HloComputation::Builder builder(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {2, 3});
HloInstruction* p0 =
builder.AddInstruction(HloInstruction::CreateParameter(0, shape, "p0"));
HloInstruction* p1 =
builder.AddInstruction(HloInstruction::CreateParameter(1, shape, "p1"));
HloInstruction* negate0 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, p0));
HloInstruction* negate1 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate0));
HloInstruction* negate2 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate1));
HloInstruction* negate3 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate2));
HloInstruction* negate4 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate3));
HloInstruction* negate5 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate4));
HloInstruction* negate6 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate5));
HloInstruction* add = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, negate6, p1));
auto module = CreateNewVerifiedModule();
HloComputation* computation = module->AddEntryComputation(builder.Build());
HloSchedule schedule(module.get());
schedule.set_sequence(computation, {p0, p1, negate0, negate1, negate2,
negate3, negate4, negate5, negate6, add});
TF_CHECK_OK(module->set_schedule(schedule));
Options options = DefaultMemorySpaceOptions();
const std::string text_proto = R"pb(
overrides {
hlo_operand_filter { instruction_name_regex: "add" operand_number: 1 }
override_options { after_instruction_name: "negate.1" }
})pb";
TF_ASSERT_OK_AND_ASSIGN(
options.preferred_prefetch_overrides,
ParseTextProto<PreferredPrefetchOverrides>(text_proto));
AssignMemorySpace(module.get(), options);
EXPECT_THAT(add, op::Add(op::Negate(), op::AsyncCopy(kAlternateMemorySpace,
kDefaultMemorySpace,
op::Parameter(1))));
EXPECT_THAT(p0, op::ShapeWithLayout(shape));
EXPECT_THAT(p1, op::ShapeWithLayout(shape));
  Shape shape_in_alternate_mem = ShapeUtil::MakeShapeWithDenseLayout(
      F32, {2, 3},
      /*minor_to_major=*/{1, 0}, /*tiles=*/{},
      /*tail_padding_alignment_in_elements=*/1, /*element_size_in_bits=*/0,
      kAlternateMemorySpace);
EXPECT_THAT(negate0, op::ShapeWithLayout(shape_in_alternate_mem));
EXPECT_THAT(negate1, op::ShapeWithLayout(shape_in_alternate_mem));
EXPECT_THAT(negate2, op::ShapeWithLayout(shape_in_alternate_mem));
EXPECT_THAT(negate3, op::ShapeWithLayout(shape_in_alternate_mem));
EXPECT_THAT(negate4, op::ShapeWithLayout(shape_in_alternate_mem));
EXPECT_THAT(negate5, op::ShapeWithLayout(shape_in_alternate_mem));
EXPECT_THAT(negate6, op::ShapeWithLayout(shape_in_alternate_mem));
const HloInstructionSequence& sequence =
module->schedule().sequence(computation);
EXPECT_THAT(sequence.instructions()[0], op::Parameter(0));
EXPECT_THAT(sequence.instructions()[1], op::Parameter(1));
EXPECT_THAT(sequence.instructions()[4], op::CopyStart());
EXPECT_THAT(sequence.instructions()[10], op::CopyDone());
}
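// Tests that an override requesting the prefetch after negate.5 is too late
// to be honored, so no async copy is inserted and the add consumes the
// parameter directly from default memory.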
TEST_F(MemorySpaceAssignmentTest, FilterUpdateConfigExactMatchTooLateTest) {
HloComputation::Builder builder(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {2, 3});
HloInstruction* p0 =
builder.AddInstruction(HloInstruction::CreateParameter(0, shape, "p0"));
HloInstruction* p1 =
builder.AddInstruction(HloInstruction::CreateParameter(1, shape, "p1"));
HloInstruction* negate0 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, p0));
HloInstruction* negate1 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate0));
HloInstruction* negate2 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate1));
HloInstruction* negate3 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate2));
HloInstruction* negate4 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate3));
HloInstruction* negate5 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate4));
HloInstruction* negate6 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate5));
HloInstruction* add = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, negate6, p1));
auto module = CreateNewVerifiedModule();
HloComputation* computation = module->AddEntryComputation(builder.Build());
HloSchedule schedule(module.get());
schedule.set_sequence(computation, {p0, p1, negate0, negate1, negate2,
negate3, negate4, negate5, negate6, add});
TF_CHECK_OK(module->set_schedule(schedule));
Options options = DefaultMemorySpaceOptions();
const std::string text_proto = R"pb(
overrides {
hlo_operand_filter { instruction_name_regex: "add" operand_number: 1 }
override_options { after_instruction_name: "negate.5" }
})pb";
TF_ASSERT_OK_AND_ASSIGN(
options.preferred_prefetch_overrides,
ParseTextProto<PreferredPrefetchOverrides>(text_proto));
AssignMemorySpace(module.get(), options);
EXPECT_THAT(add, op::Add(op::Negate(), op::Parameter(1)));
EXPECT_THAT(p0, op::ShapeWithLayout(shape));
EXPECT_THAT(p1, op::ShapeWithLayout(shape));
  Shape shape_in_alternate_mem = ShapeUtil::MakeShapeWithDenseLayout(
      F32, {2, 3},
      /*minor_to_major=*/{1, 0}, /*tiles=*/{},
      /*tail_padding_alignment_in_elements=*/1, /*element_size_in_bits=*/0,
      kAlternateMemorySpace);
EXPECT_THAT(negate0, op::ShapeWithLayout(shape_in_alternate_mem));
EXPECT_THAT(negate1, op::ShapeWithLayout(shape_in_alternate_mem));
EXPECT_THAT(negate2, op::ShapeWithLayout(shape_in_alternate_mem));
EXPECT_THAT(negate3, op::ShapeWithLayout(shape_in_alternate_mem));
EXPECT_THAT(negate4, op::ShapeWithLayout(shape_in_alternate_mem));
EXPECT_THAT(negate5, op::ShapeWithLayout(shape_in_alternate_mem));
EXPECT_THAT(negate6, op::ShapeWithLayout(shape_in_alternate_mem));
}
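// Tests that when multiple overrides match the same operand, the first one
// listed in the proto wins: here the size-based filter (24 bytes) with
// prefetch_eagerness 0.5 takes precedence over the later name-based override.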
TEST_F(MemorySpaceAssignmentTest, FilterUpdateConfigPrecedenceTest) {
HloComputation::Builder builder(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {2, 3});
HloInstruction* p0 =
builder.AddInstruction(HloInstruction::CreateParameter(0, shape, "p0"));
HloInstruction* p1 =
builder.AddInstruction(HloInstruction::CreateParameter(1, shape, "p1"));
HloInstruction* negate0 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, p0));
HloInstruction* negate1 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate0));
HloInstruction* negate2 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate1));
HloInstruction* negate3 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate2));
HloInstruction* negate4 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate3));
HloInstruction* negate5 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate4));
HloInstruction* negate6 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate5));
HloInstruction* add = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, negate6, p1));
auto module = CreateNewVerifiedModule();
HloComputation* computation = module->AddEntryComputation(builder.Build());
HloSchedule schedule(module.get());
schedule.set_sequence(computation, {p0, p1, negate0, negate1, negate2,
negate3, negate4, negate5, negate6, add});
TF_CHECK_OK(module->set_schedule(schedule));
Options options = DefaultMemorySpaceOptions();
const std::string text_proto = R"pb(
overrides {
hlo_operand_filter { size_lte: 24 size_gte: 24 }
override_options { prefetch_eagerness: 0.5 }
}
overrides {
hlo_operand_filter { instruction_name_regex: "add" operand_number: 1 }
override_options { after_instruction_name: "negate.1" }
})pb";
TF_ASSERT_OK_AND_ASSIGN(
options.preferred_prefetch_overrides,
ParseTextProto<PreferredPrefetchOverrides>(text_proto));
AssignMemorySpace(module.get(), options);
EXPECT_THAT(add, op::Add(op::Negate(), op::AsyncCopy(kAlternateMemorySpace,
kDefaultMemorySpace,
op::Parameter(1))));
EXPECT_THAT(p0, op::ShapeWithLayout(shape));
EXPECT_THAT(p1, op::ShapeWithLayout(shape));
  Shape shape_in_alternate_mem = ShapeUtil::MakeShapeWithDenseLayout(
      F32, {2, 3},
      /*minor_to_major=*/{1, 0}, /*tiles=*/{},
      /*tail_padding_alignment_in_elements=*/1, /*element_size_in_bits=*/0,
      kAlternateMemorySpace);
EXPECT_THAT(negate0, op::ShapeWithLayout(shape_in_alternate_mem));
EXPECT_THAT(negate1, op::ShapeWithLayout(shape_in_alternate_mem));
EXPECT_THAT(negate2, op::ShapeWithLayout(shape_in_alternate_mem));
EXPECT_THAT(negate3, op::ShapeWithLayout(shape_in_alternate_mem));
EXPECT_THAT(negate4, op::ShapeWithLayout(shape_in_alternate_mem));
EXPECT_THAT(negate5, op::ShapeWithLayout(shape_in_alternate_mem));
EXPECT_THAT(negate6, op::ShapeWithLayout(shape_in_alternate_mem));
const HloInstructionSequence& sequence =
module->schedule().sequence(computation);
EXPECT_THAT(sequence.instructions()[0], op::Parameter(0));
EXPECT_THAT(sequence.instructions()[1], op::Parameter(1));
EXPECT_THAT(sequence.instructions()[6], op::CopyStart());
EXPECT_THAT(sequence.instructions()[10], op::CopyDone());
}
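// Same as above with the override order reversed: the name-based override
// (after negate.1) is listed first and therefore wins over the size filter.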
TEST_F(MemorySpaceAssignmentTest, FilterUpdateConfigExactMatchPrecedenceTest) {
HloComputation::Builder builder(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {2, 3});
HloInstruction* p0 =
builder.AddInstruction(HloInstruction::CreateParameter(0, shape, "p0"));
HloInstruction* p1 =
builder.AddInstruction(HloInstruction::CreateParameter(1, shape, "p1"));
HloInstruction* negate0 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, p0));
HloInstruction* negate1 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate0));
HloInstruction* negate2 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate1));
HloInstruction* negate3 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate2));
HloInstruction* negate4 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate3));
HloInstruction* negate5 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate4));
HloInstruction* negate6 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate5));
HloInstruction* add = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, negate6, p1));
auto module = CreateNewVerifiedModule();
HloComputation* computation = module->AddEntryComputation(builder.Build());
HloSchedule schedule(module.get());
schedule.set_sequence(computation, {p0, p1, negate0, negate1, negate2,
negate3, negate4, negate5, negate6, add});
TF_CHECK_OK(module->set_schedule(schedule));
Options options = DefaultMemorySpaceOptions();
const std::string text_proto = R"pb(
overrides {
hlo_operand_filter { instruction_name_regex: "add" operand_number: 1 }
override_options { after_instruction_name: "negate.1" }
}
overrides {
hlo_operand_filter { size_lte: 24 size_gte: 24 }
override_options { prefetch_eagerness: 0.5 }
}
)pb";
TF_ASSERT_OK_AND_ASSIGN(
options.preferred_prefetch_overrides,
ParseTextProto<PreferredPrefetchOverrides>(text_proto));
AssignMemorySpace(module.get(), options);
EXPECT_THAT(add, op::Add(op::Negate(), op::AsyncCopy(kAlternateMemorySpace,
kDefaultMemorySpace,
op::Parameter(1))));
EXPECT_THAT(p0, op::ShapeWithLayout(shape));
EXPECT_THAT(p1, op::ShapeWithLayout(shape));
  Shape shape_in_alternate_mem = ShapeUtil::MakeShapeWithDenseLayout(
      F32, {2, 3},
      /*minor_to_major=*/{1, 0}, /*tiles=*/{},
      /*tail_padding_alignment_in_elements=*/1, /*element_size_in_bits=*/0,
      kAlternateMemorySpace);
EXPECT_THAT(negate0, op::ShapeWithLayout(shape_in_alternate_mem));
EXPECT_THAT(negate1, op::ShapeWithLayout(shape_in_alternate_mem));
EXPECT_THAT(negate2, op::ShapeWithLayout(shape_in_alternate_mem));
EXPECT_THAT(negate3, op::ShapeWithLayout(shape_in_alternate_mem));
EXPECT_THAT(negate4, op::ShapeWithLayout(shape_in_alternate_mem));
EXPECT_THAT(negate5, op::ShapeWithLayout(shape_in_alternate_mem));
EXPECT_THAT(negate6, op::ShapeWithLayout(shape_in_alternate_mem));
const HloInstructionSequence& sequence =
module->schedule().sequence(computation);
EXPECT_THAT(sequence.instructions()[0], op::Parameter(0));
EXPECT_THAT(sequence.instructions()[1], op::Parameter(1));
EXPECT_THAT(sequence.instructions()[4], op::CopyStart());
EXPECT_THAT(sequence.instructions()[10], op::CopyDone());
}
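// Tests that an unsatisfiable filter (size <= 24 and size >= 25) matches no
// operand, so the default prefetch placement is used and the CopyStart lands
// right after the parameters.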
TEST_F(MemorySpaceAssignmentTest, FilterUpdatePreferredPrefetchNoMatchTest) {
HloComputation::Builder builder(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {2, 3});
HloInstruction* p0 =
builder.AddInstruction(HloInstruction::CreateParameter(0, shape, "p0"));
HloInstruction* p1 =
builder.AddInstruction(HloInstruction::CreateParameter(1, shape, "p1"));
HloInstruction* negate0 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, p0));
HloInstruction* negate1 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate0));
HloInstruction* negate2 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate1));
HloInstruction* negate3 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate2));
HloInstruction* negate4 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate3));
HloInstruction* negate5 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate4));
HloInstruction* negate6 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate5));
HloInstruction* add = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, negate6, p1));
auto module = CreateNewVerifiedModule();
HloComputation* computation = module->AddEntryComputation(builder.Build());
HloSchedule schedule(module.get());
schedule.set_sequence(computation, {p0, p1, negate0, negate1, negate2,
negate3, negate4, negate5, negate6, add});
TF_CHECK_OK(module->set_schedule(schedule));
Options options = DefaultMemorySpaceOptions();
const std::string text_proto = R"pb(
overrides {
hlo_operand_filter { size_lte: 24 size_gte: 25 }
override_options { prefetch_eagerness: 0.5 }
}
)pb";
TF_ASSERT_OK_AND_ASSIGN(
options.preferred_prefetch_overrides,
ParseTextProto<PreferredPrefetchOverrides>(text_proto));
AssignMemorySpace(module.get(), options);
EXPECT_THAT(add, op::Add(op::Negate(), op::AsyncCopy(kAlternateMemorySpace,
kDefaultMemorySpace,
op::Parameter(1))));
EXPECT_THAT(p0, op::ShapeWithLayout(shape));
EXPECT_THAT(p1, op::ShapeWithLayout(shape));
  Shape shape_in_alternate_mem = ShapeUtil::MakeShapeWithDenseLayout(
      F32, {2, 3},
      /*minor_to_major=*/{1, 0}, /*tiles=*/{},
      /*tail_padding_alignment_in_elements=*/1, /*element_size_in_bits=*/0,
      kAlternateMemorySpace);
EXPECT_THAT(negate0, op::ShapeWithLayout(shape_in_alternate_mem));
EXPECT_THAT(negate1, op::ShapeWithLayout(shape_in_alternate_mem));
EXPECT_THAT(negate2, op::ShapeWithLayout(shape_in_alternate_mem));
EXPECT_THAT(negate3, op::ShapeWithLayout(shape_in_alternate_mem));
EXPECT_THAT(negate4, op::ShapeWithLayout(shape_in_alternate_mem));
EXPECT_THAT(negate5, op::ShapeWithLayout(shape_in_alternate_mem));
EXPECT_THAT(negate6, op::ShapeWithLayout(shape_in_alternate_mem));
const HloInstructionSequence& sequence =
module->schedule().sequence(computation);
EXPECT_THAT(sequence.instructions()[0], op::Parameter(0));
EXPECT_THAT(sequence.instructions()[1], op::Parameter(1));
EXPECT_THAT(sequence.instructions()[2], op::CopyStart());
EXPECT_THAT(sequence.instructions()[10], op::CopyDone());
}
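// Tests the basic eviction/prefetch pattern: the tanh value is evicted to
// default memory and later prefetched back into alternate memory for the
// root add.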
TEST_F(MemorySpaceAssignmentTest, EvictAndPrefetch) {
std::unique_ptr<HloModule> module = CreateEvictAndPrefetchModule();
AssignMemorySpace(module.get());
EXPECT_THAT(
module->entry_computation()->root_instruction(),
op::Add(op::Add(),
op::AsyncCopy(kAlternateMemorySpace, kDefaultMemorySpace,
op::AsyncCopy(kDefaultMemorySpace,
kAlternateMemorySpace, op::Tanh()))));
}
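// The next three tests cap the number of outstanding async copies at 0, 1,
// and 2 respectively and verify the limit is respected.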
TEST_F(MemorySpaceAssignmentTest, EvictAndPrefetchLimitAsyncCopies0) {
std::unique_ptr<HloModule> module = CreateEvictAndPrefetchModule();
AssignMemorySpace(module.get(),
UpdateMaxAsyncCopies(DefaultMemorySpaceOptions(), 0));
EXPECT_LE(CountMaximumOutstandingAsyncCopies(*module).max_prefetches, 0);
EXPECT_LE(CountMaximumOutstandingAsyncCopies(*module).max_evictions, 0);
}
TEST_F(MemorySpaceAssignmentTest, EvictAndPrefetchLimitAsyncCopies1) {
std::unique_ptr<HloModule> module = CreateEvictAndPrefetchModule();
AssignMemorySpace(module.get(),
UpdateMaxAsyncCopies(DefaultMemorySpaceOptions(), 1));
EXPECT_LE(CountMaximumOutstandingAsyncCopies(*module).max_prefetches, 1);
EXPECT_LE(CountMaximumOutstandingAsyncCopies(*module).max_evictions, 1);
}
TEST_F(MemorySpaceAssignmentTest, EvictAndPrefetchLimitAsyncCopies2) {
std::unique_ptr<HloModule> module = CreateEvictAndPrefetchModule();
AssignMemorySpace(module.get(),
UpdateMaxAsyncCopies(DefaultMemorySpaceOptions(), 2));
EXPECT_LE(CountMaximumOutstandingAsyncCopies(*module).max_prefetches, 2);
EXPECT_LE(CountMaximumOutstandingAsyncCopies(*module).max_evictions, 2);
}
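// Disabled: appears intended to verify that a value which already has a
// default-memory allocation is not redundantly evicted when the number of
// outstanding async copies is limited to 1.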
TEST_F(MemorySpaceAssignmentTest,
DISABLED_DontEvictWhenThereIsDefaultMemAllocation) {
HloComputation::Builder builder(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {2, 3});
HloInstruction* p0 =
builder.AddInstruction(HloInstruction::CreateParameter(0, shape, "p0"));
HloInstruction* p1 =
builder.AddInstruction(HloInstruction::CreateParameter(1, shape, "p1"));
HloInstruction* tanh = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kTanh, p0));
HloInstruction* a = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, p0, tanh));
HloInstruction* b = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kSubtract, p0, p1));
HloInstruction* c = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kMultiply, p0, p1));
HloInstruction* d = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kSubtract, p0, p1));
HloInstruction* e = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kMultiply, a, b));
HloInstruction* f = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kMultiply, a, c));
HloInstruction* g = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kMultiply, a, d));
HloInstruction* h = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kMultiply, b, c));
HloInstruction* i = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kMultiply, b, d));
HloInstruction* j = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kMultiply, c, d));
HloInstruction* k = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, e, f));
HloInstruction* l = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, g, h));
HloInstruction* m = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, i, j));
HloInstruction* n = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, k, l));
HloInstruction* o = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, n, m));
HloInstruction* add = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, o, tanh));
auto module = CreateNewVerifiedModule();
HloComputation* computation = module->AddEntryComputation(builder.Build());
HloSchedule schedule(module.get());
schedule.set_sequence(computation, {p0, p1, tanh, a, b, c, d, e, f, g, h, i,
j, k, l, m, n, o, add});
TF_CHECK_OK(module->set_schedule(schedule));
AssignMemorySpace(module.get(),
UpdateMaxAsyncCopies(DefaultMemorySpaceOptions(), 1));
EXPECT_THAT(f, op::Multiply(op::Add(), op::CopyDone()));
EXPECT_THAT(h, op::Multiply(op::Subtract(), op::Multiply()));
}
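// Tests that an evicted value (tanh) can be prefetched back more than once,
// feeding both add0 and add1.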
TEST_F(MemorySpaceAssignmentTest, EvictAndPrefetchAndPrefetch) {
HloComputation::Builder builder(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {2, 3});
HloInstruction* p0 =
builder.AddInstruction(HloInstruction::CreateParameter(0, shape, "p0"));
HloInstruction* p1 =
builder.AddInstruction(HloInstruction::CreateParameter(1, shape, "p1"));
HloInstruction* tanh = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kTanh, p0));
HloInstruction* a = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, p0, tanh));
HloInstruction* b = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kSubtract, p0, p1));
HloInstruction* c = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kMultiply, p0, p1));
HloInstruction* d = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kSubtract, p0, p1));
HloInstruction* e = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kMultiply, a, b));
HloInstruction* f = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kMultiply, a, c));
HloInstruction* g = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kMultiply, a, d));
HloInstruction* h = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kMultiply, b, c));
HloInstruction* i = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kMultiply, b, d));
HloInstruction* j = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kMultiply, c, d));
HloInstruction* k = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, e, f));
HloInstruction* l = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, g, h));
HloInstruction* m = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, i, j));
HloInstruction* n = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, k, l));
HloInstruction* o = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, n, m));
HloInstruction* add0 = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, o, tanh));
HloInstruction* negate0 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, add0));
HloInstruction* negate1 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate0));
HloInstruction* negate2 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate1));
HloInstruction* negate3 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate2));
HloInstruction* negate4 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate3));
HloInstruction* negate5 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate4));
HloInstruction* negate6 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate5));
HloInstruction* negate7 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate6));
HloInstruction* negate8 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate7));
HloInstruction* negate9 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate8));
HloInstruction* add1 = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, negate9, tanh));
auto module = CreateNewVerifiedModule();
HloComputation* computation = module->AddEntryComputation(builder.Build());
HloSchedule schedule(module.get());
schedule.set_sequence(
computation,
{p0, p1, tanh, a, b, c, d, e,
f, g, h, i, j, k, l, m,
n, o, add0, negate0, negate1, negate2, negate3, negate4,
negate5, negate6, negate7, negate8, negate9, add1});
TF_CHECK_OK(module->set_schedule(schedule));
AssignMemorySpace(module.get());
EXPECT_THAT(
add0,
op::Add(op::Add(),
op::AsyncCopy(kAlternateMemorySpace, kDefaultMemorySpace,
op::AsyncCopy(kDefaultMemorySpace,
kAlternateMemorySpace, op::Tanh()))));
EXPECT_THAT(
add1,
op::Add(op::Negate(),
op::AsyncCopy(kAlternateMemorySpace, kDefaultMemorySpace,
op::AsyncCopy(kDefaultMemorySpace,
kAlternateMemorySpace, op::Tanh()))));
}
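// Tests memory space assignment through a simple while loop; the multiply
// inside the loop body should get an alternate-memory output.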
TEST_F(MemorySpaceAssignmentTest, While) {
auto module = CreateNewVerifiedModule();
Shape shape = ShapeUtil::MakeShape(xla::F32, {2, 3});
Shape scalar_shape = ShapeUtil::MakeShape(xla::F32, {});
Shape tuple_shape = ShapeUtil::MakeTupleShape({shape, scalar_shape});
auto cond_builder = HloComputation::Builder("WhileCond");
HloInstruction* cond_param = cond_builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "cond_param"));
HloInstruction* cond_iter = cond_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape, cond_param, 1));
HloInstruction* cond_limit = cond_builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(50.f)));
HloInstruction* cond_lt = cond_builder.AddInstruction(
HloInstruction::CreateCompare(ShapeUtil::MakeShape(PRED, {}), cond_iter,
cond_limit, ComparisonDirection::kLt));
HloComputation* cond_computation =
module->AddEmbeddedComputation(cond_builder.Build());
auto body_builder = HloComputation::Builder("WhileBody");
HloInstruction* body_param = body_builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "body_param"));
HloInstruction* body_iter = body_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape, body_param, 1));
HloInstruction* body_data = body_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(shape, body_param, 0));
HloInstruction* body_iter_increment = body_builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.f)));
HloInstruction* body_iter_next =
body_builder.AddInstruction(HloInstruction::CreateBinary(
scalar_shape, HloOpcode::kAdd, body_iter, body_iter_increment));
HloInstruction* body_data_increment =
body_builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR2<float>({{1.f, 2.f, 3.f}, {4.f, 5.f, 6.f}})));
HloInstruction* body_data_mul =
body_builder.AddInstruction(HloInstruction::CreateBinary(
shape, HloOpcode::kMultiply, body_data, body_data));
HloInstruction* body_data_add =
body_builder.AddInstruction(HloInstruction::CreateBinary(
shape, HloOpcode::kAdd, body_data, body_data_increment));
HloInstruction* body_data_next =
body_builder.AddInstruction(HloInstruction::CreateBinary(
shape, HloOpcode::kAdd, body_data_add, body_data_mul));
HloInstruction* body_out = body_builder.AddInstruction(
HloInstruction::CreateTuple({body_data_next, body_iter_next}));
HloComputation* body_computation =
module->AddEmbeddedComputation(body_builder.Build());
auto builder = HloComputation::Builder(TestName());
  HloInstruction* data = builder.AddInstruction(
      HloInstruction::CreateParameter(0, shape, "param_data"));
  HloInstruction* iter = builder.AddInstruction(
      HloInstruction::CreateParameter(1, scalar_shape, "param_iter"));
HloInstruction* tuple =
builder.AddInstruction(HloInstruction::CreateTuple({data, iter}));
HloInstruction* while_op = builder.AddInstruction(HloInstruction::CreateWhile(
tuple_shape, cond_computation, body_computation, tuple));
HloComputation* entry_computation =
module->AddEntryComputation(builder.Build());
HloSchedule schedule(module.get());
schedule.set_sequence(cond_computation,
{cond_param, cond_iter, cond_limit, cond_lt});
schedule.set_sequence(body_computation,
{body_param, body_iter, body_data, body_iter_increment,
body_iter_next, body_data_increment, body_data_mul,
body_data_add, body_data_next, body_out});
schedule.set_sequence(entry_computation, {iter, data, tuple, while_op});
TF_CHECK_OK(module->set_schedule(schedule));
LOG(INFO) << module->ToString(HloPrintOptions::ShortParsable());
AssignMemorySpace(module.get());
  Shape shape_in_alternate_mem = ShapeUtil::MakeShapeWithDenseLayout(
      F32, {2, 3},
      /*minor_to_major=*/{1, 0}, /*tiles=*/{},
      /*tail_padding_alignment_in_elements=*/1, /*element_size_in_bits=*/0,
      kAlternateMemorySpace);
EXPECT_THAT(body_data_mul, op::ShapeWithLayout(shape_in_alternate_mem));
}
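// Tests that get-tuple-elements of a tuple parameter, including an element of
// a nested tuple, are prefetched into alternate memory at their uses.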
TEST_F(MemorySpaceAssignmentTest, Tuple) {
HloComputation::Builder builder(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {2, 3});
Shape inner_tuple_shape = ShapeUtil::MakeTupleShape({shape});
Shape tuple_shape =
ShapeUtil::MakeTupleShape({shape, shape, inner_tuple_shape});
HloInstruction* p = builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "p"));
HloInstruction* p0 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(shape, p, 0));
HloInstruction* negate0 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, p0));
HloInstruction* negate1 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate0));
HloInstruction* negate2 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate1));
HloInstruction* negate3 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate2));
HloInstruction* negate4 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate3));
HloInstruction* negate5 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate4));
HloInstruction* negate6 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate5));
HloInstruction* p1 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(shape, p, 1));
HloInstruction* add = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, negate6, p1));
HloInstruction* p2 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(inner_tuple_shape, p, 2));
HloInstruction* p2_0 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(shape, p2, 0));
HloInstruction* mul = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kMultiply, add, p2_0));
auto module = CreateNewVerifiedModule();
HloComputation* computation = module->AddEntryComputation(builder.Build());
HloSchedule schedule(module.get());
schedule.set_sequence(
computation, {p, p0, negate0, negate1, negate2, negate3, negate4, negate5,
negate6, p1, add, p2, p2_0, mul});
TF_CHECK_OK(module->set_schedule(schedule));
AssignMemorySpace(module.get());
EXPECT_THAT(
mul,
op::Multiply(op::Add(op::Negate(), op::AsyncCopy(kAlternateMemorySpace,
kDefaultMemorySpace,
op::GetTupleElement())),
op::AsyncCopy(kAlternateMemorySpace, kDefaultMemorySpace,
op::GetTupleElement(op::GetTupleElement()))));
}
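// Bitcasts are layout no-ops for memory space assignment; the bitcast feeding
// the add should end up assigned to alternate memory.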
TEST_F(MemorySpaceAssignmentTest, Bitcast) {
HloComputation::Builder builder(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {2, 3});
Shape param_shape = ShapeUtil::MakeShape(F32, {6});
HloInstruction* p0 =
builder.AddInstruction(HloInstruction::CreateParameter(0, shape, "p0"));
HloInstruction* p1 = builder.AddInstruction(
HloInstruction::CreateParameter(1, param_shape, "p1"));
HloInstruction* negate = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, p0));
HloInstruction* bitcast = builder.AddInstruction(
HloInstruction::CreateBitcast(param_shape, negate));
HloInstruction* add = builder.AddInstruction(
HloInstruction::CreateBinary(param_shape, HloOpcode::kAdd, bitcast, p1));
auto module = CreateNewVerifiedModule();
HloComputation* computation = module->AddEntryComputation(builder.Build());
HloSchedule schedule(module.get());
schedule.set_sequence(computation, {p0, p1, negate, bitcast, add});
TF_CHECK_OK(module->set_schedule(schedule));
AssignMemorySpace(module.get());
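  // The bitcast instruction may be replaced during assignment, so re-fetch it
  // through the add's operand before inspecting its layout.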
bitcast = add->mutable_operand(0);
EXPECT_EQ(bitcast->opcode(), HloOpcode::kBitcast);
EXPECT_EQ(bitcast->shape().layout().memory_space(), kAlternateMemorySpace);
}
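// Tests that a bitcast of a parameter can be prefetched: the add's operand
// should end up in alternate memory.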
TEST_F(MemorySpaceAssignmentTest, Bitcast2) {
HloComputation::Builder builder(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {2, 3});
Shape param_shape = ShapeUtil::MakeShape(F32, {6});
HloInstruction* p0 =
builder.AddInstruction(HloInstruction::CreateParameter(0, shape, "p0"));
HloInstruction* p1 = builder.AddInstruction(
HloInstruction::CreateParameter(1, param_shape, "p1"));
HloInstruction* negate0 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, p0));
HloInstruction* negate1 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate0));
HloInstruction* negate2 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate1));
HloInstruction* negate3 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate2));
HloInstruction* negate4 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate3));
HloInstruction* bitcast =
builder.AddInstruction(HloInstruction::CreateBitcast(shape, p1));
HloInstruction* add = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, bitcast, negate4));
auto module = CreateNewVerifiedModule();
HloComputation* computation = module->AddEntryComputation(builder.Build());
HloSchedule schedule(module.get());
schedule.set_sequence(computation, {p0, p1, negate0, negate1, negate2,
negate3, negate4, bitcast, add});
TF_CHECK_OK(module->set_schedule(schedule));
AssignMemorySpace(module.get());
EXPECT_EQ(add->operand(0)->shape().layout().memory_space(),
kAlternateMemorySpace);
}
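// Tests chains of bitcasts, including a bitcast of a bitcast, feeding both an
// add and a multiply; all of them should resolve to alternate memory.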
TEST_F(MemorySpaceAssignmentTest, Bitcast3) {
HloComputation::Builder builder(TestName());
Shape shape1 = ShapeUtil::MakeShape(F32, {2, 3});
Shape shape2 = ShapeUtil::MakeShape(F32, {3, 2});
Shape shape3 = ShapeUtil::MakeShape(F32, {1, 6});
Shape param_shape = ShapeUtil::MakeShape(F32, {6});
HloInstruction* p0 =
builder.AddInstruction(HloInstruction::CreateParameter(0, shape1, "p0"));
HloInstruction* p1 = builder.AddInstruction(
HloInstruction::CreateParameter(1, param_shape, "p1"));
HloInstruction* negate0 = builder.AddInstruction(
HloInstruction::CreateUnary(shape1, HloOpcode::kNegate, p0));
HloInstruction* negate1 = builder.AddInstruction(
HloInstruction::CreateUnary(shape1, HloOpcode::kNegate, negate0));
HloInstruction* negate2 = builder.AddInstruction(
HloInstruction::CreateUnary(shape1, HloOpcode::kNegate, negate1));
HloInstruction* negate3 = builder.AddInstruction(
HloInstruction::CreateUnary(shape1, HloOpcode::kNegate, negate2));
HloInstruction* negate4 = builder.AddInstruction(
HloInstruction::CreateUnary(shape1, HloOpcode::kNegate, negate3));
HloInstruction* bitcast1 =
builder.AddInstruction(HloInstruction::CreateBitcast(shape1, p1));
HloInstruction* add = builder.AddInstruction(
HloInstruction::CreateBinary(shape1, HloOpcode::kAdd, bitcast1, negate4));
HloInstruction* bitcast2 =
builder.AddInstruction(HloInstruction::CreateBitcast(shape3, p1));
HloInstruction* bitcast3 =
builder.AddInstruction(HloInstruction::CreateBitcast(shape2, bitcast2));
HloInstruction* bitcast4 =
builder.AddInstruction(HloInstruction::CreateBitcast(shape2, add));
HloInstruction* mul = builder.AddInstruction(HloInstruction::CreateBinary(
shape2, HloOpcode::kMultiply, bitcast3, bitcast4));
auto module = CreateNewVerifiedModule();
HloComputation* computation = module->AddEntryComputation(builder.Build());
HloSchedule schedule(module.get());
schedule.set_sequence(computation,
{p0, p1, negate0, negate1, negate2, negate3, negate4,
bitcast1, add, bitcast2, bitcast3, bitcast4, mul});
TF_CHECK_OK(module->set_schedule(schedule));
AssignMemorySpace(module.get());
EXPECT_THAT(
mul,
op::Multiply(
op::Bitcast(op::AsyncCopy(kAlternateMemorySpace, kDefaultMemorySpace,
op::Parameter(1))),
op::Bitcast(op::Add(
op::Bitcast(op::AsyncCopy(kAlternateMemorySpace,
kDefaultMemorySpace, op::Parameter(1))),
op::Negate()))));
EXPECT_EQ(add->operand(0)->shape().layout().memory_space(),
kAlternateMemorySpace);
EXPECT_EQ(add->shape().layout().memory_space(), kAlternateMemorySpace);
EXPECT_EQ(mul->operand(0)->shape().layout().memory_space(),
kAlternateMemorySpace);
EXPECT_EQ(mul->operand(1)->shape().layout().memory_space(),
kAlternateMemorySpace);
}
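// Tests a bitcast that flows into a fusion through a tuple operand; memory
// space assignment should complete without errors.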
TEST_F(MemorySpaceAssignmentTest, BitcastTuple) {
HloComputation::Builder builder(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {2, 3});
Shape param_shape = ShapeUtil::MakeShape(F32, {6});
Shape tuple_shape = ShapeUtil::MakeTupleShape({shape, shape});
auto module = CreateNewVerifiedModule();
HloComputation::Builder fusion_builder("fusion");
HloInstruction* fusion_param = fusion_builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "p"));
HloInstruction* fusion_element0 = fusion_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(shape, fusion_param, 0));
HloInstruction* fusion_element1 = fusion_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(shape, fusion_param, 1));
fusion_builder.AddInstruction(HloInstruction::CreateBinary(
shape, HloOpcode::kAdd, fusion_element0, fusion_element1));
HloComputation* fusion_computation =
module->AddEmbeddedComputation(fusion_builder.Build());
HloInstruction* p0 =
builder.AddInstruction(HloInstruction::CreateParameter(0, shape, "p0"));
HloInstruction* p1 = builder.AddInstruction(
HloInstruction::CreateParameter(1, param_shape, "p1"));
HloInstruction* negate0 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, p0));
HloInstruction* negate1 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate0));
HloInstruction* negate2 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate1));
HloInstruction* negate3 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate2));
HloInstruction* negate4 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate3));
HloInstruction* bitcast =
builder.AddInstruction(HloInstruction::CreateBitcast(shape, p1));
HloInstruction* tuple =
builder.AddInstruction(HloInstruction::CreateTuple({bitcast, p0}));
HloInstruction* fusion = builder.AddInstruction(HloInstruction::CreateFusion(
shape, HloInstruction::FusionKind::kCustom, {tuple}, fusion_computation));
HloComputation* computation = module->AddEntryComputation(builder.Build());
HloSchedule schedule(module.get());
schedule.set_sequence(computation,
{p0, p1, negate0, negate1, negate2, negate3, negate4,
bitcast, tuple, fusion});
TF_CHECK_OK(module->set_schedule(schedule));
AssignMemorySpace(module.get());
}
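// Regression test: bitcasts of get-tuple-elements that are re-tupled and read
// back out should be handled without errors.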
TEST_F(MemorySpaceAssignmentTest, BitcastGetTupleElementTuple) {
absl::string_view hlo_string = R"(
HloModule DoIt_S64_10_0_5_1.3, is_scheduled=true
ENTRY %DoIt_S64_10_0_5_1.3 (p0.1: (u32[10], u32[10])) -> (u32[5], u32[5]) {
%p0.1 = (u32[10]{0:T(128)}, u32[10]{0:T(128)}) parameter(0)
%get-tuple-element.1 = u32[10]{0:T(128)} get-tuple-element((u32[10]{0:T(128)}, u32[10]{0:T(128)}) %p0.1), index=1
%bitcast.1 = u32[5]{0:T(128)} bitcast(u32[10]{0:T(128)} %get-tuple-element.1)
%get-tuple-element = u32[10]{0:T(128)} get-tuple-element((u32[10]{0:T(128)}, u32[10]{0:T(128)}) %p0.1), index=0
%bitcast = u32[5]{0:T(128)} bitcast(u32[10]{0:T(128)} %get-tuple-element)
%tuple.1 = (u32[5]{0:T(128)}, u32[5]{0:T(128)}) tuple(u32[5]{0:T(128)} %bitcast, u32[5]{0:T(128)} %bitcast.1)
%tuple.3 = ((u32[5]{0:T(128)}, u32[5]{0:T(128)}), (u32[5]{0:T(128)}, u32[5]{0:T(128)})) tuple(%tuple.1, %tuple.1)
%get-tuple-element.4 = u32[5]{0:T(128)} get-tuple-element((u32[5]{0:T(128)}, u32[5]{0:T(128)}) %tuple.1), index=0
%get-tuple-element.5 = (u32[5]{0:T(128)}, u32[5]{0:T(128)}) get-tuple-element(%tuple.3), index=0
%get-tuple-element.6 = u32[5]{0:T(128)} get-tuple-element((u32[5]{0:T(128)}, u32[5]{0:T(128)}) %get-tuple-element.5), index=1
%copy.2 = u32[5]{0:T(128)} copy(u32[5]{0:T(128)} %get-tuple-element.4)
%copy.3 = u32[5]{0:T(128)} copy(u32[5]{0:T(128)} %get-tuple-element.6)
ROOT %tuple.2 = (u32[5]{0:T(128)}, u32[5]{0:T(128)}) tuple(u32[5]{0:T(128)} %copy.2, u32[5]{0:T(128)} %copy.3)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
AssignMemorySpace(module.get());
}
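// Regression test, per its name, for a bug involving simplified operands:
// a tuple that is immediately unpacked and copied should assign cleanly.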
TEST_F(MemorySpaceAssignmentTest, GetSimplifiedOperandBug) {
absl::string_view hlo_string = R"(
HloModule sort.16, is_scheduled=true
ENTRY %sort.16 (param.0.1: s32[1], param.1.2: f32[1], param.2.3: u32[1], param.3.4: s32[1]) -> (s32[1], f32[1], u32[1], s32[1]) {
%param.3.4 = s32[1]{0:T(128)} parameter(3)
%param.2.3 = u32[1]{0:T(128)} parameter(2)
%param.1.2 = f32[1]{0:T(128)} parameter(1)
%param.0.1 = s32[1]{0:T(128)} parameter(0)
%tuple.1 = (s32[1]{0:T(128)}, f32[1]{0:T(128)}, u32[1]{0:T(128)}, s32[1]{0:T(128)}) tuple(s32[1]{0:T(128)} %param.0.1, f32[1]{0:T(128)} %param.1.2, u32[1]{0:T(128)} %param.2.3, s32[1]{0:T(128)} %param.3.4)
%get-tuple-element.4 = s32[1]{0:T(128)} get-tuple-element((s32[1]{0:T(128)}, f32[1]{0:T(128)}, u32[1]{0:T(128)}, s32[1]{0:T(128)}) %tuple.1), index=0
%get-tuple-element.5 = f32[1]{0:T(128)} get-tuple-element((s32[1]{0:T(128)}, f32[1]{0:T(128)}, u32[1]{0:T(128)}, s32[1]{0:T(128)}) %tuple.1), index=1
%get-tuple-element.6 = u32[1]{0:T(128)} get-tuple-element((s32[1]{0:T(128)}, f32[1]{0:T(128)}, u32[1]{0:T(128)}, s32[1]{0:T(128)}) %tuple.1), index=2
%get-tuple-element.7 = s32[1]{0:T(128)} get-tuple-element((s32[1]{0:T(128)}, f32[1]{0:T(128)}, u32[1]{0:T(128)}, s32[1]{0:T(128)}) %tuple.1), index=3
%copy.4 = s32[1]{0:T(128)} copy(s32[1]{0:T(128)} %get-tuple-element.4)
%copy.5 = f32[1]{0:T(128)} copy(f32[1]{0:T(128)} %get-tuple-element.5)
%copy.6 = u32[1]{0:T(128)} copy(u32[1]{0:T(128)} %get-tuple-element.6)
%copy.7 = s32[1]{0:T(128)} copy(s32[1]{0:T(128)} %get-tuple-element.7)
ROOT %tuple.2 = (s32[1]{0:T(128)}, f32[1]{0:T(128)}, u32[1]{0:T(128)}, s32[1]{0:T(128)}) tuple(s32[1]{0:T(128)} %copy.4, f32[1]{0:T(128)} %copy.5, u32[1]{0:T(128)} %copy.6, s32[1]{0:T(128)} %copy.7)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
AssignMemorySpace(module.get());
}
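// Tests a bitcast with multiple uses: the early use (negate0) reads it from
// default memory, while the late use (add) gets a prefetched alternate-memory
// copy.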
TEST_F(MemorySpaceAssignmentTest, BitcastMultiUse) {
HloComputation::Builder builder(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {2, 3});
Shape param_shape = ShapeUtil::MakeShape(F32, {6});
  HloInstruction* p0 = builder.AddInstruction(
      HloInstruction::CreateParameter(0, param_shape, "p0"));
HloInstruction* bitcast =
builder.AddInstruction(HloInstruction::CreateBitcast(shape, p0));
HloInstruction* negate0 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, bitcast));
HloInstruction* negate1 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate0));
HloInstruction* negate2 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate1));
HloInstruction* negate3 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate2));
HloInstruction* negate4 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate3));
HloInstruction* add = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, bitcast, negate4));
auto module = CreateNewVerifiedModule();
HloComputation* computation = module->AddEntryComputation(builder.Build());
HloSchedule schedule(module.get());
schedule.set_sequence(computation, {p0, bitcast, negate0, negate1, negate2,
negate3, negate4, add});
TF_CHECK_OK(module->set_schedule(schedule));
AssignMemorySpace(module.get());
  Shape shape_in_alternate_mem = ShapeUtil::MakeShapeWithDenseLayout(
      F32, {2, 3},
      /*minor_to_major=*/{1, 0}, /*tiles=*/{},
      /*tail_padding_alignment_in_elements=*/1, /*element_size_in_bits=*/0,
      kAlternateMemorySpace);
EXPECT_THAT(negate0->operand(0), op::ShapeWithLayout(shape));
EXPECT_THAT(add->operand(0), op::ShapeWithLayout(shape_in_alternate_mem));
}
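// Same multi-use bitcast scenario as above, but the late use reaches a fusion
// through a tuple operand.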
TEST_F(MemorySpaceAssignmentTest, BitcastMultiUseTuple) {
HloComputation::Builder builder(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {2, 3});
Shape param_shape = ShapeUtil::MakeShape(F32, {6});
Shape tuple_shape = ShapeUtil::MakeTupleShape({shape, shape});
auto module = CreateNewVerifiedModule();
HloComputation::Builder fusion_builder("fusion");
HloInstruction* fusion_param = fusion_builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "p"));
HloInstruction* fusion_element0 = fusion_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(shape, fusion_param, 0));
HloInstruction* fusion_element1 = fusion_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(shape, fusion_param, 1));
fusion_builder.AddInstruction(HloInstruction::CreateBinary(
shape, HloOpcode::kAdd, fusion_element0, fusion_element1));
HloComputation* fusion_computation =
module->AddEmbeddedComputation(fusion_builder.Build());
  HloInstruction* p0 = builder.AddInstruction(
      HloInstruction::CreateParameter(0, param_shape, "p0"));
HloInstruction* bitcast =
builder.AddInstruction(HloInstruction::CreateBitcast(shape, p0));
HloInstruction* negate0 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, bitcast));
HloInstruction* negate1 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate0));
HloInstruction* negate2 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate1));
HloInstruction* negate3 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate2));
HloInstruction* negate4 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate3));
HloInstruction* tuple =
builder.AddInstruction(HloInstruction::CreateTuple({bitcast, negate4}));
HloInstruction* fusion = builder.AddInstruction(HloInstruction::CreateFusion(
shape, HloInstruction::FusionKind::kCustom, {tuple}, fusion_computation));
HloComputation* computation = module->AddEntryComputation(builder.Build());
HloSchedule schedule(module.get());
schedule.set_sequence(computation, {p0, bitcast, negate0, negate1, negate2,
negate3, negate4, tuple, fusion});
TF_CHECK_OK(module->set_schedule(schedule));
AssignMemorySpace(module.get());
  Shape shape_in_alternate_mem = ShapeUtil::MakeShapeWithDenseLayout(
      F32, {2, 3},
      /*minor_to_major=*/{1, 0}, /*tiles=*/{},
      /*tail_padding_alignment_in_elements=*/1, /*element_size_in_bits=*/0,
      kAlternateMemorySpace);
EXPECT_THAT(negate0->operand(0), op::ShapeWithLayout(shape));
EXPECT_THAT(fusion->operand(0)->operand(0),
op::ShapeWithLayout(shape_in_alternate_mem));
}
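// Regression test: ensures the async copy for the bitcast operand is placed
// within the negate chain rather than at an arbitrary point in the schedule.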
TEST_F(MemorySpaceAssignmentTest, BitcastScheduleBug) {
HloComputation::Builder builder(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {2, 3});
Shape param_shape = ShapeUtil::MakeShape(F32, {6});
HloInstruction* p0 =
builder.AddInstruction(HloInstruction::CreateParameter(0, shape, "p0"));
HloInstruction* p1 = builder.AddInstruction(
HloInstruction::CreateParameter(1, param_shape, "p1"));
HloInstruction* bitcast =
builder.AddInstruction(HloInstruction::CreateBitcast(shape, p1));
HloInstruction* negate0 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, p0));
HloInstruction* negate1 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate0));
HloInstruction* negate2 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate1));
HloInstruction* negate3 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate2));
HloInstruction* negate4 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate3));
HloInstruction* negate5 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate4));
HloInstruction* negate6 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate5));
HloInstruction* negate7 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate6));
HloInstruction* negate8 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate7));
HloInstruction* negate9 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate8));
HloInstruction* add = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, bitcast, negate9));
auto module = CreateNewVerifiedModule();
HloComputation* computation = module->AddEntryComputation(builder.Build());
HloSchedule schedule(module.get());
schedule.set_sequence(
computation, {p0, p1, bitcast, negate0, negate1, negate2, negate3,
negate4, negate5, negate6, negate7, negate8, negate9, add});
TF_CHECK_OK(module->set_schedule(schedule));
  AssignMemorySpace(module.get(), DefaultMemorySpaceOptions(),
                    /*max_prefetch_interval=*/5, /*min_prefetch_interval=*/4);
EXPECT_EQ(add->operand(0)->shape().layout().memory_space(),
kAlternateMemorySpace);
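  // Each CopyStart should be surrounded by negates, and each CopyDone should
  // immediately follow a negate, i.e. the copy overlaps the negate chain.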
const auto& instructions =
module->schedule().sequence(module->entry_computation()).instructions();
for (int i = 0; i < instructions.size(); ++i) {
if (instructions.at(i)->opcode() == HloOpcode::kCopyStart) {
EXPECT_EQ(instructions.at(i - 1)->opcode(), HloOpcode::kNegate);
EXPECT_EQ(instructions.at(i + 1)->opcode(), HloOpcode::kNegate);
} else if (instructions.at(i)->opcode() == HloOpcode::kCopyDone) {
EXPECT_EQ(instructions.at(i - 1)->opcode(), HloOpcode::kNegate);
}
}
}
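// Tests that add-dependency instructions are handled: the root add consumes
// the add-dependency directly rather than a copy of it.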
TEST_F(MemorySpaceAssignmentTest, AddDependency) {
absl::string_view hlo_string = R"(
HloModule AddDependency, is_scheduled=true
ENTRY %AddDependency (p: f32[3]) -> f32[3] {
%p = f32[3]{0} parameter(0)
%neg0 = f32[3]{0} negate(f32[3]{0} %p)
%neg1 = f32[3]{0} negate(f32[3]{0} %neg0)
%neg2 = f32[3]{0} negate(f32[3]{0} %neg1)
%neg3 = f32[3]{0} negate(f32[3]{0} %neg2)
%neg4 = f32[3]{0} negate(f32[3]{0} %neg3)
%neg5 = f32[3]{0} negate(f32[3]{0} %neg4)
%neg6 = f32[3]{0} negate(f32[3]{0} %neg5)
%token0 = token[] after-all()
%add_dep = f32[3]{0} add-dependency(f32[3]{0} %p, token[] %token0)
ROOT %add = f32[3]{0} add(f32[3]{0} %add_dep, f32[3]{0} %neg6)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
AssignMemorySpace(module.get());
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Add(op::AddDependency(), op::Negate()));
}
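// Regression test: if a while operand's tuple element is placed in alternate
// memory, copies out of the corresponding body parameter must be evictions to
// default memory, not prefetches back into alternate memory.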
TEST_F(MemorySpaceAssignmentTest, WhileAllocationBug) {
absl::string_view hlo_string = R"(
HloModule WhileAllocationBug, is_scheduled=true
%WhileBody (body_param: (f32[4,3], f32[])) -> (f32[4,3], f32[]) {
%body_param = (f32[4,3]{1,0}, f32[]) parameter(0)
%get-tuple-element.1 = f32[] get-tuple-element((f32[4,3]{1,0}, f32[]) %body_param), index=1
%get-tuple-element.2 = f32[4,3]{1,0} get-tuple-element((f32[4,3]{1,0}, f32[]) %body_param), index=0
%constant.1 = f32[] constant(1)
%add = f32[] add(f32[] %get-tuple-element.1, f32[] %constant.1)
%constant.2 = f32[4,3]{1,0} constant({ { 1, 2, 3 }, { 4, 5, 6 }, { 1, 2, 3 }, { 4, 5, 6 } })
%multiply = f32[4,3]{1,0} multiply(f32[4,3]{1,0} %get-tuple-element.2, f32[4,3]{1,0} %get-tuple-element.2)
%multiply2 = f32[4,3]{1,0} multiply(f32[4,3]{1,0} %multiply, f32[4,3]{1,0} %multiply)
%add.1 = f32[4,3]{1,0} add(f32[4,3]{1,0} %get-tuple-element.2, f32[4,3]{1,0} %constant.2)
%add.2 = f32[4,3]{1,0} add(f32[4,3]{1,0} %add.1, f32[4,3]{1,0} %multiply2)
ROOT %tuple = (f32[4,3]{1,0}, f32[]) tuple(f32[4,3]{1,0} %add.2, f32[] %add)
}
%WhileCond (cond_param: (f32[4,3], f32[])) -> pred[] {
%cond_param = (f32[4,3]{1,0}, f32[]) parameter(0)
%get-tuple-element = f32[] get-tuple-element((f32[4,3]{1,0}, f32[]) %cond_param), index=1
%constant = f32[] constant(50)
ROOT %compare = pred[] compare(f32[] %get-tuple-element, f32[] %constant), direction=LT
}
ENTRY %Entry (param_iter: f32[4,3], param_data: f32[], p2: f32[4,3]) -> f32[4,3] {
%param_data = f32[] parameter(1)
%param_iter = f32[4,3]{1,0} parameter(0)
%p2 = f32[4,3]{1,0} parameter(2)
%tanh = f32[4,3]{1,0} tanh(f32[4,3]{1,0} %param_iter)
%neg0 = f32[4,3]{1,0} negate(f32[4,3]{1,0} %p2)
%neg1 = f32[4,3]{1,0} negate(f32[4,3]{1,0} %neg0)
%neg2 = f32[4,3]{1,0} negate(f32[4,3]{1,0} %neg1)
%neg3 = f32[4,3]{1,0} negate(f32[4,3]{1,0} %neg2)
%neg4 = f32[4,3]{1,0} negate(f32[4,3]{1,0} %neg3)
%neg5 = f32[4,3]{1,0} negate(f32[4,3]{1,0} %neg4)
%neg6 = f32[4,3]{1,0} negate(f32[4,3]{1,0} %neg5)
%add.4 = f32[4,3]{1,0} add(f32[4,3]{1,0} %neg6, f32[4,3]{1,0} %tanh)
%tuple.1 = (f32[4,3]{1,0}, f32[]) tuple(f32[4,3]{1,0} %tanh, f32[] %param_data)
%while = (f32[4,3]{1,0}, f32[]) while((f32[4,3]{1,0}, f32[]) %tuple.1), condition=%WhileCond, body=%WhileBody
%get-tuple-element.3 = f32[4,3]{1,0} get-tuple-element((f32[4,3]{1,0}, f32[]) %while), index=0
ROOT %add.3 = f32[4,3]{1,0} add(f32[4,3]{1,0} %get-tuple-element.3, f32[4,3]{1,0} %add.4)
}
)";
MsaBufferIntervalCompare buffer_interval_compare =
[](const MsaBufferInterval& a, const MsaBufferInterval& b) {
bool a_is_mul =
a.buffer->defining_instruction()->opcode() == HloOpcode::kMultiply;
bool b_is_mul =
b.buffer->defining_instruction()->opcode() == HloOpcode::kMultiply;
if (a_is_mul && !b_is_mul) {
return true;
}
if (!a_is_mul && b_is_mul) {
return false;
}
bool a_is_tanh =
a.buffer->defining_instruction()->opcode() == HloOpcode::kTanh;
bool b_is_tanh =
b.buffer->defining_instruction()->opcode() == HloOpcode::kTanh;
if (a_is_tanh && !b_is_tanh) {
return true;
}
if (!a_is_tanh && b_is_tanh) {
return false;
}
return a.buffer->id() < b.buffer->id();
};
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
InstructionCountPrefetchIntervalPicker prefetch_interval_picker(2, 10);
AssignMemorySpace(module.get(), DefaultMemorySpaceOptions(),
buffer_interval_compare, &prefetch_interval_picker);
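  // If the while's {0} subshape lives in alternate memory, find the copy
  // started from the body parameter's get-tuple-element and check that it
  // targets default memory.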
for (const HloInstruction* instruction :
module->entry_computation()->instructions()) {
if (instruction->opcode() == HloOpcode::kWhile) {
const Shape& while_subshape =
ShapeUtil::GetSubshape(instruction->shape(), {0});
if (while_subshape.layout().memory_space() == kAlternateMemorySpace) {
const HloInstruction* body_param =
instruction->while_body()->parameter_instruction(0);
const HloInstruction* gte = nullptr;
for (const HloInstruction* user : body_param->users()) {
if (user->opcode() == HloOpcode::kGetTupleElement &&
user->tuple_index() == 0) {
gte = user;
break;
}
}
EXPECT_NE(gte, nullptr);
const HloInstruction* copy_start = nullptr;
for (const HloInstruction* user : gte->users()) {
if (user->opcode() == HloOpcode::kCopyStart) {
copy_start = user;
break;
}
}
EXPECT_NE(copy_start, nullptr);
const Shape& copy_start_subshape =
ShapeUtil::GetSubshape(copy_start->shape(), {0});
EXPECT_NE(copy_start_subshape.layout().memory_space(),
kAlternateMemorySpace);
}
}
}
}
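// Tests two back-to-back while loops that share a loop-carried value;
// assignment should succeed without verification failures.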
TEST_F(MemorySpaceAssignmentTest, ConsecutiveWhileLoops) {
absl::string_view hlo_string = R"(
HloModule ConsecutiveWhileLoops, is_scheduled=true
%WhileBody (body_param: (f32[4,3], f32[4,3], f32[])) -> (f32[4,3], f32[4,3], f32[]) {
%body_param = (f32[4,3]{1,0}, f32[4,3]{1,0}, f32[]) parameter(0)
%get-tuple-element.1 = f32[] get-tuple-element((f32[4,3]{1,0}, f32[4,3]{1,0}, f32[]) %body_param), index=2
%get-tuple-element.2 = f32[4,3]{1,0} get-tuple-element((f32[4,3]{1,0}, f32[4,3]{1,0}, f32[]) %body_param), index=0
%get-tuple-element.3 = f32[4,3]{1,0} get-tuple-element((f32[4,3]{1,0}, f32[4,3]{1,0}, f32[]) %body_param), index=1
%constant.1 = f32[] constant(1)
%add = f32[] add(f32[] %get-tuple-element.1, f32[] %constant.1)
%constant.2 = f32[4,3]{1,0} constant({ { 1, 2, 3 }, { 4, 5, 6 }, { 1, 2, 3 }, { 4, 5, 6 } })
%multiply = f32[4,3]{1,0} multiply(f32[4,3]{1,0} %get-tuple-element.2, f32[4,3]{1,0} %get-tuple-element.3)
%multiply2 = f32[4,3]{1,0} multiply(f32[4,3]{1,0} %multiply, f32[4,3]{1,0} %multiply)
%add.1 = f32[4,3]{1,0} add(f32[4,3]{1,0} %get-tuple-element.2, f32[4,3]{1,0} %constant.2)
%add.2 = f32[4,3]{1,0} add(f32[4,3]{1,0} %add.1, f32[4,3]{1,0} %multiply2)
ROOT %tuple = (f32[4,3]{1,0}, f32[4,3]{1,0}, f32[]) tuple(f32[4,3]{1,0} %add.2, f32[4,3]{1,0} %get-tuple-element.3, f32[] %add)
}
%WhileCond (cond_param: (f32[4,3], f32[4,3], f32[])) -> pred[] {
%cond_param = (f32[4,3]{1,0}, f32[4,3]{1,0}, f32[]) parameter(0)
%get-tuple-element = f32[] get-tuple-element((f32[4,3]{1,0}, f32[4,3]{1,0}, f32[]) %cond_param), index=2
%constant = f32[] constant(50)
ROOT %compare = pred[] compare(f32[] %get-tuple-element, f32[] %constant), direction=LT
}
%WhileBody2 (body_param: (f32[4,3], f32[4,3], f32[])) -> (f32[4,3], f32[4,3], f32[]) {
%body_param = (f32[4,3]{1,0}, f32[4,3]{1,0}, f32[]) parameter(0)
%get-tuple-element.1 = f32[] get-tuple-element((f32[4,3]{1,0}, f32[4,3]{1,0}, f32[]) %body_param), index=2
%get-tuple-element.2 = f32[4,3]{1,0} get-tuple-element((f32[4,3]{1,0}, f32[4,3]{1,0}, f32[]) %body_param), index=0
%get-tuple-element.3 = f32[4,3]{1,0} get-tuple-element((f32[4,3]{1,0}, f32[4,3]{1,0}, f32[]) %body_param), index=1
%constant.1 = f32[] constant(1)
%add = f32[] add(f32[] %get-tuple-element.1, f32[] %constant.1)
%constant.2 = f32[4,3]{1,0} constant({ { 1, 2, 3 }, { 4, 5, 6 }, { 1, 2, 3 }, { 4, 5, 6 } })
%multiply = f32[4,3]{1,0} multiply(f32[4,3]{1,0} %get-tuple-element.2, f32[4,3]{1,0} %get-tuple-element.3)
%multiply2 = f32[4,3]{1,0} multiply(f32[4,3]{1,0} %multiply, f32[4,3]{1,0} %multiply)
%add.1 = f32[4,3]{1,0} add(f32[4,3]{1,0} %get-tuple-element.2, f32[4,3]{1,0} %constant.2)
%add.2 = f32[4,3]{1,0} add(f32[4,3]{1,0} %add.1, f32[4,3]{1,0} %multiply2)
ROOT %tuple = (f32[4,3]{1,0}, f32[4,3]{1,0}, f32[]) tuple(f32[4,3]{1,0} %add.2, f32[4,3]{1,0} %get-tuple-element.3, f32[] %add)
}
%WhileCond2 (cond_param: (f32[4,3], f32[4,3], f32[])) -> pred[] {
%cond_param = (f32[4,3]{1,0}, f32[4,3]{1,0}, f32[]) parameter(0)
%get-tuple-element = f32[] get-tuple-element((f32[4,3]{1,0}, f32[4,3]{1,0}, f32[]) %cond_param), index=2
%constant = f32[] constant(50)
ROOT %compare = pred[] compare(f32[] %get-tuple-element, f32[] %constant), direction=LT
}
ENTRY %Entry (param_data: f32[4,3], param_iter: f32[], p2: f32[4,3]) -> f32[4,3] {
%param_iter = f32[] parameter(1)
%param_data = f32[4,3]{1,0} parameter(0)
%p2 = f32[4,3]{1,0} parameter(2)
%neg0 = f32[4,3]{1,0} negate(f32[4,3]{1,0} %p2)
%neg1 = f32[4,3]{1,0} negate(f32[4,3]{1,0} %neg0)
%neg2 = f32[4,3]{1,0} negate(f32[4,3]{1,0} %neg1)
%neg3 = f32[4,3]{1,0} negate(f32[4,3]{1,0} %neg2)
%neg4 = f32[4,3]{1,0} negate(f32[4,3]{1,0} %neg3)
%neg5 = f32[4,3]{1,0} negate(f32[4,3]{1,0} %neg4)
%neg6 = f32[4,3]{1,0} negate(f32[4,3]{1,0} %neg5)
%add.4 = f32[4,3]{1,0} add(f32[4,3]{1,0} %neg6, f32[4,3]{1,0} %p2)
%tuple.1 = (f32[4,3]{1,0}, f32[4,3]{1,0}, f32[]) tuple(f32[4,3]{1,0} add.4, f32[4,3]{1,0} param_data, f32[] %param_iter)
%while = (f32[4,3]{1,0}, f32[4,3]{1,0}, f32[]) while((f32[4,3]{1,0}, f32[4,3]{1,0}, f32[]) %tuple.1), condition=%WhileCond, body=%WhileBody
%get-tuple-element.4 = f32[4,3]{1,0} get-tuple-element((f32[4,3]{1,0}, f32[4,3]{1,0}, f32[]) %while), index=0
%add.3 = f32[4,3]{1,0} add(f32[4,3]{1,0} %get-tuple-element.4, f32[4,3]{1,0} %add.4)
%get-tuple-element.5 = f32[4,3]{1,0} get-tuple-element((f32[4,3]{1,0}, f32[4,3]{1,0}, f32[]) %while), index=1
%tuple.2 = (f32[4,3]{1,0}, f32[4,3]{1,0}, f32[]) tuple(f32[4,3]{1,0} add.3, f32[4,3]{1,0} get-tuple-element.5, f32[] %param_iter)
%while.1 = (f32[4,3]{1,0}, f32[4,3]{1,0}, f32[]) while((f32[4,3]{1,0}, f32[4,3]{1,0}, f32[]) %tuple.2), condition=%WhileCond2, body=%WhileBody2
%get-tuple-element.6 = f32[4,3]{1,0} get-tuple-element((f32[4,3]{1,0}, f32[4,3]{1,0}, f32[]) %while.1), index=0
ROOT %add.5 = f32[4,3]{1,0} add(f32[4,3]{1,0} %get-tuple-element.6, f32[4,3]{1,0} %add.3)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
AssignMemorySpace(module.get());
}
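// Regression test: a long negate chain inside the while body stretches the
// live ranges of the loop buffers; assignment should still succeed.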
TEST_F(MemorySpaceAssignmentTest, WhileLiveRangeBug) {
absl::string_view hlo_string = R"(
HloModule WhileAllocationBug, is_scheduled=true
%WhileBody (body_param: (f32[4,3], f32[4,3], f32[])) -> (f32[4,3], f32[4,3], f32[]) {
%body_param = (f32[4,3]{1,0}, f32[4,3]{1,0}, f32[]) parameter(0)
%get-tuple-element.1 = f32[] get-tuple-element((f32[4,3]{1,0}, f32[4,3]{1,0}, f32[]) %body_param), index=2
%get-tuple-element.2 = f32[4,3]{1,0} get-tuple-element((f32[4,3]{1,0}, f32[4,3]{1,0}, f32[]) %body_param), index=0
%get-tuple-element.3 = f32[4,3]{1,0} get-tuple-element((f32[4,3]{1,0}, f32[4,3]{1,0}, f32[]) %body_param), index=1
%neg10 = f32[4,3]{1,0} negate(f32[4,3]{1,0} %get-tuple-element.2)
%neg11 = f32[4,3]{1,0} negate(f32[4,3]{1,0} %neg10)
%neg12 = f32[4,3]{1,0} negate(f32[4,3]{1,0} %neg11)
%neg13 = f32[4,3]{1,0} negate(f32[4,3]{1,0} %neg12)
%neg14 = f32[4,3]{1,0} negate(f32[4,3]{1,0} %neg13)
%neg15 = f32[4,3]{1,0} negate(f32[4,3]{1,0} %neg14)
%neg16 = f32[4,3]{1,0} negate(f32[4,3]{1,0} %neg15)
%neg17 = f32[4,3]{1,0} negate(f32[4,3]{1,0} %neg16)
%neg18 = f32[4,3]{1,0} negate(f32[4,3]{1,0} %neg17)
%neg19 = f32[4,3]{1,0} negate(f32[4,3]{1,0} %neg18)
%neg20 = f32[4,3]{1,0} negate(f32[4,3]{1,0} %neg19)
%constant.1 = f32[] constant(1)
%add = f32[] add(f32[] %get-tuple-element.1, f32[] %constant.1)
%constant.2 = f32[4,3]{1,0} constant({ { 1, 2, 3 }, { 4, 5, 6 }, { 1, 2, 3 }, { 4, 5, 6 } })
%multiply = f32[4,3]{1,0} multiply(f32[4,3]{1,0} %neg20, f32[4,3]{1,0} %neg20)
%multiply2 = f32[4,3]{1,0} multiply(f32[4,3]{1,0} %multiply, f32[4,3]{1,0} %multiply)
%add.1 = f32[4,3]{1,0} add(f32[4,3]{1,0} get-tuple-element.3, f32[4,3]{1,0} %constant.2)
%add.2 = f32[4,3]{1,0} add(f32[4,3]{1,0} %add.1, f32[4,3]{1,0} %multiply2)
ROOT %tuple = (f32[4,3]{1,0}, f32[4,3]{1,0}, f32[]) tuple(f32[4,3]{1,0} %add.2, f32[4,3]{1,0} %get-tuple-element.3, f32[] %add)
}
%WhileCond (cond_param: (f32[4,3], f32[4,3], f32[])) -> pred[] {
%cond_param = (f32[4,3]{1,0}, f32[4,3]{1,0}, f32[]) parameter(0)
%get-tuple-element = f32[] get-tuple-element((f32[4,3]{1,0}, f32[4,3]{1,0}, f32[]) %cond_param), index=2
%constant = f32[] constant(50)
ROOT %compare = pred[] compare(f32[] %get-tuple-element, f32[] %constant), direction=LT
}
ENTRY %Entry (param_data: f32[4,3], param_iter: f32[], p2: f32[4,3]) -> f32[4,3] {
%param_iter = f32[] parameter(1)
%param_data = f32[4,3]{1,0} parameter(0)
%p2 = f32[4,3]{1,0} parameter(2)
%neg0 = f32[4,3]{1,0} negate(f32[4,3]{1,0} %p2)
%neg1 = f32[4,3]{1,0} negate(f32[4,3]{1,0} %neg0)
%neg2 = f32[4,3]{1,0} negate(f32[4,3]{1,0} %neg1)
%neg3 = f32[4,3]{1,0} negate(f32[4,3]{1,0} %neg2)
%neg4 = f32[4,3]{1,0} negate(f32[4,3]{1,0} %neg3)
%neg5 = f32[4,3]{1,0} negate(f32[4,3]{1,0} %neg4)
%neg6 = f32[4,3]{1,0} negate(f32[4,3]{1,0} %neg5)
%add.4 = f32[4,3]{1,0} add(f32[4,3]{1,0} %neg6, f32[4,3]{1,0} %p2)
%tuple.1 = (f32[4,3]{1,0}, f32[4,3]{1,0}, f32[]) tuple(f32[4,3]{1,0} add.4, f32[4,3]{1,0} param_data, f32[] %param_iter)
%while = (f32[4,3]{1,0}, f32[4,3]{1,0}, f32[]) while((f32[4,3]{1,0}, f32[4,3]{1,0}, f32[]) %tuple.1), condition=%WhileCond, body=%WhileBody
%get-tuple-element.4 = f32[4,3]{1,0} get-tuple-element((f32[4,3]{1,0}, f32[4,3]{1,0}, f32[]) %while), index=0
%get-tuple-element.5 = f32[4,3]{1,0} get-tuple-element((f32[4,3]{1,0}, f32[4,3]{1,0}, f32[]) %while), index=1
%add.3 = f32[4,3]{1,0} add(f32[4,3]{1,0} %get-tuple-element.4, f32[4,3]{1,0} %add.4)
ROOT %add.5 = f32[4,3]{1,0} add(f32[4,3]{1,0} %get-tuple-element.5, f32[4,3]{1,0} %add.3)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
AssignMemorySpace(module.get());
}
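// Variant of ConsecutiveWhileLoops in which both loops take param_data in
// their init tuples, so a single buffer spans both loops.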
TEST_F(MemorySpaceAssignmentTest, ConsecutiveWhileLoopsOneBuffer) {
absl::string_view hlo_string = R"(
HloModule WhileAllocationBug, is_scheduled=true
%WhileBody (body_param: (f32[4,3], f32[4,3], f32[])) -> (f32[4,3], f32[4,3], f32[]) {
%body_param = (f32[4,3]{1,0}, f32[4,3]{1,0}, f32[]) parameter(0)
%get-tuple-element.1 = f32[] get-tuple-element((f32[4,3]{1,0}, f32[4,3]{1,0}, f32[]) %body_param), index=2
%get-tuple-element.2 = f32[4,3]{1,0} get-tuple-element((f32[4,3]{1,0}, f32[4,3]{1,0}, f32[]) %body_param), index=0
%get-tuple-element.3 = f32[4,3]{1,0} get-tuple-element((f32[4,3]{1,0}, f32[4,3]{1,0}, f32[]) %body_param), index=1
%neg10 = f32[4,3]{1,0} negate(f32[4,3]{1,0} %get-tuple-element.2)
%neg11 = f32[4,3]{1,0} negate(f32[4,3]{1,0} %neg10)
%neg12 = f32[4,3]{1,0} negate(f32[4,3]{1,0} %neg11)
%neg13 = f32[4,3]{1,0} negate(f32[4,3]{1,0} %neg12)
%neg14 = f32[4,3]{1,0} negate(f32[4,3]{1,0} %neg13)
%neg15 = f32[4,3]{1,0} negate(f32[4,3]{1,0} %neg14)
%neg16 = f32[4,3]{1,0} negate(f32[4,3]{1,0} %neg15)
%neg17 = f32[4,3]{1,0} negate(f32[4,3]{1,0} %neg16)
%neg18 = f32[4,3]{1,0} negate(f32[4,3]{1,0} %neg17)
%neg19 = f32[4,3]{1,0} negate(f32[4,3]{1,0} %neg18)
%neg20 = f32[4,3]{1,0} negate(f32[4,3]{1,0} %neg19)
%constant.1 = f32[] constant(1)
%add = f32[] add(f32[] %get-tuple-element.1, f32[] %constant.1)
%constant.2 = f32[4,3]{1,0} constant({ { 1, 2, 3 }, { 4, 5, 6 }, { 1, 2, 3 }, { 4, 5, 6 } })
%multiply = f32[4,3]{1,0} multiply(f32[4,3]{1,0} %neg20, f32[4,3]{1,0} %neg20)
%multiply2 = f32[4,3]{1,0} multiply(f32[4,3]{1,0} %multiply, f32[4,3]{1,0} %multiply)
%add.1 = f32[4,3]{1,0} add(f32[4,3]{1,0} get-tuple-element.3, f32[4,3]{1,0} %constant.2)
%add.2 = f32[4,3]{1,0} add(f32[4,3]{1,0} %add.1, f32[4,3]{1,0} %multiply2)
ROOT %tuple = (f32[4,3]{1,0}, f32[4,3]{1,0}, f32[]) tuple(f32[4,3]{1,0} %add.2, f32[4,3]{1,0} %get-tuple-element.3, f32[] %add)
}
%WhileCond (cond_param: (f32[4,3], f32[4,3], f32[])) -> pred[] {
%cond_param = (f32[4,3]{1,0}, f32[4,3]{1,0}, f32[]) parameter(0)
%get-tuple-element = f32[] get-tuple-element((f32[4,3]{1,0}, f32[4,3]{1,0}, f32[]) %cond_param), index=2
%constant = f32[] constant(50)
ROOT %compare = pred[] compare(f32[] %get-tuple-element, f32[] %constant), direction=LT
}
%WhileBody2 (body_param: (f32[4,3], f32[4,3], f32[])) -> (f32[4,3], f32[4,3], f32[]) {
%body_param = (f32[4,3]{1,0}, f32[4,3]{1,0}, f32[]) parameter(0)
%get-tuple-element.1 = f32[] get-tuple-element((f32[4,3]{1,0}, f32[4,3]{1,0}, f32[]) %body_param), index=2
%get-tuple-element.2 = f32[4,3]{1,0} get-tuple-element((f32[4,3]{1,0}, f32[4,3]{1,0}, f32[]) %body_param), index=0
%get-tuple-element.3 = f32[4,3]{1,0} get-tuple-element((f32[4,3]{1,0}, f32[4,3]{1,0}, f32[]) %body_param), index=1
%neg10 = f32[4,3]{1,0} negate(f32[4,3]{1,0} %get-tuple-element.2)
%neg11 = f32[4,3]{1,0} negate(f32[4,3]{1,0} %neg10)
%neg12 = f32[4,3]{1,0} negate(f32[4,3]{1,0} %neg11)
%neg13 = f32[4,3]{1,0} negate(f32[4,3]{1,0} %neg12)
%neg14 = f32[4,3]{1,0} negate(f32[4,3]{1,0} %neg13)
%neg15 = f32[4,3]{1,0} negate(f32[4,3]{1,0} %neg14)
%neg16 = f32[4,3]{1,0} negate(f32[4,3]{1,0} %neg15)
%neg17 = f32[4,3]{1,0} negate(f32[4,3]{1,0} %neg16)
%neg18 = f32[4,3]{1,0} negate(f32[4,3]{1,0} %neg17)
%neg19 = f32[4,3]{1,0} negate(f32[4,3]{1,0} %neg18)
%neg20 = f32[4,3]{1,0} negate(f32[4,3]{1,0} %neg19)
%constant.1 = f32[] constant(1)
%add = f32[] add(f32[] %get-tuple-element.1, f32[] %constant.1)
%constant.2 = f32[4,3]{1,0} constant({ { 1, 2, 3 }, { 4, 5, 6 }, { 1, 2, 3 }, { 4, 5, 6 } })
%multiply = f32[4,3]{1,0} multiply(f32[4,3]{1,0} %neg20, f32[4,3]{1,0} %neg20)
%multiply2 = f32[4,3]{1,0} multiply(f32[4,3]{1,0} %multiply, f32[4,3]{1,0} %multiply)
%add.1 = f32[4,3]{1,0} add(f32[4,3]{1,0} get-tuple-element.3, f32[4,3]{1,0} %constant.2)
%add.2 = f32[4,3]{1,0} add(f32[4,3]{1,0} %add.1, f32[4,3]{1,0} %multiply2)
ROOT %tuple = (f32[4,3]{1,0}, f32[4,3]{1,0}, f32[]) tuple(f32[4,3]{1,0} %add.2, f32[4,3]{1,0} %get-tuple-element.3, f32[] %add)
}
%WhileCond2 (cond_param: (f32[4,3], f32[4,3], f32[])) -> pred[] {
%cond_param = (f32[4,3]{1,0}, f32[4,3]{1,0}, f32[]) parameter(0)
%get-tuple-element = f32[] get-tuple-element((f32[4,3]{1,0}, f32[4,3]{1,0}, f32[]) %cond_param), index=2
%constant = f32[] constant(50)
ROOT %compare = pred[] compare(f32[] %get-tuple-element, f32[] %constant), direction=LT
}
ENTRY %Entry (param_data: f32[4,3], param_iter: f32[], p2: f32[4,3]) -> f32[4,3] {
%param_iter = f32[] parameter(1)
%param_data = f32[4,3]{1,0} parameter(0)
%p2 = f32[4,3]{1,0} parameter(2)
%neg0 = f32[4,3]{1,0} negate(f32[4,3]{1,0} %p2)
%neg1 = f32[4,3]{1,0} negate(f32[4,3]{1,0} %neg0)
%neg2 = f32[4,3]{1,0} negate(f32[4,3]{1,0} %neg1)
%neg3 = f32[4,3]{1,0} negate(f32[4,3]{1,0} %neg2)
%neg4 = f32[4,3]{1,0} negate(f32[4,3]{1,0} %neg3)
%neg5 = f32[4,3]{1,0} negate(f32[4,3]{1,0} %neg4)
%neg6 = f32[4,3]{1,0} negate(f32[4,3]{1,0} %neg5)
%add.4 = f32[4,3]{1,0} add(f32[4,3]{1,0} %neg6, f32[4,3]{1,0} %p2)
%tuple.1 = (f32[4,3]{1,0}, f32[4,3]{1,0}, f32[]) tuple(f32[4,3]{1,0} add.4, f32[4,3]{1,0} param_data, f32[] %param_iter)
%while = (f32[4,3]{1,0}, f32[4,3]{1,0}, f32[]) while((f32[4,3]{1,0}, f32[4,3]{1,0}, f32[]) %tuple.1), condition=%WhileCond, body=%WhileBody
%get-tuple-element.4 = f32[4,3]{1,0} get-tuple-element((f32[4,3]{1,0}, f32[4,3]{1,0}, f32[]) %while), index=0
%add.3 = f32[4,3]{1,0} add(f32[4,3]{1,0} %get-tuple-element.4, f32[4,3]{1,0} %add.4)
%tuple.2 = (f32[4,3]{1,0}, f32[4,3]{1,0}, f32[]) tuple(f32[4,3]{1,0} add.3, f32[4,3]{1,0} param_data, f32[] %param_iter)
%while.1 = (f32[4,3]{1,0}, f32[4,3]{1,0}, f32[]) while((f32[4,3]{1,0}, f32[4,3]{1,0}, f32[]) %tuple.2), condition=%WhileCond2, body=%WhileBody2
%get-tuple-element.5 = f32[4,3]{1,0} get-tuple-element((f32[4,3]{1,0}, f32[4,3]{1,0}, f32[]) %while.1), index=0
%get-tuple-element.6 = f32[4,3]{1,0} get-tuple-element((f32[4,3]{1,0}, f32[4,3]{1,0}, f32[]) %while.1), index=1
ROOT %add.5 = f32[4,3]{1,0} add(f32[4,3]{1,0} %get-tuple-element.5, f32[4,3]{1,0} %get-tuple-element.6)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
AssignMemorySpace(module.get());
}
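// Regression test: the while condition bitcasts the loop state, so the
// condition computation aliases into the loop buffer.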
TEST_F(MemorySpaceAssignmentTest, WhileCondAliasBug) {
absl::string_view hlo_string = R"(
HloModule WhileWithPrngScalarResult.18, is_scheduled=true
%fused_computation (param_0.1: s32[6], param_1.3: s32[1], param_2.3: s32[5]) -> s32[6] {
%param_1.3 = s32[1]{0:T(128)} parameter(1)
%constant.2 = s32[]{:T(128)} constant(-2147483648)
%pad.2 = s32[6]{0:T(128)} pad(s32[1]{0:T(128)} %param_1.3, s32[]{:T(128)} %constant.2), padding=0_5
%param_2.3 = s32[5]{0:T(128)} parameter(2)
%pad.3 = s32[6]{0:T(128)} pad(s32[5]{0:T(128)} %param_2.3, s32[]{:T(128)} %constant.2), padding=1_0
%maximum.1 = s32[6]{0:T(128)} maximum(s32[6]{0:T(128)} %pad.2, s32[6]{0:T(128)} %pad.3)
%param_0.1 = s32[6]{0:T(128)} parameter(0)
ROOT %add.0 = s32[6]{0:T(128)} add(s32[6]{0:T(128)} %maximum.1, s32[6]{0:T(128)} %param_0.1)
}
%body.3 (prev.4: s32[6]) -> s32[6] {
%constant.7 = s32[]{:T(128)} constant(100)
%constant.6 = s32[]{:T(128)} constant(0)
%constant.5 = s32[1]{0:T(128)} constant({1})
%prev.4 = s32[6]{0:T(128)} parameter(0)
%rng.8 = s32[5]{0:T(128)} rng(s32[]{:T(128)} %constant.6, s32[]{:T(128)} %constant.7), distribution=rng_uniform
%neg = s32[1]{0:T(128)} negate(s32[1]{0:T(128)} %constant.5)
ROOT %fusion = s32[6]{0:T(128)} fusion(s32[6]{0:T(128)} %prev.4, s32[1]{0:T(128)} %neg, s32[5]{0:T(128)} %rng.8), kind=kLoop, calls=%fused_computation
}
%WhileWithPrngScalarResult.11 (prev.12: s32[6]) -> pred[] {
%constant.15 = s32[]{:T(128)} constant(1)
%prev.12 = s32[6]{0:T(128)} parameter(0)
%bitcast.1 = s32[1]{0:T(128)} bitcast(s32[6]{0:T(128)} %prev.12)
%bitcast = s32[]{:T(128)} bitcast(s32[1]{0:T(128)} %bitcast.1)
ROOT %compare.16 = pred[]{:T(128)} compare(s32[]{:T(128)} %constant.15, s32[]{:T(128)} %bitcast), direction=GT
}
ENTRY %WhileWithPrngScalarResult.18 () -> s32[6] {
%constant.1 = s32[]{:T(128)} constant(0)
%broadcast.2 = s32[6]{0:T(128)} broadcast(s32[]{:T(128)} %constant.1), dimensions={}
ROOT %while.17 = s32[6]{0:T(128)} while(s32[6]{0:T(128)} %broadcast.2), condition=%WhileWithPrngScalarResult.11, body=%body.3
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
AssignMemorySpace(module.get());
}
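// Tests a while body containing an in-place dynamic-update-slice fusion; the
// in-place buffer (tuple index 1 of the while) should be assigned to the
// alternate memory space.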
TEST_F(MemorySpaceAssignmentTest, WhileInPlaceBuffer) {
absl::string_view hlo_string = R"(
HloModule Module, is_scheduled=true
fused_computation {
param0 = f32[2,3] parameter(0)
constant.1 = f32[] constant(0)
broadcast = f32[2,1] broadcast(constant.1), dimensions={}
constant.3 = s32[] constant(0)
ROOT dynamic-update-slice.5 = f32[2,3] dynamic-update-slice(param0, broadcast, constant.3, constant.3)
}
%WhileBody (body_param: (f32[2,3], f32[2,3], f32[])) -> (f32[2,3], f32[2,3], f32[]) {
%body_param = (f32[2,3]{1,0}, f32[2,3]{1,0}, f32[]) parameter(0)
%get-tuple-element.1 = f32[] get-tuple-element((f32[2,3]{1,0}, f32[2,3]{1,0}, f32[]) %body_param), index=2
%get-tuple-element.2 = f32[2,3]{1,0} get-tuple-element((f32[2,3]{1,0}, f32[2,3]{1,0}, f32[]) %body_param), index=0
%get-tuple-element.3 = f32[2,3]{1,0} get-tuple-element((f32[2,3]{1,0}, f32[2,3]{1,0}, f32[]) %body_param), index=1
%fusion = f32[2,3]{1,0} fusion(get-tuple-element.3), kind=kLoop, calls=fused_computation
%multiply = f32[2,3]{1,0} multiply(f32[2,3]{1,0} %get-tuple-element.2, f32[2,3]{1,0} %fusion)
ROOT %tuple = (f32[2,3]{1,0}, f32[2,3]{1,0}, f32[]) tuple(f32[2,3]{1,0} %multiply, f32[2,3]{1,0} %fusion, f32[] %get-tuple-element.1)
}
%WhileCond (cond_param: (f32[2,3], f32[2,3], f32[])) -> pred[] {
%cond_param = (f32[2,3]{1,0}, f32[2,3]{1,0}, f32[]) parameter(0)
%get-tuple-element = f32[] get-tuple-element((f32[2,3]{1,0}, f32[2,3]{1,0}, f32[]) %cond_param), index=2
%constant = f32[] constant(50)
ROOT %compare = pred[] compare(f32[] %get-tuple-element, f32[] %constant), direction=LT
}
ENTRY %Entry (param_data: f32[2,3], param_iter: f32[], p2: f32[2,3]) -> f32[2,3] {
%param_iter = f32[] parameter(1)
%param_data = f32[2,3]{1,0} parameter(0)
%p2 = f32[2,3]{1,0} parameter(2)
%copy1 = f32[2,3]{1,0} copy(param_data)
%copy2 = f32[2,3]{1,0} copy(p2)
%tuple.1 = (f32[2,3]{1,0}, f32[2,3]{1,0}, f32[]) tuple(f32[2,3]{1,0} copy1, f32[2,3]{1,0} copy2, f32[] %param_iter)
%while = (f32[2,3]{1,0}, f32[2,3]{1,0}, f32[]) while((f32[2,3]{1,0}, f32[2,3]{1,0}, f32[]) %tuple.1), condition=%WhileCond, body=%WhileBody
%get-tuple-element.4 = f32[2,3]{1,0} get-tuple-element((f32[2,3]{1,0}, f32[2,3]{1,0}, f32[]) %while), index=0
ROOT %copy3 = f32[2,3]{1,0} copy(get-tuple-element.4)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
AssignMemorySpace(module.get());
const HloInstruction* while_op =
module->entry_computation()->GetInstructionWithName("while");
EXPECT_EQ(
ShapeUtil::GetSubshape(while_op->shape(), {1}).layout().memory_space(),
kAlternateMemorySpace);
}
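// Regression test: copy0 appears at both indices 0 and 1 of the while init
// tuple, so two tuple elements share one buffer; verification should accept
// the shared allocation.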
TEST_F(MemorySpaceAssignmentTest, WhileSharedBufferVerificationBug) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
while_cond {
p0 = (f32[3]{0}, f32[3]{0}, f32[3]{0}, pred[]) parameter(0)
ROOT gte = pred[] get-tuple-element(p0), index=3
}
while_body {
p0 = (f32[3]{0}, f32[3]{0}, f32[3]{0}, pred[]) parameter(0)
gte0 = f32[3]{0} get-tuple-element(p0), index=0
gte1 = f32[3]{0} get-tuple-element(p0), index=1
gte2 = f32[3]{0} get-tuple-element(p0), index=2
gte3 = pred[] get-tuple-element(p0), index=3
add = f32[3]{0} add(gte0, gte0)
negate0 = f32[3]{0} negate(add)
negate1 = f32[3]{0} negate(negate0)
negate2 = f32[3]{0} negate(negate1)
negate3 = f32[3]{0} negate(negate2)
negate4 = f32[3]{0} negate(negate3)
negate5 = f32[3]{0} negate(negate4)
negate6 = f32[3]{0} negate(negate5)
negate7 = f32[3]{0} negate(negate6)
negate8 = f32[3]{0} negate(negate7)
negate9 = f32[3]{0} negate(negate8)
negate10 = f32[3]{0} negate(negate9)
negate11 = f32[3]{0} negate(negate10)
negate12 = f32[3]{0} negate(negate11)
negate13 = f32[3]{0} negate(negate12)
negate14 = f32[3]{0} negate(negate13)
negate15 = f32[3]{0} negate(negate14)
negate16 = f32[3]{0} negate(negate15)
ROOT tuple = (f32[3]{0}, f32[3]{0}, f32[3]{0}, pred[]) tuple(gte0, gte0, negate16, gte3)
}
ENTRY entry {
p0 = f32[3]{0} parameter(0)
p1 = pred[] parameter(1)
copy0 = f32[3]{0} copy(p0)
copy1 = f32[3]{0} copy(p0)
tuple = (f32[3]{0}, f32[3]{0}, f32[3]{0}, pred[]) tuple(copy0, copy0, copy1, p1)
while = (f32[3]{0}, f32[3]{0}, f32[3]{0}, pred[]) while(tuple), condition=while_cond, body=while_body
ROOT gte = f32[3]{0} get-tuple-element(while), index=2
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
AssignMemorySpace(module.get());
}
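// Regression test for b/228599972: a two-output fusion whose results feed
// only negates that never reach the root.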
TEST_F(MemorySpaceAssignmentTest, b228599972) {
absl::string_view hlo_string = R"(
HloModule entry, is_scheduled=true
fused_computation {
%p0 = f32[2,3]{1,0} parameter(0)
%result0 = f32[2,3]{1,0} copy(%p0)
%result1 = f32[2,3]{1,0} copy(%p0)
ROOT tuple = (f32[2,3]{1,0}, f32[2,3]{1,0}) tuple(%result0, %result1)
}
ENTRY entry {
%p0 = f32[2,3]{1,0} parameter(0)
%p1 = f32[2,3]{1,0} parameter(1)
%unused = (f32[2,3]{1,0}, f32[2,3]{1,0}) fusion(%p0), kind=kLoop, calls=%fused_computation
%unused.0 = f32[2,3]{1,0} get-tuple-element(%unused), index=0
%unused.1 = f32[2,3]{1,0} get-tuple-element(%unused), index=1
%negate.0 = f32[2,3]{1,0} negate(f32[2,3]{1,0} %unused.0)
%negate.1 = f32[2,3]{1,0} negate(f32[2,3]{1,0} %unused.1)
ROOT %result = f32[2,3]{1,0} negate(%p1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
AssignMemorySpace(module.get());
}
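// Regression test for b/172243149: copy0 is live across the while loop and is
// also consumed by add0 after the loop.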
TEST_F(MemorySpaceAssignmentTest, b172243149) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
while_cond {
p0 = (f32[3]{0}, f32[3]{0}, f32[3]{0}, pred[]) parameter(0)
ROOT gte = pred[] get-tuple-element(p0), index=3
}
while_body {
p0 = (f32[3]{0}, f32[3]{0}, f32[3]{0}, pred[]) parameter(0)
gte0 = f32[3]{0} get-tuple-element(p0), index=0
gte1 = f32[3]{0} get-tuple-element(p0), index=1
gte2 = f32[3]{0} get-tuple-element(p0), index=2
gte3 = pred[] get-tuple-element(p0), index=3
add = f32[3]{0} add(gte1, gte2)
negate0 = f32[3]{0} negate(add)
negate1 = f32[3]{0} negate(negate0)
negate2 = f32[3]{0} negate(negate1)
negate3 = f32[3]{0} negate(negate2)
negate4 = f32[3]{0} negate(negate3)
negate5 = f32[3]{0} negate(negate4)
negate6 = f32[3]{0} negate(negate5)
negate7 = f32[3]{0} negate(negate6)
negate8 = f32[3]{0} negate(negate7)
negate9 = f32[3]{0} negate(negate8)
negate10 = f32[3]{0} negate(negate9)
negate11 = f32[3]{0} negate(negate10)
negate12 = f32[3]{0} negate(negate11)
negate13 = f32[3]{0} negate(negate12)
negate14 = f32[3]{0} negate(negate13)
negate15 = f32[3]{0} negate(negate14)
negate16 = f32[3]{0} negate(negate15)
ROOT tuple = (f32[3]{0}, f32[3]{0}, f32[3]{0}, pred[]) tuple(gte0, add, negate16, gte3)
}
ENTRY entry {
p0 = f32[3]{0} parameter(0)
p1 = pred[] parameter(1)
copy0 = f32[3]{0} copy(p0)
copy1 = f32[3]{0} copy(p0)
copy2 = f32[3]{0} copy(p0)
negate = f32[3]{0} negate(copy0)
tuple = (f32[3]{0}, f32[3]{0}, f32[3]{0}, pred[]) tuple(copy0, copy1, copy2, p1)
while = (f32[3]{0}, f32[3]{0}, f32[3]{0}, pred[]) while(tuple), condition=while_cond, body=while_body
gte = f32[3]{0} get-tuple-element(while), index=2
add0 = f32[3]{0} add(negate, copy0)
ROOT add1 = f32[3]{0} add(add0, gte)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
AssignMemorySpace(module.get());
}
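// Regression test: %tuple.1 carries a control-predecessors annotation;
// assignment should handle the control dependency without failing
// verification.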
TEST_F(MemorySpaceAssignmentTest, ControlPredecessorsBug) {
absl::string_view hlo_string = R"(
HloModule sort.16, is_scheduled=true
ENTRY %sort.16 (param.0.1: s32[1], param.1.2: f32[1], param.2.3: u32[1], param.3.4: s32[1]) -> (s32[1], f32[1], u32[1], s32[1]) {
%param.3.4 = s32[1]{0:T(128)} parameter(3)
%param.2.3 = u32[1]{0:T(128)} parameter(2)
%param.1.2 = f32[1]{0:T(128)} parameter(1)
%param.0.1 = s32[1]{0:T(128)} parameter(0)
%tuple.1 = (s32[1]{0:T(128)}, f32[1]{0:T(128)}, u32[1]{0:T(128)}, s32[1]{0:T(128)}) tuple(s32[1]{0:T(128)} %param.0.1, f32[1]{0:T(128)} %param.1.2, u32[1]{0:T(128)} %param.2.3, s32[1]{0:T(128)} %param.3.4), control-predecessors={%param.0.1}
%get-tuple-element.4 = s32[1]{0:T(128)} get-tuple-element((s32[1]{0:T(128)}, f32[1]{0:T(128)}, u32[1]{0:T(128)}, s32[1]{0:T(128)}) %tuple.1), index=0
%get-tuple-element.5 = f32[1]{0:T(128)} get-tuple-element((s32[1]{0:T(128)}, f32[1]{0:T(128)}, u32[1]{0:T(128)}, s32[1]{0:T(128)}) %tuple.1), index=1
%get-tuple-element.6 = u32[1]{0:T(128)} get-tuple-element((s32[1]{0:T(128)}, f32[1]{0:T(128)}, u32[1]{0:T(128)}, s32[1]{0:T(128)}) %tuple.1), index=2
%get-tuple-element.7 = s32[1]{0:T(128)} get-tuple-element((s32[1]{0:T(128)}, f32[1]{0:T(128)}, u32[1]{0:T(128)}, s32[1]{0:T(128)}) %tuple.1), index=3
%copy.4 = s32[1]{0:T(128)} copy(s32[1]{0:T(128)} %get-tuple-element.4)
%copy.5 = f32[1]{0:T(128)} copy(f32[1]{0:T(128)} %get-tuple-element.5)
%copy.6 = u32[1]{0:T(128)} copy(u32[1]{0:T(128)} %get-tuple-element.6)
%copy.7 = s32[1]{0:T(128)} copy(s32[1]{0:T(128)} %get-tuple-element.7)
ROOT %tuple.2 = (s32[1]{0:T(128)}, f32[1]{0:T(128)}, u32[1]{0:T(128)}, s32[1]{0:T(128)}) tuple(s32[1]{0:T(128)} %copy.4, f32[1]{0:T(128)} %copy.5, u32[1]{0:T(128)} %copy.6, s32[1]{0:T(128)} %copy.7)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
AssignMemorySpace(module.get());
}
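// The conditional operand (copy) should live in alternate memory, and both
// branch computations should see their parameter in alternate memory.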
TEST_F(MemorySpaceAssignmentTest, ConditionalShouldBeAllocatedInAlternateMem) {
absl::string_view hlo_string = R"(
HloModule CondAllocation, is_scheduled=true
true_computation {
p0 = (f32[3]{0}) parameter(0)
gte = f32[3]{0} get-tuple-element(p0), index=0
ROOT neg1 = f32[3]{0} negate(gte)
}
false_computation {
p0 = (f32[3]{0}) parameter(0)
gte = f32[3]{0} get-tuple-element(p0), index=0
ROOT neg2 = f32[3]{0} negate(gte)
}
ENTRY entry {
p0 = f32[3]{0} parameter(0)
p1 = pred[] parameter(1)
copy = f32[3]{0} copy(p0)
tuple = (f32[3]{0}) tuple(copy)
ROOT conditional = f32[3]{0} conditional(p1, tuple, tuple), true_computation=true_computation, false_computation=false_computation
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
AssignMemorySpace(module.get());
auto copy =
module->GetComputationWithName("entry")->GetInstructionWithName("copy");
EXPECT_EQ(copy->shape().layout().memory_space(), kAlternateMemorySpace);
auto neg1 = module->GetComputationWithName("true_computation")
->GetInstructionWithName("neg1");
auto neg1_operand = neg1->operand(0);
EXPECT_EQ(neg1_operand->shape().layout().memory_space(),
kAlternateMemorySpace);
auto neg2 = module->GetComputationWithName("false_computation")
->GetInstructionWithName("neg2");
auto neg2_operand = neg2->operand(0);
EXPECT_EQ(neg2_operand->shape().layout().memory_space(),
kAlternateMemorySpace);
}
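// copy0 is used by both branches and goes to alternate memory; copy1 is only
// needed late in the true branch, so it stays in default memory and is
// prefetched inside that branch rather than before the conditional.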
TEST_F(MemorySpaceAssignmentTest, ConditionalAvoidsUnnecessaryPrefetch) {
absl::string_view hlo_string = R"(
HloModule CondAllocation, is_scheduled=true
true_computation {
p0 = (f32[3]{0}, f32[3]{0}) parameter(0)
gte0 = f32[3]{0} get-tuple-element(p0), index=0
neg0 = f32[3]{0} negate(gte0)
neg1 = f32[3]{0} negate(neg0)
neg2 = f32[3]{0} negate(neg1)
neg3 = f32[3]{0} negate(neg2)
neg4 = f32[3]{0} negate(neg3)
neg5 = f32[3]{0} negate(neg4)
neg6 = f32[3]{0} negate(neg5)
neg7 = f32[3]{0} negate(neg6)
neg8 = f32[3]{0} negate(neg7)
neg9 = f32[3]{0} negate(neg8)
gte1 = f32[3]{0} get-tuple-element(p0), index=1
ROOT add = f32[3]{0} add(neg9, gte1)
}
false_computation {
p0 = (f32[3]{0}) parameter(0)
gte = f32[3]{0} get-tuple-element(p0), index=0
ROOT neg = f32[3]{0} negate(gte)
}
ENTRY entry {
p0 = f32[3]{0} parameter(0)
p1 = pred[] parameter(1)
copy0 = f32[3]{0} copy(p0)
copy1 = f32[3]{0} copy(p0)
tuple0 = (f32[3]{0}, f32[3]{0}) tuple(copy0, copy1)
tuple1 = (f32[3]{0}) tuple(copy0)
ROOT conditional = f32[3]{0} conditional(p1, tuple0, tuple1), true_computation=true_computation, false_computation=false_computation
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
AssignMemorySpace(module.get());
auto copy0 =
module->GetComputationWithName("entry")->GetInstructionWithName("copy0");
EXPECT_EQ(copy0->shape().layout().memory_space(), kAlternateMemorySpace);
auto copy1 =
module->GetComputationWithName("entry")->GetInstructionWithName("copy1");
EXPECT_EQ(copy1->shape().layout().memory_space(), kDefaultMemorySpace);
auto add = module->GetComputationWithName("true_computation")
->GetInstructionWithName("add");
auto add_operand = add->operand(1);
EXPECT_EQ(add_operand->shape().layout().memory_space(),
kAlternateMemorySpace);
}
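// copy1 is used both inside the true branch and by add1 after the
// conditional; the later use should come from default memory via a completed
// async copy (copy-done).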
TEST_F(MemorySpaceAssignmentTest, ConditionalMultiUse) {
absl::string_view hlo_string = R"(
HloModule CondAllocation, is_scheduled=true
true_computation {
p0 = (f32[3]{0}, f32[3]{0}) parameter(0)
gte0 = f32[3]{0} get-tuple-element(p0), index=0
gte1 = f32[3]{0} get-tuple-element(p0), index=1
add0 = f32[3]{0} add(gte0, gte1)
neg0 = f32[3]{0} negate(add0)
neg1 = f32[3]{0} negate(neg0)
neg2 = f32[3]{0} negate(neg1)
neg3 = f32[3]{0} negate(neg2)
neg4 = f32[3]{0} negate(neg3)
neg5 = f32[3]{0} negate(neg4)
neg6 = f32[3]{0} negate(neg5)
neg7 = f32[3]{0} negate(neg6)
neg8 = f32[3]{0} negate(neg7)
ROOT neg9 = f32[3]{0} negate(neg8)
}
false_computation {
p0 = (f32[3]{0}) parameter(0)
gte = f32[3]{0} get-tuple-element(p0), index=0
ROOT neg = f32[3]{0} negate(gte)
}
ENTRY entry {
p0 = f32[3]{0} parameter(0)
p1 = pred[] parameter(1)
copy0 = f32[3]{0} copy(p0)
copy1 = f32[3]{0} copy(p0)
tuple0 = (f32[3]{0}, f32[3]{0}) tuple(copy0, copy1)
tuple1 = (f32[3]{0}) tuple(copy0)
conditional = f32[3]{0} conditional(p1, tuple0, tuple1), true_computation=true_computation, false_computation=false_computation
ROOT add1 = f32[3]{0} add(copy1, conditional)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
AssignMemorySpace(module.get());
auto copy1 =
module->GetComputationWithName("entry")->GetInstructionWithName("copy1");
EXPECT_EQ(copy1->shape().layout().memory_space(), kAlternateMemorySpace);
auto add0 = module->GetComputationWithName("true_computation")
->GetInstructionWithName("add0");
auto add0_operand = add0->operand(1);
EXPECT_EQ(add0_operand->shape().layout().memory_space(),
kAlternateMemorySpace);
auto add1 =
module->GetComputationWithName("entry")->GetInstructionWithName("add1");
auto add1_operand = add1->operand(0);
EXPECT_EQ(add1_operand->shape().layout().memory_space(), kDefaultMemorySpace);
EXPECT_EQ(add1_operand->opcode(), HloOpcode::kCopyDone);
}
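// A conditional inside a while body reads gte0, which is also the first
// element of the loop output; the body root's first operand should be the
// parameter evicted to default memory and prefetched back into alternate
// memory.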
TEST_F(MemorySpaceAssignmentTest, ConditionalMultiUseInWhile) {
absl::string_view hlo_string = R"(
HloModule CondAllocation, is_scheduled=true
true_computation {
p0 = (f32[3]{0}) parameter(0)
gte = f32[3]{0} get-tuple-element(p0), index=0
ROOT neg1 = f32[3]{0} negate(gte)
}
false_computation {
p0 = (f32[3]{0}) parameter(0)
gte = f32[3]{0} get-tuple-element(p0), index=0
ROOT neg2 = f32[3]{0} negate(gte)
}
while_cond {
p0 = (f32[3]{0}, f32[3]{0}, pred[]) parameter(0)
ROOT gte = pred[] get-tuple-element(p0), index=2
}
while_body {
p0 = (f32[3]{0}, f32[3]{0}, pred[]) parameter(0)
gte0 = f32[3]{0} get-tuple-element(p0), index=0
gte1 = f32[3]{0} get-tuple-element(p0), index=1
gte2 = pred[] get-tuple-element(p0), index=2
cond_tuple = (f32[3]{0}) tuple(gte0)
conditional = f32[3]{0} conditional(gte2, cond_tuple, cond_tuple), true_computation=true_computation, false_computation=false_computation
add = f32[3]{0} add(conditional, gte1)
neg0 = f32[3]{0} negate(add)
neg1 = f32[3]{0} negate(neg0)
ROOT tuple = (f32[3]{0}, f32[3]{0}, pred[]) tuple(gte0, neg1, gte2)
}
ENTRY entry {
p0 = f32[3]{0} parameter(0)
p1 = pred[] parameter(1)
copy0 = f32[3]{0} copy(p0)
copy1 = f32[3]{0} copy(p0)
tuple = (f32[3]{0}, f32[3]{0}, pred[]) tuple(copy0, copy1, p1)
while = (f32[3]{0}, f32[3]{0}, pred[]) while(tuple), condition=while_cond, body=while_body
ROOT gte = f32[3]{0} get-tuple-element(while), index=1
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
AssignMemorySpace(module.get());
auto copy0 =
module->GetComputationWithName("entry")->GetInstructionWithName("copy0");
EXPECT_EQ(copy0->shape().layout().memory_space(), kAlternateMemorySpace);
auto conditional = module->GetComputationWithName("while_body")
->GetInstructionWithName("conditional");
auto conditional_operand = conditional->operand(1);
EXPECT_EQ(ShapeUtil::GetSubshape(conditional_operand->shape(), {0})
.layout()
.memory_space(),
kAlternateMemorySpace);
auto while_root =
module->GetComputationWithName("while_body")->root_instruction();
auto while_root_operand = while_root->operand(0);
EXPECT_THAT(
while_root_operand,
op::AsyncCopy(kAlternateMemorySpace, kDefaultMemorySpace,
op::AsyncCopy(kDefaultMemorySpace, kAlternateMemorySpace,
op::GetTupleElement(op::Parameter(0)))));
}
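// Conditionals nested two levels deep: the copy and the parameter uses in all
// leaf branches should be placed in alternate memory.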
TEST_F(MemorySpaceAssignmentTest, NestedConditional) {
absl::string_view hlo_string = R"(
HloModule CondAllocation, is_scheduled=true
true_computation2 {
p0 = (f32[3]{0}) parameter(0)
gte = f32[3]{0} get-tuple-element(p0), index=0
ROOT neg1 = f32[3]{0} negate(gte)
}
false_computation2 {
p0 = (f32[3]{0}) parameter(0)
gte = f32[3]{0} get-tuple-element(p0), index=0
ROOT neg2 = f32[3]{0} negate(gte)
}
true_computation1 {
p0 = (f32[3]{0}) parameter(0)
gte = f32[3]{0} get-tuple-element(p0), index=0
slice = f32[1]{0} slice(gte), slice={[0:1]}
bitcast = f32[] bitcast(slice)
constant = f32[] constant(0.0)
compare = pred[] compare(bitcast, constant), direction=GT
ROOT conditional = f32[3]{0} conditional(compare, p0, p0), true_computation=true_computation2, false_computation=false_computation2
}
false_computation1 {
p0 = (f32[3]{0}) parameter(0)
gte = f32[3]{0} get-tuple-element(p0), index=0
ROOT neg3 = f32[3]{0} negate(gte)
}
ENTRY entry {
p0 = f32[3]{0} parameter(0)
p1 = pred[] parameter(1)
copy = f32[3]{0} copy(p0)
tuple = (f32[3]{0}) tuple(copy)
ROOT conditional = f32[3]{0} conditional(p1, tuple, tuple), true_computation=true_computation1, false_computation=false_computation1
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
AssignMemorySpace(module.get());
auto copy =
module->GetComputationWithName("entry")->GetInstructionWithName("copy");
EXPECT_EQ(copy->shape().layout().memory_space(), kAlternateMemorySpace);
auto neg1_operand = module->GetComputationWithName("true_computation2")
->GetInstructionWithName("neg1")
->operand(0);
auto neg2_operand = module->GetComputationWithName("false_computation2")
->GetInstructionWithName("neg2")
->operand(0);
auto neg3_operand = module->GetComputationWithName("false_computation1")
->GetInstructionWithName("neg3")
->operand(0);
EXPECT_EQ(neg1_operand->shape().layout().memory_space(),
kAlternateMemorySpace);
EXPECT_EQ(neg2_operand->shape().layout().memory_space(),
kAlternateMemorySpace);
EXPECT_EQ(neg3_operand->shape().layout().memory_space(),
kAlternateMemorySpace);
}
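// Regression test: true_computation1 re-tuples its parameter element before
// passing it to the inner conditional, reusing the same buffer; verification
// should accept the reuse.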
TEST_F(MemorySpaceAssignmentTest, NestedConditionalBufferReuseVerificationBug) {
absl::string_view hlo_string = R"(
HloModule CondAllocation, is_scheduled=true
true_computation2 {
p0 = (f32[3]{0}) parameter(0)
gte = f32[3]{0} get-tuple-element(p0), index=0
neg1 = f32[3]{0} negate(gte)
neg2 = f32[3]{0} negate(neg1)
ROOT neg3 = f32[3]{0} negate(neg2)
}
false_computation2 {
p0 = (f32[3]{0}) parameter(0)
gte = f32[3]{0} get-tuple-element(p0), index=0
ROOT neg4 = f32[3]{0} negate(gte)
}
true_computation1 {
p0 = (f32[3]{0}) parameter(0)
gte = f32[3]{0} get-tuple-element(p0), index=0
slice = f32[1]{0} slice(gte), slice={[0:1]}
bitcast = f32[] bitcast(slice)
constant = f32[] constant(0.0)
compare = pred[] compare(bitcast, constant), direction=GT
tuple = (f32[3]{0}) tuple(gte)
ROOT conditional = f32[3]{0} conditional(compare, tuple, tuple), true_computation=true_computation2, false_computation=false_computation2
}
false_computation1 {
p0 = (f32[3]{0}) parameter(0)
gte = f32[3]{0} get-tuple-element(p0), index=0
ROOT neg5 = f32[3]{0} negate(gte)
}
ENTRY entry {
p0 = f32[3]{0} parameter(0)
p1 = pred[] parameter(1)
copy = f32[3]{0} copy(p0)
tuple = (f32[3]{0}) tuple(copy)
ROOT conditional = f32[3]{0} conditional(p1, tuple, tuple), true_computation=true_computation1, false_computation=false_computation1
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
AssignMemorySpace(module.get());
}
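// Regression test: a while loop nested inside the true branch of an inner
// conditional should pass verification.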
TEST_F(MemorySpaceAssignmentTest, WhileInsideNestedConditionalVerificationBug) {
absl::string_view hlo_string = R"(
HloModule CondAllocation, is_scheduled=true
while_cond {
p0 = (f32[3]{0}) parameter(0)
ROOT constant = pred[] constant(true)
}
while_body {
p0 = (f32[3]{0}) parameter(0)
gte0 = f32[3]{0} get-tuple-element(p0), index=0
negate0 = f32[3]{0} negate(gte0)
ROOT tuple = (f32[3]{0}) tuple(negate0)
}
true_computation2 {
p0 = (f32[3]{0}) parameter(0)
gte = f32[3]{0} get-tuple-element(p0), index=0
tuple = (f32[3]{0}) tuple(gte)
while = (f32[3]{0}) while(tuple), condition=while_cond, body=while_body
while_gte0 = f32[3]{0} get-tuple-element(while), index=0
ROOT root = f32[3]{0} negate(while_gte0)
}
false_computation2 {
p0 = (f32[3]{0}) parameter(0)
gte = f32[3]{0} get-tuple-element(p0), index=0
ROOT neg3 = f32[3]{0} negate(gte)
}
true_computation1 {
p0 = (f32[3]{0}) parameter(0)
gte = f32[3]{0} get-tuple-element(p0), index=0
constant = pred[] constant(true)
tuple = (f32[3]{0}) tuple(gte)
ROOT conditional = f32[3]{0} conditional(constant, tuple, tuple), true_computation=true_computation2, false_computation=false_computation2
}
false_computation1 {
p0 = (f32[3]{0}) parameter(0)
gte = f32[3]{0} get-tuple-element(p0), index=0
ROOT neg3 = f32[3]{0} negate(gte)
}
ENTRY entry {
p0 = f32[3]{0} parameter(0)
p1 = pred[] parameter(1)
copy = f32[3]{0} copy(p0)
tuple = (f32[3]{0}) tuple(copy)
ROOT conditional = f32[3]{0} conditional(p1, tuple, tuple), true_computation=true_computation1, false_computation=false_computation1
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
AssignMemorySpace(module.get());
}
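// neg0/neg1 in false_computation are scheduled before the branch parameter is
// used, so their chunks must not overlap the chunk assigned to copy; checked
// by comparing preset-assignment offsets.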
TEST_F(MemorySpaceAssignmentTest,
ConditionalComputationBufferOverlapBeforeParam) {
absl::string_view hlo_string = R"(
HloModule CondAllocation, is_scheduled=true
true_computation {
p0 = (f32[3]{0}) parameter(0)
gte = f32[3]{0} get-tuple-element(p0), index=0
ROOT neg2 = f32[3]{0} negate(gte)
}
false_computation {
c = f32[3]{0} constant({0.0, 1.0, 2.0})
neg0 = f32[3]{0} negate(c)
neg1 = f32[3]{0} negate(neg0)
p0 = (f32[3]{0}) parameter(0)
gte = f32[3]{0} get-tuple-element(p0), index=0
ROOT add = f32[3]{0} add(gte, neg1)
}
ENTRY entry {
p0 = f32[3]{0} parameter(0)
p1 = pred[] parameter(1)
copy = f32[3]{0} copy(p0)
tuple = (f32[3]{0}) tuple(copy)
ROOT conditional = f32[3]{0} conditional(p1, tuple, tuple), true_computation=true_computation, false_computation=false_computation
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
auto preset_assignments = AssignMemorySpace(module.get());
auto get_offset = [&](absl::string_view hlo_name) {
for (const auto& chunk : preset_assignments->chunks()) {
if (chunk.first.instruction->name() == hlo_name) {
return chunk.second.offset;
}
}
return static_cast<int64_t>(-1);
};
int64_t copy_offset = get_offset("copy");
int64_t neg0_offset = get_offset("neg0");
EXPECT_NE(copy_offset, -1);
EXPECT_NE(neg0_offset, -1);
EXPECT_NE(copy_offset, neg0_offset);
}
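// The request identifier (tuple index 1) of send/recv ops must not be
// allocated in the alternate memory space.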
TEST_F(MemorySpaceAssignmentTest,
RequestIdentifierShouldNotBeAllocatedInAlternateMem) {
absl::string_view hlo_string = R"(
HloModule SendRecv, is_scheduled=true
ENTRY %AddDependency (p: f32[3]) -> f32[3] {
%p = f32[3]{0} parameter(0)
%after-all = token[] after-all()
%recv.4 = (f32[3]{0}, u32[], token[]) recv(token[] %after-all), channel_id=7
%recv-done.4 = (f32[3]{0}, token[]) recv-done((f32[3]{0}, u32[], token[]) %recv.4), channel_id=7
%token.1 = token[] get-tuple-element((f32[3]{0}, token[]) %recv-done.4), index=1
%data = f32[3]{0} get-tuple-element((f32[3]{0}, token[]) %recv-done.4), index=0
%send = (f32[3]{0}, u32[], token[]) send(f32[3]{0} %data, token[] %token.1), channel_id=2
%send-done = token[] send-done((f32[3]{0}, u32[], token[]) %send), channel_id=2
ROOT %add = f32[3]{0} add(f32[3]{0} %p, f32[3]{0} %data)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
AssignMemorySpace(module.get());
for (const HloInstruction* instruction :
module->entry_computation()->instructions()) {
if (instruction->opcode() == HloOpcode::kSend ||
instruction->opcode() == HloOpcode::kRecv) {
const Shape& request_identifier_shape =
ShapeUtil::GetSubshape(instruction->shape(), {1});
EXPECT_NE(request_identifier_shape.layout().memory_space(),
kAlternateMemorySpace);
}
}
}
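// After assignment rewrites the graph, send-done must still take the send as
// its operand.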
TEST_F(MemorySpaceAssignmentTest, SendDoneShouldHaveSendOperand) {
absl::string_view hlo_string = R"(
HloModule SendRecv, is_scheduled=true
ENTRY %AddDependency (p0: f32[3], p1: f32[3]) -> f32[3] {
%p0 = f32[3]{0} parameter(0)
%p1 = f32[3]{0} parameter(1)
%neg0 = f32[3]{0} negate(f32[3]{0} %p1)
%neg1 = f32[3]{0} negate(f32[3]{0} %neg0)
%neg2 = f32[3]{0} negate(f32[3]{0} %neg1)
%neg3 = f32[3]{0} negate(f32[3]{0} %neg2)
%neg4 = f32[3]{0} negate(f32[3]{0} %neg3)
%neg5 = f32[3]{0} negate(f32[3]{0} %neg4)
%neg6 = f32[3]{0} negate(f32[3]{0} %neg5)
%after-all = token[] after-all()
%send = (f32[3]{0}, u32[], token[]) send(f32[3]{0} %p0, token[] %after-all), channel_id=2
%send-done = token[] send-done((f32[3]{0}, u32[], token[]) %send), channel_id=2
ROOT %add = f32[3]{0} add(f32[3]{0} %p0, f32[3]{0} %neg6)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
AssignMemorySpace(module.get());
}
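// send and send-done must share a single allocation even with a long negate
// chain scheduled between them; the prefetch interval is pinned to keep the
// window tight.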
TEST_F(MemorySpaceAssignmentTest, SendAndSendDoneShouldGetSameAllocation) {
absl::string_view hlo_string = R"(
HloModule SendRecv, is_scheduled=true
ENTRY %AddDependency (p0: f32[3], p1: f32[3]) -> f32[3] {
%p0 = f32[3]{0} parameter(0)
%p1 = f32[3]{0} parameter(1)
%after-all = token[] after-all()
%send = (f32[3]{0}, u32[], token[]) send(f32[3]{0} %p0, token[] %after-all), channel_id=2
%neg0 = f32[3]{0} negate(f32[3]{0} %p1)
%neg1 = f32[3]{0} negate(f32[3]{0} %neg0)
%neg2 = f32[3]{0} negate(f32[3]{0} %neg1)
%neg3 = f32[3]{0} negate(f32[3]{0} %neg2)
%neg4 = f32[3]{0} negate(f32[3]{0} %neg3)
%neg5 = f32[3]{0} negate(f32[3]{0} %neg4)
%neg6 = f32[3]{0} negate(f32[3]{0} %neg5)
%send-done = token[] send-done((f32[3]{0}, u32[], token[]) %send), channel_id=2
ROOT %add = f32[3]{0} add(f32[3]{0} %p0, f32[3]{0} %neg6)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
AssignMemorySpace(module.get(), DefaultMemorySpaceOptions(),
10, 4);
}
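// Tests the last-use optimization: sub1's use of p0 is the last one, so it
// should read p0 through an async copy into alternate memory while the adds
// read the parameter directly.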
TEST_F(MemorySpaceAssignmentTest, LastUseOpt) {
HloComputation::Builder builder(TestName());
Shape shape1 = ShapeUtil::MakeShape(F32, {2, 3});
Shape shape2 = ShapeUtil::MakeShape(F32, {2, 4});
PaddingConfig padding_config = MakeEdgePaddingConfig({{0, 0}, {0, 1}});
HloInstruction* p0 =
builder.AddInstruction(HloInstruction::CreateParameter(0, shape1, "p0"));
HloInstruction* p1 =
builder.AddInstruction(HloInstruction::CreateParameter(1, shape2, "p1"));
HloInstruction* add1 = builder.AddInstruction(
HloInstruction::CreateBinary(shape1, HloOpcode::kAdd, p0, p0));
HloInstruction* sub1 = builder.AddInstruction(
HloInstruction::CreateBinary(shape1, HloOpcode::kSubtract, p0, add1));
HloInstruction* mul1 = builder.AddInstruction(
HloInstruction::CreateBinary(shape2, HloOpcode::kMultiply, p1, p1));
HloInstruction* add2 = builder.AddInstruction(
HloInstruction::CreateBinary(shape2, HloOpcode::kAdd, mul1, p1));
HloInstruction* mul2 = builder.AddInstruction(
HloInstruction::CreateBinary(shape1, HloOpcode::kMultiply, add1, sub1));
HloInstruction* padding_value = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::Zero(F32)));
HloInstruction* padded_mul2 = builder.AddInstruction(
HloInstruction::CreatePad(shape2, mul2, padding_value, padding_config));
HloInstruction* add3 = builder.AddInstruction(
HloInstruction::CreateBinary(shape2, HloOpcode::kAdd, add2, padded_mul2));
auto module = CreateNewVerifiedModule();
HloComputation* computation = module->AddEntryComputation(builder.Build());
HloSchedule schedule(module.get());
schedule.set_sequence(computation, {p0, p1, add1, sub1, mul1, add2, mul2,
padding_value, padded_mul2, add3});
TF_CHECK_OK(module->set_schedule(schedule));
AssignMemorySpace(module.get());
EXPECT_THAT(
mul2,
op::Multiply(
op::Add(op::Parameter(0), op::Parameter(0)),
op::Subtract(op::AsyncCopy(kAlternateMemorySpace, kDefaultMemorySpace,
op::Parameter(0)),
op::Add(op::Parameter(0), op::Parameter(0)))));
}
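// Builds a while loop with an explicitly constructed instruction schedule and
// runs assignment with a widened maximum prefetch interval.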
TEST_F(MemorySpaceAssignmentTest, NonEntryComputationSchedule1) {
auto module = CreateNewVerifiedModule();
Shape shape = ShapeUtil::MakeShape(xla::F32, {2, 3});
Shape scalar_shape = ShapeUtil::MakeShape(xla::F32, {});
Shape tuple_shape = ShapeUtil::MakeTupleShape({shape, scalar_shape});
auto cond_builder = HloComputation::Builder("WhileCond");
HloInstruction* cond_param = cond_builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "cond_param"));
HloInstruction* cond_iter = cond_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape, cond_param, 1));
HloInstruction* cond_limit = cond_builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(50.f)));
HloInstruction* cond_lt = cond_builder.AddInstruction(
HloInstruction::CreateCompare(ShapeUtil::MakeShape(PRED, {}), cond_iter,
cond_limit, ComparisonDirection::kLt));
HloComputation* cond_computation =
module->AddEmbeddedComputation(cond_builder.Build());
auto body_builder = HloComputation::Builder("WhileBody");
HloInstruction* body_param = body_builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "body_param"));
HloInstruction* body_iter = body_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape, body_param, 1));
HloInstruction* body_data = body_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(shape, body_param, 0));
HloInstruction* body_iter_increment = body_builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.f)));
HloInstruction* body_iter_next =
body_builder.AddInstruction(HloInstruction::CreateBinary(
scalar_shape, HloOpcode::kAdd, body_iter, body_iter_increment));
HloInstruction* body_data_increment =
body_builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR2<float>({{1.f, 2.f, 3.f}, {4.f, 5.f, 6.f}})));
HloInstruction* body_data_mul =
body_builder.AddInstruction(HloInstruction::CreateBinary(
shape, HloOpcode::kMultiply, body_data, body_data));
HloInstruction* body_data_add =
body_builder.AddInstruction(HloInstruction::CreateBinary(
shape, HloOpcode::kAdd, body_data, body_data_increment));
HloInstruction* body_data_next =
body_builder.AddInstruction(HloInstruction::CreateBinary(
shape, HloOpcode::kAdd, body_data_add, body_data_mul));
HloInstruction* body_out = body_builder.AddInstruction(
HloInstruction::CreateTuple({body_data_next, body_iter_next}));
HloComputation* body_computation =
module->AddEmbeddedComputation(body_builder.Build());
auto builder = HloComputation::Builder(TestName());
  HloInstruction* data = builder.AddInstruction(
      HloInstruction::CreateParameter(0, shape, "param_data"));
  HloInstruction* iter = builder.AddInstruction(
      HloInstruction::CreateParameter(1, scalar_shape, "param_iter"));
HloInstruction* p2 =
builder.AddInstruction(HloInstruction::CreateParameter(2, shape, "p2"));
HloInstruction* tuple =
builder.AddInstruction(HloInstruction::CreateTuple({data, iter}));
HloInstruction* while_op = builder.AddInstruction(HloInstruction::CreateWhile(
tuple_shape, cond_computation, body_computation, tuple));
HloInstruction* while_data = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(shape, while_op, 0));
HloInstruction* add = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, while_data, p2));
HloComputation* entry_computation =
module->AddEntryComputation(builder.Build());
HloSchedule schedule(module.get());
schedule.set_sequence(cond_computation,
{cond_param, cond_iter, cond_limit, cond_lt});
schedule.set_sequence(body_computation,
{body_param, body_iter, body_data, body_iter_increment,
body_iter_next, body_data_increment, body_data_mul,
body_data_add, body_data_next, body_out});
schedule.set_sequence(entry_computation,
{iter, data, p2, tuple, while_op, while_data, add});
TF_CHECK_OK(module->set_schedule(schedule));
  AssignMemorySpace(module.get(), DefaultMemorySpaceOptions(),
                    /*max_prefetch_interval=*/50);
}
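// A called computation with a long negate chain; the tight prefetch interval
// forces scheduling decisions across the call boundary.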
TEST_F(MemorySpaceAssignmentTest, NonEntryComputationSchedule2) {
auto module = CreateNewVerifiedModule();
Shape shape = ShapeUtil::MakeShape(xla::F32, {2, 3});
Shape shape2 = ShapeUtil::MakeShape(xla::F32, {3, 3});
auto call_builder = HloComputation::Builder("Call");
HloInstruction* call_param = call_builder.AddInstruction(
HloInstruction::CreateParameter(0, shape, "call_param"));
HloInstruction* call_param2 = call_builder.AddInstruction(
HloInstruction::CreateParameter(1, shape2, "call_param2"));
HloInstruction* slice = call_builder.AddInstruction(
HloInstruction::CreateSlice(shape, call_param2, {0, 0}, {2, 3}, {1, 1}));
HloInstruction* mul =
call_builder.AddInstruction(HloInstruction::CreateBinary(
shape, HloOpcode::kMultiply, call_param, slice));
HloInstruction* negate0 = call_builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, mul));
HloInstruction* negate1 = call_builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate0));
HloInstruction* negate2 = call_builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate1));
HloInstruction* negate3 = call_builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate2));
HloInstruction* negate4 = call_builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate3));
HloInstruction* negate5 = call_builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate4));
HloInstruction* negate6 = call_builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate5));
HloInstruction* negate7 = call_builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate6));
HloInstruction* add0 =
call_builder.AddInstruction(HloInstruction::CreateBinary(
shape, HloOpcode::kAdd, call_param, negate7));
HloComputation* call_computation =
module->AddEmbeddedComputation(call_builder.Build());
auto builder = HloComputation::Builder(TestName());
HloInstruction* p0 =
builder.AddInstruction(HloInstruction::CreateParameter(0, shape, "p0"));
HloInstruction* p1 =
builder.AddInstruction(HloInstruction::CreateParameter(1, shape2, "p1"));
HloInstruction* add1 = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, p0, p0));
HloInstruction* add2 = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, add1, p0));
HloInstruction* negate8 = builder.AddInstruction(
HloInstruction::CreateUnary(shape2, HloOpcode::kNegate, p1));
HloInstruction* call = builder.AddInstruction(
HloInstruction::CreateCall(shape, {add1, negate8}, call_computation));
HloInstruction* add3 = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, p0, add1));
HloInstruction* add4 = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, call, add3));
HloInstruction* add5 = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, add2, add4));
HloComputation* entry_computation =
module->AddEntryComputation(builder.Build());
HloSchedule schedule(module.get());
schedule.set_sequence(
call_computation,
{call_param, call_param2, slice, mul, negate0, negate1, negate2, negate3,
negate4, negate5, negate6, negate7, add0});
schedule.set_sequence(entry_computation,
{p0, p1, add1, add2, negate8, call, add3, add4, add5});
TF_CHECK_OK(module->set_schedule(schedule));
  AssignMemorySpace(module.get(), DefaultMemorySpaceOptions(),
                    /*max_prefetch_interval=*/5);
}
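// Like NonEntryComputationSchedule2, but the called computation produces its
// own iota instead of taking a second parameter.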
TEST_F(MemorySpaceAssignmentTest, NonEntryComputationSchedule3) {
auto module = CreateNewVerifiedModule();
Shape shape = ShapeUtil::MakeShape(xla::F32, {2, 3});
Shape shape2 = ShapeUtil::MakeShape(xla::F32, {3, 3});
auto call_builder = HloComputation::Builder("Call");
HloInstruction* call_param = call_builder.AddInstruction(
HloInstruction::CreateParameter(0, shape, "call_param"));
HloInstruction* iota =
call_builder.AddInstruction(HloInstruction::CreateIota(shape2, 0));
HloInstruction* slice = call_builder.AddInstruction(
HloInstruction::CreateSlice(shape, iota, {0, 0}, {2, 3}, {1, 1}));
HloInstruction* mul =
call_builder.AddInstruction(HloInstruction::CreateBinary(
shape, HloOpcode::kMultiply, call_param, slice));
HloInstruction* negate0 = call_builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, mul));
HloInstruction* negate1 = call_builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate0));
HloInstruction* negate2 = call_builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate1));
HloInstruction* negate3 = call_builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate2));
HloInstruction* negate4 = call_builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate3));
HloInstruction* negate5 = call_builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate4));
HloInstruction* negate6 = call_builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate5));
HloInstruction* negate7 = call_builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate6));
HloInstruction* add0 =
call_builder.AddInstruction(HloInstruction::CreateBinary(
shape, HloOpcode::kAdd, call_param, negate7));
HloComputation* call_computation =
module->AddEmbeddedComputation(call_builder.Build());
auto builder = HloComputation::Builder(TestName());
HloInstruction* p0 =
builder.AddInstruction(HloInstruction::CreateParameter(0, shape, "p0"));
HloInstruction* add1 = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, p0, p0));
HloInstruction* add2 = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, add1, p0));
HloInstruction* call = builder.AddInstruction(
HloInstruction::CreateCall(shape, {add1}, call_computation));
HloInstruction* add3 = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, call, add1));
HloComputation* entry_computation =
module->AddEntryComputation(builder.Build());
HloSchedule schedule(module.get());
schedule.set_sequence(
call_computation,
{call_param, iota, slice, mul, negate0, negate1, negate2, negate3,
negate4, negate5, negate6, negate7, add0});
schedule.set_sequence(entry_computation, {p0, add1, add2, call, add3});
TF_CHECK_OK(module->set_schedule(schedule));
  AssignMemorySpace(module.get(), DefaultMemorySpaceOptions(),
                    /*max_prefetch_interval=*/5);
}
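// Disabled: routes add1 and add2 through the branches of a conditional with
// an explicit schedule.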
TEST_F(MemorySpaceAssignmentTest, DISABLED_NonEntryComputationSchedule4) {
auto module = CreateNewVerifiedModule();
Shape shape = ShapeUtil::MakeShape(xla::F32, {2, 3});
Shape shape2 = ShapeUtil::MakeShape(xla::F32, {3, 3});
auto true_builder = HloComputation::Builder("True");
HloInstruction* true_param = true_builder.AddInstruction(
HloInstruction::CreateParameter(0, shape, "true_param"));
HloInstruction* iota =
true_builder.AddInstruction(HloInstruction::CreateIota(shape2, 0));
HloInstruction* slice = true_builder.AddInstruction(
HloInstruction::CreateSlice(shape, iota, {0, 0}, {2, 3}, {1, 1}));
HloInstruction* mul =
true_builder.AddInstruction(HloInstruction::CreateBinary(
shape, HloOpcode::kMultiply, true_param, slice));
HloInstruction* negate0 = true_builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, mul));
HloInstruction* negate1 = true_builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate0));
HloInstruction* negate2 = true_builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate1));
HloInstruction* negate3 = true_builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate2));
HloInstruction* negate4 = true_builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate3));
HloInstruction* negate5 = true_builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate4));
HloInstruction* negate6 = true_builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate5));
HloInstruction* negate7 = true_builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate6));
HloInstruction* add0 =
true_builder.AddInstruction(HloInstruction::CreateBinary(
shape, HloOpcode::kAdd, true_param, negate7));
HloComputation* true_computation =
module->AddEmbeddedComputation(true_builder.Build());
auto false_builder = HloComputation::Builder("False");
HloInstruction* false_param = false_builder.AddInstruction(
HloInstruction::CreateParameter(0, shape, "false_param"));
HloComputation* false_computation =
module->AddEmbeddedComputation(false_builder.Build());
auto builder = HloComputation::Builder(TestName());
HloInstruction* p0 =
builder.AddInstruction(HloInstruction::CreateParameter(0, shape, "p0"));
HloInstruction* add1 = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, p0, p0));
HloInstruction* add2 = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, add1, p0));
HloInstruction* pred = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(true)));
HloInstruction* conditional =
builder.AddInstruction(HloInstruction::CreateConditional(
shape, pred, add1, true_computation, add2, false_computation));
HloInstruction* add3 = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, conditional, add1));
HloComputation* entry_computation =
module->AddEntryComputation(builder.Build());
HloSchedule schedule(module.get());
schedule.set_sequence(
true_computation,
{true_param, iota, slice, mul, negate0, negate1, negate2, negate3,
negate4, negate5, negate6, negate7, add0});
schedule.set_sequence(false_computation, {false_param});
schedule.set_sequence(entry_computation,
{p0, add1, add2, pred, conditional, add3});
TF_CHECK_OK(module->set_schedule(schedule));
  AssignMemorySpace(module.get(), DefaultMemorySpaceOptions(),
                    /*max_prefetch_interval=*/5);
}
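// While loop whose state tuple carries two scalars in addition to the data;
// run with a larger prefetch interval.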
TEST_F(MemorySpaceAssignmentTest, NonEntryComputationSchedule5) {
auto module = CreateNewVerifiedModule();
Shape shape = ShapeUtil::MakeShape(xla::F32, {2, 3});
Shape scalar_shape = ShapeUtil::MakeShape(xla::F32, {});
Shape tuple_shape =
ShapeUtil::MakeTupleShape({shape, scalar_shape, scalar_shape});
auto cond_builder = HloComputation::Builder("WhileCond");
HloInstruction* cond_param = cond_builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "cond_param"));
HloInstruction* cond_iter = cond_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape, cond_param, 1));
HloInstruction* cond_limit = cond_builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(50.f)));
HloInstruction* cond_lt = cond_builder.AddInstruction(
HloInstruction::CreateCompare(ShapeUtil::MakeShape(PRED, {}), cond_iter,
cond_limit, ComparisonDirection::kLt));
HloComputation* cond_computation =
module->AddEmbeddedComputation(cond_builder.Build());
auto body_builder = HloComputation::Builder("WhileBody");
HloInstruction* body_param = body_builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "body_param"));
HloInstruction* body_iter = body_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape, body_param, 1));
HloInstruction* body_data = body_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(shape, body_param, 0));
HloInstruction* body_iter_increment = body_builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.f)));
HloInstruction* body_iter_next =
body_builder.AddInstruction(HloInstruction::CreateBinary(
scalar_shape, HloOpcode::kAdd, body_iter, body_iter_increment));
HloInstruction* body_data2 = body_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape, body_param, 2));
HloInstruction* body_out = body_builder.AddInstruction(
HloInstruction::CreateTuple({body_data, body_iter_next, body_data2}));
HloComputation* body_computation =
module->AddEmbeddedComputation(body_builder.Build());
auto builder = HloComputation::Builder(TestName());
HloInstruction* data = builder.AddInstruction(
HloInstruction::CreateParameter(0, shape, "param_data"));
HloInstruction* iter = builder.AddInstruction(
HloInstruction::CreateParameter(1, scalar_shape, "param_iter"));
HloInstruction* data2 = builder.AddInstruction(
HloInstruction::CreateParameter(2, scalar_shape, "param_data2"));
HloInstruction* negate0 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, data));
HloInstruction* negate1 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate0));
HloInstruction* negate2 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate1));
HloInstruction* negate3 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate2));
HloInstruction* negate4 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate3));
HloInstruction* negate5 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate4));
HloInstruction* negate6 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate5));
HloInstruction* negate7 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate6));
HloInstruction* sub = builder.AddInstruction(HloInstruction::CreateBinary(
scalar_shape, HloOpcode::kSubtract, iter, data2));
HloInstruction* tuple = builder.AddInstruction(
HloInstruction::CreateTuple({negate7, iter, data2}));
HloInstruction* while_op = builder.AddInstruction(HloInstruction::CreateWhile(
tuple_shape, cond_computation, body_computation, tuple));
HloInstruction* while_data = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape, while_op, 1));
HloInstruction* root =
builder.AddInstruction(HloInstruction::CreateTuple({while_data, sub}));
HloComputation* entry_computation =
module->AddEntryComputation(builder.Build());
HloSchedule schedule(module.get());
schedule.set_sequence(cond_computation,
{cond_param, cond_iter, cond_limit, cond_lt});
schedule.set_sequence(body_computation,
{body_param, body_iter, body_data, body_iter_increment,
body_iter_next, body_data2, body_out});
schedule.set_sequence(
entry_computation,
{iter, data, data2, negate0, negate1, negate2, negate3, negate4, negate5,
negate6, negate7, sub, tuple, while_op, while_data, root});
TF_CHECK_OK(module->set_schedule(schedule));
  AssignMemorySpace(module.get(), DefaultMemorySpaceOptions(),
                    /*max_prefetch_interval=*/20);
}
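// Similar to the test above, but here the long negate chain lives inside the
// while body; the test then checks the memory-space layouts assigned to the
// while instruction and its aliased operands and parameters.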
TEST_F(MemorySpaceAssignmentTest, NonEntryComputationSchedule6) {
auto module = CreateNewVerifiedModule();
Shape shape = ShapeUtil::MakeShape(xla::F32, {2, 3});
Shape scalar_shape = ShapeUtil::MakeShape(xla::F32, {});
Shape tuple_shape = ShapeUtil::MakeTupleShape({shape, scalar_shape, shape});
auto cond_builder = HloComputation::Builder("WhileCond");
HloInstruction* cond_param = cond_builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "cond_param"));
HloInstruction* cond_iter = cond_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape, cond_param, 1));
HloInstruction* cond_limit = cond_builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(50.f)));
HloInstruction* cond_lt = cond_builder.AddInstruction(
HloInstruction::CreateCompare(ShapeUtil::MakeShape(PRED, {}), cond_iter,
cond_limit, ComparisonDirection::kLt));
HloComputation* cond_computation =
module->AddEmbeddedComputation(cond_builder.Build());
auto body_builder = HloComputation::Builder("WhileBody");
HloInstruction* body_param = body_builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "body_param"));
HloInstruction* body_iter = body_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape, body_param, 1));
HloInstruction* body_data = body_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(shape, body_param, 0));
HloInstruction* body_negate0 = body_builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, body_data));
HloInstruction* body_negate1 = body_builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, body_negate0));
HloInstruction* body_negate2 = body_builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, body_negate1));
HloInstruction* body_negate3 = body_builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, body_negate2));
HloInstruction* body_negate4 = body_builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, body_negate3));
HloInstruction* body_negate5 = body_builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, body_negate4));
HloInstruction* body_negate6 = body_builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, body_negate5));
HloInstruction* body_negate7 = body_builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, body_negate6));
HloInstruction* body_iter_increment = body_builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.f)));
HloInstruction* body_iter_next =
body_builder.AddInstruction(HloInstruction::CreateBinary(
scalar_shape, HloOpcode::kAdd, body_iter, body_iter_increment));
HloInstruction* body_out = body_builder.AddInstruction(
HloInstruction::CreateTuple({body_data, body_iter_next, body_negate7}));
HloComputation* body_computation =
module->AddEmbeddedComputation(body_builder.Build());
auto builder = HloComputation::Builder(TestName());
HloInstruction* data = builder.AddInstruction(
HloInstruction::CreateParameter(0, shape, "param_data"));
HloInstruction* iter = builder.AddInstruction(
HloInstruction::CreateParameter(1, scalar_shape, "param_iter"));
HloInstruction* negate0 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, data));
HloInstruction* negate1 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate0));
HloInstruction* negate2 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate1));
HloInstruction* negate3 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate2));
HloInstruction* negate4 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate3));
HloInstruction* negate5 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate4));
HloInstruction* negate6 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate5));
HloInstruction* negate7 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate6));
HloInstruction* tuple = builder.AddInstruction(
HloInstruction::CreateTuple({data, iter, negate7}));
HloInstruction* while_op = builder.AddInstruction(HloInstruction::CreateWhile(
tuple_shape, cond_computation, body_computation, tuple));
HloInstruction* while_data = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(shape, while_op, 0));
HloInstruction* while_data2 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(shape, while_op, 2));
HloInstruction* root = builder.AddInstruction(HloInstruction::CreateBinary(
shape, HloOpcode::kAdd, while_data, while_data2));
HloComputation* entry_computation =
module->AddEntryComputation(builder.Build());
HloSchedule schedule(module.get());
schedule.set_sequence(cond_computation,
{cond_param, cond_iter, cond_limit, cond_lt});
schedule.set_sequence(
body_computation,
{body_param, body_iter, body_data, body_negate0, body_negate1,
body_negate2, body_negate3, body_negate4, body_negate5, body_negate6,
body_negate7, body_iter_increment, body_iter_next, body_out});
schedule.set_sequence(
entry_computation,
{iter, data, negate0, negate1, negate2, negate3, negate4, negate5,
negate6, negate7, tuple, while_op, while_data, while_data2, root});
TF_CHECK_OK(module->set_schedule(schedule));
  AssignMemorySpace(module.get(), DefaultMemorySpaceOptions(),
                    /*max_prefetch_interval=*/25);
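  // The while instruction, its operand, and the condition/body parameters are
  // aliased and are expected to share one layout: tuple element {0} in the
  // alternate memory space, elements {1} and {2} in the default space.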
  *ShapeUtil::GetMutableSubshape(&tuple_shape, {0})->mutable_layout() =
      LayoutUtil::MakeLayout(
          /*minor_to_major=*/{1, 0}, /*dim_level_types=*/{},
          /*dim_unique=*/{}, /*dim_ordered=*/{}, /*tiles=*/{},
          /*tail_padding_alignment_in_elements=*/1,
          /*index_primitive_type=*/PRIMITIVE_TYPE_INVALID,
          /*pointer_primitive_type=*/PRIMITIVE_TYPE_INVALID,
          /*element_size_in_bits=*/0, kAlternateMemorySpace);
  *ShapeUtil::GetMutableSubshape(&tuple_shape, {1})->mutable_layout() =
      LayoutUtil::MakeLayout(
          /*minor_to_major=*/{}, /*dim_level_types=*/{},
          /*dim_unique=*/{}, /*dim_ordered=*/{}, /*tiles=*/{},
          /*tail_padding_alignment_in_elements=*/1,
          /*index_primitive_type=*/PRIMITIVE_TYPE_INVALID,
          /*pointer_primitive_type=*/PRIMITIVE_TYPE_INVALID,
          /*element_size_in_bits=*/0, kDefaultMemorySpace);
  *ShapeUtil::GetMutableSubshape(&tuple_shape, {2})->mutable_layout() =
      LayoutUtil::MakeLayout(
          /*minor_to_major=*/{1, 0}, /*dim_level_types=*/{},
          /*dim_unique=*/{}, /*dim_ordered=*/{}, /*tiles=*/{},
          /*tail_padding_alignment_in_elements=*/1,
          /*index_primitive_type=*/PRIMITIVE_TYPE_INVALID,
          /*pointer_primitive_type=*/PRIMITIVE_TYPE_INVALID,
          /*element_size_in_bits=*/0, kDefaultMemorySpace);
EXPECT_THAT(while_op, op::ShapeWithLayout(tuple_shape));
EXPECT_THAT(while_op->operand(0), op::ShapeWithLayout(tuple_shape));
EXPECT_THAT(cond_param, op::ShapeWithLayout(tuple_shape));
EXPECT_THAT(body_param, op::ShapeWithLayout(tuple_shape));
EXPECT_THAT(body_out, op::ShapeWithLayout(tuple_shape));
}
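// The copy of tuple element 1 is scheduled but never consumed (the add reads
// the element through a separate get-tuple-element); assignment should cope
// with the dangling copy.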
TEST_F(MemorySpaceAssignmentTest, DanglingCopy) {
HloComputation::Builder builder(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {2, 3});
Shape tuple_shape = ShapeUtil::MakeTupleShape({shape, shape});
HloInstruction* p = builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "p"));
HloInstruction* p0 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(shape, p, 0));
HloInstruction* p1a = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(shape, p, 1));
HloInstruction* copy = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kCopy, p1a));
HloInstruction* negate0 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, p0));
HloInstruction* negate1 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate0));
HloInstruction* negate2 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate1));
HloInstruction* negate3 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate2));
HloInstruction* negate4 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate3));
HloInstruction* negate5 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate4));
HloInstruction* negate6 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate5));
HloInstruction* p1b = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(shape, p, 1));
HloInstruction* add = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, negate6, p1b));
auto module = CreateNewVerifiedModule();
HloComputation* computation = module->AddEntryComputation(builder.Build());
HloSchedule schedule(module.get());
schedule.set_sequence(
computation, {p, p0, negate0, negate1, negate2, negate3, negate4, negate5,
negate6, p1a, copy, p1b, add});
TF_CHECK_OK(module->set_schedule(schedule));
AssignMemorySpace(module.get());
}
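// A fusion that produces a tuple (multi-output fusion) whose elements are
// extracted and added together.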
TEST_F(MemorySpaceAssignmentTest, MultiOutputFusion) {
HloComputation::Builder builder(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {2, 3});
Shape tuple_shape = ShapeUtil::MakeTupleShape({shape, shape});
auto module = CreateNewVerifiedModule();
HloComputation::Builder fusion_builder("fusion");
HloInstruction* fusion_param0 = fusion_builder.AddInstruction(
HloInstruction::CreateParameter(0, shape, "p0"));
HloInstruction* fusion_param1 = fusion_builder.AddInstruction(
HloInstruction::CreateParameter(1, shape, "p1"));
fusion_builder.AddInstruction(
HloInstruction::CreateTuple({fusion_param0, fusion_param1}));
HloComputation* fusion_computation =
module->AddEmbeddedComputation(fusion_builder.Build());
HloInstruction* p0 =
builder.AddInstruction(HloInstruction::CreateParameter(0, shape, "p0"));
HloInstruction* fusion = builder.AddInstruction(HloInstruction::CreateFusion(
tuple_shape, HloInstruction::FusionKind::kCustom, {p0, p0},
fusion_computation));
HloInstruction* element0 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(shape, fusion, 0));
HloInstruction* element1 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(shape, fusion, 1));
HloInstruction* add = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, element0, element1));
HloComputation* computation = module->AddEntryComputation(builder.Build());
HloSchedule schedule(module.get());
schedule.set_sequence(computation, {p0, fusion, element0, element1, add});
TF_CHECK_OK(module->set_schedule(schedule));
AssignMemorySpace(module.get());
}
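// A fusion that consumes a tuple-shaped operand built from two negates.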
TEST_F(MemorySpaceAssignmentTest, TupleInput) {
HloComputation::Builder builder(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {2, 3});
Shape tuple_shape = ShapeUtil::MakeTupleShape({shape, shape});
auto module = CreateNewVerifiedModule();
HloComputation::Builder fusion_builder("fusion");
HloInstruction* fusion_param = fusion_builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "p"));
HloInstruction* fusion_element0 = fusion_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(shape, fusion_param, 0));
HloInstruction* fusion_element1 = fusion_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(shape, fusion_param, 1));
fusion_builder.AddInstruction(HloInstruction::CreateBinary(
shape, HloOpcode::kAdd, fusion_element0, fusion_element1));
HloComputation* fusion_computation =
module->AddEmbeddedComputation(fusion_builder.Build());
HloInstruction* p0 =
builder.AddInstruction(HloInstruction::CreateParameter(0, shape, "p0"));
HloInstruction* p1 =
builder.AddInstruction(HloInstruction::CreateParameter(1, shape, "p1"));
HloInstruction* negate0 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, p0));
HloInstruction* negate1 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, p1));
HloInstruction* tuple =
builder.AddInstruction(HloInstruction::CreateTuple({negate0, negate1}));
HloInstruction* fusion = builder.AddInstruction(HloInstruction::CreateFusion(
shape, HloInstruction::FusionKind::kCustom, {tuple}, fusion_computation));
HloComputation* computation = module->AddEntryComputation(builder.Build());
HloSchedule schedule(module.get());
schedule.set_sequence(computation, {p0, p1, negate0, negate1, tuple, fusion});
TF_CHECK_OK(module->set_schedule(schedule));
AssignMemorySpace(module.get());
}
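// A tuple-producing fusion feeds a tuple-consuming fusion with a long negate
// chain in between; each tuple element is expected to reach fusion1 through
// an async copy into the alternate memory (verified below).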
TEST_F(MemorySpaceAssignmentTest, TupleToTuple1) {
HloComputation::Builder builder(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {2, 3});
Shape tuple_shape = ShapeUtil::MakeTupleShape({shape, shape});
auto module = CreateNewVerifiedModule();
HloComputation::Builder fusion0_builder("fusion0");
HloInstruction* fusion0_param0 = fusion0_builder.AddInstruction(
HloInstruction::CreateParameter(0, shape, "p0"));
HloInstruction* fusion0_param1 = fusion0_builder.AddInstruction(
HloInstruction::CreateParameter(1, shape, "p1"));
fusion0_builder.AddInstruction(
HloInstruction::CreateTuple({fusion0_param0, fusion0_param1}));
HloComputation* fusion0_computation =
module->AddEmbeddedComputation(fusion0_builder.Build());
HloComputation::Builder fusion1_builder("fusion1");
HloInstruction* fusion1_param = fusion1_builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "p"));
HloInstruction* fusion1_element0 = fusion1_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(shape, fusion1_param, 0));
HloInstruction* fusion1_element1 = fusion1_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(shape, fusion1_param, 1));
fusion1_builder.AddInstruction(HloInstruction::CreateBinary(
shape, HloOpcode::kAdd, fusion1_element0, fusion1_element1));
HloComputation* fusion1_computation =
module->AddEmbeddedComputation(fusion1_builder.Build());
HloInstruction* p0 =
builder.AddInstruction(HloInstruction::CreateParameter(0, shape, "p0"));
HloInstruction* fusion0 = builder.AddInstruction(HloInstruction::CreateFusion(
tuple_shape, HloInstruction::FusionKind::kCustom, {p0, p0},
fusion0_computation));
HloInstruction* element0 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(shape, fusion0, 0));
HloInstruction* element1 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(shape, fusion0, 1));
HloInstruction* negate0 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, p0));
HloInstruction* negate1 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate0));
HloInstruction* negate2 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate1));
HloInstruction* negate3 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate2));
HloInstruction* negate4 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate3));
HloInstruction* negate5 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate4));
HloInstruction* negate6 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate5));
HloInstruction* add0 = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, element0, element1));
HloInstruction* add1 = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, add0, negate6));
HloInstruction* fusion1 = builder.AddInstruction(
HloInstruction::CreateFusion(shape, HloInstruction::FusionKind::kCustom,
{fusion0}, fusion1_computation));
HloInstruction* mul = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kMultiply, add1, fusion1));
HloComputation* computation = module->AddEntryComputation(builder.Build());
HloSchedule schedule(module.get());
schedule.set_sequence(
computation,
{p0, fusion0, element0, element1, negate0, negate1, negate2, negate3,
negate4, negate5, negate6, add0, add1, fusion1, mul});
TF_CHECK_OK(module->set_schedule(schedule));
  AssignMemorySpace(module.get(), DefaultMemorySpaceOptions(),
                    /*max_prefetch_interval=*/5);
EXPECT_THAT(fusion1,
op::Fusion(op::Tuple(
op::AsyncCopy(kAlternateMemorySpace, kDefaultMemorySpace,
op::GetTupleElement(op::Fusion(), 0)),
op::AsyncCopy(kAlternateMemorySpace, kDefaultMemorySpace,
op::GetTupleElement(op::Fusion(), 1)))));
}
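// Like TupleToTuple1 but with a nested tuple shape; every leaf element is
// expected to arrive at fusion1 through an async copy.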
TEST_F(MemorySpaceAssignmentTest, TupleToTuple2) {
HloComputation::Builder builder(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {2, 3});
Shape tuple_shape = ShapeUtil::MakeTupleShape({shape, shape});
Shape nested_tuple_shape = ShapeUtil::MakeTupleShape({shape, tuple_shape});
auto module = CreateNewVerifiedModule();
HloComputation::Builder fusion0_builder("fusion0");
HloInstruction* fusion0_param0 = fusion0_builder.AddInstruction(
HloInstruction::CreateParameter(0, shape, "p0"));
HloInstruction* fusion0_param1 = fusion0_builder.AddInstruction(
HloInstruction::CreateParameter(1, shape, "p1"));
HloInstruction* fusion0_tuple = fusion0_builder.AddInstruction(
HloInstruction::CreateTuple({fusion0_param0, fusion0_param1}));
fusion0_builder.AddInstruction(
HloInstruction::CreateTuple({fusion0_param0, fusion0_tuple}));
HloComputation* fusion0_computation =
module->AddEmbeddedComputation(fusion0_builder.Build());
HloComputation::Builder fusion1_builder("fusion1");
HloInstruction* fusion1_param = fusion1_builder.AddInstruction(
HloInstruction::CreateParameter(0, nested_tuple_shape, "p"));
HloInstruction* fusion1_element0 = fusion1_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(shape, fusion1_param, 0));
HloInstruction* fusion1_element1 = fusion1_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(tuple_shape, fusion1_param, 1));
HloInstruction* fusion1_element2 = fusion1_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(shape, fusion1_element1, 1));
fusion1_builder.AddInstruction(HloInstruction::CreateBinary(
shape, HloOpcode::kAdd, fusion1_element0, fusion1_element2));
HloComputation* fusion1_computation =
module->AddEmbeddedComputation(fusion1_builder.Build());
HloInstruction* p0 =
builder.AddInstruction(HloInstruction::CreateParameter(0, shape, "p0"));
HloInstruction* fusion0 = builder.AddInstruction(HloInstruction::CreateFusion(
nested_tuple_shape, HloInstruction::FusionKind::kCustom, {p0, p0},
fusion0_computation));
HloInstruction* negate0 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, p0));
HloInstruction* negate1 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate0));
HloInstruction* negate2 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate1));
HloInstruction* negate3 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate2));
HloInstruction* negate4 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate3));
HloInstruction* negate5 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate4));
HloInstruction* negate6 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate5));
HloInstruction* fusion1 = builder.AddInstruction(
HloInstruction::CreateFusion(shape, HloInstruction::FusionKind::kCustom,
{fusion0}, fusion1_computation));
HloComputation* computation = module->AddEntryComputation(builder.Build());
HloSchedule schedule(module.get());
schedule.set_sequence(
computation, {p0, fusion0, negate0, negate1, negate2, negate3, negate4,
negate5, negate6, fusion1});
TF_CHECK_OK(module->set_schedule(schedule));
  AssignMemorySpace(module.get(), DefaultMemorySpaceOptions(),
                    /*max_prefetch_interval=*/5);
EXPECT_THAT(
fusion1,
op::Fusion(op::Tuple(
op::AsyncCopy(kAlternateMemorySpace, kDefaultMemorySpace,
op::GetTupleElement(op::Fusion(), 0)),
op::Tuple(
op::AsyncCopy(
kAlternateMemorySpace, kDefaultMemorySpace,
op::GetTupleElement(op::GetTupleElement(op::Fusion(), 1), 0)),
op::AsyncCopy(kAlternateMemorySpace, kDefaultMemorySpace,
op::GetTupleElement(
op::GetTupleElement(op::Fusion(), 1), 1))))));
}
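// The two fusions are scheduled back to back, so fusion1 is expected to
// consume fusion0 directly, without any async copies in between.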
TEST_F(MemorySpaceAssignmentTest, TupleToTuple3) {
HloComputation::Builder builder(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {2, 3});
Shape tuple_shape = ShapeUtil::MakeTupleShape({shape, shape});
auto module = CreateNewVerifiedModule();
HloComputation::Builder fusion0_builder("fusion0");
HloInstruction* fusion0_param0 = fusion0_builder.AddInstruction(
HloInstruction::CreateParameter(0, shape, "p0"));
HloInstruction* fusion0_param1 = fusion0_builder.AddInstruction(
HloInstruction::CreateParameter(1, shape, "p1"));
fusion0_builder.AddInstruction(
HloInstruction::CreateTuple({fusion0_param0, fusion0_param1}));
HloComputation* fusion0_computation =
module->AddEmbeddedComputation(fusion0_builder.Build());
HloComputation::Builder fusion1_builder("fusion1");
HloInstruction* fusion1_param = fusion1_builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "p"));
HloInstruction* fusion1_element0 = fusion1_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(shape, fusion1_param, 0));
HloInstruction* fusion1_element1 = fusion1_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(shape, fusion1_param, 1));
fusion1_builder.AddInstruction(HloInstruction::CreateBinary(
shape, HloOpcode::kAdd, fusion1_element0, fusion1_element1));
HloComputation* fusion1_computation =
module->AddEmbeddedComputation(fusion1_builder.Build());
HloInstruction* p0 =
builder.AddInstruction(HloInstruction::CreateParameter(0, shape, "p0"));
HloInstruction* fusion0 = builder.AddInstruction(HloInstruction::CreateFusion(
tuple_shape, HloInstruction::FusionKind::kCustom, {p0, p0},
fusion0_computation));
HloInstruction* fusion1 = builder.AddInstruction(
HloInstruction::CreateFusion(shape, HloInstruction::FusionKind::kCustom,
{fusion0}, fusion1_computation));
HloComputation* computation = module->AddEntryComputation(builder.Build());
HloSchedule schedule(module.get());
schedule.set_sequence(computation, {p0, fusion0, fusion1});
TF_CHECK_OK(module->set_schedule(schedule));
AssignMemorySpace(module.get());
EXPECT_THAT(fusion1, op::Fusion(op::Fusion()));
}
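// Parameters that are aliased with the output must remain in the default
// memory space.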
TEST_F(MemorySpaceAssignmentTest, InputOutputAlias) {
HloComputation::Builder builder(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {2, 3});
Shape tuple_shape = ShapeUtil::MakeTupleShape({shape, shape});
HloInstruction* p = builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "p"));
HloInstruction* p0 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(shape, p, 0));
HloInstruction* negate0 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, p0));
HloInstruction* negate1 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate0));
HloInstruction* negate2 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate1));
HloInstruction* negate3 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate2));
HloInstruction* negate4 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate3));
HloInstruction* negate5 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate4));
HloInstruction* negate6 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate5));
HloInstruction* p1 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(shape, p, 1));
HloInstruction* add = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, negate6, p1));
HloInstruction* negate7 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, add));
HloInstruction* tuple =
builder.AddInstruction(HloInstruction::CreateTuple({p0, add}));
auto module = CreateNewVerifiedModule();
HloComputation* computation = module->AddEntryComputation(builder.Build());
HloSchedule schedule(module.get());
schedule.set_sequence(
computation, {p, p0, negate0, negate1, negate2, negate3, negate4, negate5,
negate6, p1, add, negate7, tuple});
TF_CHECK_OK(module->set_schedule(schedule));
TF_CHECK_OK(module->input_output_alias_config().SetUpAlias({0}, 0, {0}));
TF_CHECK_OK(module->input_output_alias_config().SetUpAlias({1}, 0, {1}));
AssignMemorySpace(module.get());
EXPECT_EQ(p->shape().tuple_shapes(0).layout().memory_space(),
kDefaultMemorySpace);
EXPECT_EQ(p->shape().tuple_shapes(1).layout().memory_space(),
kDefaultMemorySpace);
}
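// Runs assignment driven by cost analysis; the negate chain is expected to
// land in the alternate memory space while the parameters keep the default
// layout.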
TEST_F(MemorySpaceAssignmentTest, CostAnalysis) {
HloComputation::Builder builder(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {2, 3});
HloInstruction* p0 =
builder.AddInstruction(HloInstruction::CreateParameter(0, shape, "p0"));
HloInstruction* p1 =
builder.AddInstruction(HloInstruction::CreateParameter(1, shape, "p1"));
HloInstruction* negate0 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, p0));
HloInstruction* negate1 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate0));
HloInstruction* negate2 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate1));
HloInstruction* negate3 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate2));
HloInstruction* negate4 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate3));
HloInstruction* negate5 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate4));
HloInstruction* negate6 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate5));
HloInstruction* add = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, negate6, p1));
auto module = CreateNewVerifiedModule();
HloComputation* computation = module->AddEntryComputation(builder.Build());
HloSchedule schedule(module.get());
schedule.set_sequence(computation, {p0, p1, negate0, negate1, negate2,
negate3, negate4, negate5, negate6, add});
TF_CHECK_OK(module->set_schedule(schedule));
AssignMemorySpaceUsingCostAnalysis(module.get());
EXPECT_THAT(p0, op::ShapeWithLayout(shape));
EXPECT_THAT(p1, op::ShapeWithLayout(shape));
  Shape shape_in_alternate_mem = ShapeUtil::MakeShapeWithDenseLayout(
      F32, {2, 3},
      /*minor_to_major=*/{1, 0}, /*tiles=*/{},
      /*tail_padding_alignment_in_elements=*/1,
      /*element_size_in_bits=*/0, kAlternateMemorySpace);
EXPECT_THAT(negate0, op::ShapeWithLayout(shape_in_alternate_mem));
EXPECT_THAT(negate1, op::ShapeWithLayout(shape_in_alternate_mem));
EXPECT_THAT(negate2, op::ShapeWithLayout(shape_in_alternate_mem));
EXPECT_THAT(negate3, op::ShapeWithLayout(shape_in_alternate_mem));
EXPECT_THAT(negate4, op::ShapeWithLayout(shape_in_alternate_mem));
EXPECT_THAT(negate5, op::ShapeWithLayout(shape_in_alternate_mem));
EXPECT_THAT(negate6, op::ShapeWithLayout(shape_in_alternate_mem));
}
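// With cost-analysis-driven prioritization, the long-lived tanh values are
// expected to stay in the default memory space while at least one of the
// negates is placed in the alternate space.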
TEST_F(MemorySpaceAssignmentTest, MemoryBoundednessBufferIntervalCompare) {
HloComputation::Builder builder(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {4, 3});
HloInstruction* p0 =
builder.AddInstruction(HloInstruction::CreateParameter(0, shape, "p0"));
HloInstruction* p1 =
builder.AddInstruction(HloInstruction::CreateParameter(1, shape, "p1"));
HloInstruction* tanh0 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kTanh, p0));
HloInstruction* negate0 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, p1));
HloInstruction* tanh1 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kTanh, tanh0));
HloInstruction* negate1 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate0));
HloInstruction* tanh2 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kTanh, tanh1));
HloInstruction* negate2 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate1));
HloInstruction* tanh3 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kTanh, tanh2));
HloInstruction* negate3 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate2));
HloInstruction* tanh4 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kTanh, tanh3));
HloInstruction* negate4 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate3));
HloInstruction* tuple =
builder.AddInstruction(HloInstruction::CreateTuple({tanh4, negate4}));
auto module = CreateNewVerifiedModule();
HloComputation* computation = module->AddEntryComputation(builder.Build());
HloSchedule schedule(module.get());
schedule.set_sequence(computation,
{p0, p1, tanh0, negate0, tanh1, negate1, tanh2, negate2,
tanh3, negate3, tanh4, negate4, tuple});
TF_CHECK_OK(module->set_schedule(schedule));
AssignMemorySpaceUsingCostAnalysis(module.get());
EXPECT_THAT(p0, op::ShapeWithLayout(shape));
EXPECT_THAT(p1, op::ShapeWithLayout(shape));
  Shape shape_in_default_mem = ShapeUtil::MakeShapeWithDenseLayout(
      F32, {4, 3},
      /*minor_to_major=*/{1, 0}, /*tiles=*/{},
      /*tail_padding_alignment_in_elements=*/1,
      /*element_size_in_bits=*/0, kDefaultMemorySpace);
std::vector<HloInstruction*> negate_instructions = {negate0, negate1, negate2,
negate3, negate4};
int64_t num_negates_in_alternate_mem = absl::c_count_if(
negate_instructions, [&](const HloInstruction* instruction) {
return instruction->shape().layout().memory_space() ==
kAlternateMemorySpace;
});
EXPECT_GE(num_negates_in_alternate_mem, 1);
EXPECT_THAT(tanh0, op::ShapeWithLayout(shape_in_default_mem));
EXPECT_THAT(tanh1, op::ShapeWithLayout(shape_in_default_mem));
EXPECT_THAT(tanh2, op::ShapeWithLayout(shape_in_default_mem));
EXPECT_THAT(tanh3, op::ShapeWithLayout(shape_in_default_mem));
EXPECT_THAT(tanh4, op::ShapeWithLayout(shape_in_default_mem));
}
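// Uses an MsaSortOrderOverrides text proto to force buffers whose defining
// instruction matches "negate(.*)" to be assigned first.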
TEST_F(MemorySpaceAssignmentTest,
MemoryBoundednessOverrideSortOrderAssignFirst) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
ENTRY entry {
p0 = f32[3,4]{1,0} parameter(0)
p1 = f32[3,4]{1,0} parameter(1)
tanh0 = f32[3,4]{1,0} tanh(p0)
negate0 = f32[3,4]{1,0} negate(p1)
tanh1 = f32[3,4]{1,0} tanh(tanh0)
negate1 = f32[3,4]{1,0} negate(negate0)
tanh2 = f32[3,4]{1,0} tanh(tanh1)
negate2 = f32[3,4]{1,0} negate(negate1)
tanh3 = f32[3,4]{1,0} tanh(tanh2)
negate3 = f32[3,4]{1,0} negate(negate2)
tanh4 = f32[3,4]{1,0} tanh(tanh3)
negate4 = f32[3,4]{1,0} negate(negate3)
ROOT tuple = (f32[3,4]{1,0}, f32[3,4]{1,0}) tuple(tanh4, negate4)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
const std::string text_proto = R"pb(
overrides {
hlo_position_matcher { instruction_name_regex: "negate(.*)" }
override_options { assign_first: true }
})pb";
TF_ASSERT_OK_AND_ASSIGN(auto msa_sort_order_overrides,
ParseTextProto<MsaSortOrderOverrides>(text_proto));
  AssignMemorySpaceUsingCostAnalysis(
      module.get(), /*memory_space_options_override=*/std::nullopt,
      /*cost_analysis_options_override=*/std::nullopt,
      /*hlo_cost_options_override=*/std::nullopt,
      /*optional_msa_sort_order_overrides=*/msa_sort_order_overrides);
const HloInstruction* p0 = FindInstruction(module.get(), "p0");
EXPECT_EQ(p0->shape().layout().memory_space(), kDefaultMemorySpace);
const HloInstruction* p1 = FindInstruction(module.get(), "p1");
EXPECT_EQ(p1->shape().layout().memory_space(), kDefaultMemorySpace);
HloInstruction* negate0 = FindInstruction(module.get(), "negate0");
EXPECT_EQ(negate0->shape().layout().memory_space(), kAlternateMemorySpace);
HloInstruction* negate1 = FindInstruction(module.get(), "negate1");
EXPECT_EQ(negate1->shape().layout().memory_space(), kAlternateMemorySpace);
HloInstruction* negate2 = FindInstruction(module.get(), "negate2");
EXPECT_EQ(negate2->shape().layout().memory_space(), kAlternateMemorySpace);
HloInstruction* negate3 = FindInstruction(module.get(), "negate3");
EXPECT_EQ(negate3->shape().layout().memory_space(), kAlternateMemorySpace);
HloInstruction* negate4 = FindInstruction(module.get(), "negate4");
EXPECT_EQ(negate4->shape().layout().memory_space(), kDefaultMemorySpace);
const HloInstruction* tanh0 = FindInstruction(module.get(), "tanh0");
EXPECT_EQ(tanh0->shape().layout().memory_space(), kDefaultMemorySpace);
const HloInstruction* tanh1 = FindInstruction(module.get(), "tanh1");
EXPECT_EQ(tanh1->shape().layout().memory_space(), kDefaultMemorySpace);
const HloInstruction* tanh2 = FindInstruction(module.get(), "tanh2");
EXPECT_EQ(tanh2->shape().layout().memory_space(), kDefaultMemorySpace);
const HloInstruction* tanh3 = FindInstruction(module.get(), "tanh3");
EXPECT_EQ(tanh3->shape().layout().memory_space(), kDefaultMemorySpace);
const HloInstruction* tanh4 = FindInstruction(module.get(), "tanh4");
EXPECT_EQ(tanh4->shape().layout().memory_space(), kDefaultMemorySpace);
}
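// Same module as above, but the override instead forces "tanh(.*)" buffers to
// be assigned last.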
TEST_F(MemorySpaceAssignmentTest,
MemoryBoundednessOverrideSortOrderAssignLast) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
ENTRY entry {
p0 = f32[3,4]{1,0} parameter(0)
p1 = f32[3,4]{1,0} parameter(1)
tanh0 = f32[3,4]{1,0} tanh(p0)
negate0 = f32[3,4]{1,0} negate(p1)
tanh1 = f32[3,4]{1,0} tanh(tanh0)
negate1 = f32[3,4]{1,0} negate(negate0)
tanh2 = f32[3,4]{1,0} tanh(tanh1)
negate2 = f32[3,4]{1,0} negate(negate1)
tanh3 = f32[3,4]{1,0} tanh(tanh2)
negate3 = f32[3,4]{1,0} negate(negate2)
tanh4 = f32[3,4]{1,0} tanh(tanh3)
negate4 = f32[3,4]{1,0} negate(negate3)
ROOT tuple = (f32[3,4]{1,0}, f32[3,4]{1,0}) tuple(tanh4, negate4)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
const std::string text_proto = R"pb(
overrides {
hlo_position_matcher { instruction_name_regex: "tanh(.*)" }
override_options { assign_last: true }
}
)pb";
TF_ASSERT_OK_AND_ASSIGN(auto msa_sort_order_overrides,
ParseTextProto<MsaSortOrderOverrides>(text_proto));
  AssignMemorySpaceUsingCostAnalysis(
      module.get(), /*memory_space_options_override=*/std::nullopt,
      /*cost_analysis_options_override=*/std::nullopt,
      /*hlo_cost_options_override=*/std::nullopt,
      /*optional_msa_sort_order_overrides=*/msa_sort_order_overrides);
const HloInstruction* p0 = FindInstruction(module.get(), "p0");
EXPECT_EQ(p0->shape().layout().memory_space(), kDefaultMemorySpace);
const HloInstruction* p1 = FindInstruction(module.get(), "p1");
EXPECT_EQ(p1->shape().layout().memory_space(), kDefaultMemorySpace);
HloInstruction* negate0 = FindInstruction(module.get(), "negate0");
EXPECT_EQ(negate0->shape().layout().memory_space(), kAlternateMemorySpace);
HloInstruction* negate1 = FindInstruction(module.get(), "negate1");
EXPECT_EQ(negate1->shape().layout().memory_space(), kAlternateMemorySpace);
HloInstruction* negate2 = FindInstruction(module.get(), "negate2");
EXPECT_EQ(negate2->shape().layout().memory_space(), kAlternateMemorySpace);
HloInstruction* negate3 = FindInstruction(module.get(), "negate3");
EXPECT_EQ(negate3->shape().layout().memory_space(), kAlternateMemorySpace);
HloInstruction* negate4 = FindInstruction(module.get(), "negate4");
EXPECT_EQ(negate4->shape().layout().memory_space(), kDefaultMemorySpace);
const HloInstruction* tanh0 = FindInstruction(module.get(), "tanh0");
EXPECT_EQ(tanh0->shape().layout().memory_space(), kDefaultMemorySpace);
const HloInstruction* tanh1 = FindInstruction(module.get(), "tanh1");
EXPECT_EQ(tanh1->shape().layout().memory_space(), kDefaultMemorySpace);
const HloInstruction* tanh2 = FindInstruction(module.get(), "tanh2");
EXPECT_EQ(tanh2->shape().layout().memory_space(), kDefaultMemorySpace);
const HloInstruction* tanh3 = FindInstruction(module.get(), "tanh3");
EXPECT_EQ(tanh3->shape().layout().memory_space(), kDefaultMemorySpace);
const HloInstruction* tanh4 = FindInstruction(module.get(), "tanh4");
EXPECT_EQ(tanh4->shape().layout().memory_space(), kDefaultMemorySpace);
}
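// Overrides the sort order by size: buffers of at most 48 bytes (the f32[3,4]
// tanh chain) are assigned first under a 120-byte alternate memory budget.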
TEST_F(MemorySpaceAssignmentTest,
MemoryBoundednessOverrideSortOrderBySizeLteAssignFirst) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
ENTRY entry {
p0 = f32[3,4]{1,0} parameter(0)
p1 = f32[5,4]{1,0} parameter(1)
tanh0 = f32[3,4]{1,0} tanh(p0)
negate0 = f32[5,4]{1,0} negate(p1)
tanh1 = f32[3,4]{1,0} tanh(tanh0)
negate1 = f32[5,4]{1,0} negate(negate0)
tanh2 = f32[3,4]{1,0} tanh(tanh1)
negate2 = f32[5,4]{1,0} negate(negate1)
tanh3 = f32[3,4]{1,0} tanh(tanh2)
negate3 = f32[5,4]{1,0} negate(negate2)
tanh4 = f32[3,4]{1,0} tanh(tanh3)
negate4 = f32[5,4]{1,0} negate(negate3)
ROOT tuple = (f32[3,4]{1,0}, f32[5,4]{1,0}) tuple(tanh4, negate4)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
const std::string text_proto = R"pb(
overrides {
hlo_position_matcher { size_lte: 48 }
override_options { assign_first: true }
}
)pb";
TF_ASSERT_OK_AND_ASSIGN(auto msa_sort_order_overrides,
ParseTextProto<MsaSortOrderOverrides>(text_proto));
Options memory_space_options = DefaultMemorySpaceOptions();
memory_space_options.max_size_in_bytes = 120;
  AssignMemorySpaceUsingCostAnalysis(
      module.get(), /*memory_space_options_override=*/memory_space_options,
      /*cost_analysis_options_override=*/std::nullopt,
      /*hlo_cost_options_override=*/std::nullopt,
      /*optional_msa_sort_order_overrides=*/msa_sort_order_overrides);
const HloInstruction* p0 = FindInstruction(module.get(), "p0");
EXPECT_EQ(p0->shape().layout().memory_space(), kDefaultMemorySpace);
const HloInstruction* p1 = FindInstruction(module.get(), "p1");
EXPECT_EQ(p1->shape().layout().memory_space(), kDefaultMemorySpace);
HloInstruction* negate0 = FindInstruction(module.get(), "negate0");
EXPECT_EQ(negate0->shape().layout().memory_space(), kDefaultMemorySpace);
HloInstruction* negate1 = FindInstruction(module.get(), "negate1");
EXPECT_EQ(negate1->shape().layout().memory_space(), kDefaultMemorySpace);
HloInstruction* negate2 = FindInstruction(module.get(), "negate2");
EXPECT_EQ(negate2->shape().layout().memory_space(), kDefaultMemorySpace);
HloInstruction* negate3 = FindInstruction(module.get(), "negate3");
EXPECT_EQ(negate3->shape().layout().memory_space(), kDefaultMemorySpace);
HloInstruction* negate4 = FindInstruction(module.get(), "negate4");
EXPECT_EQ(negate4->shape().layout().memory_space(), kDefaultMemorySpace);
const HloInstruction* tanh0 = FindInstruction(module.get(), "tanh0");
EXPECT_EQ(tanh0->shape().layout().memory_space(), kAlternateMemorySpace);
const HloInstruction* tanh1 = FindInstruction(module.get(), "tanh1");
EXPECT_EQ(tanh1->shape().layout().memory_space(), kAlternateMemorySpace);
const HloInstruction* tanh2 = FindInstruction(module.get(), "tanh2");
EXPECT_EQ(tanh2->shape().layout().memory_space(), kAlternateMemorySpace);
const HloInstruction* tanh3 = FindInstruction(module.get(), "tanh3");
EXPECT_EQ(tanh3->shape().layout().memory_space(), kAlternateMemorySpace);
const HloInstruction* tanh4 = FindInstruction(module.get(), "tanh4");
EXPECT_EQ(tanh4->shape().layout().memory_space(), kDefaultMemorySpace);
}
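// Overrides the sort order by size: buffers of at least 80 bytes (the
// f32[5,4] negate chain) are assigned first under a 160-byte alternate memory
// budget.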
TEST_F(MemorySpaceAssignmentTest,
MemoryBoundednessOverrideSortOrderBySizeGteAssignFirst) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
ENTRY entry {
p0 = f32[3,4]{1,0} parameter(0)
p1 = f32[5,4]{1,0} parameter(1)
tanh0 = f32[3,4]{1,0} tanh(p0)
negate0 = f32[5,4]{1,0} negate(p1)
tanh1 = f32[3,4]{1,0} tanh(tanh0)
negate1 = f32[5,4]{1,0} negate(negate0)
tanh2 = f32[3,4]{1,0} tanh(tanh1)
negate2 = f32[5,4]{1,0} negate(negate1)
tanh3 = f32[3,4]{1,0} tanh(tanh2)
negate3 = f32[5,4]{1,0} negate(negate2)
tanh4 = f32[3,4]{1,0} tanh(tanh3)
negate4 = f32[5,4]{1,0} negate(negate3)
ROOT tuple = (f32[3,4]{1,0}, f32[5,4]{1,0}) tuple(tanh4, negate4)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
const std::string text_proto = R"pb(
overrides {
hlo_position_matcher { size_gte: 80 }
override_options { assign_first: true }
}
)pb";
TF_ASSERT_OK_AND_ASSIGN(auto msa_sort_order_overrides,
ParseTextProto<MsaSortOrderOverrides>(text_proto));
Options memory_space_options = DefaultMemorySpaceOptions();
memory_space_options.max_size_in_bytes = 160;
  AssignMemorySpaceUsingCostAnalysis(
      module.get(), /*memory_space_options_override=*/memory_space_options,
      /*cost_analysis_options_override=*/std::nullopt,
      /*hlo_cost_options_override=*/std::nullopt,
      /*optional_msa_sort_order_overrides=*/msa_sort_order_overrides);
const HloInstruction* p0 = FindInstruction(module.get(), "p0");
EXPECT_EQ(p0->shape().layout().memory_space(), kDefaultMemorySpace);
const HloInstruction* p1 = FindInstruction(module.get(), "p1");
EXPECT_EQ(p1->shape().layout().memory_space(), kDefaultMemorySpace);
HloInstruction* negate0 = FindInstruction(module.get(), "negate0");
EXPECT_EQ(negate0->shape().layout().memory_space(), kAlternateMemorySpace);
HloInstruction* negate1 = FindInstruction(module.get(), "negate1");
EXPECT_EQ(negate1->shape().layout().memory_space(), kAlternateMemorySpace);
HloInstruction* negate2 = FindInstruction(module.get(), "negate2");
EXPECT_EQ(negate2->shape().layout().memory_space(), kAlternateMemorySpace);
HloInstruction* negate3 = FindInstruction(module.get(), "negate3");
EXPECT_EQ(negate3->shape().layout().memory_space(), kAlternateMemorySpace);
HloInstruction* negate4 = FindInstruction(module.get(), "negate4");
EXPECT_EQ(negate4->shape().layout().memory_space(), kDefaultMemorySpace);
const HloInstruction* tanh0 = FindInstruction(module.get(), "tanh0");
EXPECT_EQ(tanh0->shape().layout().memory_space(), kDefaultMemorySpace);
const HloInstruction* tanh1 = FindInstruction(module.get(), "tanh1");
EXPECT_EQ(tanh1->shape().layout().memory_space(), kDefaultMemorySpace);
const HloInstruction* tanh2 = FindInstruction(module.get(), "tanh2");
EXPECT_EQ(tanh2->shape().layout().memory_space(), kDefaultMemorySpace);
const HloInstruction* tanh3 = FindInstruction(module.get(), "tanh3");
EXPECT_EQ(tanh3->shape().layout().memory_space(), kDefaultMemorySpace);
const HloInstruction* tanh4 = FindInstruction(module.get(), "tanh4");
EXPECT_EQ(tanh4->shape().layout().memory_space(), kDefaultMemorySpace);
}
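// A simple counted while loop over an (s32, f32[1]) tuple; the while and its
// parameter are expected to keep default-memory layouts.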
TEST_F(MemorySpaceAssignmentTest, SimpleWhileTupleTest) {
Shape s32 = ShapeUtil::MakeShape(xla::S32, {});
Shape f32v1 = ShapeUtil::MakeShape(F32, {1});
Shape t_s32_f32v1 = ShapeUtil::MakeTupleShape({s32, f32v1});
auto module = CreateNewVerifiedModule("SimpleWhile");
HloSchedule schedule(module.get());
HloComputation* cond_computation;
{
auto builder = HloComputation::Builder("WhileCond");
auto const4 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int>(4)));
auto param = builder.AddInstruction(
HloInstruction::CreateParameter(0, t_s32_f32v1, "x"));
auto index = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(const4->shape(), param, 0));
auto compare = builder.AddInstruction(
HloInstruction::CreateCompare(ShapeUtil::MakeShape(PRED, {}), index,
const4, ComparisonDirection::kLt));
cond_computation = module->AddEmbeddedComputation(builder.Build());
schedule.set_sequence(cond_computation, {const4, param, index, compare});
}
HloComputation* body_computation;
{
auto builder = HloComputation::Builder("WhileBody");
auto const1 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int>(1)));
auto constv = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR1<float>({1.1f})));
auto param = builder.AddInstruction(
HloInstruction::CreateParameter(0, t_s32_f32v1, "x"));
auto indexc = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(const1->shape(), param, 0));
auto addc = builder.AddInstruction(HloInstruction::CreateBinary(
indexc->shape(), HloOpcode::kAdd, indexc, const1));
auto indexv = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(constv->shape(), param, 1));
auto addv = builder.AddInstruction(HloInstruction::CreateBinary(
constv->shape(), HloOpcode::kAdd, indexv, constv));
auto tuple =
builder.AddInstruction(HloInstruction::CreateTuple({addc, addv}));
body_computation = module->AddEmbeddedComputation(builder.Build());
schedule.set_sequence(body_computation, {const1, constv, param, indexc,
addc, indexv, addv, tuple});
}
auto builder = HloComputation::Builder("SimpleWhile");
auto param = builder.AddInstruction(
HloInstruction::CreateParameter(0, t_s32_f32v1, "param"));
auto gte0 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(s32, param, 0));
auto gte1 = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(f32v1, param, 1));
auto tuple =
builder.AddInstruction(HloInstruction::CreateTuple({gte0, gte1}));
auto while0 = builder.AddInstruction(HloInstruction::CreateWhile(
t_s32_f32v1, cond_computation, body_computation, tuple));
HloComputation* computation = module->AddEntryComputation(builder.Build());
schedule.set_sequence(computation, {param, gte0, gte1, tuple, while0});
TF_CHECK_OK(module->set_schedule(schedule));
  AssignMemorySpace(module.get(), DefaultMemorySpaceOptions(),
                    /*max_prefetch_interval=*/50);
  Shape shape_in_default_mem = ShapeUtil::MakeShapeWithDenseLayout(
      F32, {4, 6},
      /*minor_to_major=*/{1, 0}, /*tiles=*/{},
      /*tail_padding_alignment_in_elements=*/1,
      /*element_size_in_bits=*/0, kDefaultMemorySpace);
  Shape s32_in_default_mem = ShapeUtil::MakeShapeWithDenseLayout(
      xla::S32, {},
      /*minor_to_major=*/{}, /*tiles=*/{},
      /*tail_padding_alignment_in_elements=*/1,
      /*element_size_in_bits=*/0, kDefaultMemorySpace);
  Shape f32v1_in_default_mem = ShapeUtil::MakeShapeWithDenseLayout(
      F32, {1},
      /*minor_to_major=*/{0}, /*tiles=*/{},
      /*tail_padding_alignment_in_elements=*/1,
      /*element_size_in_bits=*/0, kDefaultMemorySpace);
Shape t_s32_f32v1_in_default_mem =
ShapeUtil::MakeTupleShape({s32_in_default_mem, f32v1_in_default_mem});
EXPECT_THAT(param, op::ShapeWithLayout(t_s32_f32v1_in_default_mem));
EXPECT_THAT(while0, op::ShapeWithLayout(t_s32_f32v1_in_default_mem));
}
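// After assignment, walks the live ranges of all alternate-memory buffers and
// checks that at most two of them are live at any point in the schedule,
// i.e. that evictions were not delayed.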
TEST_F(MemorySpaceAssignmentTest, EvictionsShouldntBeDelayed) {
HloComputation::Builder builder(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {4, 3});
HloInstruction* p0 =
builder.AddInstruction(HloInstruction::CreateParameter(0, shape, "p0"));
HloInstruction* tanh0 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kTanh, p0));
HloInstruction* tanh_redundant0 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kTanh, p0));
HloInstruction* tanh_redundant1 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kTanh, p0));
HloInstruction* tanh_redundant2 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kTanh, p0));
HloInstruction* tanh_redundant3 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kTanh, p0));
HloInstruction* tanh_redundant4 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kTanh, p0));
HloInstruction* tanh_redundant5 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kTanh, p0));
HloInstruction* tanh_redundant6 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kTanh, p0));
HloInstruction* negate0 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, tanh0));
HloInstruction* tanh1 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kTanh, negate0));
HloInstruction* negate1 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate0));
HloInstruction* tanh2 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kTanh, tanh1));
HloInstruction* negate2 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate1));
HloInstruction* tanh3 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kTanh, tanh2));
HloInstruction* negate3 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate2));
HloInstruction* tuple = builder.AddInstruction(
HloInstruction::CreateTuple({tanh3, negate3, tanh0}));
auto module = CreateNewVerifiedModule();
HloComputation* computation = module->AddEntryComputation(builder.Build());
HloSchedule schedule(module.get());
schedule.set_sequence(
computation,
{p0, tanh0, tanh_redundant0, tanh_redundant1, tanh_redundant2,
tanh_redundant3, tanh_redundant4, tanh_redundant5, tanh_redundant6,
negate0, tanh1, negate1, tanh2, negate2, tanh3, negate3, tuple});
TF_CHECK_OK(module->set_schedule(schedule));
AssignMemorySpaceUsingCostAnalysis(module.get());
TF_ASSERT_OK_AND_ASSIGN(auto alias_analysis,
HloAliasAnalysis::Run(module.get()));
TF_ASSERT_OK_AND_ASSIGN(auto hlo_live_range,
HloLiveRange::Run(module->schedule(), *alias_analysis,
module->entry_computation()));
std::vector<int> num_live_buffers_in_alternate_mem(
hlo_live_range->flattened_instruction_sequence().size() + 1, 0);
for (const HloValue* value : alias_analysis->dataflow_analysis().values()) {
const Shape& shape = value->shape();
if (!shape.has_layout() ||
shape.layout().memory_space() == kDefaultMemorySpace) {
continue;
}
HloLiveRange::TimeBound time_bound =
hlo_live_range->buffer_live_ranges().at(value);
for (int i = time_bound.start; i <= time_bound.end; ++i) {
++num_live_buffers_in_alternate_mem[i];
}
}
for (int i = 0; i < num_live_buffers_in_alternate_mem.size(); ++i) {
EXPECT_LE(num_live_buffers_in_alternate_mem[i], 2);
}
}
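// Inputs and outputs that are already placed in the alternate memory space
// should not receive chunk assignments from memory space assignment (checked
// against the preset assignments below).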
TEST_F(MemorySpaceAssignmentTest,
InputOutputsInAlternateMemShouldntBeAssigned) {
HloComputation::Builder builder(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {2, 3});
  Shape shape_in_alternate_mem = ShapeUtil::MakeShapeWithDenseLayout(
      F32, {2, 3},
      /*minor_to_major=*/{1, 0}, /*tiles=*/{},
      /*tail_padding_alignment_in_elements=*/1,
      /*element_size_in_bits=*/0, kAlternateMemorySpace);
HloInstruction* p0 =
builder.AddInstruction(HloInstruction::CreateParameter(0, shape, "p0"));
HloInstruction* p1 = builder.AddInstruction(
HloInstruction::CreateParameter(1, shape_in_alternate_mem, "p1"));
HloInstruction* negate0 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, p0));
HloInstruction* negate1 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate0));
HloInstruction* negate2 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate1));
HloInstruction* negate3 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate2));
HloInstruction* negate4 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate3));
HloInstruction* negate5 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate4));
HloInstruction* negate6 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, negate5));
HloInstruction* add = builder.AddInstruction(HloInstruction::CreateBinary(
shape_in_alternate_mem, HloOpcode::kAdd, negate6, p1));
HloInstruction* tuple =
builder.AddInstruction(HloInstruction::CreateTuple({add, negate5}));
auto module = CreateNewVerifiedModule();
HloComputation* computation = module->AddEntryComputation(builder.Build());
HloSchedule schedule(module.get());
schedule.set_sequence(computation,
{p0, p1, negate0, negate1, negate2, negate3, negate4,
negate5, negate6, add, tuple});
TF_CHECK_OK(module->set_schedule(schedule));
Options options = DefaultMemorySpaceOptions();
options.is_allowed_in_alternate_mem_fn = [](const HloValue& value) {
return true;
};
std::unique_ptr<PresetAssignments> preset_assignments =
AssignMemorySpace(module.get(), options);
EXPECT_THAT(p1, op::ShapeWithLayout(shape_in_alternate_mem));
EXPECT_THAT(add, op::Add(op::Negate(), op::Parameter(1)));
EXPECT_THAT(add, op::ShapeWithLayout(shape_in_alternate_mem));
for (const auto& position_and_chunk : preset_assignments->chunks()) {
const HloPosition& position = position_and_chunk.first;
EXPECT_NE(position.instruction, p1);
EXPECT_NE(position.instruction, add);
}
}
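// Regression test: a custom buffer-interval comparator prioritizes sin, cos,
// and tanh values to reproduce a pending-chunk memory corruption.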
TEST_F(MemorySpaceAssignmentTest, PendingChunkMemoryCorruptionBug) {
absl::string_view hlo_string = R"(
HloModule bug, is_scheduled=true
ENTRY %Entry {
%param0 = f32[8,3] parameter(0)
%param1 = f32[2,4] parameter(1)
%a = f32[8,3] sine(%param0)
%b = f32[2,4] cosine(%param1)
%d = f32[8,3] tanh(%a)
%c = f32[8,3] negate(%a)
%e = f32[2,4] negate(%b)
%f = f32[2,4] negate(%e)
%g = f32[2,4] negate(%f)
%h = f32[2,4] negate(%g)
%i = f32[2,4] negate(%h)
%j = f32[2,4] negate(%i)
%k = f32[2,4] negate(%j)
%l = f32[2,4] negate(%k)
%m = f32[8,3] negate(%d)
%n = f32[2,4] sine(%l)
%o = f32[8,3] negate(%d)
%p = f32[2,4] negate(%n)
%q = f32[8,3] negate(%m)
ROOT %tuple = (f32[2,4], f32[8,3], f32[8,3]) tuple(%p, %q, %o)
}
)";
MsaBufferIntervalCompare buffer_interval_compare =
[](const MsaBufferInterval& a, const MsaBufferInterval& b) {
auto get_opcode_priority = [](const HloOpcode& opcode) {
switch (opcode) {
case HloOpcode::kSin:
return 0;
case HloOpcode::kCos:
return 1;
case HloOpcode::kTanh:
return 2;
default:
return 3;
}
};
return get_opcode_priority(a.buffer->defining_instruction()->opcode()) <
get_opcode_priority(b.buffer->defining_instruction()->opcode());
};
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
  InstructionCountPrefetchIntervalPicker prefetch_interval_picker(
      /*min_overlap_count=*/2, /*max_overlap_count=*/10);
AssignMemorySpace(module.get(), DefaultMemorySpaceOptions(),
buffer_interval_compare, &prefetch_interval_picker);
}
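// Regression test for a required-assignment bug where the same value (b) is
// passed to the while loop in two tuple positions.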
TEST_F(MemorySpaceAssignmentTest, WhileAliasedArgumentRequiredAssignmentBug) {
absl::string_view hlo_string = R"(
HloModule bug, is_scheduled=true
while_condition {
param1 = (f32[2,4], f32[2,4], f32[2,4]) parameter(0)
ROOT cond = pred[] constant(true)
}
while_body {
param2 = (f32[2,4], f32[2,4], f32[2,4]) parameter(0)
gte2 = f32[2,4] get-tuple-element(param2), index=0
gte3 = f32[2,4] get-tuple-element(param2), index=1
gte4 = f32[2,4] get-tuple-element(param2), index=2
add = f32[2,4] add(gte2, gte3)
ROOT tuple2 = (f32[2,4], f32[2,4], f32[2,4]) tuple(add, gte3, gte4)
}
ENTRY Entry {
param0 = f32[2,4] parameter(0)
a = f32[2,4] negate(param0)
b = f32[2,4] negate(param0)
tuple = (f32[2,4], f32[2,4], f32[2,4]) tuple(a, b, b)
while = (f32[2,4], f32[2,4], f32[2,4]) while(tuple), condition=while_condition, body=while_body
gte1 = f32[2,4] get-tuple-element(while), index=0
gte2 = f32[2,4] get-tuple-element(while), index=1
ROOT root = f32[2,4] add(gte1, gte2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
AssignMemorySpace(module.get());
}
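// Regression test where tanh uses are disallowed in the alternate memory via
// is_use_allowed_in_alternate_mem_fn.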
TEST_F(MemorySpaceAssignmentTest, DisallowedUseBug) {
absl::string_view hlo_string = R"(
HloModule bug, is_scheduled=true
ENTRY Entry {
param0 = f32[8,3] parameter(0)
param1 = f32[2,4] parameter(1)
a = f32[8,3] cosine(param0)
b = f32[2,4] negate(param1)
d = f32[8,3] negate(a)
c = f32[2,4] negate(b)
e = f32[2,4] negate(c)
f = f32[8,3] tanh(a)
g = f32[2,4] negate(e)
h = f32[2,4] negate(g)
i = f32[2,4] negate(h)
j = f32[2,4] negate(i)
k = f32[2,4] negate(j)
l = f32[2,4] negate(k)
m = f32[2,4] negate(l)
n = f32[2,4] sine(m)
o = f32[8,3] negate(a)
p = f32[2,4] negate(n)
q = f32[8,3] add(o, f)
r = f32[8,3] add(q, d)
ROOT tuple = (f32[2,4], f32[8,3]) tuple(p, r)
}
)";
MsaBufferIntervalCompare buffer_interval_compare =
[](const MsaBufferInterval& a, const MsaBufferInterval& b) {
auto get_opcode_priority = [](const HloOpcode& opcode) {
switch (opcode) {
case HloOpcode::kSin:
return 0;
case HloOpcode::kCos:
return 1;
case HloOpcode::kTanh:
return 2;
default:
return 3;
}
};
return get_opcode_priority(a.buffer->defining_instruction()->opcode()) <
get_opcode_priority(b.buffer->defining_instruction()->opcode());
};
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
  InstructionCountPrefetchIntervalPicker prefetch_interval_picker(
      /*min_overlap_count=*/2, /*max_overlap_count=*/10);
Options options = DefaultMemorySpaceOptions();
options.is_use_allowed_in_alternate_mem_fn = [](const HloUse& use) {
return use.instruction->opcode() != HloOpcode::kTanh;
};
AssignMemorySpace(module.get(), options, buffer_interval_compare,
&prefetch_interval_picker);
}
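// Same disallowed-use restriction as above, but here the disallowed tanh use
// sits inside a while body.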
TEST_F(MemorySpaceAssignmentTest, DisallowedUseBugInWhile) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
while_cond {
p0 = (f32[3]{0}, f32[3]{0}, f32[3]{0}, pred[]) parameter(0)
ROOT gte = pred[] get-tuple-element(p0), index=3
}
while_body {
p0 = (f32[3]{0}, f32[3]{0}, f32[3]{0}, pred[]) parameter(0)
gte0 = f32[3]{0} get-tuple-element(p0), index=0
gte1 = f32[3]{0} get-tuple-element(p0), index=1
gte2 = f32[3]{0} get-tuple-element(p0), index=2
gte3 = pred[] get-tuple-element(p0), index=3
add = f32[3]{0} add(gte0, gte0)
negate0 = f32[3]{0} negate(add)
negate1 = f32[3]{0} negate(negate0)
negate2 = f32[3]{0} negate(negate1)
negate3 = f32[3]{0} negate(negate2)
negate4 = f32[3]{0} negate(negate3)
negate5 = f32[3]{0} negate(negate4)
negate6 = f32[3]{0} negate(negate5)
negate7 = f32[3]{0} negate(negate6)
negate8 = f32[3]{0} negate(negate7)
negate9 = f32[3]{0} negate(negate8)
negate10 = f32[3]{0} negate(negate9)
negate11 = f32[3]{0} negate(negate10)
negate12 = f32[3]{0} negate(negate11)
negate13 = f32[3]{0} negate(negate12)
negate14 = f32[3]{0} negate(negate13)
negate15 = f32[3]{0} negate(gte2)
tanh = f32[3]{0} tanh(gte2)
ROOT tuple = (f32[3]{0}, f32[3]{0}, f32[3]{0}, pred[]) tuple(negate14, tanh, gte2, gte3)
}
ENTRY entry {
p0 = f32[3]{0} parameter(0)
p1 = pred[] parameter(1)
copy0 = f32[3]{0} copy(p0)
copy1 = f32[3]{0} copy(p0)
tuple = (f32[3]{0}, f32[3]{0}, f32[3]{0}, pred[]) tuple(copy0, copy0, copy1, p1)
while = (f32[3]{0}, f32[3]{0}, f32[3]{0}, pred[]) while(tuple), condition=while_cond, body=while_body
ROOT gte = f32[3]{0} get-tuple-element(while), index=2
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
Options options = DefaultMemorySpaceOptions();
options.is_use_allowed_in_alternate_mem_fn = [](const HloUse& use) {
return use.instruction->opcode() != HloOpcode::kTanh;
};
AssignMemorySpace(module.get(), options);
}
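
// The loop-invariant gte1 is only read by tanh inside the body; expects tuple
// element 1 to stay in alternate memory across iterations instead of being
// evicted every iteration, with the body root fed by a single prefetch.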
TEST_F(MemorySpaceAssignmentTest, AvoidRedundantEvictionInWhile) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
while_cond {
p0 = (f32[3]{0}, f32[3]{0}, pred[]) parameter(0)
ROOT gte = pred[] get-tuple-element(p0), index=2
}
while_body {
p0 = (f32[3]{0}, f32[3]{0}, pred[]) parameter(0)
gte0 = f32[3]{0} get-tuple-element(p0), index=0
gte1 = f32[3]{0} get-tuple-element(p0), index=1
tanh = f32[3]{0} tanh(gte1)
gte2 = pred[] get-tuple-element(p0), index=2
negate0 = f32[3]{0} negate(gte0)
negate1 = f32[3]{0} negate(negate0)
negate2 = f32[3]{0} negate(negate1)
negate3 = f32[3]{0} negate(negate2)
negate4 = f32[3]{0} negate(negate3)
negate5 = f32[3]{0} negate(negate4)
negate6 = f32[3]{0} negate(negate5)
negate7 = f32[3]{0} negate(negate6)
negate8 = f32[3]{0} negate(negate7)
negate9 = f32[3]{0} negate(negate8)
negate10 = f32[3]{0} negate(negate9)
negate11 = f32[3]{0} negate(negate10)
negate12 = f32[3]{0} negate(negate11)
negate13 = f32[3]{0} negate(negate12)
negate14 = f32[3]{0} negate(negate13)
add = f32[3]{0} add(negate14, tanh)
ROOT tuple = (f32[3]{0}, f32[3]{0}, pred[]) tuple(add, gte1, gte2)
}
ENTRY entry {
p0 = f32[3]{0} parameter(0)
p1 = pred[] parameter(1)
copy = f32[3]{0} copy(p0)
tuple = (f32[3]{0}, f32[3]{0}, pred[]) tuple(copy, p0, p1)
while = (f32[3]{0}, f32[3]{0}, pred[]) while(tuple), condition=while_cond, body=while_body
gte = f32[3]{0} get-tuple-element(while), index=1
ROOT negate = f32[3]{0} negate(gte)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
AssignMemorySpace(module.get());
const HloInstruction* while_instr = FindInstruction(module.get(), "while");
EXPECT_EQ(while_instr->shape().tuple_shapes(1).layout().memory_space(),
kAlternateMemorySpace);
const HloInstruction* gte1 = FindInstruction(module.get(), "gte1");
EXPECT_EQ(gte1->user_count(), 1);
EXPECT_EQ(gte1->users()[0]->opcode(), HloOpcode::kTanh);
const HloInstruction* while_root =
while_instr->while_body()->root_instruction();
EXPECT_THAT(while_root->operand(1),
op::AsyncCopy(kAlternateMemorySpace, kDefaultMemorySpace,
op::GetTupleElement(op::Parameter(0))));
}
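
// Redundant-eviction elimination must not grow the while tuple with an extra
// parameter when none is needed; the tuple should stay at 3 elements.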
TEST_F(MemorySpaceAssignmentTest,
RedundantEvictionEliminationShouldntAddRedundantParam) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
while_cond {
p0 = (f32[3]{0}, f32[3]{0}, pred[]) parameter(0)
ROOT gte = pred[] get-tuple-element(p0), index=2
}
while_body {
p0 = (f32[3]{0}, f32[3]{0}, pred[]) parameter(0)
gte0 = f32[3]{0} get-tuple-element(p0), index=0
gte1 = f32[3]{0} get-tuple-element(p0), index=1
tanh = f32[3]{0} tanh(gte1)
gte2 = pred[] get-tuple-element(p0), index=2
negate0 = f32[3]{0} negate(gte0)
negate1 = f32[3]{0} negate(negate0)
add = f32[3]{0} add(negate1, tanh)
ROOT tuple = (f32[3]{0}, f32[3]{0}, pred[]) tuple(add, gte1, gte2)
}
ENTRY entry {
p0 = f32[3]{0} parameter(0)
p1 = pred[] parameter(1)
copy = f32[3]{0} copy(p0)
tuple = (f32[3]{0}, f32[3]{0}, pred[]) tuple(copy, p0, p1)
while = (f32[3]{0}, f32[3]{0}, pred[]) while(tuple), condition=while_cond, body=while_body
gte = f32[3]{0} get-tuple-element(while), index=1
ROOT negate = f32[3]{0} negate(gte)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
AssignMemorySpace(module.get());
const HloInstruction* while_instr = FindInstruction(module.get(), "while");
EXPECT_EQ(while_instr->shape().tuple_shapes_size(), 3);
}
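
// Same as AvoidRedundantEvictionInWhile, but with the loop nested inside an
// outer while; the same properties are checked on both whiles.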
TEST_F(MemorySpaceAssignmentTest, AvoidRedundantEvictionInNestedWhile) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
while_cond2 {
p0 = (f32[3]{0}, f32[3]{0}, pred[]) parameter(0)
ROOT gte = pred[] get-tuple-element(p0), index=2
}
while_body2 {
p0 = (f32[3]{0}, f32[3]{0}, pred[]) parameter(0)
gte0 = f32[3]{0} get-tuple-element(p0), index=0
gte1 = f32[3]{0} get-tuple-element(p0), index=1
tanh = f32[3]{0} tanh(gte1)
gte2 = pred[] get-tuple-element(p0), index=2
negate0 = f32[3]{0} negate(gte0)
negate1 = f32[3]{0} negate(negate0)
negate2 = f32[3]{0} negate(negate1)
negate3 = f32[3]{0} negate(negate2)
negate4 = f32[3]{0} negate(negate3)
negate5 = f32[3]{0} negate(negate4)
negate6 = f32[3]{0} negate(negate5)
negate7 = f32[3]{0} negate(negate6)
negate8 = f32[3]{0} negate(negate7)
negate9 = f32[3]{0} negate(negate8)
negate10 = f32[3]{0} negate(negate9)
negate11 = f32[3]{0} negate(negate10)
negate12 = f32[3]{0} negate(negate11)
negate13 = f32[3]{0} negate(negate12)
negate14 = f32[3]{0} negate(negate13)
add = f32[3]{0} add(negate14, tanh)
ROOT tuple = (f32[3]{0}, f32[3]{0}, pred[]) tuple(add, gte1, gte2)
}
while_cond1 {
p0 = (f32[3]{0}, f32[3]{0}, pred[]) parameter(0)
ROOT gte = pred[] get-tuple-element(p0), index=2
}
while_body1 {
p0 = (f32[3]{0}, f32[3]{0}, pred[]) parameter(0)
ROOT while2 = (f32[3]{0}, f32[3]{0}, pred[]) while(p0), condition=while_cond2, body=while_body2
}
ENTRY entry {
p0 = f32[3]{0} parameter(0)
p1 = pred[] parameter(1)
copy = f32[3]{0} copy(p0)
tuple = (f32[3]{0}, f32[3]{0}, pred[]) tuple(copy, p0, p1)
while1 = (f32[3]{0}, f32[3]{0}, pred[]) while(tuple), condition=while_cond1, body=while_body1
gte = f32[3]{0} get-tuple-element(while1), index=1
ROOT negate = f32[3]{0} negate(gte)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
AssignMemorySpace(module.get());
const HloInstruction* while1_instr = FindInstruction(module.get(), "while1");
EXPECT_EQ(while1_instr->shape().tuple_shapes(1).layout().memory_space(),
kAlternateMemorySpace);
const HloInstruction* while2_instr = FindInstruction(module.get(), "while2");
EXPECT_EQ(while2_instr->shape().tuple_shapes(1).layout().memory_space(),
kAlternateMemorySpace);
const HloInstruction* gte1 = FindInstruction(module.get(), "gte1");
EXPECT_EQ(gte1->user_count(), 1);
EXPECT_EQ(gte1->users()[0]->opcode(), HloOpcode::kTanh);
const HloInstruction* while_root =
while2_instr->while_body()->root_instruction();
EXPECT_THAT(while_root->operand(1),
op::AsyncCopy(kAlternateMemorySpace, kDefaultMemorySpace,
op::GetTupleElement(op::Parameter(0))));
}
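
// Here gte1 is also consumed by add1, so the eviction cannot be elided;
// expects gte1 to gain a CopyStart user (the eviction) after assignment.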
TEST_F(MemorySpaceAssignmentTest, RedundantEvictionEliminationBug) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
while_cond {
p0 = (f32[3]{0}, f32[3]{0}, pred[]) parameter(0)
ROOT gte = pred[] get-tuple-element(p0), index=2
}
while_body {
p0 = (f32[3]{0}, f32[3]{0}, pred[]) parameter(0)
gte0 = f32[3]{0} get-tuple-element(p0), index=0
gte1 = f32[3]{0} get-tuple-element(p0), index=1
tanh = f32[3]{0} tanh(gte1)
gte2 = pred[] get-tuple-element(p0), index=2
negate0 = f32[3]{0} negate(gte0)
negate1 = f32[3]{0} negate(negate0)
negate2 = f32[3]{0} negate(negate1)
negate3 = f32[3]{0} negate(negate2)
negate4 = f32[3]{0} negate(negate3)
negate5 = f32[3]{0} negate(negate4)
negate6 = f32[3]{0} negate(negate5)
negate7 = f32[3]{0} negate(negate6)
negate8 = f32[3]{0} negate(negate7)
negate9 = f32[3]{0} negate(negate8)
negate10 = f32[3]{0} negate(negate9)
negate11 = f32[3]{0} negate(negate10)
negate12 = f32[3]{0} negate(negate11)
negate13 = f32[3]{0} negate(negate12)
negate14 = f32[3]{0} negate(negate13)
add0 = f32[3]{0} add(negate14, tanh)
add1 = f32[3]{0} add(add0, gte1)
negate = f32[3]{0} negate(add1)
ROOT tuple = (f32[3]{0}, f32[3]{0}, pred[]) tuple(add1, negate, gte2)
}
ENTRY entry {
p0 = f32[3]{0} parameter(0)
p1 = pred[] parameter(1)
copy = f32[3]{0} copy(p0)
tuple = (f32[3]{0}, f32[3]{0}, pred[]) tuple(copy, p0, p1)
while = (f32[3]{0}, f32[3]{0}, pred[]) while(tuple), condition=while_cond, body=while_body
gte = f32[3]{0} get-tuple-element(while), index=1
ROOT negate = f32[3]{0} negate(gte)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
AssignMemorySpace(module.get());
const HloInstruction* while_instr = FindInstruction(module.get(), "while");
EXPECT_EQ(while_instr->shape().tuple_shapes_size(), 3);
EXPECT_EQ(while_instr->shape().tuple_shapes(1).layout().memory_space(),
kAlternateMemorySpace);
const HloInstruction* gte1 = FindInstruction(module.get(), "gte1");
EXPECT_EQ(gte1->user_count(), 2);
EXPECT_NE(
absl::c_find_if(gte1->users(), HloPredicateIsOp<HloOpcode::kCopyStart>),
gte1->users().end());
}
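
// With two chained whiles, only while1 picks up the extra tuple element from
// redundant-eviction elimination; while2's tuple stays one element smaller.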
TEST_F(MemorySpaceAssignmentTest, RedundantEvictionEliminationInChainedWhile) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
while_cond1 {
p0 = (f32[3]{0}, f32[3]{0}, pred[]) parameter(0)
ROOT gte = pred[] get-tuple-element(p0), index=2
}
while_body1 {
p0 = (f32[3]{0}, f32[3]{0}, pred[]) parameter(0)
gte0 = f32[3]{0} get-tuple-element(p0), index=0
gte1 = f32[3]{0} get-tuple-element(p0), index=1
tanh = f32[3]{0} tanh(gte1)
gte2 = pred[] get-tuple-element(p0), index=2
negate0 = f32[3]{0} negate(gte0)
negate1 = f32[3]{0} negate(negate0)
negate2 = f32[3]{0} negate(negate1)
negate3 = f32[3]{0} negate(negate2)
negate4 = f32[3]{0} negate(negate3)
negate5 = f32[3]{0} negate(negate4)
negate6 = f32[3]{0} negate(negate5)
negate7 = f32[3]{0} negate(negate6)
negate8 = f32[3]{0} negate(negate7)
negate9 = f32[3]{0} negate(negate8)
negate10 = f32[3]{0} negate(negate9)
negate11 = f32[3]{0} negate(negate10)
negate12 = f32[3]{0} negate(negate11)
negate13 = f32[3]{0} negate(negate12)
negate14 = f32[3]{0} negate(negate13)
add = f32[3]{0} add(negate14, tanh)
ROOT tuple = (f32[3]{0}, f32[3]{0}, pred[]) tuple(add, gte1, gte2)
}
while_cond2 {
p0 = (f32[3]{0}, f32[3]{0}, pred[]) parameter(0)
ROOT gte = pred[] get-tuple-element(p0), index=2
}
while_body2 {
p0 = (f32[3]{0}, f32[3]{0}, pred[]) parameter(0)
gte0 = f32[3]{0} get-tuple-element(p0), index=0
gte1 = f32[3]{0} get-tuple-element(p0), index=1
tanh = f32[3]{0} tanh(gte1)
gte2 = pred[] get-tuple-element(p0), index=2
negate0 = f32[3]{0} negate(gte0)
add = f32[3]{0} add(negate0, tanh)
ROOT tuple = (f32[3]{0}, f32[3]{0}, pred[]) tuple(add, gte1, gte2)
}
ENTRY entry {
p0 = f32[3]{0} parameter(0)
p1 = pred[] parameter(1)
copy = f32[3]{0} copy(p0)
tuple = (f32[3]{0}, f32[3]{0}, pred[]) tuple(copy, p0, p1)
while1 = (f32[3]{0}, f32[3]{0}, pred[]) while(tuple), condition=while_cond1, body=while_body1
while2 = (f32[3]{0}, f32[3]{0}, pred[]) while(while1), condition=while_cond2, body=while_body2
gte = f32[3]{0} get-tuple-element(while2), index=1
ROOT negate = f32[3]{0} negate(gte)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
AssignMemorySpace(module.get());
EXPECT_EQ(
FindInstruction(module.get(), "while1")->shape().tuple_shapes_size(),
FindInstruction(module.get(), "while2")->shape().tuple_shapes_size() + 1);
}
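
// The while body passes tuple element 0 through unmodified, so the value can
// be prefetched after the loop straight from its pre-while copy rather than
// being evicted again.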
TEST_F(MemorySpaceAssignmentTest, AvoidRedundantEvictionAfterWhile) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
while_cond {
p0 = (f32[3]{0}, f32[3]{0}, pred[]) parameter(0)
ROOT gte = pred[] get-tuple-element(p0), index=2
}
while_body {
p0 = (f32[3]{0}, f32[3]{0}, pred[]) parameter(0)
gte0 = f32[3]{0} get-tuple-element(p0), index=0
gte1 = f32[3]{0} get-tuple-element(p0), index=1
gte2 = pred[] get-tuple-element(p0), index=2
add = f32[3]{0} add(gte0, gte1)
ROOT tuple = (f32[3]{0}, f32[3]{0}, pred[]) tuple(gte0, add, gte2)
}
ENTRY entry {
p0 = f32[3]{0} parameter(0)
p1 = pred[] parameter(1)
copy = f32[3]{0} copy(p0)
negate0 = f32[3]{0} negate(p0)
negate1 = f32[3]{0} negate(negate0)
negate2 = f32[3]{0} negate(negate1)
negate3 = f32[3]{0} negate(negate2)
negate4 = f32[3]{0} negate(negate3)
negate5 = f32[3]{0} negate(negate4)
negate6 = f32[3]{0} negate(negate5)
negate7 = f32[3]{0} negate(negate6)
negate8 = f32[3]{0} negate(negate7)
negate9 = f32[3]{0} negate(negate8)
negate10 = f32[3]{0} negate(negate9)
negate11 = f32[3]{0} negate(negate10)
negate12 = f32[3]{0} negate(negate11)
negate13 = f32[3]{0} negate(negate12)
negate14 = f32[3]{0} negate(negate13)
tuple = (f32[3]{0}, f32[3]{0}, pred[]) tuple(copy, negate14, p1)
while = (f32[3]{0}, f32[3]{0}, pred[]) while(tuple), condition=while_cond, body=while_body
gte0 = f32[3]{0} get-tuple-element(while), index=0
gte1 = f32[3]{0} get-tuple-element(while), index=1
negate20 = f32[3]{0} negate(gte1)
negate21 = f32[3]{0} negate(negate20)
negate22 = f32[3]{0} negate(negate21)
negate23 = f32[3]{0} negate(negate22)
negate24 = f32[3]{0} negate(negate23)
negate25 = f32[3]{0} negate(negate24)
negate26 = f32[3]{0} negate(negate25)
negate27 = f32[3]{0} negate(negate26)
negate28 = f32[3]{0} negate(negate27)
negate29 = f32[3]{0} negate(negate28)
negate30 = f32[3]{0} negate(negate29)
negate31 = f32[3]{0} negate(negate30)
negate32 = f32[3]{0} negate(negate31)
negate33 = f32[3]{0} negate(negate32)
negate34 = f32[3]{0} negate(negate33)
ROOT add = f32[3]{0} add(negate34, gte0)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
AssignMemorySpace(module.get());
EXPECT_THAT(
module->entry_computation()->root_instruction()->operand(1),
op::AsyncCopy(kAlternateMemorySpace, kDefaultMemorySpace, op::Copy()));
}
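
// Variant with two chained whiles: the post-loop use of while2's element 0 is
// expected to go through an eviction followed by a prefetch.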
TEST_F(MemorySpaceAssignmentTest, AvoidRedundantEvictionAfterWhile2) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
while_cond1 {
p0 = (f32[3]{0}, f32[3]{0}, pred[]) parameter(0)
ROOT gte = pred[] get-tuple-element(p0), index=2
}
while_body1 {
p0 = (f32[3]{0}, f32[3]{0}, pred[]) parameter(0)
gte0 = f32[3]{0} get-tuple-element(p0), index=0
gte1 = f32[3]{0} get-tuple-element(p0), index=1
gte2 = pred[] get-tuple-element(p0), index=2
add = f32[3]{0} add(gte0, gte1)
ROOT tuple = (f32[3]{0}, f32[3]{0}, pred[]) tuple(gte0, add, gte2)
}
while_cond2 {
p0 = (f32[3]{0}, f32[3]{0}, pred[]) parameter(0)
ROOT gte = pred[] get-tuple-element(p0), index=2
}
while_body2 {
p0 = (f32[3]{0}, f32[3]{0}, pred[]) parameter(0)
gte0 = f32[3]{0} get-tuple-element(p0), index=0
gte1 = f32[3]{0} get-tuple-element(p0), index=1
gte2 = pred[] get-tuple-element(p0), index=2
add = f32[3]{0} add(gte0, gte1)
ROOT tuple = (f32[3]{0}, f32[3]{0}, pred[]) tuple(gte0, add, gte2)
}
ENTRY entry {
p0 = f32[3]{0} parameter(0)
p1 = pred[] parameter(1)
copy = f32[3]{0} copy(p0)
tuple1 = (f32[3]{0}, f32[3]{0}, pred[]) tuple(copy, p0, p1)
while1 = (f32[3]{0}, f32[3]{0}, pred[]) while(tuple1), condition=while_cond1, body=while_body1
gte0 = f32[3]{0} get-tuple-element(while1), index=0
gte1 = f32[3]{0} get-tuple-element(while1), index=1
negate0 = f32[3]{0} negate(gte1)
negate1 = f32[3]{0} negate(negate0)
negate2 = f32[3]{0} negate(negate1)
negate3 = f32[3]{0} negate(negate2)
negate4 = f32[3]{0} negate(negate3)
negate5 = f32[3]{0} negate(negate4)
negate6 = f32[3]{0} negate(negate5)
negate7 = f32[3]{0} negate(negate6)
negate8 = f32[3]{0} negate(negate7)
negate9 = f32[3]{0} negate(negate8)
negate10 = f32[3]{0} negate(negate9)
negate11 = f32[3]{0} negate(negate10)
negate12 = f32[3]{0} negate(negate11)
negate13 = f32[3]{0} negate(negate12)
negate14 = f32[3]{0} negate(negate13)
tuple2 = (f32[3]{0}, f32[3]{0}, pred[]) tuple(gte0, negate14, p1)
while2 = (f32[3]{0}, f32[3]{0}, pred[]) while(tuple2), condition=while_cond2, body=while_body2
gte2 = f32[3]{0} get-tuple-element(while2), index=0
gte3 = f32[3]{0} get-tuple-element(while2), index=1
negate20 = f32[3]{0} negate(gte3)
negate21 = f32[3]{0} negate(negate20)
negate22 = f32[3]{0} negate(negate21)
negate23 = f32[3]{0} negate(negate22)
negate24 = f32[3]{0} negate(negate23)
negate25 = f32[3]{0} negate(negate24)
negate26 = f32[3]{0} negate(negate25)
negate27 = f32[3]{0} negate(negate26)
negate28 = f32[3]{0} negate(negate27)
negate29 = f32[3]{0} negate(negate28)
negate30 = f32[3]{0} negate(negate29)
negate31 = f32[3]{0} negate(negate30)
negate32 = f32[3]{0} negate(negate31)
negate33 = f32[3]{0} negate(negate32)
negate34 = f32[3]{0} negate(negate33)
ROOT add = f32[3]{0} add(negate34, gte2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
AssignMemorySpace(module.get());
EXPECT_THAT(
module->entry_computation()->root_instruction()->operand(1),
op::AsyncCopy(kAlternateMemorySpace, kDefaultMemorySpace,
op::AsyncCopy(kDefaultMemorySpace, kAlternateMemorySpace,
op::GetTupleElement(op::While()))));
}
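
// The while body negates tuple element 0, so the earlier pre-while eviction is
// stale; the modified value must be evicted and prefetched after the loop.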
TEST_F(MemorySpaceAssignmentTest,
AfterWhileRedundantEarlierEvictionModifiedBuffer) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
while_cond {
p0 = (f32[3]{0}, f32[3]{0}, pred[]) parameter(0)
ROOT gte = pred[] get-tuple-element(p0), index=2
}
while_body {
p0 = (f32[3]{0}, f32[3]{0}, pred[]) parameter(0)
gte0 = f32[3]{0} get-tuple-element(p0), index=0
gte1 = f32[3]{0} get-tuple-element(p0), index=1
gte2 = pred[] get-tuple-element(p0), index=2
add = f32[3]{0} add(gte0, gte1)
negate = f32[3]{0} negate(gte0)
ROOT tuple = (f32[3]{0}, f32[3]{0}, pred[]) tuple(negate, add, gte2)
}
ENTRY entry {
p0 = f32[3]{0} parameter(0)
p1 = pred[] parameter(1)
copy = f32[3]{0} copy(p0)
negate0 = f32[3]{0} negate(p0)
negate1 = f32[3]{0} negate(negate0)
negate2 = f32[3]{0} negate(negate1)
negate3 = f32[3]{0} negate(negate2)
negate4 = f32[3]{0} negate(negate3)
negate5 = f32[3]{0} negate(negate4)
negate6 = f32[3]{0} negate(negate5)
negate7 = f32[3]{0} negate(negate6)
negate8 = f32[3]{0} negate(negate7)
negate9 = f32[3]{0} negate(negate8)
negate10 = f32[3]{0} negate(negate9)
negate11 = f32[3]{0} negate(negate10)
negate12 = f32[3]{0} negate(negate11)
negate13 = f32[3]{0} negate(negate12)
negate14 = f32[3]{0} negate(negate13)
tuple = (f32[3]{0}, f32[3]{0}, pred[]) tuple(copy, negate14, p1)
while = (f32[3]{0}, f32[3]{0}, pred[]) while(tuple), condition=while_cond, body=while_body
gte0 = f32[3]{0} get-tuple-element(while), index=0
gte1 = f32[3]{0} get-tuple-element(while), index=1
negate20 = f32[3]{0} negate(gte1)
negate21 = f32[3]{0} negate(negate20)
negate22 = f32[3]{0} negate(negate21)
negate23 = f32[3]{0} negate(negate22)
negate24 = f32[3]{0} negate(negate23)
negate25 = f32[3]{0} negate(negate24)
negate26 = f32[3]{0} negate(negate25)
negate27 = f32[3]{0} negate(negate26)
negate28 = f32[3]{0} negate(negate27)
negate29 = f32[3]{0} negate(negate28)
negate30 = f32[3]{0} negate(negate29)
negate31 = f32[3]{0} negate(negate30)
negate32 = f32[3]{0} negate(negate31)
negate33 = f32[3]{0} negate(negate32)
negate34 = f32[3]{0} negate(negate33)
ROOT add = f32[3]{0} add(negate34, gte0)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
AssignMemorySpace(module.get());
EXPECT_THAT(
module->entry_computation()->root_instruction()->operand(1),
op::AsyncCopy(kAlternateMemorySpace, kDefaultMemorySpace,
op::AsyncCopy(kDefaultMemorySpace, kAlternateMemorySpace,
op::GetTupleElement(op::While()))));
}
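
// Regression test combining redundant-eviction elimination with
// get_inefficient_allocation_sites_fn: marking a use of while1's output
// inefficient mid-assignment must not crash. Only checks that assignment
// succeeds.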
TEST_F(MemorySpaceAssignmentTest,
WhileRedundantEvictionWithInefficientAllocationBug) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
while_cond {
p0 = (f32[3]{0}, f32[3]{0}, pred[]) parameter(0)
ROOT gte = pred[] get-tuple-element(p0), index=2
}
while_body {
p0 = (f32[3]{0}, f32[3]{0}, pred[]) parameter(0)
gte0 = f32[3]{0} get-tuple-element(p0), index=0
gte1 = f32[3]{0} get-tuple-element(p0), index=1
tanh = f32[3]{0} tanh(gte1)
gte2 = pred[] get-tuple-element(p0), index=2
negate0 = f32[3]{0} negate(gte0)
negate1 = f32[3]{0} negate(negate0)
add = f32[3]{0} add(negate1, tanh)
ROOT tuple = (f32[3]{0}, f32[3]{0}, pred[]) tuple(add, gte1, gte2)
}
while_cond1 {
p0 = (f32[3]{0}, f32[3]{0}, pred[]) parameter(0)
ROOT gte = pred[] get-tuple-element(p0), index=2
}
while_body1 {
p0 = (f32[3]{0}, f32[3]{0}, pred[]) parameter(0)
gte0 = f32[3]{0} get-tuple-element(p0), index=0
gte2 = pred[] get-tuple-element(p0), index=2
negate0 = f32[3]{0} negate(gte0)
negate1 = f32[3]{0} negate(negate0)
negate2 = f32[3]{0} negate(negate1)
negate3 = f32[3]{0} negate(negate2)
negate4 = f32[3]{0} negate(negate3)
negate5 = f32[3]{0} negate(negate4)
negate6 = f32[3]{0} negate(negate5)
negate7 = f32[3]{0} negate(negate6)
negate8 = f32[3]{0} negate(negate7)
negate9 = f32[3]{0} negate(negate8)
negate10 = f32[3]{0} negate(negate9)
negate11 = f32[3]{0} negate(negate10)
negate12 = f32[3]{0} negate(negate11)
negate13 = f32[3]{0} negate(negate12)
negate14 = f32[3]{0} negate(negate13)
gte1 = f32[3]{0} get-tuple-element(p0), index=1
tanh = f32[3]{0} tanh(gte1)
add = f32[3]{0} add(negate14, tanh)
ROOT tuple = (f32[3]{0}, f32[3]{0}, pred[]) tuple(add, gte1, gte2)
}
ENTRY entry {
p0 = f32[3]{0} parameter(0)
p1 = pred[] parameter(1)
p2 = f32[3]{0} parameter(2)
copy = f32[3]{0} copy(p0)
tuple1 = (f32[3]{0}, f32[3]{0}, pred[]) tuple(copy, p0, p1)
while1 = (f32[3]{0}, f32[3]{0}, pred[]) while(tuple1), condition=while_cond, body=while_body
gte0 = f32[3]{0} get-tuple-element(while1), index=0
gte1 = f32[3]{0} get-tuple-element(while1), index=1
negate0_entry = f32[3]{0} negate(gte1)
gte2 = pred[] get-tuple-element(while1), index=2
tuple2 = (f32[3]{0}, f32[3]{0}, pred[]) tuple(gte0, gte1, gte2)
while2 = (f32[3]{0}, f32[3]{0}, pred[]) while(tuple2), condition=while_cond1, body=while_body1
negate1 = f32[3]{0} negate(negate0_entry)
negate2 = f32[3]{0} negate(negate1)
negate3 = f32[3]{0} negate(negate2)
negate4 = f32[3]{0} negate(negate3)
negate5 = f32[3]{0} negate(negate4)
negate6 = f32[3]{0} negate(negate5)
negate7 = f32[3]{0} negate(negate6)
negate8 = f32[3]{0} negate(negate7)
negate9 = f32[3]{0} negate(negate8)
negate10 = f32[3]{0} negate(negate9)
negate11 = f32[3]{0} negate(negate10)
negate12 = f32[3]{0} negate(negate11)
negate13 = f32[3]{0} negate(negate12)
negate14 = f32[3]{0} negate(negate13)
gte = f32[3]{0} get-tuple-element(while2), index=1
ROOT add = f32[3]{0} add(gte, negate14)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
Options options = DefaultMemorySpaceOptions();
bool marked_inefficient = false;
options.get_inefficient_allocation_sites_fn =
[&](absl::Span<HloPosition> defining_positions)
-> std::vector<std::variant<HloPosition, HloUse>> {
if (absl::c_find(defining_positions,
HloPosition{FindInstruction(module.get(), "while1"),
{1}}) != defining_positions.end() &&
!marked_inefficient) {
LOG(INFO) << "Marking the use inefficient.";
marked_inefficient = true;
return {HloUse{FindInstruction(module.get(), "negate0_entry"), 0}};
}
return {};
};
AssignMemorySpace(module.get(), options);
}
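
// With max_outstanding_prefetches = 0 no prefetch can be scheduled, so the
// root must read p0 directly from default memory.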
TEST_F(MemorySpaceAssignmentTest, DisablePrefetch) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
ENTRY entry {
p0 = f32[3]{0} parameter(0)
p1 = f32[3]{0} parameter(1)
negate1 = f32[3]{0} negate(p1)
negate2 = f32[3]{0} negate(negate1)
negate3 = f32[3]{0} negate(negate2)
negate4 = f32[3]{0} negate(negate3)
negate5 = f32[3]{0} negate(negate4)
negate6 = f32[3]{0} negate(negate5)
negate7 = f32[3]{0} negate(negate6)
negate8 = f32[3]{0} negate(negate7)
negate9 = f32[3]{0} negate(negate8)
ROOT add = f32[3]{0} add(negate9, p0)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
Options options = DefaultMemorySpaceOptions();
options.max_outstanding_prefetches = 0;
AssignMemorySpace(module.get(), options);
EXPECT_THAT(module->entry_computation()->root_instruction()->operand(1),
op::Parameter());
}
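
// The entry root is a bitcast; its buffer must not be placed in alternate
// memory.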
TEST_F(MemorySpaceAssignmentTest, BitcastRoot) {
absl::string_view hlo_string = R"(
HloModule primitive_computation_gather.4, is_scheduled=true
%while_body {
%param.1 = (s32[], f32[3,3,3]) parameter(0)
%get-tuple-element.32 = s32[] get-tuple-element(%param.1), index=0
%copy.6 = s32[] copy(s32[] %get-tuple-element.32)
%constant.8 = s32[] constant(1)
%add = s32[] add(s32[] %copy.6, s32[] %constant.8)
%get-tuple-element.35 = f32[3,3,3] get-tuple-element(%param.1), index=1
negate = f32[3,3,3] negate(get-tuple-element.35)
ROOT %tuple.10 = (s32[], f32[3,3,3]) tuple(s32[] %add, f32[3,3,3] negate)
}
%while_cond {
%param.0 = (s32[], f32[3,3,3]) parameter(0)
%get-tuple-element = s32[] get-tuple-element(%param.0), index=0
%constant.3 = s32[] constant(3)
ROOT %compare = pred[] compare(s32[] %get-tuple-element, s32[] %constant.3), direction=LT
}
ENTRY %primitive_computation_gather.4 (parameter.1: f32[3,10,5], parameter.2: s32[3,1]) -> f32[3,3,3] {
%constant.1 = s32[] constant(0)
%copy.11 = s32[] copy(s32[] %constant.1)
%constant = f32[] constant(0)
%broadcast = f32[3,3,3] broadcast(f32[] %constant), dimensions={}
%tuple.8 = (s32[], f32[3,3,3]) tuple(s32[] %copy.11, f32[3,3,3] %broadcast)
%while = (s32[], f32[3,3,3]) while(%tuple.8), condition=%while_cond, body=%while_body
%get-tuple-element.7 = f32[3,3,3] get-tuple-element(%while), index=1
ROOT %bitcast.1 = f32[3,3,3] bitcast(f32[3,3,3] %get-tuple-element.7)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
AssignMemorySpace(module.get());
const HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_TRUE(!root->shape().has_layout() ||
root->shape().layout().memory_space() == kDefaultMemorySpace);
}
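
// "a" is precolored into alternate memory via the S(1) layout annotation;
// expects it to stay there for its whole live range (still feeding d and r
// directly) and to appear in the preset-assignment chunks.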
TEST_F(MemorySpaceAssignmentTest, PrecoloredBuffer) {
absl::string_view hlo_string = R"(
HloModule bug, is_scheduled=true
ENTRY Entry {
param0 = f32[8,3] parameter(0)
param1 = f32[2,4] parameter(1)
a = f32[8,3]{1,0:S(1)} cosine(param0)
b = f32[2,4] negate(param1)
d = f32[8,3] negate(a)
c = f32[2,4] negate(b)
e = f32[2,4] negate(c)
f = f32[8,3] negate(d)
g = f32[2,4] negate(e)
h = f32[2,4] negate(g)
i = f32[2,4] negate(h)
j = f32[2,4] negate(i)
k = f32[2,4] negate(j)
l = f32[2,4] negate(k)
m = f32[2,4] negate(l)
n = f32[2,4] negate(m)
o = f32[8,3] negate(f)
p = f32[2,4] negate(n)
q = f32[8,3] add(f, o)
r = f32[8,3] add(q, a)
ROOT tuple = (f32[2,4], f32[8,3]) tuple(p, r)
}
)";
MsaBufferIntervalCompare buffer_interval_compare =
[](const MsaBufferInterval& a, const MsaBufferInterval& b) {
auto get_opcode_priority = [](const HloOpcode& opcode) {
switch (opcode) {
case HloOpcode::kNegate:
return 0;
case HloOpcode::kAdd:
return 1;
case HloOpcode::kCos:
return 2;
default:
return 3;
}
};
return get_opcode_priority(a.buffer->defining_instruction()->opcode()) <
get_opcode_priority(b.buffer->defining_instruction()->opcode());
};
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
InstructionCountPrefetchIntervalPicker prefetch_interval_picker(2, 10);
Options options = DefaultMemorySpaceOptions();
std::unique_ptr<PresetAssignments> preset_assignments =
AssignMemorySpace(module.get(), options, buffer_interval_compare,
&prefetch_interval_picker);
const HloInstruction* r = FindInstruction(module.get(), "r");
const HloInstruction* d = FindInstruction(module.get(), "d");
const HloInstruction* a = FindInstruction(module.get(), "a");
EXPECT_EQ(r->operand(1), a);
EXPECT_EQ(d->operand(0), a);
EXPECT_EQ(a->shape().layout().memory_space(), kAlternateMemorySpace);
  auto a_entry = std::find_if(
      preset_assignments->chunks().begin(), preset_assignments->chunks().end(),
      [&](const std::pair<HloPosition, HeapSimulator::Chunk>&
              position_and_chunk) {
        return position_and_chunk.first.instruction == a;
      });
EXPECT_NE(a_entry, preset_assignments->chunks().end());
}
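
// Two precolored buffers (a and o, both S(1)) with overlapping live ranges
// exceed the alternate-memory capacity; assignment is expected to fail with
// FAILED_PRECONDITION.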
TEST_F(MemorySpaceAssignmentTest, PrecoloredBufferOOM) {
absl::string_view hlo_string = R"(
HloModule bug, is_scheduled=true
ENTRY Entry {
param0 = f32[8,3] parameter(0)
param1 = f32[2,4] parameter(1)
a = f32[8,3]{1,0:S(1)} cosine(param0)
b = f32[2,4] negate(param1)
d = f32[8,3] negate(a)
c = f32[2,4] negate(b)
e = f32[2,4] negate(c)
f = f32[8,3] negate(d)
g = f32[2,4] negate(e)
h = f32[2,4] negate(g)
i = f32[2,4] negate(h)
j = f32[2,4] negate(i)
k = f32[2,4] negate(j)
l = f32[2,4] negate(k)
m = f32[2,4] negate(l)
n = f32[2,4] negate(m)
o = f32[8,3]{1,0:S(1)} negate(f)
p = f32[2,4] negate(n)
q = f32[8,3] add(f, o)
r = f32[8,3] add(q, a)
ROOT tuple = (f32[2,4], f32[8,3]) tuple(p, r)
}
)";
MsaBufferIntervalCompare buffer_interval_compare =
[](const MsaBufferInterval& a, const MsaBufferInterval& b) {
auto get_opcode_priority = [](const HloOpcode& opcode) {
switch (opcode) {
case HloOpcode::kNegate:
return 0;
case HloOpcode::kAdd:
return 1;
case HloOpcode::kCos:
return 2;
default:
return 3;
}
};
return get_opcode_priority(a.buffer->defining_instruction()->opcode()) <
get_opcode_priority(b.buffer->defining_instruction()->opcode());
};
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
InstructionCountPrefetchIntervalPicker prefetch_interval_picker(2, 10);
Options options = DefaultMemorySpaceOptions();
auto status_or = AssignMemorySpaceAndReturnStatus(module.get(), options,
buffer_interval_compare,
&prefetch_interval_picker);
EXPECT_THAT(
status_or.status(),
tsl::testing::StatusIs(
tsl::error::FAILED_PRECONDITION,
::testing::HasSubstr("requires allocation in the alternate memory, "
"which could not be satisfied")));
}
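
// A collective-permute whose start..done window is short; both the source and
// destination buffers are expected in alternate memory.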
TEST_F(MemorySpaceAssignmentTest, AsyncOpShortLiveRange) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
ENTRY entry {
param = bf16[4]{0} parameter(0)
negate0 = bf16[4]{0} negate(param)
collective-permute-start = (bf16[4]{0}, bf16[4]{0}, u32[], u32[]) collective-permute-start(negate0), source_target_pairs={{0,1},{1,2},{2,3}}
negate1 = bf16[4]{0} negate(param)
negate2 = bf16[4]{0} negate(negate1)
negate3 = bf16[4]{0} negate(negate2)
collective-permute-done = bf16[4]{0} collective-permute-done(collective-permute-start)
ROOT add = add(collective-permute-done, negate3)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
AssignMemorySpace(module.get());
HloInstruction* collective_permute_start =
module->entry_computation()->GetInstructionWithName(
"collective-permute-start");
  EXPECT_EQ(collective_permute_start->shape()
                .tuple_shapes(0)
                .layout()
                .memory_space(),
            kAlternateMemorySpace);
  EXPECT_EQ(collective_permute_start->shape()
                .tuple_shapes(1)
                .layout()
                .memory_space(),
            kAlternateMemorySpace);
}
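
// Like AsyncOpShortLiveRange, but the input buffer (negate0) has another
// consumer, so the source stays in default memory while the destination still
// gets alternate memory.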
TEST_F(MemorySpaceAssignmentTest, AsyncOpShortLiveRangeInputBufferConsumer) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
ENTRY entry {
param = bf16[4]{0} parameter(0)
negate0 = bf16[4]{0} negate(param)
collective-permute-start = (bf16[4]{0}, bf16[4]{0}, u32[], u32[]) collective-permute-start(negate0), source_target_pairs={{0,1},{1,2},{2,3}}
negate1 = bf16[4]{0} negate(negate0)
negate2 = bf16[4]{0} negate(negate1)
negate3 = bf16[4]{0} negate(negate2)
collective-permute-done = bf16[4]{0} collective-permute-done(collective-permute-start)
ROOT add = add(collective-permute-done, negate3)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
AssignMemorySpace(module.get());
HloInstruction* collective_permute_start =
module->entry_computation()->GetInstructionWithName(
"collective-permute-start");
  EXPECT_EQ(collective_permute_start->shape()
                .tuple_shapes(0)
                .layout()
                .memory_space(),
            kDefaultMemorySpace);
  EXPECT_EQ(collective_permute_start->shape()
                .tuple_shapes(1)
                .layout()
                .memory_space(),
            kAlternateMemorySpace);
}
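
// A long start..done window makes keeping the collective-permute buffers in
// alternate memory unattractive; both are expected in default memory.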
TEST_F(MemorySpaceAssignmentTest, AsyncOpLongLiveRange) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
ENTRY entry {
param = bf16[4]{0} parameter(0)
negate0 = bf16[4]{0} negate(param)
collective-permute-start = (bf16[4]{0}, bf16[4]{0}, u32[], u32[]) collective-permute-start(negate0), source_target_pairs={{0,1},{1,2},{2,3}}
negate1 = bf16[4]{0} negate(param)
negate2 = bf16[4]{0} negate(negate1)
negate3 = bf16[4]{0} negate(negate2)
negate4 = bf16[4]{0} negate(negate3)
negate5 = bf16[4]{0} negate(negate4)
negate6 = bf16[4]{0} negate(negate5)
negate7 = bf16[4]{0} negate(negate6)
negate8 = bf16[4]{0} negate(negate7)
negate9 = bf16[4]{0} negate(negate8)
negate10 = bf16[4]{0} negate(negate9)
negate11 = bf16[4]{0} negate(negate10)
negate12 = bf16[4]{0} negate(negate11)
negate13 = bf16[4]{0} negate(negate12)
collective-permute-done = bf16[4]{0} collective-permute-done(collective-permute-start)
ROOT add = add(collective-permute-done, negate13)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
AssignMemorySpace(module.get());
HloInstruction* collective_permute_start =
module->entry_computation()->GetInstructionWithName(
"collective-permute-start");
  EXPECT_EQ(collective_permute_start->shape()
                .tuple_shapes(0)
                .layout()
                .memory_space(),
            kDefaultMemorySpace);
  EXPECT_EQ(collective_permute_start->shape()
                .tuple_shapes(1)
                .layout()
                .memory_space(),
            kDefaultMemorySpace);
}
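
// Same as AsyncOpLongLiveRange, with the input buffer also consumed in
// between; both buffers are again expected in default memory.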
TEST_F(MemorySpaceAssignmentTest, AsyncOpLongLiveRangeInputBufferConsumer) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
ENTRY entry {
param = bf16[4]{0} parameter(0)
negate0 = bf16[4]{0} negate(param)
collective-permute-start = (bf16[4]{0}, bf16[4]{0}, u32[], u32[]) collective-permute-start(negate0), source_target_pairs={{0,1},{1,2},{2,3}}
negate1 = bf16[4]{0} negate(negate0)
negate2 = bf16[4]{0} negate(negate1)
negate3 = bf16[4]{0} negate(negate2)
negate4 = bf16[4]{0} negate(negate3)
negate5 = bf16[4]{0} negate(negate4)
negate6 = bf16[4]{0} negate(negate5)
negate7 = bf16[4]{0} negate(negate6)
negate8 = bf16[4]{0} negate(negate7)
negate9 = bf16[4]{0} negate(negate8)
negate10 = bf16[4]{0} negate(negate9)
negate11 = bf16[4]{0} negate(negate10)
negate12 = bf16[4]{0} negate(negate11)
negate13 = bf16[4]{0} negate(negate12)
collective-permute-done = bf16[4]{0} collective-permute-done(collective-permute-start)
ROOT add = add(collective-permute-done, negate13)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
AssignMemorySpace(module.get());
HloInstruction* collective_permute_start =
module->entry_computation()->GetInstructionWithName(
"collective-permute-start");
  EXPECT_EQ(collective_permute_start->shape()
                .tuple_shapes(0)
                .layout()
                .memory_space(),
            kDefaultMemorySpace);
  EXPECT_EQ(collective_permute_start->shape()
                .tuple_shapes(1)
                .layout()
                .memory_space(),
            kDefaultMemorySpace);
}
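
// In-place collective-permute (slice_sizes given) over distinct input and
// output buffers; both are expected in alternate memory.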
TEST_F(MemorySpaceAssignmentTest, InPlaceAsyncCollectivePermute) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
ENTRY entry {
param = bf16[4]{0} parameter(0)
negate0 = bf16[4]{0} negate(param)
negate1 = bf16[4]{0} negate(param)
const0 = s32[] constant(0)
const1 = s32[] constant(1)
tuple0 = (s32[]) tuple(const0)
tuple1 = (s32[]) tuple(const1)
collective-permute-start = (bf16[4]{0}, bf16[4]{0}, u32[], u32[]) collective-permute-start(negate0, negate1, tuple0, tuple1), source_target_pairs={{0,1},{1,2},{2,3}}, slice_sizes={{1}}
negate2 = bf16[4]{0} negate(param)
negate3 = bf16[4]{0} negate(negate2)
negate4 = bf16[4]{0} negate(negate3)
collective-permute-done = bf16[4]{0} collective-permute-done(collective-permute-start)
ROOT add = add(collective-permute-done, negate4)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
AssignMemorySpace(module.get());
HloInstruction* collective_permute_start =
module->entry_computation()->GetInstructionWithName(
"collective-permute-start");
  EXPECT_EQ(collective_permute_start->shape()
                .tuple_shapes(0)
                .layout()
                .memory_space(),
            kAlternateMemorySpace);
  EXPECT_EQ(collective_permute_start->shape()
                .tuple_shapes(1)
                .layout()
                .memory_space(),
            kAlternateMemorySpace);
}
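
// In-place collective-permute where input and output are the same buffer
// (negate0 is passed twice); both tuple elements are expected in alternate
// memory.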
TEST_F(MemorySpaceAssignmentTest, InPlaceAsyncCollectivePermuteSameBuffer) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
ENTRY entry {
param = bf16[4]{0} parameter(0)
negate0 = bf16[4]{0} negate(param)
const0 = s32[] constant(0)
const1 = s32[] constant(1)
tuple0 = (s32[]) tuple(const0)
tuple1 = (s32[]) tuple(const1)
collective-permute-start = (bf16[4]{0}, bf16[4]{0}, u32[], u32[]) collective-permute-start(negate0, negate0, tuple0, tuple1), source_target_pairs={{0,1},{1,2},{2,3}}, slice_sizes={{1}}
negate2 = bf16[4]{0} negate(param)
negate3 = bf16[4]{0} negate(negate2)
negate4 = bf16[4]{0} negate(negate3)
collective-permute-done = bf16[4]{0} collective-permute-done(collective-permute-start)
ROOT add = add(collective-permute-done, negate4)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
AssignMemorySpace(module.get());
HloInstruction* collective_permute_start =
module->entry_computation()->GetInstructionWithName(
"collective-permute-start");
  EXPECT_EQ(collective_permute_start->shape()
                .tuple_shapes(0)
                .layout()
                .memory_space(),
            kAlternateMemorySpace);
  EXPECT_EQ(collective_permute_start->shape()
                .tuple_shapes(1)
                .layout()
                .memory_space(),
            kAlternateMemorySpace);
}
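
// Two in-place collective-permutes chained through the same buffer; both
// starts are expected to keep their buffers in alternate memory.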
TEST_F(MemorySpaceAssignmentTest,
InPlaceAsyncCollectivePermuteSameBufferChained) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
ENTRY entry {
param = bf16[4]{0} parameter(0)
negate0 = bf16[4]{0} negate(param)
const0 = s32[] constant(0)
const1 = s32[] constant(1)
tuple0 = (s32[]) tuple(const0)
tuple1 = (s32[]) tuple(const1)
collective-permute-start.1 = (bf16[4]{0}, bf16[4]{0}, u32[], u32[]) collective-permute-start(negate0, negate0, tuple0, tuple1), source_target_pairs={{0,1},{1,2},{2,3}}, slice_sizes={{1}}
negate2 = bf16[4]{0} negate(param)
negate3 = bf16[4]{0} negate(negate2)
negate4 = bf16[4]{0} negate(negate3)
collective-permute-done.1 = bf16[4]{0} collective-permute-done(collective-permute-start.1)
collective-permute-start.2 = (bf16[4]{0}, bf16[4]{0}, u32[], u32[]) collective-permute-start(collective-permute-done.1, collective-permute-done.1, tuple0, tuple1), source_target_pairs={{0,1},{1,2},{2,3}}, slice_sizes={{1}}
negate5 = bf16[4]{0} negate(negate4)
negate6 = bf16[4]{0} negate(negate5)
negate7 = bf16[4]{0} negate(negate6)
collective-permute-done.2 = bf16[4]{0} collective-permute-done(collective-permute-start.2)
ROOT add = add(collective-permute-done.2, negate7)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
AssignMemorySpace(module.get());
HloInstruction* collective_permute_start_1 =
module->entry_computation()->GetInstructionWithName(
"collective-permute-start.1");
  EXPECT_EQ(collective_permute_start_1->shape()
                .tuple_shapes(0)
                .layout()
                .memory_space(),
            kAlternateMemorySpace);
  EXPECT_EQ(collective_permute_start_1->shape()
                .tuple_shapes(1)
                .layout()
                .memory_space(),
            kAlternateMemorySpace);
HloInstruction* collective_permute_start_2 =
module->entry_computation()->GetInstructionWithName(
"collective-permute-start.2");
  EXPECT_EQ(collective_permute_start_2->shape()
                .tuple_shapes(0)
                .layout()
                .memory_space(),
            kAlternateMemorySpace);
  EXPECT_EQ(collective_permute_start_2->shape()
                .tuple_shapes(1)
                .layout()
                .memory_space(),
            kAlternateMemorySpace);
}
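
// Tuple-shaped in-place collective-permutes chained through the same buffers;
// verifies the done ops still consume the start ops directly (no copies were
// inserted in between).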
TEST_F(MemorySpaceAssignmentTest,
TupleInPlaceAsyncCollectivePermuteSameBufferChained) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
ENTRY entry {
param = bf16[4]{0} parameter(0)
param2 = bf16[48]{0} parameter(1)
negate0.1 = bf16[48]{0} negate(param2)
negate0.2 = bf16[48]{0} negate(param2)
const0 = s32[] constant(0)
const1 = s32[] constant(1)
tuple0.0 = (s32[]) tuple(const0)
tuple0 = ((s32[]), (s32[])) tuple(tuple0.0, tuple0.0)
tuple1.0 = (s32[]) tuple(const1)
tuple1 = ((s32[]), (s32[])) tuple(tuple1.0, tuple1.0)
tuple2 = (bf16[48]{0}, bf16[48]{0}) tuple(negate0.1, negate0.2)
collective-permute-start.1 = ((bf16[48]{0}, bf16[48]{0}), (bf16[48]{0}, bf16[48]{0}), u32[], u32[]) collective-permute-start(tuple2, tuple2, tuple0, tuple1), source_target_pairs={{0,1},{1,2},{2,3}}, slice_sizes={{1}}
negate2 = bf16[4]{0} negate(param)
negate3 = bf16[4]{0} negate(negate2)
negate4 = bf16[4]{0} negate(negate3)
collective-permute-done.1 = (bf16[48]{0}, bf16[48]{0}) collective-permute-done(collective-permute-start.1)
collective-permute-start.2 = ((bf16[48]{0}, bf16[48]{0}), (bf16[48]{0}, bf16[48]{0}), u32[], u32[]) collective-permute-start(collective-permute-done.1, collective-permute-done.1, tuple0, tuple1), source_target_pairs={{0,1},{1,2},{2,3}}, slice_sizes={{1}}
negate5 = bf16[4]{0} negate(negate4)
negate6 = bf16[4]{0} negate(negate5)
negate7 = bf16[4]{0} negate(negate6)
collective-permute-done.2 = (bf16[48]{0}, bf16[48]{0}) collective-permute-done(collective-permute-start.2)
gte = bf16[48]{0} get-tuple-element(collective-permute-done.2), index=0
ROOT root = (bf16[48]{0}, bf16[4]{0}) tuple(gte, negate7)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
AssignMemorySpace(module.get());
const HloInstruction* cp_done1 =
FindInstruction(module.get(), "collective-permute-done.1");
EXPECT_EQ(cp_done1->operand(0)->opcode(), HloOpcode::kCollectivePermuteStart);
const HloInstruction* cp_done2 =
FindInstruction(module.get(), "collective-permute-done.2");
EXPECT_EQ(cp_done2->operand(0)->opcode(), HloOpcode::kCollectivePermuteStart);
}
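
// Tuple-shaped in-place collective-permute whose input tuples repeat the same
// buffer in both elements; verifies no copy was inserted between start and
// done.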
TEST_F(MemorySpaceAssignmentTest,
TupleInPlaceAsyncCollectivePermuteSameBuffer) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
ENTRY entry {
param = bf16[4]{0} parameter(0)
param2 = bf16[48]{0} parameter(1)
negate0.1 = bf16[48]{0} negate(param2)
negate0.2 = bf16[48]{0} negate(param2)
const0 = s32[] constant(0)
const1 = s32[] constant(1)
tuple0.0 = (s32[]) tuple(const0)
tuple0 = ((s32[]), (s32[])) tuple(tuple0.0, tuple0.0)
tuple1.0 = (s32[]) tuple(const1)
tuple1 = ((s32[]), (s32[])) tuple(tuple1.0, tuple1.0)
tuple2 = (bf16[48]{0}, bf16[48]{0}) tuple(negate0.1, negate0.1)
tuple3 = (bf16[48]{0}, bf16[48]{0}) tuple(negate0.2, negate0.2)
collective-permute-start.1 = ((bf16[48]{0}, bf16[48]{0}), (bf16[48]{0}, bf16[48]{0}), u32[], u32[]) collective-permute-start(tuple2, tuple3, tuple0, tuple1), source_target_pairs={{0,1},{1,2},{2,3}}, slice_sizes={{1}}
negate2 = bf16[4]{0} negate(param)
negate3 = bf16[4]{0} negate(negate2)
negate4 = bf16[4]{0} negate(negate3)
collective-permute-done.1 = (bf16[48]{0}, bf16[48]{0}) collective-permute-done(collective-permute-start.1)
gte = bf16[48]{0} get-tuple-element(collective-permute-done.1), index=0
ROOT root = (bf16[48]{0}, bf16[4]{0}) tuple(gte, negate4)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
AssignMemorySpace(module.get());
const HloInstruction* cp_done1 =
FindInstruction(module.get(), "collective-permute-done.1");
EXPECT_EQ(cp_done1->operand(0)->opcode(), HloOpcode::kCollectivePermuteStart);
}
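
// The tuple-shaped in-place collective-permute-done is the module root, so
// every array subshape of its result must be in default memory.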
TEST_F(MemorySpaceAssignmentTest,
TupleInPlaceAsyncCollectivePermuteSameBufferRoot) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
ENTRY entry {
param = bf16[4]{0} parameter(0)
param2 = bf16[48]{0} parameter(1)
negate0.1 = bf16[48]{0} negate(param2)
negate0.2 = bf16[48]{0} negate(param2)
const0 = s32[] constant(0)
const1 = s32[] constant(1)
tuple0.0 = (s32[]) tuple(const0)
tuple0 = ((s32[]), (s32[])) tuple(tuple0.0, tuple0.0)
tuple1.0 = (s32[]) tuple(const1)
tuple1 = ((s32[]), (s32[])) tuple(tuple1.0, tuple1.0)
tuple2 = (bf16[48]{0}, bf16[48]{0}) tuple(negate0.1, negate0.1)
tuple3 = (bf16[48]{0}, bf16[48]{0}) tuple(negate0.2, negate0.2)
collective-permute-start.1 = ((bf16[48]{0}, bf16[48]{0}), (bf16[48]{0}, bf16[48]{0}), u32[], u32[]) collective-permute-start(tuple2, tuple3, tuple0, tuple1), source_target_pairs={{0,1},{1,2},{2,3}}, slice_sizes={{1}}
ROOT collective-permute-done.1 = (bf16[48]{0}, bf16[48]{0}) collective-permute-done(collective-permute-start.1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
AssignMemorySpace(module.get());
const HloInstruction* cp_done1 =
FindInstruction(module.get(), "collective-permute-done.1");
EXPECT_EQ(cp_done1->operand(0)->opcode(), HloOpcode::kCollectivePermuteStart);
ShapeUtil::ForEachSubshape(
cp_done1->shape(),
      [&](const Shape& subshape, const ShapeIndex& /*index*/) {
if (subshape.IsArray() && subshape.has_layout()) {
EXPECT_EQ(subshape.layout().memory_space(), kDefaultMemorySpace);
}
});
}
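
// In-place collective-permute with explicit input/output index tuples as the
// module root; again, all array subshapes of the result must stay in default
// memory.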
TEST_F(MemorySpaceAssignmentTest, TupleInPlaceAsyncCollectivePermuteRoot) {
absl::string_view hlo_string = R"(
HloModule inplace_collective_permute, is_scheduled=true
ENTRY %inplace_collective_permute {
%param.0 = u32[8,1,1] parameter(0)
%constant.1000 = u32[] constant(1000)
%broadcast.1 = u32[8,1,1] broadcast(u32[] %constant.1000), dimensions={}
%broadcast.2 = u32[8,1,1] broadcast(u32[] %constant.1000), dimensions={}
%tuple.input = (u32[8,1,1], u32[8,1,1]) tuple(u32[8,1,1] %param.0, u32[8,1,1] %param.0)
%tuple.output = (u32[8,1,1], u32[8,1,1]) tuple(u32[8,1,1] %broadcast.1, u32[8,1,1] %broadcast.2)
%constant.0 = s32[] constant(0)
%constant.1 = s32[] constant(1)
%constant.2 = s32[] constant(2)
%indices.0.0.0 = (s32[], s32[], s32[]) tuple(s32[] %constant.0, s32[] %constant.0, s32[] %constant.0)
%indices.1.0.0 = (s32[], s32[], s32[]) tuple(s32[] %constant.1, s32[] %constant.0, s32[] %constant.0)
%indices.2.0.0 = (s32[], s32[], s32[]) tuple(s32[] %constant.2, s32[] %constant.0, s32[] %constant.0)
%indices.000.100 = ((s32[], s32[], s32[]), (s32[], s32[], s32[])) tuple((s32[], s32[], s32[]) %indices.0.0.0, (s32[], s32[], s32[]) %indices.1.0.0)
%indices.000.200 = ((s32[], s32[], s32[]), (s32[], s32[], s32[])) tuple((s32[], s32[], s32[]) %indices.0.0.0, (s32[], s32[], s32[]) %indices.2.0.0)
%indices.000.0 = ((s32[], s32[], s32[]), (s32[], s32[], s32[])) tuple((s32[], s32[], s32[]) %indices.0.0.0, (s32[], s32[], s32[]) %indices.0.0.0)
%input.indices = (((s32[], s32[], s32[]), (s32[], s32[], s32[])), ((s32[], s32[], s32[]), (s32[], s32[], s32[]))) tuple(((s32[], s32[], s32[]), (s32[], s32[], s32[])) %indices.000.100, ((s32[], s32[], s32[]), (s32[], s32[], s32[])) %indices.000.0)
%output.indices = (((s32[], s32[], s32[]), (s32[], s32[], s32[])), ((s32[], s32[], s32[]), (s32[], s32[], s32[]))) tuple(((s32[], s32[], s32[]), (s32[], s32[], s32[])) %indices.000.100, ((s32[], s32[], s32[]), (s32[], s32[], s32[])) %indices.000.200)
%collective-permute-start = ((u32[8,1,1], u32[8,1,1]), (u32[8,1,1], u32[8,1,1]), u32[], u32[]) collective-permute-start((u32[8,1,1], u32[8,1,1]) %tuple.input, (u32[8,1,1], u32[8,1,1]) %tuple.output, (((s32[], s32[], s32[]), (s32[], s32[], s32[])), ((s32[], s32[], s32[]), (s32[], s32[], s32[]))) %input.indices, (((s32[], s32[], s32[]), (s32[], s32[], s32[])), ((s32[], s32[], s32[]), (s32[], s32[], s32[]))) %output.indices), channel_id=42, source_target_pairs={{0,1},{1,0},{1,0},{0,1}}, slice_sizes={{4},{4},{4},{4}}
ROOT %collective-permute-done = (u32[8,1,1], u32[8,1,1]) collective-permute-done(((u32[8,1,1], u32[8,1,1]), (u32[8,1,1], u32[8,1,1]), u32[], u32[]) %collective-permute-start)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
AssignMemorySpace(module.get());
const HloInstruction* cp_done =
FindInstruction(module.get(), "collective-permute-done");
EXPECT_EQ(cp_done->operand(0)->opcode(), HloOpcode::kCollectivePermuteStart);
ShapeUtil::ForEachSubshape(
cp_done->shape(),
      [&](const Shape& subshape, const ShapeIndex& /*index*/) {
if (subshape.IsArray() && subshape.has_layout()) {
EXPECT_EQ(subshape.layout().memory_space(), kDefaultMemorySpace);
}
});
}
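
// reserved_scoped_memory_fn reserves 100 bytes while "c" executes, leaving no
// room for buffers live across c: b and c land in default memory while a, d,
// and e still fit in alternate memory.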
TEST_F(MemorySpaceAssignmentTest, ReservedScopedMemory) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
ENTRY entry {
param0 = f32[2,4] parameter(0)
a = f32[2,4] negate(param0)
b = f32[2,4] negate(a)
c = f32[2,4] negate(b)
d = f32[2,4] negate(c)
e = f32[2,4] negate(d)
ROOT f = f32[2,4] add(e, b)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
Options options = DefaultMemorySpaceOptions();
  options.reserved_scoped_memory_fn =
      [&](const HloInstruction* instruction,
          const absl::flat_hash_set<std::pair<int, ShapeIndex>>&
              operands_in_alternate_memory,
          const absl::flat_hash_set<ShapeIndex>& outputs_in_alternate_memory) {
if (instruction->name() == "c") {
return 100;
}
return 0;
};
AssignMemorySpace(module.get(), options);
auto get_memory_space = [&](absl::string_view instruction_name) {
return module->entry_computation()
->GetInstructionWithName(instruction_name)
->shape()
.layout()
.memory_space();
};
EXPECT_TRUE(get_memory_space("a") == kAlternateMemorySpace);
EXPECT_TRUE(get_memory_space("b") == kDefaultMemorySpace);
EXPECT_TRUE(get_memory_space("c") == kDefaultMemorySpace);
EXPECT_TRUE(get_memory_space("d") == kAlternateMemorySpace);
EXPECT_TRUE(get_memory_space("e") == kAlternateMemorySpace);
}
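
// A constant defined far from its use: the constant itself stays in default
// memory, but the use is fed through alternate memory.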
TEST_F(MemorySpaceAssignmentTest, ConstantAllocationFar) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
ENTRY entry {
param0 = f32[2,4] parameter(0)
const = f32[2,4] constant({...})
a = f32[2,4] negate(param0)
b = f32[2,4] negate(a)
c = f32[2,4] negate(b)
d = f32[2,4] negate(c)
e = f32[2,4] negate(d)
ROOT negate = f32[2,4] add(const, e)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
AssignMemorySpace(module.get());
  EXPECT_EQ(module->entry_computation()
                ->GetInstructionWithName("const")
                ->shape()
                .layout()
                .memory_space(),
            kDefaultMemorySpace);
  EXPECT_EQ(module->entry_computation()
                ->GetInstructionWithName("negate")
                ->operand(0)
                ->shape()
                .layout()
                .memory_space(),
            kAlternateMemorySpace);
}
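
// Same as ConstantAllocationFar, with the constant defined immediately before
// its use.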
TEST_F(MemorySpaceAssignmentTest, ConstantAllocationNear) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
ENTRY entry {
param0 = f32[2,4] parameter(0)
a = f32[2,4] negate(param0)
b = f32[2,4] negate(a)
c = f32[2,4] negate(b)
d = f32[2,4] negate(c)
e = f32[2,4] negate(d)
const = f32[2,4] constant({...})
ROOT negate = f32[2,4] add(const, e)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
AssignMemorySpace(module.get());
  EXPECT_EQ(module->entry_computation()
                ->GetInstructionWithName("const")
                ->shape()
                .layout()
                .memory_space(),
            kDefaultMemorySpace);
  EXPECT_EQ(module->entry_computation()
                ->GetInstructionWithName("negate")
                ->operand(0)
                ->shape()
                .layout()
                .memory_space(),
            kAlternateMemorySpace);
}
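
// Fake repacker for tests: instead of running a real repacking algorithm, it
// reassigns offsets from a caller-provided map.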
class FakeMemorySpaceAssignmentRepacker : public MemorySpaceAssignmentRepacker {
public:
explicit FakeMemorySpaceAssignmentRepacker(
absl::flat_hash_map<std::pair<int64_t, int64_t>, int64_t>& repack_map,
std::function<void(absl::Span<AllocationBlock*>)> check_fun = nullptr,
bool always_return_modified = false)
      : MemorySpaceAssignmentRepacker(/*max_size=*/128, /*alignment=*/8),
repack_map_(repack_map),
check_fun_(check_fun),
always_return_modified_(always_return_modified) {}
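  // Looks up each block by {inclusive_start_time, initial_offset} in
  // repack_map_; a hit overrides the block's offset (and the offsets of all
  // its colocations), a miss keeps the initial offset. check_fun_, if set,
  // can inspect the blocks, and always_return_modified_ forces the result.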
absl::StatusOr<bool> Repack(
absl::Span<AllocationBlock*> allocations) override {
bool modified = false;
for (AllocationBlock* block : allocations) {
absl::flat_hash_set<int64_t> colocations;
std::string colocations_str;
for (const AllocationBlock* colocation : block->GetColocations()) {
absl::StrAppend(&colocations_str, colocation->id, ", ");
colocations.insert(colocation->id);
}
VLOG(1) << "Alloc id: " << block->id << " time: ["
<< block->inclusive_start_time << ", " << block->end_time
<< "] size: " << block->size
<< " init offset: " << block->initial_offset << " colocations: {"
<< colocations_str << "}";
auto it = repack_map_.find(
{block->inclusive_start_time, block->initial_offset});
if (it != repack_map_.end()) {
modified = true;
block->offset = it->second;
} else {
block->offset = block->initial_offset;
}
for (AllocationBlock* colocation : block->GetColocations()) {
if (it != repack_map_.end()) {
colocation->offset = it->second;
} else {
colocation->offset = colocation->initial_offset;
}
}
}
if (check_fun_) {
check_fun_(allocations);
}
return always_return_modified_ || modified;
}
private:
absl::flat_hash_map<std::pair<int64_t, int64_t>, int64_t> repack_map_;
std::function<void(absl::Span<AllocationBlock*>)> check_fun_;
bool always_return_modified_;
};
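
// The repack map moves the allocation at {time 2, offset 0} to offset 32 and
// the one at {time 3, offset 32} to offset 0; after the repack, d (the tanh
// value) is expected to land in alternate memory.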
TEST_F(MemorySpaceAssignmentTest, Repack) {
absl::string_view hlo_string = R"(
HloModule bug, is_scheduled=true
ENTRY Entry {
param0 = f32[8,3] parameter(0)
param1 = f32[2,4] parameter(1)
a = f32[2,4] sine(param1)
b = f32[2,4] cosine(param1)
c = f32[8,3] negate(param0)
j = f32[2,4] negate(a)
d = f32[8,3] tanh(param0)
k = f32[2,4] negate(j)
l = f32[2,4] add(b, k)
m = f32[8,3] negate(d)
n = f32[2,4] sine(l)
o = f32[8,3] negate(m)
p = f32[2,4] negate(n)
q = f32[8,3] negate(m)
ROOT tuple = (f32[2,4], f32[8,3], f32[8,3]) tuple(p, q, o)
}
)";
MsaBufferIntervalCompare buffer_interval_compare =
[](const MsaBufferInterval& a, const MsaBufferInterval& b) {
auto get_opcode_priority = [](const HloOpcode& opcode) {
switch (opcode) {
case HloOpcode::kSin:
return 0;
case HloOpcode::kCos:
return 1;
case HloOpcode::kTanh:
return 2;
default:
return 3;
}
};
return get_opcode_priority(a.buffer->defining_instruction()->opcode()) <
get_opcode_priority(b.buffer->defining_instruction()->opcode());
};
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
InstructionCountPrefetchIntervalPicker prefetch_interval_picker(2, 10);
absl::flat_hash_map<std::pair<int64_t, int64_t>, int64_t> repack_map;
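  // {inclusive_start_time, initial_offset} -> new offset.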
repack_map[{2, 0}] = 32;
repack_map[{3, 32}] = 0;
FakeMemorySpaceAssignmentRepacker repacker =
FakeMemorySpaceAssignmentRepacker(repack_map);
Options options = DefaultMemorySpaceOptions();
options.max_repacks = 1;
options.repacker = &repacker;
AssignMemorySpace(module.get(), options, buffer_interval_compare,
&prefetch_interval_picker);
const HloInstruction* d =
module->entry_computation()->GetInstructionWithName("d");
EXPECT_EQ(d->shape().layout().memory_space(), kAlternateMemorySpace);
}
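
// Aliased buffers (here, the values threaded through the while) must be
// exported to the repacker as colocated allocation blocks; check_fun asserts
// the expected colocation counts.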
TEST_F(MemorySpaceAssignmentTest, RepackExportsAliasedOffsets) {
absl::string_view hlo_string = R"(
HloModule bug, is_scheduled=true
while_condition {
param1 = (f32[2,4], f32[2,4]) parameter(0)
ROOT cond = pred[] constant(true)
}
while_body {
param2 = (f32[2,4], f32[2,4]) parameter(0)
gte2 = f32[2,4] get-tuple-element(param2), index=0
gte3 = f32[2,4] get-tuple-element(param2), index=1
add = f32[2,4] add(gte2, gte3)
ROOT tuple2 = (f32[2,4], f32[2,4]) tuple(add, gte3)
}
ENTRY Entry {
param0 = f32[2,4] parameter(0)
a = f32[2,4] sine(param0)
b = f32[2,4] negate(a)
c = f32[2,4] negate(b)
d = f32[2,4] negate(c)
e = f32[2,4] negate(d)
f = f32[2,4] negate(e)
g = f32[2,4] negate(f)
h = f32[2,4] negate(g)
i = f32[2,4] negate(h)
j = f32[2,4] negate(i)
k = f32[2,4] negate(j)
l = f32[2,4] negate(k)
m = f32[2,4] negate(l)
n = f32[2,4] negate(m)
o = f32[2,4] negate(n)
p = f32[2,4] negate(o)
q = f32[2,4] add(p, a)
tuple = (f32[2,4], f32[2,4]) tuple(q, a)
while = (f32[2,4], f32[2,4]) while(tuple), condition=while_condition, body=while_body
gte0 = f32[2,4] get-tuple-element(while), index=0
gte1 = f32[2,4] get-tuple-element(while), index=1
r = f32[2,4] negate(gte0)
s = f32[2,4] negate(r)
t = f32[2,4] negate(s)
constant = f32[] constant(0)
broadcast = f32[8,4] broadcast(constant), dimensions={}
cos = f32[8,4] cosine(broadcast)
u = f32[2,4] add(t, gte1)
v = f32[2,4] add(u, param0)
w = f32[8,4] negate(cos)
ROOT tuple3 = (f32[2,4], f32[8,4]) tuple(v, w)
}
)";
MsaBufferIntervalCompare buffer_interval_compare =
[](const MsaBufferInterval& a, const MsaBufferInterval& b) {
auto get_opcode_priority = [](const HloOpcode& opcode) {
switch (opcode) {
case HloOpcode::kSin:
return 0;
case HloOpcode::kCos:
return 1;
case HloOpcode::kTanh:
return 2;
default:
return 3;
}
};
return get_opcode_priority(a.buffer->defining_instruction()->opcode()) <
get_opcode_priority(b.buffer->defining_instruction()->opcode());
};
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
InstructionCountPrefetchIntervalPicker prefetch_interval_picker(2, 10);
absl::flat_hash_map<std::pair<int64_t, int64_t>, int64_t> repack_map;
auto check_fun = [](absl::Span<AllocationBlock*> allocations) {
EXPECT_TRUE(allocations.at(0)->GetColocationsCount() == 1 ||
allocations.at(0)->GetColocationsCount() == 3);
EXPECT_EQ(allocations.at(1)->GetColocationsCount(), 3);
EXPECT_EQ(allocations.at(2)->GetColocationsCount(), 3);
EXPECT_TRUE(allocations.at(3)->GetColocationsCount() == 1 ||
allocations.at(3)->GetColocationsCount() == 3);
};
FakeMemorySpaceAssignmentRepacker repacker =
FakeMemorySpaceAssignmentRepacker(repack_map, check_fun);
Options options = DefaultMemorySpaceOptions();
options.max_repacks = 1;
options.repacker = &repacker;
AssignMemorySpace(module.get(), options, buffer_interval_compare,
&prefetch_interval_picker);
}
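// Tests that reserved scoped memory allocations also export their aliased
// offsets to the repacker: instructions "c" and "d" reserve 100 bytes each,
// and the resulting allocation blocks should carry colocations.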
TEST_F(MemorySpaceAssignmentTest,
RepackExportsAliasedOffsetsForReservedScopedMemory) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
ENTRY entry {
param0 = f32[2,4] parameter(0)
a = f32[2,4] negate(param0)
b = f32[2,4] negate(a)
c = f32[2,4] negate(b)
d = f32[2,4] negate(c)
e = f32[2,4] negate(d)
ROOT f = f32[2,4] add(e, b)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
Options options = DefaultMemorySpaceOptions();
options.max_repacks = 1;
options.reserved_scoped_memory_fn =
[&](const HloInstruction* instruction,
const absl::flat_hash_set<std::pair<int, ShapeIndex>>
operands_in_alternate_memory,
const absl::flat_hash_set<ShapeIndex> outputs_in_alternate_memory) {
if (instruction->name() == "c" || instruction->name() == "d") {
return 100;
}
return 0;
};
absl::flat_hash_map<std::pair<int64_t, int64_t>, int64_t> repack_map;
bool repacker_ran = false;
auto check_fun = [&](absl::Span<AllocationBlock*> allocations) {
EXPECT_EQ(allocations.at(0)->GetColocationsCount(), 2);
EXPECT_EQ(allocations.at(1)->GetColocationsCount(), 2);
repacker_ran = true;
};
FakeMemorySpaceAssignmentRepacker repacker =
FakeMemorySpaceAssignmentRepacker(repack_map, check_fun);
options.repacker = &repacker;
AssignMemorySpace(module.get(), options);
EXPECT_TRUE(repacker_ran);
}
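// Tests that with reduce_scoped_memory_limit enabled, the reserved scoped
// memory per instruction reflects its actual alternate-memory usage: 1 byte
// if no operand is in the alternate memory plus 2 bytes if no output is.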
TEST_F(MemorySpaceAssignmentTest, ReduceReservedScopedVmemIfOperandInVmem) {
absl::string_view hlo_string = R"(
HloModule bug, is_scheduled=true
ENTRY Entry {
param0 = f32[8,3] parameter(0)
param1 = f32[2,4] parameter(1)
a = f32[2,4] sine(param1)
b = f32[2,4] cosine(param1)
c = f32[8,3] negate(param0)
j = f32[2,4] negate(a)
d = f32[8,3] tanh(param0)
k = f32[2,4] negate(j)
l = f32[2,4] add(b, k)
m = f32[8,3] negate(d)
n = f32[2,4] sine(l)
o = f32[8,3] negate(m)
p = f32[2,4] negate(n)
q = f32[8,3] negate(m)
ROOT tuple = (f32[2,4], f32[8,3], f32[8,3], f32[8,3]) tuple(p, q, o, c)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
absl::flat_hash_map<std::pair<int64_t, int64_t>, int64_t> repack_map;
Options options = DefaultMemorySpaceOptions();
options.max_repacks = 10;
options.repack_after_every_allocation = true;
options.reduce_scoped_memory_limit = true;
options.reserved_scoped_memory_fn =
[&](const HloInstruction* instruction,
const absl::flat_hash_set<std::pair<int, ShapeIndex>>
operands_in_alternate_memory,
const absl::flat_hash_set<ShapeIndex> outputs_in_alternate_memory) {
int64_t scoped_memory_size = 0;
if (operands_in_alternate_memory.empty()) {
scoped_memory_size += 1;
LOG(INFO) << instruction->name() << " has no operand in vmem";
}
if (outputs_in_alternate_memory.empty()) {
scoped_memory_size += 2;
LOG(INFO) << instruction->name() << " has no output in vmem";
}
return scoped_memory_size;
};
FakeMemorySpaceAssignmentRepacker repacker =
FakeMemorySpaceAssignmentRepacker(repack_map, nullptr);
options.repacker = &repacker;
std::unique_ptr<PresetAssignments> assignments =
AssignMemorySpace(module.get(), options);
auto instruction_consumes_assignment_fn =
[&](absl::string_view instruction_name) -> bool {
HloInstruction* instruction =
module->entry_computation()->GetInstructionWithName(instruction_name);
for (auto& pair : assignments->chunks()) {
HloInstruction* consumer = pair.first.instruction;
if (absl::c_any_of(instruction->operands(),
[&](const HloInstruction* operand) {
return operand == consumer;
})) {
return true;
}
}
return false;
};
auto instruction_produces_assignment_fn =
[&](absl::string_view instruction_name) -> bool {
HloInstruction* instruction =
module->entry_computation()->GetInstructionWithName(instruction_name);
for (auto& pair : assignments->chunks()) {
HloInstruction* producer = pair.first.instruction;
if (producer == instruction) {
return true;
}
}
return false;
};
auto check_reserved_scoped_memory_fn =
[&](absl::string_view instruction_name) -> bool {
int64_t scoped_memory_size = -1;
for (auto& pair : assignments->scoped_allocation_chunks()) {
HloInstruction* instruction = pair.first;
if (instruction->name() == instruction_name) {
scoped_memory_size = pair.second.size;
}
}
if (!instruction_consumes_assignment_fn(instruction_name)) {
scoped_memory_size -= 1;
}
if (!instruction_produces_assignment_fn(instruction_name)) {
scoped_memory_size -= 2;
}
return scoped_memory_size == 0;
};
for (auto& pair : assignments->assignment_informations()) {
LOG(INFO) << " space: " << pair.first << ", size: " << pair.second.size;
}
for (auto& pair : assignments->scoped_allocation_chunks()) {
HloInstruction* instruction = pair.first;
LOG(INFO) << instruction->name() << ": " << pair.second.size;
}
EXPECT_TRUE(check_reserved_scoped_memory_fn("a"));
EXPECT_TRUE(check_reserved_scoped_memory_fn("b"));
EXPECT_TRUE(check_reserved_scoped_memory_fn("c"));
EXPECT_TRUE(check_reserved_scoped_memory_fn("j"));
EXPECT_TRUE(check_reserved_scoped_memory_fn("d"));
EXPECT_TRUE(check_reserved_scoped_memory_fn("k"));
EXPECT_TRUE(check_reserved_scoped_memory_fn("l"));
EXPECT_TRUE(check_reserved_scoped_memory_fn("m"));
EXPECT_TRUE(check_reserved_scoped_memory_fn("n"));
EXPECT_TRUE(check_reserved_scoped_memory_fn("o"));
EXPECT_TRUE(check_reserved_scoped_memory_fn("p"));
EXPECT_TRUE(check_reserved_scoped_memory_fn("q"));
}
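// Tests scoped allocations when they are not forced to share a single offset
// (allocate_reserved_scoped_memory_at_same_offset = false); single-timestep
// blocks handed to the repacker should still carry colocation data.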
TEST_F(MemorySpaceAssignmentTest, ScopedAllocationWithDifferentOffset) {
absl::string_view hlo_string = R"(
HloModule bug, is_scheduled=true
ENTRY Entry {
param0 = f32[8,3] parameter(0)
param1 = f32[2,4] parameter(1)
a = f32[2,4] sine(param1)
b = f32[2,4] cosine(param1)
c = f32[8,3] negate(param0)
j = f32[2,4] negate(a)
d = f32[8,3] tanh(param0)
k = f32[2,4] negate(j)
l = f32[2,4] add(b, k)
m = f32[8,3] negate(d)
n = f32[2,4] sine(l)
o = f32[8,3] negate(m)
p = f32[2,4] negate(n)
q = f32[8,3] negate(m)
ROOT tuple = (f32[2,4], f32[8,3], f32[8,3]) tuple(p, q, o)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
auto check_fun = [](absl::Span<AllocationBlock*> allocations) {
for (AllocationBlock* block : allocations) {
if (block->inclusive_start_time == block->end_time) {
EXPECT_GT(block->GetColocationsCount(), 0);
}
}
};
absl::flat_hash_map<std::pair<int64_t, int64_t>, int64_t> repack_map;
FakeMemorySpaceAssignmentRepacker repacker =
FakeMemorySpaceAssignmentRepacker(repack_map, check_fun);
Options options = DefaultMemorySpaceOptions();
options.reserved_scoped_memory_fn =
[&](const HloInstruction* instruction,
const absl::flat_hash_set<std::pair<int, ShapeIndex>>
operands_in_alternate_memory,
const absl::flat_hash_set<ShapeIndex> outputs_in_alternate_memory) {
return 1;
};
options.max_repacks = 1;
options.repacker = &repacker;
options.allocate_reserved_scoped_memory_at_same_offset = false;
AssignMemorySpace(module.get(), options);
}
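// Regression test: a repacker that always reports modification must not cause
// the required assignment for the conditional output to be erased.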
TEST_F(MemorySpaceAssignmentTest,
RepackShouldntEraseRequiredAssignmentForConditionalOutput) {
absl::string_view hlo_string = R"(
HloModule CondAllocation, is_scheduled=true
true_computation {
p0 = (f32[3]) parameter(0)
gte = f32[3] get-tuple-element(p0), index=0
neg1 = f32[3] negate(gte)
ROOT tuple1 = (f32[3]) tuple(neg1)
}
false_computation {
p0 = (f32[3]) parameter(0)
gte = f32[3] get-tuple-element(p0), index=0
neg2 = f32[3] negate(gte)
ROOT tuple2 = (f32[3]) tuple(neg2)
}
ENTRY entry {
p0 = f32[3] parameter(0)
p1 = pred[] parameter(1)
copy = f32[3] copy(p0)
tuple = (f32[3]) tuple(copy)
conditional = (f32[3]) conditional(p1, tuple, tuple), true_computation=true_computation, false_computation=false_computation
ROOT gte = f32[3] get-tuple-element(conditional), index=0
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
absl::flat_hash_map<std::pair<int64_t, int64_t>, int64_t> repack_map;
FakeMemorySpaceAssignmentRepacker repacker =
      FakeMemorySpaceAssignmentRepacker(repack_map, /*check_fun=*/nullptr,
                                        /*always_return_modified=*/true);
Options options = DefaultMemorySpaceOptions();
options.max_repacks = 10;
options.repacker = &repacker;
options.repack_after_every_allocation = true;
InstructionCountPrefetchIntervalPicker prefetch_interval_picker(2, 10);
  AssignMemorySpace(module.get(), options,
                    /*buffer_interval_compare=*/{}, &prefetch_interval_picker);
}
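// Memory space assignment must be deterministic: assigning the same module
// repeatedly should produce byte-identical HLO.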
TEST_F(MemorySpaceAssignmentTest, Determinism) {
std::unique_ptr<HloModule> module = CreateEvictAndPrefetchModule();
AssignMemorySpace(module.get());
std::string module_str = module->ToString();
for (int i = 0; i < 10; ++i) {
std::unique_ptr<HloModule> other_module = CreateEvictAndPrefetchModule();
AssignMemorySpace(other_module.get());
EXPECT_EQ(module_str, other_module->ToString());
}
}
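// An in-place op (the dynamic-update-slice fusion) must reuse its operand's
// alternate-memory offset, since the fusion output aliases its input.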
TEST_F(MemorySpaceAssignmentTest, InPlaceOp) {
absl::string_view hlo_string = R"(
HloModule Module, is_scheduled=true
fused_computation {
param0 = f32[2,3] parameter(0)
constant.1 = f32[] constant(0)
broadcast = f32[2,1] broadcast(constant.1), dimensions={}
constant.3 = s32[] constant(0)
ROOT dynamic-update-slice.5 = f32[2,3] dynamic-update-slice(param0, broadcast, constant.3, constant.3)
}
ENTRY main {
param = f32[2,3] parameter(0)
negate = f32[2,3] negate(param)
fusion = f32[2,3] fusion(negate), kind=kLoop, calls=fused_computation
ROOT add = f32[2,3] add(fusion, fusion)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
auto preset_assignments = AssignMemorySpace(module.get());
HloInstruction* negate_instruction =
module->entry_computation()->GetInstructionWithName("negate");
int64_t negate_offset =
GetAlternateMemoryOffset(*preset_assignments, negate_instruction);
HloInstruction* fusion_instruction =
module->entry_computation()->GetInstructionWithName("fusion");
int64_t fusion_offset =
GetAlternateMemoryOffset(*preset_assignments, fusion_instruction);
EXPECT_EQ(negate_offset, fusion_offset);
EXPECT_NE(negate_offset, -1);
}
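// Same in-place aliasing constraint, with the dynamic-update-slice fusion
// inside a conditional branch; only checks that assignment succeeds.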
TEST_F(MemorySpaceAssignmentTest, ConditionalInPlaceOp) {
absl::string_view hlo_string = R"(
HloModule Module, is_scheduled=true
fused_computation {
param0 = f32[2,3] parameter(0)
constant.1 = f32[] constant(0)
broadcast = f32[2,1] broadcast(constant.1), dimensions={}
constant.3 = s32[] constant(0)
ROOT dynamic-update-slice.5 = f32[2,3] dynamic-update-slice(param0, broadcast, constant.3, constant.3)
}
true_computation {
p0 = (f32[2,3]) parameter(0)
gte = f32[2,3] get-tuple-element(p0), index=0
ROOT neg1 = f32[2,3] negate(gte)
}
false_computation {
p0 = (f32[2,3]) parameter(0)
gte = f32[2,3] get-tuple-element(p0), index=0
neg2 = f32[2,3] negate(gte)
ROOT fusion = f32[2,3] fusion(neg2), kind=kLoop, calls=fused_computation
}
ENTRY entry {
p0 = f32[2,3] parameter(0)
p1 = pred[] parameter(1)
copy = f32[2,3] copy(p0)
tuple = (f32[2,3]) tuple(copy)
ROOT conditional = f32[2,3] conditional(p1, tuple, tuple), true_computation=true_computation, false_computation=false_computation
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
AssignMemorySpace(module.get());
}
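// When uses and positions on non-main execution threads are disallowed in
// the alternate memory, the async-start/async-done pair and every
// instruction in the called computation must stay in default memory.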
TEST_F(MemorySpaceAssignmentTest, AsyncCallDisableAlternateMem) {
absl::string_view hlo_string = R"(
HloModule Module, is_scheduled=true
called_comp {
p0 = f32[2,3] parameter(0)
negate10 = f32[2,3] negate(p0)
negate11 = f32[2,3] negate(negate10)
negate12 = f32[2,3] negate(negate11)
negate13 = f32[2,3] negate(negate12)
negate14 = f32[2,3] negate(negate13)
ROOT negate15 = f32[2,3] negate(negate14)
}, execution_thread="foobar"
async_comp {
p0 = f32[2,3] parameter(0)
ROOT call = f32[2,3] call(p0), to_apply=called_comp
}, execution_thread="foobar"
ENTRY entry {
p0 = f32[2,3] parameter(0)
negate0 = f32[2,3] negate(p0)
negate1 = f32[2,3] negate(negate0)
negate2 = f32[2,3] negate(negate1)
negate3 = f32[2,3] negate(negate2)
negate4 = f32[2,3] negate(negate3)
async-start = ((f32[2,3]), f32[2,3], f32[2]) async-start(negate1), async_execution_thread="foobar", calls=async_comp
async-done = f32[2,3] async-done(async-start), async_execution_thread="foobar", calls=async_comp
add0 = f32[2,3] add(negate0, async-done)
negate5 = f32[2,3] negate(add0)
negate6 = f32[2,3] negate(negate5)
negate7 = f32[2,3] negate(negate6)
negate8 = f32[2,3] negate(negate7)
negate9 = f32[2,3] negate(negate8)
ROOT add1 = f32[2,3] add(negate9, async-done)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
Options options = DefaultMemorySpaceOptions();
options.is_use_allowed_in_alternate_mem_fn = [](const HloUse& use) {
return use.instruction->opcode() != HloOpcode::kAsyncStart &&
use.instruction->opcode() != HloOpcode::kAsyncDone &&
use.instruction->parent()->IsMainThread();
};
options.is_position_allowed_in_alternate_mem_fn = [](const HloPosition& pos) {
return pos.instruction->opcode() != HloOpcode::kAsyncStart &&
pos.instruction->opcode() != HloOpcode::kAsyncDone &&
pos.instruction->parent()->IsMainThread();
};
AssignMemorySpace(module.get(), options);
auto has_alternate_memory_allocation =
[&](const HloInstruction* instruction) {
bool result = false;
auto shape_has_alternate_memory_allocation =
            [&](const Shape& subshape, const ShapeIndex& /*index*/) {
if (subshape.IsArray() &&
subshape.layout().memory_space() == kAlternateMemorySpace) {
result = true;
}
};
ShapeUtil::ForEachSubshape(instruction->shape(),
shape_has_alternate_memory_allocation);
for (const HloInstruction* operand : instruction->operands()) {
ShapeUtil::ForEachSubshape(operand->shape(),
shape_has_alternate_memory_allocation);
}
return result;
};
const HloInstruction* async_start =
FindInstruction(module.get(), "async-start");
const HloInstruction* async_done =
FindInstruction(module.get(), "async-done");
EXPECT_FALSE(has_alternate_memory_allocation(async_start));
EXPECT_FALSE(has_alternate_memory_allocation(async_done));
for (const HloInstruction* instruction :
async_start->async_wrapped_instruction()
->called_computations()[0]
->instructions()) {
EXPECT_FALSE(has_alternate_memory_allocation(instruction));
}
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Add(op::Negate(),
op::AsyncCopy(kAlternateMemorySpace, kDefaultMemorySpace,
op::AsyncDone())));
EXPECT_THAT(async_start,
op::AsyncStart(op::AsyncCopy(
kDefaultMemorySpace, kAlternateMemorySpace, op::Negate())));
}
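// Tests inefficient_use_to_copy_ratio: with a ratio of 0 the in-place fusion
// is prefetched into (and evicted from) the alternate memory, while with a
// ratio of 0.5 the use is deemed inefficient and stays in default memory.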
TEST_F(MemorySpaceAssignmentTest, InefficientAllocation) {
absl::string_view hlo_string = R"(
HloModule Module, is_scheduled=true
fused_computation {
param0 = f32[2,3] parameter(0)
constant.1 = f32[] constant(0)
broadcast = f32[2,1] broadcast(constant.1), dimensions={}
constant.3 = s32[] constant(0)
ROOT dynamic-update-slice.5 = f32[2,3] dynamic-update-slice(param0, broadcast, constant.3, constant.3)
}
ENTRY entry {
p0 = f32[2,3] parameter(0)
p1 = pred[] parameter(1)
p2 = f32[2,3] parameter(2)
neg0 = f32[2,3] negate(p2)
neg1 = f32[2,3] negate(neg0)
neg2 = f32[2,3] negate(neg1)
neg3 = f32[2,3] negate(neg2)
neg4 = f32[2,3] negate(neg3)
neg5 = f32[2,3] negate(neg4)
neg6 = f32[2,3] negate(neg5)
neg7 = f32[2,3] negate(neg6)
fusion = f32[2,3] fusion(p0), kind=kLoop, calls=fused_computation
neg8 = f32[2,3] negate(neg7)
neg9 = f32[2,3] negate(neg8)
neg10 = f32[2,3] negate(neg9)
neg11 = f32[2,3] negate(neg10)
neg12 = f32[2,3] negate(neg11)
neg13 = f32[2,3] negate(neg12)
neg14 = f32[2,3] negate(neg13)
neg15 = f32[2,3] negate(neg14)
ROOT tuple = (f32[2,3], f32[2,3]) tuple(fusion, neg15)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
Options options = DefaultMemorySpaceOptions();
options.enable_cross_program_prefetch = false;
options.inefficient_use_to_copy_ratio = 0.0;
  AssignMemorySpaceUsingCostAnalysis(module.get(),
                                     /*memory_space_options_override=*/options);
EXPECT_THAT(
module->entry_computation()->root_instruction(),
op::Tuple(op::AsyncCopy(kDefaultMemorySpace, kAlternateMemorySpace,
op::Fusion(op::AsyncCopy(kAlternateMemorySpace,
kDefaultMemorySpace,
op::Parameter()))),
op::Negate()));
TF_ASSERT_OK_AND_ASSIGN(module, ParseAndReturnVerifiedModule(hlo_string));
options.inefficient_use_to_copy_ratio = 0.5;
  AssignMemorySpaceUsingCostAnalysis(module.get(),
                                     /*memory_space_options_override=*/options);
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Tuple(op::Fusion(op::Parameter()), op::Negate()));
}
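// Regression test for a livelock in the inefficient-allocation logic; the
// very slow transcendentals reproduce the conditions of the original bug.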
TEST_F(MemorySpaceAssignmentTest, InefficientAllocationLivelockBug) {
absl::string_view hlo_string = R"(
HloModule Module, is_scheduled=true
fused_computation_1 {
param0 = f32[5,4] parameter(0)
constant.1 = f32[] constant(0)
broadcast = f32[5,1] broadcast(constant.1), dimensions={}
constant.3 = s32[] constant(0)
ROOT dynamic-update-slice.5 = f32[5,4] dynamic-update-slice(param0, broadcast, constant.3, constant.3)
}
fused_computation_2 {
param0 = f32[5,4] parameter(0)
constant.1 = f32[] constant(0)
broadcast = f32[5,1] broadcast(constant.1), dimensions={}
constant.3 = s32[] constant(0)
ROOT dynamic-update-slice.5 = f32[5,4] dynamic-update-slice(param0, broadcast, constant.3, constant.3)
}
ENTRY entry {
p0 = f32[5,4] parameter(0)
p1 = pred[] parameter(1)
p2 = f32[2,3] parameter(2)
neg0 = f32[2,3] negate(p2)
neg1 = f32[2,3] negate(neg0)
neg2 = f32[2,3] negate(neg1)
neg3 = f32[2,3] negate(neg2)
neg4 = f32[2,3] negate(neg3)
neg5 = f32[2,3] negate(neg4)
neg6 = f32[2,3] negate(neg5)
neg7 = f32[2,3] negate(neg6)
fusion.1 = f32[5,4] fusion(p0), kind=kLoop, calls=fused_computation_1
tanh = f32[2,3] tanh(neg7)
fusion.2 = f32[5,4] fusion(fusion.1), kind=kLoop, calls=fused_computation_2
neg8 = f32[2,3] negate(tanh)
neg9 = f32[2,3] negate(neg8)
neg10 = f32[2,3] negate(neg0)
neg11 = f32[2,3] negate(neg10)
neg12 = f32[2,3] negate(neg11)
ROOT tuple = (f32[5,4], f32[2,3]) tuple(fusion.2, neg12)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
Options options = DefaultMemorySpaceOptions();
options.enable_cross_program_prefetch = false;
options.inefficient_use_to_copy_ratio = 0.5;
HloCostAnalysis::Options hlo_cost_options = DefaultHloCostAnalysisOptions();
hlo_cost_options.set_transcendentals_per_second(0.4);
  AssignMemorySpaceUsingCostAnalysis(
      module.get(), /*memory_space_options_override=*/options,
      /*cost_analysis_options_override=*/std::nullopt,
      /*hlo_cost_options_override=*/hlo_cost_options);
}
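// Same livelock regression, with the potentially inefficient use inside a
// conditional's called computation.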
TEST_F(MemorySpaceAssignmentTest,
CalledComputationInefficientAllocationLiveLockBug) {
absl::string_view hlo_string = R"(
HloModule CondAllocation, is_scheduled=true
true_computation {
p0 = (f32[3], f32[3]) parameter(0)
gte = f32[3] get-tuple-element(p0), index=0
neg1 = f32[3] negate(gte)
ROOT tuple1 = (f32[3]) tuple(neg1)
}
false_computation {
p0 = (f32[3], f32[3]) parameter(0)
gte = f32[3] get-tuple-element(p0), index=0
neg2 = f32[3] negate(gte)
ROOT tuple2 = (f32[3]) tuple(neg2)
}
ENTRY entry {
p0 = f32[3] parameter(0)
p1 = pred[] parameter(1)
p2 = f32[3] parameter(2)
copy0 = f32[3] copy(p0)
negate0 = f32[3] negate(p0)
negate1 = f32[3] negate(negate0)
negate2 = f32[3] negate(negate1)
negate3 = f32[3] negate(negate2)
negate4 = f32[3] negate(negate3)
negate5 = f32[3] negate(negate4)
negate6 = f32[3] negate(negate5)
negate7 = f32[3] negate(negate6)
negate8 = f32[3] negate(negate7)
tuple = (f32[3], f32[3]) tuple(copy0, p2)
conditional = (f32[3]) conditional(p1, tuple, tuple), true_computation=true_computation, false_computation=false_computation
gte = f32[3] get-tuple-element(conditional), index=0
ROOT add = f32[3] add(gte, negate8)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
Options options = DefaultMemorySpaceOptions();
options.enable_cross_program_prefetch = false;
options.inefficient_use_to_copy_ratio = 0.5;
HloCostAnalysis::Options hlo_cost_options = DefaultHloCostAnalysisOptions();
hlo_cost_options.set_transcendentals_per_second(0.4);
  AssignMemorySpaceUsingCostAnalysis(
      module.get(), /*memory_space_options_override=*/options,
      /*cost_analysis_options_override=*/std::nullopt,
      /*hlo_cost_options_override=*/hlo_cost_options);
}
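// The cost model should account for the elapsed time of the in-flight
// collective-permute; param1 should not be prefetched, so negate1 reads the
// parameter directly.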
TEST_F(MemorySpaceAssignmentTest, AsyncOpElapsedTime) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
ENTRY entry {
param0 = bf16[16]{0} parameter(0)
param1 = bf16[4]{0} parameter(1)
collective-permute-start = (bf16[16]{0}, bf16[16]{0}, u32[], u32[]) collective-permute-start(param0), source_target_pairs={{0,1},{1,2},{2,3}}
negate1 = bf16[4]{0} negate(param1)
collective-permute-done = bf16[16]{0} collective-permute-done(collective-permute-start)
ROOT negate2 = bf16[4]{0} negate(negate1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
AssignMemorySpaceUsingCostAnalysis(module.get());
EXPECT_THAT(FindInstruction(module.get(), "negate1")->operand(0),
op::Parameter(1));
}
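// Regression test for custom calls with output_to_operand_aliasing: the
// chain of aliased custom-call buffers must be assigned without violating
// the aliasing.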
TEST_F(MemorySpaceAssignmentTest, AliasedOperandBug) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
ENTRY entry {
param0 = f32[4,4]{0,1} parameter(0)
param1 = f32[4]{0} parameter(1)
param2 = f32[4,4]{0,1} parameter(2)
negate0 = f32[4]{0} negate(param1)
negate1 = f32[4]{0} negate(negate0)
negate2 = f32[4]{0} negate(negate1)
negate3 = f32[4]{0} negate(negate2)
negate4 = f32[4]{0} negate(negate3)
negate5 = f32[4]{0} negate(negate4)
custom_call1 = f32[4,4]{0,1} custom-call(param0), custom_call_target="FooBar", output_to_operand_aliasing={{}: (0, {})}
tanh = f32[4,4]{0,1} tanh(param2)
negate6 = f32[4]{0} negate(negate5)
negate7 = f32[4]{0} negate(negate6)
negate8 = f32[4]{0} negate(negate7)
negate9 = f32[4]{0} negate(negate8)
negate10 = f32[4]{0} negate(negate9)
negate11 = f32[4]{0} negate(negate10)
negate12 = f32[4]{0} negate(negate11)
negate13 = f32[4]{0} negate(negate12)
negate14 = f32[4]{0} negate(negate13)
negate15 = f32[4]{0} negate(negate14)
negate16 = f32[4]{0} negate(negate15)
custom_call2 = f32[4,4]{0,1} custom-call(custom_call1), custom_call_target="FooBar", output_to_operand_aliasing={{}: (0, {})}
custom_call3 = f32[4,4]{0,1} custom-call(param0, custom_call2), custom_call_target="FooBar", output_to_operand_aliasing={{}: (0, {})}
ROOT root = f32[4,4]{0,1} add(tanh, custom_call2)
}
)";
MsaBufferIntervalCompare buffer_interval_compare =
[](const MsaBufferInterval& a, const MsaBufferInterval& b) {
auto get_inst_priority = [](const HloInstruction* instruction) {
if (instruction->name() == "param2") {
return 0;
}
if (instruction->name() == "param0") {
return 1;
}
return 2;
};
return get_inst_priority(a.buffer->defining_instruction()) <
get_inst_priority(b.buffer->defining_instruction());
};
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
InstructionCountPrefetchIntervalPicker prefetch_interval_picker(2, 10);
Options options = DefaultMemorySpaceOptions();
AssignMemorySpace(module.get(), options, buffer_interval_compare,
&prefetch_interval_picker);
}
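// Models an async op as start/update/done custom fusions whose aliased
// outputs require contiguous allocations. With a short live range, the
// contiguous buffers all fit in the alternate memory.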
TEST_F(MemorySpaceAssignmentTest, AsyncOpCustomFusionShortLiveRange) {
absl::string_view hlo_string = R"(
HloModule Module, is_scheduled=true
fused_computation_start {
param0 = f32[2,1] parameter(0)
negate = f32[2,1] negate(param0)
ROOT custom-call = (f32[2,1], f32[2,1], u32[], u32[]) custom-call(negate), custom_call_target="AsyncOpStart"
}
fused_computation_update {
param0 = f32[2,1] parameter(0)
param1 = f32[2,1] parameter(1)
param2 = f32[2,1] parameter(2)
param3 = f32[2,1] parameter(3)
param4 = u32[] parameter(4)
param5 = u32[] parameter(5)
add = f32[2,1] add(param0, param1)
negate = f32[2,1] negate(param2)
ROOT tuple = (f32[2,1], f32[2,1], f32[2,1], f32[2,1], u32[], u32[]) tuple(add, param2, param3, negate, param4, param5)
}
fused_computation_done {
param0 = f32[2,1] parameter(0)
param1 = f32[2,1] parameter(1)
param2 = u32[] parameter(2)
param3 = u32[] parameter(3)
negate = f32[2,1] negate(param0)
ROOT custom-call = f32[2,1] custom-call(param0, param1, negate, param2, param3), custom_call_target="AsyncOpDone"
}
ENTRY main {
param = f32[2,1] parameter(0)
negate1 = f32[2,1] negate(param)
negate2 = f32[2,1] negate(negate1)
fusion1 = (f32[2,1], f32[2,1], u32[], u32[]) fusion(negate1), kind=kCustom, output_to_operand_aliasing={{0}: (0, {})}, calls=fused_computation_start
negate3 = f32[2,1] negate(negate2)
negate4 = f32[2,1] negate(negate3)
gte0 = f32[2,1] get-tuple-element(fusion1), index=0
gte1 = f32[2,1] get-tuple-element(fusion1), index=1
gte2 = u32[] get-tuple-element(fusion1), index=2
gte3 = u32[] get-tuple-element(fusion1), index=3
fusion2 = (f32[2,1], f32[2,1], f32[2,1], f32[2,1], u32[], u32[]) fusion(negate4, negate2, gte0, gte1, gte2, gte3), kind=kLoop, output_to_operand_aliasing={{1}: (2, {}), {2}: (3, {}), {3}: (3, {}), {4}: (4, {}), {5}: (5, {})}, calls=fused_computation_update
gte4 = f32[2,1] get-tuple-element(fusion2), index=0
negate5 = f32[2,1] negate(gte4)
gte5 = f32[2,1] get-tuple-element(fusion2), index=1
gte6 = f32[2,1] get-tuple-element(fusion2), index=2
gte7 = u32[] get-tuple-element(fusion2), index=4
gte8 = u32[] get-tuple-element(fusion2), index=5
fusion3 = f32[2,1] fusion(gte5, gte6, gte7, gte8), kind=kCustom, output_to_operand_aliasing={{}: (1, {})}, calls=fused_computation_done
ROOT add = f32[2,1] add(negate5, fusion3)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
Options options = DefaultMemorySpaceOptions();
options.position_requires_contiguous_allocation_fn =
[](const HloPosition& position) {
std::string_view inst_name = position.instruction->name();
if (inst_name == "fusion1" ||
(inst_name == "fusion2" && position.index != ShapeIndex({0}))) {
return true;
}
return false;
};
AssignMemorySpace(module.get(), options);
HloInstruction* fusion1 =
module->entry_computation()->GetInstructionWithName("fusion1");
HloInstruction* fusion2 =
module->entry_computation()->GetInstructionWithName("fusion2");
HloInstruction* fusion3 =
module->entry_computation()->GetInstructionWithName("fusion3");
EXPECT_THAT(fusion2->operand(2), op::GetTupleElement(fusion1, 0));
EXPECT_THAT(fusion2->operand(3), op::GetTupleElement(fusion1, 1));
EXPECT_THAT(fusion3->operand(0), op::GetTupleElement(fusion2, 1));
EXPECT_THAT(fusion3->operand(1), op::GetTupleElement(fusion2, 2));
EXPECT_THAT(fusion2->operand(2)->shape().layout().memory_space(),
kAlternateMemorySpace);
EXPECT_THAT(fusion2->operand(3)->shape().layout().memory_space(),
kAlternateMemorySpace);
EXPECT_THAT(fusion3->operand(0)->shape().layout().memory_space(),
kAlternateMemorySpace);
EXPECT_THAT(fusion3->operand(1)->shape().layout().memory_space(),
kAlternateMemorySpace);
EXPECT_THAT(fusion2->operand(0)->shape().layout().memory_space(),
kAlternateMemorySpace);
EXPECT_THAT(fusion2->operand(1)->shape().layout().memory_space(),
kAlternateMemorySpace);
EXPECT_THAT(
ShapeUtil::GetSubshape(fusion2->shape(), {0}).layout().memory_space(),
kAlternateMemorySpace);
}
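// Same async fusion pattern with a long live range between start and done:
// the contiguous aliased buffers stay in default memory, while the
// non-aliased outputs can still use the alternate memory.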
TEST_F(MemorySpaceAssignmentTest, AsyncOpCustomFusionLongLiveRange) {
absl::string_view hlo_string = R"(
HloModule Module, is_scheduled=true
fused_computation_start {
param0 = f32[2,1] parameter(0)
negate = f32[2,1] negate(param0)
ROOT custom-call = (f32[2,1], f32[2,1], u32[], u32[]) custom-call(negate), custom_call_target="AsyncOpStart"
}
fused_computation_update {
param0 = f32[2,1] parameter(0)
param1 = f32[2,1] parameter(1)
param2 = f32[2,1] parameter(2)
param3 = f32[2,1] parameter(3)
param4 = u32[] parameter(4)
param5 = u32[] parameter(5)
add = f32[2,1] add(param0, param1)
negate = f32[2,1] negate(param2)
ROOT tuple = (f32[2,1], f32[2,1], f32[2,1], f32[2,1], u32[], u32[]) tuple(add, param2, param3, negate, param4, param5)
}
fused_computation_done {
param0 = f32[2,1] parameter(0)
param1 = f32[2,1] parameter(1)
param2 = u32[] parameter(2)
param3 = u32[] parameter(3)
negate = f32[2,1] negate(param0)
ROOT custom-call = f32[2,1] custom-call(param0, param1, negate, param2, param3), custom_call_target="AsyncOpDone"
}
ENTRY main {
param = f32[2,1] parameter(0)
negate1 = f32[2,1] negate(param)
negate2 = f32[2,1] negate(negate1)
fusion1 = (f32[2,1], f32[2,1], u32[], u32[]) fusion(negate1), kind=kCustom, output_to_operand_aliasing={{0}: (0, {})}, calls=fused_computation_start
negate3 = f32[2,1] negate(negate2)
negate4 = f32[2,1] negate(negate3)
negate5 = f32[2,1] negate(negate4)
negate6 = f32[2,1] negate(negate5)
negate7 = f32[2,1] negate(negate6)
negate8 = f32[2,1] negate(negate7)
negate9 = f32[2,1] negate(negate8)
negate10 = f32[2,1] negate(negate9)
negate11 = f32[2,1] negate(negate10)
negate12 = f32[2,1] negate(negate11)
gte0 = f32[2,1] get-tuple-element(fusion1), index=0
gte1 = f32[2,1] get-tuple-element(fusion1), index=1
gte2 = u32[] get-tuple-element(fusion1), index=2
gte3 = u32[] get-tuple-element(fusion1), index=3
fusion2 = (f32[2,1], f32[2,1], f32[2,1], f32[2,1], u32[], u32[]) fusion(negate12, negate2, gte0, gte1, gte2, gte3), kind=kLoop, output_to_operand_aliasing={{1}: (2, {}), {2}: (3, {}), {3}: (3, {}), {4}: (4, {}), {5}: (5, {})}, calls=fused_computation_update
gte4 = f32[2,1] get-tuple-element(fusion2), index=0
negate13 = f32[2,1] negate(gte4)
gte5 = f32[2,1] get-tuple-element(fusion2), index=1
gte6 = f32[2,1] get-tuple-element(fusion2), index=2
gte7 = u32[] get-tuple-element(fusion2), index=4
gte8 = u32[] get-tuple-element(fusion2), index=5
fusion3 = f32[2,1] fusion(gte5, gte6, gte7, gte8), kind=kCustom, output_to_operand_aliasing={{}: (1, {})}, calls=fused_computation_done
ROOT add = f32[2,1] add(negate13, fusion3)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
Options options = DefaultMemorySpaceOptions();
options.position_requires_contiguous_allocation_fn =
[](const HloPosition& position) {
std::string_view inst_name = position.instruction->name();
if (inst_name == "fusion1" ||
(inst_name == "fusion2" && position.index != ShapeIndex({0}))) {
return true;
}
return false;
};
AssignMemorySpace(module.get(), options);
HloInstruction* fusion1 =
module->entry_computation()->GetInstructionWithName("fusion1");
HloInstruction* fusion2 =
module->entry_computation()->GetInstructionWithName("fusion2");
HloInstruction* fusion3 =
module->entry_computation()->GetInstructionWithName("fusion3");
EXPECT_THAT(fusion2->operand(2), op::GetTupleElement(fusion1, 0));
EXPECT_THAT(fusion2->operand(2)->shape().layout().memory_space(),
kDefaultMemorySpace);
EXPECT_THAT(fusion2->operand(3), op::GetTupleElement(fusion1, 1));
EXPECT_THAT(fusion2->operand(3)->shape().layout().memory_space(),
kDefaultMemorySpace);
EXPECT_THAT(fusion3->operand(0), op::GetTupleElement(fusion2, 1));
EXPECT_THAT(fusion3->operand(0)->shape().layout().memory_space(),
kDefaultMemorySpace);
EXPECT_THAT(fusion3->operand(1), op::GetTupleElement(fusion2, 2));
EXPECT_THAT(fusion3->operand(1)->shape().layout().memory_space(),
kDefaultMemorySpace);
EXPECT_THAT(fusion2->operand(0)->shape().layout().memory_space(),
kAlternateMemorySpace);
EXPECT_THAT(fusion2->operand(1)->shape().layout().memory_space(),
kAlternateMemorySpace);
EXPECT_THAT(
ShapeUtil::GetSubshape(fusion2->shape(), {0}).layout().memory_space(),
kAlternateMemorySpace);
}
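// Same pattern with the start results consumed by two update fusions; only
// checks that assignment succeeds.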
TEST_F(MemorySpaceAssignmentTest, AsyncOpCustomFusionMultipleUsers) {
absl::string_view hlo_string = R"(
HloModule Module, is_scheduled=true
fused_computation_start {
param0 = f32[2,1] parameter(0)
negate = f32[2,1] negate(param0)
ROOT custom-call = (f32[2,1], f32[2,1], u32[], u32[]) custom-call(negate), custom_call_target="AsyncOpStart"
}
fused_computation_update1 {
param0 = f32[2,1] parameter(0)
param1 = f32[2,1] parameter(1)
param2 = f32[2,1] parameter(2)
param3 = f32[2,1] parameter(3)
param4 = u32[] parameter(4)
param5 = u32[] parameter(5)
add = f32[2,1] add(param0, param1)
negate = f32[2,1] negate(param2)
ROOT tuple = (f32[2,1], f32[2,1], f32[2,1], f32[2,1], u32[], u32[]) tuple(add, param2, param3, negate, param4, param5)
}
fused_computation_update2 {
param0 = f32[2,1] parameter(0)
param1 = f32[2,1] parameter(1)
param2 = f32[2,1] parameter(2)
param3 = f32[2,1] parameter(3)
param4 = u32[] parameter(4)
param5 = u32[] parameter(5)
add = f32[2,1] add(param0, param1)
negate = f32[2,1] negate(param2)
ROOT tuple = (f32[2,1], f32[2,1], f32[2,1], f32[2,1], u32[], u32[]) tuple(add, param2, param3, negate, param4, param5)
}
fused_computation_done {
param0 = f32[2,1] parameter(0)
param1 = f32[2,1] parameter(1)
param2 = u32[] parameter(2)
param3 = u32[] parameter(3)
negate = f32[2,1] negate(param0)
ROOT custom-call = f32[2,1] custom-call(param0, param1, negate, param2, param3), custom_call_target="AsyncOpDone"
}
ENTRY main {
param = f32[2,1] parameter(0)
negate1 = f32[2,1] negate(param)
negate2 = f32[2,1] negate(negate1)
fusion1 = (f32[2,1], f32[2,1], u32[], u32[]) fusion(negate1), kind=kCustom, output_to_operand_aliasing={{0}: (0, {})}, calls=fused_computation_start
negate3 = f32[2,1] negate(negate2)
negate4 = f32[2,1] negate(negate3)
gte0 = f32[2,1] get-tuple-element(fusion1), index=0
gte1 = f32[2,1] get-tuple-element(fusion1), index=1
gte2 = u32[] get-tuple-element(fusion1), index=2
gte3 = u32[] get-tuple-element(fusion1), index=3
fusion2 = (f32[2,1], f32[2,1], f32[2,1], f32[2,1], u32[], u32[]) fusion(negate4, negate2, gte0, gte1, gte2, gte3), kind=kLoop, output_to_operand_aliasing={{1}: (2, {}), {2}: (3, {}), {3}: (3, {}), {4}: (4, {}), {5}: (5, {})}, calls=fused_computation_update1
gte4 = f32[2,1] get-tuple-element(fusion2), index=0
negate5 = f32[2,1] negate(gte4)
negate10 = f32[2,1] negate(negate5)
negate11 = f32[2,1] negate(negate10)
negate12 = f32[2,1] negate(negate11)
negate13 = f32[2,1] negate(negate12)
negate14 = f32[2,1] negate(negate13)
negate15 = f32[2,1] negate(negate14)
negate16 = f32[2,1] negate(negate15)
negate17 = f32[2,1] negate(negate16)
negate18 = f32[2,1] negate(negate17)
negate19 = f32[2,1] negate(negate18)
fusion3 = (f32[2,1], f32[2,1], f32[2,1], f32[2,1], u32[], u32[]) fusion(negate19, negate2, gte0, gte1, gte2, gte3), kind=kLoop, output_to_operand_aliasing={{1}: (2, {}), {2}: (3, {}), {3}: (3, {}), {4}: (4, {}), {5}: (5, {})}, calls=fused_computation_update2
gte9 = f32[2,1] get-tuple-element(fusion3), index=0
negate6 = f32[2,1] negate(gte9)
gte5 = f32[2,1] get-tuple-element(fusion3), index=1
gte6 = f32[2,1] get-tuple-element(fusion3), index=2
gte7 = u32[] get-tuple-element(fusion3), index=4
gte8 = u32[] get-tuple-element(fusion3), index=5
fusion4 = f32[2,1] fusion(gte5, gte6, gte7, gte8), kind=kCustom, output_to_operand_aliasing={{}: (1, {})}, calls=fused_computation_done
ROOT add = f32[2,1] add(negate6, fusion4)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
Options options = DefaultMemorySpaceOptions();
options.position_requires_contiguous_allocation_fn =
[](const HloPosition& position) {
std::string_view inst_name = position.instruction->name();
if (inst_name == "fusion1" ||
(inst_name == "fusion2" && position.index != ShapeIndex({0})) ||
(inst_name == "fusion3" && position.index != ShapeIndex({0}))) {
return true;
}
return false;
};
AssignMemorySpace(module.get(), options);
}
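// The cross-program prefetch's copy-start should be hoisted to the top of
// the schedule: only the parameter and its get-tuple-element may precede it,
// and it must carry a cross_program_prefetch_index.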
TEST_F(MemorySpaceAssignmentTest, HoistCopyStart) {
absl::string_view hlo_string = R"(
HloModule cross_program_prefetch, is_scheduled=true
ENTRY cross_program_prefetch {
p0 = (f32[8,8]{1,0}, f32[8,2]{1,0}) parameter(0)
get-tuple-element.0 = f32[8,8]{1,0} get-tuple-element(p0), index=0
add.0 = f32[8,8]{1,0} add(get-tuple-element.0, get-tuple-element.0)
get-tuple-element.1 = f32[8,2]{1,0} get-tuple-element(p0), index=1
dot.0 = f32[8,2]{1,0} dot(add.0, get-tuple-element.1), lhs_contracting_dims={1}, rhs_contracting_dims={0}
negate.1 = f32[8,2]{1,0} negate(dot.0)
negate.2 = f32[8,2]{1,0} negate(negate.1)
negate.3 = f32[8,2]{1,0} negate(negate.2)
negate.4 = f32[8,2]{1,0} negate(negate.3)
negate.5 = f32[8,2]{1,0} negate(negate.4)
negate.6 = f32[8,2]{1,0} negate(negate.5)
negate.7 = f32[8,2]{1,0} negate(negate.6)
negate.8 = f32[8,2]{1,0} negate(negate.7)
ROOT dot.1 = f32[2,2]{1,0} dot(negate.8, get-tuple-element.1), lhs_contracting_dims={0}, rhs_contracting_dims={0}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
Options options = DefaultMemorySpaceOptions();
options.enable_cross_program_prefetch = true;
AssignMemorySpace(module.get(), options);
auto cross_program_prefetches = module->CrossProgramPrefetches();
ASSERT_EQ(cross_program_prefetches.size(), 1);
ASSERT_EQ(cross_program_prefetches[0].parameter, 0);
ASSERT_EQ(cross_program_prefetches[0].index, ShapeIndex({1}));
for (auto* instruction : module->schedule()
.sequence(module->entry_computation())
.instructions()) {
auto p0 = op::Parameter(0);
auto get_tuple_element_1 = op::GetTupleElement(p0, 1);
auto copy_start = op::CopyStart(get_tuple_element_1);
EXPECT_THAT(instruction, AnyOf(p0, get_tuple_element_1, copy_start));
if (::testing::Matches(copy_start)(instruction)) {
EXPECT_TRUE(instruction->cross_program_prefetch_index().has_value());
break;
}
}
}
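// Window prefetching: a 32-byte window is requested for each fusion operand;
// the fusion gains extra operands that are async-done results of
// "WindowPrefetch" custom calls.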
TEST_F(MemorySpaceAssignmentTest, WindowPrefetch) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
%fused_computation {
%p0 = bf16[64,8]{1,0:T(8,128)(2,1)} parameter(0)
%p1 = bf16[64,8]{1,0:T(8,128)(2,1)} parameter(1)
%p2 = bf16[64,8]{1,0:T(8,128)(2,1)} parameter(2)
%add0 = bf16[64,8]{1,0:T(8,128)(2,1)} add(%p0, %p1)
ROOT %add1 = bf16[64,8]{1,0:T(8,128)(2,1)} add(%add0, %p2)
}
entry {
%p0 = bf16[64,8]{1,0:T(8,128)(2,1)} parameter(0)
%p1 = bf16[64,8]{1,0:T(8,128)(2,1)} parameter(1)
%p2 = bf16[64,8]{1,0:T(8,128)(2,1)} parameter(2)
ROOT fusion = bf16[64,8]{1,0:T(8,128)(2,1)} fusion(bf16[64,8]{1,0:T(8,128)(2,1)} %p0, bf16[64,8]{1,0:T(8,128)(2,1)} %p1, bf16[64,8]{1,0:T(8,128)(2,1)} %p2), kind=kLoop, calls=%fused_computation
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
auto window_prefetch_detail_fn = [&](const HloInstruction* instruction) {
WindowPrefetchDetail window_prefetch_detail;
const HloInstruction* fusion = FindInstruction(module.get(), "fusion");
if (instruction == fusion) {
for (int i = 0; i < 3; ++i) {
auto* operand = window_prefetch_detail.add_windows();
operand->set_operand(i);
operand->set_size(32);
}
}
return window_prefetch_detail;
};
Options options = DefaultMemorySpaceOptions();
options.enable_window_prefetch = true;
options.window_prefetch_detail_fn = window_prefetch_detail_fn;
  AssignMemorySpace(module.get(), options, /*max_prefetch_interval=*/10,
                    /*min_prefetch_interval=*/0);
const HloInstruction* fusion = FindInstruction(module.get(), "fusion");
EXPECT_EQ(fusion->operand_count(), 5);
for (int i = 3; i < 5; i++) {
const HloInstruction* async_done = fusion->operand(i);
EXPECT_EQ(async_done->opcode(), HloOpcode::kAsyncDone);
EXPECT_EQ(async_done->operand_count(), 1);
EXPECT_TRUE(async_done->async_wrapped_instruction()->IsCustomCall(
"WindowPrefetch"));
const HloInstruction* async_start = async_done->operand(0);
EXPECT_EQ(async_start->opcode(), HloOpcode::kAsyncStart);
EXPECT_EQ(async_start->operand_count(), 1);
EXPECT_TRUE(async_start->async_wrapped_instruction()->IsCustomCall(
"WindowPrefetch"));
}
VLOG(2) << "module: " << module->ToString();
}
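// Unit tests for AsynchronousCopyOrdering: an interval that strictly nests
// inside (or strictly contains) an existing copy's interval violates the
// ordering, while intervals that share an endpoint do not.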
using AsynchronousCopyOrderingTest = ::testing::Test;
TEST_F(AsynchronousCopyOrderingTest, Simple) {
auto alternate_mem_space = MemorySpace::kAlternate;
AsynchronousCopyOrdering ordering;
EXPECT_FALSE(ordering.ViolatesOrdering(3, 11));
ordering.AddCopy({3, 11, 1, alternate_mem_space, 0});
EXPECT_FALSE(ordering.ViolatesOrdering(1, 8));
ordering.AddCopy({1, 8, 1, alternate_mem_space, 1});
EXPECT_FALSE(ordering.ViolatesOrdering(5, 14));
ordering.AddCopy({5, 14, 1, alternate_mem_space, 2});
EXPECT_FALSE(ordering.ViolatesOrdering(7, 14));
ordering.AddCopy({7, 14, 1, alternate_mem_space, 3});
EXPECT_TRUE(ordering.ViolatesOrdering(2, 16));
EXPECT_TRUE(ordering.ViolatesOrdering(9, 12));
EXPECT_TRUE(ordering.ViolatesOrdering(6, 17));
EXPECT_FALSE(ordering.ViolatesOrdering(5, 13));
ordering.AddCopy({5, 13, 1, alternate_mem_space, 4});
EXPECT_FALSE(ordering.ViolatesOrdering(5, 14));
ordering.AddCopy({5, 14, 1, alternate_mem_space, 5});
}
TEST_F(AsynchronousCopyOrderingTest, SameInterval) {
auto alternate_mem_space = MemorySpace::kAlternate;
AsynchronousCopyOrdering ordering;
EXPECT_FALSE(ordering.ViolatesOrdering(1, 5));
EXPECT_FALSE(ordering.ViolatesOrdering(2, 4));
ordering.AddCopy({1, 5, 1, alternate_mem_space, 0});
EXPECT_TRUE(ordering.ViolatesOrdering(2, 4));
ordering.AddCopy({1, 5, 1, alternate_mem_space, 1});
EXPECT_TRUE(ordering.ViolatesOrdering(2, 4));
ordering.AddCopy({1, 5, 1, alternate_mem_space, 2});
EXPECT_TRUE(ordering.ViolatesOrdering(2, 4));
ordering.RemoveCopy({1, 5, 1, alternate_mem_space, 1});
EXPECT_TRUE(ordering.ViolatesOrdering(2, 4));
ordering.RemoveCopy({1, 5, 1, alternate_mem_space, 2});
EXPECT_TRUE(ordering.ViolatesOrdering(2, 4));
ordering.RemoveCopy({1, 5, 1, alternate_mem_space, 0});
EXPECT_FALSE(ordering.ViolatesOrdering(2, 4));
}
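// Unit tests for AsynchronousCopyResource, which tracks per-timestep copy
// bandwidth: a copy consumes resource between its start and end times, and
// displaced demand propagates into later timesteps when a step is exhausted.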
using AsynchronousCopyResourceTest = ::testing::Test;
TEST_F(AsynchronousCopyResourceTest, Simple) {
auto alternate_mem_space = MemorySpace::kAlternate;
AsynchronousCopyResource resource(
{2.0, 3.0, 1.0, 6.0, 7.0, 1.0, 7.0, 2.0, 2.0, 4.0});
EXPECT_TRUE(resource.HasEnoughResource(-1, 3, 5.0));
resource.AddCopy({-1, 3, 5.0, alternate_mem_space, 0});
EXPECT_TRUE(resource.HasEnoughResource(1, 4, 4.0));
resource.AddCopy({1, 4, 4.0, alternate_mem_space, 1});
EXPECT_TRUE(resource.HasEnoughResource(5, 9, 10.0));
resource.AddCopy({5, 9, 10.0, alternate_mem_space, 2});
EXPECT_FALSE(resource.HasEnoughResource(4, 9, 3.0));
EXPECT_TRUE(resource.HasEnoughResource(4, 8, 2.0));
resource.AddCopy({4, 8, 2.0, alternate_mem_space, 3});
}
TEST_F(AsynchronousCopyResourceTest, Propagate) {
auto alternate_mem_space = MemorySpace::kAlternate;
AsynchronousCopyResource resource(
{2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0});
EXPECT_TRUE(resource.HasEnoughResource(6, 10, 2.0));
resource.AddCopy({6, 10, 2.0, alternate_mem_space, 0});
EXPECT_EQ(
resource.GetCurrentResources(),
std::vector<float>({2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 0.0, 2.0, 2.0}));
EXPECT_TRUE(resource.HasEnoughResource(5, 9, 2.0));
resource.AddCopy({5, 9, 2.0, alternate_mem_space, 1});
EXPECT_TRUE(resource.HasEnoughResource(4, 8, 2.0));
resource.AddCopy({4, 8, 2.0, alternate_mem_space, 2});
EXPECT_TRUE(resource.HasEnoughResource(3, 7, 2.0));
resource.AddCopy({3, 7, 2.0, alternate_mem_space, 3});
EXPECT_TRUE(resource.HasEnoughResource(2, 6, 2.0));
resource.AddCopy({2, 6, 2.0, alternate_mem_space, 4});
EXPECT_TRUE(resource.HasEnoughResource(1, 5, 2.0));
resource.AddCopy({1, 5, 2.0, alternate_mem_space, 5});
EXPECT_EQ(
resource.GetCurrentResources(),
std::vector<float>({2.0, 2.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.0, 2.0}));
EXPECT_TRUE(resource.HasEnoughResource(0, 4, 3.0));
resource.AddCopy({0, 4, 3.0, alternate_mem_space, 6});
EXPECT_EQ(
resource.GetCurrentResources(),
std::vector<float>({2.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 2.0}));
EXPECT_TRUE(resource.HasEnoughResource(0, 4, 3.0));
resource.AddCopy({0, 4, 3.0, alternate_mem_space, 7});
EXPECT_EQ(
resource.GetCurrentResources(),
std::vector<float>({2.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0}));
EXPECT_FALSE(resource.HasEnoughResource(0, 4, 1.0));
}
TEST_F(AsynchronousCopyResourceTest, CantPropagate) {
auto alternate_mem_space = MemorySpace::kAlternate;
AsynchronousCopyResource resource(
{2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0});
EXPECT_TRUE(resource.HasEnoughResource(5, 10, 2.0));
resource.AddCopy({5, 10, 2.0, alternate_mem_space, 0});
EXPECT_EQ(
resource.GetCurrentResources(),
std::vector<float>({2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 0.0, 2.0, 2.0, 2.0}));
EXPECT_TRUE(resource.HasEnoughResource(4, 7, 2.0));
resource.AddCopy({4, 7, 2.0, alternate_mem_space, 1});
EXPECT_EQ(
resource.GetCurrentResources(),
std::vector<float>({2.0, 2.0, 2.0, 2.0, 2.0, 0.0, 0.0, 2.0, 2.0, 2.0}));
EXPECT_TRUE(resource.HasEnoughResource(4, 8, 4.0));
resource.AddCopy({4, 8, 4.0, alternate_mem_space, 2});
EXPECT_EQ(
resource.GetCurrentResources(),
std::vector<float>({2.0, 2.0, 2.0, 2.0, 2.0, 0.0, 0.0, 0.0, 0.0, 2.0}));
EXPECT_FALSE(resource.HasEnoughResource(3, 6, 4.0));
}
TEST_F(AsynchronousCopyResourceTest, Nested) {
auto alternate_mem_space = MemorySpace::kAlternate;
AsynchronousCopyResource resource({2.0, 2.0, 2.0, 2.0, 2.0});
EXPECT_TRUE(resource.HasEnoughResource(1, 3, 2.0));
resource.AddCopy({1, 3, 2.0, alternate_mem_space, 0});
EXPECT_EQ(resource.GetCurrentResources(),
std::vector<float>({2.0, 2.0, 0.0, 2.0, 2.0}));
EXPECT_FALSE(resource.HasEnoughResource(0, 4, 4.0));
}
TEST_F(AsynchronousCopyResourceTest, Remove) {
auto alternate_mem_space = MemorySpace::kAlternate;
AsynchronousCopyResource resource({2.0, 2.0, 2.0, 2.0, 2.0});
AsynchronousCopy copy1{2, 5, 2.0, alternate_mem_space, 0};
AsynchronousCopy copy2{-1, 2, 3.0, alternate_mem_space, 1};
AsynchronousCopy copy3{0, 4, 4.0, alternate_mem_space, 2};
EXPECT_TRUE(resource.HasEnoughResource(2, 5, 2.0));
resource.AddCopy(copy1);
EXPECT_EQ(resource.GetCurrentResources(),
std::vector<float>({2.0, 2.0, 2.0, 0.0, 2.0}));
EXPECT_TRUE(resource.HasEnoughResource(-1, 2, 3.0));
resource.AddCopy(copy2);
EXPECT_EQ(resource.GetCurrentResources(),
std::vector<float>({0.0, 1.0, 2.0, 0.0, 2.0}));
EXPECT_TRUE(resource.HasEnoughResource(0, 4, 4.0));
resource.AddCopy(copy3);
EXPECT_EQ(resource.GetCurrentResources(),
std::vector<float>({0.0, 0.0, 0.0, 0.0, 1.0}));
resource.RemoveCopy(copy3);
EXPECT_EQ(resource.GetCurrentResources(),
std::vector<float>({0.0, 1.0, 2.0, 0.0, 2.0}));
resource.RemoveCopy(copy1);
EXPECT_EQ(resource.GetCurrentResources(),
std::vector<float>({0.0, 1.0, 2.0, 2.0, 2.0}));
resource.RemoveCopy(copy2);
EXPECT_EQ(resource.GetCurrentResources(),
std::vector<float>({2.0, 2.0, 2.0, 2.0, 2.0}));
}
TEST_F(AsynchronousCopyResourceTest, NestedRemove) {
auto alternate_mem_space = MemorySpace::kAlternate;
AsynchronousCopyResource resource({2.0, 2.0, 2.0, 2.0, 2.0});
AsynchronousCopy copy1{1, 3, 2.0, alternate_mem_space, 0};
AsynchronousCopy copy2{0, 4, 4.0, alternate_mem_space, 1};
EXPECT_TRUE(resource.HasEnoughResource(1, 3, 2.0));
resource.AddCopy(copy1);
EXPECT_EQ(resource.GetCurrentResources(),
std::vector<float>({2.0, 2.0, 0.0, 2.0, 2.0}));
EXPECT_FALSE(resource.HasEnoughResource(0, 4, 4.0));
resource.RemoveCopy(copy1);
  auto current_resources = resource.GetCurrentResources();
  EXPECT_EQ(current_resources,
            std::vector<float>({2.0, 2.0, 2.0, 2.0, 2.0}));
EXPECT_TRUE(resource.HasEnoughResource(0, 4, 4.0));
resource.AddCopy(copy2);
EXPECT_EQ(resource.GetCurrentResources(),
std::vector<float>({2.0, 0.0, 0.0, 2.0, 2.0}));
EXPECT_FALSE(resource.HasEnoughResource(1, 3, 2.0));
resource.RemoveCopy(copy2);
EXPECT_EQ(resource.GetCurrentResources(),
std::vector<float>({2.0, 2.0, 2.0, 2.0, 2.0}));
EXPECT_TRUE(resource.HasEnoughResource(1, 3, 2.0));
}
TEST_F(AsynchronousCopyResourceTest, PropagateRemove) {
auto alternate_mem_space = MemorySpace::kAlternate;
AsynchronousCopyResource resource(
{2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0});
EXPECT_TRUE(resource.HasEnoughResource(6, 10, 2.0));
resource.AddCopy({6, 10, 2.0, alternate_mem_space, 0});
EXPECT_EQ(
resource.GetCurrentResources(),
std::vector<float>({2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 0.0, 2.0, 2.0}));
EXPECT_TRUE(resource.HasEnoughResource(5, 9, 2.0));
resource.AddCopy({5, 9, 2.0, alternate_mem_space, 1});
EXPECT_TRUE(resource.HasEnoughResource(4, 8, 2.0));
resource.AddCopy({4, 8, 2.0, alternate_mem_space, 2});
EXPECT_TRUE(resource.HasEnoughResource(3, 7, 2.0));
resource.AddCopy({3, 7, 2.0, alternate_mem_space, 3});
EXPECT_TRUE(resource.HasEnoughResource(2, 6, 2.0));
resource.AddCopy({2, 6, 2.0, alternate_mem_space, 4});
EXPECT_TRUE(resource.HasEnoughResource(1, 5, 2.0));
resource.AddCopy({1, 5, 2.0, alternate_mem_space, 5});
EXPECT_EQ(
resource.GetCurrentResources(),
std::vector<float>({2.0, 2.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.0, 2.0}));
AsynchronousCopy copy1{0, 4, 3.0, alternate_mem_space, 6};
EXPECT_TRUE(resource.HasEnoughResource(0, 4, 3.0));
resource.AddCopy(copy1);
EXPECT_EQ(
resource.GetCurrentResources(),
std::vector<float>({2.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 2.0}));
EXPECT_TRUE(resource.HasEnoughResource(0, 5, 3.0));
AsynchronousCopy copy2{0, 5, 3.0, alternate_mem_space, 7};
resource.AddCopy(copy2);
EXPECT_EQ(
resource.GetCurrentResources(),
std::vector<float>({2.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0}));
resource.RemoveCopy(copy2);
EXPECT_EQ(
resource.GetCurrentResources(),
std::vector<float>({2.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 2.0}));
resource.RemoveCopy(copy1);
EXPECT_EQ(
resource.GetCurrentResources(),
std::vector<float>({2.0, 2.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.0, 2.0}));
}
TEST_F(AsynchronousCopyResourceTest, StartAtZeroAndRemove) {
auto alternate_mem_space = MemorySpace::kAlternate;
AsynchronousCopyResource resource({0.0, 0.0, 1.0, 1.0, 2.0});
AsynchronousCopy copy1{0, 4, 2.0, alternate_mem_space, 0};
EXPECT_TRUE(resource.HasEnoughResource(0, 4, 2.0));
resource.AddCopy(copy1);
EXPECT_EQ(resource.GetCurrentResources(),
std::vector<float>({0.0, 0.0, 0.0, 0.0, 2.0}));
resource.RemoveCopy(copy1);
EXPECT_EQ(resource.GetCurrentResources(),
std::vector<float>({0.0, 0.0, 1.0, 1.0, 2.0}));
resource.AddCopy(copy1);
EXPECT_EQ(resource.GetCurrentResources(),
std::vector<float>({0.0, 0.0, 0.0, 0.0, 2.0}));
}
TEST_F(AsynchronousCopyResourceTest, OutOfOrderRemovalSameStartTime) {
auto alternate_mem_space = MemorySpace::kAlternate;
AsynchronousCopyResource resource({2.0, 2.0, 2.0, 2.0, 2.0});
AsynchronousCopy copy1{1, 3, 1.0, alternate_mem_space, 0};
AsynchronousCopy copy2{1, 4, 2.0, alternate_mem_space, 1};
EXPECT_TRUE(resource.HasEnoughResource(1, 3, 1.0));
resource.AddCopy(copy1);
EXPECT_EQ(resource.GetCurrentResources(),
std::vector<float>({2.0, 2.0, 1.0, 2.0, 2.0}));
EXPECT_TRUE(resource.HasEnoughResource(1, 4, 2.0));
resource.AddCopy(copy2);
EXPECT_EQ(resource.GetCurrentResources(),
std::vector<float>({2.0, 2.0, 0.0, 1.0, 2.0}));
resource.RemoveCopy(copy1);
EXPECT_EQ(resource.GetCurrentResources(),
std::vector<float>({2.0, 2.0, 0.0, 2.0, 2.0}));
AsynchronousCopy copy3{1, 5, 1.0, alternate_mem_space, 2};
AsynchronousCopy copy4{1, 5, 1.0, alternate_mem_space, 3};
AsynchronousCopy copy5{1, 5, 1.0, alternate_mem_space, 4};
AsynchronousCopy copy6{1, 5, 1.0, alternate_mem_space, 5};
EXPECT_TRUE(resource.HasEnoughResource(1, 5, 1.0));
resource.AddCopy(copy3);
EXPECT_EQ(resource.GetCurrentResources(),
std::vector<float>({2.0, 2.0, 0.0, 1.0, 2.0}));
EXPECT_TRUE(resource.HasEnoughResource(1, 5, 1.0));
resource.AddCopy(copy4);
EXPECT_EQ(resource.GetCurrentResources(),
std::vector<float>({2.0, 2.0, 0.0, 0.0, 2.0}));
EXPECT_TRUE(resource.HasEnoughResource(1, 5, 1.0));
resource.AddCopy(copy5);
EXPECT_EQ(resource.GetCurrentResources(),
std::vector<float>({2.0, 2.0, 0.0, 0.0, 1.0}));
EXPECT_TRUE(resource.HasEnoughResource(1, 5, 1.0));
resource.AddCopy(copy6);
EXPECT_EQ(resource.GetCurrentResources(),
std::vector<float>({2.0, 2.0, 0.0, 0.0, 0.0}));
EXPECT_FALSE(resource.HasEnoughResource(1, 5, 1.0));
resource.RemoveCopy(copy2);
EXPECT_EQ(resource.GetCurrentResources(),
std::vector<float>({2.0, 2.0, 0.0, 0.0, 2.0}));
resource.RemoveCopy(copy3);
EXPECT_EQ(resource.GetCurrentResources(),
std::vector<float>({2.0, 2.0, 0.0, 1.0, 2.0}));
resource.RemoveCopy(copy4);
EXPECT_EQ(resource.GetCurrentResources(),
std::vector<float>({2.0, 2.0, 0.0, 2.0, 2.0}));
resource.RemoveCopy(copy5);
EXPECT_EQ(resource.GetCurrentResources(),
std::vector<float>({2.0, 2.0, 1.0, 2.0, 2.0}));
resource.RemoveCopy(copy6);
EXPECT_EQ(resource.GetCurrentResources(),
std::vector<float>({2.0, 2.0, 2.0, 2.0, 2.0}));
}
TEST_F(AsynchronousCopyResourceTest, HasEnoughResourceMultiCheckSuccess) {
auto alternate_mem_space = MemorySpace::kAlternate;
AsynchronousCopyResource resource(
{2.0, 1.0, 3.0, 6.0, 7.0, 3.0, 7.0, 2.0, 2.0, 4.0});
EXPECT_TRUE(resource.HasEnoughResource(-1, 3, 5.0));
resource.AddCopy({-1, 3, 5.0, alternate_mem_space, 0});
EXPECT_TRUE(resource.HasEnoughResource(1, 10, 4.0));
resource.AddCopy({1, 10, 4.0, alternate_mem_space, 1});
LOG(INFO) << "AsynchronousCopyResource after setup:\n"
<< resource.Dump(0, 10, alternate_mem_space);
for (int i = 0; i < 4; ++i) {
EXPECT_TRUE(
resource.HasEnoughResourceMultiCheck({{0, 6, 4.0}, {4, 6, 3.0}}));
}
}
TEST_F(AsynchronousCopyResourceTest, HasEnoughResourceMultiCheckFailure) {
auto alternate_mem_space = MemorySpace::kAlternate;
AsynchronousCopyResource resource(
{2.0, 1.0, 3.0, 6.0, 7.0, 3.0, 7.0, 2.0, 2.0, 4.0});
EXPECT_TRUE(resource.HasEnoughResource(-1, 3, 5.0));
resource.AddCopy({-1, 3, 5.0, alternate_mem_space, 0});
EXPECT_TRUE(resource.HasEnoughResource(1, 10, 4.0));
resource.AddCopy({1, 10, 4.0, alternate_mem_space, 1});
LOG(INFO) << "AsynchronousCopyResource after setup:\n"
<< resource.Dump(0, 10, alternate_mem_space);
EXPECT_FALSE(
resource.HasEnoughResourceMultiCheck({{0, 6, 4.0}, {4, 6, 4.0}}));
}
TEST_F(AsynchronousCopyResourceTest,
HasEnoughResourceMultiCheckRegressionTest) {
auto alternate_mem_space = MemorySpace::kAlternate;
  AsynchronousCopyResource resource({24.0f, 0.0f, 6.0f, 411.0f, 3479.0f, 0.0f,
                                     0.0f, 1537.0f, 3095.0f, 0.0f, 26.7f});
AsynchronousCopy copy1({1, 8, 170.8f, alternate_mem_space, 1});
AsynchronousCopy copy2({2, 8, 170.8f, alternate_mem_space, 2});
resource.AddCopy(copy1);
resource.AddCopy(copy2);
LOG(INFO) << "AsynchronousCopyResource after setup:\n"
<< resource.Dump(0, 11, alternate_mem_space);
EXPECT_FALSE(
resource.HasEnoughResourceMultiCheck({{0, 4, 170.8}, {1, 4, 170.8}}));
}
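// End-to-end cross-program prefetch tests: the dot's weight operand
// (parameter 1) should be prefetched into the alternate memory across
// program executions.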
TEST_F(MemorySpaceAssignmentTest, CrossProgramPrefetchTest) {
HloComputation::Builder builder(TestName());
constexpr int kBatch = 8;
constexpr int kFeature = 8;
constexpr int kOutput = 2;
auto lhs_shape = ShapeUtil::MakeShape(F32, {kBatch, kFeature});
auto rhs_shape = ShapeUtil::MakeShape(F32, {kFeature, kOutput});
auto result_shape = ShapeUtil::MakeShape(F32, {kBatch, kOutput});
HloInstruction* lhs = builder.AddInstruction(
HloInstruction::CreateParameter(0, lhs_shape, "lhs"));
HloInstruction* rhs = builder.AddInstruction(
HloInstruction::CreateParameter(1, rhs_shape, "rhs"));
DotDimensionNumbers dot_dnums;
dot_dnums.add_lhs_contracting_dimensions(1);
dot_dnums.add_rhs_contracting_dimensions(0);
auto dot = builder.AddInstruction(HloInstruction::CreateDot(
result_shape, lhs, rhs, dot_dnums, DefaultPrecisionConfig(2)));
auto module = CreateNewVerifiedModule();
HloComputation* computation = module->AddEntryComputation(builder.Build());
HloSchedule schedule(module.get());
schedule.set_sequence(computation, {lhs, rhs, dot});
TF_CHECK_OK(module->set_schedule(schedule));
AssignMemorySpace(module.get());
auto cross_program_prefetches = module->CrossProgramPrefetches();
EXPECT_EQ(cross_program_prefetches.size(), 1);
if (!cross_program_prefetches.empty()) {
EXPECT_EQ(cross_program_prefetches[0].parameter, 1);
EXPECT_EQ(cross_program_prefetches[0].index, ShapeIndex({}));
}
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Dot(op::Parameter(0),
op::AsyncCopy(kAlternateMemorySpace, kDefaultMemorySpace,
op::Parameter(1))));
}
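// With max_cross_program_prefetches = -1 (unlimited), both weight parameters
// are prefetched.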
TEST_F(MemorySpaceAssignmentTest, MultiCrossProgramPrefetchTest) {
HloComputation::Builder builder(TestName());
constexpr int kBatch = 8;
constexpr int kFeature = 8;
constexpr int kFirstOutput = 4;
constexpr int kSecondOutput = 2;
auto lhs_shape = ShapeUtil::MakeShape(F32, {kBatch, kFeature});
auto first_weight_shape = ShapeUtil::MakeShape(F32, {kFeature, kFirstOutput});
auto second_weight_shape =
ShapeUtil::MakeShape(F32, {kFirstOutput, kSecondOutput});
auto intermediate_shape = ShapeUtil::MakeShape(F32, {kBatch, kFirstOutput});
auto result_shape = ShapeUtil::MakeShape(F32, {kBatch, kSecondOutput});
HloInstruction* lhs = builder.AddInstruction(
HloInstruction::CreateParameter(0, lhs_shape, "lhs"));
HloInstruction* first_weight = builder.AddInstruction(
HloInstruction::CreateParameter(1, first_weight_shape, "first_weight"));
HloInstruction* second_weight = builder.AddInstruction(
HloInstruction::CreateParameter(2, second_weight_shape, "second_weight"));
DotDimensionNumbers dot_dnums;
dot_dnums.add_lhs_contracting_dimensions(1);
dot_dnums.add_rhs_contracting_dimensions(0);
auto first_dot = builder.AddInstruction(
HloInstruction::CreateDot(intermediate_shape, lhs, first_weight,
dot_dnums, DefaultPrecisionConfig(2)));
auto second_dot = builder.AddInstruction(
HloInstruction::CreateDot(result_shape, first_dot, second_weight,
dot_dnums, DefaultPrecisionConfig(2)));
auto module = CreateNewVerifiedModule();
HloComputation* computation = module->AddEntryComputation(builder.Build());
HloSchedule schedule(module.get());
schedule.set_sequence(
computation, {lhs, first_weight, second_weight, first_dot, second_dot});
TF_CHECK_OK(module->set_schedule(schedule));
Options options = DefaultMemorySpaceOptions();
options.max_cross_program_prefetches = -1;
options.max_size_in_bytes = 256;
options.alignment_in_bytes = 8;
options.verify = true;
AssignMemorySpace(module.get(), options);
auto cross_program_prefetches = module->CrossProgramPrefetches();
EXPECT_EQ(cross_program_prefetches.size(), 2);
if (!cross_program_prefetches.empty()) {
EXPECT_EQ(cross_program_prefetches[0].parameter, 1);
EXPECT_EQ(cross_program_prefetches[0].index, ShapeIndex({}));
}
if (cross_program_prefetches.size() > 1) {
EXPECT_EQ(cross_program_prefetches[1].parameter, 2);
EXPECT_EQ(cross_program_prefetches[1].index, ShapeIndex({}));
}
EXPECT_THAT(
module->entry_computation()->root_instruction(),
op::Dot(op::Dot(op::Parameter(0),
op::AsyncCopy(kAlternateMemorySpace, kDefaultMemorySpace,
op::Parameter(1))),
op::AsyncCopy(kAlternateMemorySpace, kDefaultMemorySpace,
op::Parameter(2))));
}
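// The prefetched weight may live inside a tuple parameter; the prefetch
// should target tuple index {1}.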
TEST_F(MemorySpaceAssignmentTest, CrossProgramPrefetchTupleTest) {
HloComputation::Builder builder(TestName());
constexpr int kBatch = 8;
constexpr int kFeature = 8;
constexpr int kOutput = 2;
auto lhs_shape = ShapeUtil::MakeShape(F32, {kBatch, kFeature});
auto rhs_shape = ShapeUtil::MakeShape(F32, {kFeature, kOutput});
auto result_shape = ShapeUtil::MakeShape(F32, {kBatch, kOutput});
auto tuple_shape = ShapeUtil::MakeTupleShape({lhs_shape, rhs_shape});
HloInstruction* param = builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "p0"));
auto lhs = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(lhs_shape, param, 0));
auto rhs = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(rhs_shape, param, 1));
DotDimensionNumbers dot_dnums;
dot_dnums.add_lhs_contracting_dimensions(1);
dot_dnums.add_rhs_contracting_dimensions(0);
auto dot = builder.AddInstruction(HloInstruction::CreateDot(
result_shape, lhs, rhs, dot_dnums, DefaultPrecisionConfig(2)));
auto module = CreateNewVerifiedModule();
HloComputation* computation = module->AddEntryComputation(builder.Build());
HloSchedule schedule(module.get());
schedule.set_sequence(computation, {param, lhs, rhs, dot});
TF_CHECK_OK(module->set_schedule(schedule));
AssignMemorySpace(module.get());
auto cross_program_prefetches = module->CrossProgramPrefetches();
EXPECT_EQ(cross_program_prefetches.size(), 1);
if (!cross_program_prefetches.empty()) {
EXPECT_EQ(cross_program_prefetches[0].parameter, 0);
EXPECT_EQ(cross_program_prefetches[0].index, ShapeIndex({1}));
}
}
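// A bitcast between the weight parameter and the dot should not prevent the
// cross-program prefetch.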
TEST_F(MemorySpaceAssignmentTest, CrossProgramPrefetchBitcastTest) {
HloComputation::Builder builder(TestName());
constexpr int kBatch = 8;
constexpr int kFeature = 8;
constexpr int kOutput = 2;
auto lhs_shape = ShapeUtil::MakeShape(F32, {kBatch, kFeature});
auto rhs_shape = ShapeUtil::MakeShape(F32, {kOutput, kFeature});
auto bitcast_shape = ShapeUtil::MakeShape(F32, {kFeature, kOutput});
auto result_shape = ShapeUtil::MakeShape(F32, {kBatch, kOutput});
HloInstruction* lhs = builder.AddInstruction(
HloInstruction::CreateParameter(0, lhs_shape, "lhs"));
HloInstruction* rhs = builder.AddInstruction(
HloInstruction::CreateParameter(1, rhs_shape, "rhs"));
auto bitcast =
builder.AddInstruction(HloInstruction::CreateBitcast(bitcast_shape, rhs));
DotDimensionNumbers dot_dnums;
dot_dnums.add_lhs_contracting_dimensions(1);
dot_dnums.add_rhs_contracting_dimensions(0);
auto dot = builder.AddInstruction(HloInstruction::CreateDot(
result_shape, lhs, bitcast, dot_dnums, DefaultPrecisionConfig(2)));
auto module = CreateNewVerifiedModule();
HloComputation* computation = module->AddEntryComputation(builder.Build());
HloSchedule schedule(module.get());
schedule.set_sequence(computation, {lhs, rhs, bitcast, dot});
TF_CHECK_OK(module->set_schedule(schedule));
AssignMemorySpace(module.get());
auto cross_program_prefetches = module->CrossProgramPrefetches();
EXPECT_EQ(cross_program_prefetches.size(), 1);
if (!cross_program_prefetches.empty()) {
EXPECT_EQ(cross_program_prefetches[0].parameter, 1);
EXPECT_EQ(cross_program_prefetches[0].index, ShapeIndex({}));
}
}
TEST_F(MemorySpaceAssignmentTest, CrossProgramPrefetchBitcastTupleTest) {
HloComputation::Builder builder(TestName());
constexpr int kBatch = 8;
constexpr int kFeature = 8;
constexpr int kOutput = 2;
auto lhs_shape = ShapeUtil::MakeShape(F32, {kBatch, kFeature});
auto rhs_shape = ShapeUtil::MakeShape(F32, {kOutput, kFeature});
auto bitcast_shape = ShapeUtil::MakeShape(F32, {kFeature, kOutput});
auto result_shape = ShapeUtil::MakeShape(F32, {kBatch, kOutput});
auto tuple_shape = ShapeUtil::MakeTupleShape({lhs_shape, rhs_shape});
HloInstruction* param = builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "p0"));
auto lhs = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(lhs_shape, param, 0));
auto rhs = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(rhs_shape, param, 1));
auto bitcast =
builder.AddInstruction(HloInstruction::CreateBitcast(bitcast_shape, rhs));
DotDimensionNumbers dot_dnums;
dot_dnums.add_lhs_contracting_dimensions(1);
dot_dnums.add_rhs_contracting_dimensions(0);
auto dot = builder.AddInstruction(HloInstruction::CreateDot(
result_shape, lhs, bitcast, dot_dnums, DefaultPrecisionConfig(2)));
auto module = CreateNewVerifiedModule();
HloComputation* computation = module->AddEntryComputation(builder.Build());
HloSchedule schedule(module.get());
schedule.set_sequence(computation, {param, lhs, rhs, bitcast, dot});
TF_CHECK_OK(module->set_schedule(schedule));
AssignMemorySpace(module.get());
auto cross_program_prefetches = module->CrossProgramPrefetches();
EXPECT_EQ(cross_program_prefetches.size(), 1);
if (!cross_program_prefetches.empty()) {
EXPECT_EQ(cross_program_prefetches[0].parameter, 0);
EXPECT_EQ(cross_program_prefetches[0].index, ShapeIndex({1}));
}
}
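// Values nested two tuple levels deep are not prefetch candidates; no
// cross-program prefetches are expected.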
TEST_F(MemorySpaceAssignmentTest, CrossProgramPrefetchNestedTupleTest) {
HloComputation::Builder builder(TestName());
constexpr int kBatch = 8;
constexpr int kFeature = 8;
constexpr int kOutput = 2;
auto lhs_shape = ShapeUtil::MakeShape(F32, {kBatch, kFeature});
auto rhs_shape = ShapeUtil::MakeShape(F32, {kFeature, kOutput});
auto result_shape = ShapeUtil::MakeShape(F32, {kBatch, kOutput});
auto tuple_shape = ShapeUtil::MakeTupleShape({lhs_shape, rhs_shape});
auto tuple_tuple_shape = ShapeUtil::MakeTupleShape({tuple_shape});
HloInstruction* param = builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_tuple_shape, "p0"));
auto gte = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(tuple_shape, param, 0));
auto lhs = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(lhs_shape, gte, 0));
auto rhs = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(rhs_shape, gte, 1));
DotDimensionNumbers dot_dnums;
dot_dnums.add_lhs_contracting_dimensions(1);
dot_dnums.add_rhs_contracting_dimensions(0);
auto dot = builder.AddInstruction(HloInstruction::CreateDot(
result_shape, lhs, rhs, dot_dnums, DefaultPrecisionConfig(2)));
auto module = CreateNewVerifiedModule();
HloComputation* computation = module->AddEntryComputation(builder.Build());
HloSchedule schedule(module.get());
schedule.set_sequence(computation, {param, gte, lhs, rhs, dot});
TF_CHECK_OK(module->set_schedule(schedule));
AssignMemorySpace(module.get());
auto cross_program_prefetches = module->CrossProgramPrefetches();
EXPECT_EQ(cross_program_prefetches.size(), 0);
}
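// A parameter with no uses must not be cross-program prefetched.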
TEST_F(MemorySpaceAssignmentTest, CrossProgramPrefetchUnusedParamTest) {
HloComputation::Builder builder(TestName());
constexpr int kFeature = 8;
constexpr int kOutput = 2;
auto rhs_shape = ShapeUtil::MakeShape(F32, {kFeature, kOutput});
HloInstruction* param = builder.AddInstruction(
HloInstruction::CreateParameter(0, rhs_shape, "p0"));
auto module = CreateNewVerifiedModule();
HloComputation* computation = module->AddEntryComputation(builder.Build());
HloSchedule schedule(module.get());
schedule.set_sequence(computation, {param});
TF_CHECK_OK(module->set_schedule(schedule));
AssignMemorySpace(module.get());
auto cross_program_prefetches = module->CrossProgramPrefetches();
EXPECT_EQ(cross_program_prefetches.size(), 0);
}
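// With kOutput bumped to 8, the weight is too large for the alternate
// memory, so no cross-program prefetch is expected.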
TEST_F(MemorySpaceAssignmentTest, CrossProgramPrefetchTooBigTest) {
HloComputation::Builder builder(TestName());
constexpr int kBatch = 8;
constexpr int kFeature = 8;
constexpr int kOutput = 8;
auto lhs_shape = ShapeUtil::MakeShape(F32, {kBatch, kFeature});
auto rhs_shape = ShapeUtil::MakeShape(F32, {kFeature, kOutput});
auto result_shape = ShapeUtil::MakeShape(F32, {kBatch, kOutput});
HloInstruction* lhs = builder.AddInstruction(
HloInstruction::CreateParameter(0, lhs_shape, "lhs"));
HloInstruction* rhs = builder.AddInstruction(
HloInstruction::CreateParameter(1, rhs_shape, "rhs"));
DotDimensionNumbers dot_dnums;
dot_dnums.add_lhs_contracting_dimensions(1);
dot_dnums.add_rhs_contracting_dimensions(0);
auto dot = builder.AddInstruction(HloInstruction::CreateDot(
result_shape, lhs, rhs, dot_dnums, DefaultPrecisionConfig(2)));
auto module = CreateNewVerifiedModule();
HloComputation* computation = module->AddEntryComputation(builder.Build());
HloSchedule schedule(module.get());
schedule.set_sequence(computation, {lhs, rhs, dot});
TF_CHECK_OK(module->set_schedule(schedule));
AssignMemorySpace(module.get());
auto cross_program_prefetches = module->CrossProgramPrefetches();
EXPECT_EQ(cross_program_prefetches.size(), 0);
}
TEST_F(MemorySpaceAssignmentTest, CrossProgramPrefetchTooBigTupleTest) {
HloComputation::Builder builder(TestName());
constexpr int kBatch = 8;
constexpr int kFeature = 8;
constexpr int kOutput = 8;
auto lhs_shape = ShapeUtil::MakeShape(F32, {kBatch, kFeature});
auto rhs_shape = ShapeUtil::MakeShape(F32, {kFeature, kOutput});
auto result_shape = ShapeUtil::MakeShape(F32, {kBatch, kOutput});
auto tuple_shape = ShapeUtil::MakeTupleShape({lhs_shape, rhs_shape});
HloInstruction* param = builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "p0"));
auto lhs = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(lhs_shape, param, 0));
auto rhs = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(rhs_shape, param, 1));
DotDimensionNumbers dot_dnums;
dot_dnums.add_lhs_contracting_dimensions(1);
dot_dnums.add_rhs_contracting_dimensions(0);
auto dot = builder.AddInstruction(HloInstruction::CreateDot(
result_shape, lhs, rhs, dot_dnums, DefaultPrecisionConfig(2)));
auto module = CreateNewVerifiedModule();
HloComputation* computation = module->AddEntryComputation(builder.Build());
HloSchedule schedule(module.get());
schedule.set_sequence(computation, {param, lhs, rhs, dot});
TF_CHECK_OK(module->set_schedule(schedule));
AssignMemorySpace(module.get());
auto cross_program_prefetches = module->CrossProgramPrefetches();
EXPECT_EQ(cross_program_prefetches.size(), 0);
}
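// The dot lives inside a custom fusion whose operands are constants rather
// than parameters, so there is nothing eligible for cross-program prefetch.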
TEST_F(MemorySpaceAssignmentTest, CrossProgramPrefetchFusionTest) {
HloComputation::Builder builder(TestName());
constexpr int kBatch = 2;
constexpr int kFeature = 2;
constexpr int kOutput = 2;
auto lhs_shape = ShapeUtil::MakeShape(F32, {kBatch, kFeature});
auto rhs_shape = ShapeUtil::MakeShape(F32, {kFeature, kOutput});
auto result_shape = ShapeUtil::MakeShape(F32, {kBatch, kOutput});
auto module = CreateNewVerifiedModule();
HloComputation::Builder fusion_builder("fusion");
{
HloInstruction* lhs = fusion_builder.AddInstruction(
HloInstruction::CreateParameter(0, lhs_shape, "lhs"));
HloInstruction* rhs = fusion_builder.AddInstruction(
HloInstruction::CreateParameter(1, rhs_shape, "rhs"));
DotDimensionNumbers dot_dnums;
dot_dnums.add_lhs_contracting_dimensions(1);
dot_dnums.add_rhs_contracting_dimensions(0);
auto dot = fusion_builder.AddInstruction(HloInstruction::CreateDot(
result_shape, lhs, rhs, dot_dnums, DefaultPrecisionConfig(2)));
(void)dot;
}
HloComputation* fusion_computation =
module->AddEmbeddedComputation(fusion_builder.Build());
auto activations = builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR2<float>({{0.0, 1.0}, {2.0, 3.0}})));
auto weights = builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR2<float>({{0.0, 1.0}, {2.0, 3.0}})));
HloInstruction* fusion = builder.AddInstruction(HloInstruction::CreateFusion(
result_shape, HloInstruction::FusionKind::kCustom, {activations, weights},
fusion_computation));
HloComputation* computation = module->AddEntryComputation(builder.Build());
HloSchedule schedule(module.get());
schedule.set_sequence(computation, {activations, weights, fusion});
TF_CHECK_OK(module->set_schedule(schedule));
AssignMemorySpace(module.get());
auto cross_program_prefetches = module->CrossProgramPrefetches();
EXPECT_EQ(cross_program_prefetches.size(), 0);
}
TEST_F(MemorySpaceAssignmentTest, CrossProgramPrefetchFusionTupleTest) {
HloComputation::Builder builder(TestName());
constexpr int kBatch = 2;
constexpr int kFeature = 2;
constexpr int kOutput = 2;
auto lhs_shape = ShapeUtil::MakeShape(F32, {kBatch, kFeature});
auto rhs_shape = ShapeUtil::MakeShape(F32, {kFeature, kOutput});
auto result_shape = ShapeUtil::MakeShape(F32, {kBatch, kOutput});
auto tuple_shape = ShapeUtil::MakeTupleShape({lhs_shape, rhs_shape});
auto module = CreateNewVerifiedModule();
HloComputation::Builder fusion_builder("fusion");
{
HloInstruction* param = fusion_builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "p0"));
auto lhs = fusion_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(lhs_shape, param, 0));
auto rhs = fusion_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(rhs_shape, param, 1));
DotDimensionNumbers dot_dnums;
dot_dnums.add_lhs_contracting_dimensions(1);
dot_dnums.add_rhs_contracting_dimensions(0);
auto dot = fusion_builder.AddInstruction(HloInstruction::CreateDot(
result_shape, lhs, rhs, dot_dnums, DefaultPrecisionConfig(2)));
(void)dot;
}
HloComputation* fusion_computation =
module->AddEmbeddedComputation(fusion_builder.Build());
auto activations = builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR2<float>({{0.0, 1.0}, {2.0, 3.0}})));
auto weights = builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR2<float>({{0.0, 1.0}, {2.0, 3.0}})));
HloInstruction* tuple = builder.AddInstruction(
HloInstruction::CreateTuple({activations, weights}));
HloInstruction* fusion = builder.AddInstruction(HloInstruction::CreateFusion(
result_shape, HloInstruction::FusionKind::kCustom, {tuple},
fusion_computation));
HloComputation* computation = module->AddEntryComputation(builder.Build());
HloSchedule schedule(module.get());
schedule.set_sequence(computation, {activations, weights, tuple, fusion});
TF_CHECK_OK(module->set_schedule(schedule));
AssignMemorySpace(module.get());
auto cross_program_prefetches = module->CrossProgramPrefetches();
EXPECT_EQ(cross_program_prefetches.size(), 0);
}
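// The weight's layout already places it in kAlternateMemorySpace, so it is
// effectively pinned there and no cross-program prefetch is expected.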
TEST_F(MemorySpaceAssignmentTest, CrossProgramPrefetchPinnedTest) {
HloComputation::Builder builder(TestName());
constexpr int kBatch = 8;
constexpr int kFeature = 8;
constexpr int kOutput = 2;
auto lhs_shape = ShapeUtil::MakeShape(F32, {kBatch, kFeature});
  auto rhs_shape = ShapeUtil::MakeShapeWithDenseLayout(
      F32, {kFeature, kOutput},
      /*minor_to_major=*/{1, 0}, /*tiles=*/{},
      /*tail_padding_alignment_in_elements=*/1, /*element_size_in_bits=*/0,
      kAlternateMemorySpace);
auto result_shape = ShapeUtil::MakeShape(F32, {kBatch, kOutput});
HloInstruction* lhs = builder.AddInstruction(
HloInstruction::CreateParameter(0, lhs_shape, "lhs"));
HloInstruction* rhs = builder.AddInstruction(
HloInstruction::CreateParameter(1, rhs_shape, "rhs"));
DotDimensionNumbers dot_dnums;
dot_dnums.add_lhs_contracting_dimensions(1);
dot_dnums.add_rhs_contracting_dimensions(0);
auto dot = builder.AddInstruction(HloInstruction::CreateDot(
result_shape, lhs, rhs, dot_dnums, DefaultPrecisionConfig(2)));
auto module = CreateNewVerifiedModule();
HloComputation* computation = module->AddEntryComputation(builder.Build());
HloSchedule schedule(module.get());
schedule.set_sequence(computation, {lhs, rhs, dot});
TF_CHECK_OK(module->set_schedule(schedule));
Options options = DefaultMemorySpaceOptions();
options.is_allowed_in_alternate_mem_fn = [](const HloValue& value) {
return true;
};
std::unique_ptr<PresetAssignments> preset_assignments =
AssignMemorySpace(module.get(), options);
auto cross_program_prefetches = module->CrossProgramPrefetches();
EXPECT_EQ(cross_program_prefetches.size(), 0);
}
TEST_F(MemorySpaceAssignmentTest, CrossProgramPrefetchPinnedTupleTest) {
HloComputation::Builder builder(TestName());
constexpr int kBatch = 8;
constexpr int kFeature = 8;
constexpr int kOutput = 2;
auto lhs_shape = ShapeUtil::MakeShape(F32, {kBatch, kFeature});
  auto rhs_shape = ShapeUtil::MakeShapeWithDenseLayout(
      F32, {kFeature, kOutput},
      /*minor_to_major=*/{1, 0}, /*tiles=*/{},
      /*tail_padding_alignment_in_elements=*/1, /*element_size_in_bits=*/0,
      kAlternateMemorySpace);
auto result_shape = ShapeUtil::MakeShape(F32, {kBatch, kOutput});
auto tuple_shape = ShapeUtil::MakeTupleShape({lhs_shape, rhs_shape});
HloInstruction* param = builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "p0"));
auto lhs = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(lhs_shape, param, 0));
auto rhs = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(rhs_shape, param, 1));
DotDimensionNumbers dot_dnums;
dot_dnums.add_lhs_contracting_dimensions(1);
dot_dnums.add_rhs_contracting_dimensions(0);
auto dot = builder.AddInstruction(HloInstruction::CreateDot(
result_shape, lhs, rhs, dot_dnums, DefaultPrecisionConfig(2)));
auto module = CreateNewVerifiedModule();
HloComputation* computation = module->AddEntryComputation(builder.Build());
HloSchedule schedule(module.get());
schedule.set_sequence(computation, {param, lhs, rhs, dot});
TF_CHECK_OK(module->set_schedule(schedule));
Options options = DefaultMemorySpaceOptions();
options.is_allowed_in_alternate_mem_fn = [](const HloValue& value) {
return true;
};
std::unique_ptr<PresetAssignments> preset_assignments =
AssignMemorySpace(module.get(), options);
auto cross_program_prefetches = module->CrossProgramPrefetches();
EXPECT_EQ(cross_program_prefetches.size(), 0);
}
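// The following tests are written directly in HLO text. Here parameter 0
// may-aliases the output, so the root dynamic-update-slice must keep reading
// the parameter directly and no cross-program prefetch is allowed.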
TEST_F(MemorySpaceAssignmentTest, CrossProgramRootDupMayAlias) {
absl::string_view hlo_string = R"(
HloModule cross_program_prefetch, is_scheduled=true, input_output_alias={ {}: (0, {}, may-alias) }
ENTRY CrossProgramPrefetch {
c0 = s32[1,2] constant({{77, 77}})
c1 = s32[] constant(0)
p0 = s32[2,2] parameter(0)
ROOT dup = s32[2,2] dynamic-update-slice(s32[2,2] p0, s32[1,2] c0, s32[] c1, s32[] c1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
  auto preset_assignments = AssignMemorySpace(
      module.get(), DefaultMemorySpaceOptions(),
      /*max_prefetch_interval=*/5, /*min_prefetch_interval=*/2);
auto cross_program_prefetches = module->CrossProgramPrefetches();
EXPECT_EQ(cross_program_prefetches.size(), 0);
EXPECT_THAT(FindInstruction(module.get(), "dup")->operand(0),
op::Parameter(0));
}
TEST_F(MemorySpaceAssignmentTest, CrossProgramRootDusFusionMayAlias) {
absl::string_view hlo_string = R"(
HloModule cross_program_prefetch, is_scheduled=true, input_output_alias={ {}: (0, {}, may-alias) }
fused_computation {
fused_p0 = s32[2,2] parameter(0)
fused_p1 = s32[1,2] parameter(1)
fused_p2 = s32[] parameter(2)
fused_p3 = s32[] parameter(3)
ROOT dus = s32[2,2] dynamic-update-slice(fused_p0, fused_p1, fused_p2, fused_p3)
}
ENTRY CrossProgramPrefetch {
p0 = s32[2,2] parameter(0)
c0 = s32[1,2] constant({{77, 77}})
c1 = s32[] constant(0)
bitcast1 = s32[2,2] bitcast(p0)
ROOT fusion = s32[2,2] fusion(bitcast1, c0, c1, c1), kind=kLoop, calls=fused_computation
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
  auto preset_assignments = AssignMemorySpace(
      module.get(), DefaultMemorySpaceOptions(),
      /*max_prefetch_interval=*/5, /*min_prefetch_interval=*/2);
auto cross_program_prefetches = module->CrossProgramPrefetches();
EXPECT_EQ(cross_program_prefetches.size(), 0);
}
TEST_F(MemorySpaceAssignmentTest, CrossProgramRootDup) {
absl::string_view hlo_string = R"(
HloModule cross_program_prefetch, is_scheduled=true
ENTRY CrossProgramPrefetch {
c0 = s32[1,2] constant({{77, 77}})
c1 = s32[] constant(0)
p0 = s32[2,2] parameter(0)
ROOT dup = s32[2,2] dynamic-update-slice(s32[2,2] p0, s32[1,2] c0, s32[] c1, s32[] c1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
  auto preset_assignments = AssignMemorySpace(
      module.get(), DefaultMemorySpaceOptions(),
      /*max_prefetch_interval=*/5, /*min_prefetch_interval=*/2);
auto cross_program_prefetches = module->CrossProgramPrefetches();
EXPECT_EQ(cross_program_prefetches.size(), 0);
EXPECT_THAT(FindInstruction(module.get(), "dup")->operand(0),
op::Parameter(0));
}
TEST_F(MemorySpaceAssignmentTest, CrossProgramRootDupDot) {
absl::string_view hlo_string = R"(
HloModule cross_program_prefetch, is_scheduled=true
ENTRY CrossProgramPrefetch {
c0 = s32[1,2] constant({{77, 77}})
c1 = s32[] constant(0)
p0 = s32[2,2] parameter(0)
p1 = s32[2,2] parameter(1)
dup = s32[2,2] dynamic-update-slice(s32[2,2] p0, s32[1,2] c0, s32[] c1, s32[] c1)
ROOT dot = s32[2,2] dot(p1, dup), lhs_contracting_dims={0}, rhs_contracting_dims={0}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
  auto preset_assignments = AssignMemorySpace(
      module.get(), DefaultMemorySpaceOptions(),
      /*max_prefetch_interval=*/5, /*min_prefetch_interval=*/2);
auto cross_program_prefetches = module->CrossProgramPrefetches();
EXPECT_EQ(cross_program_prefetches.size(), 1);
EXPECT_THAT(FindInstruction(module.get(), "dup")->operand(0),
op::AsyncCopy(kAlternateMemorySpace, kDefaultMemorySpace,
op::Parameter(0)));
}
TEST_F(MemorySpaceAssignmentTest, CrossProgramRootDotMayAlias) {
absl::string_view hlo_string = R"(
HloModule cross_program_prefetch, is_scheduled=true, input_output_alias={ {}: (0, {}, may-alias) }
ENTRY CrossProgramPrefetch {
p0 = s32[2,2] parameter(0)
p1 = s32[2,2] parameter(1)
ROOT dot = s32[2,2] dot(p1, p0), lhs_contracting_dims={0}, rhs_contracting_dims={0}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
  auto preset_assignments = AssignMemorySpace(
      module.get(), DefaultMemorySpaceOptions(),
      /*max_prefetch_interval=*/5, /*min_prefetch_interval=*/2);
auto cross_program_prefetches = module->CrossProgramPrefetches();
EXPECT_EQ(cross_program_prefetches.size(), 0);
EXPECT_THAT(FindInstruction(module.get(), "dot")->operand(1),
op::Parameter(0));
}
TEST_F(MemorySpaceAssignmentTest, CrossProgramRootLiveOutBug) {
absl::string_view hlo_string = R"(
HloModule cross_program_prefetch, is_scheduled=true, input_output_alias={ {0}: (0, {}, may-alias) }
fused_computation {
p0 = s32[2,2] parameter(0)
p1 = s32[2,2] parameter(1)
slice = s32[1,2] slice(p1), slice={[0:1], [0:2]}
c1 = s32[] constant(0)
ROOT dus = s32[2,2] dynamic-update-slice(s32[2,2] p0, s32[1,2] slice, s32[] c1, s32[] c1)
}
ENTRY CrossProgramPrefetch {
p0 = s32[2,2] parameter(0)
p1 = s32[2,2] parameter(1)
dot = s32[2,2] dot(p1, p0), lhs_contracting_dims={0}, rhs_contracting_dims={0}
fusion = s32[2,2] fusion(p0, dot), kind=kLoop, calls=fused_computation
ROOT root = (s32[2,2], s32[2,2]) tuple(fusion, dot)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
  auto preset_assignments = AssignMemorySpace(
      module.get(), DefaultMemorySpaceOptions(),
      /*max_prefetch_interval=*/5, /*min_prefetch_interval=*/2);
auto cross_program_prefetches = module->CrossProgramPrefetches();
EXPECT_EQ(cross_program_prefetches.size(), 0);
}
TEST_F(MemorySpaceAssignmentTest, CrossProgramRootParameter) {
absl::string_view hlo_string = R"(
HloModule cross_program_prefetch, is_scheduled=true
ENTRY CrossProgramPrefetch {
p0 = s32[2,2] parameter(0)
ROOT bitcast = u32[2,2] bitcast(p0)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
  auto preset_assignments = AssignMemorySpace(
      module.get(), DefaultMemorySpaceOptions(),
      /*max_prefetch_interval=*/5, /*min_prefetch_interval=*/2);
auto cross_program_prefetches = module->CrossProgramPrefetches();
EXPECT_EQ(cross_program_prefetches.size(), 0);
}
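// The prefetched weight is not reused after the dot, so besides the
// cross-program prefetch there should be one end-of-program copy (a
// copy-start without a cross-program-prefetch index), whose copy-done is the
// last scheduled instruction.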
TEST_F(MemorySpaceAssignmentTest, CrossProgramPrefetchNoReuse) {
absl::string_view hlo_string = R"(
HloModule cross_program_prefetch, is_scheduled=true
ENTRY CrossProgramPrefetch {
p0 = f32[8,8]{1,0} parameter(0)
p1 = f32[8,2]{1,0} parameter(1)
dot = f32[8,2]{1,0} dot(p0, p1), lhs_contracting_dims={1}, rhs_contracting_dims={0}
negate.1 = f32[8,2]{1,0} negate(dot)
negate.2 = f32[8,2]{1,0} negate(negate.1)
negate.3 = f32[8,2]{1,0} negate(negate.2)
negate.4 = f32[8,2]{1,0} negate(negate.3)
negate.5 = f32[8,2]{1,0} negate(negate.4)
negate.6 = f32[8,2]{1,0} negate(negate.5)
negate.7 = f32[8,2]{1,0} negate(negate.6)
negate.8 = f32[8,2]{1,0} negate(negate.7)
ROOT negate.9 = f32[8,2]{1,0} negate(negate.8)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
  auto preset_assignments = AssignMemorySpace(
      module.get(), DefaultMemorySpaceOptions(),
      /*max_prefetch_interval=*/5, /*min_prefetch_interval=*/2);
auto cross_program_prefetches = module->CrossProgramPrefetches();
EXPECT_EQ(cross_program_prefetches.size(), 1);
if (!cross_program_prefetches.empty()) {
EXPECT_EQ(cross_program_prefetches[0].parameter, 1);
EXPECT_EQ(cross_program_prefetches[0].index, ShapeIndex({}));
}
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<HloDataflowAnalysis> dataflow_analysis,
HloDataflowAnalysis::Run(*module));
LOG(ERROR) << "module: " << module->ToString();
const HloValue& cross_program_prefetched_value =
dataflow_analysis->GetValueDefinedAt(
module->entry_computation()->parameter_instruction(1), {});
auto is_cross_program_prefetch = [](const HloUse& use) {
return use.instruction->opcode() == HloOpcode::kCopyStart &&
use.instruction->cross_program_prefetch_index().has_value();
};
EXPECT_EQ(absl::c_count_if(cross_program_prefetched_value.GetUses(),
is_cross_program_prefetch),
1);
auto is_end_of_program_prefetch = [](const HloUse& use) {
return use.instruction->opcode() == HloOpcode::kCopyStart &&
!use.instruction->cross_program_prefetch_index().has_value();
};
EXPECT_EQ(absl::c_count_if(cross_program_prefetched_value.GetUses(),
is_end_of_program_prefetch),
1);
const HloInstruction* last_instruction =
module->schedule()
.sequence(module->entry_computation())
.instructions()[module->entry_computation()->instruction_count() - 1];
EXPECT_THAT(last_instruction, op::CopyDone());
EXPECT_NE(last_instruction, module->entry_computation()->root_instruction());
bool has_zero_offset_allocations = false;
for (auto pos_and_chunk : preset_assignments->chunks()) {
if (pos_and_chunk.first.instruction->opcode() == HloOpcode::kNegate &&
pos_and_chunk.second.offset == 0) {
has_zero_offset_allocations = true;
}
}
EXPECT_TRUE(has_zero_offset_allocations);
}
TEST_F(MemorySpaceAssignmentTest, CrossProgramPrefetchTupleNoReuse) {
absl::string_view hlo_string = R"(
HloModule cross_program_prefetch, is_scheduled=true
ENTRY CrossProgramPrefetch {
p0 = (f32[8,8]{1,0}, f32[8,2]{1,0}) parameter(0)
get-tuple-element = f32[8,8]{1,0} get-tuple-element(p0), index=0
get-tuple-element.1 = f32[8,2]{1,0} get-tuple-element(p0), index=1
dot = f32[8,2]{1,0} dot(get-tuple-element, get-tuple-element.1), lhs_contracting_dims={1}, rhs_contracting_dims={0}
negate.1 = f32[8,2]{1,0} negate(dot)
negate.2 = f32[8,2]{1,0} negate(negate.1)
negate.3 = f32[8,2]{1,0} negate(negate.2)
negate.4 = f32[8,2]{1,0} negate(negate.3)
negate.5 = f32[8,2]{1,0} negate(negate.4)
negate.6 = f32[8,2]{1,0} negate(negate.5)
negate.7 = f32[8,2]{1,0} negate(negate.6)
negate.8 = f32[8,2]{1,0} negate(negate.7)
ROOT negate.9 = f32[8,2]{1,0} negate(negate.8)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
  auto preset_assignments = AssignMemorySpace(
      module.get(), DefaultMemorySpaceOptions(),
      /*max_prefetch_interval=*/5, /*min_prefetch_interval=*/2);
auto cross_program_prefetches = module->CrossProgramPrefetches();
EXPECT_EQ(cross_program_prefetches.size(), 1);
if (!cross_program_prefetches.empty()) {
EXPECT_EQ(cross_program_prefetches[0].parameter, 0);
EXPECT_EQ(cross_program_prefetches[0].index, ShapeIndex({1}));
}
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<HloDataflowAnalysis> dataflow_analysis,
HloDataflowAnalysis::Run(*module));
const HloValue& cross_program_prefetched_value =
dataflow_analysis->GetValueDefinedAt(
module->entry_computation()->parameter_instruction(0), {1});
auto is_cross_program_prefetch = [](const HloUse& use) {
return use.instruction->opcode() == HloOpcode::kCopyStart &&
use.instruction->cross_program_prefetch_index().has_value();
};
EXPECT_EQ(absl::c_count_if(cross_program_prefetched_value.GetUses(),
is_cross_program_prefetch),
1);
auto is_end_of_program_prefetch = [](const HloUse& use) {
return use.instruction->opcode() == HloOpcode::kCopyStart &&
!use.instruction->cross_program_prefetch_index().has_value();
};
EXPECT_EQ(absl::c_count_if(cross_program_prefetched_value.GetUses(),
is_end_of_program_prefetch),
1);
const HloInstruction* last_instruction =
module->schedule()
.sequence(module->entry_computation())
.instructions()[module->entry_computation()->instruction_count() - 1];
EXPECT_THAT(last_instruction, op::CopyDone());
EXPECT_NE(last_instruction, module->entry_computation()->root_instruction());
bool has_zero_offset_allocations = false;
for (auto pos_and_chunk : preset_assignments->chunks()) {
if (pos_and_chunk.first.instruction->opcode() == HloOpcode::kNegate &&
pos_and_chunk.second.offset == 0) {
has_zero_offset_allocations = true;
}
}
EXPECT_TRUE(has_zero_offset_allocations);
}
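// The prefetched weight is reused by the root dot, so only the cross-program
// prefetch copy is expected and no end-of-program copy.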
TEST_F(MemorySpaceAssignmentTest, CrossProgramPrefetchReuse) {
absl::string_view hlo_string = R"(
HloModule cross_program_prefetch, is_scheduled=true
ENTRY CrossProgramPrefetch {
p0 = f32[8,8]{1,0} parameter(0)
p1 = f32[8,2]{1,0} parameter(1)
dot = f32[8,2]{1,0} dot(p0, p1), lhs_contracting_dims={1}, rhs_contracting_dims={0}
negate.1 = f32[8,2]{1,0} negate(dot)
negate.2 = f32[8,2]{1,0} negate(negate.1)
negate.3 = f32[8,2]{1,0} negate(negate.2)
negate.4 = f32[8,2]{1,0} negate(negate.3)
negate.5 = f32[8,2]{1,0} negate(negate.4)
negate.6 = f32[8,2]{1,0} negate(negate.5)
negate.7 = f32[8,2]{1,0} negate(negate.6)
negate.8 = f32[8,2]{1,0} negate(negate.7)
ROOT dot.2 = f32[2,2]{1,0} dot(negate.8, p1), lhs_contracting_dims={0}, rhs_contracting_dims={0}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
  AssignMemorySpace(module.get(), DefaultMemorySpaceOptions(),
                    /*max_prefetch_interval=*/5, /*min_prefetch_interval=*/2);
auto cross_program_prefetches = module->CrossProgramPrefetches();
EXPECT_EQ(cross_program_prefetches.size(), 1);
if (!cross_program_prefetches.empty()) {
EXPECT_EQ(cross_program_prefetches[0].parameter, 1);
EXPECT_EQ(cross_program_prefetches[0].index, ShapeIndex({}));
}
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<HloDataflowAnalysis> dataflow_analysis,
HloDataflowAnalysis::Run(*module));
const HloValue& cross_program_prefetched_value =
dataflow_analysis->GetValueDefinedAt(
module->entry_computation()->parameter_instruction(1), {});
auto is_cross_program_prefetch = [](const HloUse& use) {
return use.instruction->opcode() == HloOpcode::kCopyStart &&
use.instruction->cross_program_prefetch_index().has_value();
};
EXPECT_EQ(absl::c_count_if(cross_program_prefetched_value.GetUses(),
is_cross_program_prefetch),
1);
auto is_end_of_program_prefetch = [](const HloUse& use) {
return use.instruction->opcode() == HloOpcode::kCopyStart &&
!use.instruction->cross_program_prefetch_index().has_value();
};
EXPECT_EQ(absl::c_count_if(cross_program_prefetched_value.GetUses(),
is_end_of_program_prefetch),
0);
}
TEST_F(MemorySpaceAssignmentTest, CrossProgramPrefetchTupleReuse) {
absl::string_view hlo_string = R"(
HloModule cross_program_prefetch, is_scheduled=true
ENTRY CrossProgramPrefetch {
p0 = (f32[8,8]{1,0}, f32[8,2]{1,0}) parameter(0)
get-tuple-element = f32[8,8]{1,0} get-tuple-element(p0), index=0
get-tuple-element.1 = f32[8,2]{1,0} get-tuple-element(p0), index=1
dot = f32[8,2]{1,0} dot(get-tuple-element, get-tuple-element.1), lhs_contracting_dims={1}, rhs_contracting_dims={0}
negate.1 = f32[8,2]{1,0} negate(dot)
negate.2 = f32[8,2]{1,0} negate(negate.1)
negate.3 = f32[8,2]{1,0} negate(negate.2)
negate.4 = f32[8,2]{1,0} negate(negate.3)
negate.5 = f32[8,2]{1,0} negate(negate.4)
negate.6 = f32[8,2]{1,0} negate(negate.5)
negate.7 = f32[8,2]{1,0} negate(negate.6)
negate.8 = f32[8,2]{1,0} negate(negate.7)
ROOT dot.2 = f32[2,2]{1,0} dot(negate.8, get-tuple-element.1), lhs_contracting_dims={0}, rhs_contracting_dims={0}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
  AssignMemorySpace(module.get(), DefaultMemorySpaceOptions(),
                    /*max_prefetch_interval=*/5, /*min_prefetch_interval=*/2);
auto cross_program_prefetches = module->CrossProgramPrefetches();
EXPECT_EQ(cross_program_prefetches.size(), 1);
if (!cross_program_prefetches.empty()) {
EXPECT_EQ(cross_program_prefetches[0].parameter, 0);
EXPECT_EQ(cross_program_prefetches[0].index, ShapeIndex({1}));
}
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<HloDataflowAnalysis> dataflow_analysis,
HloDataflowAnalysis::Run(*module));
const HloValue& cross_program_prefetched_value =
dataflow_analysis->GetValueDefinedAt(
module->entry_computation()->parameter_instruction(0), {1});
auto is_cross_program_prefetch = [](const HloUse& use) {
return use.instruction->opcode() == HloOpcode::kCopyStart &&
use.instruction->cross_program_prefetch_index().has_value();
};
EXPECT_EQ(absl::c_count_if(cross_program_prefetched_value.GetUses(),
is_cross_program_prefetch),
1);
auto is_end_of_program_prefetch = [](const HloUse& use) {
return use.instruction->opcode() == HloOpcode::kCopyStart &&
!use.instruction->cross_program_prefetch_index().has_value();
};
EXPECT_EQ(absl::c_count_if(cross_program_prefetched_value.GetUses(),
is_end_of_program_prefetch),
0);
}
TEST_F(MemorySpaceAssignmentTest, CrossProgramPrefetchBufferUnused) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
%fused_computation {
%param_0.2 = f32[32]{0} parameter(0)
%param_1.4 = s32[100]{0} parameter(1)
%custom-call.1 = s32[100]{0} custom-call(s32[100]{0} %param_1.4), custom_call_target="AssumeGatherIndicesInBound", operand_layout_constraints={s32[100]{0}}
%slice.1 = s32[32]{0} slice(s32[100]{0} %custom-call.1), slice={[0:32]}
%reshape.7 = s32[32]{0} reshape(s32[32]{0} %slice.1)
%transpose.5 = s32[32]{0} transpose(s32[32]{0} %reshape.7), dimensions={0}
%gather.1 = f32[32]{0} gather(f32[32]{0} %param_0.2, s32[32]{0} %transpose.5), offset_dims={}, collapsed_slice_dims={0}, start_index_map={0}, index_vector_dim=1, slice_sizes={1}
%transpose.4 = f32[32]{0} transpose(f32[32]{0} %gather.1), dimensions={0}
ROOT %reshape.6 = f32[32]{0} reshape(f32[32]{0} %transpose.4)
}
%i.reduce_sub_computation {
%rhs = s32[] parameter(1)
%lhs = s32[] parameter(0)
ROOT %add = s32[] add(s32[] %lhs, s32[] %rhs)
}
%fused_computation.1 {
%constant.4 = s32[] constant(0)
%broadcast.4 = s32[100]{0} broadcast(s32[] %constant.4), dimensions={}
%param_0.4 = s32[32]{0} parameter(0)
%pad.1 = s32[100]{0} pad(s32[32]{0} %param_0.4, s32[] %constant.4), padding=0_68
%constant.3 = s32[] constant(76031)
%broadcast.3 = s32[100]{0} broadcast(s32[] %constant.3), dimensions={}
ROOT %clamp.1 = s32[100]{0} clamp(s32[100]{0} %broadcast.4, s32[100]{0} %pad.1, s32[100]{0} %broadcast.3)
}
ENTRY %main {
%constant = s32[] constant(0)
%i = s32[32,1]{0,1} parameter(1)
%o = f32[32]{0} parameter(0)
%reduce = s32[32]{0} reduce(s32[32,1]{0,1} %i, s32[] %constant), dimensions={1}, to_apply=%i.reduce_sub_computation
%fusion.1 = s32[100]{0} fusion(s32[32]{0} %reduce), kind=kLoop, calls=%fused_computation.1
ROOT %fusion = f32[32]{0} fusion(f32[32]{0} %o, s32[100]{0} %fusion.1), kind=kCustom, calls=%fused_computation
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
AssignMemorySpace(module.get());
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Fusion(op::AsyncCopy(kAlternateMemorySpace,
kDefaultMemorySpace, op::Parameter(0)),
op::Fusion()));
}
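// With cross_program_prefetch_permissive_mode enabled, the parameter that is
// consumed only through a fusion is still expected to be prefetched.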
TEST_F(MemorySpaceAssignmentTest, CrossProgramPrefetchPermissiveMode) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
fused_computation {
param_0 = f32[2] parameter(0)
param_1 = f32[4,2] parameter(1)
broadcast = f32[4,2] broadcast(param_0), dimensions={1}
ROOT multiply = f32[4,2] multiply(broadcast, param_1)
}
ENTRY entry {
p0 = f32[2] parameter(0)
p1 = f32[4,2] parameter(1)
fusion = f32[4,2] fusion(p0, p1), kind=kLoop, calls=fused_computation
ROOT negate = f32[4,2] negate(fusion)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
Options options = DefaultMemorySpaceOptions();
options.cross_program_prefetch_permissive_mode = true;
AssignMemorySpace(module.get(), options);
auto cross_program_prefetches = module->CrossProgramPrefetches();
EXPECT_EQ(cross_program_prefetches.size(), 1);
}
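// Integration test for copy-resource accounting: the buffer-interval
// comparator below prioritizes p0 over p1, and the schedule-index checks at
// the end pin down exactly where p1's prefetch copy-start and copy-done must
// land relative to c, d, e, and f.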
TEST_F(MemorySpaceAssignmentTest, CopyResourceIntegration) {
std::string_view hlo_string = R"(
HloModule module, is_scheduled=true
ENTRY main {
p0 = s32[8,8] parameter(0)
p1 = s32[8,8] parameter(1)
p2 = s32[] parameter(2)
a = negate(p2)
b = negate(a)
c = add(p0, p0)
d = negate(b)
e = negate(d)
f = add(p1, p1)
ROOT result = tuple(e,c,f)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
Options options = DefaultMemorySpaceOptions();
options.max_size_in_bytes = 300;
HloCostAnalysis::Properties properties;
properties[HloCostAnalysis::kBytesAccessedKey] = kBytesPerSecond;
HloCostAnalysis hlo_cost_analysis(ShapeSize, properties);
CostAnalysisOptions cost_analysis_options;
HloCostAnalysisCosts hlo_cost_analysis_costs(hlo_cost_analysis);
TF_ASSERT_OK_AND_ASSIGN(
auto cost_analysis,
FakeCostAnalysis::Create(hlo_cost_analysis_costs, *module,
cost_analysis_options));
cost_analysis->SetOverrideForGetInstructionElapsed(
[](const HloInstruction& instruction) -> float { return 10.0; });
cost_analysis->SetOverrideForGetAsyncCopyElapsed(
[](const Shape& shape) -> float { return 20.0; });
options.cost_analysis = cost_analysis.get();
  CostAnalysisPrefetchIntervalPicker prefetch_interval_picker(
      CostAnalysisPrefetchIntervalPicker(
          *cost_analysis, /*min_overlap_to_async_copy_ratio=*/0.8,
          /*preferred_overlap_to_async_copy_ratio=*/1.5,
          /*max_overlap_to_mem_size_async_copy_ratio=*/10.0,
          /*mem_size_bytes=*/options.max_size_in_bytes));
MsaBufferIntervalCompare compare = [](const MsaBufferInterval& lhs,
const MsaBufferInterval& rhs) -> bool {
auto lookup = [](const MsaBufferInterval& x) {
int priority = 100;
if (x.buffer->instruction()->name() == "p0") {
priority = 0;
} else if (x.buffer->instruction()->name() == "p1") {
priority = 1;
}
return std::make_tuple(priority, x.buffer->instruction()->name());
};
return lookup(lhs) < lookup(rhs);
};
AssignMemorySpace(module.get(), options, compare, &prefetch_interval_picker);
ASSERT_THAT(
module->entry_computation()->root_instruction(),
op::Tuple(_,
op::Add(op::AsyncCopy(kAlternateMemorySpace,
kDefaultMemorySpace, op::Parameter(0)),
op::AsyncCopy(kAlternateMemorySpace,
kDefaultMemorySpace, op::Parameter(0))),
op::Add(op::AsyncCopy(kAlternateMemorySpace,
kDefaultMemorySpace, op::Parameter(1)),
op::AsyncCopy(kAlternateMemorySpace,
kDefaultMemorySpace, op::Parameter(1)))));
const std::vector<HloInstruction*>& schedule =
module->schedule().sequence(module->entry_computation()).instructions();
auto find_schedule_index = [&schedule](std::string_view name) -> int {
for (int i = 0; i < schedule.size(); ++i) {
if (schedule[i]->name() == name) {
return i;
}
}
LOG(FATAL) << "Unable to find index of instruction with name " << name;
};
int c_index = find_schedule_index("c");
int p1_copy_start = find_schedule_index(module->entry_computation()
->root_instruction()
->operand(2)
->operand(0)
->operand(0)
->name());
int d_index = find_schedule_index("d");
int e_index = find_schedule_index("e");
int p1_copy_end = find_schedule_index(module->entry_computation()
->root_instruction()
->operand(2)
->operand(0)
->name());
int f_index = find_schedule_index("f");
EXPECT_EQ(p1_copy_start, c_index + 1);
EXPECT_EQ(d_index, p1_copy_start + 1);
EXPECT_EQ(e_index, d_index + 1);
EXPECT_EQ(p1_copy_end, e_index + 1);
EXPECT_EQ(f_index, p1_copy_end + 1);
}
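// Fixture for sliced-prefetch tests. It provides matchers for asynchronous
// sliced copies (async slice starts/dones feeding a concat-bitcast custom
// call), a mockable slice proposer, and helpers that validate how slices are
// placed in the instruction schedule.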
class SlicedPrefetchTest : public MemorySpaceAssignmentTestBase {
protected:
enum class InstructionClass {
kUnknown,
kRelatedSliceStart,
kRelatedSliceDone,
kRelatedConcatBitcast,
kStartAfterNonCopy,
kDoneBeforeNonCopy,
kUnrelatedCopyLike,
kUnrelatedNonCopy,
};
static std::string InstructionClassToString(
InstructionClass instruction_class) {
switch (instruction_class) {
case InstructionClass::kUnknown:
return "unknown";
case InstructionClass::kRelatedSliceStart:
return "slice start";
case InstructionClass::kRelatedSliceDone:
return "slice done";
case InstructionClass::kRelatedConcatBitcast:
return "concat-bitcast";
case InstructionClass::kStartAfterNonCopy:
return "start after non-copy";
case InstructionClass::kDoneBeforeNonCopy:
return "done before non-copy";
case InstructionClass::kUnrelatedCopyLike:
return "unrelated copy-like";
case InstructionClass::kUnrelatedNonCopy:
return "unrelated non-copy";
}
}
class SliceProposer {
public:
SliceProposer() = default;
virtual ~SliceProposer() = default;
virtual absl::StatusOr<SliceProposalCollection> ProposeSlices(
const Shape& shape, const SlicedPrefetchOptions& options) = 0;
};
class MockSliceProposer : public SliceProposer {
public:
MOCK_METHOD(absl::StatusOr<SliceProposalCollection>, ProposeSlices,
(const Shape& shape, const SlicedPrefetchOptions& options),
(override));
};
class AsyncSlicedCopy
: public ::testing::MatcherInterface<const HloInstruction*> {
public:
AsyncSlicedCopy(int64_t to_space, int64_t from_space,
std::vector<std::vector<SliceParam>>
expected_slice_params_per_slice_in_spatial_order,
::testing::Matcher<const HloInstruction*> operand,
bool expect_bitcasted_io)
: to_space_(to_space),
from_space_(from_space),
expected_slice_params_per_slice_in_spatial_order_(
std::move(expected_slice_params_per_slice_in_spatial_order)),
base_hlo_matcher_(CreateBaseHloMatcher(
operand, expected_slice_params_per_slice_in_spatial_order_.size(),
expect_bitcasted_io)),
expect_bitcasted_io_(expect_bitcasted_io) {}
bool MatchAndExplain(
const HloInstruction* instruction,
::testing::MatchResultListener* listener) const override {
if (!base_hlo_matcher_.MatchAndExplain(instruction, listener)) {
return false;
}
if (!MatchMemorySpace(instruction, to_space_, "copy result", listener)) {
return false;
}
const HloInstruction* concat_bitcast =
(expect_bitcasted_io_ ? instruction->operand(0) : instruction);
VLOG(2) << "AsyncSlicedCopy identified the concat-bitcast as "
<< concat_bitcast->name();
const HloInstruction* copy_operand =
concat_bitcast->operand(0)->operand(0)->operand(0);
const HloInstruction* original_copy_operand =
(expect_bitcasted_io_ ? copy_operand->operand(0) : copy_operand);
VLOG(2) << "AsyncSlicedCopy identified the copy operand as "
<< copy_operand->name() << ", and the original copy operand as "
<< original_copy_operand->name();
if (!MatchMemorySpace(original_copy_operand, from_space_, "copy operand",
listener)) {
return false;
}
if (!Shape::Equal().IgnoreMemorySpaceInLayout()(
instruction->shape(), original_copy_operand->shape())) {
*listener << " has a shape of "
<< original_copy_operand->shape().ToString(
true)
<< " before copying but a shape of "
<< instruction->shape().ToString(true)
<< " after copying (ignoring memory space)";
return false;
}
CHECK_EQ(concat_bitcast->operand_count(),
expected_slice_params_per_slice_in_spatial_order_.size());
std::vector<const HloInstruction*> sorted_slices =
SortSlicesInExpectedSpatialOrder(concat_bitcast);
for (int i = 0; i < sorted_slices.size(); ++i) {
const HloInstruction* slice =
sorted_slices[i]->async_wrapped_instruction();
if (!MatchMemorySpace(slice, to_space_, "slice", listener)) {
return false;
}
const std::vector<SliceParam>& expected_slice_params_per_dim =
expected_slice_params_per_slice_in_spatial_order_[i];
if (slice->slice_starts().empty()) {
*listener << " has slice (" << slice->name()
<< "), with no slicing parameters";
return false;
}
if (slice->slice_limits().size() != slice->slice_starts().size() ||
slice->slice_strides().size() != slice->slice_limits().size()) {
        *listener
            << " has slice (" << slice->name()
            << "), with an inconsistent number of slice starts/limits/strides";
return false;
}
if (slice->slice_starts().size() != copy_operand->shape().rank()) {
        *listener
            << " has slice (" << slice->name() << "), with "
            << slice->slice_starts().size()
            << " slice parameters (i.e., starts/limits/strides), expected "
            << copy_operand->shape().rank();
return false;
}
for (int dim = 0; dim < slice->slice_starts().size(); ++dim) {
const SliceParam& expected_slice_params =
expected_slice_params_per_dim[dim];
if (slice->slice_starts()[dim] !=
expected_slice_params.start_inclusive) {
*listener << " has slice (" << slice->name()
<< "), with slice start of " << slice->slice_starts()[dim]
<< " at dim " << dim << ", expected "
<< expected_slice_params.start_inclusive;
return false;
}
if (slice->slice_limits()[dim] !=
expected_slice_params.end_exclusive) {
*listener << " has slice (" << slice->name()
<< "), with slice limit of " << slice->slice_limits()[dim]
<< " at dim " << dim << ", expected "
<< expected_slice_params.end_exclusive;
return false;
}
if (slice->slice_strides()[dim] != 1) {
*listener << " has slice (" << slice->name()
<< "), slice stride of " << slice->slice_strides()[dim]
<< " at dim " << dim << ", expected 1";
return false;
}
}
}
return true;
}
void DescribeTo(std::ostream* os) const override {
base_hlo_matcher_.DescribeTo(os);
std::vector<std::string> slice_parameters_per_operand;
for (int op_idx = 0;
op_idx < expected_slice_params_per_slice_in_spatial_order_.size();
++op_idx) {
std::vector<std::string> slice_params_per_dim;
for (int dim = 0;
dim <
expected_slice_params_per_slice_in_spatial_order_[op_idx].size();
++dim) {
const SliceParam& slice_params =
expected_slice_params_per_slice_in_spatial_order_[op_idx][dim];
slice_params_per_dim.push_back(absl::StrCat(
"dim ", dim, ": {start: ", slice_params.start_inclusive,
", limit: ", slice_params.end_exclusive, "}"));
}
slice_parameters_per_operand.push_back(
absl::StrCat("operand ", op_idx, ": { ",
absl::StrJoin(slice_params_per_dim, ", "), " }"));
}
*os << " (copying from memory space " << from_space_ << " to "
<< to_space_
<< ", with asynchronous slice operands using the following slice "
"parameters: { "
<< absl::StrJoin(slice_parameters_per_operand, ", ") << " })";
}
private:
static ::testing::Matcher<const HloInstruction*> CreateBaseHloMatcher(
::testing::Matcher<const HloInstruction*> operand, int64_t num_slices,
bool expect_bitcasted_io) {
if (expect_bitcasted_io) {
return op::Bitcast(op::CustomCall(
kConcatBitcastCustomCall,
std::vector<::testing::Matcher<const HloInstruction*>>(
num_slices,
op::AsyncDone(op::AsyncStart(op::Bitcast(operand))))));
}
return op::CustomCall(
kConcatBitcastCustomCall,
std::vector<::testing::Matcher<const HloInstruction*>>(
num_slices, op::AsyncDone(op::AsyncStart(operand))));
}
static bool MatchMemorySpace(const HloInstruction* instruction,
int64_t expected_memory_space,
std::string_view error_message_identifier,
::testing::MatchResultListener* listener) {
if (!instruction->shape().has_layout()) {
*listener << " contains " << error_message_identifier << " named "
<< instruction->name()
<< " without a layout, expected a layout with memory space "
<< expected_memory_space;
return false;
}
if (instruction->shape().layout().memory_space() !=
expected_memory_space) {
*listener << " contains " << error_message_identifier << " named "
<< instruction->name() << " in memory space "
<< expected_memory_space << ", expected "
<< expected_memory_space;
return false;
}
return true;
}
int64_t to_space_;
int64_t from_space_;
std::vector<std::vector<SliceParam>>
expected_slice_params_per_slice_in_spatial_order_;
::testing::Matcher<const HloInstruction*> base_hlo_matcher_;
bool expect_bitcasted_io_;
};
static inline ::testing::Matcher<const HloInstruction*> IsAsyncSlicedCopy(
int64_t to_space, int64_t from_space,
std::vector<std::vector<SliceParam>>
expected_slice_params_per_slice_in_spatial_order,
::testing::Matcher<const HloInstruction*> operand_matcher,
bool expect_bitcasted_io = false) {
return ::testing::MakeMatcher(new AsyncSlicedCopy(
to_space, from_space, expected_slice_params_per_slice_in_spatial_order,
operand_matcher, expect_bitcasted_io));
}
class SlicedPrefetchOptionsMatcher
: public ::testing::MatcherInterface<const SlicedPrefetchOptions&> {
public:
explicit SlicedPrefetchOptionsMatcher(
SlicedPrefetchOptions expected_options)
: expected_options_(std::move(expected_options)) {}
bool MatchAndExplain(
const SlicedPrefetchOptions& options,
::testing::MatchResultListener* listener) const override {
if (options.max_slices() != expected_options_.max_slices()) {
*listener << " has " << options.max_slices() << " max slices, expected "
<< expected_options_.max_slices();
return false;
}
if (options.min_bytes() != expected_options_.min_bytes()) {
*listener << " has " << options.min_bytes() << " min bytes, expected "
<< expected_options_.min_bytes();
return false;
}
if (options.fail_on_non_alignment_boundary_slice_proposal() !=
expected_options_.fail_on_non_alignment_boundary_slice_proposal()) {
*listener
<< " has fail_on_non_alignment_boundary_slice_proposal set to "
<< options.fail_on_non_alignment_boundary_slice_proposal()
<< ", expected "
<< expected_options_
.fail_on_non_alignment_boundary_slice_proposal();
return false;
}
return true;
}
void DescribeTo(std::ostream* os) const override {
*os << " has the following options: max_slices("
<< expected_options_.max_slices() << "), min_bytes("
<< expected_options_.min_bytes()
<< ") fail_on_non_alignment_boundary_slice_proposal("
<< expected_options_.fail_on_non_alignment_boundary_slice_proposal()
<< ")";
}
private:
SlicedPrefetchOptions expected_options_;
};
static inline ::testing::Matcher<const SlicedPrefetchOptions&>
EqualsSlicedPrefetchOptions(SlicedPrefetchOptions expected_options) {
return ::testing::MakeMatcher(
new SlicedPrefetchOptionsMatcher(std::move(expected_options)));
}
static std::vector<const HloInstruction*> SortSlicesInExpectedSpatialOrder(
const HloInstruction* concat_bitcast) {
std::vector<const HloInstruction*> sorted_slices(
concat_bitcast->operands().begin(), concat_bitcast->operands().end());
absl::c_sort(sorted_slices, [](const HloInstruction* lhs,
const HloInstruction* rhs) {
CHECK(IsAsyncSliceDone(lhs));
CHECK(IsAsyncSliceDone(rhs));
CHECK(!lhs->async_wrapped_instruction()->slice_starts().empty());
CHECK(!rhs->async_wrapped_instruction()->slice_starts().empty());
return lhs->async_wrapped_instruction()->slice_starts().front() <
rhs->async_wrapped_instruction()->slice_starts().front();
});
return sorted_slices;
}
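  // Opcode-based predicates used to classify instructions when checking
  // schedules.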
static bool IsAsyncCopyStart(const HloInstruction* instruction) {
return instruction->opcode() == HloOpcode::kCopyStart;
}
static bool IsAsyncCopyDone(const HloInstruction* instruction) {
return instruction->opcode() == HloOpcode::kCopyDone;
}
static bool IsAsyncSliceStart(const HloInstruction* instruction) {
return instruction->opcode() == HloOpcode::kAsyncStart &&
instruction->async_wrapped_instruction()->opcode() ==
HloOpcode::kSlice;
}
static bool IsAsyncSliceDone(const HloInstruction* instruction) {
return instruction->opcode() == HloOpcode::kAsyncDone &&
instruction->async_wrapped_instruction()->opcode() ==
HloOpcode::kSlice;
}
static bool IsConcatBitcast(const HloInstruction* instruction) {
return instruction->IsCustomCall(kConcatBitcastCustomCall);
}
static absl::StatusOr<int> FindScheduleIndexOfInstruction(
const std::vector<HloInstruction*>& schedule, std::string_view name,
InstructionClass c) {
for (int i = 0; i < schedule.size(); ++i) {
if (schedule[i]->name() == name) {
return i;
}
}
return NotFound(
"%s",
absl::StrCat("Could not find ", InstructionClassToString(c),
" instruction ", name, " in the instruction schedule."));
}
static const HloInstruction* FindNamedScheduledInstruction(
const HloModule& module, std::string_view name) {
for (const HloInstruction* i : module.entry_computation()->instructions()) {
if (i->name() == name) {
return i;
}
}
return nullptr;
}
static absl::StatusOr<std::vector<int>> GetSliceStartIndicies(
const std::vector<HloInstruction*>& schedule,
const HloInstruction* concat_bitcast) {
    std::vector<int> indices;
if (!IsConcatBitcast(concat_bitcast)) {
return InvalidArgumentStrCat(concat_bitcast->name(),
" is not a concat-bitcast.");
}
for (int i = 0; i < concat_bitcast->operand_count(); ++i) {
const HloInstruction* async_slice_done = concat_bitcast->operand(i);
if (!IsAsyncSliceDone(async_slice_done)) {
return InvalidArgumentStrCat("Operand ", i, " of ",
concat_bitcast->name(),
" is not an async-slice-done.");
}
const HloInstruction* async_slice_start = async_slice_done->operand(0);
if (!IsAsyncSliceStart(async_slice_start)) {
return InvalidArgumentStrCat("Operand 0, of operand ", i, " of ",
concat_bitcast->name(),
" is not an async-slice-start.");
}
TF_ASSIGN_OR_RETURN(
int schedule_index,
FindScheduleIndexOfInstruction(schedule, async_slice_start->name(),
InstructionClass::kRelatedSliceStart));
      indices.push_back(schedule_index);
}
    return indices;
}
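  // The helpers below verify schedule invariants: slice-related instructions
  // must fall between the designated "start after" and "done before"
  // instructions, slice starts should be spread apart, and the concat-bitcast
  // must come after all slice dones.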
static absl::Status ConcatBitcastAndSlicesAfterInstruction(
const std::vector<HloInstruction*>& schedule,
const std::vector<InstructionClass>& schedule_to_class,
int slices_start_after_index) {
for (int i = 0; i < slices_start_after_index; ++i) {
InstructionClass c = schedule_to_class[i];
const HloInstruction* instruction = schedule[i];
if (c == InstructionClass::kRelatedSliceStart ||
c == InstructionClass::kRelatedSliceDone ||
c == InstructionClass::kRelatedConcatBitcast) {
return FailedPrecondition(
"%s", absl::StrCat(InstructionClassToString(c), " ",
instruction->name(), " is scheduled at ", i,
", but is expected to be after ",
schedule[slices_start_after_index]->name(),
" at ", slices_start_after_index, "."));
}
}
return absl::OkStatus();
}
static absl::Status AtLeastOneNonCopyLikeInstructionBetweenSliceStarts(
const std::vector<HloInstruction*>& schedule,
const std::vector<InstructionClass>& schedule_to_class) {
bool found_non_copy_since_last_slice_start = true;
for (int i = 0; i < schedule_to_class.size(); ++i) {
InstructionClass c = schedule_to_class[i];
if (c == InstructionClass::kRelatedSliceStart &&
!found_non_copy_since_last_slice_start) {
return FailedPrecondition(
"%s",
absl::StrCat(
"Did not find a non-copy-like instruction between slice start ",
schedule[i]->name(), " at ", i,
" and the previous slice start."));
}
if (c == InstructionClass::kRelatedSliceStart) {
found_non_copy_since_last_slice_start = false;
} else if (c == InstructionClass::kUnrelatedNonCopy) {
found_non_copy_since_last_slice_start = true;
}
}
return absl::OkStatus();
}
static absl::Status OneSliceStartAfterInstructionWithNoCopyLikeBetween(
const std::vector<HloInstruction*>& schedule,
const std::vector<InstructionClass>& schedule_to_class,
int slices_start_after_index) {
int first_slice_start_after_schedule_after = -1;
int first_non_copy_after_schedule_after = -1;
for (int i = slices_start_after_index + 1;
i < schedule_to_class.size() &&
(first_slice_start_after_schedule_after == -1 ||
first_non_copy_after_schedule_after == -1);
++i) {
if (first_slice_start_after_schedule_after == -1 &&
schedule_to_class[i] == InstructionClass::kRelatedSliceStart) {
first_slice_start_after_schedule_after = i;
continue;
}
if (first_non_copy_after_schedule_after == -1 &&
schedule_to_class[i] == InstructionClass::kUnrelatedNonCopy) {
first_non_copy_after_schedule_after = i;
continue;
}
}
if (first_slice_start_after_schedule_after == -1) {
return NotFound(
"%s", absl::StrCat("Could not find a slice start instruction "
"after start after instruction ",
schedule[slices_start_after_index]->name(), " at ",
slices_start_after_index, "."));
}
if (first_non_copy_after_schedule_after == -1) {
return NotFound(
"%s", absl::StrCat("Could not a find non-copy-like instruction "
"after start after instruction ",
schedule[slices_start_after_index]->name(), " at ",
slices_start_after_index, "."));
}
if (first_slice_start_after_schedule_after >
first_non_copy_after_schedule_after) {
return FailedPrecondition(
"%s", absl::StrCat(
"Unexpectedly found a non-copy-like instruction at ",
first_non_copy_after_schedule_after, ", between ",
schedule[slices_start_after_index]->name(), " at ",
slices_start_after_index, ", and the first slice start at ",
first_slice_start_after_schedule_after, "."));
}
return absl::OkStatus();
}
static absl::Status ConcatBitcastAndSlicesBeforeInstruction(
const std::vector<HloInstruction*>& schedule,
const std::vector<InstructionClass>& schedule_to_class,
int slices_done_before_index) {
for (int i = slices_done_before_index + 1; i < schedule_to_class.size();
++i) {
InstructionClass c = schedule_to_class[i];
const HloInstruction* instruction = schedule[i];
if (c == InstructionClass::kRelatedSliceStart ||
c == InstructionClass::kRelatedSliceDone ||
c == InstructionClass::kRelatedConcatBitcast) {
return FailedPrecondition(
"%s", absl::StrCat(InstructionClassToString(c), " ",
instruction->name(), " is scheduled at ", i,
", but is expected to be before ",
schedule[slices_done_before_index]->name(),
" at ", slices_done_before_index, "."));
}
}
return absl::OkStatus();
}
static absl::Status
ConcatBitcastAndSliceDonesBeforeInstructionWithNoCopyLikeBetween(
const std::vector<HloInstruction*>& schedule,
const std::vector<InstructionClass>& schedule_to_class,
int slices_done_before_index) {
bool found_non_copy = false;
for (int i = slices_done_before_index - 1; i >= 0; --i) {
InstructionClass c = schedule_to_class[i];
const HloInstruction* instruction = schedule[i];
if (c == InstructionClass::kUnrelatedNonCopy) {
found_non_copy = true;
continue;
}
if (found_non_copy && (c == InstructionClass::kRelatedSliceDone ||
c == InstructionClass::kRelatedConcatBitcast)) {
return FailedPrecondition(
"%s",
absl::StrCat("Found non-copy instruction between ",
InstructionClassToString(c), " ", instruction->name(),
" at ", i, ", and slice done before instruction ",
schedule[slices_done_before_index]->name(), " at ",
slices_done_before_index, "."));
}
}
return absl::OkStatus();
}
static absl::Status ConcatBitcastAfterSliceDones(
const std::vector<HloInstruction*>& schedule,
const std::vector<InstructionClass>& schedule_to_class) {
int concat_bitcast_index = -1;
for (int i = 0; i < schedule_to_class.size(); ++i) {
InstructionClass c = schedule_to_class[i];
const HloInstruction* instruction = schedule[i];
if (concat_bitcast_index == -1 &&
c == InstructionClass::kRelatedConcatBitcast) {
concat_bitcast_index = i;
continue;
}
if (concat_bitcast_index != -1 &&
c == InstructionClass::kRelatedSliceDone) {
return FailedPrecondition(
"%s", absl::StrCat("Unexpectedly, found concat-bitcast ",
schedule[concat_bitcast_index]->name(), " at ",
concat_bitcast_index,
", which is before the slice done ",
instruction->name(), " at ", i, "."));
}
}
return absl::OkStatus();
}
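// Classifies every instruction in the entry computation's schedule and checks
// that the sliced prefetch rooted at concat_bitcast starts after
// slices_start_after_instruction_name, finishes before
// slices_done_before_instruction_name, and, if
// expect_slices_started_at_different_times is set, staggers its slice starts.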
static absl::Status CheckSchedule(
const HloModule& module, const HloInstruction* concat_bitcast,
std::string_view slices_start_after_instruction_name,
std::string_view slices_done_before_instruction_name,
bool expect_slices_started_at_different_times) {
CHECK(concat_bitcast->IsCustomCall(kConcatBitcastCustomCall));
auto entry_schedule =
module.schedule().sequence(module.entry_computation()).instructions();
std::vector<InstructionClass> schedule_to_class(
entry_schedule.size(), InstructionClass::kUnrelatedNonCopy);
for (int i = 0; i < entry_schedule.size(); ++i) {
const HloInstruction* instruction = entry_schedule[i];
if (IsAsyncCopyStart(instruction) || IsAsyncCopyDone(instruction) ||
IsAsyncSliceStart(instruction) || IsAsyncSliceDone(instruction) ||
IsConcatBitcast(instruction)) {
schedule_to_class[i] = InstructionClass::kUnrelatedCopyLike;
}
}
int slices_start_after_index;
TF_ASSIGN_OR_RETURN(slices_start_after_index,
FindScheduleIndexOfInstruction(
entry_schedule, slices_start_after_instruction_name,
InstructionClass::kStartAfterNonCopy));
schedule_to_class[slices_start_after_index] =
InstructionClass::kStartAfterNonCopy;
int slices_done_before_index;
TF_ASSIGN_OR_RETURN(slices_done_before_index,
FindScheduleIndexOfInstruction(
entry_schedule, slices_done_before_instruction_name,
InstructionClass::kDoneBeforeNonCopy));
schedule_to_class[slices_done_before_index] =
InstructionClass::kDoneBeforeNonCopy;
int concat_bitcast_index;
TF_ASSIGN_OR_RETURN(concat_bitcast_index,
FindScheduleIndexOfInstruction(
entry_schedule, concat_bitcast->name(),
InstructionClass::kRelatedConcatBitcast));
schedule_to_class[concat_bitcast_index] =
InstructionClass::kRelatedConcatBitcast;
for (const HloInstruction* slice : concat_bitcast->operands()) {
int done_index;
TF_ASSIGN_OR_RETURN(done_index, FindScheduleIndexOfInstruction(
entry_schedule, slice->name(),
InstructionClass::kRelatedSliceDone));
schedule_to_class[done_index] = InstructionClass::kRelatedSliceDone;
int start_index;
TF_ASSIGN_OR_RETURN(start_index,
FindScheduleIndexOfInstruction(
entry_schedule, slice->operand(0)->name(),
InstructionClass::kRelatedSliceStart));
schedule_to_class[start_index] = InstructionClass::kRelatedSliceStart;
}
TF_RETURN_IF_ERROR(ConcatBitcastAndSlicesAfterInstruction(
entry_schedule, schedule_to_class, slices_start_after_index));
TF_RETURN_IF_ERROR(OneSliceStartAfterInstructionWithNoCopyLikeBetween(
entry_schedule, schedule_to_class, slices_start_after_index));
if (expect_slices_started_at_different_times) {
TF_RETURN_IF_ERROR(AtLeastOneNonCopyLikeInstructionBetweenSliceStarts(
entry_schedule, schedule_to_class));
}
TF_RETURN_IF_ERROR(ConcatBitcastAndSlicesBeforeInstruction(
entry_schedule, schedule_to_class, slices_done_before_index));
TF_RETURN_IF_ERROR(
ConcatBitcastAndSliceDonesBeforeInstructionWithNoCopyLikeBetween(
entry_schedule, schedule_to_class, slices_done_before_index));
TF_RETURN_IF_ERROR(
ConcatBitcastAfterSliceDones(entry_schedule, schedule_to_class));
return absl::OkStatus();
}
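// Checks that every slice of sliced_copy_result is assigned exactly one chunk
// sized to its shape, that the slice chunks are contiguous in the expected
// spatial order, and that the result is assigned one chunk spanning the union
// of the slice chunks.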
static absl::Status CheckSliceChunks(const PresetAssignments& assignments,
const HloInstruction* sliced_copy_result,
bool expect_bitcasted_io = false) {
const HloInstruction* concat_bitcast =
(expect_bitcasted_io ? sliced_copy_result->operand(0)
: sliced_copy_result);
CHECK(concat_bitcast->IsCustomCall(kConcatBitcastCustomCall));
absl::flat_hash_map<const HloInstruction*, Chunk> slices_to_chunks;
std::optional<Chunk> result_chunk = std::nullopt;
for (const std::pair<HloPosition, Chunk>& position_chunk_pair :
assignments.chunks()) {
if (position_chunk_pair.first.instruction == sliced_copy_result) {
if (result_chunk.has_value()) {
return FailedPrecondition(
"%s", absl::StrCat("Sliced copy ", sliced_copy_result->name(),
" is assigned more than one chunk: ",
result_chunk->ToString(), " and ",
position_chunk_pair.second.ToString()));
}
result_chunk = position_chunk_pair.second;
}
for (const HloInstruction* slice : concat_bitcast->operands()) {
if (position_chunk_pair.first.instruction == slice) {
auto it = slices_to_chunks.find(slice);
if (it != slices_to_chunks.end()) {
return FailedPrecondition(
"%s", absl::StrCat("Slice ", slice->name(),
" is assigned more than one chunk: ",
it->second.ToString(), " and ",
position_chunk_pair.second.ToString()));
}
slices_to_chunks[slice] = position_chunk_pair.second;
}
}
}
std::vector<const HloInstruction*> sorted_slices =
SortSlicesInExpectedSpatialOrder(concat_bitcast);
VLOG(1) << "Chunk assignments for " << sliced_copy_result->name() << ":\n"
<< absl::StrJoin(
sorted_slices, "\n",
[&](std::string* out, const HloInstruction* slice) {
auto it = slices_to_chunks.find(slice);
std::string chunk = "no chunk assigned";
if (it != slices_to_chunks.end()) {
chunk = it->second.ToString();
}
absl::StrAppend(out, " slice ", slice->name(), ": ",
chunk);
})
<< "\n sliced copy result " << sliced_copy_result->name() << ": "
<< (result_chunk.has_value() ? result_chunk->ToString()
: "no chunk assigned");
if (sorted_slices.empty()) {
return absl::OkStatus();
}
int64_t previous_end = -1;
int64_t min_offset = std::numeric_limits<int64_t>::max();
int64_t max_limit = std::numeric_limits<int64_t>::min();
for (const HloInstruction* slice : sorted_slices) {
auto it = slices_to_chunks.find(slice);
if (it == slices_to_chunks.end()) {
return FailedPrecondition(
"%s",
absl::StrCat("Slice ", slice->name(), " is not assigned a chunk"));
}
const Chunk& chunk = it->second;
if (chunk.size != ShapeSize(slice->shape())) {
return FailedPrecondition(
"%s",
absl::StrCat("Slice ", slice->name(), " is assigned chunk ",
chunk.ToString(), " with size ", chunk.size,
". Expected a size of ", ShapeSize(slice->shape()),
", to match its shape."));
}
if (previous_end != -1 && chunk.offset != previous_end) {
return FailedPrecondition(
"%s", absl::StrCat(
"Slice ", slice->name(), " starts at offset ",
chunk.offset, ". Expected it to start at ", previous_end,
" because that's where the previous slice ended."));
}
previous_end = chunk.chunk_end();
min_offset = std::min(min_offset, chunk.offset);
max_limit = std::max(max_limit, chunk.chunk_end());
}
if (!result_chunk.has_value()) {
return FailedPrecondition(
"%s", absl::StrCat("Sliced copy result ", sliced_copy_result->name(),
" is not assigned a chunk."));
}
Chunk expected_result_chunk = Chunk::FromOffsetEnd(min_offset, max_limit);
if (!(*result_chunk == expected_result_chunk)) {
return FailedPrecondition(
"%s", absl::StrCat("Sliced copy result ", sliced_copy_result->name(),
" is assigned chunk ", result_chunk->ToString(),
", but it's expected to be assigned chunk ",
expected_result_chunk.ToString()));
}
if (result_chunk->size != ShapeSize(sliced_copy_result->shape())) {
return FailedPrecondition(
"%s", absl::StrCat("Sliced copy result ", sliced_copy_result->name(),
" is assigned chunk ", result_chunk->ToString(),
" with size ", result_chunk->size,
". Expected a size of ",
ShapeSize(sliced_copy_result->shape()),
", to match its shape."));
}
return absl::OkStatus();
}
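// Wires the MSA options to the mock slice proposer. By default, ProposeSlices
// must not be called; tests that expect slicing override this with their own
// EXPECT_CALLs.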
SlicedPrefetchTest() {
EXPECT_CALL(slice_proposer_, ProposeSlices(_, _)).Times(0);
options_.max_size_in_bytes = 1024;
options_.sliced_prefetch_options.set_max_slices(2);
options_.sliced_prefetch_options.set_min_bytes(8);
options_.propose_slice_fn = [&](const Shape& shape,
const SlicedPrefetchOptions& options) {
return slice_proposer_.ProposeSlices(shape, options);
};
options_.get_equivalent_s8_shape_fn = [](const Shape& original_shape) {
return ShapeUtil::MakeShape(S8, {ShapeSize(original_shape)});
};
}
void SetupProposeSlicesToExpect2SlicesOfF32x8x8() {
EXPECT_CALL(slice_proposer_,
ProposeSlices(f32_8_8_, EqualsSlicedPrefetchOptions(
options_.sliced_prefetch_options)))
.WillRepeatedly(Return(SliceProposalCollection({
SliceProposal({f32_4_8_, std::vector<SliceParam>({{0, 4}, {0, 8}}),
ShapeSize(f32_4_8_)}),
SliceProposal({f32_4_8_, std::vector<SliceParam>({{4, 8}, {0, 8}}),
ShapeSize(f32_4_8_)}),
})));
}
const Shape f32_8_8_ = ShapeUtil::MakeShape(F32, {8, 8});
const Shape f32_4_8_ = ShapeUtil::MakeShape(F32, {4, 8});
MockSliceProposer slice_proposer_;
Options options_ = DefaultMemorySpaceOptions();
};
TEST_F(SlicedPrefetchTest, TwoSlices) {
std::string hlo_text = R"zz(
HloModule Slice, is_scheduled=true
ENTRY main {
p0 = f32[8,8] parameter(0)
p1 = f32[8,8] parameter(1)
a = f32[8,8] tanh(p0)
b = f32[8,8] tanh(a)
c = f32[8,8] tanh(b)
ROOT r = f32[8,8] add(c, p1)
})zz";
SetupProposeSlicesToExpect2SlicesOfF32x8x8();
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_text));
VLOG(1) << "Original module:\n"
<< module->ToString(HloPrintOptions::ShortParsable());
std::unique_ptr<PresetAssignments> assignments = AssignMemorySpace(
module.get(), options_,
10, 1);
VLOG(1) << "Post-MSA module:\n"
<< module->ToString(HloPrintOptions::ShortParsable());
auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, op::Add(_, IsAsyncSlicedCopy(
kAlternateMemorySpace, kDefaultMemorySpace,
{{{0, 4}, {0, 8}}, {{4, 8}, {0, 8}}},
op::Parameter(1))));
TF_EXPECT_OK(
CheckSchedule(*module, root->operand(1),
"p1",
"r",
true));
TF_EXPECT_OK(CheckSliceChunks(*assignments, root->operand(1)));
}
TEST_F(SlicedPrefetchTest, ThreeSlices) {
std::string hlo_text = R"zz(
HloModule Slice, is_scheduled=true
ENTRY main {
p0 = f32[8,8] parameter(0)
p1 = f32[8,8] parameter(1)
a = f32[8,8] tanh(p0)
b = f32[8,8] tanh(a)
c = f32[8,8] tanh(b)
ROOT r = f32[8,8] add(c, p1)
})zz";
const Shape f32_3_8 = ShapeUtil::MakeShape(F32, {3, 8});
const Shape f32_2_8 = ShapeUtil::MakeShape(F32, {2, 8});
options_.sliced_prefetch_options.set_max_slices(3);
EXPECT_CALL(slice_proposer_,
ProposeSlices(f32_8_8_, EqualsSlicedPrefetchOptions(
options_.sliced_prefetch_options)))
.WillRepeatedly(Return(SliceProposalCollection({
SliceProposal({f32_3_8, std::vector<SliceParam>({{0, 3}, {0, 8}}),
ShapeSize(f32_3_8)}),
SliceProposal({f32_3_8, std::vector<SliceParam>({{3, 6}, {0, 8}}),
ShapeSize(f32_3_8)}),
SliceProposal({f32_2_8, std::vector<SliceParam>({{6, 8}, {0, 8}}),
ShapeSize(f32_2_8)}),
})));
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_text));
VLOG(1) << "Original module:\n"
<< module->ToString(HloPrintOptions::ShortParsable());
std::unique_ptr<PresetAssignments> assignments = AssignMemorySpace(
module.get(), options_,
10, 1);
VLOG(1) << "Post-MSA module:\n"
<< module->ToString(HloPrintOptions::ShortParsable());
auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(
root,
op::Add(_, IsAsyncSlicedCopy(
kAlternateMemorySpace, kDefaultMemorySpace,
{{{0, 3}, {0, 8}}, {{3, 6}, {0, 8}}, {{6, 8}, {0, 8}}},
op::Parameter(1))));
TF_EXPECT_OK(
CheckSchedule(*module, root->operand(1),
"p1",
"r",
true));
TF_EXPECT_OK(CheckSliceChunks(*assignments, root->operand(1)));
}
TEST_F(SlicedPrefetchTest, SlicingDisabled) {
std::string hlo_text = R"zz(
HloModule Slice, is_scheduled=true
ENTRY main {
p0 = f32[8,8] parameter(0)
p1 = f32[8,8] parameter(1)
a = f32[8,8] tanh(p0)
b = f32[8,8] tanh(a)
c = f32[8,8] tanh(b)
ROOT r = f32[8,8] add(c, p1)
})zz";
options_.sliced_prefetch_options.set_max_slices(0);
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_text));
VLOG(1) << "Original module:\n"
<< module->ToString(HloPrintOptions::ShortParsable());
std::unique_ptr<PresetAssignments> assignments = AssignMemorySpace(
module.get(), options_,
10, 1);
VLOG(1) << "Post-MSA module:\n"
<< module->ToString(HloPrintOptions::ShortParsable());
auto entry_schedule =
module->schedule().sequence(module->entry_computation()).instructions();
for (const HloInstruction* instruction : entry_schedule) {
EXPECT_FALSE(IsAsyncSliceStart(instruction));
EXPECT_FALSE(IsAsyncSliceDone(instruction));
EXPECT_FALSE(IsConcatBitcast(instruction));
}
}
TEST_F(SlicedPrefetchTest, TooSmallToSlice) {
std::string hlo_text = R"zz(
HloModule Slice, is_scheduled=true
ENTRY main {
p0 = f32[8,8] parameter(0)
p1 = f32[8,8] parameter(1)
a = f32[8,8] tanh(p0)
b = f32[8,8] tanh(a)
c = f32[8,8] tanh(b)
ROOT r = f32[8,8] add(c, p1)
})zz";
options_.sliced_prefetch_options.set_min_bytes(1000000000);
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_text));
VLOG(1) << "Original module:\n"
<< module->ToString(HloPrintOptions::ShortParsable());
std::unique_ptr<PresetAssignments> assignments = AssignMemorySpace(
module.get(), options_,
10, 1);
VLOG(1) << "Post-MSA module:\n"
<< module->ToString(HloPrintOptions::ShortParsable());
auto entry_schedule =
module->schedule().sequence(module->entry_computation()).instructions();
for (const HloInstruction* instruction : entry_schedule) {
EXPECT_FALSE(IsAsyncSliceStart(instruction));
EXPECT_FALSE(IsAsyncSliceDone(instruction));
EXPECT_FALSE(IsConcatBitcast(instruction));
}
}
TEST_F(SlicedPrefetchTest, FallbackToUnsliced) {
std::string hlo_text = R"zz(
HloModule Slice, is_scheduled=true
ENTRY main {
p0 = f32[8,8] parameter(0)
p1 = f32[8,8] parameter(1)
a = f32[8,8] tanh(p0)
b = f32[8,8] tanh(a)
c = f32[8,8] tanh(b)
ROOT r = f32[8,8] add(c, p1)
})zz";
EXPECT_CALL(slice_proposer_,
ProposeSlices(f32_8_8_, EqualsSlicedPrefetchOptions(
options_.sliced_prefetch_options)))
.WillRepeatedly(Return(absl::StatusOr<SliceProposalCollection>(
FailedPrecondition("%s", "Cannot slice."))));
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_text));
VLOG(1) << "Original module:\n"
<< module->ToString(HloPrintOptions::ShortParsable());
std::unique_ptr<PresetAssignments> assignments = AssignMemorySpace(
module.get(), options_,
10, 1);
VLOG(1) << "Post-MSA module:\n"
<< module->ToString(HloPrintOptions::ShortParsable());
auto entry_schedule =
module->schedule().sequence(module->entry_computation()).instructions();
for (const HloInstruction* instruction : entry_schedule) {
EXPECT_FALSE(IsAsyncSliceStart(instruction));
EXPECT_FALSE(IsAsyncSliceDone(instruction));
EXPECT_FALSE(IsConcatBitcast(instruction));
}
}
TEST_F(SlicedPrefetchTest, UsingCostAnalysisIntervalPicker) {
std::string hlo_text = R"zz(
HloModule Slice, is_scheduled=true
ENTRY main {
p0 = f32[8,8] parameter(0)
p1 = f32[8,8] parameter(1)
a = f32[8,8] tanh(p0)
b = f32[8,8] tanh(a)
c = f32[8,8] tanh(b)
ROOT r = f32[8,8] add(c, p1)
})zz";
SetupProposeSlicesToExpect2SlicesOfF32x8x8();
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_text));
VLOG(1) << "Original module:\n"
<< module->ToString(HloPrintOptions::ShortParsable());
std::unique_ptr<PresetAssignments> assignments =
AssignMemorySpaceUsingCostAnalysis(
module.get(), options_);
VLOG(1) << "Post-MSA module:\n"
<< module->ToString(HloPrintOptions::ShortParsable());
auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, op::Add(_, IsAsyncSlicedCopy(
kAlternateMemorySpace, kDefaultMemorySpace,
{{{0, 4}, {0, 8}}, {{4, 8}, {0, 8}}},
op::Parameter(1))));
TF_EXPECT_OK(CheckSchedule(
*module, root->operand(1),
"a",
"r",
true));
TF_EXPECT_OK(CheckSliceChunks(*assignments, root->operand(1)));
}
TEST_F(SlicedPrefetchTest, LoopAliasing) {
std::string hlo_text = R"zz(
HloModule Slice, is_scheduled=true
WhileBody {
body_param = (f32[8,8], f32[8,8], f32[], f32[]) parameter(0)
v0 = f32[8,8] get-tuple-element(body_param), index=0
v1 = f32[8,8] get-tuple-element(body_param), index=1
i = f32[] get-tuple-element(body_param), index=2
limit = f32[] get-tuple-element(body_param), index=3
one = f32[] constant(1)
new_i = f32[] add(i, one)
new_v1 = f32[8,8] add(v0, v1)
ROOT while_result = (f32[8,8], f32[8,8], f32[], f32[]) tuple(v0, new_v1, new_i, limit)
}
WhileCond {
cond_param = (f32[8,8], f32[8,8], f32[], f32[]) parameter(0)
i = f32[] get-tuple-element(cond_param), index=2
limit = f32[] get-tuple-element(cond_param), index=3
ROOT cond_result = pred[] compare(i, limit), direction=LT
}
ENTRY main {
p0 = f32[8,8] parameter(0)
p1 = f32[8,8] parameter(1)
iterations = f32[] parameter(2)
initial = f32[] constant(0)
a = f32[8,8] tanh(p0)
b = f32[8,8] tanh(a)
c = f32[8,8] tanh(b)
t = (f32[8,8], f32[8,8], f32[], f32[]) tuple(p0, p1, initial, iterations)
w = (f32[8,8], f32[8,8], f32[], f32[]) while(t), condition=WhileCond, body=WhileBody
d = f32[8,8] get-tuple-element(w), index=1
ROOT r = f32[8,8] add(c, d)
})zz";
SetupProposeSlicesToExpect2SlicesOfF32x8x8();
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_text));
VLOG(1) << "Original module:\n"
<< module->ToString(HloPrintOptions::ShortParsable());
std::unique_ptr<PresetAssignments> assignments =
AssignMemorySpaceUsingCostAnalysis(
module.get(), options_);
VLOG(1) << "Post-MSA module:\n"
<< module->ToString(HloPrintOptions::ShortParsable());
auto root = module->entry_computation()->root_instruction();
ASSERT_THAT(
root,
op::Add(_,
op::GetTupleElement(
op::While(
op::Tuple(_,
IsAsyncSlicedCopy(
kAlternateMemorySpace, kDefaultMemorySpace,
{{{0, 4}, {0, 8}}, {{4, 8}, {0, 8}}},
op::Parameter(1)),
_, _)),
1)));
HloInstruction* w = root->mutable_operand(1)->mutable_operand(0);
HloInstruction* t = w->mutable_operand(0);
HloInstruction* concat_bitcast = t->mutable_operand(1);
HloComputation* while_body = w->while_body();
HloInstruction* body_param = while_body->parameter_instruction(0);
HloComputation* while_cond = w->while_condition();
HloInstruction* cond_param = while_cond->parameter_instruction(0);
absl::flat_hash_set<HloPosition> expected_aliases({
HloPosition{concat_bitcast, {}},
HloPosition{w, {1}},
HloPosition{t, {1}},
HloPosition{body_param, {1}},
HloPosition{cond_param, {1}},
});
auto alias_analysis = HloAliasAnalysis::Run(module.get()).value();
VLOG(2) << alias_analysis->ToString();
const HloBuffer& concat_bitcast_buffer =
alias_analysis->GetUniqueBufferAt(concat_bitcast);
EXPECT_THAT(concat_bitcast_buffer.ComputePositions(),
::testing::IsSupersetOf(expected_aliases));
int num_chunks_for_expected_aliases = 0;
for (const auto& position_chunk_pair : assignments->chunks()) {
if (expected_aliases.contains(position_chunk_pair.first)) {
num_chunks_for_expected_aliases++;
}
}
EXPECT_EQ(num_chunks_for_expected_aliases, 1);
}
class MockRepacker : public MemorySpaceAssignmentRepacker {
public:
MockRepacker()
: MemorySpaceAssignmentRepacker(std::numeric_limits<int64_t>::max(), 1) {}
MOCK_METHOD(absl::StatusOr<bool>, Repack, (absl::Span<AllocationBlock*>),
(override));
};
TEST_F(SlicedPrefetchTest, Repack) {
absl::string_view hlo_string = R"(
HloModule Slice, is_scheduled=true
ENTRY main {
p0 = f32[] parameter(0)
p1 = f32[16,16] parameter(1)
p2 = f32[32,16] parameter(2)
p3 = f32[16,16] parameter(3)
p4 = f32[32,16] parameter(4)
x1 = f32[] add(p0,p0)
x2 = f32[] add(x1, x1)
a = f32[16,16] sine(p1)
c = f32[16,16] sine(p3)
x3 = f32[] add(x2, x2)
x4 = f32[] add(x3, x3)
b = f32[32,16] sine(p2)
d = f32[32,16] sine(p4)
z1 = f32[16,16] broadcast(x4), dimensions={}
z2 = f32[16,16] add(z1, a)
z3 = f32[32,16] concatenate(z2, c), dimensions={0}
z4 = f32[32,16] add(z3, b)
ROOT z5 = f32[32,16] add(z4, d)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module_no_repacking,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(auto module_with_repacking,
ParseAndReturnVerifiedModule(hlo_string));
VLOG(1) << "Original module:\n"
<< module_no_repacking->ToString(HloPrintOptions::ShortParsable());
Shape f32_16_16 = ShapeUtil::MakeShape(F32, {16, 16});
Shape f32_32_16 = ShapeUtil::MakeShape(F32, {32, 16});
EXPECT_CALL(slice_proposer_,
ProposeSlices(f32_16_16, EqualsSlicedPrefetchOptions(
options_.sliced_prefetch_options)))
.WillRepeatedly(Return(SliceProposalCollection({})));
EXPECT_CALL(slice_proposer_,
ProposeSlices(f32_32_16, EqualsSlicedPrefetchOptions(
options_.sliced_prefetch_options)))
.WillRepeatedly(Return(SliceProposalCollection({
SliceProposal({f32_16_16, std::vector<SliceParam>({{0, 16}, {0, 16}}),
ShapeSize(f32_16_16)}),
SliceProposal({f32_16_16,
std::vector<SliceParam>({{16, 32}, {0, 16}}),
ShapeSize(f32_16_16)}),
})));
MsaBufferIntervalCompare buffer_interval_compare =
[](const MsaBufferInterval& lhs, const MsaBufferInterval& rhs) {
auto lookup = [](const MsaBufferInterval& x) {
int priority = 100;
if (x.buffer->instruction()->name() == "p1") {
priority = 1;
} else if (x.buffer->instruction()->name() == "p2") {
priority = 2;
} else if (x.buffer->instruction()->name() == "p3") {
priority = 3;
} else if (x.buffer->instruction()->name() == "p4") {
priority = 4;
}
return std::make_tuple(priority, x.buffer->instruction()->name());
};
return lookup(lhs) < lookup(rhs);
};
InstructionCountPrefetchIntervalPicker prefetch_interval_picker(2, 50);
options_.max_size_in_bytes = 4 * 1024;
options_.max_repacks = 0;
std::unique_ptr<PresetAssignments> assignments =
AssignMemorySpace(module_no_repacking.get(), options_,
buffer_interval_compare, &prefetch_interval_picker);
VLOG(1) << "Post-MSA module (no repacking):\n"
<< module_no_repacking->ToString(HloPrintOptions::ShortParsable());
const HloInstruction* d =
FindNamedScheduledInstruction(*module_no_repacking, "d");
ASSERT_NE(d, nullptr);
EXPECT_FALSE(IsConcatBitcast(d->operand(0)));
MockRepacker repacker;
absl::flat_hash_map<std::pair<int64_t, int64_t>, int64_t> repack_map;
EXPECT_CALL(repacker, Repack(_))
.WillRepeatedly([](absl::Span<AllocationBlock*> allocations)
-> absl::StatusOr<bool> {
bool found_p2 = false;
bool found_p3 = false;
for (AllocationBlock* block : allocations) {
VLOG(1) << "Allocation block: " << block->ToString();
if (block->inclusive_start_time == 3 &&
block->initial_offset == 1024 && block->size == 2048) {
found_p2 = true;
block->offset = 2048;
EXPECT_TRUE(block->original_slice_data.has_value());
if (block->original_slice_data.has_value()) {
SlicedAllocationData expected(
{{AllocatedSlice{1024, 1024, 3},
AllocatedSlice{1024, 2048, 7}}});
EXPECT_EQ(*block->original_slice_data, expected)
<< "\nExpected: " << expected.ToString()
<< "\nGot: " << block->original_slice_data->ToString();
block->repacked_slice_data = SlicedAllocationData(
{{AllocatedSlice{1024, 2048, 7},
AllocatedSlice{1024, 3072, 3}}});
}
} else if (block->inclusive_start_time == 4 &&
block->initial_offset == 3072 && block->size == 1024) {
found_p3 = true;
block->offset = 1024;
EXPECT_FALSE(block->original_slice_data.has_value());
} else {
block->offset = block->initial_offset;
}
}
EXPECT_TRUE(found_p2);
EXPECT_TRUE(found_p3);
return true;
});
options_.max_repacks = 1;
options_.repacker = &repacker;
assignments =
AssignMemorySpace(module_with_repacking.get(), options_,
buffer_interval_compare, &prefetch_interval_picker);
VLOG(1) << "Post-MSA module (with repacking):\n"
<< module_with_repacking->ToString(HloPrintOptions::ShortParsable());
d = FindNamedScheduledInstruction(*module_with_repacking, "d");
ASSERT_NE(d, nullptr);
EXPECT_TRUE(IsConcatBitcast(d->operand(0)));
TF_EXPECT_OK(CheckSliceChunks(*assignments, d->operand(0)));
std::vector<const HloInstruction*> p2_slice_dones;
for (const HloInstruction* i :
module_with_repacking->entry_computation()->instructions()) {
if (IsAsyncSliceStart(i) && i->operand_count() == 1 &&
i->operand(0)->name() == "p2") {
ASSERT_EQ(i->user_count(), 1);
p2_slice_dones.push_back(i->users()[0]);
}
}
ASSERT_EQ(p2_slice_dones.size(), 2);
std::vector<int64_t> p2_slice_offsets;
for (const HloInstruction* i : p2_slice_dones) {
for (const std::pair<HloPosition, Chunk>& position_chunk_pair :
assignments->chunks()) {
if (position_chunk_pair.first.instruction == i) {
p2_slice_offsets.push_back(position_chunk_pair.second.offset);
}
}
}
ASSERT_EQ(p2_slice_offsets.size(), 2);
EXPECT_THAT(p2_slice_dones[0]->async_wrapped_instruction()->slice_starts(),
::testing::ElementsAreArray({16, 0}));
EXPECT_THAT(p2_slice_dones[0]->async_wrapped_instruction()->slice_limits(),
::testing::ElementsAreArray({32, 16}));
EXPECT_EQ(p2_slice_offsets[0], 3072);
EXPECT_THAT(p2_slice_dones[1]->async_wrapped_instruction()->slice_starts(),
::testing::ElementsAreArray({0, 0}));
EXPECT_THAT(p2_slice_dones[1]->async_wrapped_instruction()->slice_limits(),
::testing::ElementsAreArray({16, 16}));
EXPECT_EQ(p2_slice_offsets[1], 2048);
}
struct ModuleAndAssignments {
std::unique_ptr<VerifiedHloModule> module;
std::unique_ptr<PresetAssignments> assignments;
};
TEST_F(SlicedPrefetchTest, BackToBackWhileLoops) {
const std::string while_cond = R"zz(
WhileCond$ID {
cond_param = (f32[8,8], f32[8,8], f32[], f32[]) parameter(0)
i = f32[] get-tuple-element(cond_param), index=2
limit = f32[] get-tuple-element(cond_param), index=3
ROOT cond_result = pred[] compare(i, limit), direction=LT
})zz";
const std::string while_body = R"zz(
WhileBody$ID {
body_param = (f32[8,8], f32[8,8], f32[], f32[]) parameter(0)
v0 = f32[8,8] get-tuple-element(body_param), index=0
v1 = f32[8,8] get-tuple-element(body_param), index=1
i = f32[] get-tuple-element(body_param), index=2
limit = f32[] get-tuple-element(body_param), index=3
one = f32[] constant(1)
new_i = f32[] add(i, one)
$COMPUTATION
ROOT while_result = (f32[8,8], f32[8,8], f32[], f32[]) tuple(v0, new_v1, new_i, limit)
})zz";
const std::string while_computation_cheap = R"zz(
new_v1 = f32[8,8] add(v0, v1))zz";
std::string while_computation_expensive = R"zz(
new_v1_0 = f32[8,8] add(v0, v1)
new_v1_1 = f32[8,8] tanh(new_v1_0)
new_v1_2 = f32[8,8] tanh(new_v1_1)
new_v1_3 = f32[8,8] tanh(new_v1_2)
new_v1 = f32[8,8] tanh(new_v1_3))zz";
std::string module_text = R"zz(
HloModule Slice, is_scheduled=true
$WHILEBODY1
$WHILECOND1
$WHILEBODY2
$WHILECOND2
ENTRY main {
loop1_input1 = f32[8,8] parameter(0)
loop1_input2 = f32[8,8] parameter(1)
loop1_iterations = f32[] parameter(2)
loop1_begin = f32[] constant(0)
loop1_tuple = (f32[8,8], f32[8,8], f32[], f32[]) tuple(loop1_input1, loop1_input2, loop1_iterations, loop1_begin)
loop2_input1 = f32[8,8] parameter(3)
loop2_input2 = f32[8,8] parameter(4)
loop2_iterations = f32[] parameter(5)
loop2_begin = f32[] constant(0)
loop2_tuple = (f32[8,8], f32[8,8], f32[], f32[]) tuple(loop2_input1, loop2_input2, loop2_iterations, loop2_begin)
prefetch = f32[8,8] parameter(6)
loop1_output = (f32[8,8], f32[8,8], f32[], f32[]) while(loop1_tuple), condition=WhileCond1, body=WhileBody1
loop2_output = (f32[8,8], f32[8,8], f32[], f32[]) while(loop2_tuple), condition=WhileCond2, body=WhileBody2
prefetch_use = f32[8,8] tanh(prefetch)
loop1_result = f32[8,8] get-tuple-element(loop1_output), index=1
loop2_result = f32[8,8] get-tuple-element(loop2_output), index=1
tmp1 = f32[8,8] add(loop1_result, loop2_result)
ROOT r = f32[8,8] add(tmp1, prefetch_use)
})zz";
auto gen_hlo = [&](std::string_view while_computation1,
std::string_view while_computation2) {
return absl::StrReplaceAll(
module_text,
{
{"$WHILEBODY1",
absl::StrReplaceAll(
while_body,
{{"$ID", "1"}, {"$COMPUTATION", while_computation1}})},
{"$WHILECOND1", absl::StrReplaceAll(while_cond, {{"$ID", "1"}})},
{"$WHILEBODY2",
absl::StrReplaceAll(
while_body,
{{"$ID", "2"}, {"$COMPUTATION", while_computation2}})},
{"$WHILECOND2", absl::StrReplaceAll(while_cond, {{"$ID", "2"}})},
});
};
SetupProposeSlicesToExpect2SlicesOfF32x8x8();
MsaBufferIntervalCompare buffer_interval_compare =
[](const MsaBufferInterval& lhs, const MsaBufferInterval& rhs) {
auto lookup = [](const MsaBufferInterval& x) {
int priority = 100;
if (x.buffer->instruction()->name() == "prefetch") {
priority = 1;
}
return std::make_tuple(priority, x.buffer->instruction()->name());
};
return lookup(lhs) < lookup(rhs);
};
InstructionCountPrefetchIntervalPicker prefetch_interval_picker(32, 100);
options_.max_size_in_bytes = 4 * 64;
auto run_msa =
[&](std::string_view hlo_text) -> absl::StatusOr<ModuleAndAssignments> {
ModuleAndAssignments module_and_assignments;
TF_ASSIGN_OR_RETURN(module_and_assignments.module,
ParseAndReturnVerifiedModule(hlo_text));
VLOG(1) << "Original module:\n"
<< module_and_assignments.module->ToString(
HloPrintOptions::ShortParsable());
module_and_assignments.assignments =
AssignMemorySpace(module_and_assignments.module.get(), options_,
buffer_interval_compare, &prefetch_interval_picker);
VLOG(1) << "Post-MSA module:\n"
<< module_and_assignments.module->ToString(
HloPrintOptions::ShortParsable());
return module_and_assignments;
};
TF_ASSERT_OK_AND_ASSIGN(
ModuleAndAssignments module_and_assignments1,
run_msa(gen_hlo(while_computation_cheap, while_computation_expensive)));
auto root1 =
module_and_assignments1.module->entry_computation()->root_instruction();
EXPECT_THAT(root1, op::Add(_, op::Tanh(IsAsyncSlicedCopy(
kAlternateMemorySpace, kDefaultMemorySpace,
{{{0, 4}, {0, 8}}, {{4, 8}, {0, 8}}},
op::Parameter(6)))));
TF_EXPECT_OK(CheckSchedule(
*module_and_assignments1.module, root1->operand(1)->operand(0),
"prefetch",
"prefetch_use",
true));
auto entry_schedule1 =
module_and_assignments1.module->schedule()
.sequence(module_and_assignments1.module->entry_computation())
.instructions();
  TF_ASSERT_OK_AND_ASSIGN(
      std::vector<int> start_indices,
      GetSliceStartIndicies(entry_schedule1, root1->operand(1)->operand(0)));
  ASSERT_EQ(start_indices.size(), 2);
TF_ASSERT_OK_AND_ASSIGN(
int first_while,
FindScheduleIndexOfInstruction(
entry_schedule1, "loop1_output",
SlicedPrefetchTest::InstructionClass::kUnrelatedNonCopy));
TF_ASSERT_OK_AND_ASSIGN(
int second_while,
FindScheduleIndexOfInstruction(
entry_schedule1, "loop2_output",
SlicedPrefetchTest::InstructionClass::kUnrelatedNonCopy));
EXPECT_TRUE(
absl::c_is_sorted<std::vector<int>>(
          {start_indices[0], first_while, start_indices[1], second_while}) ||
      absl::c_is_sorted<std::vector<int>>(
          {start_indices[1], first_while, start_indices[0], second_while}));
TF_ASSERT_OK_AND_ASSIGN(
ModuleAndAssignments module_and_assignments2,
run_msa(gen_hlo(while_computation_expensive, while_computation_cheap)));
auto root2 =
module_and_assignments2.module->entry_computation()->root_instruction();
EXPECT_THAT(root2, op::Add(_, op::Tanh(op::AsyncCopy(kAlternateMemorySpace,
kDefaultMemorySpace,
op::Parameter(6)))));
auto entry_schedule2 =
module_and_assignments2.module->schedule()
.sequence(module_and_assignments2.module->entry_computation())
.instructions();
TF_ASSERT_OK_AND_ASSIGN(
int copy_done,
FindScheduleIndexOfInstruction(
entry_schedule2, root2->operand(1)->operand(0)->name(),
SlicedPrefetchTest::InstructionClass::kUnrelatedNonCopy));
TF_ASSERT_OK_AND_ASSIGN(
int copy_start,
FindScheduleIndexOfInstruction(
entry_schedule2, root2->operand(1)->operand(0)->operand(0)->name(),
SlicedPrefetchTest::InstructionClass::kUnrelatedNonCopy));
TF_ASSERT_OK_AND_ASSIGN(
first_while,
FindScheduleIndexOfInstruction(
entry_schedule2, "loop1_output",
SlicedPrefetchTest::InstructionClass::kUnrelatedNonCopy));
TF_ASSERT_OK_AND_ASSIGN(
second_while,
FindScheduleIndexOfInstruction(
entry_schedule2, "loop2_output",
SlicedPrefetchTest::InstructionClass::kUnrelatedNonCopy));
EXPECT_TRUE(absl::c_is_sorted<std::vector<int>>(
{copy_start, first_while, second_while, copy_done}));
}
using RepackingTest = ::testing::Test;
TEST_F(RepackingTest, Colocations) {
AllocationBlock a{10, 20, 100, 0, 1000, 0};
AllocationBlock b{15, 25, 150, 0, 2000, 1};
AllocationBlock c{18, 22, 50, 0, 500, 2};
AllocationBlock d{5, 9, 20, 0, 3000, 3};
AllocationBlock e{17, 22, 100, 0, 1500, 4};
AllocationBlock f{25, 27, 150, 0, 2500, 5};
a.next_colocated = &a;
b.next_colocated = &c;
c.next_colocated = &b;
d.next_colocated = &f;
e.next_colocated = &d;
f.next_colocated = &e;
EXPECT_EQ(a.GetColocationsCount(), 1);
EXPECT_THAT(a.GetColocations(), UnorderedElementsAre(&a));
EXPECT_EQ(b.GetColocationsCount(), 2);
EXPECT_THAT(b.GetColocations(), UnorderedElementsAre(&b, &c));
EXPECT_EQ(c.GetColocationsCount(), 2);
EXPECT_THAT(c.GetColocations(), UnorderedElementsAre(&b, &c));
EXPECT_EQ(d.GetColocationsCount(), 3);
EXPECT_THAT(d.GetColocations(), UnorderedElementsAre(&d, &e, &f));
EXPECT_EQ(e.GetColocationsCount(), 3);
EXPECT_THAT(e.GetColocations(), UnorderedElementsAre(&d, &e, &f));
EXPECT_EQ(f.GetColocationsCount(), 3);
EXPECT_THAT(f.GetColocations(), UnorderedElementsAre(&d, &e, &f));
}
TEST_F(SlicedPrefetchTest, UniformSizedSlicing) {
std::string hlo_text = R"zz(
HloModule Slice, is_scheduled=true
ENTRY main {
p0 = f32[8,8] parameter(0)
p1 = f32[8,8] parameter(1)
p2 = f32[8,16] parameter(2)
constant1 = f32[] constant(1.1)
a = f32[8,8] tanh(p0)
b = f32[8,8] tanh(a)
c = f32[8,8] tanh(b)
d = f32[8,8] tanh(c)
e = f32[8,8] tanh(d)
f = f32[8,8] tanh(e)
g = f32[8,8] tanh(f)
h = f32[8,8] tanh(g)
x = f32[8,8] add(p1, h)
padded_x = f32[8,16] pad(x, constant1), padding=0_0x0_8
ROOT r = f32[8,16] add(padded_x, p2)
})zz";
const Shape f32_8_16 = ShapeUtil::MakeShape(F32, {8, 16});
const Shape s8_128 = ShapeUtil::MakeShape(S8, {128});
options_.sliced_prefetch_options.set_max_slices(100000);
options_.sliced_prefetch_options.set_preferred_slice_size(4 * 8 * 4);
EXPECT_CALL(slice_proposer_,
ProposeSlices(f32_8_8_, EqualsSlicedPrefetchOptions(
options_.sliced_prefetch_options)))
.WillRepeatedly(Return(SliceProposalCollection({
SliceProposal(
{s8_128, std::vector<SliceParam>({{0, 128}}), ShapeSize(s8_128)}),
SliceProposal({s8_128, std::vector<SliceParam>({{128, 256}}),
ShapeSize(s8_128)}),
})));
EXPECT_CALL(slice_proposer_,
ProposeSlices(f32_8_16, EqualsSlicedPrefetchOptions(
options_.sliced_prefetch_options)))
.WillRepeatedly(Return(SliceProposalCollection({
SliceProposal(
{s8_128, std::vector<SliceParam>({{0, 128}}), ShapeSize(s8_128)}),
SliceProposal({s8_128, std::vector<SliceParam>({{128, 256}}),
ShapeSize(s8_128)}),
SliceProposal({s8_128, std::vector<SliceParam>({{256, 384}}),
ShapeSize(s8_128)}),
SliceProposal({s8_128, std::vector<SliceParam>({{384, 512}}),
ShapeSize(s8_128)}),
})));
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_text));
VLOG(1) << "Original module:\n"
<< module->ToString(HloPrintOptions::ShortParsable());
std::unique_ptr<PresetAssignments> assignments = AssignMemorySpace(
module.get(), options_,
100, 1);
VLOG(1) << "Post-MSA module:\n"
<< module->ToString(HloPrintOptions::ShortParsable());
auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(
root,
op::Add(op::Pad(op::Add(IsAsyncSlicedCopy(
kAlternateMemorySpace, kDefaultMemorySpace,
{{{0, 128}}, {{128, 256}}}, op::Parameter(1),
true),
_),
_),
IsAsyncSlicedCopy(
kAlternateMemorySpace, kDefaultMemorySpace,
{{{0, 128}}, {{128, 256}}, {{256, 384}}, {{384, 512}}},
op::Parameter(2), true)));
TF_EXPECT_OK(CheckSliceChunks(*assignments, root->operand(1),
true));
TF_EXPECT_OK(CheckSliceChunks(*assignments,
root->operand(0)->operand(0)->operand(0),
true));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/memory_space_assignment/memory_space_assignment.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/memory_space_assignment/memory_space_assignment_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
5b2e1d7e-be28-4c3b-854c-5f1e07a81350 | cpp | tensorflow/tensorflow | gpu_compatibility | tensorflow/lite/tools/versioning/gpu_compatibility.cc | tensorflow/lite/tools/versioning/gpu_compatibility_test.cc | #include "tensorflow/lite/tools/versioning/gpu_compatibility.h"
#include <algorithm>
#include <string>
#include <vector>
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "tensorflow/lite/builtin_op_data.h"
#include "tensorflow/lite/builtin_ops.h"
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/tools/versioning/op_signature.h"
namespace tflite {
namespace {
const std::string GetOpName(const OpSignature& op_sig) {
if (op_sig.op == tflite::BuiltinOperator_CUSTOM) {
return op_sig.custom_name;
}
return tflite::EnumNamesBuiltinOperator()[op_sig.op];
}
int NumElements(const std::vector<int32_t>& dims) {
int count = 1;
for (int i = 0; i < dims.size(); ++i) {
count *= dims.at(i);
}
return count;
}
#define RETURN_IF_ERROR(s) \
{ \
auto c = (s); \
if (!c.ok()) return c; \
}
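// Casts the op's builtin_data to the requested params type, failing if it is
// null.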
template <typename ParamsT>
absl::Status RetrieveBuiltinData(const OpSignature& op_sig,
const ParamsT** tf_options) {
*tf_options = static_cast<const ParamsT*>(op_sig.builtin_data);
if (!*tf_options) {
return absl::InternalError("Unable to retrieve builtin_data.");
}
return absl::OkStatus();
}
template <typename ParamsT>
absl::Status RetrieveCustomInitialData(const OpSignature& op_sig,
const ParamsT** tf_options) {
*tf_options = static_cast<const ParamsT*>(op_sig.custom_initial_data);
if (!*tf_options) {
return absl::InternalError("Unable to retrieve custom_initial_data.");
}
return absl::OkStatus();
}
absl::Status IsActivationSupported(TfLiteFusedActivation fused_activation) {
switch (fused_activation) {
case kTfLiteActNone:
case kTfLiteActRelu:
case kTfLiteActReluN1To1:
case kTfLiteActRelu6:
case kTfLiteActTanh:
case kTfLiteActSigmoid:
return absl::OkStatus();
case kTfLiteActSignBit:
return absl::UnimplementedError(
"TfLiteFusedActivation.kTfLiteActSignBit");
}
}
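// Counts the inputs that are neither constant nor of type kTfLiteNoType.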
int GetNumberOfRuntimeInputs(const OpSignature& op_sig) {
int number_of_runtime_inputs = 0;
for (auto& input : op_sig.inputs) {
if (!input.is_const && input.type != kTfLiteNoType) {
number_of_runtime_inputs++;
}
}
return number_of_runtime_inputs;
}
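// Verifies that the op has exactly the required number of runtime inputs and
// outputs.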
absl::Status CheckInputsOutputs(const OpSignature& op_sig,
const int required_runtime_inputs,
const int required_outputs) {
const int runtime_inputs_from_model = GetNumberOfRuntimeInputs(op_sig);
if (runtime_inputs_from_model != required_runtime_inputs) {
return absl::InternalError(
absl::StrCat("Expected ", required_runtime_inputs,
" runtime input tensor(s), but node has ",
runtime_inputs_from_model, " runtime input(s)."));
}
const int outputs_from_model = op_sig.outputs.size();
if (outputs_from_model != required_outputs) {
return absl::InternalError(absl::StrCat("Expected ", required_outputs,
" output tensor(s), but node has ",
outputs_from_model, " output(s)."));
}
return absl::OkStatus();
}
absl::Status CheckInputsConstsOutputs(const OpSignature& op_sig,
int required_runtime_inputs,
int required_const_inputs,
int required_outputs) {
int const_inputs_from_model = 0;
for (auto& input : op_sig.inputs) {
if (input.is_const) {
++const_inputs_from_model;
}
}
if (const_inputs_from_model != required_const_inputs) {
return absl::InternalError(
absl::StrCat("Expected ", required_const_inputs,
" const input tensor(s), but node has ",
const_inputs_from_model, " const input(s)."));
}
return CheckInputsOutputs(op_sig, required_runtime_inputs, required_outputs);
}
absl::Status CheckTensorIsAvailable(const OpSignature& op_sig, int idx) {
if (idx >= op_sig.inputs.size()) {
return absl::OutOfRangeError(
absl::StrCat("Requested index goes beyond array size: ", idx, " vs ",
op_sig.inputs.size()));
}
return absl::OkStatus();
}
absl::Status CheckConvoultionInputOutput(const OpSignature& op_sig) {
const int runtime_inputs = GetNumberOfRuntimeInputs(op_sig);
if (runtime_inputs > 2) {
return absl::InternalError(
absl::StrCat("Expected 1 or 2 input tensor(s), but node has ",
runtime_inputs, " runtime inputs."));
}
const int runtime_outputs = op_sig.outputs.size();
if (runtime_outputs != 1) {
return absl::InternalError(
absl::StrCat("Expected 1 output tensor(s), but node has ",
runtime_outputs, " runtime outputs."));
}
if (runtime_inputs == 1) {
RETURN_IF_ERROR(CheckTensorIsAvailable(op_sig, 1));
}
return absl::OkStatus();
}
absl::Status CheckStrides(int strides_h, int strides_w) {
if (strides_h <= 0 || strides_w <= 0) {
return absl::InvalidArgumentError(
absl::StrCat("Incorrect stride values: stride_height = ", strides_h,
", stride_width = ", strides_w));
}
return absl::OkStatus();
}
absl::Status CheckDilation(int dilation_h, int dilation_w) {
if (dilation_h <= 0 || dilation_w <= 0) {
return absl::InvalidArgumentError(absl::StrCat(
"Incorrect dilation values: dilation_height = ", dilation_h,
", dilation_width = ", dilation_w));
}
return absl::OkStatus();
}
absl::Status CheckStridesAndDilation(int strides_h, int strides_w,
int dilation_h, int dilation_w) {
RETURN_IF_ERROR(CheckStrides(strides_h, strides_w));
RETURN_IF_ERROR(CheckDilation(dilation_h, dilation_w));
return absl::OkStatus();
}
absl::Status CheckKernels(int kernel_h, int kernel_w) {
if (kernel_h <= 0 || kernel_w <= 0) {
return absl::InvalidArgumentError(
absl::StrCat("Incorrect kernel values: kernel_height = ", kernel_h,
", kernel_width = ", kernel_w));
}
return absl::OkStatus();
}
absl::Status CheckKernelsAndStrides(int kernel_h, int kernel_w, int strides_h,
int strides_w) {
RETURN_IF_ERROR(CheckKernels(kernel_h, kernel_w));
RETURN_IF_ERROR(CheckStrides(strides_h, strides_w));
return absl::OkStatus();
}
absl::Status CheckAxesAreInt32Const(const OpSignature& op_sig, int idx) {
auto axes = op_sig.inputs.at(idx);
if (!axes.is_const) {
return absl::UnimplementedError(GetOpName(op_sig) +
" is only supported with constant axes.");
}
if (axes.type != kTfLiteInt32) {
return absl::UnimplementedError(absl::StrCat(
GetOpName(op_sig) + " supports int32 tensor for axes. But node has ",
TfLiteTypeGetName(axes.type)));
}
return absl::OkStatus();
}
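// Validates pooling ops. The custom MaxPoolingWithArgmax2D variant produces a
// second (argmax) output; both variants need valid kernels, strides, and a
// supported fused activation.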
absl::Status CheckPooling2DGpuDelegateCompatibility(const OpSignature& op_sig) {
const TfLitePoolParams* tf_options;
if (op_sig.custom_initial_data) {
RETURN_IF_ERROR(RetrieveCustomInitialData(op_sig, &tf_options));
RETURN_IF_ERROR(CheckInputsOutputs(op_sig,
1,
2));
} else {
RETURN_IF_ERROR(RetrieveBuiltinData(op_sig, &tf_options));
RETURN_IF_ERROR(CheckInputsOutputs(op_sig,
1,
1));
}
RETURN_IF_ERROR(CheckKernelsAndStrides(
tf_options->filter_height, tf_options->filter_width,
tf_options->stride_height, tf_options->stride_width));
return IsActivationSupported(tf_options->activation);
}
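// Validates DEPTHWISE_CONV_2D: 4D input/filter/output, positive strides and
// dilations, a supported activation, output.c == input.c * depth_multiplier,
// and depth_multiplier != 1 only when input.c == 1.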
absl::Status CheckDepthwiseConvGpuDelegateCompatibility(
const OpSignature& op_sig) {
RETURN_IF_ERROR(CheckConvoultionInputOutput(op_sig));
const TfLiteDepthwiseConvParams* tf_options;
RETURN_IF_ERROR(RetrieveBuiltinData(op_sig, &tf_options));
RETURN_IF_ERROR(CheckStridesAndDilation(
tf_options->stride_height, tf_options->stride_width,
tf_options->dilation_height_factor, tf_options->dilation_width_factor));
RETURN_IF_ERROR(IsActivationSupported(tf_options->activation));
const int depth_multiplier = tf_options->depth_multiplier;
const auto* input = &op_sig.inputs[0];
const auto* filter = &op_sig.inputs[1];
const auto* bias = op_sig.inputs.size() > 2 ? &op_sig.inputs[2] : nullptr;
const auto* output = &op_sig.outputs[0];
if (input->dims.size() != 4) {
return absl::InvalidArgumentError("input.dims.size != 4");
}
if (filter->dims.size() != 4) {
return absl::InvalidArgumentError("filter.dims.size != 4");
}
if (output->dims.size() != 4) {
return absl::InvalidArgumentError("output.dims.size != 4");
}
if (input->dims[0] != output->dims[0]) {
return absl::InvalidArgumentError("input.b != output.b");
}
const int input_depth = input->dims[3];
const int output_depth = output->dims[3];
if (filter->dims[3] != output_depth) {
return absl::InvalidArgumentError("filter.i != output.c");
}
if (output_depth != input_depth * depth_multiplier) {
return absl::InvalidArgumentError("output.c != input.c * depth_multiplier");
}
if (bias && NumElements(bias->dims) != output_depth) {
return absl::InvalidArgumentError("bias.size != output.c");
}
if (depth_multiplier != 1 && input_depth != 1) {
return absl::UnimplementedError("depth_multiplier != 1 && input.c != 1");
}
return absl::OkStatus();
}
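// Validates CUMSUM: float input/output of the same type and a constant int32
// axis tensor.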
absl::Status CheckCumsumGpuDelegateCompatibility(const OpSignature& op_sig) {
if (op_sig.inputs.size() != 2) {
return absl::InvalidArgumentError("Expects 2 inputs and 1 output");
}
auto error = absl::InvalidArgumentError(
"Input/output must be float type and indices must be constant int32 "
"type");
if ((op_sig.inputs.at(0).type != kTfLiteFloat16 &&
op_sig.inputs.at(0).type != kTfLiteFloat32) ||
(op_sig.outputs.at(0).type != op_sig.inputs.at(0).type) ||
(op_sig.inputs.at(1).type != kTfLiteInt32 ||
!op_sig.inputs.at(1).is_const)) {
return error;
}
return absl::OkStatus();
}
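// Validates ONE_HOT: int32 indices whose non-last dims are singleton (the
// leading dim may exceed 1 when rank > 3), constant scalar float on/off
// values, and an axis of -1 or the last dim.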
absl::Status CheckOneHotGpuDelegateCompatibility(const OpSignature& op_sig) {
  if (op_sig.inputs.size() != 4 || op_sig.outputs.size() != 1) {
return absl::InvalidArgumentError("Expects 4 inputs and 1 output");
}
absl::Status error = absl::InvalidArgumentError(
"Indices must be int32 type, on/off tensors must be constant, scalar, "
"float type, axis must be -1 or last dim");
if (op_sig.inputs[0].type != kTfLiteInt32) {
return error;
}
auto* one_hot_options =
reinterpret_cast<TfLiteOneHotParams*>(op_sig.builtin_data);
const int num_dims = op_sig.inputs[0].dims.size();
if (one_hot_options->axis != -1 &&
      one_hot_options->axis != num_dims - 1) {
return error;
}
for (int i = 0; i < num_dims - 1; ++i) {
if (num_dims > 3 && i == 0) {
continue;
}
if (op_sig.inputs.at(0).dims[i] != 1) {
return absl::InvalidArgumentError(
absl::StrCat("Unspported non-singleton dim at ", i));
}
}
if (op_sig.inputs.at(2).type != kTfLiteFloat32 ||
op_sig.inputs.at(3).type != kTfLiteFloat32) {
return error;
}
if (!op_sig.inputs.at(2).is_const || !op_sig.inputs.at(3).is_const ||
op_sig.inputs.at(2).dims.size() > 1 ||
op_sig.inputs.at(3).dims.size() > 1) {
return error;
}
if ((!op_sig.inputs.at(2).dims.empty() && op_sig.inputs.at(2).dims[0] > 1) ||
(!op_sig.inputs.at(3).dims.empty() && op_sig.inputs.at(3).dims[0] > 1)) {
return error;
}
return absl::OkStatus();
}
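// Validates SELECT_V2: a bool or float condition, and float if/else tensors
// that either match the output shape or are effectively scalar.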
absl::Status CheckSelectV2GpuDelegateCompatibility(const OpSignature& op_sig) {
if (op_sig.inputs.size() != 3 || op_sig.outputs.size() != 1) {
return absl::InvalidArgumentError("Expected 3 inputs and 1 output");
}
absl::Status error = absl::InvalidArgumentError(
"Cond must be float or bool type, if, else tensors must be float and "
"either be same the shape as output or constant, scalar.");
if ((op_sig.inputs.at(0).type != kTfLiteBool &&
op_sig.inputs.at(0).type != kTfLiteFloat16 &&
op_sig.inputs.at(0).type != kTfLiteFloat32) ||
(op_sig.inputs.at(1).type != kTfLiteFloat16 &&
op_sig.inputs.at(1).type != kTfLiteFloat32) ||
(op_sig.inputs.at(2).type != kTfLiteFloat16 &&
op_sig.inputs.at(2).type != kTfLiteFloat32)) {
return error;
}
std::vector<int32_t> output_dims = op_sig.outputs[0].dims;
if (!op_sig.inputs.at(1).dims.empty() &&
(op_sig.inputs.at(1).dims != output_dims) &&
(op_sig.inputs.at(1).dims.size() > 1 ||
op_sig.inputs.at(1).dims[0] > 1)) {
return error;
}
if (op_sig.inputs.at(1).is_const && op_sig.inputs.at(1).dims.size() == 2) {
return absl::InvalidArgumentError(
"2-D if tensor only supported if constant.");
}
if (!op_sig.inputs.at(2).dims.empty() &&
(op_sig.inputs.at(2).dims != output_dims) &&
(op_sig.inputs.at(2).dims.size() > 1 ||
op_sig.inputs.at(2).dims[0] > 1)) {
return error;
}
if (op_sig.inputs.at(2).is_const && op_sig.inputs.at(2).dims.size() == 2) {
return absl::InvalidArgumentError(
"2-D else tensor only supported if constant.");
}
return absl::OkStatus();
}
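// Validates the supported custom ops: Convolution2DTransposeBias,
// MaxPoolingWithArgmax2D, MaxUnpooling2D, and Resampler.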
absl::Status CheckCustomOpsGpuDelegateCompatibility(const OpSignature& op_sig) {
if (op_sig.custom_name == "Convolution2DTransposeBias") {
RETURN_IF_ERROR(CheckTensorIsAvailable(op_sig, 1));
const TfLiteTransposeConvParams* tf_options;
RETURN_IF_ERROR(RetrieveCustomInitialData(op_sig, &tf_options));
RETURN_IF_ERROR(
CheckStrides(tf_options->stride_height, tf_options->stride_width));
return absl::OkStatus();
}
if (op_sig.custom_name == "MaxPoolingWithArgmax2D") {
return CheckPooling2DGpuDelegateCompatibility(op_sig);
}
if (op_sig.custom_name == "MaxUnpooling2D") {
RETURN_IF_ERROR(CheckInputsOutputs(op_sig,
2,
1));
const TfLitePoolParams* tf_options;
RETURN_IF_ERROR(RetrieveCustomInitialData(op_sig, &tf_options));
RETURN_IF_ERROR(CheckKernelsAndStrides(
tf_options->filter_height, tf_options->filter_width,
tf_options->stride_height, tf_options->stride_width));
return absl::OkStatus();
}
if (op_sig.custom_name == "Resampler") {
return CheckInputsOutputs(op_sig,
2,
1);
}
return absl::InvalidArgumentError(
absl::StrCat("Not supported custom op ", op_sig.custom_name));
}
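// Returns true if the two shapes are broadcastable under numpy-style rules:
// aligned from the trailing dimension, each dimension pair must be equal or
// contain a 1. E.g. [1, 4, 8, 8] and [8, 8] are broadcastable, while [4, 8]
// and [3, 8] are not.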
bool CheckIsBroadcastable(const std::vector<int32_t>* longer_dims,
const std::vector<int32_t>* shorter_dims) {
int idx_1 = longer_dims->size() - 1;
int idx_2 = shorter_dims->size() - 1;
int max_idx = std::max(idx_1, idx_2);
int data_1 = 0;
int data_2 = 0;
for (int i = max_idx; i >= 0; --i) {
data_1 = idx_1 < 0 ? 1 : longer_dims->at(idx_1);
data_2 = idx_2 < 0 ? 1 : shorter_dims->at(idx_2);
if (data_1 != data_2 && data_1 != 1 && data_2 != 1) {
return false;
}
--idx_1;
--idx_2;
}
return true;
}
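// For ADD/MUL inputs whose ranks differ (and both exceed 1), checks whether
// broadcasting is supported: full numpy-style rules under
// GpuCompatibilityFlags::kEnhancedBroadcast, otherwise only a small set of
// 4D-vs-2D/3D patterns.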
absl::Status CheckAddMulBroadcastCompatibility(
const OpSignatureTensorSpec& input0, const OpSignatureTensorSpec& input1,
GpuCompatibilityFlags flags) {
if (input0.dims.size() > 1 && input1.dims.size() > 1 &&
input0.dims.size() != input1.dims.size()) {
    const std::vector<int32_t> *longer_dims, *shorter_dims;
if (input0.dims.size() >= input1.dims.size()) {
longer_dims = &input0.dims;
shorter_dims = &input1.dims;
} else {
longer_dims = &input1.dims;
shorter_dims = &input0.dims;
}
bool is_broadcastable = false;
if (flags == GpuCompatibilityFlags::kEnhancedBroadcast) {
is_broadcastable = CheckIsBroadcastable(longer_dims, shorter_dims);
} else {
if (longer_dims->size() == 4 && shorter_dims->size() == 3 &&
longer_dims->at(0) == 1) {
is_broadcastable = true;
} else if (longer_dims->size() == 4 && shorter_dims->size() == 2 &&
longer_dims->at(0) == 1 && shorter_dims->at(0) == 1 &&
shorter_dims->at(1) == 1) {
is_broadcastable = true;
} else if (longer_dims->size() == 4 && shorter_dims->size() == 2 &&
longer_dims->at(0) == shorter_dims->at(0) &&
longer_dims->at(3) == shorter_dims->at(1)) {
is_broadcastable = true;
}
}
if (!is_broadcastable) {
return absl::UnimplementedError(
absl::StrCat("Doesn't support broadcasting - input0: [",
absl::StrJoin(input0.dims, ","), "], input1: [",
absl::StrJoin(input1.dims, ","), "]"));
}
}
return absl::OkStatus();
}
}
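// Checks whether the given op signature is supported by the TFLite GPU
// delegate.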
absl::Status CheckGpuDelegateCompatibility(const OpSignature& op_sig,
GpuCompatibilityFlags flags) {
TfLiteBuiltinOperator opcode = static_cast<TfLiteBuiltinOperator>(op_sig.op);
switch (opcode) {
case kTfLiteBuiltinAdd: {
if (op_sig.inputs.size() != 2) {
return absl::UnimplementedError("ADD requires two input tensors.");
}
const auto& input0 = op_sig.inputs.at(0);
const auto& input1 = op_sig.inputs.at(1);
auto broadcastable =
CheckAddMulBroadcastCompatibility(input0, input1, flags);
if (!broadcastable.ok()) {
return broadcastable;
}
const TfLiteAddParams* tf_options;
return RetrieveBuiltinData(op_sig, &tf_options);
}
case kTfLiteBuiltinAddN: {
return op_sig.inputs.size() == 2
? absl::OkStatus()
: absl::UnimplementedError("ADD_N only supports 2 inputs.");
}
case kTfLiteBuiltinAveragePool2d:
return CheckPooling2DGpuDelegateCompatibility(op_sig);
case kTfLiteBuiltinBatchMatmul: {
const int num_inputs = op_sig.inputs.size();
const int num_outputs = op_sig.outputs.size();
if (!(num_inputs == 2 && num_outputs == 1)) {
return absl::InternalError(
absl::StrCat("Expected 2 inputs and 1 output, got: ", num_inputs,
" inputs and ", num_outputs, " outputs"));
}
return absl::OkStatus();
}
case kTfLiteBuiltinCast:
RETURN_IF_ERROR(CheckInputsOutputs(op_sig,
1,
1));
if (op_sig.inputs.at(0).type == kTfLiteBool &&
(op_sig.outputs.at(0).type == kTfLiteFloat16 ||
op_sig.outputs.at(0).type == kTfLiteFloat32)) {
return absl::OkStatus();
} else if ((op_sig.inputs.at(0).type == kTfLiteFloat16 ||
op_sig.inputs.at(0).type == kTfLiteFloat32) &&
op_sig.outputs.at(0).type == kTfLiteBool) {
return absl::OkStatus();
} else if ((op_sig.inputs.at(0).type == kTfLiteFloat32 ||
op_sig.inputs.at(0).type == kTfLiteInt32) &&
(op_sig.outputs.at(0).type == kTfLiteFloat32 ||
op_sig.outputs.at(0).type == kTfLiteInt32)) {
return absl::OkStatus();
} else {
return absl::UnimplementedError(absl::StrCat(
"Not supported Cast case. Input type: ",
TfLiteTypeGetName(op_sig.inputs.at(0).type), " and output type: ",
TfLiteTypeGetName(op_sig.outputs.at(0).type)));
}
case kTfLiteBuiltinConcatenation: {
const TfLiteConcatenationParams* tf_options;
RETURN_IF_ERROR(RetrieveBuiltinData(op_sig, &tf_options));
return absl::OkStatus();
}
case kTfLiteBuiltinConv2d: {
RETURN_IF_ERROR(CheckConvoultionInputOutput(op_sig));
const TfLiteConvParams* tf_options;
RETURN_IF_ERROR(RetrieveBuiltinData(op_sig, &tf_options));
RETURN_IF_ERROR(CheckStridesAndDilation(
tf_options->stride_height, tf_options->stride_width,
tf_options->dilation_height_factor,
tf_options->dilation_width_factor));
return IsActivationSupported(tf_options->activation);
}
case kTfLiteBuiltinCumsum:
return CheckCumsumGpuDelegateCompatibility(op_sig);
case kTfLiteBuiltinDensify:
return CheckInputsOutputs(op_sig, 0,
1);
case kTfLiteBuiltinDepthwiseConv2d:
return CheckDepthwiseConvGpuDelegateCompatibility(op_sig);
case kTfLiteBuiltinDepthToSpace: {
RETURN_IF_ERROR(CheckInputsOutputs(op_sig,
1,
1));
const TfLiteDepthToSpaceParams* d2s_params;
RETURN_IF_ERROR(RetrieveBuiltinData(op_sig, &d2s_params));
if (d2s_params->block_size == 1) {
return absl::InvalidArgumentError(
"DEPTH_TO_SPACE block_size = 1 is a no-op.");
}
if (d2s_params->block_size < 1) {
return absl::InvalidArgumentError(
"DEPTH_TO_SPACE block_size must be > 1.");
}
return absl::OkStatus();
}
case kTfLiteBuiltinDequantize: {
const int num_inputs = op_sig.inputs.size();
const int num_outputs = op_sig.outputs.size();
if (num_inputs != 1 || num_outputs != 1) {
        return absl::InternalError(absl::StrCat(
            "Expected 1 input & output each from Dequantize, got: ",
            num_inputs, ", ", num_outputs));
}
if (op_sig.inputs[0].type == kTfLiteInt16) {
return absl::UnimplementedError("Unsupported dequantization type.");
}
return absl::OkStatus();
}
case kTfLiteBuiltinEmbeddingLookup: {
const int num_inputs = op_sig.inputs.size();
const OpSignatureTensorSpec ids_spec = op_sig.inputs[0];
const OpSignatureTensorSpec value_spec = op_sig.inputs[1];
const OpSignatureTensorSpec output_spec = op_sig.outputs[0];
if (num_inputs != 2) {
return absl::InvalidArgumentError(
absl::StrCat("Expected 2, but got ", num_inputs, " inputs."));
}
if (ids_spec.dims.size() != 1) {
return absl::InvalidArgumentError(absl::StrCat(
"Expected 1D, but got ", ids_spec.dims.size(), "D input #0."));
}
if (value_spec.dims.size() < 2) {
return absl::InvalidArgumentError(absl::StrCat(
"Expected > 1D, but got ", value_spec.dims.size(), "D input #1."));
}
if (op_sig.outputs.size() != 1) {
return absl::InvalidArgumentError(absl::StrCat(
"Expected 1, but got ", op_sig.outputs.size(), " outputs."));
}
if (value_spec.dims.size() != output_spec.dims.size()) {
return absl::InvalidArgumentError(
absl::StrCat("Expected ", value_spec.dims.size(), ", but got ",
output_spec.dims.size(), " for output."));
}
for (int i = 1; i < output_spec.dims.size(); ++i) {
if (value_spec.dims[i] != output_spec.dims[i]) {
return absl::InvalidArgumentError(
absl::StrCat("Expected ", value_spec.dims[i], ", but got ",
output_spec.dims[i], " for output.dim[", i, "]."));
}
}
if (value_spec.type != kTfLiteInt8 && value_spec.type != kTfLiteInt4 &&
value_spec.type != kTfLiteFloat32) {
return absl::InvalidArgumentError(
absl::StrCat("Expected int8, int4, or float32, but got ",
TfLiteTypeGetName(value_spec.type), " for input #1."));
}
return absl::OkStatus();
}
case kTfLiteBuiltinDynamicUpdateSlice: {
if (op_sig.inputs.size() != 3) {
return absl::UnimplementedError(
"DynamicUpdateSlice requires 3 inputs.");
}
OpSignatureTensorSpec operand = op_sig.inputs[0];
OpSignatureTensorSpec update_slice = op_sig.inputs[1];
OpSignatureTensorSpec start_indices = op_sig.inputs[2];
if (operand.dims.size() == 4 && operand.dims[0] != 1) {
return absl::UnimplementedError(
"DynamicUpdateSlice only support 4D operand with batch size 1.");
}
if (start_indices.dims.size() > 1) {
return absl::UnimplementedError(
"DynamicUpdateSlice only support 1D start_indices.");
}
if (operand.type != update_slice.type) {
return absl::InternalError(
absl::StrCat("Array to update and updated slice must have the same "
"data type, but got: array to update: ",
operand.type, ", updated slice: ", update_slice.type));
}
if (start_indices.dims.size() != 1) {
return absl::InternalError(
absl::StrCat("Start indices must have be 1D, but got: ",
start_indices.dims.size()));
}
if (start_indices.type != kTfLiteInt32) {
return absl::InvalidArgumentError(
"start_indices must be of type int32.");
}
if (update_slice.dims.size() != operand.dims.size()) {
return absl::InternalError(absl::StrCat(
"Operand and update must have the same number of "
"dimensions, but got: operand: ",
operand.dims.size(), ", update: ", update_slice.dims.size()));
}
return absl::OkStatus();
}
case kTfLiteBuiltinFullyConnected: {
const TfLiteFullyConnectedParams* tf_options;
RETURN_IF_ERROR(RetrieveBuiltinData(op_sig, &tf_options));
if (tf_options->weights_format !=
kTfLiteFullyConnectedWeightsFormatDefault) {
return absl::UnimplementedError(
absl::StrCat("Unsupported FullyConnected weights format: ",
tf_options->weights_format));
}
if (GetNumberOfRuntimeInputs(op_sig) > 2) {
return absl::UnimplementedError(
"FullyConnected doesn't support more than 2 runtime inputs.");
}
if (op_sig.inputs[0].is_const) {
return absl::UnimplementedError(
"FullyConnected doesn't support constant input.");
}
if (tf_options->keep_num_dims == true) {
const auto& input = op_sig.inputs.at(0);
const auto& output = op_sig.outputs.at(0);
if (input.dims.size() != output.dims.size()) {
          return absl::UnimplementedError(
              "Input and output dimensions differ, so FullyConnected "
              "doesn't support keep_num_dims.");
}
}
return absl::OkStatus();
}
case kTfLiteBuiltinGather:
if (!CheckInputsConstsOutputs(op_sig, 2,
0,
1)
.ok() &&
!CheckInputsConstsOutputs(op_sig, 1,
1,
1)
.ok()) {
return absl::InvalidArgumentError(
"Op can only handle 1 or 2 operand(s).");
}
if (op_sig.inputs[1].dims.size() != 1) {
return absl::UnimplementedError("Only support 1D indices\n");
}
return op_sig.inputs.at(1).type == kTfLiteInt32
? absl::OkStatus()
: absl::UnimplementedError("Only accept INT32 indices\n");
case kTfLiteBuiltinHardSwish:
return CheckInputsOutputs(op_sig, 1,
1);
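    // LSTM: the full kernel expects 20 or 24 inputs and a sigmoid/tanh
    // activation; the basic kernel expects a fixed 3-runtime-input,
    // 2-const-input, 4-output signature with tanh and no cell/proj clipping.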
case kTfLiteBuiltinLstm: {
const TfLiteLSTMParams* tf_options;
RETURN_IF_ERROR(RetrieveBuiltinData(op_sig, &tf_options));
switch (tf_options->kernel_type) {
case kTfLiteLSTMFullKernel: {
const int inputs = op_sig.inputs.size();
if (inputs != 20 && inputs != 24) {
return absl::InternalError(
absl::StrCat("Expected 20 or 24 input tensors, but node has ",
inputs, " input(s)."));
}
const int runtime_outputs = op_sig.outputs.size();
if (runtime_outputs != 1) {
return absl::InternalError(
absl::StrCat("Expected 1 output tensor, but node has ",
runtime_outputs, " output(s)."));
}
if (tf_options->activation != kTfLiteActSigmoid &&
tf_options->activation != kTfLiteActTanh) {
return absl::UnimplementedError(absl::StrCat(
"Only sigmoid or tanh activation is supported, but node has ",
tf_options->activation));
}
return absl::OkStatus();
}
case kTfLiteLSTMBasicKernel:
RETURN_IF_ERROR(
CheckInputsConstsOutputs(op_sig, 3,
2,
4));
if (tf_options->activation != kTfLiteActTanh) {
return absl::UnimplementedError(
absl::StrCat("Only TANH activation is supported. but node has ",
tf_options->activation));
}
if (tf_options->cell_clip != 0.0f) {
return absl::UnimplementedError("cell_clip is not supported.");
}
if (tf_options->proj_clip != 0.0f) {
return absl::UnimplementedError("proj_clip is not supported.");
}
return absl::OkStatus();
}
}
case kTfLiteBuiltinMaxPool2d:
return CheckPooling2DGpuDelegateCompatibility(op_sig);
case kTfLiteBuiltinMean: {
RETURN_IF_ERROR(CheckInputsConstsOutputs(op_sig,
1,
1,
1));
return CheckAxesAreInt32Const(op_sig, 1);
}
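    // MUL broadcasting: with equal ranks one operand must dominate the other
    // in every dimension; with different ranks compatibility is delegated to
    // CheckAddMulBroadcastCompatibility.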
case kTfLiteBuiltinMul: {
if (op_sig.inputs.size() != 2) {
return absl::UnimplementedError("MUL requires two input tensors.");
}
const auto& input0 = op_sig.inputs.at(0);
const auto& input1 = op_sig.inputs.at(1);
if (input0.dims.size() == input1.dims.size()) {
bool first_has_smaller_dim = false;
bool second_has_smaller_dim = false;
for (int i = 0; i < input0.dims.size(); ++i) {
if (input0.dims[i] < input1.dims[i]) {
first_has_smaller_dim = true;
}
if (input1.dims[i] < input0.dims[i]) {
second_has_smaller_dim = true;
}
}
if (first_has_smaller_dim && second_has_smaller_dim) {
          return absl::UnimplementedError(
              "MUL requires one tensor to be greater than or equal to the "
              "other in all dimensions.");
}
      } else {
        auto broadcastable =
            CheckAddMulBroadcastCompatibility(input0, input1, flags);
        if (!broadcastable.ok()) {
          return broadcastable;
        }
      }
const TfLiteMulParams* tf_options;
RETURN_IF_ERROR(RetrieveBuiltinData(op_sig, &tf_options));
return IsActivationSupported(tf_options->activation);
}
case kTfLiteBuiltinPack:
return absl::OkStatus();
case kTfLiteBuiltinOneHot:
return CheckOneHotGpuDelegateCompatibility(op_sig);
case kTfLiteBuiltinQuantize:
RETURN_IF_ERROR(CheckInputsOutputs(op_sig,
1,
1));
return absl::OkStatus();
case kTfLiteBuiltinReluN1To1:
return absl::OkStatus();
case kTfLiteBuiltinPrelu:
return absl::OkStatus();
case kTfLiteBuiltinReshape:
RETURN_IF_ERROR(CheckInputsOutputs(op_sig,
1,
1));
return absl::OkStatus();
case kTfLiteBuiltinSelect:
case kTfLiteBuiltinSelectV2:
return CheckSelectV2GpuDelegateCompatibility(op_sig);
case kTfLiteBuiltinSlice: {
if (op_sig.inputs.size() < 3) {
return absl::UnimplementedError(
absl::StrCat("SLICE requires 3 inputs, but node has ",
op_sig.inputs.size(), " inputs."));
}
const auto& input = op_sig.inputs.at(0);
if (input.dims.size() != 3 && input.dims.size() != 4) {
        return absl::UnimplementedError(absl::StrCat(
            "SLICE supports only 3 or 4 dimensional tensors, but node has ",
            input.dims.size(), " dimensional tensors."));
}
return absl::OkStatus();
}
case kTfLiteBuiltinSoftmax: {
const TfLiteSoftmaxParams* tf_options;
RETURN_IF_ERROR(RetrieveBuiltinData(op_sig, &tf_options));
if (tf_options->beta != 1) {
return absl::UnimplementedError("Softmax.beta != 1 is not supported.");
}
return absl::OkStatus();
}
case kTfLiteBuiltinSpaceToDepth: {
RETURN_IF_ERROR(CheckInputsOutputs(op_sig,
1,
1));
const TfLiteSpaceToDepthParams* s2d_params;
RETURN_IF_ERROR(RetrieveBuiltinData(op_sig, &s2d_params));
if (s2d_params->block_size == 1) {
return absl::InvalidArgumentError(
"SPACE_TO_DEPTH block_size = 1 is a no-op.");
}
if (s2d_params->block_size < 1) {
return absl::InvalidArgumentError(
"SPACE_TO_DEPTH block_size must be > 1.");
}
return absl::OkStatus();
}
case kTfLiteBuiltinSplit:
return absl::OkStatus();
case kTfLiteBuiltinSplitV:
return absl::OkStatus();
case kTfLiteBuiltinStridedSlice: {
const TfLiteStridedSliceParams* tf_options;
RETURN_IF_ERROR(RetrieveBuiltinData(op_sig, &tf_options));
      if (tf_options->ellipsis_mask) {
        return absl::UnimplementedError(
            "STRIDED_SLICE does not support ellipsis_mask.");
      }
      if (tf_options->new_axis_mask) {
        return absl::UnimplementedError(
            "STRIDED_SLICE does not support new_axis_mask.");
      }
      if (tf_options->shrink_axis_mask) {
        return absl::UnimplementedError(
            "STRIDED_SLICE does not support the shrink_axis_mask parameter.");
      }
if (op_sig.inputs.size() < 4) {
return absl::UnimplementedError("STRIDED_SLICE requires 4 inputs.");
}
const auto& input = op_sig.inputs.at(0);
if (input.dims.size() != 3 && input.dims.size() != 4) {
        return absl::UnimplementedError(
            "STRIDED_SLICE supports only 3 or 4 dimensional tensors.");
}
return absl::OkStatus();
}
case kTfLiteBuiltinTile:
RETURN_IF_ERROR(CheckInputsOutputs(op_sig,
1,
1));
return absl::OkStatus();
case kTfLiteBuiltinTranspose:
RETURN_IF_ERROR(CheckInputsOutputs(op_sig,
1,
1));
return absl::OkStatus();
case kTfLiteBuiltinTransposeConv: {
RETURN_IF_ERROR(CheckConvoultionInputOutput(op_sig));
const TfLiteTransposeConvParams* tf_options;
RETURN_IF_ERROR(RetrieveBuiltinData(op_sig, &tf_options));
RETURN_IF_ERROR(
CheckStrides(tf_options->stride_height, tf_options->stride_width));
return absl::OkStatus();
}
case kTfLiteBuiltinResizeBilinear: {
RETURN_IF_ERROR(CheckInputsOutputs(op_sig,
1,
1));
const TfLiteResizeBilinearParams* tf_options;
RETURN_IF_ERROR(RetrieveBuiltinData(op_sig, &tf_options));
if (tf_options->align_corners && tf_options->half_pixel_centers) {
return absl::InternalError(
"If half_pixel_centers is True, align_corners must be False.");
}
return absl::OkStatus();
}
case kTfLiteBuiltinResizeNearestNeighbor: {
RETURN_IF_ERROR(CheckInputsOutputs(op_sig,
1,
1));
const TfLiteResizeNearestNeighborParams* tf_options;
RETURN_IF_ERROR(RetrieveBuiltinData(op_sig, &tf_options));
return absl::OkStatus();
}
case kTfLiteBuiltinRelu:
case kTfLiteBuiltinRelu6:
case kTfLiteBuiltinLeakyRelu:
return absl::OkStatus();
case kTfLiteBuiltinReduceMax:
case kTfLiteBuiltinReduceMin:
case kTfLiteBuiltinReduceProd:
case kTfLiteBuiltinSum: {
RETURN_IF_ERROR(CheckInputsOutputs(op_sig,
1,
1));
return CheckAxesAreInt32Const(op_sig, 1);
}
case kTfLiteBuiltinPad:
case kTfLiteBuiltinPadv2:
case kTfLiteBuiltinMirrorPad: {
if (opcode == kTfLiteBuiltinMirrorPad) {
const TfLiteMirrorPaddingParams* tf_options;
RETURN_IF_ERROR(RetrieveBuiltinData(op_sig, &tf_options));
if (tf_options->mode !=
TfLiteMirrorPaddingMode::kTfLiteMirrorPaddingReflect) {
          return absl::InvalidArgumentError(
              absl::StrCat("Only reflective padding is supported for the "
                           "MIRROR_PAD operation, but node has ",
                           tf_options->mode));
}
}
RETURN_IF_ERROR(CheckInputsOutputs(op_sig,
1,
1));
RETURN_IF_ERROR(CheckTensorIsAvailable(op_sig, 1));
auto& pad_tensor = op_sig.inputs.at(1);
if (pad_tensor.dims.size() != 2) {
return absl::InvalidArgumentError(absl::StrCat(
"Invalid paddings tensor dimension: expected 2 dim, got ",
pad_tensor.dims.size(), " dim"));
}
bool supported = pad_tensor.dims[0] == 3 || pad_tensor.dims[0] == 4;
if (!supported || pad_tensor.dims[1] != 2) {
return absl::InvalidArgumentError(absl::StrCat(
"Invalid paddings tensor shape: expected 4x2 or 3x2, got ",
pad_tensor.dims[0], "x", pad_tensor.dims[1]));
}
return absl::OkStatus();
}
case kTfLiteBuiltinReverseV2: {
RETURN_IF_ERROR(CheckInputsConstsOutputs(op_sig,
1,
1,
1));
return CheckAxesAreInt32Const(op_sig, 1);
}
case kTfLiteBuiltinAbs:
case kTfLiteBuiltinCeil:
case kTfLiteBuiltinCos:
case kTfLiteBuiltinElu:
case kTfLiteBuiltinExp:
case kTfLiteBuiltinFloor:
case kTfLiteBuiltinGelu:
case kTfLiteBuiltinLog:
case kTfLiteBuiltinLogistic:
case kTfLiteBuiltinNeg:
case kTfLiteBuiltinRsqrt:
case kTfLiteBuiltinSign:
case kTfLiteBuiltinSin:
case kTfLiteBuiltinSqrt:
case kTfLiteBuiltinSquare:
case kTfLiteBuiltinTanh:
return (CheckInputsConstsOutputs(op_sig, 1,
0,
1));
case kTfLiteBuiltinAtan2:
case kTfLiteBuiltinDiv:
case kTfLiteBuiltinEqual:
case kTfLiteBuiltinFloorDiv:
case kTfLiteBuiltinFloorMod:
case kTfLiteBuiltinGreater:
case kTfLiteBuiltinGreaterEqual:
case kTfLiteBuiltinLogicalAnd:
case kTfLiteBuiltinLess:
case kTfLiteBuiltinLessEqual:
case kTfLiteBuiltinMaximum:
case kTfLiteBuiltinMinimum:
case kTfLiteBuiltinNotEqual:
case kTfLiteBuiltinPow:
case kTfLiteBuiltinStablehloRemainder:
case kTfLiteBuiltinSquaredDifference:
case kTfLiteBuiltinSub: {
if (!CheckInputsConstsOutputs(op_sig, 2,
0,
1)
.ok() &&
!CheckInputsConstsOutputs(op_sig, 1,
1,
1)
.ok()) {
return absl::InvalidArgumentError(
"Op can only handle 1 or 2 operand(s).");
}
TfLiteFusedActivation activation = kTfLiteActNone;
if (opcode == kTfLiteBuiltinDiv) {
const TfLiteDivParams* tf_options;
auto status = RetrieveBuiltinData(op_sig, &tf_options);
activation = status.ok() ? tf_options->activation : kTfLiteActNone;
} else if (opcode == kTfLiteBuiltinSub) {
const TfLiteSubParams* tf_options;
auto status = RetrieveBuiltinData(op_sig, &tf_options);
activation = status.ok() ? tf_options->activation : kTfLiteActNone;
}
return IsActivationSupported(activation);
}
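    // STABLEHLO_BROADCAST_IN_DIM: the broadcast dimensions come in as a const
    // 1D int32 tensor whose length must equal the rank of the operand.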
case kTfLiteBuiltinStablehloBroadcastInDim:
if (!CheckInputsConstsOutputs(op_sig, 1,
1,
1)
.ok()) {
return absl::InvalidArgumentError(
"requires one runtime input, one const input, and one output");
}
if (op_sig.inputs[1].dims.size() != 1) {
return absl::InvalidArgumentError("Only support 1D indices");
}
if (op_sig.inputs[1].type != kTfLiteInt32) {
return absl::InvalidArgumentError("Only support int32 indices");
}
if (op_sig.inputs[0].dims.size() != op_sig.inputs[1].dims[0]) {
return absl::InvalidArgumentError(
"Require size(indices) = rank(operand)");
}
return absl::OkStatus();
case kTfLiteBuiltinStablehloCbrt:
if (op_sig.inputs[0].type != kTfLiteFloat16 &&
op_sig.inputs[0].type != kTfLiteFloat32 &&
op_sig.inputs[0].type != kTfLiteBFloat16) {
return absl::InvalidArgumentError("Only support float inputs");
}
if (op_sig.inputs[0].type != op_sig.outputs[0].type) {
return absl::InvalidArgumentError("Input and output types must match");
}
return CheckInputsConstsOutputs(op_sig, 1,
0,
1);
case kTfLiteBuiltinStablehloClamp:
if ((op_sig.inputs.at(0).type != op_sig.inputs.at(1).type) ||
(op_sig.inputs.at(1).type != op_sig.inputs.at(2).type)) {
return absl::InvalidArgumentError(
"Clamp tensors must all be the same type");
}
if ((op_sig.inputs.at(0).dims != op_sig.inputs.at(1).dims) &&
(NumElements(op_sig.inputs.at(0).dims) != 1)) {
return absl::InvalidArgumentError(
"Min tensor must be the same shape as the input, or a scalar");
}
      if ((op_sig.inputs.at(2).dims != op_sig.inputs.at(1).dims) &&
          (NumElements(op_sig.inputs.at(2).dims) != 1)) {
return absl::InvalidArgumentError(
"Max tensor must be the same shape as the input, or a scalar");
}
return CheckInputsConstsOutputs(op_sig, 3,
0,
1);
case kTfLiteBuiltinCustom:
return CheckCustomOpsGpuDelegateCompatibility(op_sig);
default:
break;
}
return absl::InvalidArgumentError(absl::StrCat(
"Not supported op ", tflite::EnumNamesBuiltinOperator()[op_sig.op]));
}
absl::Status CheckGpuDelegateCompatibility(const OperatorCode* op_code,
const Operator* op,
const SubGraph* subgraph,
const Model* model) {
OpSignature op_sig = GetOpSignature(op_code, op, subgraph, model);
auto status = CheckGpuDelegateCompatibility(
op_sig, GpuCompatibilityFlags::kEnhancedBroadcast);
if (op_sig.builtin_data) {
free(op_sig.builtin_data);
}
return status;
}
absl::Status CheckGpuDelegateCompatibility(
const TfLiteContext* context, const TfLiteNode* node,
const TfLiteRegistration* registration, GpuCompatibilityFlags flags) {
return CheckGpuDelegateCompatibility(
GetOpSignature(context, node, registration), flags);
}
} | #include "tensorflow/lite/tools/versioning/gpu_compatibility.h"
#include <memory>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "tensorflow/core/platform/resource_loader.h"
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/model_builder.h"
#include "tensorflow/lite/kernels/internal/types.h"
#include "tensorflow/lite/tools/versioning/op_signature.h"
namespace tflite {
namespace {
absl::Status CheckGpuDelegateCompatibility(const tflite::Model* model) {
auto subgraphs = model->subgraphs();
for (int i = 0; i < subgraphs->Length(); ++i) {
const SubGraph* subgraph = subgraphs->Get(i);
for (int j = 0; j < subgraph->operators()->Length(); ++j) {
const Operator* op = subgraph->operators()->Get(j);
const OperatorCode* op_code =
model->operator_codes()->Get(op->opcode_index());
auto status = CheckGpuDelegateCompatibility(op_code, op, subgraph, model);
if (!status.ok()) {
return status;
}
}
}
return absl::OkStatus();
}
}
TEST(CheckGpuDelegateCompatibility, Conv2DModel) {
const std::string& full_path = tensorflow::GetDataDependencyFilepath(
"tensorflow/lite/testdata/conv_huge_im2col.bin");
auto model = FlatBufferModel::BuildFromFile(full_path.data());
ASSERT_TRUE(model);
EXPECT_TRUE(CheckGpuDelegateCompatibility(model->GetModel()).ok());
}
TEST(CheckGpuDelegateCompatibility, Conv3DModel) {
const std::string& full_path = tensorflow::GetDataDependencyFilepath(
"tensorflow/lite/testdata/conv3d_huge_im2col.bin");
auto model = FlatBufferModel::BuildFromFile(full_path.data());
ASSERT_TRUE(model);
EXPECT_EQ(CheckGpuDelegateCompatibility(model->GetModel()).message(),
"Not supported op CONV_3D");
}
TEST(CheckGpuDelegateCompatibility, FlexModel) {
const std::string& full_path = tensorflow::GetDataDependencyFilepath(
"tensorflow/lite/testdata/multi_add_flex.bin");
auto model = FlatBufferModel::BuildFromFile(full_path.data());
ASSERT_TRUE(model);
EXPECT_EQ(CheckGpuDelegateCompatibility(model->GetModel()).message(),
"Not supported custom op FlexAddV2");
}
TEST(CheckGpuDelegateCompatibility, FCConstInput) {
OpSignature op_sig = OpSignature();
op_sig.op = BuiltinOperator_FULLY_CONNECTED;
auto params = std::make_unique<TfLiteFullyConnectedParams>();
params->weights_format = kTfLiteFullyConnectedWeightsFormatDefault;
op_sig.builtin_data = static_cast<void*>(params.get());
op_sig.inputs = std::vector<OpSignatureTensorSpec>(1);
op_sig.inputs[0] = OpSignatureTensorSpec();
op_sig.inputs[0].is_const = true;
EXPECT_EQ(CheckGpuDelegateCompatibility(op_sig).message(),
"FullyConnected doesn't support constant input.");
}
TEST(CheckGpuDelegateCompatibility, Add1Dto3DBroadcastSuccess) {
OpSignature op_sig = OpSignature();
op_sig.op = BuiltinOperator_ADD;
auto params = std::make_unique<TfLiteAddParams>();
op_sig.builtin_data = static_cast<void*>(params.get());
op_sig.inputs = std::vector<OpSignatureTensorSpec>(2);
op_sig.inputs[0] = OpSignatureTensorSpec();
op_sig.inputs[0].dims = {4, 1, 2};
op_sig.inputs[1] = OpSignatureTensorSpec();
op_sig.inputs[1].dims = {2};
EXPECT_TRUE(CheckGpuDelegateCompatibility(op_sig).message().empty());
}
TEST(CheckGpuDelegateCompatibility, Add2Dto3DBroadcastFail) {
OpSignature op_sig = OpSignature();
op_sig.op = BuiltinOperator_ADD;
auto params = std::make_unique<TfLiteAddParams>();
op_sig.builtin_data = static_cast<void*>(params.get());
op_sig.inputs = std::vector<OpSignatureTensorSpec>(2);
op_sig.inputs[0] = OpSignatureTensorSpec();
op_sig.inputs[0].dims = {1, 100, 256};
op_sig.inputs[1] = OpSignatureTensorSpec();
op_sig.inputs[1].dims = {100, 256};
EXPECT_EQ(CheckGpuDelegateCompatibility(op_sig).message(),
"Doesn't support broadcasting - input0: [1,100,256], input1: "
"[100,256]");
}
TEST(CheckGpuDelegateCompatibility, Add3Dto4DBroadcastFail) {
OpSignature op_sig = OpSignature();
op_sig.op = BuiltinOperator_ADD;
auto params = std::make_unique<TfLiteAddParams>();
op_sig.builtin_data = static_cast<void*>(params.get());
op_sig.inputs = std::vector<OpSignatureTensorSpec>(2);
op_sig.inputs[0] = OpSignatureTensorSpec();
op_sig.inputs[0].dims = {4, 1, 1, 2};
op_sig.inputs[1] = OpSignatureTensorSpec();
op_sig.inputs[1].dims = {1, 1, 2};
EXPECT_EQ(
CheckGpuDelegateCompatibility(op_sig).message(),
"Doesn't support broadcasting - input0: [4,1,1,2], input1: [1,1,2]");
}
TEST(CheckGpuDelegateCompatibility, Add3Dto4DBroadcastSuccess) {
OpSignature op_sig = OpSignature();
op_sig.op = BuiltinOperator_ADD;
auto params = std::make_unique<TfLiteAddParams>();
op_sig.builtin_data = static_cast<void*>(params.get());
op_sig.inputs = std::vector<OpSignatureTensorSpec>(2);
op_sig.inputs[0] = OpSignatureTensorSpec();
op_sig.inputs[0].dims = {1, 128, 513, 3};
op_sig.inputs[1] = OpSignatureTensorSpec();
op_sig.inputs[1].dims = {128, 513, 3};
EXPECT_TRUE(CheckGpuDelegateCompatibility(op_sig).message().empty());
}
TEST(CheckGpuDelegateCompatibility, Add2Dto4DBroadcastSuccess) {
OpSignature op_sig = OpSignature();
op_sig.op = BuiltinOperator_ADD;
auto params = std::make_unique<TfLiteAddParams>();
op_sig.builtin_data = static_cast<void*>(params.get());
op_sig.inputs = std::vector<OpSignatureTensorSpec>(2);
op_sig.inputs[0] = OpSignatureTensorSpec();
op_sig.inputs[0].dims = {1, 512, 512, 1};
op_sig.inputs[1] = OpSignatureTensorSpec();
op_sig.inputs[1].dims = {1, 1};
EXPECT_TRUE(CheckGpuDelegateCompatibility(op_sig).message().empty());
}
TEST(CheckGpuDelegateCompatibility, Add2Dto4DBroadcastSuccess2) {
OpSignature op_sig = OpSignature();
op_sig.op = BuiltinOperator_ADD;
auto params = std::make_unique<TfLiteAddParams>();
op_sig.builtin_data = static_cast<void*>(params.get());
op_sig.inputs = std::vector<OpSignatureTensorSpec>(2);
op_sig.inputs[0] = OpSignatureTensorSpec();
op_sig.inputs[0].dims = {1, 384, 384, 3};
op_sig.inputs[1] = OpSignatureTensorSpec();
op_sig.inputs[1].dims = {1, 1};
EXPECT_TRUE(CheckGpuDelegateCompatibility(op_sig).message().empty());
}
TEST(CheckGpuDelegateCompatibility, Add2Dto4DBroadcastSuccess3) {
OpSignature op_sig = OpSignature();
op_sig.op = BuiltinOperator_ADD;
auto params = std::make_unique<TfLiteAddParams>();
op_sig.builtin_data = static_cast<void*>(params.get());
op_sig.inputs = std::vector<OpSignatureTensorSpec>(2);
op_sig.inputs[0] = OpSignatureTensorSpec();
op_sig.inputs[0].dims = {1, 4, 4, 10};
op_sig.inputs[1] = OpSignatureTensorSpec();
op_sig.inputs[1].dims = {1, 10};
EXPECT_TRUE(CheckGpuDelegateCompatibility(op_sig).message().empty());
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/tools/versioning/gpu_compatibility.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/tools/versioning/gpu_compatibility_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
5eceaa04-6a53-46b1-828b-67ccd30cfabc | cpp | google/tensorstore | image_reader | tensorstore/internal/image/image_reader.h | tensorstore/internal/image/image_reader_test.cc | #ifndef TENSORSTORE_INTERNAL_IMAGE_IMAGE_READER_H_
#define TENSORSTORE_INTERNAL_IMAGE_IMAGE_READER_H_
#include "absl/status/status.h"
#include "riegeli/bytes/reader.h"
#include "tensorstore/internal/image/image_info.h"
#include "tensorstore/util/span.h"
namespace tensorstore {
namespace internal_image {
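// Decodes a single image from a riegeli::Reader. Callers invoke Initialize()
// first, size the destination buffer from GetImageInfo() (see
// ImageRequiredBytes), and then call Decode() exactly once.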
class ImageReader {
public:
virtual ~ImageReader() = default;
virtual absl::Status Initialize(riegeli::Reader* reader) = 0;
virtual ImageInfo GetImageInfo() = 0;
virtual absl::Status Decode(tensorstore::span<unsigned char> dest) = 0;
};
}
}
#endif | #include "tensorstore/internal/image/image_reader.h"
#include <stddef.h>
#include <array>
#include <cstdint>
#include <memory>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/flags/flag.h"
#include "absl/log/absl_log.h"
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "absl/strings/match.h"
#include "absl/strings/string_view.h"
#include "riegeli/bytes/cord_reader.h"
#include "riegeli/bytes/fd_reader.h"
#include "riegeli/bytes/read_all.h"
#include "tensorstore/data_type.h"
#include "tensorstore/internal/image/avif_reader.h"
#include "tensorstore/internal/image/bmp_reader.h"
#include "tensorstore/internal/image/image_info.h"
#include "tensorstore/internal/image/jpeg_reader.h"
#include "tensorstore/internal/image/png_reader.h"
#include "tensorstore/internal/image/tiff_reader.h"
#include "tensorstore/internal/image/webp_reader.h"
#include "tensorstore/internal/path.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/status_testutil.h"
ABSL_FLAG(std::string, tensorstore_test_data_dir, ".",
"Path to directory containing test data.");
namespace {
using ::tensorstore::internal_image::AvifReader;
using ::tensorstore::internal_image::BmpReader;
using ::tensorstore::internal_image::ImageInfo;
using ::tensorstore::internal_image::ImageReader;
using ::tensorstore::internal_image::JpegReader;
using ::tensorstore::internal_image::PngReader;
using ::tensorstore::internal_image::TiffReader;
using ::tensorstore::internal_image::WebPReader;
struct V {
std::array<size_t, 2> yx;
std::array<unsigned char, 3> rgb;
};
struct TestParam {
std::string filename;
ImageInfo info;
std::vector<V> values;
};
[[maybe_unused]] std::string PrintToString(const TestParam& p) {
return p.filename;
}
class ReaderTest : public ::testing::TestWithParam<TestParam> {
public:
ReaderTest() {
if (IsTiff()) {
reader = std::make_unique<TiffReader>();
} else if (IsJpeg()) {
reader = std::make_unique<JpegReader>();
} else if (IsPng()) {
reader = std::make_unique<PngReader>();
} else if (IsBmp()) {
reader = std::make_unique<BmpReader>();
} else if (IsAvif()) {
reader = std::make_unique<AvifReader>();
} else if (IsWebP()) {
reader = std::make_unique<WebPReader>();
}
}
bool IsTiff() {
return (absl::EndsWith(GetParam().filename, ".tiff") ||
absl::EndsWith(GetParam().filename, ".tif"));
}
bool IsPng() { return absl::EndsWith(GetParam().filename, ".png"); }
bool IsJpeg() {
return (absl::EndsWith(GetParam().filename, ".jpg") ||
absl::EndsWith(GetParam().filename, ".jpeg"));
}
bool IsAvif() { return absl::EndsWith(GetParam().filename, ".avif"); }
bool IsBmp() { return absl::EndsWith(GetParam().filename, ".bmp"); }
bool IsWebP() { return absl::EndsWith(GetParam().filename, ".webp"); }
bool ReadsEntireFile() { return IsAvif() || IsJpeg(); }
std::string GetFilename() {
return tensorstore::internal::JoinPath(
absl::GetFlag(FLAGS_tensorstore_test_data_dir), GetParam().filename);
}
tensorstore::Result<absl::Cord> ReadEntireFile(std::string filename) {
absl::Cord file_data;
TENSORSTORE_RETURN_IF_ERROR(
riegeli::ReadAll(riegeli::FdReader(filename), file_data));
return file_data;
}
std::unique_ptr<ImageReader> reader;
};
TEST_P(ReaderTest, ReadImage) {
const auto& filename = GetParam().filename;
ASSERT_FALSE(reader.get() == nullptr) << filename;
ABSL_LOG(INFO) << filename;
TENSORSTORE_ASSERT_OK_AND_ASSIGN(absl::Cord file_data,
ReadEntireFile(GetFilename()));
ASSERT_FALSE(file_data.empty());
riegeli::CordReader cord_reader(&file_data);
ASSERT_THAT(reader->Initialize(&cord_reader), ::tensorstore::IsOk())
<< filename;
auto expected_info = GetParam().info;
auto info = reader->GetImageInfo();
EXPECT_EQ(info.width, expected_info.width) << filename;
EXPECT_EQ(info.height, expected_info.height) << filename;
EXPECT_EQ(info.num_components, expected_info.num_components) << filename;
EXPECT_EQ(info.dtype, expected_info.dtype) << filename;
const size_t image_bytes = ImageRequiredBytes(info);
EXPECT_EQ(image_bytes, ImageRequiredBytes(expected_info));
std::unique_ptr<unsigned char[]> image(new unsigned char[image_bytes]());
EXPECT_THAT(reader->Decode(tensorstore::span(image.get(), image_bytes)),
::tensorstore::IsOk());
EXPECT_TRUE(cord_reader.Close()) << cord_reader.status();
for (const V& v : GetParam().values) {
ASSERT_LT(v.yx[0], expected_info.height)
<< " (" << v.yx[0] << "," << v.yx[1] << ")";
ASSERT_LT(v.yx[1], expected_info.width)
<< " (" << v.yx[0] << "," << v.yx[1] << ")";
size_t offset =
expected_info.width * expected_info.num_components * v.yx[0] + v.yx[1];
EXPECT_THAT(tensorstore::span<unsigned char>(image.get() + offset, 3),
::testing::ElementsAreArray(v.rgb))
<< " (" << v.yx[0] << "," << v.yx[1] << ") " << offset;
}
}
TEST_P(ReaderTest, ReadImageTruncated) {
const auto& filename = GetParam().filename;
ASSERT_FALSE(reader.get() == nullptr) << filename;
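  // Skip inputs for which dropping the final 10% of the bytes does not
  // reliably make decoding fail (tiny 1-bit/greyscale files and WebP).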
if (filename == "png/D75_01b.png") return;
if (filename == "tiff/D75_01b.tiff") return;
if (filename == "bmp/D75_08b_grey.bmp") return;
if (IsWebP()) return;
TENSORSTORE_ASSERT_OK_AND_ASSIGN(absl::Cord file_data,
ReadEntireFile(GetFilename()));
absl::Cord partial_file = file_data.Subcord(0, file_data.size() * 0.9);
riegeli::CordReader cord_reader(&partial_file);
absl::Status status = reader->Initialize(&cord_reader);
if (status.ok()) {
auto info = reader->GetImageInfo();
auto expected_info = GetParam().info;
if (info.width == expected_info.width) {
EXPECT_EQ(info.width, expected_info.width) << filename;
EXPECT_EQ(info.height, expected_info.height) << filename;
EXPECT_EQ(info.num_components, expected_info.num_components) << filename;
EXPECT_EQ(info.dtype, expected_info.dtype) << filename;
}
size_t image_bytes = ImageRequiredBytes(expected_info);
std::unique_ptr<unsigned char[]> image(new unsigned char[image_bytes]());
status.Update(reader->Decode(tensorstore::span(image.get(), image_bytes)));
}
if (status.ok()) {
if (!cord_reader.Close()) {
status.Update(cord_reader.status());
}
}
EXPECT_FALSE(status.ok());
}
std::vector<V> GetD75_08_Values() {
return {
V{{0, 0}, {151, 75, 83}},
V{{171, 0}, {255, 250, 251}},
V{{29, 117}, {173, 93, 97}},
};
}
std::vector<V> GetD75_08_Values_JPEG() {
return {
V{{0, 0}, {152, 76, 88}},
V{{171, 0}, {253, 247, 251}},
V{{29, 117}, {174, 93, 99}},
};
}
INSTANTIATE_TEST_SUITE_P(
AvifFiles, ReaderTest,
::testing::Values(
TestParam{"avif/D75_08b.avif", ImageInfo{172, 306, 3},
GetD75_08_Values()},
TestParam{"avif/D75_08b_cq1.avif", ImageInfo{172, 306, 3}},
TestParam{"avif/D75_10b_cq1.avif",
ImageInfo{172, 306, 3, ::tensorstore::dtype_v<uint16_t>}},
TestParam{"avif/D75_08b_grey.avif",
ImageInfo{172, 306, 1},
{V{{29, 117}, {87, 87, 87}}}},
TestParam{"avif/D75_12b_grey.avif",
ImageInfo{172, 306, 1, ::tensorstore::dtype_v<uint16_t>}}
));
INSTANTIATE_TEST_SUITE_P(
BmpFiles, ReaderTest,
::testing::Values(
TestParam{"bmp/D75_08b.bmp", ImageInfo{172, 306, 3},
GetD75_08_Values()},
TestParam{"bmp/D75_08b_grey.bmp",
ImageInfo{172, 306, 1},
{V{{29, 117}, {87, 87, 87}}}}
));
INSTANTIATE_TEST_SUITE_P(
JpegFiles, ReaderTest,
::testing::Values(
TestParam{"jpeg/D75_08b.jpeg", ImageInfo{172, 306, 3},
GetD75_08_Values_JPEG()}
));
INSTANTIATE_TEST_SUITE_P(
PngFiles, ReaderTest,
::testing::Values(
TestParam{"png/D75_08b.png", ImageInfo{172, 306, 3},
GetD75_08_Values()},
TestParam{"png/D75_16b.png",
ImageInfo{172, 306, 3, ::tensorstore::dtype_v<uint16_t>}},
TestParam{"png/D75_04b.png", ImageInfo{172, 306, 3}},
TestParam{"png/D75_08b_grey.png",
ImageInfo{172, 306, 1},
{V{{29, 117}, {87, 87, 87}}}},
TestParam{"png/D75_16b_grey.png",
ImageInfo{172, 306, 1, ::tensorstore::dtype_v<uint16_t>}},
TestParam{"png/D75_01b.png",
ImageInfo{172, 306, 1, ::tensorstore::dtype_v<bool>}}
));
INSTANTIATE_TEST_SUITE_P(
TifFiles, ReaderTest,
::testing::Values(
TestParam{"tiff/D75_08b.tiff", ImageInfo{172, 306, 3},
GetD75_08_Values()},
TestParam{"tiff/D75_08b_tiled.tiff", ImageInfo{172, 306, 3},
GetD75_08_Values()},
TestParam{"tiff/D75_08b_scanline.tiff", ImageInfo{172, 306, 3},
GetD75_08_Values()},
TestParam{"tiff/D75_08b_zip.tiff", ImageInfo{172, 306, 3},
GetD75_08_Values()},
TestParam{"tiff/D75_08b_lzw.tiff", ImageInfo{172, 306, 3},
GetD75_08_Values()},
TestParam{"tiff/D75_16b.tiff",
ImageInfo{172, 306, 3, ::tensorstore::dtype_v<uint16_t>}},
TestParam{"tiff/D75_01b.tiff",
ImageInfo{172, 306, 1, ::tensorstore::dtype_v<bool>}},
TestParam{"tiff/D75_08b_grey.tiff",
ImageInfo{172, 306, 1},
{V{{29, 117}, {87, 87, 87}}}},
TestParam{"tiff/D75_16b_grey.tiff",
ImageInfo{172, 306, 1, ::tensorstore::dtype_v<uint16_t>}}
));
INSTANTIATE_TEST_SUITE_P(
WebPFiles, ReaderTest,
::testing::Values(
TestParam{"webp/D75_08b.webp", ImageInfo{172, 306, 3},
GetD75_08_Values()},
TestParam{"webp/D75_08b_q90.webp",
ImageInfo{172, 306, 3},
{V{{29, 117}, {166, 94, 91}}}}
));
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/image/image_reader.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/image/image_reader_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
e44f5644-0e5c-4bc1-bd80-cff007141f3e | cpp | google/tsl | refcount | tsl/platform/refcount.h | tsl/platform/refcount_test.cc | #ifndef TENSORFLOW_TSL_PLATFORM_REFCOUNT_H_
#define TENSORFLOW_TSL_PLATFORM_REFCOUNT_H_
#include <atomic>
#include <map>
#include <memory>
#include "tsl/platform/logging.h"
#include "tsl/platform/mutex.h"
#include "tsl/platform/thread_annotations.h"
namespace tsl {
namespace core {
class RefCounted {
public:
RefCounted();
void Ref() const;
bool Unref() const;
int_fast32_t RefCount() const;
bool RefCountIsOne() const;
protected:
virtual ~RefCounted();
bool TryRef() const;
virtual void NotifyDeleted() const;
private:
mutable std::atomic_int_fast32_t ref_;
RefCounted(const RefCounted&) = delete;
void operator=(const RefCounted&) = delete;
};
struct RefCountDeleter {
void operator()(const RefCounted* o) const { o->Unref(); }
};
template <typename T>
class RefCountPtr;
template <typename T>
ABSL_MUST_USE_RESULT RefCountPtr<T> GetNewRef(T* ptr) {
static_assert(std::is_base_of<RefCounted, T>::value);
if (ptr == nullptr) return RefCountPtr<T>();
ptr->Ref();
RefCountPtr<T> ret(ptr);
return ret;
}
template <typename T>
class RefCountPtr : public std::unique_ptr<T, RefCountDeleter> {
public:
using std::unique_ptr<T, RefCountDeleter>::unique_ptr;
ABSL_MUST_USE_RESULT RefCountPtr GetNewRef() const {
if (this->get() == nullptr) return RefCountPtr<T>();
this->get()->Ref();
return RefCountPtr<T>(this->get());
}
};
class ScopedUnref {
public:
explicit ScopedUnref(const RefCounted* o) : obj_(o) {}
~ScopedUnref() {
if (obj_) obj_->Unref();
}
private:
const RefCounted* obj_;
ScopedUnref(const ScopedUnref&) = delete;
void operator=(const ScopedUnref&) = delete;
};
template <typename T>
class WeakPtr;
using WeakNotifyFn = std::function<void()>;
class WeakRefCounted : public RefCounted {
public:
int WeakRefCount() const {
return data_->RefCount() - 1;
}
protected:
void NotifyDeleted() const override { data_->Notify(); }
private:
struct WeakRefData : public RefCounted {
explicit WeakRefData(WeakRefCounted* ptr) : ptr(ptr), next_notifier_id(1) {}
mutable mutex mu;
WeakRefCounted* ptr TF_GUARDED_BY(mu);
std::map<int, WeakNotifyFn> notifiers;
int next_notifier_id;
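    // Runs every registered notifier exactly once. The mutex is released
    // while each callback executes so a callback may itself add or remove
    // notifiers on this WeakRefData without deadlocking.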
void Notify() {
mutex_lock ml(mu);
while (!notifiers.empty()) {
auto iter = notifiers.begin();
WeakNotifyFn notify_fn = std::move(iter->second);
notifiers.erase(iter);
mu.unlock();
notify_fn();
mu.lock();
}
ptr = nullptr;
}
WeakRefCounted* GetNewRef() {
mutex_lock ml(mu);
if (ptr != nullptr && ptr->TryRef()) {
return ptr;
}
return nullptr;
}
int AddNotifier(WeakNotifyFn notify_fn) {
mutex_lock ml(mu);
if (ptr == nullptr) {
return 0;
}
int notifier_id = next_notifier_id++;
notifiers.emplace(notifier_id, std::move(notify_fn));
return notifier_id;
}
int DupNotifier(int notifier_id) {
mutex_lock ml(mu);
auto iter = notifiers.find(notifier_id);
if (iter != notifiers.end()) {
int notifier_id = next_notifier_id++;
notifiers.emplace(notifier_id, iter->second);
return notifier_id;
}
return 0;
}
void RemoveNotifier(int notifier_id) {
mutex_lock ml(mu);
notifiers.erase(notifier_id);
}
};
mutable RefCountPtr<WeakRefData> data_{new WeakRefData(this)};
template <typename T>
friend class WeakPtr;
friend struct WeakRefData;
};
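// A weak reference to a WeakRefCounted object. GetNewRef() yields a strong
// RefCountPtr<T> while the object is still alive and nullptr afterwards; an
// optional WeakNotifyFn fires just before the object is destroyed.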
template <typename T>
class WeakPtr {
public:
explicit WeakPtr(WeakRefCounted* ptr = nullptr,
WeakNotifyFn notify_fn = nullptr)
: data_(nullptr), notifier_id_(0) {
if (ptr != nullptr) {
ptr->data_->Ref();
data_.reset(ptr->data_.get());
if (notify_fn) {
notifier_id_ = data_->AddNotifier(notify_fn);
}
}
}
~WeakPtr() {
if (data_ != nullptr && notifier_id_ != 0) {
data_->RemoveNotifier(notifier_id_);
}
}
  WeakPtr(const WeakPtr& other) : data_(nullptr), notifier_id_(0) {
    operator=(other);
  }
  WeakPtr& operator=(const WeakPtr& other) {
    if (data_ != nullptr && notifier_id_ != 0) {
      data_->RemoveNotifier(notifier_id_);
    }
    if (other.data_ != nullptr) {
      other.data_->Ref();
      data_.reset(other.data_.get());
      notifier_id_ = data_->DupNotifier(other.notifier_id_);
    } else {
      data_.reset();
      notifier_id_ = 0;
    }
    return *this;
  }
WeakPtr(WeakPtr&& other) noexcept {
data_ = std::move(other.data_);
notifier_id_ = other.notifier_id_;
other.notifier_id_ = 0;
}
WeakPtr& operator=(WeakPtr&& other) noexcept {
if (this != &other) {
if (data_ != nullptr && notifier_id_ != 0) {
data_->RemoveNotifier(notifier_id_);
}
data_ = std::move(other.data_);
notifier_id_ = other.notifier_id_;
other.notifier_id_ = 0;
}
return *this;
}
RefCountPtr<T> GetNewRef() const {
RefCountPtr<T> ref;
if (data_ != nullptr) {
WeakRefCounted* ptr = data_->GetNewRef();
ref.reset(static_cast<T*>(ptr));
}
    return ref;
}
private:
RefCountPtr<WeakRefCounted::WeakRefData> data_;
int notifier_id_;
};
inline RefCounted::RefCounted() : ref_(1) {}
inline RefCounted::~RefCounted() {
DCHECK_EQ(ref_.load(), 0);
}
inline void RefCounted::Ref() const {
int_fast32_t old_ref = ref_.fetch_add(1, std::memory_order_relaxed);
DCHECK_GT(old_ref, 0);
}
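// Attempts to take a reference via compare-and-swap so that a count that has
// already reached zero (an object mid-destruction) is never resurrected.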
inline bool RefCounted::TryRef() const {
int_fast32_t old_ref = ref_.load();
while (old_ref != 0) {
if (ref_.compare_exchange_weak(old_ref, old_ref + 1)) {
return true;
}
}
return false;
}
inline bool RefCounted::Unref() const {
DCHECK_GT(ref_.load(), 0);
if (ref_.fetch_sub(1, std::memory_order_acq_rel) == 1) {
NotifyDeleted();
delete this;
return true;
}
return false;
}
inline int_fast32_t RefCounted::RefCount() const {
return ref_.load(std::memory_order_acquire);
}
inline void RefCounted::NotifyDeleted() const {}
inline bool RefCounted::RefCountIsOne() const {
return (ref_.load(std::memory_order_acquire) == 1);
}
}
}
#endif | #include "tsl/platform/refcount.h"
#include "tsl/platform/env.h"
#include "tsl/platform/test.h"
#include "tsl/platform/threadpool.h"
namespace tsl {
namespace core {
namespace {
class RefTest : public ::testing::Test {
public:
RefTest() {
constructed_ = 0;
destroyed_ = 0;
}
static int constructed_;
static int destroyed_;
};
int RefTest::constructed_;
int RefTest::destroyed_;
class MyRef : public RefCounted {
public:
MyRef() { RefTest::constructed_++; }
~MyRef() override { RefTest::destroyed_++; }
};
TEST_F(RefTest, New) {
MyRef* ref = new MyRef;
ASSERT_EQ(1, constructed_);
ASSERT_EQ(0, destroyed_);
ref->Unref();
ASSERT_EQ(1, constructed_);
ASSERT_EQ(1, destroyed_);
}
TEST_F(RefTest, RefUnref) {
MyRef* ref = new MyRef;
ASSERT_EQ(1, constructed_);
ASSERT_EQ(0, destroyed_);
ref->Ref();
ASSERT_EQ(0, destroyed_);
ref->Unref();
ASSERT_EQ(0, destroyed_);
ref->Unref();
ASSERT_EQ(1, destroyed_);
}
TEST_F(RefTest, RefCountOne) {
MyRef* ref = new MyRef;
ASSERT_TRUE(ref->RefCountIsOne());
ref->Unref();
}
TEST_F(RefTest, RefCountNotOne) {
MyRef* ref = new MyRef;
ref->Ref();
ASSERT_FALSE(ref->RefCountIsOne());
ref->Unref();
ref->Unref();
}
TEST_F(RefTest, ConstRefUnref) {
const MyRef* cref = new MyRef;
ASSERT_EQ(1, constructed_);
ASSERT_EQ(0, destroyed_);
cref->Ref();
ASSERT_EQ(0, destroyed_);
cref->Unref();
ASSERT_EQ(0, destroyed_);
cref->Unref();
ASSERT_EQ(1, destroyed_);
}
TEST_F(RefTest, ReturnOfUnref) {
MyRef* ref = new MyRef;
ref->Ref();
EXPECT_FALSE(ref->Unref());
EXPECT_TRUE(ref->Unref());
}
TEST_F(RefTest, ScopedUnref) {
{ ScopedUnref unref(new MyRef); }
EXPECT_EQ(destroyed_, 1);
}
TEST_F(RefTest, ScopedUnref_Nullptr) {
{ ScopedUnref unref(nullptr); }
EXPECT_EQ(destroyed_, 0);
}
TEST_F(RefTest, RefCountPtr) {
const RefCountPtr<MyRef> cref = RefCountPtr<MyRef>(new MyRef);
ASSERT_TRUE(cref.get() != nullptr);
ASSERT_EQ(cref->RefCount(), 1);
{
const RefCountPtr<MyRef> cref2 = cref.GetNewRef();
ASSERT_EQ(cref->RefCount(), 2);
}
ASSERT_EQ(cref->RefCount(), 1);
}
class ObjType : public WeakRefCounted {
public:
ObjType() : ObjType(unused_dtor_called_) {}
explicit ObjType(int& dtor_called) : dtor_called_(dtor_called) {}
~ObjType() override { dtor_called_++; }
int& dtor_called_;
static int unused_dtor_called_;
};
int ObjType::unused_dtor_called_ = 0;
TEST(WeakPtr, SingleThread) {
auto obj = new ObjType();
WeakPtr<ObjType> weakptr(obj);
ASSERT_TRUE(obj->RefCountIsOne());
EXPECT_EQ(obj->WeakRefCount(), 1);
EXPECT_NE(weakptr.GetNewRef(), nullptr);
obj->Unref();
EXPECT_EQ(weakptr.GetNewRef(), nullptr);
}
TEST(WeakPtr, MultiThreadedWeakRef) {
std::atomic<int> hit_destructed{0};
auto env = Env::Default();
for (int i = 0; i < 100; i++) {
auto obj = new ObjType();
WeakPtr<ObjType> weakptr(obj);
bool obj_destructed = false;
EXPECT_EQ(obj->WeakRefCount(), 1);
auto fn = [&]() {
auto ref = weakptr.GetNewRef();
if (ref != nullptr) {
EXPECT_EQ(ref.get(), obj);
EXPECT_EQ(ref->WeakRefCount(), 1);
EXPECT_GE(ref->RefCount(), 1);
} else {
hit_destructed++;
EXPECT_TRUE(obj_destructed);
}
};
auto t1 = env->StartThread(ThreadOptions{}, "thread-1", fn);
auto t2 = env->StartThread(ThreadOptions{}, "thread-2", fn);
env->SleepForMicroseconds(10);
obj_destructed = true;
obj->Unref();
delete t1;
delete t2;
EXPECT_EQ(weakptr.GetNewRef(), nullptr);
}
if (hit_destructed == 0) {
LOG(WARNING) << "The destructed weakref test branch is not exercised.";
}
if (hit_destructed == 200) {
LOG(WARNING) << "The valid weakref test branch is not exercised.";
}
}
TEST(WeakPtr, NotifyCalled) {
auto obj = new ObjType();
int num_calls1 = 0;
int num_calls2 = 0;
auto notify_fn1 = [&num_calls1]() { num_calls1++; };
auto notify_fn2 = [&num_calls2]() { num_calls2++; };
WeakPtr<ObjType> weakptr1(obj, notify_fn1);
WeakPtr<ObjType> weakptr2(obj, notify_fn2);
ASSERT_TRUE(obj->RefCountIsOne());
EXPECT_EQ(obj->WeakRefCount(), 2);
EXPECT_NE(weakptr1.GetNewRef(), nullptr);
EXPECT_NE(weakptr2.GetNewRef(), nullptr);
EXPECT_EQ(num_calls1, 0);
EXPECT_EQ(num_calls2, 0);
obj->Unref();
EXPECT_EQ(weakptr1.GetNewRef(), nullptr);
EXPECT_EQ(weakptr2.GetNewRef(), nullptr);
EXPECT_EQ(num_calls1, 1);
EXPECT_EQ(num_calls2, 1);
}
TEST(WeakPtr, NotifyCalledBeforeDestructor) {
int dtor_called = 0;
auto obj = new ObjType(dtor_called);
int num_calls1 = 0;
auto notify_fn1 = [&num_calls1, &dtor_called]() {
num_calls1++;
EXPECT_EQ(dtor_called, 0);
};
WeakPtr<ObjType> weakptr1(obj, notify_fn1);
ASSERT_TRUE(obj->RefCountIsOne());
EXPECT_EQ(obj->WeakRefCount(), 1);
EXPECT_NE(weakptr1.GetNewRef(), nullptr);
EXPECT_EQ(num_calls1, 0);
obj->Unref();
EXPECT_EQ(weakptr1.GetNewRef(), nullptr);
EXPECT_EQ(num_calls1, 1);
EXPECT_EQ(dtor_called, 1);
}
TEST(WeakPtr, CopyTargetCalled) {
auto obj = new ObjType();
int num_calls1 = 0;
int num_calls2 = 0;
auto notify_fn1 = [&num_calls1]() { num_calls1++; };
auto notify_fn2 = [&num_calls2]() { num_calls2++; };
WeakPtr<ObjType> weakptr1(obj, notify_fn1);
WeakPtr<ObjType> weakptr2(obj, notify_fn2);
WeakPtr<ObjType> weakptr3(weakptr1);
weakptr2 = weakptr1;
ASSERT_TRUE(obj->RefCountIsOne());
EXPECT_EQ(obj->WeakRefCount(), 3);
EXPECT_NE(weakptr2.GetNewRef(), nullptr);
EXPECT_NE(weakptr3.GetNewRef(), nullptr);
EXPECT_EQ(num_calls1, 0);
EXPECT_EQ(num_calls2, 0);
obj->Unref();
EXPECT_EQ(weakptr2.GetNewRef(), nullptr);
EXPECT_EQ(weakptr3.GetNewRef(), nullptr);
EXPECT_EQ(num_calls1, 3);
EXPECT_EQ(num_calls2, 0);
}
TEST(WeakPtr, MoveTargetNotCalled) {
auto obj = new ObjType();
int num_calls1 = 0;
int num_calls2 = 0;
int num_calls3 = 0;
auto notify_fn1 = [&num_calls1]() { num_calls1++; };
auto notify_fn2 = [&num_calls2]() { num_calls2++; };
auto notify_fn3 = [&num_calls3]() { num_calls3++; };
WeakPtr<ObjType> weakptr1(obj, notify_fn1);
WeakPtr<ObjType> weakptr2(obj, notify_fn2);
WeakPtr<ObjType> weakptr3(WeakPtr<ObjType>(obj, notify_fn3));
weakptr2 = std::move(weakptr1);
ASSERT_TRUE(obj->RefCountIsOne());
EXPECT_EQ(obj->WeakRefCount(), 2);
EXPECT_NE(weakptr2.GetNewRef(), nullptr);
EXPECT_NE(weakptr3.GetNewRef(), nullptr);
EXPECT_EQ(num_calls1, 0);
EXPECT_EQ(num_calls2, 0);
EXPECT_EQ(num_calls3, 0);
obj->Unref();
EXPECT_EQ(weakptr2.GetNewRef(), nullptr);
EXPECT_EQ(weakptr3.GetNewRef(), nullptr);
EXPECT_EQ(num_calls1, 1);
EXPECT_EQ(num_calls2, 0);
EXPECT_EQ(num_calls3, 1);
}
TEST(WeakPtr, DestroyedNotifyNotCalled) {
auto obj = new ObjType();
int num_calls = 0;
auto notify_fn = [&num_calls]() { num_calls++; };
{ WeakPtr<ObjType> weakptr(obj, notify_fn); }
ASSERT_TRUE(obj->RefCountIsOne());
EXPECT_EQ(obj->WeakRefCount(), 0);
EXPECT_EQ(num_calls, 0);
obj->Unref();
EXPECT_EQ(num_calls, 0);
}
}
}
} | https://github.com/google/tsl/blob/6d708fdcdd4f40537b7fa273371215a6fa3d4423/tsl/platform/refcount.h | https://github.com/google/tsl/blob/6d708fdcdd4f40537b7fa273371215a6fa3d4423/tsl/platform/refcount_test.cc | 6d708fdcdd4f40537b7fa273371215a6fa3d4423 |
56c494af-7b20-485f-bb65-2e34dfb9d524 | cpp | tensorflow/tensorflow | c_plugin_coordination_service_agent | tensorflow/core/common_runtime/next_pluggable_device/c_plugin_coordination_service_agent.cc | tensorflow/core/common_runtime/next_pluggable_device/c_plugin_coordination_service_agent_test.cc | #include "tensorflow/core/common_runtime/next_pluggable_device/c_plugin_coordination_service_agent.h"
#include <string>
#include <string_view>
#include "absl/time/time.h"
#include "tensorflow/c/experimental/next_pluggable_device/c_api.h"
#include "tensorflow/c/tf_buffer.h"
#include "tensorflow/c/tf_status.h"
#include "tensorflow/c/tf_status_helper.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/statusor.h"
namespace tensorflow {
namespace {
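// Converts the TF_Buffer returned by the C API into an owned std::string,
// freeing the buffer on success and surfacing the TF_Status on failure.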
absl::StatusOr<std::string> ProcessGetKeyValueResult(TF_Buffer* result_buf,
TF_Status* status) {
if (TF_GetCode(status) != TF_OK) {
return StatusFromTF_Status(status);
} else {
std::string result{static_cast<const char*>(result_buf->data),
result_buf->length};
TF_DeleteBuffer(result_buf);
return result;
}
}
}
Status CPluginCoordinationServiceAgent::InsertKeyValue(std::string_view key,
std::string_view value) {
TF_StatusPtr c_status_ptr(TF_NewStatus());
TF_Status* status = c_status_ptr.get();
TF_CoordinationServiceInsertKeyValue(key.data(), key.size(), value.data(),
value.size(), agent_, status);
return StatusFromTF_Status(status);
}
absl::StatusOr<std::string> CPluginCoordinationServiceAgent::GetKeyValue(
std::string_view key) {
TF_StatusPtr c_status_ptr(TF_NewStatus());
TF_Status* status = c_status_ptr.get();
TF_Buffer* result_buf =
TF_CoordinationServiceGetKeyValue(key.data(), key.size(), agent_, status);
return ProcessGetKeyValueResult(result_buf, status);
}
absl::StatusOr<std::string> CPluginCoordinationServiceAgent::GetKeyValue(
std::string_view key, absl::Duration timeout) {
TF_StatusPtr c_status_ptr(TF_NewStatus());
TF_Status* status = c_status_ptr.get();
TF_Buffer* result_buf = TF_CoordinationServiceGetKeyValueWithTimeout(
key.data(), key.size(), absl::ToInt64Seconds(timeout), agent_, status);
return ProcessGetKeyValueResult(result_buf, status);
}
absl::StatusOr<std::string> CPluginCoordinationServiceAgent::TryGetKeyValue(
std::string_view key) {
TF_StatusPtr c_status_ptr(TF_NewStatus());
TF_Status* status = c_status_ptr.get();
TF_Buffer* result_buf = TF_CoordinationServiceTryGetKeyValue(
key.data(), key.size(), agent_, status);
return ProcessGetKeyValueResult(result_buf, status);
}
Status CPluginCoordinationServiceAgent::DeleteKeyValue(std::string_view key) {
TF_StatusPtr c_status_ptr(TF_NewStatus());
TF_Status* status = c_status_ptr.get();
TF_CoordinationServiceDeleteKeyValue(key.data(), key.size(), agent_, status);
return StatusFromTF_Status(status);
}
} | #include "tensorflow/core/common_runtime/next_pluggable_device/c_plugin_coordination_service_agent.h"
#include <memory>
#include <ostream>
#include <string>
#include <utility>
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/time/time.h"
#include "xla/tsl/distributed_runtime/call_options.h"
#include "xla/tsl/distributed_runtime/coordination/coordination_client.h"
#include "xla/tsl/distributed_runtime/coordination/coordination_service_agent.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/tsl/protobuf/coordination_config.pb.h"
#include "xla/tsl/protobuf/coordination_service.pb.h"
#include "tensorflow/core/platform/status.h"
#include "tsl/platform/env.h"
#include "tsl/platform/test.h"
namespace tensorflow {
namespace {
using tsl::CoordinationClient;
using tsl::CoordinationServiceAgent;
using tsl::CallOptions;
using tsl::DeleteKeyValueRequest;
using tsl::DeleteKeyValueResponse;
using tsl::GetKeyValueRequest;
using tsl::GetKeyValueResponse;
using tsl::InsertKeyValueRequest;
using tsl::InsertKeyValueResponse;
using ::testing::_;
using ::testing::DoAll;
using ::testing::InvokeArgument;
using ::testing::Pointee;
using ::testing::SetArgPointee;
using ::testing::WithArgs;
class ProtoStringMatcher {
public:
explicit ProtoStringMatcher(const tsl::protobuf::Message& expected)
: expected_(expected.DebugString()) {}
template <typename Message>
bool MatchAndExplain(const Message& p,
::testing::MatchResultListener*) const {
return p.DebugString() == expected_;
}
void DescribeTo(std::ostream* os) const { *os << expected_; }
void DescribeNegationTo(std::ostream* os) const {
*os << "not equal to expected message: " << expected_;
}
private:
const std::string expected_;
};
inline ::testing::PolymorphicMatcher<ProtoStringMatcher> EqualsProto(
const tsl::protobuf::Message& x) {
return ::testing::MakePolymorphicMatcher(ProtoStringMatcher(x));
}
MATCHER(KvEq, "simple KeyValueEntry matcher") {
const KeyValueEntry& kv0 = std::get<0>(arg);
const KeyValueEntry& kv1 = std::get<1>(arg);
return kv0.key() == kv1.key() && kv0.value() == kv1.value();
}
class TestCoordinationClient : public CoordinationClient {
public:
TestCoordinationClient() = default;
MOCK_METHOD(void, GetKeyValueAsync,
(CallOptions * call_opts, const GetKeyValueRequest*,
GetKeyValueResponse*, StatusCallback),
(override));
MOCK_METHOD(void, TryGetKeyValueAsync,
(const TryGetKeyValueRequest*, TryGetKeyValueResponse*,
StatusCallback),
(override));
MOCK_METHOD(void, InsertKeyValueAsync,
(const InsertKeyValueRequest*, InsertKeyValueResponse*,
StatusCallback),
(override));
MOCK_METHOD(void, DeleteKeyValueAsync,
(const DeleteKeyValueRequest*, DeleteKeyValueResponse*,
StatusCallback),
(override));
void GetKeyValueDirAsync(const tsl::GetKeyValueDirRequest* request,
tsl::GetKeyValueDirResponse* response,
StatusCallback done) override {
done(absl::UnimplementedError("GetKeyValueDirAsync"));
}
void ResetTaskAsync(const tsl::ResetTaskRequest* request,
tsl::ResetTaskResponse* response,
StatusCallback done) override {
done(absl::UnimplementedError("ResetTaskAsync"));
}
void ReportErrorToServiceAsync(
const tsl::ReportErrorToServiceRequest* request,
tsl::ReportErrorToServiceResponse* response,
StatusCallback done) override {
done(absl::UnimplementedError("ReportErrorToServiceAsync"));
}
void BarrierAsync(const tsl::BarrierRequest* request,
tsl::BarrierResponse* response,
StatusCallback done) override {
done(absl::UnimplementedError("BarrierAsync"));
}
void GetTaskStateAsync(const tsl::GetTaskStateRequest* request,
tsl::GetTaskStateResponse* response,
StatusCallback done) override {
done(absl::UnimplementedError("GetTaskStateAsync"));
}
void WaitForAllTasksAsync(const tsl::WaitForAllTasksRequest* request,
tsl::WaitForAllTasksResponse* response,
StatusCallback done) override {
done(absl::UnimplementedError("WaitForAllTasksAsync"));
}
void CancelBarrierAsync(const tsl::CancelBarrierRequest* request,
tsl::CancelBarrierResponse* response,
StatusCallback done) override {
done(absl::UnimplementedError("CancelBarrierAsync"));
}
void RegisterTaskAsync(tsl::CallOptions*,
const tsl::RegisterTaskRequest* request,
tsl::RegisterTaskResponse* response,
StatusCallback done) override {
done(absl::UnimplementedError("RegisterTaskAsync"));
}
void ShutdownTaskAsync(tsl::CallOptions*,
const tsl::ShutdownTaskRequest* request,
tsl::ShutdownTaskResponse* response,
StatusCallback done) override {
done(absl::UnimplementedError("ShutdownTaskAsync"));
}
void HeartbeatAsync(tsl::CallOptions*, const tsl::HeartbeatRequest* request,
tsl::HeartbeatResponse* response,
StatusCallback done) override {
done(absl::UnimplementedError("HeartbeatAsync"));
}
void ReportErrorToTaskAsync(CallOptions* call_opts,
const ReportErrorToTaskRequest* request,
ReportErrorToTaskResponse* response,
StatusCallback done) override {
done(absl::UnimplementedError("ReportErrorToTaskAsync"));
}
void PollForErrorAsync(CallOptions* call_opts,
const PollForErrorRequest* request,
PollForErrorResponse* response,
StatusCallback done) override {
done(absl::UnimplementedError("PollForErrorAsync"));
}
};
class CPluginCoordinationServiceAgentTest : public ::testing::Test {
public:
void InitializeAgent(CoordinationServiceConfig config = {}) {
config.set_service_leader("test_leader");
TF_ASSERT_OK(impl_->Initialize(
tsl::Env::Default(), "test_job",
0, config, std::move(client_),
[](Status s) {
LOG(ERROR) << "Coordination agent is set to error: " << s;
}));
}
TestCoordinationClient* GetClient() {
CHECK(client_ != nullptr)
<< "GetClient() was called after InitializeAgent()";
return client_.get();
}
protected:
std::unique_ptr<CoordinationServiceAgent> impl_ =
tsl::CreateCoordinationServiceAgent();
std::unique_ptr<CPluginCoordinationServiceAgent> agent_ =
std::make_unique<CPluginCoordinationServiceAgent>(impl_.get());
std::unique_ptr<TestCoordinationClient> client_ =
std::make_unique<TestCoordinationClient>();
};
TEST_F(CPluginCoordinationServiceAgentTest, GetKeyValue_Simple_Success) {
const std::string test_key = "test_key";
const std::string test_value = "test_value";
GetKeyValueResponse mocked_response;
auto kv = mocked_response.mutable_kv();
kv->set_key(test_key);
kv->set_value(test_value);
ON_CALL(*GetClient(), GetKeyValueAsync(_, _, _, _))
.WillByDefault(DoAll(SetArgPointee<2>(mocked_response),
InvokeArgument<3>(absl::OkStatus())));
InitializeAgent();
auto result = agent_->GetKeyValue(test_key);
TF_ASSERT_OK(result.status());
EXPECT_EQ(*result, test_value);
}
TEST_F(CPluginCoordinationServiceAgentTest, GetKeyValue_WithTimeout_Success) {
const std::string test_key = "test_key";
const std::string test_value = "test_value";
GetKeyValueResponse mocked_response;
auto kv = mocked_response.mutable_kv();
kv->set_key(test_key);
kv->set_value(test_value);
ON_CALL(*GetClient(), GetKeyValueAsync(_, _, _, _))
.WillByDefault(DoAll(SetArgPointee<2>(mocked_response),
InvokeArgument<3>(absl::OkStatus())));
InitializeAgent();
auto result = agent_->GetKeyValue(test_key, absl::Seconds(10));
TF_ASSERT_OK(result.status());
EXPECT_EQ(*result, test_value);
}
TEST_F(CPluginCoordinationServiceAgentTest, GetKeyValue_Timeout_ReturnError) {
const std::string test_key = "test_key";
StatusCallback owned_done;
ON_CALL(*GetClient(), GetKeyValueAsync(_, _, _, _))
.WillByDefault(WithArgs<3>([&](StatusCallback done) {
owned_done = done;
}));
InitializeAgent();
auto result = agent_->GetKeyValue(test_key, absl::Seconds(1));
EXPECT_EQ(result.status().code(), error::DEADLINE_EXCEEDED);
owned_done(absl::CancelledError("error"));
}
TEST_F(CPluginCoordinationServiceAgentTest,
GetKeyValue_ZeroTimeout_ReturnError) {
const std::string test_key = "test_key";
auto result = agent_->GetKeyValue(test_key, absl::ZeroDuration());
EXPECT_EQ(result.status().code(), error::INVALID_ARGUMENT);
}
TEST_F(CPluginCoordinationServiceAgentTest,
GetKeyValue_NegativeTimeout_ReturnError) {
const std::string test_key = "test_key";
auto result = agent_->GetKeyValue(test_key, absl::Seconds(-1));
EXPECT_EQ(result.status().code(), error::INVALID_ARGUMENT);
}
TEST_F(CPluginCoordinationServiceAgentTest, InsertKeyValue_Success) {
const std::string test_key = "test_key";
const std::string test_value = "test_value";
InsertKeyValueRequest expected_input;
auto kv = expected_input.mutable_kv();
kv->set_key(test_key);
kv->set_value(test_value);
EXPECT_CALL(*GetClient(),
InsertKeyValueAsync(Pointee(EqualsProto(expected_input)), _, _))
.WillOnce(InvokeArgument<2>(absl::OkStatus()));
InitializeAgent();
TF_ASSERT_OK(agent_->InsertKeyValue(test_key, test_value));
}
TEST_F(CPluginCoordinationServiceAgentTest, DeleteKeyValue_Success) {
const std::string test_key = "test_x_key";
DeleteKeyValueRequest expected_input;
expected_input.set_key(test_key);
expected_input.set_is_directory(true);
EXPECT_CALL(*GetClient(),
DeleteKeyValueAsync(Pointee(EqualsProto(expected_input)), _, _))
.WillOnce(InvokeArgument<2>(absl::OkStatus()));
InitializeAgent();
TF_ASSERT_OK(agent_->DeleteKeyValue(test_key));
}
TEST_F(CPluginCoordinationServiceAgentTest, TryGetKeyValue_Simple_Success) {
const std::string& test_key = "test_key";
const std::string& test_value = "test_value";
TryGetKeyValueResponse mocked_response;
auto kv = mocked_response.mutable_kv();
kv->set_key(test_key);
kv->set_value(test_value);
ON_CALL(*GetClient(), TryGetKeyValueAsync(_, _, _))
.WillByDefault(DoAll(SetArgPointee<1>(mocked_response),
InvokeArgument<2>(absl::OkStatus())));
InitializeAgent();
auto result = agent_->TryGetKeyValue(test_key);
TF_ASSERT_OK(result.status());
EXPECT_EQ(*result, test_value);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/next_pluggable_device/c_plugin_coordination_service_agent.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/next_pluggable_device/c_plugin_coordination_service_agent_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
8bba7f0f-f871-4066-bcbc-e3cd8d4e8d1d | cpp | tensorflow/tensorflow | android_sync | tensorflow/lite/delegates/gpu/gl/android_sync.cc | tensorflow/lite/delegates/gpu/gl/android_sync_test.cc | #include "tensorflow/lite/delegates/gpu/gl/android_sync.h"
#include <EGL/egl.h>
#include <EGL/eglext.h>
#include <EGL/eglplatform.h>
#include <GLES2/gl2.h>
#include <unistd.h>
namespace {
PFNEGLDUPNATIVEFENCEFDANDROIDPROC eglDupNativeFenceFDANDROID;
PFNEGLCREATESYNCKHRPROC eglCreateSyncKHR;
PFNEGLWAITSYNCKHRPROC eglWaitSyncKHR;
PFNEGLDESTROYSYNCKHRPROC eglDestroySyncKHR;
bool IsGlSupported() {
static const bool extensions_allowed = [] {
eglDupNativeFenceFDANDROID =
reinterpret_cast<PFNEGLDUPNATIVEFENCEFDANDROIDPROC>(
eglGetProcAddress("eglDupNativeFenceFDANDROID"));
eglCreateSyncKHR = reinterpret_cast<PFNEGLCREATESYNCKHRPROC>(
eglGetProcAddress("eglCreateSyncKHR"));
eglWaitSyncKHR = reinterpret_cast<PFNEGLWAITSYNCKHRPROC>(
eglGetProcAddress("eglWaitSyncKHR"));
eglDestroySyncKHR = reinterpret_cast<PFNEGLDESTROYSYNCKHRPROC>(
eglGetProcAddress("eglDestroySyncKHR"));
return eglWaitSyncKHR && eglCreateSyncKHR && eglDupNativeFenceFDANDROID &&
eglDestroySyncKHR;
}();
return extensions_allowed;
}
}
namespace tflite::gpu::gl {
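// Makes the GPU queue wait on an Android native fence fd. The fd is dup()ed
// because eglCreateSyncKHR takes ownership of the descriptor passed via
// EGL_SYNC_NATIVE_FENCE_FD_ANDROID.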
bool WaitFdGpu(int fence_fd) {
if (fence_fd == -1) {
return false;
}
if (!IsGlSupported()) {
return false;
}
EGLDisplay egl_display = eglGetDisplay(EGL_DEFAULT_DISPLAY);
if (egl_display == EGL_NO_DISPLAY) return false;
int fd_for_egl = dup(fence_fd);
EGLint sync_attribs[] = {EGL_SYNC_NATIVE_FENCE_FD_ANDROID, (EGLint)fd_for_egl,
EGL_NONE};
EGLSync fence_sync = eglCreateSyncKHR(
egl_display, EGL_SYNC_NATIVE_FENCE_ANDROID, sync_attribs);
if (fence_sync != EGL_NO_SYNC_KHR) {
eglWaitSyncKHR(egl_display, fence_sync, 0);
return true;
} else {
close(fd_for_egl);
return false;
}
}
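// Inserts a fence into the GL command stream and returns its native fence fd,
// or -1 (after falling back to a blocking glFinish) when EGL fence sync is
// unavailable.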
int CreateFdGpu() {
if (IsGlSupported()) {
EGLDisplay egl_display = eglGetDisplay(EGL_DEFAULT_DISPLAY);
if (egl_display != EGL_NO_DISPLAY) {
EGLSync fence_sync =
eglCreateSyncKHR(egl_display, EGL_SYNC_NATIVE_FENCE_ANDROID, nullptr);
if (fence_sync != EGL_NO_SYNC_KHR) {
int fence_fd = eglDupNativeFenceFDANDROID(egl_display, fence_sync);
if (fence_fd == -1) {
eglDestroySyncKHR(egl_display, fence_sync);
} else {
return fence_fd;
}
}
}
}
glFinish();
return -1;
}
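// Usage sketch (assumes a producer and a consumer GL context; names are
// illustrative):
//
//   int fd = CreateFdGpu();    // producer: fence after queued GL work
//   if (fd >= 0) {
//     WaitFdGpu(fd);           // consumer: GPU-side wait, no CPU block
//     close(fd);               // both functions leave fd ownership with caller
//   }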
} | #include "tensorflow/lite/delegates/gpu/gl/android_sync.h"
#include <memory>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/gpu/gl/egl_environment.h"
namespace tflite::gpu::gl {
TEST(AsyncBufferTest, FenceTest) {
// Without a current EGL environment, fence creation and waiting must fail
// gracefully.
EXPECT_EQ(CreateFdGpu(), -1);
EXPECT_FALSE(WaitFdGpu(1));
std::unique_ptr<EglEnvironment> env;
EXPECT_OK(EglEnvironment::NewEglEnvironment(&env));
int gpu_fd = CreateFdGpu();
EXPECT_GE(gpu_fd, 0);
EXPECT_TRUE(WaitFdGpu(gpu_fd));
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/gl/android_sync.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/gl/android_sync_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
d35cffbe-967b-4c8d-96bd-dc614f18ca06 | cpp | google/tensorstore | float8 | tensorstore/util/float8.h | tensorstore/util/float8_test.cc | #ifndef TENSORSTORE_UTIL_FLOAT8_H_
#define TENSORSTORE_UTIL_FLOAT8_H_
#include <algorithm>
#include <climits>
#include <cmath>
#include <cstddef>
#include <cstdint>
#include <limits>
#include <ostream>
#include <type_traits>
#include <utility>
#include "absl/base/casts.h"
#include <half.hpp>
#include <nlohmann/json_fwd.hpp>
#include "tensorstore/util/bfloat16.h"
#if (defined(__cpp_lib_bitops) && __cpp_lib_bitops >= 201907L)
#include <bit>
#endif
namespace tensorstore {
namespace float8_internal {
class Float8e4m3fn;
class Float8e4m3fnuz;
class Float8e4m3b11fnuz;
class Float8e5m2;
class Float8e5m2fnuz;
template <typename Derived>
class Float8Base {
protected:
struct ConstructFromRepTag {};
constexpr Float8Base(uint8_t rep, ConstructFromRepTag) : rep_{rep} {}
public:
constexpr Float8Base() : rep_(0) {}
template <typename T,
typename EnableIf = std::enable_if<std::is_arithmetic_v<T>>>
explicit Float8Base(T f)
: Float8Base(ConvertFrom(static_cast<float>(f)).rep(),
ConstructFromRepTag{}) {}
explicit Float8Base(double f64)
: Float8Base(ConvertFrom(f64).rep(), ConstructFromRepTag{}) {}
explicit Float8Base(float f32)
: Float8Base(ConvertFrom(f32).rep(), ConstructFromRepTag{}) {}
explicit Float8Base(BFloat16 bf16)
: Float8Base(ConvertFrom(bf16).rep(), ConstructFromRepTag{}) {}
explicit Float8Base(::half_float::half f16)
: Float8Base(ConvertFrom(f16).rep(), ConstructFromRepTag{}) {}
constexpr uint8_t rep() const { return rep_; }
template <typename T,
typename EnableIf = std::enable_if<std::is_arithmetic_v<T>>>
explicit operator T() const {
return static_cast<T>(static_cast<float>(derived()));
}
explicit operator double() const { return ConvertTo<double>(derived()); }
explicit operator float() const { return ConvertTo<float>(derived()); }
explicit operator BFloat16() const { return ConvertTo<BFloat16>(derived()); }
explicit operator ::half_float::half() const {
return ConvertTo<::half_float::half>(derived());
}
// Both +0 and -0 (where the format has a -0) convert to false.
explicit operator bool() const { return (rep() & 0x7F) != 0; }
constexpr Derived operator-() const {
return Derived(static_cast<uint8_t>(rep() ^ 0x80), ConstructFromRepTag{});
}
constexpr const Derived& derived() const {
return *static_cast<const Derived*>(this);
}
constexpr Derived& derived() { return *static_cast<Derived*>(this); }
static constexpr Derived FromRep(uint8_t rep) {
return Derived(rep, ConstructFromRepTag{});
}
template <bool kSaturate = false, bool kTruncate = false, typename From>
static Derived ConvertFrom(const From& from);
template <typename To, bool kSaturate = false, bool kTruncate = false>
static To ConvertTo(const Derived& from);
Derived operator+(const Derived& other) const {
return Derived{float{derived()} + float{other}};
}
Derived operator-(const Derived& other) const {
return Derived{float{derived()} - float{other}};
}
Derived operator*(const Derived& other) const {
return Derived{float{derived()} * float{other}};
}
Derived operator/(const Derived& other) const {
return Derived{float{derived()} / float{other}};
}
constexpr bool operator==(const Derived& other) const {
return Compare(derived(), other) == Ordering::kEquivalent;
}
constexpr bool operator!=(const Derived& other) const {
return Compare(derived(), other) != Ordering::kEquivalent;
}
bool operator<(const Derived& other) const {
return Compare(derived(), other) == Ordering::kLess;
}
bool operator<=(const Derived& other) const {
return Compare(derived(), other) <= Ordering::kEquivalent;
}
bool operator>(const Derived& other) const {
return Compare(derived(), other) == Ordering::kGreater;
}
bool operator>=(const Derived& other) const {
Ordering ordering = Compare(derived(), other);
return ordering == Ordering::kGreater || ordering == Ordering::kEquivalent;
}
Derived& operator+=(const Derived& other) {
derived() = derived() + other;
return derived();
}
// Lets expressions like `f += f8` (with a float lhs) compile; note that it
// returns the sum without modifying `a`.
friend float operator+=(const float& a, Derived b) {
return a + static_cast<float>(b);
}
Derived& operator-=(const Derived& other) {
derived() = derived() - other;
return derived();
}
Derived& operator*=(const Derived& other) {
derived() = derived() * other;
return derived();
}
Derived& operator/=(const Derived& other) {
derived() = derived() / other;
return derived();
}
template <template <typename U, typename V, typename... Args>
class ObjectType ,
template <typename U, typename... Args>
class ArrayType ,
class StringType , class BooleanType ,
class NumberIntegerType ,
class NumberUnsignedType ,
class NumberFloatType ,
template <typename U> class AllocatorType ,
template <typename T, typename SFINAE = void>
class JSONSerializer ,
class BinaryType >
friend void to_json(
::nlohmann::basic_json<ObjectType, ArrayType, StringType, BooleanType,
NumberIntegerType, NumberUnsignedType,
NumberFloatType, AllocatorType, JSONSerializer,
BinaryType>& j,
Derived v) {
j = static_cast<NumberFloatType>(v);
}
private:
static std::pair<uint8_t, uint8_t> SignAndMagnitude(Derived x) {
const uint8_t x_abs_bits = absl::bit_cast<uint8_t>(abs(x));
const uint8_t x_bits = absl::bit_cast<uint8_t>(x);
const uint8_t x_sign = x_bits ^ x_abs_bits;
return {x_sign, x_abs_bits};
}
static int8_t SignAndMagnitudeToTwosComplement(uint8_t sign,
uint8_t magnitude) {
return magnitude ^ (static_cast<int8_t>(sign) < 0 ? -1 : 0);
}
enum Ordering : int8_t {
kLess = -1,
kEquivalent = 0,
kGreater = 1,
kUnordered = 2,
};
friend Ordering Compare(const Derived& lhs, const Derived& rhs) {
if (isnan(lhs) || isnan(rhs)) {
return Ordering::kUnordered;
}
auto [lhs_sign, lhs_mag] = SignAndMagnitude(lhs);
auto [rhs_sign, rhs_mag] = SignAndMagnitude(rhs);
if (lhs_mag == 0 && rhs_mag == 0) {
return Ordering::kEquivalent;
}
int8_t lhs_twos_complement =
SignAndMagnitudeToTwosComplement(lhs_sign, lhs_mag);
int8_t rhs_twos_complement =
SignAndMagnitudeToTwosComplement(rhs_sign, rhs_mag);
if (lhs_twos_complement < rhs_twos_complement) {
return Ordering::kLess;
}
if (lhs_twos_complement > rhs_twos_complement) {
return Ordering::kGreater;
}
return Ordering::kEquivalent;
}
uint8_t rep_;
};
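// Worked example of the sign-magnitude comparison above, using e4m3fn reps:
//   Compare(0x80 /* -0 */, 0x00 /* +0 */) -> kEquivalent (both magnitudes 0)
//   Compare(0x81, 0x01): lhs maps to 0x01 ^ 0xFF = -2 (two's complement),
//   rhs maps to +1, so the result is kLess, as expected for -2^-9 < 2^-9.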
class Float8e4m3fn : public Float8Base<Float8e4m3fn> {
private:
using Base = Float8Base<Float8e4m3fn>;
friend class Float8Base<Float8e4m3fn>;
using Base::Float8Base;
public:
explicit Float8e4m3fn(const Float8e5m2& f8) : Float8e4m3fn(ConvertFrom(f8)) {}
explicit Float8e4m3fn(const Float8e4m3b11fnuz& f8)
: Float8e4m3fn(ConvertFrom(f8)) {}
};
class Float8e4m3b11fnuz : public Float8Base<Float8e4m3b11fnuz> {
private:
using Base = Float8Base<Float8e4m3b11fnuz>;
friend class Float8Base<Float8e4m3b11fnuz>;
using Base::Float8Base;
public:
explicit Float8e4m3b11fnuz(const Float8e5m2& f8)
: Float8e4m3b11fnuz(ConvertFrom(f8)) {}
explicit Float8e4m3b11fnuz(const Float8e5m2fnuz& f8)
: Float8e4m3b11fnuz(ConvertFrom(f8)) {}
explicit Float8e4m3b11fnuz(const Float8e4m3fn& f8)
: Float8e4m3b11fnuz(ConvertFrom(f8)) {}
explicit Float8e4m3b11fnuz(const Float8e4m3fnuz& f8)
: Float8e4m3b11fnuz(ConvertFrom(f8)) {}
// The fnuz/b11 formats have no negative zero (0x80 encodes NaN), so
// negating zero must return zero unchanged.
constexpr Float8e4m3b11fnuz operator-() const {
if ((rep() & 0x7f) == 0x00) {
return *this;
}
return Base::operator-();
}
Float8e4m3b11fnuz operator-(const Float8e4m3b11fnuz& other) const {
return Base::operator-(other);
}
explicit operator bool() const { return rep() != 0; }
};
class Float8e4m3fnuz : public Float8Base<Float8e4m3fnuz> {
private:
using Base = Float8Base<Float8e4m3fnuz>;
friend class Float8Base<Float8e4m3fnuz>;
using Base::Float8Base;
public:
explicit Float8e4m3fnuz(const Float8e5m2& f8)
: Float8e4m3fnuz(ConvertFrom(f8)) {}
explicit Float8e4m3fnuz(const Float8e5m2fnuz& f8)
: Float8e4m3fnuz(ConvertFrom(f8)) {}
explicit Float8e4m3fnuz(const Float8e4m3b11fnuz& f8)
: Float8e4m3fnuz(ConvertFrom(f8)) {}
explicit Float8e4m3fnuz(const Float8e4m3fn& f8)
: Float8e4m3fnuz(ConvertFrom(f8)) {}
constexpr Float8e4m3fnuz operator-() const {
if ((rep() & 0x7f) == 0x00) {
return *this;
}
return Base::operator-();
}
Float8e4m3fnuz operator-(const Float8e4m3fnuz& other) const {
return Base::operator-(other);
}
explicit operator bool() const { return rep() != 0; }
};
class Float8e5m2 : public Float8Base<Float8e5m2> {
private:
using Base = Float8Base<Float8e5m2>;
friend class Float8Base<Float8e5m2>;
using Base::Float8Base;
public:
explicit Float8e5m2(Float8e4m3fn f8) : Float8e5m2(ConvertFrom(f8)) {}
explicit Float8e5m2(Float8e4m3fnuz f8) : Float8e5m2(ConvertFrom(f8)) {}
explicit Float8e5m2(Float8e4m3b11fnuz f8) : Float8e5m2(ConvertFrom(f8)) {}
explicit Float8e5m2(Float8e5m2fnuz f8) : Float8e5m2(ConvertFrom(f8)) {}
};
class Float8e5m2fnuz : public Float8Base<Float8e5m2fnuz> {
private:
using Base = Float8Base<Float8e5m2fnuz>;
friend class Float8Base<Float8e5m2fnuz>;
using Base::Float8Base;
public:
explicit Float8e5m2fnuz(const Float8e5m2& f8)
: Float8e5m2fnuz(ConvertFrom(f8)) {}
explicit Float8e5m2fnuz(const Float8e4m3b11fnuz& f8)
: Float8e5m2fnuz(ConvertFrom(f8)) {}
explicit Float8e5m2fnuz(const Float8e4m3fn& f8)
: Float8e5m2fnuz(ConvertFrom(f8)) {}
explicit Float8e5m2fnuz(const Float8e4m3fnuz& f8)
: Float8e5m2fnuz(ConvertFrom(f8)) {}
constexpr Float8e5m2fnuz operator-() const {
if ((rep() & 0x7f) == 0x00) {
return *this;
}
return Base::operator-();
}
Float8e5m2fnuz operator-(const Float8e5m2fnuz& other) const {
return Base::operator-(other);
}
explicit operator bool() const { return rep() != 0; }
};
constexpr double ConstexprAbs(double x) { return x < 0.0 ? -x : x; }
constexpr double ConstexprCeil(double x) {
constexpr double kIntegerThreshold =
uint64_t{1} << (std::numeric_limits<double>::digits - 1);
if (!(ConstexprAbs(x) < kIntegerThreshold)) {
return x;
}
const double x_trunc = static_cast<double>(static_cast<int64_t>(x));
return x_trunc < x ? x_trunc + 1.0 : x_trunc;
}
constexpr double ConstexprFloor(double x) { return -ConstexprCeil(-x); }
constexpr double kLog10Of2 = 0.3010299956639812;
constexpr int Digits10FromDigits(int digits) {
return static_cast<int>(ConstexprFloor((digits - 1) * kLog10Of2));
}
constexpr int MaxDigits10FromDigits(int digits) {
return static_cast<int>(ConstexprCeil(1.0 + (digits * kLog10Of2)));
}
constexpr int MinExponent10FromMinExponent(int min_exponent) {
return static_cast<int>(ConstexprCeil((min_exponent - 1) * kLog10Of2));
}
constexpr int MaxExponent10FromMaxExponentAndDigits(int max_exponent,
int digits) {
constexpr double kLog10OfOnePredecessor[] = {
-0.057991946977686754,
-0.028028723600243537,
};
return static_cast<int>(ConstexprFloor(kLog10OfOnePredecessor[digits - 3] +
max_exponent * kLog10Of2));
}
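// Worked example: for e4m3 (digits == 4),
//   digits10     = floor(3 * log10(2)) = floor(0.903) = 0
//   max_digits10 = ceil(1 + 4 * log10(2)) = ceil(2.204) = 3
// matching the values asserted in the unit tests.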
struct numeric_limits_float8_base {
static inline constexpr const bool is_specialized = true;
static inline constexpr const bool is_signed = true;
static inline constexpr const bool is_integer = false;
static inline constexpr const bool is_exact = false;
static inline constexpr const bool has_quiet_NaN = true;
static inline constexpr const std::float_denorm_style has_denorm =
std::denorm_present;
static inline constexpr const bool has_denorm_loss = false;
static inline constexpr const std::float_round_style round_style =
std::round_to_nearest;
static inline constexpr const bool is_bounded = true;
static inline constexpr const bool is_modulo = false;
static inline constexpr const int radix = std::numeric_limits<float>::radix;
static inline constexpr const bool traps = std::numeric_limits<float>::traps;
static inline constexpr const bool tinyness_before =
std::numeric_limits<float>::tinyness_before;
};
struct numeric_limits_float8_e4m3fn : public numeric_limits_float8_base {
private:
static inline constexpr const int kExponentBias = 7;
static inline constexpr const int kMantissaBits = 3;
public:
static inline constexpr const int digits = kMantissaBits + 1;
static inline constexpr const int digits10 = Digits10FromDigits(digits);
static inline constexpr const int max_digits10 =
MaxDigits10FromDigits(digits);
static inline constexpr const int min_exponent = (1 - kExponentBias) + 1;
static inline constexpr const int min_exponent10 =
MinExponent10FromMinExponent(min_exponent);
static inline constexpr const int max_exponent =
    (0b1111 - kExponentBias) + 1;
static inline constexpr const int max_exponent10 =
MaxExponent10FromMaxExponentAndDigits(max_exponent, digits);
static inline constexpr const bool is_iec559 = false;
static inline constexpr const bool has_infinity = false;
static inline constexpr const bool has_signaling_NaN = false;
static constexpr Float8e4m3fn min() {
return Float8e4m3fn::FromRep(0b0'0001 << kMantissaBits);
}
static constexpr Float8e4m3fn lowest() {
return Float8e4m3fn::FromRep(0b1'1111'110);
}
static constexpr Float8e4m3fn max() {
return Float8e4m3fn::FromRep(0b0'1111'110);
}
static constexpr Float8e4m3fn epsilon() {
return Float8e4m3fn::FromRep((-kMantissaBits + kExponentBias)
<< kMantissaBits);
}
static constexpr Float8e4m3fn round_error() {
return Float8e4m3fn::FromRep((-1 + kExponentBias) << kMantissaBits);
}
// e4m3fn has no infinity encoding; the NaN representation is returned in
// its place.
static constexpr Float8e4m3fn infinity() {
return Float8e4m3fn::FromRep(0b0'1111'111);
}
static constexpr Float8e4m3fn quiet_NaN() {
return Float8e4m3fn::FromRep(0b0'1111'111);
}
static constexpr Float8e4m3fn signaling_NaN() {
return Float8e4m3fn::FromRep(0b0'1111'111);
}
static constexpr Float8e4m3fn denorm_min() {
return Float8e4m3fn::FromRep(0b0'0000'001);
}
};
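// Worked example of the e4m3fn limits above (bias 7, 3 mantissa bits):
//   max()        = 0b0'1111'110 -> 1.75 * 2^(15-7) = 448
//   min()        = 0b0'0001'000 -> 1.0  * 2^(1-7)  = 2^-6
//   denorm_min() = 0b0'0000'001 -> 0.125 * 2^(1-7) = 2^-9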
struct numeric_limits_float8_e4m3b11fnuz : public numeric_limits_float8_base {
private:
static inline constexpr const int kExponentBias = 11;
static inline constexpr const int kMantissaBits = 3;
public:
static inline constexpr const int digits = kMantissaBits + 1;
static inline constexpr const int digits10 = Digits10FromDigits(digits);
static inline constexpr const int max_digits10 =
MaxDigits10FromDigits(digits);
static inline constexpr const int min_exponent = (1 - kExponentBias) + 1;
static inline constexpr const int min_exponent10 =
MinExponent10FromMinExponent(min_exponent);
static inline constexpr const int max_exponent =
(0b1111 - kExponentBias) + 1;
static inline constexpr const int max_exponent10 =
MaxExponent10FromMaxExponentAndDigits(max_exponent, digits);
static inline constexpr const bool is_iec559 = false;
static inline constexpr const bool has_infinity = false;
static inline constexpr const bool has_signaling_NaN = false;
static constexpr Float8e4m3b11fnuz min() {
return Float8e4m3b11fnuz::FromRep(1 << kMantissaBits);
}
static constexpr Float8e4m3b11fnuz lowest() {
return Float8e4m3b11fnuz::FromRep(0b1'1111'111);
}
static constexpr Float8e4m3b11fnuz max() {
return Float8e4m3b11fnuz::FromRep(0b0'1111'111);
}
static constexpr Float8e4m3b11fnuz epsilon() {
return Float8e4m3b11fnuz::FromRep((-kMantissaBits + kExponentBias)
<< kMantissaBits);
}
static constexpr Float8e4m3b11fnuz round_error() {
return Float8e4m3b11fnuz::FromRep((-1 + kExponentBias) << kMantissaBits);
}
static constexpr Float8e4m3b11fnuz infinity() {
return Float8e4m3b11fnuz::FromRep(0b1'0000'000);
}
static constexpr Float8e4m3b11fnuz quiet_NaN() {
return Float8e4m3b11fnuz::FromRep(0b1'0000'000);
}
static constexpr Float8e4m3b11fnuz signaling_NaN() {
return Float8e4m3b11fnuz::FromRep(0b1'0000'000);
}
static constexpr Float8e4m3b11fnuz denorm_min() {
return Float8e4m3b11fnuz::FromRep(0b0'0000'001);
}
};
struct numeric_limits_float8_e4m3fnuz : public numeric_limits_float8_base {
private:
static inline constexpr const int kExponentBias = 8;
static inline constexpr const int kMantissaBits = 3;
public:
static inline constexpr const int digits = kMantissaBits + 1;
static inline constexpr const int digits10 = Digits10FromDigits(digits);
static inline constexpr const int max_digits10 =
MaxDigits10FromDigits(digits);
static inline constexpr const int min_exponent = (1 - kExponentBias) + 1;
static inline constexpr const int min_exponent10 =
MinExponent10FromMinExponent(min_exponent);
static inline constexpr const int max_exponent =
(0b1111 - kExponentBias) + 1;
static inline constexpr const int max_exponent10 =
MaxExponent10FromMaxExponentAndDigits(max_exponent, digits);
static inline constexpr const bool is_iec559 = false;
static inline constexpr const bool has_infinity = false;
static inline constexpr const bool has_signaling_NaN = false;
static constexpr Float8e4m3fnuz min() {
return Float8e4m3fnuz::FromRep(0x08);
}
static constexpr Float8e4m3fnuz lowest() {
return Float8e4m3fnuz::FromRep(0xFF);
}
static constexpr Float8e4m3fnuz max() {
return Float8e4m3fnuz::FromRep(0x7F);
}
static constexpr Float8e4m3fnuz epsilon() {
return Float8e4m3fnuz::FromRep((-kMantissaBits + kExponentBias)
<< kMantissaBits);
}
static constexpr Float8e4m3fnuz round_error() {
return Float8e4m3fnuz::FromRep((-1 + kExponentBias) << kMantissaBits);
}
static constexpr Float8e4m3fnuz infinity() {
return Float8e4m3fnuz::FromRep(0x80);
}
static constexpr Float8e4m3fnuz quiet_NaN() {
return Float8e4m3fnuz::FromRep(0x80);
}
static constexpr Float8e4m3fnuz signaling_NaN() {
return Float8e4m3fnuz::FromRep(0x80);
}
static constexpr Float8e4m3fnuz denorm_min() {
return Float8e4m3fnuz::FromRep(0x01);
}
};
struct numeric_limits_float8_e5m2 : public numeric_limits_float8_base {
private:
static inline constexpr const int kExponentBias = 15;
static inline constexpr const int kMantissaBits = 2;
public:
static inline constexpr const int digits = kMantissaBits + 1;
static inline constexpr const int digits10 = Digits10FromDigits(digits);
static inline constexpr const int max_digits10 =
MaxDigits10FromDigits(digits);
static inline constexpr const int min_exponent = (1 - kExponentBias) + 1;
static inline constexpr const int min_exponent10 =
MinExponent10FromMinExponent(min_exponent);
static inline constexpr const int max_exponent = 0b11111 - kExponentBias;
static inline constexpr const int max_exponent10 =
MaxExponent10FromMaxExponentAndDigits(max_exponent, digits);
static inline constexpr const bool is_iec559 = true;
static inline constexpr const bool has_infinity = true;
static inline constexpr const bool has_signaling_NaN = true;
static constexpr Float8e5m2 min() {
return Float8e5m2::FromRep(1 << kMantissaBits);
}
static constexpr Float8e5m2 lowest() {
return Float8e5m2::FromRep(0b1'11110'11);
}
static constexpr Float8e5m2 max() {
return Float8e5m2::FromRep(0b0'11110'11);
}
static constexpr Float8e5m2 epsilon() {
return Float8e5m2::FromRep((-kMantissaBits + kExponentBias)
<< kMantissaBits);
}
static constexpr Float8e5m2 round_error() {
return Float8e5m2::FromRep((-1 + kExponentBias) << kMantissaBits);
}
static constexpr Float8e5m2 infinity() {
return Float8e5m2::FromRep(0b0'11111'00);
}
static constexpr Float8e5m2 quiet_NaN() {
return Float8e5m2::FromRep(0b0'11111'10);
}
static constexpr Float8e5m2 signaling_NaN() {
return Float8e5m2::FromRep(0b0'11111'01);
}
static constexpr Float8e5m2 denorm_min() {
return Float8e5m2::FromRep(0b0'00000'01);
}
};
struct numeric_limits_float8_e5m2fnuz : public numeric_limits_float8_base {
private:
static inline constexpr const int kExponentBias = 16;
static inline constexpr const int kMantissaBits = 2;
public:
static inline constexpr const int digits = kMantissaBits + 1;
static inline constexpr const int digits10 = Digits10FromDigits(digits);
static inline constexpr const int max_digits10 =
MaxDigits10FromDigits(digits);
static inline constexpr const int min_exponent = (1 - kExponentBias) + 1;
static inline constexpr const int min_exponent10 =
MinExponent10FromMinExponent(min_exponent);
static inline constexpr const int max_exponent =
(0b11111 - kExponentBias) + 1;
static inline constexpr const int max_exponent10 =
MaxExponent10FromMaxExponentAndDigits(max_exponent, digits);
static inline constexpr const bool is_iec559 = false;
static inline constexpr const bool has_infinity = false;
static inline constexpr const bool has_signaling_NaN = false;
static constexpr Float8e5m2fnuz min() {
return Float8e5m2fnuz::FromRep(0x04);
}
static constexpr Float8e5m2fnuz lowest() {
return Float8e5m2fnuz::FromRep(0xFF);
}
static constexpr Float8e5m2fnuz max() {
return Float8e5m2fnuz::FromRep(0x7F);
}
static constexpr Float8e5m2fnuz epsilon() {
return Float8e5m2fnuz::FromRep((-kMantissaBits + kExponentBias)
<< kMantissaBits);
}
static constexpr Float8e5m2fnuz round_error() {
return Float8e5m2fnuz::FromRep((-1 + kExponentBias) << kMantissaBits);
}
static constexpr Float8e5m2fnuz infinity() {
return Float8e5m2fnuz::FromRep(0x80);
}
static constexpr Float8e5m2fnuz quiet_NaN() {
return Float8e5m2fnuz::FromRep(0x80);
}
static constexpr Float8e5m2fnuz signaling_NaN() {
return Float8e5m2fnuz::FromRep(0x80);
}
static constexpr Float8e5m2fnuz denorm_min() {
return Float8e5m2fnuz::FromRep(0x01);
}
};
}
}
namespace std {
template <>
struct numeric_limits<tensorstore::float8_internal::Float8e4m3fn>
: public tensorstore::float8_internal::numeric_limits_float8_e4m3fn {};
template <>
struct numeric_limits<tensorstore::float8_internal::Float8e4m3b11fnuz>
: public tensorstore::float8_internal::numeric_limits_float8_e4m3b11fnuz {};
template <>
struct numeric_limits<tensorstore::float8_internal::Float8e4m3fnuz>
: public tensorstore::float8_internal::numeric_limits_float8_e4m3fnuz {};
template <>
struct numeric_limits<tensorstore::float8_internal::Float8e5m2>
: public tensorstore::float8_internal::numeric_limits_float8_e5m2 {};
template <>
struct numeric_limits<tensorstore::float8_internal::Float8e5m2fnuz>
: public tensorstore::float8_internal::numeric_limits_float8_e5m2fnuz {};
}
namespace tensorstore {
namespace float8_internal {
constexpr inline Float8e4m3fn abs(const Float8e4m3fn& a) {
return Float8e4m3fn::FromRep(a.rep() & 0b0'1111'111);
}
constexpr inline bool(isnan)(const Float8e4m3fn& a) {
return abs(a).rep() == std::numeric_limits<Float8e4m3fn>::quiet_NaN().rep();
}
constexpr inline Float8e4m3b11fnuz abs(const Float8e4m3b11fnuz& a) {
return (a.rep() & 0b0'1111'111) == 0
? Float8e4m3b11fnuz::FromRep(a.rep())
: Float8e4m3b11fnuz::FromRep(a.rep() & 0b0'1111'111);
}
constexpr inline bool(isnan)(const Float8e4m3b11fnuz& a) {
return a.rep() == std::numeric_limits<Float8e4m3b11fnuz>::quiet_NaN().rep();
}
constexpr inline Float8e4m3fnuz abs(const Float8e4m3fnuz& a) {
return (a.rep() & 0x7F) == 0 ? Float8e4m3fnuz::FromRep(a.rep())
: Float8e4m3fnuz::FromRep(a.rep() & 0x7F);
}
constexpr inline bool(isnan)(const Float8e4m3fnuz& a) {
return abs(a).rep() == std::numeric_limits<Float8e4m3fnuz>::quiet_NaN().rep();
}
constexpr inline Float8e5m2 abs(const Float8e5m2& a) {
return Float8e5m2::FromRep(a.rep() & 0b0'11111'11);
}
constexpr inline bool(isnan)(const Float8e5m2& a) {
return abs(a).rep() > std::numeric_limits<Float8e5m2>::infinity().rep();
}
constexpr inline Float8e5m2fnuz abs(const Float8e5m2fnuz& a) {
return (a.rep() & 0x7F) == 0 ? Float8e5m2fnuz::FromRep(a.rep())
: Float8e5m2fnuz::FromRep(a.rep() & 0x7F);
}
constexpr inline bool(isnan)(const Float8e5m2fnuz& a) { return a.rep() == 0x80; }
template <typename Float8>
constexpr inline bool(isinf)(const Float8Base<Float8>& a) {
return std::numeric_limits<Float8>::has_infinity
? abs(a.derived()).rep() ==
std::numeric_limits<Float8>::infinity().rep()
: false;
}
template <typename Float8>
constexpr inline bool(isfinite)(const Float8Base<Float8>& a) {
return !isnan(a.derived()) && !isinf(a.derived());
}
template <typename Float8>
std::ostream& operator<<(std::ostream& os, const Float8Base<Float8>& f8) {
os << static_cast<float>(f8.derived());
return os;
}
template <size_t Size>
struct get_integer_by_size {
typedef void signed_type;
typedef void unsigned_type;
};
template <>
struct get_integer_by_size<1> {
typedef int8_t signed_type;
typedef uint8_t unsigned_type;
};
template <>
struct get_integer_by_size<2> {
typedef int16_t signed_type;
typedef uint16_t unsigned_type;
};
template <>
struct get_integer_by_size<4> {
typedef int32_t signed_type;
typedef uint32_t unsigned_type;
};
template <>
struct get_integer_by_size<8> {
typedef int64_t signed_type;
typedef uint64_t unsigned_type;
};
template <int kNumBytes>
using GetUnsignedInteger =
typename get_integer_by_size<kNumBytes>::unsigned_type;
template <typename From, typename To, bool kSaturate, bool kTruncate,
typename EnableIf = void>
struct ConvertImpl;
template <typename Scalar>
struct IdentityConversion {
static inline Scalar run(const Scalar& from) { return from; }
};
template <typename Scalar>
struct ConvertImpl<Scalar, Scalar, false, false,
void> : public IdentityConversion<Scalar> {};
template <typename Scalar>
struct ConvertImpl<Scalar, Scalar, false, true,
void> : public IdentityConversion<Scalar> {};
template <typename Scalar>
struct ConvertImpl<Scalar, Scalar, true, false,
void> : public IdentityConversion<Scalar> {};
template <typename Scalar>
struct ConvertImpl<Scalar, Scalar, true, true,
void> : public IdentityConversion<Scalar> {};
template <typename Float>
struct TraitsBase {
using BitsType = GetUnsignedInteger<sizeof(Float)>;
static constexpr int kBits = sizeof(Float) * CHAR_BIT;
static constexpr int kMantissaBits = std::numeric_limits<Float>::digits - 1;
static constexpr int kExponentBits = kBits - kMantissaBits - 1;
static constexpr BitsType kExponentMask = ((BitsType{1} << kExponentBits) - 1)
<< kMantissaBits;
static constexpr BitsType kMantissaMask = (BitsType{1} << kMantissaBits) - 1;
static constexpr int kExponentBias = (1 << (kExponentBits - 1)) - 1;
};
template <typename Float>
struct Traits : public TraitsBase<Float> {};
// These formats deviate from the IEEE-style bias computed in TraitsBase:
// b11 simply uses bias 11, while the fnuz formats gain one extra exponent
// value because the -0 / infinity encodings are repurposed (0x80 is NaN).
template <>
struct Traits<Float8e4m3b11fnuz> : public TraitsBase<Float8e4m3b11fnuz> {
static constexpr int kExponentBias = 11;
};
template <>
struct Traits<Float8e4m3fnuz> : public TraitsBase<Float8e4m3fnuz> {
using Base = TraitsBase<Float8e4m3fnuz>;
static constexpr int kExponentBias = Base::kExponentBias + 1;
};
template <>
struct Traits<Float8e5m2fnuz> : public TraitsBase<Float8e5m2fnuz> {
using Base = TraitsBase<Float8e5m2fnuz>;
static constexpr int kExponentBias = Base::kExponentBias + 1;
};
template <typename Bits>
constexpr inline Bits RoundBitsToNearestEven(Bits bits, int roundoff) {
Bits bias = roundoff == 0
? 0
: ((bits >> roundoff) & 1) + (Bits{1} << (roundoff - 1)) - 1;
return bits + bias;
}
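// Worked examples rounding off the low two bits (caller then shifts by 2):
//   RoundBitsToNearestEven(0b0101, 2) == 0b0111 -> >>2 gives 1 (1.25 -> 1)
//   RoundBitsToNearestEven(0b0110, 2) == 0b1000 -> >>2 gives 2 (1.5 -> 2, tie to even)
//   RoundBitsToNearestEven(0b0010, 2) == 0b0011 -> >>2 gives 0 (0.5 -> 0, tie to even)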
#if (defined(__cpp_lib_bitops) && __cpp_lib_bitops >= 201907L)
using std::countl_zero;
#else
// Pre-C++20 fallback: halve the search width down to 4 bits, then finish
// with a lookup table mapping a 4-bit value to its leading-zero count.
static constexpr inline int countl_zero(uint64_t x) {
int zeroes = 60;
if (x >> 32) {
zeroes -= 32;
x >>= 32;
}
if (x >> 16) {
zeroes -= 16;
x >>= 16;
}
if (x >> 8) {
zeroes -= 8;
x >>= 8;
}
if (x >> 4) {
zeroes -= 4;
x >>= 4;
}
return "\4\3\2\2\1\1\1\1\0\0\0\0\0\0\0"[x] + zeroes;
}
static constexpr inline int countl_zero(uint32_t x) {
int zeroes = 28;
if (x >> 16) {
zeroes -= 16;
x >>= 16;
}
if (x >> 8) {
zeroes -= 8;
x >>= 8;
}
if (x >> 4) {
zeroes -= 4;
x >>= 4;
}
return "\4\3\2\2\1\1\1\1\0\0\0\0\0\0\0"[x] + zeroes;
}
static constexpr inline int countl_zero(uint16_t x) {
int zeroes = 12;
if (x >> 8) {
zeroes -= 8;
x >>= 8;
}
if (x >> 4) {
zeroes -= 4;
x >>= 4;
}
return "\4\3\2\2\1\1\1\1\0\0\0\0\0\0\0"[x] + zeroes;
}
static constexpr inline int countl_zero(uint8_t x) {
int zeroes = 4;
if (x >> 4) {
zeroes -= 4;
x >>= 4;
}
return "\4\3\2\2\1\1\1\1\0\0\0\0\0\0\0"[x] + zeroes;
}
#endif
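// Example: countl_zero(uint8_t{0x10}) == 3 and countl_zero(uint8_t{0x01}) == 7,
// matching std::countl_zero on C++20 toolchains.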
template <typename From, typename To, bool kSaturate, bool kTruncate>
struct ConvertImpl<From, To, kSaturate, kTruncate,
std::enable_if_t<!std::is_same_v<From, To>>> {
using FromTraits = Traits<From>;
using FromBits = typename FromTraits::BitsType;
static constexpr int kFromBits = FromTraits::kBits;
static constexpr int kFromMantissaBits = FromTraits::kMantissaBits;
static constexpr int kFromExponentBits = FromTraits::kExponentBits;
static constexpr int kFromExponentBias = FromTraits::kExponentBias;
static constexpr FromBits kFromExponentMask = FromTraits::kExponentMask;
using ToTraits = Traits<To>;
using ToBits = typename ToTraits::BitsType;
static constexpr int kToBits = ToTraits::kBits;
static constexpr int kToMantissaBits = ToTraits::kMantissaBits;
static constexpr int kToExponentBits = ToTraits::kExponentBits;
static constexpr int kToExponentBias = ToTraits::kExponentBias;
static constexpr ToBits kToExponentMask = ToTraits::kExponentMask;
static constexpr int kWideBits =
(std::max(kToMantissaBits, kFromMantissaBits)) +
(std::max(kToExponentBits, kFromExponentBits));
static constexpr int kWideBytes = (kWideBits + (CHAR_BIT - 1)) / CHAR_BIT;
using WideBits = GetUnsignedInteger<kWideBytes>;
static constexpr int kExponentOffset = kToExponentBias - kFromExponentBias;
static constexpr int kDigitShift = kToMantissaBits - kFromMantissaBits;
static inline To run(const From& from) {
using std::abs;
using std::isinf;
using std::isnan;
const bool from_sign_bit =
absl::bit_cast<FromBits>(from) >> (kFromBits - 1);
const FromBits from_bits = absl::bit_cast<FromBits>(abs(from));
if (isinf(from)) {
return from_sign_bit ? -std::numeric_limits<To>::infinity()
: std::numeric_limits<To>::infinity();
}
if (isnan(from)) {
return from_sign_bit ? -std::numeric_limits<To>::quiet_NaN()
: std::numeric_limits<To>::quiet_NaN();
}
if (from_bits == 0) {
return from_sign_bit ? -To{} : To{};
}
const int biased_from_exponent = from_bits >> kFromMantissaBits;
// `To` reaches smaller exponents than `From`: a `From` subnormal may become
// a `To` normal, so renormalize before shifting the mantissa.
if constexpr (std::numeric_limits<To>::min_exponent <
std::numeric_limits<From>::min_exponent) {
if (biased_from_exponent == 0) {
WideBits bits = from_bits;
const int normalization_factor =
countl_zero(from_bits) - (kFromBits - kFromMantissaBits) + 1;
const int biased_exponent = kExponentOffset - normalization_factor + 1;
if (biased_exponent <= 0) {
if constexpr (kExponentOffset < sizeof(WideBits) * CHAR_BIT) {
bits <<= kExponentOffset;
}
} else {
bits <<= normalization_factor;
bits &= ~(WideBits{1} << kFromMantissaBits);
bits |= static_cast<WideBits>(biased_exponent) << kFromMantissaBits;
}
if constexpr (kDigitShift > 0) {
bits <<= kDigitShift;
} else {
if constexpr (!kTruncate) {
bits = RoundBitsToNearestEven(bits, -kDigitShift);
}
bits >>= -kDigitShift;
}
To to = absl::bit_cast<To>(static_cast<ToBits>(bits));
return from_sign_bit ? -to : to;
}
}
// `From` reaches smaller exponents than `To`: a small `From` normal may
// land in `To`'s subnormal range (or flush to zero).
if constexpr (std::numeric_limits<To>::min_exponent >
std::numeric_limits<From>::min_exponent) {
const int unbiased_exponent = biased_from_exponent - kFromExponentBias;
const int biased_to_exponent = unbiased_exponent + kToExponentBias;
if (biased_to_exponent <= 0) {
FromBits from_has_leading_one = (biased_from_exponent > 0 ? 1 : 0);
int exponent_shift =
-kDigitShift - biased_to_exponent + from_has_leading_one;
FromBits rounded_from_bits =
(from_bits & FromTraits::kMantissaMask) |
(from_has_leading_one << kFromMantissaBits);
ToBits bits = 0;
if (exponent_shift <= kFromMantissaBits + 1) {
if constexpr (!kTruncate) {
rounded_from_bits =
RoundBitsToNearestEven(rounded_from_bits, exponent_shift);
}
bits = (rounded_from_bits >> exponent_shift);
}
To to = absl::bit_cast<To>(bits);
return from_sign_bit ? -to : to;
}
}
// Common path: round the mantissa (unless truncating), re-bias the
// exponent, and align the mantissa width.
WideBits rounded_from_bits = from_bits;
if constexpr (kDigitShift < 0) {
if constexpr (!kTruncate) {
rounded_from_bits = RoundBitsToNearestEven(from_bits, -kDigitShift);
}
rounded_from_bits &= ~((WideBits{1} << (-kDigitShift)) - 1);
}
rounded_from_bits += static_cast<WideBits>(kExponentOffset)
<< kFromMantissaBits;
ToBits bits;
const WideBits kToHighestRep =
absl::bit_cast<ToBits>(std::numeric_limits<To>::max());
WideBits aligned_highest{kToHighestRep};
if constexpr (kDigitShift < 0) {
aligned_highest <<= -kDigitShift;
bits = static_cast<ToBits>(rounded_from_bits >> -kDigitShift);
} else if constexpr (kDigitShift >= 0) {
rounded_from_bits <<= kDigitShift;
bits = ToBits{rounded_from_bits};
}
To to = absl::bit_cast<To>(bits);
// If `To` has a strictly smaller range, out-of-range results either
// saturate to max() or overflow to infinity (NaN for formats without one).
if constexpr (std::make_pair(std::numeric_limits<To>::max_exponent,
std::numeric_limits<To>::digits) <
std::make_pair(std::numeric_limits<From>::max_exponent,
std::numeric_limits<From>::digits)) {
if (rounded_from_bits > aligned_highest) {
to = kSaturate ? std::numeric_limits<To>::max()
: std::numeric_limits<To>::infinity();
}
}
return from_sign_bit ? -to : to;
}
};
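// Worked example of the generic path above: converting 0.5f to Float8e4m3fn.
//   from_bits = 0x3F000000 (biased exponent 126, mantissa 0)
//   kDigitShift = 3 - 23 = -20, kExponentOffset = 7 - 127 = -120
//   rounding at 20 bits is a no-op here; re-biasing leaves biased exponent 6,
//   and shifting right by 20 yields rep 0b0'0110'000 = 0x30, i.e. 2^-1.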
// Every finite e4m3fn value lies within e5m2's range, so the saturating
// conversion can simply forward to the non-saturating one.
template <bool kTruncate>
struct ConvertImpl<Float8e4m3fn, Float8e5m2, true, kTruncate> {
static inline Float8e5m2 run(const Float8e4m3fn& from) {
return ConvertImpl<Float8e4m3fn, Float8e5m2, false, kTruncate>::run(from);
}
};
// half and e5m2 share the 5-bit exponent layout, so this conversion is a
// round (or truncate) of the low byte followed by a right shift of 8 bits,
// with NaN/infinity handled explicitly.
template <bool kSaturate, bool kTruncate>
struct ConvertImpl<::half_float::half, Float8e5m2, kSaturate, kTruncate> {
static inline Float8e5m2 run(const ::half_float::half& from) {
uint16_t from_bits = absl::bit_cast<uint16_t>(from);
uint16_t abs_bits = from_bits & 0x7FFF;
if (abs_bits == 0x7C00) {
return Float8e5m2::FromRep(from_bits >> 8);
} else if (abs_bits > 0x7C00) {
return Float8e5m2::FromRep((from_bits >> 8) | 0b0'00000'10);
}
if constexpr (!kTruncate) {
from_bits = RoundBitsToNearestEven(from_bits, 8);
if constexpr (kSaturate) {
const Float8e5m2 kHighest = std::numeric_limits<Float8e5m2>::max();
if ((from_bits & 0x7F00) > static_cast<uint16_t>(kHighest.rep()) << 8) {
const bool from_sign_bit = from_bits >> 15;
return from_sign_bit ? -kHighest : kHighest;
}
}
}
return Float8e5m2::FromRep(from_bits >> 8);
}
};
template <>
struct ConvertImpl<Float8e5m2, ::half_float::half, false,
false> {
static inline ::half_float::half run(const Float8e5m2& from) {
return absl::bit_cast<::half_float::half>(
static_cast<uint16_t>(static_cast<uint16_t>(from.rep()) << 8));
}
};
// The reverse direction is exact: widening e5m2 to half is a left shift of
// 8 bits, so the kSaturate/kTruncate flags have no effect.
template <bool kSaturate, bool kTruncate>
struct ConvertImpl<Float8e5m2, ::half_float::half, kSaturate, kTruncate> {
static inline ::half_float::half run(const Float8e5m2& from) {
return absl::bit_cast<::half_float::half>(
static_cast<uint16_t>(static_cast<uint16_t>(from.rep()) << 8));
}
};
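// Example: Float8e5m2::FromRep(0x40) is exactly 2.0; widening yields half
// bits 0x4000, which is also exactly 2.0.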
template <bool kSaturate, bool kTruncate>
struct ConvertImpl<Float8e5m2fnuz, ::half_float::half, kSaturate, kTruncate> {
static inline ::half_float::half run(const Float8e5m2fnuz& from) {
return static_cast<::half_float::half>(static_cast<float>(from));
}
};
template <typename Derived>
template <bool kSaturate, bool kTruncate, typename From>
Derived Float8Base<Derived>::ConvertFrom(const From& from) {
return ConvertImpl<From, Derived, kSaturate, kTruncate>::run(from);
}
template <typename Derived>
template <typename To, bool kSaturate, bool kTruncate>
To Float8Base<Derived>::ConvertTo(const Derived& from) {
return ConvertImpl<Derived, To, kSaturate, kTruncate>::run(from);
}
#ifdef _MSC_VER
#define TENSORSTORE_INTERNAL_FPCLASSIFY(Float8) \
inline int fpclassify(Float8 a) noexcept { \
if (tensorstore::float8_internal::isnan(a)) return FP_NAN; \
if (tensorstore::float8_internal::isinf(a)) return FP_INFINITE; \
Float8 abs_value = tensorstore::float8_internal::abs(a); \
if (abs_value.rep() == 0x00) return FP_ZERO; \
if ((abs_value.rep() & Traits<Float8>::kExponentMask) == 0) \
return FP_SUBNORMAL; \
return FP_NORMAL; \
}
TENSORSTORE_INTERNAL_FPCLASSIFY(Float8e4m3fn);
TENSORSTORE_INTERNAL_FPCLASSIFY(Float8e4m3fnuz);
TENSORSTORE_INTERNAL_FPCLASSIFY(Float8e4m3b11fnuz);
TENSORSTORE_INTERNAL_FPCLASSIFY(Float8e5m2);
TENSORSTORE_INTERNAL_FPCLASSIFY(Float8e5m2fnuz);
#undef TENSORSTORE_INTERNAL_FPCLASSIFY
#endif
}
using Float8e4m3fn = float8_internal::Float8e4m3fn;
using Float8e4m3fnuz = float8_internal::Float8e4m3fnuz;
using Float8e4m3b11fnuz = float8_internal::Float8e4m3b11fnuz;
using Float8e5m2 = float8_internal::Float8e5m2;
using Float8e5m2fnuz = float8_internal::Float8e5m2fnuz;
}
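// Usage sketch (illustrative only, relying just on the aliases above):
//
//   tensorstore::Float8e4m3fn x(1.5f);    // 1.5 is exactly representable
//   float y = static_cast<float>(x * x);  // arithmetic round-trips via float
//   static_assert(std::numeric_limits<tensorstore::Float8e4m3fn>::digits == 4);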
#endif | #include "tensorstore/util/float8.h"
#include <cmath>
#include <cstdint>
#include <limits>
#include <string>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/base/casts.h"
#include "absl/strings/str_cat.h"
#include <half.hpp>
#include "tensorstore/util/bfloat16.h"
namespace tensorstore {
namespace {
using std::isfinite;
using std::isinf;
using std::isnan;
template <typename Float8_>
class Float8Test : public ::testing::Test {};
struct Float8TestParamNames {
template <typename TypeParam>
static std::string GetName(int idx) {
if constexpr (std::is_same_v<TypeParam, Float8e4m3fn>) {
return "Float8e4m3fn";
} else if constexpr (std::is_same_v<TypeParam, Float8e4m3b11fnuz>) {
return "Float8e4m3b11fnuz";
} else if constexpr (std::is_same_v<TypeParam, Float8e5m2>) {
return "Float8e5m2";
} else if constexpr (std::is_same_v<TypeParam, Float8e4m3fnuz>) {
return "Float8e4m3fnuz";
} else if constexpr (std::is_same_v<TypeParam, Float8e5m2fnuz>) {
return "Float8e5m2fnuz";
}
return absl::StrCat(idx);
}
};
using Float8Types =
::testing::Types<Float8e4m3fn, Float8e5m2, Float8e4m3b11fnuz,
Float8e4m3fnuz, Float8e5m2fnuz>;
TYPED_TEST_SUITE(Float8Test, Float8Types, Float8TestParamNames);
TEST(Float8E4m3fnTest, NumericLimits) {
EXPECT_TRUE(isnan(std::numeric_limits<Float8e4m3fn>::quiet_NaN()));
EXPECT_TRUE(isnan(std::numeric_limits<Float8e4m3fn>::signaling_NaN()));
EXPECT_EQ(static_cast<float>(std::numeric_limits<Float8e4m3fn>::min()),
std::exp2(-6));
EXPECT_EQ(static_cast<float>(std::numeric_limits<Float8e4m3fn>::max()), 448);
EXPECT_EQ(static_cast<float>(std::numeric_limits<Float8e4m3fn>::lowest()),
-448);
EXPECT_EQ(static_cast<float>(std::numeric_limits<Float8e4m3fn>::epsilon()),
0.125);
EXPECT_EQ(
static_cast<float>(std::numeric_limits<Float8e4m3fn>::round_error()),
0.5);
EXPECT_TRUE(isnan(std::numeric_limits<Float8e4m3fn>::infinity()));
EXPECT_EQ(static_cast<float>(std::numeric_limits<Float8e4m3fn>::denorm_min()),
std::exp2(-9));
EXPECT_EQ(std::numeric_limits<Float8e4m3fn>::digits, 4);
EXPECT_EQ(std::numeric_limits<Float8e4m3fn>::digits10, 0);
EXPECT_EQ(std::numeric_limits<Float8e4m3fn>::max_digits10, 3);
EXPECT_EQ(std::numeric_limits<Float8e4m3fn>::min_exponent, -5);
EXPECT_EQ(std::numeric_limits<Float8e4m3fn>::min_exponent10, -1);
EXPECT_EQ(std::numeric_limits<Float8e4m3fn>::max_exponent, 9);
EXPECT_EQ(std::numeric_limits<Float8e4m3fn>::max_exponent10, 2);
EXPECT_EQ(std::numeric_limits<Float8e4m3fn>::is_iec559, false);
EXPECT_EQ(std::numeric_limits<Float8e4m3fn>::has_infinity, false);
EXPECT_EQ(std::numeric_limits<Float8e4m3fn>::has_signaling_NaN, false);
}
TEST(Float8E4m3b11fnuzTest, NumericLimits) {
EXPECT_TRUE(isnan(std::numeric_limits<Float8e4m3b11fnuz>::quiet_NaN()));
EXPECT_TRUE(isnan(std::numeric_limits<Float8e4m3b11fnuz>::signaling_NaN()));
EXPECT_EQ(static_cast<float>(std::numeric_limits<Float8e4m3b11fnuz>::min()),
std::exp2(-10));
EXPECT_EQ(static_cast<float>(std::numeric_limits<Float8e4m3b11fnuz>::max()),
30);
EXPECT_EQ(
static_cast<float>(std::numeric_limits<Float8e4m3b11fnuz>::lowest()),
-30);
EXPECT_EQ(
static_cast<float>(std::numeric_limits<Float8e4m3b11fnuz>::epsilon()),
0.125);
EXPECT_EQ(
static_cast<float>(std::numeric_limits<Float8e4m3b11fnuz>::round_error()),
0.5);
EXPECT_TRUE(isnan(std::numeric_limits<Float8e4m3b11fnuz>::infinity()));
EXPECT_EQ(
static_cast<float>(std::numeric_limits<Float8e4m3b11fnuz>::denorm_min()),
std::exp2(-13));
EXPECT_EQ(std::numeric_limits<Float8e4m3b11fnuz>::digits, 4);
EXPECT_EQ(std::numeric_limits<Float8e4m3b11fnuz>::digits10, 0);
EXPECT_EQ(std::numeric_limits<Float8e4m3b11fnuz>::max_digits10, 3);
EXPECT_EQ(std::numeric_limits<Float8e4m3b11fnuz>::min_exponent, -9);
EXPECT_EQ(std::numeric_limits<Float8e4m3b11fnuz>::min_exponent10, -3);
EXPECT_EQ(std::numeric_limits<Float8e4m3b11fnuz>::max_exponent, 5);
EXPECT_EQ(std::numeric_limits<Float8e4m3b11fnuz>::max_exponent10, 1);
EXPECT_EQ(std::numeric_limits<Float8e4m3b11fnuz>::is_iec559, false);
EXPECT_EQ(std::numeric_limits<Float8e4m3b11fnuz>::has_infinity, false);
EXPECT_EQ(std::numeric_limits<Float8e4m3b11fnuz>::has_signaling_NaN, false);
}
TEST(Float8E4m3fnuzTest, NumericLimits) {
EXPECT_TRUE(isnan(std::numeric_limits<Float8e4m3fnuz>::quiet_NaN()));
EXPECT_TRUE(isnan(std::numeric_limits<Float8e4m3fnuz>::signaling_NaN()));
EXPECT_EQ(static_cast<float>(std::numeric_limits<Float8e4m3fnuz>::min()),
std::exp2(-7));
EXPECT_EQ(static_cast<float>(std::numeric_limits<Float8e4m3fnuz>::max()),
240);
EXPECT_EQ(static_cast<float>(std::numeric_limits<Float8e4m3fnuz>::lowest()),
-240);
EXPECT_EQ(static_cast<float>(std::numeric_limits<Float8e4m3fnuz>::epsilon()),
0.125);
EXPECT_EQ(
static_cast<float>(std::numeric_limits<Float8e4m3fnuz>::round_error()),
0.5);
EXPECT_TRUE(isnan(std::numeric_limits<Float8e4m3fnuz>::infinity()));
EXPECT_EQ(
static_cast<float>(std::numeric_limits<Float8e4m3fnuz>::denorm_min()),
std::exp2(-10));
EXPECT_EQ(std::numeric_limits<Float8e4m3fnuz>::digits, 4);
EXPECT_EQ(std::numeric_limits<Float8e4m3fnuz>::digits10, 0);
EXPECT_EQ(std::numeric_limits<Float8e4m3fnuz>::max_digits10, 3);
EXPECT_EQ(std::numeric_limits<Float8e4m3fnuz>::min_exponent, -6);
EXPECT_EQ(std::numeric_limits<Float8e4m3fnuz>::min_exponent10, -2);
EXPECT_EQ(std::numeric_limits<Float8e4m3fnuz>::max_exponent, 8);
EXPECT_EQ(std::numeric_limits<Float8e4m3fnuz>::max_exponent10, 2);
EXPECT_EQ(std::numeric_limits<Float8e4m3fnuz>::is_iec559, false);
EXPECT_EQ(std::numeric_limits<Float8e4m3fnuz>::has_infinity, false);
EXPECT_EQ(std::numeric_limits<Float8e4m3fnuz>::has_signaling_NaN, false);
}
TEST(Float8E5m2Test, NumericLimits) {
EXPECT_TRUE(isnan(std::numeric_limits<Float8e5m2>::quiet_NaN()));
EXPECT_TRUE(isnan(std::numeric_limits<Float8e5m2>::signaling_NaN()));
EXPECT_EQ(static_cast<float>(std::numeric_limits<Float8e5m2>::min()),
std::exp2(-14));
EXPECT_EQ(static_cast<float>(std::numeric_limits<Float8e5m2>::max()), 57344);
EXPECT_EQ(static_cast<float>(std::numeric_limits<Float8e5m2>::lowest()),
-57344);
EXPECT_EQ(static_cast<float>(std::numeric_limits<Float8e5m2>::epsilon()),
0.25);
EXPECT_EQ(static_cast<float>(std::numeric_limits<Float8e5m2>::round_error()),
0.5);
EXPECT_TRUE(isinf(std::numeric_limits<Float8e5m2>::infinity()));
EXPECT_EQ(static_cast<float>(std::numeric_limits<Float8e5m2>::denorm_min()),
std::exp2(-16));
EXPECT_EQ(std::numeric_limits<Float8e5m2>::digits, 3);
EXPECT_EQ(std::numeric_limits<Float8e5m2>::digits10, 0);
EXPECT_EQ(std::numeric_limits<Float8e5m2>::max_digits10, 2);
EXPECT_EQ(std::numeric_limits<Float8e5m2>::min_exponent, -13);
EXPECT_EQ(std::numeric_limits<Float8e5m2>::min_exponent10, -4);
EXPECT_EQ(std::numeric_limits<Float8e5m2>::max_exponent, 16);
EXPECT_EQ(std::numeric_limits<Float8e5m2>::max_exponent10, 4);
EXPECT_EQ(std::numeric_limits<Float8e5m2>::is_iec559, true);
EXPECT_EQ(std::numeric_limits<Float8e5m2>::has_infinity, true);
EXPECT_EQ(std::numeric_limits<Float8e5m2>::has_signaling_NaN, true);
}
TEST(Float8E5m2fnuzTest, NumericLimits) {
EXPECT_TRUE(isnan(std::numeric_limits<Float8e5m2fnuz>::quiet_NaN()));
EXPECT_TRUE(isnan(std::numeric_limits<Float8e5m2fnuz>::signaling_NaN()));
EXPECT_EQ(static_cast<float>(std::numeric_limits<Float8e5m2fnuz>::min()),
std::exp2(-15));
EXPECT_EQ(static_cast<float>(std::numeric_limits<Float8e5m2fnuz>::max()),
57344);
EXPECT_EQ(static_cast<float>(std::numeric_limits<Float8e5m2fnuz>::lowest()),
-57344);
EXPECT_EQ(static_cast<float>(std::numeric_limits<Float8e5m2fnuz>::epsilon()),
0.25);
EXPECT_EQ(
static_cast<float>(std::numeric_limits<Float8e5m2fnuz>::round_error()),
0.5);
EXPECT_TRUE(isnan(std::numeric_limits<Float8e5m2fnuz>::infinity()));
EXPECT_EQ(
static_cast<float>(std::numeric_limits<Float8e5m2fnuz>::denorm_min()),
std::exp2(-17));
EXPECT_EQ(std::numeric_limits<Float8e5m2fnuz>::digits, 3);
EXPECT_EQ(std::numeric_limits<Float8e5m2fnuz>::digits10, 0);
EXPECT_EQ(std::numeric_limits<Float8e5m2fnuz>::max_digits10, 2);
EXPECT_EQ(std::numeric_limits<Float8e5m2fnuz>::min_exponent, -14);
EXPECT_EQ(std::numeric_limits<Float8e5m2fnuz>::min_exponent10, -4);
EXPECT_EQ(std::numeric_limits<Float8e5m2fnuz>::max_exponent, 16);
EXPECT_EQ(std::numeric_limits<Float8e5m2fnuz>::max_exponent10, 4);
EXPECT_EQ(std::numeric_limits<Float8e5m2fnuz>::is_iec559, false);
EXPECT_EQ(std::numeric_limits<Float8e5m2fnuz>::has_infinity, false);
EXPECT_EQ(std::numeric_limits<Float8e5m2fnuz>::has_signaling_NaN, false);
}
TYPED_TEST(Float8Test, FromRep) {
using Float8 = TypeParam;
Float8 x = Float8::FromRep(0x4F);
EXPECT_EQ(x.rep(), 0x4F);
}
TYPED_TEST(Float8Test, Negate) {
using Float8 = TypeParam;
Float8 x = -Float8::FromRep(0x4F);
EXPECT_EQ(x.rep(), 0x80 | 0x4F);
Float8 nan = -std::numeric_limits<Float8>::quiet_NaN();
EXPECT_TRUE(isnan(nan));
}
TYPED_TEST(Float8Test, BitCasts) {
using Float8 = TypeParam;
Float8 x = Float8::FromRep(0x47);
EXPECT_EQ(absl::bit_cast<uint8_t>(x), 0x47);
EXPECT_EQ(absl::bit_cast<Float8>(x.rep()).rep(), 0x47);
}
TYPED_TEST(Float8Test, UpCasts) {
using Float8 = TypeParam;
for (int i = 0x00; i <= 0xFF; ++i) {
Float8 f8 = Float8::FromRep(i);
double f64 = static_cast<double>(f8);
float f32 = static_cast<float>(f8);
tensorstore::BFloat16 bf16 = static_cast<tensorstore::BFloat16>(f8);
::half_float::half f16 = static_cast<::half_float::half>(f8);
if (isnan(f8)) {
EXPECT_TRUE(std::isnan(f64));
EXPECT_TRUE(std::isnan(f32));
EXPECT_TRUE(tensorstore::isnan(bf16));
EXPECT_TRUE(::half_float::isnan(f16));
} else {
EXPECT_EQ(f64, f32);
EXPECT_EQ(f32, bf16);
EXPECT_EQ(bf16, f16);
}
}
}
TYPED_TEST(Float8Test, DownCasts) {
using Float8 = TypeParam;
for (int i = 0x00; i <= 0xFF; ++i) {
float x = static_cast<float>(Float8::FromRep(i));
Float8 f64 = static_cast<Float8>(static_cast<double>(x));
Float8 f32 = static_cast<Float8>(static_cast<float>(x));
Float8 bf16 = static_cast<Float8>(static_cast<tensorstore::BFloat16>(x));
Float8 f16 = static_cast<Float8>(static_cast<::half_float::half>(x));
if (std::isnan(x)) {
EXPECT_TRUE(isnan(f64));
EXPECT_TRUE(isnan(f32));
EXPECT_TRUE(isnan(bf16));
EXPECT_TRUE(isnan(f16));
} else {
EXPECT_EQ(f64.rep(), i) << i;
EXPECT_EQ(f32.rep(), i) << i;
EXPECT_EQ(bf16.rep(), i) << i;
EXPECT_EQ(f16.rep(), i) << i;
}
}
}
TYPED_TEST(Float8Test, ConvertFromWithSaturation) {
using Float8 = TypeParam;
// ConvertFrom<kSaturate=true> clamps out-of-range finite inputs to
// max()/lowest() instead of overflowing to infinity (or NaN for formats
// without one).
Float8 upper =
Float8::template ConvertFrom<true, false>(
static_cast<float>(std::numeric_limits<Float8>::max()) * 2);
EXPECT_EQ(upper, std::numeric_limits<Float8>::max());
Float8 lower =
Float8::template ConvertFrom<true, false>(
static_cast<float>(std::numeric_limits<Float8>::lowest()) * 2);
EXPECT_EQ(lower, std::numeric_limits<Float8>::lowest());
Float8 nan =
Float8::template ConvertFrom<true, true>(
std::numeric_limits<float>::quiet_NaN());
EXPECT_TRUE(isnan(nan));
Float8 inf =
Float8::template ConvertFrom<true, true>(
std::numeric_limits<float>::infinity());
EXPECT_TRUE(std::numeric_limits<Float8>::has_infinity ? isinf(inf)
: isnan(inf));
Float8 ninf =
Float8::template ConvertFrom<true, true>(
-std::numeric_limits<float>::infinity());
EXPECT_TRUE(std::numeric_limits<Float8>::has_infinity ? isinf(ninf)
: isnan(ninf));
}
TYPED_TEST(Float8Test, ConvertFromWithTruncation) {
using Float8 = TypeParam;
// ConvertFrom<kTruncate=true> drops the discarded mantissa bits instead of
// rounding to nearest even.
float less_than_two = absl::bit_cast<float>(0x3FFFFFFF);
Float8 truncated =
Float8::template ConvertFrom<false, true>(
less_than_two);
EXPECT_LT(static_cast<float>(truncated), 2);
Float8 rounded =
Float8::template ConvertFrom<false, false>(
less_than_two);
EXPECT_EQ(static_cast<float>(rounded), 2);
double kLarge = 0x1.c001p+16;
EXPECT_EQ(
(Float8::template ConvertFrom<false, true>(
kLarge)
.rep()),
std::numeric_limits<Float8>::infinity().rep());
EXPECT_EQ(
(Float8::template ConvertFrom<false, false>(
kLarge)
.rep()),
std::numeric_limits<Float8>::infinity().rep());
for (int i = 0x01; i < 0x04; ++i) {
float less_than_subnorm =
std::nexttoward(static_cast<float>(Float8::FromRep(i)), 0);
Float8 truncated_subnorm =
Float8::template ConvertFrom<false, true>(
less_than_subnorm);
EXPECT_EQ(truncated_subnorm.rep(), i - 1);
Float8 rounded_subnorm =
Float8::template ConvertFrom<false, false>(
less_than_subnorm);
EXPECT_EQ(rounded_subnorm.rep(), i);
}
}
TYPED_TEST(Float8Test, ConvertTo) {
using Float8 = TypeParam;
for (int i = 0x00; i <= 0xFF; ++i) {
Float8 f8 = Float8::FromRep(i);
float f32 = static_cast<float>(f8);
if (isnan(f8)) {
EXPECT_TRUE(isnan(Float8::template ConvertTo<float, false,
false>(f8)));
EXPECT_TRUE(isnan(Float8::template ConvertTo<float, false,
true>(f8)));
EXPECT_TRUE(isnan(Float8::template ConvertTo<float, true,
false>(f8)));
EXPECT_TRUE(isnan(Float8::template ConvertTo<float, true,
true>(f8)));
} else {
EXPECT_EQ(f32, (Float8::template ConvertTo<float, false,
false>(f8)));
EXPECT_EQ(f32, (Float8::template ConvertTo<float, false,
true>(f8)));
EXPECT_EQ(f32, (Float8::template ConvertTo<float, true,
false>(f8)));
EXPECT_EQ(f32, (Float8::template ConvertTo<float, true,
true>(f8)));
}
}
}
TEST(Float8Test, Float8E5m2_To_Float8E4m3) {
Float8e5m2 max = std::numeric_limits<Float8e5m2>::max();
Float8e4m3fn saturated = Float8e4m3fn::ConvertFrom<true>(max);
EXPECT_EQ(saturated, std::numeric_limits<Float8e4m3fn>::max());
saturated = Float8e5m2::ConvertTo<Float8e4m3fn, true>(max);
EXPECT_EQ(saturated, std::numeric_limits<Float8e4m3fn>::max());
Float8e5m2 less_than_subnorm = Float8e5m2::FromRep(0x1F);
Float8e4m3fn rounded_subnorm =
Float8e4m3fn::ConvertFrom<false, false>(
less_than_subnorm);
EXPECT_EQ(rounded_subnorm.rep(), 0x04);
Float8e4m3fn truncated_subnorm =
Float8e4m3fn::ConvertFrom<false, true>(
less_than_subnorm);
EXPECT_EQ(truncated_subnorm.rep(), 0x03);
}
TEST(Float8Test, Half_To_Float8E4m3) {
::half_float::half big_half(0x1.dfcp+8f);
Float8e4m3fn big_e4m3 =
Float8e4m3fn::ConvertFrom<true, false>(
big_half);
EXPECT_EQ(big_e4m3.rep(), std::numeric_limits<Float8e4m3fn>::max().rep());
}
TEST(Float8Test, Float8E5m2_To_Float8E4m3b11fnuz) {
Float8e5m2 max = std::numeric_limits<Float8e5m2>::max();
Float8e4m3b11fnuz saturated =
Float8e4m3b11fnuz::ConvertFrom<true>(max);
EXPECT_EQ(saturated, std::numeric_limits<Float8e4m3b11fnuz>::max());
saturated = Float8e5m2::ConvertTo<Float8e4m3b11fnuz, true>(max);
EXPECT_EQ(saturated, std::numeric_limits<Float8e4m3b11fnuz>::max());
Float8e5m2 less_than_subnorm = Float8e5m2::FromRep(0x0F);
Float8e4m3b11fnuz rounded_subnorm =
Float8e4m3b11fnuz::ConvertFrom<false, false>(
less_than_subnorm);
EXPECT_EQ(rounded_subnorm.rep(), 0x04);
Float8e4m3b11fnuz truncated_subnorm =
Float8e4m3b11fnuz::ConvertFrom<false, true>(
less_than_subnorm);
EXPECT_EQ(truncated_subnorm.rep(), 0x03);
for (uint8_t i = 0; i < std::numeric_limits<Float8e5m2>::infinity().rep();
++i) {
Float8e5m2 big_e5m2 = absl::bit_cast<Float8e5m2>(i);
EXPECT_TRUE(isfinite(big_e5m2)) << uint16_t{i};
float big_float = static_cast<float>(big_e5m2);
auto big_e4m3 =
Float8e4m3b11fnuz::ConvertFrom<true,
false>(big_float);
if (i > 0x4f) {
EXPECT_EQ(big_e4m3.rep(),
std::numeric_limits<Float8e4m3b11fnuz>::max().rep())
<< uint16_t{i};
}
EXPECT_EQ((Float8e4m3b11fnuz::ConvertFrom<true,
false>(big_e5m2)
.rep()),
big_e4m3.rep())
<< i;
EXPECT_EQ((Float8e4m3b11fnuz::ConvertFrom<true,
false>(-big_e5m2)
.rep()),
(-big_e4m3).rep())
<< i;
}
}
TEST(Float8Test, Float8E4m3b11fnuz_To_Float8E4m3) {
Float8e4m3b11fnuz max = std::numeric_limits<Float8e4m3b11fnuz>::max();
Float8e4m3fn saturated = Float8e4m3fn::ConvertFrom<true>(max);
EXPECT_EQ(static_cast<float>(saturated),
static_cast<float>(std::numeric_limits<Float8e4m3b11fnuz>::max()));
saturated =
Float8e4m3b11fnuz::ConvertTo<Float8e4m3fn, true>(max);
EXPECT_EQ(static_cast<float>(saturated),
static_cast<float>(std::numeric_limits<Float8e4m3b11fnuz>::max()));
Float8e4m3b11fnuz less_than_subnorm =
Float8e4m3b11fnuz::FromRep(0b0011'110);
Float8e4m3fn rounded_subnorm =
Float8e4m3fn::ConvertFrom<false, false>(
less_than_subnorm);
EXPECT_EQ(rounded_subnorm.rep(), 0x04);
Float8e4m3fn truncated_subnorm =
Float8e4m3fn::ConvertFrom<false, true>(
less_than_subnorm);
EXPECT_EQ(truncated_subnorm.rep(), 0x03);
for (uint8_t i = 0;
i < std::numeric_limits<Float8e4m3b11fnuz>::infinity().rep(); ++i) {
Float8e4m3b11fnuz big_e4m3b11fnuz = absl::bit_cast<Float8e4m3b11fnuz>(i);
EXPECT_TRUE(isfinite(big_e4m3b11fnuz)) << uint16_t{i};
float big_float = static_cast<float>(big_e4m3b11fnuz);
auto big_e4m3 =
Float8e4m3fn::ConvertFrom<true, false>(
big_float);
EXPECT_EQ(
(Float8e4m3fn::ConvertFrom<true, false>(
big_e4m3b11fnuz)
.rep()),
big_e4m3.rep())
<< i;
EXPECT_EQ(
(Float8e4m3fn::ConvertFrom<true, false>(
-big_e4m3b11fnuz)
.rep()),
(big_float > 0.0f ? -big_e4m3 : big_e4m3).rep())
<< i;
}
}
TEST(Float8Test, Float8E4m3_To_Float8E5m2) {
Float8e4m3fn less_than_two = Float8e4m3fn::FromRep(0x3F);
Float8e5m2 truncated =
Float8e5m2::template ConvertFrom<false,
true>(less_than_two);
EXPECT_LT(static_cast<float>(truncated), 2);
Float8e5m2 rounded =
Float8e5m2::template ConvertFrom<false,
false>(less_than_two);
EXPECT_EQ(static_cast<float>(rounded), 2);
}
TEST(Float8Test, Half_To_Float8E5m2) {
::half_float::half inf =
absl::bit_cast<::half_float::half>(static_cast<uint16_t>(0x7C00));
EXPECT_EQ(static_cast<Float8e5m2>(inf).rep(), 0x7C);
::half_float::half ninf =
absl::bit_cast<::half_float::half>(static_cast<uint16_t>(0xFC00));
EXPECT_EQ(static_cast<Float8e5m2>(ninf).rep(), 0xFC);
::half_float::half nan =
absl::bit_cast<::half_float::half>(static_cast<uint16_t>(0x7C01));
EXPECT_EQ(static_cast<Float8e5m2>(nan).rep(), 0x7E);
::half_float::half nnan =
absl::bit_cast<::half_float::half>(static_cast<uint16_t>(0xFC01));
EXPECT_EQ(static_cast<Float8e5m2>(nnan).rep(), 0xFE);
::half_float::half less_than_two =
absl::bit_cast<::half_float::half>(static_cast<uint16_t>(0x3FFF));
EXPECT_EQ((Float8e5m2::ConvertFrom</*saturate=*/false,
                                   /*truncate=*/false>(less_than_two)
               .rep()),
          0x40);
EXPECT_EQ((Float8e5m2::ConvertFrom</*saturate=*/false,
                                   /*truncate=*/true>(less_than_two)
               .rep()),
          0x3F);
EXPECT_EQ((Float8e5m2::ConvertFrom</*saturate=*/false,
                                   /*truncate=*/false>(-less_than_two)
               .rep()),
          0xC0);
EXPECT_EQ((Float8e5m2::ConvertFrom</*saturate=*/false,
                                   /*truncate=*/true>(-less_than_two)
               .rep()),
          0xBF);
for (uint16_t i = static_cast<uint16_t>(absl::bit_cast<uint8_t>(
std::numeric_limits<Float8e5m2>::max()))
<< 8;
i < absl::bit_cast<uint16_t>(
std::numeric_limits<::half_float::half>::infinity());
++i) {
::half_float::half big_half = absl::bit_cast<::half_float::half>(i);
float big_float = static_cast<float>(big_half);
EXPECT_EQ((Float8e5m2::ConvertFrom<true, false>(
big_half)
.rep()),
(Float8e5m2::ConvertFrom<true, false>(
big_float)
.rep()))
<< i;
EXPECT_EQ((Float8e5m2::ConvertFrom<true, false>(
-big_half)
.rep()),
(Float8e5m2::ConvertFrom<true, false>(
-big_float)
.rep()))
<< i;
}
}
using ::testing::Eq;
using ::testing::IsTrue;
MATCHER_P(EqOrIsNan, other, "") {
if (isnan(other)) {
return ExplainMatchResult(IsTrue(), isnan(arg), result_listener);
}
return ExplainMatchResult(Eq(other), arg, result_listener);
}
TYPED_TEST(Float8Test, CallTheOperator) {
using Float8 = TypeParam;
for (int i = 0x00; i <= 0xFF; ++i) {
Float8 a = Float8::FromRep(i);
for (int j = 0x00; j <= 0xFF; ++j) {
Float8 b = Float8::FromRep(j);
EXPECT_THAT(a + b, EqOrIsNan<Float8>(Float8{float{a} + float{b}}));
EXPECT_THAT(a - b, EqOrIsNan<Float8>(Float8{float{a} - float{b}}));
EXPECT_THAT(a * b, EqOrIsNan<Float8>(Float8{float{a} * float{b}}));
EXPECT_THAT(a / b, EqOrIsNan<Float8>(Float8{float{a} / float{b}}));
Float8 c;
EXPECT_THAT((c = a, c += b),
EqOrIsNan<Float8>(Float8{float{a} + float{b}}));
EXPECT_THAT((c = a, c -= b),
EqOrIsNan<Float8>(Float8{float{a} - float{b}}));
EXPECT_THAT((c = a, c *= b),
EqOrIsNan<Float8>(Float8{float{a} * float{b}}));
EXPECT_THAT((c = a, c /= b),
EqOrIsNan<Float8>(Float8{float{a} / float{b}}));
EXPECT_EQ(a == b, float{a} == float{b}) << float{a} << " vs " << float{b};
EXPECT_EQ(a != b, float{a} != float{b});
EXPECT_EQ(a < b, float{a} < float{b});
EXPECT_EQ(a <= b, float{a} <= float{b});
EXPECT_EQ(a > b, float{a} > float{b});
EXPECT_EQ(a >= b, float{a} >= float{b});
}
}
}
TYPED_TEST(Float8Test, CallTheConstOperator) {
using Float8 = TypeParam;
for (int i = 0x00; i <= 0xFF; ++i) {
const Float8 a = Float8::FromRep(i);
for (int j = 0x00; j <= 0xFF; ++j) {
const Float8 b = Float8::FromRep(j);
EXPECT_THAT(a + b, EqOrIsNan<Float8>(Float8{float{a} + float{b}}));
EXPECT_THAT(a - b, EqOrIsNan<Float8>(Float8{float{a} - float{b}}));
EXPECT_THAT(a * b, EqOrIsNan<Float8>(Float8{float{a} * float{b}}));
EXPECT_THAT(a / b, EqOrIsNan<Float8>(Float8{float{a} / float{b}}));
Float8 c;
EXPECT_THAT((c = a, c += b),
EqOrIsNan<Float8>(Float8{float{a} + float{b}}));
EXPECT_THAT((c = a, c -= b),
EqOrIsNan<Float8>(Float8{float{a} - float{b}}));
EXPECT_THAT((c = a, c *= b),
EqOrIsNan<Float8>(Float8{float{a} * float{b}}));
EXPECT_THAT((c = a, c /= b),
EqOrIsNan<Float8>(Float8{float{a} / float{b}}));
EXPECT_EQ(a == b, float{a} == float{b}) << float{a} << " vs " << float{b};
EXPECT_EQ(a != b, float{a} != float{b});
EXPECT_EQ(a < b, float{a} < float{b}) << float{a} << " vs " << float{b};
EXPECT_EQ(a <= b, float{a} <= float{b});
EXPECT_EQ(a > b, float{a} > float{b}) << float{a} << " vs " << float{b};
EXPECT_EQ(a >= b, float{a} >= float{b});
}
}
}
TEST(Float8e5m2Test, SmallCastToDenormal) {
float x = std::ldexp(1.3125, -15);
Float8e5m2 y = static_cast<Float8e5m2>(x);
float z = static_cast<float>(y);
EXPECT_EQ(z, std::ldexp(1.5, -15));
}
struct Float8CastTestParamNames {
template <typename TypeParam>
static std::string GetName(int idx) {
using first_type = typename TypeParam::first_type;
using second_type = typename TypeParam::second_type;
return absl::StrCat(::testing::internal::GetTypeName<first_type>(), "_",
::testing::internal::GetTypeName<second_type>());
}
};
#define GEN_LONG_DOUBLE_PAIR(Type) std::pair<Type, long double>,
#define GEN_DEST_TYPES(Type) \
GEN_LONG_DOUBLE_PAIR(Type) \
std::pair<Type, double>, std::pair<Type, float>, \
std::pair<Type, tensorstore::BFloat16>, \
std::pair<Type, ::half_float::half>, std::pair<Type, Float8e4m3fn>, \
std::pair<Type, Float8e4m3b11fnuz>, std::pair<Type, Float8e4m3fnuz>, \
std::pair<Type, Float8e5m2fnuz>, std::pair<Type, Float8e5m2>, \
std::pair<Type, bool>, std::pair<Type, int32_t>, \
std::pair<Type, int64_t>
#define GEN_TYPE_PAIRS() \
GEN_DEST_TYPES(Float8e4m3fn), GEN_DEST_TYPES(Float8e4m3b11fnuz), \
GEN_DEST_TYPES(Float8e5m2), GEN_DEST_TYPES(Float8e4m3fnuz), \
GEN_DEST_TYPES(Float8e5m2fnuz)
using Float8CastTypePairs = ::testing::Types<GEN_TYPE_PAIRS()>;
template <typename CastPair>
class Float8CastTest : public ::testing::Test {};
TYPED_TEST_SUITE(Float8CastTest, Float8CastTypePairs, Float8CastTestParamNames);
TYPED_TEST(Float8CastTest, CastThroughFloat) {
using Float8 = typename TypeParam::first_type;
using DestType = typename TypeParam::second_type;
for (int i = 0x00; i <= 0xFF; ++i) {
Float8 f8 = Float8::FromRep(i);
if constexpr (std::numeric_limits<DestType>::is_integer &&
!std::is_same_v<DestType, bool>) {
if (!isfinite(f8)) {
continue;
}
}
DestType dest = static_cast<DestType>(f8);
DestType expected = static_cast<DestType>(static_cast<float>(f8));
if constexpr (std::numeric_limits<DestType>::is_integer) {
EXPECT_EQ(dest, expected);
} else {
EXPECT_THAT(dest, EqOrIsNan<DestType>(expected));
}
}
}
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/float8.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/float8_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
c9209c28-d4bd-437f-92df-e2eb3d2bf2b6 | cpp | google/tensorstore | unit | tensorstore/internal/json_binding/unit.cc | tensorstore/util/unit_test.cc | #include "tensorstore/internal/json_binding/unit.h"
#include <cmath>
#include <string>
#include "absl/status/status.h"
#include <nlohmann/json_fwd.hpp>
#include "tensorstore/internal/json/value_as.h"
#include "tensorstore/internal/json_binding/bindable.h"
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/internal/json_binding/std_tuple.h"
#include "tensorstore/util/unit.h"
namespace tensorstore {
namespace internal_json_binding {
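// Binder for Unit: on load it accepts a bare string ("4nm"), a bare number
// (treated as a multiplier with an empty base unit), or a
// [multiplier, base_unit] array whose multiplier must be a finite positive
// number. Saving always emits the two-element array form.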
TENSORSTORE_DEFINE_JSON_BINDER(
UnitJsonBinder,
[](auto is_loading, const auto& options, auto* obj,
::nlohmann::json* j) -> absl::Status {
if constexpr (is_loading) {
if (auto* s = j->get_ptr<const std::string*>()) {
*obj = Unit(*s);
return absl::OkStatus();
} else if (j->is_number()) {
*obj = Unit(j->get<double>(), "");
return absl::OkStatus();
}
}
return HeterogeneousArray(
Projection<&Unit::multiplier>(Validate([](const auto& options,
auto* num) {
if (*num > 0 && std::isfinite(*num)) return absl::OkStatus();
return internal_json::ExpectedError(*num, "finite positive number");
})),
Projection<&Unit::base_unit>())(is_loading, options, obj, j);
});
TENSORSTORE_DEFINE_JSON_BINDER(
StringOnlyUnitJsonBinder,
Compose<std::string>([](auto is_loading, const auto& options, auto* obj,
std::string* j) -> absl::Status {
if constexpr (is_loading) {
*obj = Unit(*j);
} else {
*j = obj->to_string();
}
return absl::OkStatus();
}));
}
} | #include "tensorstore/util/unit.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/str_cat.h"
#include "tensorstore/internal/json_binding/gtest.h"
#include "tensorstore/internal/json_binding/unit.h"
#include "tensorstore/internal/json_gtest.h"
#include "tensorstore/serialization/serialization.h"
#include "tensorstore/serialization/test_util.h"
#include "tensorstore/util/str_cat.h"
namespace {
using ::tensorstore::TestJsonBinderRoundTrip;
using ::tensorstore::TestJsonBinderRoundTripJsonOnlyInexact;
using ::tensorstore::Unit;
using ::tensorstore::serialization::TestSerializationRoundTrip;
TEST(UnitTest, DefaultConstruct) {
Unit u;
EXPECT_EQ(1, u.multiplier);
EXPECT_EQ("", u.base_unit);
}
TEST(UnitTest, Compare) {
Unit a(5, "nm");
Unit b(5.5, "nm");
Unit c(5, "um");
Unit d;
EXPECT_EQ(a, a);
EXPECT_EQ(b, b);
EXPECT_EQ(c, c);
EXPECT_EQ(d, d);
EXPECT_NE(a, b);
EXPECT_NE(a, c);
EXPECT_NE(a, d);
EXPECT_NE(b, c);
EXPECT_NE(b, d);
EXPECT_NE(c, d);
}
TEST(UnitTest, Ostream) {
EXPECT_EQ("5.5 nm", tensorstore::StrCat(Unit(5.5, "nm")));
EXPECT_EQ("nm", tensorstore::StrCat(Unit(1, "nm")));
EXPECT_EQ("5", tensorstore::StrCat(Unit(5, "")));
EXPECT_EQ("1", tensorstore::StrCat(Unit(1, "")));
}
TEST(UnitTest, ConvertToString) {
EXPECT_EQ("5.5 nm", Unit(5.5, "nm").to_string());
EXPECT_EQ("nm", Unit(1, "nm").to_string());
EXPECT_EQ("5", Unit(5, "").to_string());
EXPECT_EQ("1", Unit(1, "").to_string());
EXPECT_EQ("1", absl::StrCat(Unit(1, "")));
}
TEST(UnitTest, MultiplierBaseUnit) {
Unit u = {5, "nm"};
EXPECT_EQ(5, u.multiplier);
EXPECT_EQ("nm", u.base_unit);
}
TEST(UnitTest, Unit) {
EXPECT_EQ(Unit(4, "nm"), Unit("4nm"));
EXPECT_EQ(Unit(4, "nm"), Unit("4.nm"));
EXPECT_EQ(Unit(4e-3, "nm"), Unit("4e-3nm"));
EXPECT_EQ(Unit(.4, "nm"), Unit(".4nm"));
EXPECT_EQ(Unit(.4, "nm"), Unit(".4 nm"));
EXPECT_EQ(Unit(.4, "nm"), Unit(" .4 nm"));
EXPECT_EQ(Unit(.4, "nm"), Unit(" .4 nm "));
EXPECT_EQ(Unit(4e-3, "nm"), Unit("+4e-3nm"));
EXPECT_EQ(Unit(-4e-3, "nm"), Unit("-4e-3nm"));
EXPECT_EQ(Unit(4.5, "nm"), Unit("4.5nm"));
EXPECT_EQ(Unit(1, "nm"), Unit("nm"));
EXPECT_EQ(Unit(4, ""), Unit("4"));
EXPECT_EQ(Unit(1, ""), Unit(""));
EXPECT_EQ(Unit(3, "nm @ 50"), Unit("3 nm @ 50"));
}
TEST(UnitTest, JsonRoundTrip) {
TestJsonBinderRoundTrip<Unit>({
{Unit(4, "nm"), {4, "nm"}},
{Unit(4.5, "nm"), {4.5, "nm"}},
{Unit(4.5, ""), {4.5, ""}},
});
}
TEST(UnitTest, JsonRoundTripInexact) {
TestJsonBinderRoundTripJsonOnlyInexact<Unit>({
{"4nm", {4, "nm"}},
{4, {4, ""}},
{"nm", {1, "nm"}},
});
}
TEST(SerializationTest, Basic) {
TestSerializationRoundTrip(Unit("4nm"));
TestSerializationRoundTrip(Unit("4"));
TestSerializationRoundTrip(Unit("nm"));
TestSerializationRoundTrip(Unit(""));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/json_binding/unit.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/unit_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
7da9ed7e-56c9-4359-93fe-cc293e9bcce3 | cpp | abseil/abseil-cpp | generate_real | absl/random/internal/generate_real.h | absl/random/internal/generate_real_test.cc | #ifndef ABSL_RANDOM_INTERNAL_GENERATE_REAL_H_
#define ABSL_RANDOM_INTERNAL_GENERATE_REAL_H_
#include <cstdint>
#include <cstring>
#include <limits>
#include <type_traits>
#include "absl/meta/type_traits.h"
#include "absl/numeric/bits.h"
#include "absl/random/internal/fastmath.h"
#include "absl/random/internal/traits.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace random_internal {
struct GeneratePositiveTag {};
struct GenerateNegativeTag {};
struct GenerateSignedTag {};
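// Turns 64 random bits into a float or double: the count of leading zeros
// determines the (negative) exponent, so results are geometrically
// distributed across binades below 1.0, and the remaining high bits fill the
// mantissa. SignedTag selects positive, negative, or bit-63-signed output;
// IncludeZero maps all-zero bits to 0; exp_bias rescales the result by a
// power of two.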
template <typename RealType,
typename SignedTag = GeneratePositiveTag,
bool IncludeZero = true>
inline RealType GenerateRealFromBits(uint64_t bits, int exp_bias = 0) {
using real_type = RealType;
using uint_type = absl::conditional_t<std::is_same<real_type, float>::value,
uint32_t, uint64_t>;
static_assert(
(std::is_same<double, real_type>::value ||
std::is_same<float, real_type>::value),
"GenerateRealFromBits must be parameterized by either float or double.");
static_assert(sizeof(uint_type) == sizeof(real_type),
"Mismatched unsigned and real types.");
static_assert((std::numeric_limits<real_type>::is_iec559 &&
std::numeric_limits<real_type>::radix == 2),
"RealType representation is not IEEE 754 binary.");
static_assert((std::is_same<SignedTag, GeneratePositiveTag>::value ||
std::is_same<SignedTag, GenerateNegativeTag>::value ||
std::is_same<SignedTag, GenerateSignedTag>::value),
"");
static constexpr int kExp = std::numeric_limits<real_type>::digits - 1;
static constexpr uint_type kMask = (static_cast<uint_type>(1) << kExp) - 1u;
static constexpr int kUintBits = sizeof(uint_type) * 8;
int exp = exp_bias + int{std::numeric_limits<real_type>::max_exponent - 2};
uint_type sign = std::is_same<SignedTag, GenerateNegativeTag>::value
? (static_cast<uint_type>(1) << (kUintBits - 1))
: 0;
if (std::is_same<SignedTag, GenerateSignedTag>::value) {
if (std::is_same<uint_type, uint64_t>::value) {
sign = bits & uint64_t{0x8000000000000000};
}
if (std::is_same<uint_type, uint32_t>::value) {
const uint64_t tmp = bits & uint64_t{0x8000000000000000};
sign = static_cast<uint32_t>(tmp >> 32);
}
bits = bits & uint64_t{0x7FFFFFFFFFFFFFFF};
exp++;
}
if (IncludeZero) {
if (bits == 0u) return 0;
}
int clz = countl_zero(bits);
bits <<= (IncludeZero ? clz : (clz & 63));
exp -= clz;
bits >>= (63 - kExp);
uint_type val = sign | (static_cast<uint_type>(exp) << kExp) |
(static_cast<uint_type>(bits) & kMask);
real_type result;
memcpy(static_cast<void*>(&result), static_cast<const void*>(&val),
sizeof(result));
return result;
}
}
ABSL_NAMESPACE_END
}
#endif | #include "absl/random/internal/generate_real.h"
#include <cfloat>
#include <cstddef>
#include <cstdint>
#include <string>
#include "gtest/gtest.h"
#include "absl/flags/flag.h"
#include "absl/numeric/bits.h"
ABSL_FLAG(int64_t, absl_random_test_trials, 50000,
"Number of trials for the probability tests.");
using absl::random_internal::GenerateNegativeTag;
using absl::random_internal::GeneratePositiveTag;
using absl::random_internal::GenerateRealFromBits;
using absl::random_internal::GenerateSignedTag;
namespace {
TEST(GenerateRealTest, U64ToFloat_Positive_NoZero_Test) {
auto ToFloat = [](uint64_t a) {
return GenerateRealFromBits<float, GeneratePositiveTag, false>(a);
};
EXPECT_EQ(ToFloat(0x0000000000000000), 2.710505431e-20f);
EXPECT_EQ(ToFloat(0x0000000000000001), 5.421010862e-20f);
EXPECT_EQ(ToFloat(0x8000000000000000), 0.5);
EXPECT_EQ(ToFloat(0x8000000000000001), 0.5);
EXPECT_EQ(ToFloat(0xFFFFFFFFFFFFFFFF), 0.9999999404f);
}
TEST(GenerateRealTest, U64ToFloat_Positive_Zero_Test) {
auto ToFloat = [](uint64_t a) {
return GenerateRealFromBits<float, GeneratePositiveTag, true>(a);
};
EXPECT_EQ(ToFloat(0x0000000000000000), 0.0);
EXPECT_EQ(ToFloat(0x0000000000000001), 5.421010862e-20f);
EXPECT_EQ(ToFloat(0x8000000000000000), 0.5);
EXPECT_EQ(ToFloat(0x8000000000000001), 0.5);
EXPECT_EQ(ToFloat(0xFFFFFFFFFFFFFFFF), 0.9999999404f);
}
TEST(GenerateRealTest, U64ToFloat_Negative_NoZero_Test) {
auto ToFloat = [](uint64_t a) {
return GenerateRealFromBits<float, GenerateNegativeTag, false>(a);
};
EXPECT_EQ(ToFloat(0x0000000000000000), -2.710505431e-20f);
EXPECT_EQ(ToFloat(0x0000000000000001), -5.421010862e-20f);
EXPECT_EQ(ToFloat(0x8000000000000000), -0.5);
EXPECT_EQ(ToFloat(0x8000000000000001), -0.5);
EXPECT_EQ(ToFloat(0xFFFFFFFFFFFFFFFF), -0.9999999404f);
}
TEST(GenerateRealTest, U64ToFloat_Negative_Zero_Test) {
auto ToFloat = [](uint64_t a) {
return GenerateRealFromBits<float, GenerateNegativeTag, true>(a);
};
EXPECT_EQ(ToFloat(0x0000000000000000), 0.0);
EXPECT_EQ(ToFloat(0x0000000000000001), -5.421010862e-20f);
EXPECT_EQ(ToFloat(0x8000000000000000), -0.5);
EXPECT_EQ(ToFloat(0x8000000000000001), -0.5);
EXPECT_EQ(ToFloat(0xFFFFFFFFFFFFFFFF), -0.9999999404f);
}
TEST(GenerateRealTest, U64ToFloat_Signed_NoZero_Test) {
auto ToFloat = [](uint64_t a) {
return GenerateRealFromBits<float, GenerateSignedTag, false>(a);
};
EXPECT_EQ(ToFloat(0x0000000000000000), 5.421010862e-20f);
EXPECT_EQ(ToFloat(0x0000000000000001), 1.084202172e-19f);
EXPECT_EQ(ToFloat(0x7FFFFFFFFFFFFFFF), 0.9999999404f);
EXPECT_EQ(ToFloat(0x8000000000000000), -5.421010862e-20f);
EXPECT_EQ(ToFloat(0x8000000000000001), -1.084202172e-19f);
EXPECT_EQ(ToFloat(0xFFFFFFFFFFFFFFFF), -0.9999999404f);
}
TEST(GenerateRealTest, U64ToFloat_Signed_Zero_Test) {
auto ToFloat = [](uint64_t a) {
return GenerateRealFromBits<float, GenerateSignedTag, true>(a);
};
EXPECT_EQ(ToFloat(0x0000000000000000), 0);
EXPECT_EQ(ToFloat(0x0000000000000001), 1.084202172e-19f);
EXPECT_EQ(ToFloat(0x7FFFFFFFFFFFFFFF), 0.9999999404f);
EXPECT_EQ(ToFloat(0x8000000000000000), 0);
EXPECT_EQ(ToFloat(0x8000000000000001), -1.084202172e-19f);
EXPECT_EQ(ToFloat(0xFFFFFFFFFFFFFFFF), -0.9999999404f);
}
TEST(GenerateRealTest, U64ToFloat_Signed_Bias_Test) {
auto ToFloat = [](uint64_t a) {
return GenerateRealFromBits<float, GenerateSignedTag, true>(a, 1);
};
EXPECT_EQ(ToFloat(0x0000000000000000), 0);
EXPECT_EQ(ToFloat(0x0000000000000001), 2 * 1.084202172e-19f);
EXPECT_EQ(ToFloat(0x7FFFFFFFFFFFFFFF), 2 * 0.9999999404f);
EXPECT_EQ(ToFloat(0x8000000000000000), 0);
EXPECT_EQ(ToFloat(0x8000000000000001), 2 * -1.084202172e-19f);
EXPECT_EQ(ToFloat(0xFFFFFFFFFFFFFFFF), 2 * -0.9999999404f);
}
TEST(GenerateRealTest, U64ToFloatTest) {
auto ToFloat = [](uint64_t a) -> float {
return GenerateRealFromBits<float, GeneratePositiveTag, true>(a);
};
EXPECT_EQ(ToFloat(0x0000000000000000), 0.0f);
EXPECT_EQ(ToFloat(0x8000000000000000), 0.5f);
EXPECT_EQ(ToFloat(0x8000000000000001), 0.5f);
EXPECT_EQ(ToFloat(0x800000FFFFFFFFFF), 0.5f);
EXPECT_EQ(ToFloat(0xFFFFFFFFFFFFFFFF), 0.9999999404f);
EXPECT_GT(ToFloat(0x0000000000000001), 0.0f);
EXPECT_NE(ToFloat(0x7FFFFF0000000000), ToFloat(0x7FFFFEFFFFFFFFFF));
EXPECT_LT(ToFloat(0xFFFFFFFFFFFFFFFF), 1.0f);
int32_t two_to_24 = 1 << 24;
EXPECT_EQ(static_cast<int32_t>(ToFloat(0xFFFFFFFFFFFFFFFF) * two_to_24),
two_to_24 - 1);
EXPECT_NE(static_cast<int32_t>(ToFloat(0xFFFFFFFFFFFFFFFF) * two_to_24 * 2),
two_to_24 * 2 - 1);
EXPECT_EQ(ToFloat(0xFFFFFFFFFFFFFFFF), ToFloat(0xFFFFFF0000000000));
EXPECT_NE(ToFloat(0xFFFFFFFFFFFFFFFF), ToFloat(0xFFFFFEFFFFFFFFFF));
EXPECT_EQ(ToFloat(0x7FFFFFFFFFFFFFFF), ToFloat(0x7FFFFF8000000000));
EXPECT_NE(ToFloat(0x7FFFFFFFFFFFFFFF), ToFloat(0x7FFFFF7FFFFFFFFF));
EXPECT_EQ(ToFloat(0x3FFFFFFFFFFFFFFF), ToFloat(0x3FFFFFC000000000));
EXPECT_NE(ToFloat(0x3FFFFFFFFFFFFFFF), ToFloat(0x3FFFFFBFFFFFFFFF));
for (int i = 0; i < 100; ++i) {
EXPECT_EQ(i * ToFloat(0x0000000000000001), ToFloat(i));
}
float exp_values[64];
exp_values[63] = 0.5f;
for (int i = 62; i >= 0; --i) exp_values[i] = 0.5f * exp_values[i + 1];
constexpr uint64_t one = 1;
for (int i = 0; i < 64; ++i) {
EXPECT_EQ(ToFloat(one << i), exp_values[i]);
for (int j = 1; j < FLT_MANT_DIG && i - j >= 0; ++j) {
EXPECT_NE(exp_values[i] + exp_values[i - j], exp_values[i]);
EXPECT_EQ(ToFloat((one << i) + (one << (i - j))),
exp_values[i] + exp_values[i - j]);
}
for (int j = FLT_MANT_DIG; i - j >= 0; ++j) {
EXPECT_EQ(exp_values[i] + exp_values[i - j], exp_values[i]);
EXPECT_EQ(ToFloat((one << i) + (one << (i - j))), exp_values[i]);
}
}
}
TEST(GenerateRealTest, U64ToDouble_Positive_NoZero_Test) {
auto ToDouble = [](uint64_t a) {
return GenerateRealFromBits<double, GeneratePositiveTag, false>(a);
};
EXPECT_EQ(ToDouble(0x0000000000000000), 2.710505431213761085e-20);
EXPECT_EQ(ToDouble(0x0000000000000001), 5.42101086242752217004e-20);
EXPECT_EQ(ToDouble(0x0000000000000002), 1.084202172485504434e-19);
EXPECT_EQ(ToDouble(0x8000000000000000), 0.5);
EXPECT_EQ(ToDouble(0x8000000000000001), 0.5);
EXPECT_EQ(ToDouble(0xFFFFFFFFFFFFFFFF), 0.999999999999999888978);
}
TEST(GenerateRealTest, U64ToDouble_Positive_Zero_Test) {
auto ToDouble = [](uint64_t a) {
return GenerateRealFromBits<double, GeneratePositiveTag, true>(a);
};
EXPECT_EQ(ToDouble(0x0000000000000000), 0.0);
EXPECT_EQ(ToDouble(0x0000000000000001), 5.42101086242752217004e-20);
EXPECT_EQ(ToDouble(0x8000000000000000), 0.5);
EXPECT_EQ(ToDouble(0x8000000000000001), 0.5);
EXPECT_EQ(ToDouble(0xFFFFFFFFFFFFFFFF), 0.999999999999999888978);
}
TEST(GenerateRealTest, U64ToDouble_Negative_NoZero_Test) {
auto ToDouble = [](uint64_t a) {
return GenerateRealFromBits<double, GenerateNegativeTag, false>(a);
};
EXPECT_EQ(ToDouble(0x0000000000000000), -2.710505431213761085e-20);
EXPECT_EQ(ToDouble(0x0000000000000001), -5.42101086242752217004e-20);
EXPECT_EQ(ToDouble(0x0000000000000002), -1.084202172485504434e-19);
EXPECT_EQ(ToDouble(0x8000000000000000), -0.5);
EXPECT_EQ(ToDouble(0x8000000000000001), -0.5);
EXPECT_EQ(ToDouble(0xFFFFFFFFFFFFFFFF), -0.999999999999999888978);
}
TEST(GenerateRealTest, U64ToDouble_Negative_Zero_Test) {
auto ToDouble = [](uint64_t a) {
return GenerateRealFromBits<double, GenerateNegativeTag, true>(a);
};
EXPECT_EQ(ToDouble(0x0000000000000000), 0.0);
EXPECT_EQ(ToDouble(0x0000000000000001), -5.42101086242752217004e-20);
EXPECT_EQ(ToDouble(0x0000000000000002), -1.084202172485504434e-19);
EXPECT_EQ(ToDouble(0x8000000000000000), -0.5);
EXPECT_EQ(ToDouble(0x8000000000000001), -0.5);
EXPECT_EQ(ToDouble(0xFFFFFFFFFFFFFFFF), -0.999999999999999888978);
}
TEST(GenerateRealTest, U64ToDouble_Signed_NoZero_Test) {
auto ToDouble = [](uint64_t a) {
return GenerateRealFromBits<double, GenerateSignedTag, false>(a);
};
EXPECT_EQ(ToDouble(0x0000000000000000), 5.42101086242752217004e-20);
EXPECT_EQ(ToDouble(0x0000000000000001), 1.084202172485504434e-19);
EXPECT_EQ(ToDouble(0x7FFFFFFFFFFFFFFF), 0.999999999999999888978);
EXPECT_EQ(ToDouble(0x8000000000000000), -5.42101086242752217004e-20);
EXPECT_EQ(ToDouble(0x8000000000000001), -1.084202172485504434e-19);
EXPECT_EQ(ToDouble(0xFFFFFFFFFFFFFFFF), -0.999999999999999888978);
}
TEST(GenerateRealTest, U64ToDouble_Signed_Zero_Test) {
auto ToDouble = [](uint64_t a) {
return GenerateRealFromBits<double, GenerateSignedTag, true>(a);
};
EXPECT_EQ(ToDouble(0x0000000000000000), 0);
EXPECT_EQ(ToDouble(0x0000000000000001), 1.084202172485504434e-19);
EXPECT_EQ(ToDouble(0x7FFFFFFFFFFFFFFF), 0.999999999999999888978);
EXPECT_EQ(ToDouble(0x8000000000000000), 0);
EXPECT_EQ(ToDouble(0x8000000000000001), -1.084202172485504434e-19);
EXPECT_EQ(ToDouble(0xFFFFFFFFFFFFFFFF), -0.999999999999999888978);
}
TEST(GenerateRealTest, U64ToDouble_GenerateSignedTag_Bias_Test) {
auto ToDouble = [](uint64_t a) {
return GenerateRealFromBits<double, GenerateSignedTag, true>(a, -1);
};
EXPECT_EQ(ToDouble(0x0000000000000000), 0);
EXPECT_EQ(ToDouble(0x0000000000000001), 1.084202172485504434e-19 / 2);
EXPECT_EQ(ToDouble(0x7FFFFFFFFFFFFFFF), 0.999999999999999888978 / 2);
EXPECT_EQ(ToDouble(0x8000000000000000), 0);
EXPECT_EQ(ToDouble(0x8000000000000001), -1.084202172485504434e-19 / 2);
EXPECT_EQ(ToDouble(0xFFFFFFFFFFFFFFFF), -0.999999999999999888978 / 2);
}
TEST(GenerateRealTest, U64ToDoubleTest) {
auto ToDouble = [](uint64_t a) {
return GenerateRealFromBits<double, GeneratePositiveTag, true>(a);
};
EXPECT_EQ(ToDouble(0x0000000000000000), 0.0);
EXPECT_EQ(ToDouble(0x0000000000000000), 0.0);
EXPECT_EQ(ToDouble(0x0000000000000001), 5.42101086242752217004e-20);
EXPECT_EQ(ToDouble(0x7fffffffffffffef), 0.499999999999999944489);
EXPECT_EQ(ToDouble(0x8000000000000000), 0.5);
EXPECT_EQ(ToDouble(0x8000000000000001), 0.5);
EXPECT_EQ(ToDouble(0x80000000000007FF), 0.5);
EXPECT_EQ(ToDouble(0xFFFFFFFFFFFFFFFF), 0.999999999999999888978);
EXPECT_NE(ToDouble(0x7FFFFFFFFFFFF800), ToDouble(0x7FFFFFFFFFFFF7FF));
EXPECT_LT(ToDouble(0xFFFFFFFFFFFFFFFF), 1.0);
EXPECT_EQ(ToDouble(0xFFFFFFFFFFFFFFFF), ToDouble(0xFFFFFFFFFFFFF800));
EXPECT_NE(ToDouble(0xFFFFFFFFFFFFFFFF), ToDouble(0xFFFFFFFFFFFFF7FF));
EXPECT_EQ(ToDouble(0x7FFFFFFFFFFFFFFF), ToDouble(0x7FFFFFFFFFFFFC00));
EXPECT_NE(ToDouble(0x7FFFFFFFFFFFFFFF), ToDouble(0x7FFFFFFFFFFFFBFF));
EXPECT_EQ(ToDouble(0x3FFFFFFFFFFFFFFF), ToDouble(0x3FFFFFFFFFFFFE00));
EXPECT_NE(ToDouble(0x3FFFFFFFFFFFFFFF), ToDouble(0x3FFFFFFFFFFFFDFF));
EXPECT_EQ(ToDouble(0x1000000000000001), 0.0625);
EXPECT_EQ(ToDouble(0x2000000000000001), 0.125);
EXPECT_EQ(ToDouble(0x3000000000000001), 0.1875);
EXPECT_EQ(ToDouble(0x4000000000000001), 0.25);
EXPECT_EQ(ToDouble(0x5000000000000001), 0.3125);
EXPECT_EQ(ToDouble(0x6000000000000001), 0.375);
EXPECT_EQ(ToDouble(0x7000000000000001), 0.4375);
EXPECT_EQ(ToDouble(0x8000000000000001), 0.5);
EXPECT_EQ(ToDouble(0x9000000000000001), 0.5625);
EXPECT_EQ(ToDouble(0xa000000000000001), 0.625);
EXPECT_EQ(ToDouble(0xb000000000000001), 0.6875);
EXPECT_EQ(ToDouble(0xc000000000000001), 0.75);
EXPECT_EQ(ToDouble(0xd000000000000001), 0.8125);
EXPECT_EQ(ToDouble(0xe000000000000001), 0.875);
EXPECT_EQ(ToDouble(0xf000000000000001), 0.9375);
int64_t two_to_53 = int64_t{1} << 53;
EXPECT_EQ(static_cast<int64_t>(ToDouble(0xFFFFFFFFFFFFFFFF) * two_to_53),
two_to_53 - 1);
EXPECT_NE(static_cast<int64_t>(ToDouble(0xFFFFFFFFFFFFFFFF) * two_to_53 * 2),
two_to_53 * 2 - 1);
for (int i = 0; i < 100; ++i) {
EXPECT_EQ(i * ToDouble(0x0000000000000001), ToDouble(i));
}
double exp_values[64];
exp_values[63] = 0.5;
for (int i = 62; i >= 0; --i) exp_values[i] = 0.5 * exp_values[i + 1];
constexpr uint64_t one = 1;
for (int i = 0; i < 64; ++i) {
EXPECT_EQ(ToDouble(one << i), exp_values[i]);
for (int j = 1; j < DBL_MANT_DIG && i - j >= 0; ++j) {
EXPECT_NE(exp_values[i] + exp_values[i - j], exp_values[i]);
EXPECT_EQ(ToDouble((one << i) + (one << (i - j))),
exp_values[i] + exp_values[i - j]);
}
for (int j = DBL_MANT_DIG; i - j >= 0; ++j) {
EXPECT_EQ(exp_values[i] + exp_values[i - j], exp_values[i]);
EXPECT_EQ(ToDouble((one << i) + (one << (i - j))), exp_values[i]);
}
}
}
TEST(GenerateRealTest, U64ToDoubleSignedTest) {
auto ToDouble = [](uint64_t a) {
return GenerateRealFromBits<double, GenerateSignedTag, false>(a);
};
EXPECT_EQ(ToDouble(0x0000000000000000), 5.42101086242752217004e-20);
EXPECT_EQ(ToDouble(0x0000000000000001), 1.084202172485504434e-19);
EXPECT_EQ(ToDouble(0x8000000000000000), -5.42101086242752217004e-20);
EXPECT_EQ(ToDouble(0x8000000000000001), -1.084202172485504434e-19);
const double e_plus = ToDouble(0x0000000000000001);
const double e_minus = ToDouble(0x8000000000000001);
EXPECT_EQ(e_plus, 1.084202172485504434e-19);
EXPECT_EQ(e_minus, -1.084202172485504434e-19);
EXPECT_EQ(ToDouble(0x3fffffffffffffef), 0.499999999999999944489);
EXPECT_EQ(ToDouble(0xbfffffffffffffef), -0.499999999999999944489);
EXPECT_EQ(ToDouble(0x4000000000000000), 0.5);
EXPECT_EQ(ToDouble(0x4000000000000001), 0.5);
EXPECT_EQ(ToDouble(0x40000000000003FF), 0.5);
EXPECT_EQ(ToDouble(0xC000000000000000), -0.5);
EXPECT_EQ(ToDouble(0xC000000000000001), -0.5);
EXPECT_EQ(ToDouble(0xC0000000000003FF), -0.5);
EXPECT_EQ(ToDouble(0x7FFFFFFFFFFFFFFe), 0.999999999999999888978);
EXPECT_EQ(ToDouble(0xFFFFFFFFFFFFFFFe), -0.999999999999999888978);
EXPECT_NE(ToDouble(0x7FFFFFFFFFFFF800), ToDouble(0x7FFFFFFFFFFFF7FF));
EXPECT_LT(ToDouble(0x7FFFFFFFFFFFFFFF), 1.0);
EXPECT_GT(ToDouble(0x7FFFFFFFFFFFFFFF), 0.9999999999);
EXPECT_GT(ToDouble(0xFFFFFFFFFFFFFFFe), -1.0);
EXPECT_LT(ToDouble(0xFFFFFFFFFFFFFFFe), -0.999999999);
EXPECT_EQ(ToDouble(0xFFFFFFFFFFFFFFFe), ToDouble(0xFFFFFFFFFFFFFC00));
EXPECT_EQ(ToDouble(0x7FFFFFFFFFFFFFFF), ToDouble(0x7FFFFFFFFFFFFC00));
EXPECT_NE(ToDouble(0xFFFFFFFFFFFFFFFe), ToDouble(0xFFFFFFFFFFFFF3FF));
EXPECT_NE(ToDouble(0x7FFFFFFFFFFFFFFF), ToDouble(0x7FFFFFFFFFFFF3FF));
EXPECT_EQ(ToDouble(0x1000000000000001), 0.125);
EXPECT_EQ(ToDouble(0x2000000000000001), 0.25);
EXPECT_EQ(ToDouble(0x3000000000000001), 0.375);
EXPECT_EQ(ToDouble(0x4000000000000001), 0.5);
EXPECT_EQ(ToDouble(0x5000000000000001), 0.625);
EXPECT_EQ(ToDouble(0x6000000000000001), 0.75);
EXPECT_EQ(ToDouble(0x7000000000000001), 0.875);
EXPECT_EQ(ToDouble(0x7800000000000001), 0.9375);
EXPECT_EQ(ToDouble(0x7c00000000000001), 0.96875);
EXPECT_EQ(ToDouble(0x7e00000000000001), 0.984375);
EXPECT_EQ(ToDouble(0x7f00000000000001), 0.9921875);
EXPECT_EQ(ToDouble(0x9000000000000001), -0.125);
EXPECT_EQ(ToDouble(0xa000000000000001), -0.25);
EXPECT_EQ(ToDouble(0xb000000000000001), -0.375);
EXPECT_EQ(ToDouble(0xc000000000000001), -0.5);
EXPECT_EQ(ToDouble(0xd000000000000001), -0.625);
EXPECT_EQ(ToDouble(0xe000000000000001), -0.75);
EXPECT_EQ(ToDouble(0xf000000000000001), -0.875);
int64_t two_to_53 = int64_t{1} << 53;
EXPECT_EQ(static_cast<int64_t>(ToDouble(0x7FFFFFFFFFFFFFFF) * two_to_53),
two_to_53 - 1);
EXPECT_EQ(static_cast<int64_t>(ToDouble(0xFFFFFFFFFFFFFFFF) * two_to_53),
-(two_to_53 - 1));
EXPECT_NE(static_cast<int64_t>(ToDouble(0x7FFFFFFFFFFFFFFF) * two_to_53 * 2),
two_to_53 * 2 - 1);
for (int i = 1; i < 100; ++i) {
EXPECT_EQ(i * e_plus, ToDouble(i)) << i;
EXPECT_EQ(i * e_minus, ToDouble(0x8000000000000000 | i)) << i;
}
}
TEST(GenerateRealTest, ExhaustiveFloat) {
auto ToFloat = [](uint64_t a) {
return GenerateRealFromBits<float, GeneratePositiveTag, true>(a);
};
float last_f = 1.0, last_g = 2.0;
uint64_t f_collisions = 0, g_collisions = 0;
uint64_t f_unique = 0, g_unique = 0;
uint64_t total = 0;
auto count = [&](const float r) {
total++;
const float f = 0.0f * (1.0f - r) + 1.0f * r;
if (f >= last_f) {
f_collisions++;
} else {
f_unique++;
last_f = f;
}
const float g = 1.0f * (1.0f - r) + 2.0f * r;
if (g >= last_g) {
g_collisions++;
} else {
g_unique++;
last_g = g;
}
};
size_t limit = absl::GetFlag(FLAGS_absl_random_test_trials);
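  // Walk x downward from 2^64 - 1. Low bits of x that cannot change the
  // float result at the current magnitude are skipped by stepping in the
  // matching power-of-two increments, so every distinct output is visited
  // without needing 2^64 iterations.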
uint64_t x = ~uint64_t(0);
for (; x != 0 && limit > 0;) {
constexpr int kDig = (64 - FLT_MANT_DIG);
uint64_t dec = 1;
uint64_t chk = 0;
const int clz = absl::countl_zero(x);
if (clz < kDig) {
dec <<= (kDig - clz);
chk = (~uint64_t(0)) >> (clz + 1);
}
for (; x > chk && limit > 0; x -= dec) {
count(ToFloat(x));
--limit;
}
}
static_assert(FLT_MANT_DIG == 24,
"The float type is expected to have a 24 bit mantissa.");
if (limit != 0) {
EXPECT_LT(1 << 28, f_unique);
EXPECT_EQ((1 << 24) + 40 * (1 << 23) - 1, f_unique);
EXPECT_EQ(total, f_unique);
EXPECT_EQ(0, f_collisions);
EXPECT_LE(1 << 23, g_unique);
EXPECT_EQ(total - g_unique, g_collisions);
}
}
} | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/random/internal/generate_real.h | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/random/internal/generate_real_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4 |
88b9adff-15bb-48a6-920b-e030f186c9d9 | cpp | tensorflow/tensorflow | all_reduce_folder | third_party/xla/xla/service/all_reduce_folder.cc | third_party/xla/xla/service/all_reduce_folder_test.cc | #include "xla/service/all_reduce_folder.h"
#include <algorithm>
#include <cstdint>
#include <optional>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/log.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/collective_device_list.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/service/all_reduce_key.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
namespace xla {
namespace {
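// Computes replica groups equivalent to applying `replica_groups0` and then
// `replica_groups1` in sequence. For each group of the second all-reduce, the
// contributing replicas from the first all-reduce are collected; the fold
// succeeds only if these contributor sets partition the replicas
// consistently, and returns nullopt otherwise.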
std::optional<std::vector<ReplicaGroup>> FoldReplicaGroups(
absl::Span<const ReplicaGroup> replica_groups0,
absl::Span<const ReplicaGroup> replica_groups1) {
int64_t num_replicas = 0;
for (const ReplicaGroup &rg : replica_groups0) {
for (int64_t id : rg.replica_ids()) {
num_replicas = std::max(num_replicas, id);
}
}
num_replicas++;
std::vector<int> replica_group_no(num_replicas, -1);
for (int group_no = 0; group_no < replica_groups0.size(); ++group_no) {
for (int64_t id : replica_groups0[group_no].replica_ids()) {
replica_group_no[id] = group_no;
}
}
absl::flat_hash_map<std::vector<bool>, int64_t> contributor_set_id;
std::vector<int64_t> contributing_replicas_set_id(num_replicas, 0);
int64_t next_id = 1;
for (const ReplicaGroup &rg : replica_groups1) {
std::vector<bool> contributors(num_replicas, false);
for (int64_t id : rg.replica_ids()) {
int64_t group_no = replica_group_no[id];
for (int64_t contrib : replica_groups0[group_no].replica_ids()) {
if (contributors[contrib]) {
return std::nullopt;
}
contributors[contrib] = true;
}
}
int64_t set_id;
auto it = contributor_set_id.find(contributors);
if (it != contributor_set_id.end()) {
set_id = it->second;
} else {
set_id = next_id++;
contributor_set_id[contributors] = set_id;
}
for (int64_t id : rg.replica_ids()) {
contributing_replicas_set_id[id] = set_id;
}
}
std::vector<ReplicaGroup> new_replica_groups;
new_replica_groups.reserve(contributor_set_id.size());
for (const auto &it : contributor_set_id) {
const std::vector<bool> &contributors = it.first;
const int64_t set_id = it.second;
new_replica_groups.emplace_back();
ReplicaGroup &group = new_replica_groups.back();
for (int64_t replica = 0; replica < num_replicas; ++replica) {
if (contributors[replica]) {
if (contributing_replicas_set_id[replica] != set_id) {
return std::nullopt;
}
group.add_replica_ids(replica);
}
}
}
absl::c_sort(new_replica_groups,
[](const ReplicaGroup &a, const ReplicaGroup &b) {
return a.replica_ids(0) < b.replica_ids(0);
});
return new_replica_groups;
}
}
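// Replaces all-reduce(all-reduce(x)) with a single all-reduce when both ops
// have matching reduction computations and options (compared via
// GetAllReduceKey with replica groups ignored) and their replica groups can
// be folded.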
absl::StatusOr<bool> AllReduceFolder::Run(
HloModule *module,
const absl::flat_hash_set<absl::string_view> &execution_threads) {
if (hlo_query::ContainsLayoutConstrainedAllReduce(*module)) {
VLOG(1) << "Skip AllReduceFolder because the module contains all-reduce "
"with constrained layouts";
return false;
}
int64_t next_channel_id = hlo_query::NextChannelId(*module);
bool changed = false;
for (auto computation : module->computations(execution_threads)) {
for (HloInstruction *inst : computation->MakeInstructionPostOrder()) {
if (inst->opcode() != HloOpcode::kAllReduce ||
inst->operand(0)->opcode() != HloOpcode::kAllReduce) {
continue;
}
auto *ar0 = Cast<HloAllReduceInstruction>(inst->mutable_operand(0));
auto *ar1 = Cast<HloAllReduceInstruction>(inst);
if (ar0->user_count() != 1) {
continue;
}
      std::optional<AllReduceKey> key0 = GetAllReduceKey(
          ar0, /*domain_map=*/nullptr, /*ignore_replica_groups=*/true);
      std::optional<AllReduceKey> key1 = GetAllReduceKey(
          ar1, /*domain_map=*/nullptr, /*ignore_replica_groups=*/true);
if (!key0 || !key1 || *key0 != *key1 || ar0->replica_groups().empty() ||
ar1->replica_groups().empty()) {
continue;
}
std::optional<std::vector<ReplicaGroup>> new_replica_groups =
FoldReplicaGroups(ar0->replica_groups(), ar1->replica_groups());
if (!new_replica_groups) {
continue;
}
std::optional<int64_t> channel_id;
if (ar0->channel_id()) {
channel_id = next_channel_id++;
}
HloInstruction *new_ar =
computation->AddInstruction(HloInstruction::CreateAllReduce(
ar0->shape(), ar0->operands(), ar0->to_apply(),
CollectiveDeviceList(*new_replica_groups),
              /*constrain_layout=*/false, channel_id,
ar0->use_global_device_ids()));
TF_RETURN_IF_ERROR(ar1->ReplaceAllUsesWith(new_ar));
TF_RETURN_IF_ERROR(computation->RemoveInstruction(ar1));
TF_RETURN_IF_ERROR(computation->RemoveInstruction(ar0));
changed = true;
}
}
return changed;
}
} | #include "xla/service/all_reduce_folder.h"
#include <cstddef>
#include <initializer_list>
#include <memory>
#include "absl/algorithm/container.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
namespace matcher = xla::testing::opcode_matchers;
using ::testing::HasSubstr;
class AllReduceFolderTest : public HloTestBase {};
const char *k2AllReduce = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
p0 = f32[8] parameter(0)
ar0 = f32[8] all-reduce(p0), replica_groups=$group_0, to_apply=sum
ROOT ar1 = f32[8] all-reduce(ar0), replica_groups=$group_1, to_apply=sum
}
)";
size_t AllReduceCount(HloModule *module) {
return absl::c_count_if(module->entry_computation()->instructions(),
HloPredicateIsOp<HloOpcode::kAllReduce>);
}
void ExpectOneAllReduce(HloModule *module,
absl::string_view target_replica_groups) {
EXPECT_EQ(AllReduceCount(module), 1);
HloInstruction *root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, matcher::AllReduce(matcher::Parameter(0)));
EXPECT_THAT(root->ToString(), HasSubstr(target_replica_groups));
}
TEST_F(AllReduceFolderTest, Simple) {
TF_ASSERT_OK_AND_ASSIGN(
auto module, RunAndCheckHloRewrite(k2AllReduce, AllReduceFolder(), true,
{{"$group_0", "{{0,1},{2,3}}"},
{"$group_1", "{{0,2},{1,3}}"}}));
ExpectOneAllReduce(module.get(), "replica_groups={{0,1,2,3}}");
}
TEST_F(AllReduceFolderTest, SimpleSwap) {
TF_ASSERT_OK_AND_ASSIGN(
auto module, RunAndCheckHloRewrite(k2AllReduce, AllReduceFolder(), true,
{{"$group_1", "{{0,1},{2,3}}"},
{"$group_0", "{{0,2},{1,3}}"}}));
ExpectOneAllReduce(module.get(), "replica_groups={{0,1,2,3}}");
}
TEST_F(AllReduceFolderTest, BothEmptyReplicaGroups_NotTransformed) {
TF_ASSERT_OK(RunAndCheckHloRewrite(k2AllReduce, AllReduceFolder(), false,
{{"$group_0", "{}"}, {"$group_1", "{}"}}));
}
TEST_F(AllReduceFolderTest, EmptyReplicaGroups_NotTransformed) {
TF_ASSERT_OK(RunAndCheckHloRewrite(
k2AllReduce, AllReduceFolder(), false,
{{"$group_0", "{}"}, {"$group_1", "{{0,2},{1,3}}"}}));
}
TEST_F(AllReduceFolderTest, MismatchOtherProperties0_NotTransformed) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
p0 = f32[8] parameter(0)
ar0 = f32[8] all-reduce(p0), replica_groups={{0,1},{2,3}}, channel_id=1, to_apply=sum
ROOT ar1 = f32[8] all-reduce(ar0), replica_groups={{0,2},{1,3}}, to_apply=sum
}
)";
TF_ASSERT_OK(RunAndCheckHloRewrite(hlo_string, AllReduceFolder(), false));
}
TEST_F(AllReduceFolderTest, MismatchOtherProperties1_NotTransformed) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
mul {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT mul = f32[] multiply(a, b)
}
ENTRY main {
p0 = f32[8] parameter(0)
ar0 = f32[8] all-reduce(p0), replica_groups={{0,1},{2,3}}, to_apply=sum
ROOT ar1 = f32[8] all-reduce(ar0), replica_groups={{0,2},{1,3}}, to_apply=mul
}
)";
TF_ASSERT_OK(RunAndCheckHloRewrite(hlo_string, AllReduceFolder(), false));
}
TEST_F(AllReduceFolderTest, NotFoldable_NotTransformed) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
p0 = f32[8] parameter(0)
ar0 = f32[8] all-reduce(p0), replica_groups={{0,1},{2,3}}, to_apply=sum
ROOT ar1 = f32[8] all-reduce(ar0), replica_groups={{0,1},{2,3}}, to_apply=sum
}
)";
TF_ASSERT_OK(RunAndCheckHloRewrite(hlo_string, AllReduceFolder(), false));
}
TEST_F(AllReduceFolderTest, Foldable0) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
p0 = f32[8] parameter(0)
ar0 = f32[8] all-reduce(p0), replica_groups={{0,4},{1,5},{2,3},{6,7}}, to_apply=sum
ROOT ar1 = f32[8] all-reduce(ar0), replica_groups={{0,5},{4,1},{2,7},{3,6}}, to_apply=sum
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
RunAndCheckHloRewrite(hlo_string, AllReduceFolder()));
ExpectOneAllReduce(module.get(), "replica_groups={{0,1,4,5},{2,3,6,7}}");
}
TEST_F(AllReduceFolderTest, FoldableChain) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
p0 = f32[8] parameter(0)
ar0 = f32[8] all-reduce(p0), replica_groups={{0,1},{2,3},{4,5},{6,7}}, to_apply=sum
ar1 = f32[8] all-reduce(ar0), replica_groups={{0,2},{1,3},{4,6},{5,7}}, to_apply=sum
ROOT ar2 = f32[8] all-reduce(ar1), replica_groups={{0,4},{1,5},{2,6},{3,7}}, to_apply=sum
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
RunAndCheckHloRewrite(hlo_string, AllReduceFolder()));
ExpectOneAllReduce(module.get(), "replica_groups={{0,1,2,3,4,5,6,7}}");
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/all_reduce_folder.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/all_reduce_folder_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
e3bfde0e-9a6f-443c-be03-78ee0a78278c | cpp | google/arolla | bound_split_conditions | arolla/decision_forest/pointwise_evaluation/bound_split_conditions.h | arolla/decision_forest/pointwise_evaluation/bound_split_conditions_test.cc | #ifndef AROLLA_DECISION_FOREST_POINTWISE_EVALUATION_BOUND_SPLIT_CONDITIONS_H_
#define AROLLA_DECISION_FOREST_POINTWISE_EVALUATION_BOUND_SPLIT_CONDITIONS_H_
#include <memory>
#include <variant>
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/types/span.h"
#include "arolla/decision_forest/split_condition.h"
#include "arolla/decision_forest/split_conditions/interval_split_condition.h"
#include "arolla/decision_forest/split_conditions/set_of_values_split_condition.h"
#include "arolla/memory/frame.h"
#include "arolla/memory/optional_value.h"
#include "arolla/qtype/typed_slot.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla {
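// "Bound" split conditions: each Create() resolves a SplitCondition's input
// ids to typed frame slots, so that evaluating the condition is a direct
// frame read with no per-call slot lookup.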
struct IntervalBoundCondition {
FrameLayout::Slot<OptionalValue<float>> input_slot =
FrameLayout::Slot<OptionalValue<float>>::UnsafeUninitializedSlot();
float left, right;
bool operator()(const ConstFramePtr ctx) const {
OptionalValue<float> v = ctx.Get(input_slot);
return v.present && left <= v.value && v.value <= right;
}
using cond_type = IntervalSplitCondition;
static absl::StatusOr<IntervalBoundCondition> Create(
const std::shared_ptr<const cond_type>& cond,
absl::Span<const TypedSlot> input_slots) {
ASSIGN_OR_RETURN(
auto input_slot,
input_slots[cond->input_id()].ToSlot<OptionalValue<float>>());
return IntervalBoundCondition{input_slot, cond->left(), cond->right()};
}
};
template <class T>
struct SetOfValuesBoundCondition {
FrameLayout::Slot<OptionalValue<T>> input_slot =
FrameLayout::Slot<OptionalValue<T>>::UnsafeUninitializedSlot();
absl::flat_hash_set<T> values;
bool result_if_missed;
bool operator()(const ConstFramePtr ctx) const {
const OptionalValue<T>& v = ctx.Get(input_slot);
return (v.present && values.contains(v.value)) ||
(!v.present && result_if_missed);
}
using cond_type = SetOfValuesSplitCondition<T>;
static absl::StatusOr<SetOfValuesBoundCondition> Create(
const std::shared_ptr<const cond_type>& cond,
absl::Span<const TypedSlot> input_slots) {
ASSIGN_OR_RETURN(
auto input_slot,
input_slots[cond->input_id()].template ToSlot<OptionalValue<T>>());
return SetOfValuesBoundCondition{input_slot, cond->values(),
cond->GetDefaultResultForMissedInput()};
}
};
struct VirtualBoundCondition {
std::shared_ptr<const SplitCondition> condition;
absl::InlinedVector<TypedSlot, 1> inputs;
bool operator()(const ConstFramePtr ctx) const {
return condition->EvaluateCondition(ctx, inputs);
}
using cond_type = SplitCondition;
static absl::StatusOr<VirtualBoundCondition> Create(
const std::shared_ptr<const cond_type>& cond,
absl::Span<const TypedSlot> input_slots) {
VirtualBoundCondition res;
res.condition = cond;
res.inputs.insert(res.inputs.end(), input_slots.begin(), input_slots.end());
return res;
}
};
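// Holds one of several bound condition types in a std::variant. Create()
// tries each alternative in order via dynamic_pointer_cast on the condition's
// concrete cond_type; evaluation dispatches with std::visit, avoiding virtual
// calls for the common interval and set-of-values cases.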
template <typename... Args>
class VariantBoundCondition {
public:
bool operator()(const ConstFramePtr ctx) const {
return std::visit(Visitor{ctx}, bound_condition_);
}
static absl::StatusOr<VariantBoundCondition<Args...>> Create(
const std::shared_ptr<const SplitCondition>& condition,
absl::Span<const TypedSlot> input_slots) {
VariantBoundCondition<Args...> res;
bool initialized = false;
for (auto status :
{res.TryInit<Args>(condition, input_slots, &initialized)...}) {
if (!status.ok()) return status;
}
if (initialized) {
return res;
} else {
return absl::InvalidArgumentError("unsupported SplitCondition");
}
}
private:
template <class T>
absl::Status TryInit(const std::shared_ptr<const SplitCondition>& cond,
absl::Span<const TypedSlot> input_slots,
bool* initialized) {
if (*initialized) return absl::OkStatus();
if (auto casted_cond =
std::dynamic_pointer_cast<const typename T::cond_type>(cond)) {
ASSIGN_OR_RETURN(bound_condition_, T::Create(casted_cond, input_slots));
*initialized = true;
}
return absl::OkStatus();
}
struct Visitor {
const ConstFramePtr context;
explicit Visitor(const ConstFramePtr ctx) : context(ctx) {}
template <class T>
bool operator()(const T& condition) {
return condition(context);
}
};
std::variant<Args...> bound_condition_;
};
}
#endif | #include "arolla/decision_forest/pointwise_evaluation/bound_split_conditions.h"
#include <cmath>
#include <cstdint>
#include <utility>
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "arolla/decision_forest/split_conditions/interval_split_condition.h"
#include "arolla/decision_forest/split_conditions/set_of_values_split_condition.h"
#include "arolla/memory/frame.h"
#include "arolla/memory/memory_allocation.h"
#include "arolla/memory/optional_value.h"
#include "arolla/qtype/typed_slot.h"
#include "arolla/util/bytes.h"
namespace arolla::testing {
namespace {
TEST(BoundConditions, IntervalSplitCondition) {
auto interval_split = IntervalSplit(0, 2, 3);
FrameLayout::Builder bldr;
auto slot = bldr.AddSlot<OptionalValue<float>>();
auto layout = std::move(bldr).Build();
MemoryAllocation alloc(&layout);
FramePtr context = alloc.frame();
using BoundCondition =
VariantBoundCondition<IntervalBoundCondition,
SetOfValuesBoundCondition<int64_t>>;
std::vector<TypedSlot> typed_slots = {TypedSlot::FromSlot(slot)};
ASSERT_OK_AND_ASSIGN(BoundCondition bound_interval,
BoundCondition::Create(interval_split, typed_slots));
context.Set(slot, 3.5);
EXPECT_EQ(bound_interval(context), false);
context.Set(slot, NAN);
EXPECT_EQ(bound_interval(context), false);
context.Set(slot, 2.5);
EXPECT_EQ(bound_interval(context), true);
context.Set(slot, {});
EXPECT_EQ(bound_interval(context), false);
}
TEST(BoundConditions, SetOfValuesSplitCondition) {
auto set_of_values = SetOfValuesSplit<int64_t>(0, {2, 4, 3}, true);
FrameLayout::Builder bldr;
auto slot = bldr.AddSlot<OptionalValue<int64_t>>();
std::vector<TypedSlot> typed_slots = {TypedSlot::FromSlot(slot)};
auto layout = std::move(bldr).Build();
MemoryAllocation alloc(&layout);
FramePtr context = alloc.frame();
using BoundCondition =
VariantBoundCondition<IntervalBoundCondition,
SetOfValuesBoundCondition<int64_t>>;
ASSERT_OK_AND_ASSIGN(BoundCondition bound_set_of_values,
BoundCondition::Create(set_of_values, typed_slots));
context.Set(slot, 3);
EXPECT_EQ(bound_set_of_values(context), true);
context.Set(slot, 5);
EXPECT_EQ(bound_set_of_values(context), false);
context.Set(slot, {});
EXPECT_EQ(bound_set_of_values(context), true);
}
TEST(BoundConditions, VirtualBoundCondition) {
auto set_of_values =
SetOfValuesSplit<Bytes>(0, {Bytes("A"), Bytes("B"), Bytes("C")}, true);
FrameLayout::Builder bldr;
auto slot = bldr.AddSlot<OptionalValue<Bytes>>();
std::vector<TypedSlot> typed_slots = {TypedSlot::FromSlot(slot)};
auto layout = std::move(bldr).Build();
MemoryAllocation alloc(&layout);
FramePtr context = alloc.frame();
using BoundCondition =
VariantBoundCondition<IntervalBoundCondition,
SetOfValuesBoundCondition<int64_t>,
VirtualBoundCondition>;
ASSERT_OK_AND_ASSIGN(BoundCondition bound_set_of_values,
BoundCondition::Create(set_of_values, typed_slots));
context.Set(slot, Bytes("B"));
EXPECT_EQ(bound_set_of_values(context), true);
context.Set(slot, Bytes("D"));
EXPECT_EQ(bound_set_of_values(context), false);
context.Set(slot, {});
EXPECT_EQ(bound_set_of_values(context), true);
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/decision_forest/pointwise_evaluation/bound_split_conditions.h | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/decision_forest/pointwise_evaluation/bound_split_conditions_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
e69fde1a-b1ac-484c-b566-41a0b2e4cb3a | cpp | tensorflow/tensorflow | pattern_matcher | third_party/xla/xla/service/pattern_matcher.h | third_party/xla/xla/service/pattern_matcher_test.cc | #ifndef XLA_SERVICE_PATTERN_MATCHER_H_
#define XLA_SERVICE_PATTERN_MATCHER_H_
#include <cstddef>
#include <cstdint>
#include <ios>
#include <memory>
#include <optional>
#include <ostream>
#include <sstream>
#include <string>
#include <tuple>
#include <type_traits>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/inlined_vector.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/strings/str_replace.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "absl/utility/utility.h"
#include "xla/comparison_util.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_sharding.h"
#include "xla/hlo/ir/ptrvec.h"
#include "xla/layout.h"
#include "xla/layout_util.h"
#include "xla/literal.h"
#include "xla/service/hlo_parser.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
namespace xla {
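// Options for the Match() entry points below. `capture` controls whether
// matched sub-values are written to the capture pointers,
// `single_user_only` additionally requires matched instructions to have a
// single user, and `explain_os`, when non-null, receives a human-readable
// explanation of why a failed match failed.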
struct MatchOption {
bool capture;
bool single_user_only;
std::ostream* explain_os;
};
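// Top-level entry point. When capturing is enabled, the pattern is first run
// with capture disabled so that a failed match cannot leave captures
// partially bound; only a successful dry run is followed by the real
// capturing pass.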
template <typename Value, typename Pattern>
bool Match(Value* value, const Pattern& pattern,
           MatchOption option = {/*capture=*/true, /*single_user_only=*/false,
                                 /*explain_os=*/nullptr}) {
if (option.capture) {
auto new_option = option;
new_option.capture = false;
if (!pattern.Match(value, new_option)) {
return false;
}
}
return pattern.Match(value, option);
}
template <typename Value, typename Pattern>
bool MatchSingleUserOnly(Value* value, const Pattern& pattern) {
  MatchOption option = {/*capture=*/true, /*single_user_only=*/true,
                        /*explain_os=*/nullptr};
return Match(value, pattern, option);
}
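// Variant of Match() for call sites that want diagnostics: if the match
// fails, logging is enabled, and `instr` matches `filter_pattern`, the match
// is re-run with an explanation stream and the failure is logged.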
template <typename FilterPattern, typename Pattern>
bool MatchAndLogIfFailed(HloInstruction* instr, absl::string_view desc,
const Pattern& pattern, bool enable_logging,
const FilterPattern& filter_pattern) {
bool matched = Match(instr, pattern);
if (matched || !enable_logging || !Match(instr, filter_pattern)) {
return matched;
}
std::stringstream os;
  CHECK(!Match(instr, pattern,
               {/*capture=*/false, /*single_user_only=*/false,
                /*explain_os=*/&os}));
LOG(ERROR) << "Failed to match " << desc << ":\n" << os.str();
return false;
}
namespace match {
namespace detail {
#pragma push_macro("EXPLAIN")
#define EXPLAIN \
if (option.explain_os) *option.explain_os
enum {
kIndentInc = 2,
};
inline void Indent(std::ostream* os, int64_t indent) {
*os << "\n";
for (int64_t i = 0; i < indent; ++i) {
*os << " ";
}
}
template <typename T, typename Dummy = void>
struct IsTrivialMatcher {
static constexpr bool value = false;
};
template <typename T>
struct IsTrivialMatcher<T,
typename std::enable_if<T::kIsTrivialMatcher>::type> {
static constexpr bool value = true;
};
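// A pattern that matches only if all of its sub-patterns match, trying them
// in order and stopping at the first failure. Sub-patterns can be appended
// via the AllOf() overloads below, which flatten nested AllOfPatterns.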
template <typename Item, typename... Patterns>
class AllOfPattern {
public:
explicit AllOfPattern(const Patterns&... patterns) : patterns_(patterns...) {}
bool Match(const Item* item, MatchOption option) const {
bool matched = MatchImpl(item, option, std::integral_constant<size_t, 0>());
DCHECK(matched || !option.capture);
return matched;
}
bool Match(Item* item, MatchOption option) const {
bool matched = MatchImpl(item, option, std::integral_constant<size_t, 0>());
DCHECK(matched || !option.capture);
return matched;
}
void DescribeTo(std::ostream* os, int64_t indent = 0) const {
DescribeToImpl(os, std::integral_constant<size_t, 0>(), indent);
}
const std::tuple<Patterns...>& patterns() const { return patterns_; }
private:
template <typename ItemType, size_t index>
bool MatchImpl(ItemType* item, MatchOption option,
std::integral_constant<size_t, index>) const {
return std::get<index>(patterns_).Match(item, option) &&
MatchImpl(item, option, std::integral_constant<size_t, index + 1>());
}
template <typename ItemType>
bool MatchImpl(ItemType* item, MatchOption option,
std::integral_constant<size_t, sizeof...(Patterns)>) const {
return true;
}
template <size_t index>
void DescribeToImpl(std::ostream* os, std::integral_constant<size_t, index>,
int64_t indent) const {
constexpr bool first_is_trivial =
IsTrivialMatcher<typename std::remove_reference<decltype(std::get<0>(
patterns_))>::type>::value;
constexpr bool is_last = index == sizeof...(Patterns) - 1;
const auto& submatcher = std::get<index>(patterns_);
auto print_bulleted_item = [&] {
*os << " * ";
submatcher.DescribeTo(os, indent + 3);
if (!is_last) {
*os << " AND";
Indent(os, indent);
}
};
if (index == 0) {
if (first_is_trivial || is_last) {
submatcher.DescribeTo(os, indent + kIndentInc);
if (sizeof...(Patterns) > 2) {
*os << ":";
Indent(os, indent);
}
} else {
*os << "all of:";
Indent(os, indent);
print_bulleted_item();
}
} else if (first_is_trivial && index == 1 && sizeof...(Patterns) == 2) {
*os << " ";
submatcher.DescribeTo(os, indent);
} else {
print_bulleted_item();
}
DescribeToImpl(os, std::integral_constant<size_t, index + 1>(), indent);
}
void DescribeToImpl(std::ostream* os,
std::integral_constant<size_t, sizeof...(Patterns)>,
int64_t indent) const {}
std::tuple<Patterns...> patterns_;
};
}
template <typename Item, typename... Patterns>
auto AllOf(const Patterns&... patterns) {
return detail::AllOfPattern<typename std::remove_const<Item>::type,
Patterns...>(patterns...);
}
template <typename Item, typename... InnerPs, typename... OuterPs>
auto AllOf(const detail::AllOfPattern<Item, InnerPs...>& inner_p,
const OuterPs&... outer_ps) {
auto make_all_of = [](const InnerPs&... inner_ps,
const OuterPs&... outer_ps) {
return detail::AllOfPattern<typename std::remove_const<Item>::type,
InnerPs..., OuterPs...>(inner_ps...,
outer_ps...);
};
return absl::apply(make_all_of, std::tuple_cat(inner_p.patterns(),
std::make_tuple(outer_ps...)));
}
namespace detail {
template <typename LayoutType, typename Impl>
class LayoutPattern;
class LayoutPatternBaseImpl {
public:
bool Match(const ::xla::Layout* layout, MatchOption option) const {
if (layout == nullptr) {
EXPLAIN << "Layout is null";
return false;
}
return true;
}
void DescribeTo(std::ostream* os, int64_t indent = 0) const {
*os << "a layout";
}
static constexpr bool kIsTrivialMatcher = true;
};
class LayoutPatternEqualImpl {
public:
explicit constexpr LayoutPatternEqualImpl(const ::xla::Layout* layout)
: layout_(layout) {}
bool Match(const ::xla::Layout* layout, MatchOption option) const {
if (!LayoutUtil::Equal(*layout_, *layout)) {
EXPLAIN << "Layout " << LayoutUtil::HumanString(*layout)
<< " is not equal to expected "
<< LayoutUtil::HumanString(*layout_);
return false;
}
return true;
}
void DescribeTo(std::ostream* os, int64_t indent = 0) const {
*os << "equal to " << LayoutUtil::HumanString(*layout_);
}
private:
const ::xla::Layout* layout_;
};
class LayoutPatternMinorToMajorImpl {
public:
explicit LayoutPatternMinorToMajorImpl(
absl::Span<const int64_t> minor_to_major)
: minor_to_major_(minor_to_major.begin(), minor_to_major.end()) {}
bool Match(const ::xla::Layout* layout, MatchOption option) const {
if (layout->minor_to_major() != minor_to_major_) {
EXPLAIN << "Layout does not have minor to major ["
<< absl::StrJoin(minor_to_major_, ",") << "]";
return false;
}
return true;
}
void DescribeTo(std::ostream* os, int64_t indent = 0) const {
*os << "with minor to major [" << absl::StrJoin(minor_to_major_, ",")
<< "]";
}
private:
absl::InlinedVector<int64_t, 8> minor_to_major_;
};
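// A pattern over Layouts. Each builder method (EqualTo, WithMinorToMajor)
// appends another Impl through AllOf, yielding a new LayoutPattern; on a
// successful match with capture enabled, the layout is stored into
// `matched_layout`.
//
// A minimal usage sketch using only matchers defined in this header (local
// names are hypothetical):
//   const Layout* captured = nullptr;
//   bool ok = Match(&layout,
//                   match::Layout(&captured).WithMinorToMajor({1, 0}));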
template <typename LayoutType, typename Impl>
class LayoutPattern {
private:
template <typename NewImpl>
auto AppendImpl(NewImpl new_impl) const {
auto new_allof = AllOf<::xla::Layout>(impl_, std::move(new_impl));
return LayoutPattern<LayoutType, decltype(new_allof)>(std::move(new_allof),
matched_layout_);
}
public:
explicit constexpr LayoutPattern(const Impl& impl,
LayoutType** matched_layout)
: impl_(impl), matched_layout_(matched_layout) {}
bool Match(const ::xla::Layout* layout, MatchOption option) const {
if (impl_.Match(layout, option)) {
if (option.capture && matched_layout_) {
*matched_layout_ = layout;
}
return true;
}
return false;
}
bool Match(::xla::Layout* layout, MatchOption option) const {
if (impl_.Match(layout, option)) {
if (option.capture && matched_layout_) {
*matched_layout_ = layout;
}
return true;
}
return false;
}
void DescribeTo(std::ostream* os, int64_t indent = 0) const {
impl_.DescribeTo(os, indent);
}
constexpr auto EqualTo(const ::xla::Layout* layout) const {
return AppendImpl(LayoutPatternEqualImpl(layout));
}
constexpr auto WithMinorToMajor(
absl::Span<const int64_t> minor_to_major) const {
return AppendImpl(LayoutPatternMinorToMajorImpl(minor_to_major));
}
private:
Impl impl_;
LayoutType** matched_layout_;
};
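// A pattern that matches if any one of its sub-patterns matches, trying them
// in order. On failure with an explanation stream attached, it reports why
// every alternative failed; on success with capture enabled, the winning
// sub-pattern is re-run to perform the capture.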
template <typename Item, typename... Patterns>
class AnyOfPattern {
public:
explicit AnyOfPattern(const Patterns&... patterns) : patterns_(patterns...) {}
bool Match(const Item* item, MatchOption option) const {
return MatchImpl(item, option);
}
bool Match(Item* item, MatchOption option) const {
return MatchImpl(item, option);
}
void DescribeTo(std::ostream* os, int64_t indent = 0) const {
*os << "any of:";
Indent(os, indent);
DescribeToImpl(os, std::integral_constant<size_t, 0>(), indent);
}
private:
template <typename ItemType>
bool MatchImpl(ItemType* item, MatchOption option) const {
std::optional<std::stringstream> explanation;
MatchOption new_option = option;
if (option.explain_os) {
new_option.explain_os = &explanation.emplace();
}
bool rv = MatchRecursiveImpl(item, new_option,
std::integral_constant<size_t, 0>());
if (!rv && option.explain_os) {
EXPLAIN << "None of the following matchers succeeded:";
EXPLAIN << explanation->str();
}
return rv;
}
template <typename ItemType, size_t index>
bool MatchRecursiveImpl(ItemType* item, MatchOption option,
std::integral_constant<size_t, index>) const {
auto new_option = option;
new_option.capture = false;
std::optional<std::stringstream> explanation;
if (option.explain_os) {
new_option.explain_os = &explanation.emplace();
}
if (std::get<index>(patterns_).Match(item, new_option)) {
if (option.capture) {
bool matched = std::get<index>(patterns_).Match(item, option);
DCHECK(matched);
}
return true;
}
if (option.explain_os) {
EXPLAIN << "\nMatcher #" << index + 1;
EXPLAIN << "\n - ";
std::get<index>(patterns_).DescribeTo(option.explain_os, 3);
EXPLAIN << "\nfailed with";
EXPLAIN << "\n - ";
EXPLAIN << absl::StrReplaceAll(explanation->str(), {{"\n", "\n "}});
}
return MatchRecursiveImpl(item, option,
std::integral_constant<size_t, index + 1>());
}
template <typename ItemType>
bool MatchRecursiveImpl(
ItemType* item, MatchOption option,
std::integral_constant<size_t, sizeof...(Patterns)>) const {
return false;
}
template <size_t index>
void DescribeToImpl(std::ostream* os, std::integral_constant<size_t, index>,
int64_t indent) const {
*os << " - ";
std::get<index>(patterns_).DescribeTo(os, indent + 3);
if (index != sizeof...(Patterns) - 1) {
*os << " OR";
Indent(os, indent);
}
DescribeToImpl(os, std::integral_constant<size_t, index + 1>(), indent);
}
void DescribeToImpl(std::ostream* os,
std::integral_constant<size_t, sizeof...(Patterns)>,
int64_t indent) const {}
std::tuple<Patterns...> patterns_;
};
}
inline constexpr auto Layout(const ::xla::Layout** matched_layout = nullptr) {
return detail::LayoutPattern<const ::xla::Layout,
detail::LayoutPatternBaseImpl>(
detail::LayoutPatternBaseImpl(), matched_layout);
}
inline constexpr auto Layout(::xla::Layout** matched_layout) {
return detail::LayoutPattern<::xla::Layout, detail::LayoutPatternBaseImpl>(
detail::LayoutPatternBaseImpl(), matched_layout);
}
namespace detail {
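// Each Impl class below checks a single property of a Shape; ShapePattern
// chains them together with AllOf, mirroring the LayoutPattern machinery
// above.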
template <typename ShapeType, typename Impl>
class ShapePattern;
class ShapePatternBaseImpl {
public:
bool Match(const ::xla::Shape* shape, MatchOption option) const {
if (shape == nullptr) {
EXPLAIN << "Shape is null";
}
return shape != nullptr;
}
void DescribeTo(std::ostream* os, int64_t indent = 0) const {
*os << "a shape";
}
static constexpr bool kIsTrivialMatcher = true;
};
class ShapePatternEqualImpl {
public:
explicit constexpr ShapePatternEqualImpl(const ::xla::Shape* shape)
: shape_(shape) {}
bool Match(const ::xla::Shape* shape, MatchOption option) const {
if (!ShapeUtil::Equal(*shape_, *shape)) {
EXPLAIN << "Shape not equal to "
<< ShapeUtil::HumanStringWithLayout(*shape_);
return false;
}
return true;
}
void DescribeTo(std::ostream* os, int64_t indent = 0) const {
*os << "equal to " << ShapeUtil::HumanStringWithLayout(*shape_);
}
private:
const ::xla::Shape* shape_;
};
class ShapePatternCompatibleImpl {
public:
explicit constexpr ShapePatternCompatibleImpl(const ::xla::Shape* shape)
: shape_(shape) {}
bool Match(const ::xla::Shape* shape, MatchOption option) const {
if (!ShapeUtil::Compatible(*shape_, *shape)) {
EXPLAIN << "Shape not compatible with "
<< ShapeUtil::HumanString(*shape_);
return false;
}
return true;
}
void DescribeTo(std::ostream* os, int64_t indent = 0) const {
*os << "compatible with " << ShapeUtil::HumanString(*shape_);
}
private:
const ::xla::Shape* shape_;
};
class ShapePatternElementTypeImpl {
public:
explicit constexpr ShapePatternElementTypeImpl(PrimitiveType element_type)
: element_type_(element_type) {}
bool Match(const ::xla::Shape* shape, MatchOption option) const {
if (shape->element_type() != element_type_) {
EXPLAIN << "Shape does not have element type "
<< PrimitiveType_Name(element_type_);
return false;
}
return true;
}
void DescribeTo(std::ostream* os, int64_t indent = 0) const {
*os << "with element type " << PrimitiveType_Name(element_type_);
}
private:
PrimitiveType element_type_;
};
class ShapePatternDimsImpl {
public:
explicit ShapePatternDimsImpl(absl::Span<const int64_t> dims)
: dims_(dims.begin(), dims.end()) {}
bool Match(const ::xla::Shape* shape, MatchOption option) const {
if (shape->dimensions() != dims_) {
EXPLAIN << "Shape does not have dimensions [" << absl::StrJoin(dims_, ",")
<< "]";
return false;
}
return true;
}
void DescribeTo(std::ostream* os, int64_t indent = 0) const {
*os << "with dimensions [" << absl::StrJoin(dims_, ",") << "]";
}
private:
absl::InlinedVector<int64_t, 8> dims_;
};
class ShapePatternIsScalarImpl {
public:
explicit constexpr ShapePatternIsScalarImpl() = default;
bool Match(const ::xla::Shape* shape, MatchOption option) const {
if (!ShapeUtil::IsScalar(*shape)) {
EXPLAIN << "Shape is not a scalar";
return false;
}
return true;
}
void DescribeTo(std::ostream* os, int64_t indent = 0) const {
*os << "that represents a scalar";
}
};
class ShapePatternIsArrayImpl {
public:
explicit constexpr ShapePatternIsArrayImpl() = default;
bool Match(const ::xla::Shape* shape, MatchOption option) const {
if (!shape->IsArray()) {
EXPLAIN << "Shape is not an array";
return false;
}
return true;
}
void DescribeTo(std::ostream* os, int64_t indent = 0) const {
*os << "that represents an array";
}
};
class ShapePatternIsDenseArrayImpl {
public:
explicit constexpr ShapePatternIsDenseArrayImpl() = default;
bool Match(const ::xla::Shape* shape, MatchOption option) const {
if (!LayoutUtil::IsDenseArray(*shape)) {
EXPLAIN << "Shape is not a dense array";
return false;
}
return true;
}
void DescribeTo(std::ostream* os, int64_t indent = 0) const {
*os << "that represents a dense array";
}
};
class ShapePatternIsTupleImpl {
public:
explicit constexpr ShapePatternIsTupleImpl() = default;
bool Match(const ::xla::Shape* shape, MatchOption option) const {
if (!shape->IsTuple()) {
EXPLAIN << "Shape is not a tuple";
return false;
}
return true;
}
void DescribeTo(std::ostream* os, int64_t indent = 0) const {
*os << "that represents a tuple";
}
};
class ShapePatternEffectiveScalarImpl {
public:
explicit constexpr ShapePatternEffectiveScalarImpl() = default;
bool Match(const ::xla::Shape* shape, MatchOption option) const {
if (!ShapeUtil::IsEffectiveScalar(*shape)) {
EXPLAIN << "Shape is not an effective scalar";
return false;
}
return true;
}
void DescribeTo(std::ostream* os, int64_t indent = 0) const {
*os << "that is an effective scalar";
}
};
class ShapePatternRankImpl {
public:
explicit constexpr ShapePatternRankImpl(int64_t rank) : rank_(rank) {}
bool Match(const ::xla::Shape* shape, MatchOption option) const {
if (shape->rank() != rank_) {
if (rank_ == 0) {
EXPLAIN << "Shape is not a scalar";
} else {
EXPLAIN << "Shape does not have rank " << rank_;
}
return false;
}
return true;
}
void DescribeTo(std::ostream* os, int64_t indent = 0) const {
if (rank_ == 0) {
*os << "that is a scalar";
} else {
*os << "that has " << rank_ << " dimension" << (rank_ != 1 ? "s" : "");
}
}
private:
int64_t rank_;
};
template <typename LayoutType, typename LayoutImpl>
class ShapePatternLayoutImpl {
public:
explicit constexpr ShapePatternLayoutImpl(
const LayoutPattern<LayoutType, LayoutImpl>& layout)
: layout_(layout) {}
bool Match(const ::xla::Shape* shape, MatchOption option) const {
return LayoutUtil::HasLayout(*shape) &&
layout_.Match(&shape->layout(), option);
}
bool Match(::xla::Shape* shape, MatchOption option) const {
if (!LayoutUtil::HasLayout(*shape)) {
EXPLAIN << "Shape does not have a layout";
return false;
}
if (!layout_.Match(shape->mutable_layout(), option)) {
EXPLAIN << "\nin layout";
return false;
}
return true;
}
void DescribeTo(std::ostream* os, int64_t indent = 0) const {
*os << "with";
Indent(os, indent + kIndentInc);
layout_.DescribeTo(os, indent + kIndentInc);
}
private:
LayoutPattern<LayoutType, LayoutImpl> layout_;
};
template <typename SubshapeType, typename SubshapeImpl>
class ShapePatternSubshapeImpl {
public:
explicit ShapePatternSubshapeImpl(
ShapeIndexView index,
const ShapePattern<SubshapeType, SubshapeImpl>& subshape)
: index_(index), subshape_(subshape) {}
bool Match(const ::xla::Shape* shape, MatchOption option) const {
return MatchImpl(shape, option);
}
bool Match(::xla::Shape* shape, MatchOption option) const {
return MatchImpl(shape, option);
}
void DescribeTo(std::ostream* os, int64_t indent = 0) const {
*os << "with subshape at index " << ShapeIndex(index_) << " which is";
Indent(os, indent + kIndentInc);
subshape_.DescribeTo(os, indent + kIndentInc);
}
private:
::xla::Shape* GetSubshape(::xla::Shape* shape) const {
return ShapeUtil::GetMutableSubshape(shape, index_);
}
const ::xla::Shape* GetSubshape(const ::xla::Shape* shape) const {
return &ShapeUtil::GetSubshape(*shape, index_);
}
template <typename ShapeType>
bool MatchImpl(ShapeType* shape, MatchOption option) const {
if (!ShapeUtil::IndexIsValid(*shape, index_)) {
EXPLAIN << "No subshape at " << ShapeIndex(index_);
return false;
}
if (!subshape_.Match(GetSubshape(shape), option)) {
EXPLAIN << "\nin subshape at " << ShapeIndex(index_);
return false;
}
return true;
}
ShapeIndexView index_;
ShapePattern<SubshapeType, SubshapeImpl> subshape_;
};
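// A pattern that matches ::xla::Shape. Each With*/Is* builder method below
// appends one more constraint (via AllOf) and returns a new, more specific
// pattern.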
template <typename ShapeType, typename Impl>
class ShapePattern {
private:
template <typename NewImpl>
auto AppendImpl(NewImpl new_impl) const {
auto new_all_of = AllOf<::xla::Shape>(impl_, std::move(new_impl));
return ShapePattern<ShapeType, decltype(new_all_of)>(std::move(new_all_of),
matched_shape_);
}
public:
explicit constexpr ShapePattern(const Impl& impl, ShapeType** matched_shape)
: impl_(impl), matched_shape_(matched_shape) {}
bool Match(const ::xla::Shape* shape, MatchOption option) const {
if (impl_.Match(shape, option)) {
if (option.capture && matched_shape_) {
*matched_shape_ = shape;
}
return true;
}
if (shape) {
EXPLAIN << "\nin "
<< (shape->has_layout() ? ShapeUtil::HumanStringWithLayout(*shape)
: ShapeUtil::HumanString(*shape));
}
return false;
}
bool Match(::xla::Shape* shape, MatchOption option) const {
if (impl_.Match(shape, option)) {
if (option.capture && matched_shape_) {
*matched_shape_ = shape;
}
return true;
}
EXPLAIN << "\nin "
<< (shape->has_layout() ? ShapeUtil::HumanStringWithLayout(*shape)
: ShapeUtil::HumanString(*shape));
return false;
}
void DescribeTo(std::ostream* os, int64_t indent = 0) const {
return impl_.DescribeTo(os, indent);
}
constexpr auto EqualTo(const ::xla::Shape* shape) const {
return AppendImpl(ShapePatternEqualImpl(shape));
}
constexpr auto CompatibleTo(const ::xla::Shape* shape) const {
return AppendImpl(ShapePatternCompatibleImpl(shape));
}
constexpr auto WithElementType(PrimitiveType element_type) const {
return AppendImpl(ShapePatternElementTypeImpl(element_type));
}
constexpr auto WithDims(absl::Span<const int64_t> dims) const {
return AppendImpl(ShapePatternDimsImpl(dims));
}
constexpr auto IsScalar() const {
return AppendImpl(ShapePatternIsScalarImpl());
}
constexpr auto IsArray() const {
return AppendImpl(ShapePatternIsArrayImpl());
}
constexpr auto IsTuple() const {
return AppendImpl(ShapePatternIsTupleImpl());
}
constexpr auto IsEffectiveScalar() const {
return AppendImpl(ShapePatternEffectiveScalarImpl());
}
constexpr auto WithRank(int64_t rank) const {
return AppendImpl(ShapePatternRankImpl(rank));
}
template <typename LayoutType, typename LayoutImpl>
auto WithLayout(const LayoutPattern<LayoutType, LayoutImpl>& layout) const {
return AppendImpl(ShapePatternLayoutImpl<LayoutType, LayoutImpl>(layout));
}
constexpr auto WithLayout(absl::Span<const int64_t> minor_to_major) const {
return WithLayout(Layout().WithMinorToMajor(minor_to_major));
}
constexpr auto WithLayoutEqualTo(const ::xla::Layout* layout) const {
return WithLayout(Layout().EqualTo(layout));
}
constexpr auto IsDenseArray() const {
return AppendImpl(ShapePatternIsDenseArrayImpl());
}
template <typename SubshapeType, typename SubshapeImpl>
auto WithSubshape(
ShapeIndexView index,
const ShapePattern<SubshapeType, SubshapeImpl>& subshape) const {
return AppendImpl(
ShapePatternSubshapeImpl<SubshapeType, SubshapeImpl>(index, subshape));
}
ShapePattern<ShapeType,
AllOfPattern<::xla::Shape, Impl,
ShapePatternSubshapeImpl<
const ::xla::Shape,
AllOfPattern<::xla::Shape, ShapePatternBaseImpl,
ShapePatternEqualImpl>>>>
WithSubshapeEqualTo(ShapeIndexView index, const ::xla::Shape* shape) const {
return WithSubshape(index,
ShapePattern<const ::xla::Shape, ShapePatternBaseImpl>(
ShapePatternBaseImpl(), nullptr)
.EqualTo(shape));
}
ShapePattern<ShapeType,
AllOfPattern<::xla::Shape, Impl,
ShapePatternSubshapeImpl<
const ::xla::Shape,
AllOfPattern<::xla::Shape, ShapePatternBaseImpl,
ShapePatternCompatibleImpl>>>>
WithSubshapeCompatibleTo(ShapeIndexView index,
const ::xla::Shape* shape) const {
return WithSubshape(index,
ShapePattern<const ::xla::Shape, ShapePatternBaseImpl>(
ShapePatternBaseImpl(), nullptr)
.CompatibleTo(shape));
}
private:
Impl impl_;
ShapeType** matched_shape_;
};
}
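// Creates a ShapePattern that matches any non-null Shape and, when capture is
// enabled, stores the matched Shape in *matched_shape (if non-null).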
inline constexpr auto Shape(const ::xla::Shape** matched_shape = nullptr) {
return detail::ShapePattern<const ::xla::Shape, detail::ShapePatternBaseImpl>(
detail::ShapePatternBaseImpl(), matched_shape);
}
inline constexpr auto Shape(::xla::Shape** matched_shape) {
return detail::ShapePattern<::xla::Shape, detail::ShapePatternBaseImpl>(
detail::ShapePatternBaseImpl(), matched_shape);
}
namespace detail {
inline HloInstruction* HloOperand(HloInstruction* instr, int64_t idx) {
return instr->mutable_operand(idx);
}
inline const HloInstruction* HloOperand(const HloInstruction* instr,
int64_t idx) {
return instr->operand(idx);
}
inline std::string InstToString(const HloInstruction* inst) {
return inst->ToString(
HloPrintOptions().set_print_metadata(false).set_print_percent(false));
}
template <typename HloInstructionType, typename Impl>
class HloInstructionPattern;
class HloInstructionPatternBaseImpl {
public:
bool Match(const ::xla::HloInstruction* inst, MatchOption option) const {
if (inst == nullptr) {
EXPLAIN << "HloInstruction* is null";
return false;
}
return true;
}
void DescribeTo(std::ostream* os, int64_t indent = 0) const {
*os << "an HloInstruction";
}
static constexpr bool kIsTrivialMatcher = true;
};
class HloInstructionPatternNameImpl {
public:
explicit HloInstructionPatternNameImpl(absl::string_view name)
: name_(name) {}
bool Match(const ::xla::HloInstruction* inst, MatchOption option) const {
if (inst->name() != name_) {
EXPLAIN << "HloInstruction not named \"" << name_ << "\"";
return false;
}
return true;
}
void DescribeTo(std::ostream* os, int64_t indent = 0) const {
*os << "named \"" << name_ << "\"";
}
private:
absl::string_view name_;
};
class HloInstructionIsImpl {
public:
explicit HloInstructionIsImpl(const HloInstruction* inst) : inst_(inst) {}
bool Match(const ::xla::HloInstruction* inst, MatchOption option) const {
if (inst != inst_) {
EXPLAIN << "HloInstruction " << std::hex << std::nouppercase
<< std::showbase << reinterpret_cast<uint64_t>(inst) << " is not "
<< reinterpret_cast<uint64_t>(inst_) << " ("
<< InstToString(inst_) << ")";
return false;
}
return true;
}
void DescribeTo(std::ostream* os, int64_t indent = 0) const {
*os << "which is " << std::hex << std::nouppercase << std::showbase
<< reinterpret_cast<uint64_t>(inst_) << " (" << InstToString(inst_)
<< ")";
}
private:
const HloInstruction* inst_;
};
class HloInstructionPatternOpcodeImpl {
public:
explicit constexpr HloInstructionPatternOpcodeImpl(HloOpcode opcode,
bool invert)
: opcode_(opcode), invert_(invert) {}
bool Match(const ::xla::HloInstruction* inst, MatchOption option) const {
if (invert_ && inst->opcode() == opcode_) {
EXPLAIN << "HloInstruction has opcode " << opcode_
<< ", expected anything else";
return false;
}
if (!invert_ && inst->opcode() != opcode_) {
EXPLAIN << "HloInstruction doesn't have opcode " << opcode_;
return false;
}
return true;
}
void DescribeTo(std::ostream* os, int64_t indent = 0) const {
if (!invert_) {
*os << "with opcode " << opcode_;
} else {
*os << "with any opcode other than " << opcode_;
}
}
private:
HloOpcode opcode_;
bool invert_;
};
class HloInstructionCustomCallTargetImpl {
public:
explicit HloInstructionCustomCallTargetImpl(
absl::Span<const absl::string_view> custom_call_targets)
: custom_call_targets_(custom_call_targets.begin(),
custom_call_targets.end()) {}
bool Match(const ::xla::HloInstruction* inst, MatchOption option) const {
if (inst->opcode() != HloOpcode::kCustomCall ||
!absl::c_linear_search(custom_call_targets_,
inst->custom_call_target())) {
if (custom_call_targets_.size() == 1) {
EXPLAIN << "HloInstruction is not a custom call with a target '"
<< custom_call_targets_.front() << "'";
} else {
EXPLAIN << "HloInstruction is not a custom call with a target in {"
<< absl::StrJoin(custom_call_targets_, ", ") << "}";
}
return false;
}
return true;
}
void DescribeTo(std::ostream* os, int64_t indent = 0) const {
if (custom_call_targets_.size() == 1) {
*os << "custom call with target '" << custom_call_targets_.front() << "'";
} else {
*os << "custom call with target in {"
<< absl::StrJoin(custom_call_targets_, ", ") << "}";
}
}
private:
absl::InlinedVector<std::string, 1> custom_call_targets_;
};
class HloInstructionPatternNumOperandsImpl {
public:
explicit constexpr HloInstructionPatternNumOperandsImpl(int64_t num_operands)
: num_operands_(num_operands) {}
bool Match(const ::xla::HloInstruction* inst, MatchOption option) const {
if (inst->operand_count() != num_operands_) {
EXPLAIN << "HloInstruction doesn't have " << num_operands_ << " operands";
return false;
}
return true;
}
void DescribeTo(std::ostream* os, int64_t indent = 0) const {
*os << "with " << num_operands_ << " operand"
<< (num_operands_ != 1 ? "s" : "");
}
private:
int64_t num_operands_;
};
template <typename ShapeType, typename ShapeImpl>
class HloInstructionPatternShapeImpl {
public:
explicit constexpr HloInstructionPatternShapeImpl(
const ShapePattern<ShapeType, ShapeImpl>& shape)
: shape_(shape) {}
bool Match(const ::xla::HloInstruction* inst, MatchOption option) const {
if (!shape_.Match(&inst->shape(), option)) {
EXPLAIN << "\nin output shape";
return false;
}
return true;
}
bool Match(::xla::HloInstruction* inst, MatchOption option) const {
if (!shape_.Match(inst->mutable_shape(), option)) {
EXPLAIN << "\nin output shape";
return false;
}
return true;
}
void DescribeTo(std::ostream* os, int64_t indent = 0) const {
*os << "outputting";
Indent(os, indent + kIndentInc);
shape_.DescribeTo(os, indent + kIndentInc);
}
private:
ShapePattern<ShapeType, ShapeImpl> shape_;
};
template <typename OperandType, typename OperandImpl>
class HloInstructionPatternOperandImpl {
public:
explicit constexpr HloInstructionPatternOperandImpl(
int64_t operand_index,
const HloInstructionPattern<OperandType, OperandImpl>& operand)
: operand_index_(operand_index), operand_(operand) {}
bool Match(const ::xla::HloInstruction* inst, MatchOption option) const {
return MatchImpl(inst, option);
}
bool Match(::xla::HloInstruction* inst, MatchOption option) const {
return MatchImpl(inst, option);
}
void DescribeTo(std::ostream* os, int64_t indent = 0) const {
*os << "with operand " << operand_index_ << " which is:";
Indent(os, indent + kIndentInc);
operand_.DescribeTo(os, indent + kIndentInc);
}
private:
template <typename HloInstructionType>
bool MatchImpl(HloInstructionType* inst, MatchOption option) const {
if (operand_index_ >= inst->operand_count()) {
EXPLAIN << "desired operand index " << operand_index_
<< " is out of bounds";
return false;
}
if (!operand_.Match(HloOperand(inst, operand_index_), option)) {
EXPLAIN << "\nin operand " << operand_index_;
return false;
}
if (option.single_user_only &&
inst->operand(operand_index_)->user_count() != 1) {
EXPLAIN << "Operand " << operand_index_ << " of HloInstruction has "
<< inst->operand(operand_index_)->user_count()
<< " users. Expected 1.";
return false;
}
return true;
}
int64_t operand_index_;
HloInstructionPattern<OperandType, OperandImpl> operand_;
};
template <typename OperandType, typename OperandImpl>
class HloInstructionPatternOperandIfPresentImpl {
public:
explicit constexpr HloInstructionPatternOperandIfPresentImpl(
int64_t operand_index,
const HloInstructionPattern<OperandType, OperandImpl>& operand)
: operand_index_(operand_index), operand_(operand) {}
bool Match(const ::xla::HloInstruction* inst, MatchOption option) const {
return MatchImpl(inst, option);
}
bool Match(::xla::HloInstruction* inst, MatchOption option) const {
return MatchImpl(inst, option);
}
void DescribeTo(std::ostream* os, int64_t indent = 0) const {
*os << "either with fewer than " << operand_index_ + 1 << " operand"
<< (operand_index_ + 1 != 1 ? "s" : "") << ", or with an operand "
<< operand_index_ << " which is:";
Indent(os, indent + kIndentInc);
operand_.DescribeTo(os, indent + kIndentInc);
}
private:
template <typename HloInstructionType>
bool MatchImpl(HloInstructionType* inst, MatchOption option) const {
if (operand_index_ >= inst->operand_count()) {
return true;
}
if (!operand_.Match(HloOperand(inst, operand_index_), option)) {
EXPLAIN << "\nin operand " << operand_index_;
return false;
}
return true;
}
int64_t operand_index_;
HloInstructionPattern<OperandType, OperandImpl> operand_;
};
template <typename OperandType1, typename OperandImpl1, typename OperandType2,
typename OperandImpl2>
class HloInstructionPatternBinaryOperandsAnyOrderImpl {
public:
explicit constexpr HloInstructionPatternBinaryOperandsAnyOrderImpl(
const HloInstructionPattern<OperandType1, OperandImpl1>& op1,
const HloInstructionPattern<OperandType2, OperandImpl2>& op2)
: op1_(op1), op2_(op2) {}
bool Match(::xla::HloInstruction* inst, MatchOption option) const {
return MatchImpl(inst, option);
}
bool Match(const ::xla::HloInstruction* inst, MatchOption option) const {
return MatchImpl(inst, option);
}
void DescribeTo(std::ostream* os, int64_t indent = 0) const {
*os << "with two operands in either order:";
Indent(os, indent);
*os << " - ";
op1_.DescribeTo(os, indent + 3);
Indent(os, indent);
*os << " - ";
op2_.DescribeTo(os, indent + 3);
}
private:
HloInstruction* operand(HloInstruction* inst, int64_t idx) const {
return inst->mutable_operand(idx);
}
const HloInstruction* operand(const HloInstruction* inst, int64_t idx) const {
return inst->operand(idx);
}
template <typename HloInstructionType>
bool MatchImpl(HloInstructionType* inst, MatchOption option) const {
if (inst->operand_count() != 2) {
EXPLAIN << "HloInstruction did not have two operands";
return false;
}
if (option.single_user_only) {
for (int i = 0; i < 2; ++i) {
if (inst->operand(i)->user_count() != 1) {
EXPLAIN << "Operand " << i << " of HloInstruction has "
<< inst->operand(i)->user_count() << " users. Expected 1.";
return false;
}
}
}
if (!option.explain_os) {
auto try_match = [&](int64_t idx1, int64_t idx2) {
MatchOption new_option = option;
new_option.capture = false;
if (op1_.Match(operand(inst, idx1), new_option) &&
op2_.Match(operand(inst, idx2), new_option)) {
if (option.capture) {
bool matched = op1_.Match(operand(inst, idx1), option) &&
op2_.Match(operand(inst, idx2), option);
DCHECK(matched);
}
return true;
}
return false;
};
return try_match(0, 1) || try_match(1, 0);
}
bool matches[2][2];
std::stringstream explanations[2][2];
for (int i = 0; i < 2; ++i) {
for (int j = 0; j < 2; ++j) {
MatchOption new_option = option;
new_option.capture = false;
new_option.explain_os = &explanations[i][j];
matches[i][j] = i == 0 ? op1_.Match(operand(inst, j), new_option)
: op2_.Match(operand(inst, j), new_option);
}
}
for (int i = 0; i < 2; ++i) {
if (matches[0][i] && matches[1][(i + 1) % 2]) {
if (option.capture) {
auto* operand1 = operand(inst, i);
auto* operand2 = operand(inst, (i + 1) % 2);
bool matched =
op1_.Match(operand1, option) && op2_.Match(operand2, option);
DCHECK(matched);
}
return true;
}
}
auto describe_matcher = [&](int matcher_idx) {
EXPLAIN << "\n - ";
if (matcher_idx == 0) {
op1_.DescribeTo(option.explain_os, 3);
} else {
CHECK_EQ(matcher_idx, 1);
op2_.DescribeTo(option.explain_os, 3);
}
for (int i = 0; i < 2; ++i) {
if (matches[matcher_idx][i]) {
continue;
}
EXPLAIN << "\ndoes not match " << (i == 0 ? "LHS" : "RHS") << ":\n";
EXPLAIN << " - ";
EXPLAIN << absl::StrReplaceAll(
explanations[matcher_idx][i].str(), {{"\n", "\n "}});
}
};
bool wrote_explanation = false;
for (int i = 0; !wrote_explanation && i < 2; ++i) {
if (!matches[i][0] && !matches[i][1]) {
EXPLAIN << "HloInstruction's operands (ignoring order) did not match "
<< (i == 0 ? "first" : "second") << " matcher. Specifically,";
describe_matcher(i);
wrote_explanation = true;
}
}
for (int i = 0; !wrote_explanation && i < 2; ++i) {
if (matches[0][i] && matches[1][i]) {
CHECK(!matches[0][(i + 1) % 2]);
CHECK(!matches[1][(i + 1) % 2]);
CHECK(!wrote_explanation);
EXPLAIN << "HloInstruction's " << (i == 1 ? "LHS" : "RHS")
<< " operand did not match either of the two matchers. "
"Specifically,";
describe_matcher(0);
EXPLAIN << "\nand";
describe_matcher(1);
wrote_explanation = true;
}
}
CHECK(wrote_explanation);
return false;
}
HloInstructionPattern<OperandType1, OperandImpl1> op1_;
HloInstructionPattern<OperandType2, OperandImpl2> op2_;
};
class HloInstructionPatternFusionKindImpl {
public:
explicit constexpr HloInstructionPatternFusionKindImpl(
::xla::HloInstruction::FusionKind kind)
: kind_(kind) {}
bool Match(const ::xla::HloInstruction* inst, MatchOption option) const {
return MatchImpl(inst, option);
}
bool Match(::xla::HloInstruction* inst, MatchOption option) const {
return MatchImpl(inst, option);
}
void DescribeTo(std::ostream* os, int64_t indent = 0) const {
*os << "with fusion kind " << ToString(kind_);
}
private:
template <typename HloInstructionType>
bool MatchImpl(HloInstructionType* inst, MatchOption option) const {
if (inst->opcode() != HloOpcode::kFusion) {
EXPLAIN << "HloInstruction does not have fusion kind " << ToString(kind_)
<< "; it's not a fusion";
return false;
}
if (inst->fusion_kind() != kind_) {
EXPLAIN << "HloInstruction does not have fusion kind " << ToString(kind_);
return false;
}
return true;
}
::xla::HloInstruction::FusionKind kind_;
};
class HloInstructionPatternTupleIndexImpl {
public:
explicit constexpr HloInstructionPatternTupleIndexImpl(int64_t tuple_index)
: tuple_index_(tuple_index) {}
bool Match(const ::xla::HloInstruction* inst, MatchOption option) const {
return MatchImpl(inst, option);
}
bool Match(::xla::HloInstruction* inst, MatchOption option) const {
return MatchImpl(inst, option);
}
void DescribeTo(std::ostream* os, int64_t indent = 0) const {
*os << "which is a GTE with index " << tuple_index_;
}
private:
template <typename HloInstructionType>
bool MatchImpl(HloInstructionType* inst, MatchOption option) const {
if (inst->opcode() != HloOpcode::kGetTupleElement) {
EXPLAIN << "HloInstruction is not a GTE with index " << tuple_index_
<< "; it's not a GTE at all";
return false;
}
if (inst->tuple_index() != tuple_index_) {
EXPLAIN << "HloInstruction is not a GTE with index " << tuple_index_;
return false;
}
return true;
}
int64_t tuple_index_;
};
class HloInstructionPatternParameterNumImpl {
public:
explicit constexpr HloInstructionPatternParameterNumImpl(
int64_t parameter_num)
: parameter_num_(parameter_num) {}
bool Match(const ::xla::HloInstruction* inst, MatchOption option) const {
return MatchImpl(inst, option);
}
bool Match(::xla::HloInstruction* inst, MatchOption option) const {
return MatchImpl(inst, option);
}
void DescribeTo(std::ostream* os, int64_t indent = 0) const {
*os << "which is parameter " << parameter_num_;
}
private:
template <typename HloInstructionType>
bool MatchImpl(HloInstructionType* inst, MatchOption option) const {
if (inst->opcode() != HloOpcode::kParameter ||
inst->parameter_number() != parameter_num_) {
EXPLAIN << "HloInstruction is not parameter " << parameter_num_;
return false;
}
return true;
}
int64_t parameter_num_;
};
class HloInstructionPatternOneUseOrUserImpl {
protected:
bool MatchOneUser(const HloInstruction* inst, MatchOption option) const {
if (inst->user_count() != 1) {
EXPLAIN << "HloInstruction has " << inst->user_count()
<< " users, but expected exactly one.";
if (inst->user_count() > 1) {
EXPLAIN << "\nAll users:";
for (const HloInstruction* user : inst->users()) {
EXPLAIN << "\n - " << InstToString(user);
}
}
return false;
}
return true;
}
};
class HloInstructionPatternOneUseImpl
: public HloInstructionPatternOneUseOrUserImpl {
public:
bool Match(const ::xla::HloInstruction* inst, MatchOption option) const {
if (!MatchOneUser(inst, option)) {
return false;
}
int64_t use_count = absl::c_count_if(
inst->users()[0]->operands(),
[&](const HloInstruction* operand) { return operand == inst; });
if (use_count != 1) {
EXPLAIN << "HloInstruction is used " << use_count
<< " times by its user, but is expected to be used just once: "
<< InstToString(inst->users()[0]);
return false;
}
return true;
}
void DescribeTo(std::ostream* os, int64_t indent = 0) const {
*os << "which has exactly one use";
}
};
class HloInstructionPatternOneUserImpl
: public HloInstructionPatternOneUseOrUserImpl {
public:
bool Match(const ::xla::HloInstruction* inst, MatchOption option) const {
return MatchOneUser(inst, option);
}
void DescribeTo(std::ostream* os, int64_t indent = 0) const {
*os << "which has exactly one user (but possibly is used multiple times by "
"that instruction)";
}
};
class HloInstructionPatternNumUserImpl {
public:
explicit constexpr HloInstructionPatternNumUserImpl(int64_t user_num)
: user_num_(user_num) {}
bool Match(const ::xla::HloInstruction* inst, MatchOption option) const {
if (inst->user_count() != user_num_) {
EXPLAIN << "HloInstruction has " << inst->user_count()
<< " users, but expected exactly " << user_num_ << " users.";
return false;
}
return true;
}
void DescribeTo(std::ostream* os, int64_t indent = 0) const {
*os << "which has exactly " << user_num_
<< " users (but possibly is used multiple times by "
"same instruction)";
}
private:
int64_t user_num_;
};
class HloInstructionPatternAtMostNumUserImpl {
public:
explicit constexpr HloInstructionPatternAtMostNumUserImpl(int64_t user_num)
: user_num_(user_num) {}
bool Match(const ::xla::HloInstruction* inst, MatchOption option) const {
if (inst->user_count() > user_num_) {
EXPLAIN << "HloInstruction has " << inst->user_count()
<< " users, but expected less than or equal " << user_num_
<< " users.";
return false;
}
return true;
}
void DescribeTo(std::ostream* os, int64_t indent = 0) const {
*os << "which has less than or equal " << user_num_
<< " users (but possibly is used multiple times by "
"same instruction)";
}
private:
int64_t user_num_;
};
class HloInstructionPatternComparisonDirectionImpl {
public:
explicit constexpr HloInstructionPatternComparisonDirectionImpl(
ComparisonDirection direction)
: direction_(direction) {}
bool Match(const ::xla::HloInstruction* inst, MatchOption option) const {
return MatchImpl(inst, option);
}
bool Match(::xla::HloInstruction* inst, MatchOption option) const {
return MatchImpl(inst, option);
}
void DescribeTo(std::ostream* os, int64_t indent = 0) const {
*os << "which has comparison direction "
<< ComparisonDirectionToString(direction_);
}
private:
template <typename HloInstructionType>
bool MatchImpl(HloInstructionType* inst, MatchOption option) const {
if (inst->opcode() != HloOpcode::kCompare ||
inst->comparison_direction() != direction_) {
EXPLAIN << "HloInstruction is not comparison "
<< ComparisonDirectionToString(direction_);
return false;
}
return true;
}
ComparisonDirection direction_;
};
class HloInstructionPatternConvDnumsImpl {
public:
explicit HloInstructionPatternConvDnumsImpl(absl::string_view dnums)
: HloInstructionPatternConvDnumsImpl(
ParseConvolutionDimensionNumbers(dnums).value()) {}
explicit HloInstructionPatternConvDnumsImpl(ConvolutionDimensionNumbers dnums)
: dnums_(std::move(dnums)) {}
bool Match(const ::xla::HloInstruction* inst, MatchOption option) const {
return MatchImpl(inst, option);
}
bool Match(::xla::HloInstruction* inst, MatchOption option) const {
return MatchImpl(inst, option);
}
void DescribeTo(std::ostream* os, int64_t indent = 0) const {
*os << "which has convolution dimension numbers "
<< ConvolutionDimensionNumbersToString(dnums_);
}
private:
template <typename HloInstructionType>
bool MatchImpl(HloInstructionType* inst, MatchOption option) const {
if (inst->opcode() != HloOpcode::kConvolution &&
inst->opcode() != HloOpcode::kCustomCall) {
EXPLAIN << "HloInstruction is not convolution or custom-call and so "
"can't have convolution_dimension_numbers";
return false;
}
const ConvolutionDimensionNumbers& actual_dnums =
inst->convolution_dimension_numbers();
if (!tsl::protobuf::util::MessageDifferencer::Equals(dnums_,
actual_dnums)) {
EXPLAIN << "convolution_dimension_numbers "
<< ConvolutionDimensionNumbersToString(actual_dnums)
<< " don't match expected "
<< ConvolutionDimensionNumbersToString(dnums_);
return false;
}
return true;
}
ConvolutionDimensionNumbers dnums_;
};
class HloInstructionPredicateImpl {
public:
explicit HloInstructionPredicateImpl(HloPredicate fn) : fn_(std::move(fn)) {}
bool Match(const HloInstruction* inst, MatchOption option) const {
bool match = fn_(inst);
if (!match) {
EXPLAIN << "HloInstruction does not match user-specified predicate";
}
return match;
}
void DescribeTo(std::ostream* os, int64_t indent = 0) const {
*os << "which matches a user-specified predicate";
}
private:
HloPredicate fn_;
};
class HloInstructionContractingDimsImpl {
public:
explicit HloInstructionContractingDimsImpl(
absl::Span<const int64_t> lhs_contracting_dims,
absl::Span<const int64_t> rhs_contracting_dims)
: lhs_contracting_dims_(lhs_contracting_dims.begin(),
lhs_contracting_dims.end()),
rhs_contracting_dims_(rhs_contracting_dims.begin(),
rhs_contracting_dims.end()) {}
bool Match(const ::xla::HloInstruction* inst, MatchOption option) const {
return MatchImpl(inst, option);
}
bool Match(::xla::HloInstruction* inst, MatchOption option) const {
return MatchImpl(inst, option);
}
void DescribeTo(std::ostream* os, int64_t indent = 0) const {
*os << "with lhs_contracting_dims {"
<< absl::StrJoin(lhs_contracting_dims_, ",")
<< "} and rhs_contracting_dims {"
<< absl::StrJoin(rhs_contracting_dims_, ",") << "}";
}
private:
template <typename HloInstructionType>
bool MatchImpl(HloInstructionType* inst, MatchOption option) const {
if (inst->opcode() != HloOpcode::kDot) {
EXPLAIN << "HloInstruction is not dot so "
"can't have dot_dimension_numbers";
return false;
}
const DotDimensionNumbers& dnums = inst->dot_dimension_numbers();
if (absl::MakeSpan(dnums.lhs_contracting_dimensions()) !=
lhs_contracting_dims_) {
EXPLAIN << "lhs_contracting_dimensions {"
<< absl::StrJoin(dnums.lhs_contracting_dimensions(), ",")
<< "} don't match expected {"
<< absl::StrJoin(lhs_contracting_dims_, ",") << "}";
return false;
}
if (absl::MakeSpan(dnums.rhs_contracting_dimensions()) !=
rhs_contracting_dims_) {
EXPLAIN << "rhs_contracting_dimensions {"
<< absl::StrJoin(dnums.rhs_contracting_dimensions(), ",")
<< "} don't match expected {"
<< absl::StrJoin(rhs_contracting_dims_, ",") << "}";
return false;
}
return true;
}
absl::InlinedVector<int64_t, 8> lhs_contracting_dims_;
absl::InlinedVector<int64_t, 8> rhs_contracting_dims_;
};
class HloInstructionReplicaGroupsImpl {
public:
explicit HloInstructionReplicaGroupsImpl(
std::vector<std::vector<int64_t>> replica_groups)
: replica_groups_(std::move(replica_groups)) {}
bool Match(const ::xla::HloInstruction* inst, MatchOption option) const {
return MatchImpl(inst, option);
}
bool Match(::xla::HloInstruction* inst, MatchOption option) const {
return MatchImpl(inst, option);
}
void DescribeTo(std::ostream* os, int64_t indent = 0) const {
std::vector<std::string> replica_group_strs;
replica_group_strs.reserve(replica_groups_.size());
for (const std::vector<int64_t>& replica_group : replica_groups_) {
replica_group_strs.push_back(
absl::StrCat("{", absl::StrJoin(replica_group, ","), "}"));
}
*os << "with replica_group {" << absl::StrJoin(replica_group_strs, ",")
<< "}";
}
private:
template <typename HloInstructionType>
bool MatchImpl(HloInstructionType* inst, MatchOption option) const {
const HloCollectiveInstruction* collective =
DynCast<HloCollectiveInstruction>(inst);
if (!collective) {
EXPLAIN << "HloInstruction is not a collective";
return false;
}
if (absl::c_equal(collective->replica_groups(), replica_groups_,
[](const ReplicaGroup& a, const std::vector<int64_t>& b) {
return absl::c_equal(a.replica_ids(), b);
})) {
return true;
}
std::ostringstream desc_stream;
DescribeTo(&desc_stream);
std::vector<std::string> replica_group_strs;
replica_group_strs.reserve(replica_groups_.size());
for (const ReplicaGroup& replica_group : collective->replica_groups()) {
replica_group_strs.push_back(absl::StrCat(
"{", absl::StrJoin(replica_group.replica_ids(), ","), "}"));
}
EXPLAIN << "replica_group {" << absl::StrJoin(replica_group_strs, ",")
<< "} don't match expected " << desc_stream.str();
return false;
}
std::vector<std::vector<int64_t>> replica_groups_;
};
class HloInstructionShardingImpl {
public:
explicit HloInstructionShardingImpl(
const std::optional<HloSharding>& sharding)
: sharding_(sharding) {}
bool Match(const ::xla::HloInstruction* inst, MatchOption option) const {
return MatchImpl(inst, option);
}
bool Match(::xla::HloInstruction* inst, MatchOption option) const {
return MatchImpl(inst, option);
}
void DescribeTo(std::ostream* os, int64_t indent = 0) const {
if (sharding_.has_value()) {
*os << "with sharding " << sharding_->ToString();
} else {
*os << "with no sharding";
}
}
private:
template <typename HloInstructionType>
bool MatchImpl(HloInstructionType* inst, MatchOption option) const {
if (!sharding_.has_value()) {
if (!inst->has_sharding()) {
return true;
}
EXPLAIN << "HloInstruction is expected to have no sharding.";
return false;
}
if (inst->has_sharding()) {
if (inst->sharding() == sharding_.value()) {
return true;
}
EXPLAIN << "sharding " << inst->sharding().ToString()
<< " don't match expected " << sharding_->ToString();
return false;
} else {
EXPLAIN << "HloInstruction has no sharding. Expected: "
<< sharding_->ToString();
return false;
}
}
std::optional<HloSharding> sharding_;
};
class HloInstructionControlDepsImpl {
public:
explicit HloInstructionControlDepsImpl(
absl::Span<HloInstruction* const> preds,
absl::Span<HloInstruction* const> succs)
: preds_(preds), succs_(succs) {}
bool Match(const ::xla::HloInstruction* inst, MatchOption option) const {
return MatchImpl(inst, option);
}
bool Match(::xla::HloInstruction* inst, MatchOption option) const {
return MatchImpl(inst, option);
}
void DescribeTo(std::ostream* os, int64_t indent = 0) const {
auto print_deps = [os](absl::Span<HloInstruction* const> deps,
absl::string_view type) {
if (deps.empty()) {
*os << "no control " << type;
} else {
*os << "control " << type << " {" << absl::StrJoin(deps, ",", fmt)
<< "}";
}
};
*os << "with ";
print_deps(preds_, "predecessors");
*os << " and ";
print_deps(succs_, "successors");
}
private:
template <typename HloInstructionType>
bool MatchImpl(HloInstructionType* inst, MatchOption option) const {
auto match_deps = [&](absl::Span<HloInstruction* const> expected_deps,
const PtrVec<HloInstruction*>& actual_deps,
absl::string_view type) {
if (!absl::c_equal(expected_deps, actual_deps)) {
EXPLAIN << "HloInstruction expected to have control " << type << " {"
<< absl::StrJoin(expected_deps, ",", fmt) << "} but has {"
<< absl::StrJoin(actual_deps, ",", fmt) << "}";
return false;
}
return true;
};
return match_deps(preds_, inst->control_predecessors(), "predecessors") &&
match_deps(succs_, inst->control_successors(), "successors");
}
static void fmt(std::string* out, const HloInstruction* inst) {
absl::StrAppend(out, inst->name());
}
absl::Span<HloInstruction* const> preds_, succs_;
};
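// Matches a constant instruction that is a scalar (or an effective scalar);
// if a value was supplied, the constant's literal, reshaped to a scalar, must
// also compare equal to it.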
template <typename ScalarTy>
class HloConstantScalarImpl {
public:
explicit constexpr HloConstantScalarImpl(bool match_effective_scalar)
: val_(std::nullopt), match_effective_scalar_(match_effective_scalar) {}
constexpr HloConstantScalarImpl(ScalarTy val, bool match_effective_scalar)
: val_(val), match_effective_scalar_(match_effective_scalar) {}
bool Match(const ::xla::HloInstruction* inst, MatchOption option) const {
return MatchImpl(inst, option);
}
bool Match(::xla::HloInstruction* inst, MatchOption option) const {
return MatchImpl(inst, option);
}
void DescribeTo(std::ostream* os, int64_t indent = 0) const {
*os << "which is a constant "
<< (match_effective_scalar_ ? "effective " : "") << "scalar";
if (val_.has_value()) {
*os << " with value " << *val_;
}
}
private:
template <typename InstTy>
bool MatchImpl(InstTy* inst, MatchOption option) const {
const auto* const_inst = DynCast<HloConstantInstruction>(inst);
if (!const_inst) {
EXPLAIN << "HloInstruction is not a constant";
return false;
}
if (match_effective_scalar_ &&
!ShapeUtil::IsEffectiveScalar(inst->shape())) {
EXPLAIN << "HloInstruction is not an effective scalar";
return false;
}
if (!match_effective_scalar_ && !ShapeUtil::IsScalar(inst->shape())) {
EXPLAIN << "HloInstruction is not a scalar";
return false;
}
if (!val_.has_value()) {
return true;
}
auto const_inst_scalar_or = const_inst->literal().Reshape({});
if (!const_inst_scalar_or.ok()) {
EXPLAIN << "could not convert matched literal to effective scalar";
return false;
}
Literal const_inst_scalar = std::move(const_inst_scalar_or).value();
if (!const_inst_scalar.IsEqualAt({}, *val_)) {
EXPLAIN << "HloInstruction's constant value "
<< const_inst_scalar.ToStringWithoutShape()
<< " did not match expected value " << *val_;
return false;
}
return true;
}
std::optional<ScalarTy> val_;
bool match_effective_scalar_;
};
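// A pattern that matches HloInstructions. As with ShapePattern, each builder
// method below appends a constraint via AllOf and returns a new pattern, so
// constraints compose left to right.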
template <typename HloInstructionType, typename Impl>
class HloInstructionPattern {
private:
template <typename NewImpl>
auto AppendImpl(NewImpl new_impl) const {
auto new_allof = AllOf<::xla::HloInstruction>(impl_, std::move(new_impl));
return HloInstructionPattern<HloInstructionType, decltype(new_allof)>(
std::move(new_allof), matched_inst_);
}
public:
explicit constexpr HloInstructionPattern(const Impl& impl,
HloInstructionType** matched_inst)
: impl_(impl), matched_inst_(matched_inst) {}
bool Match(const ::xla::HloInstruction* inst, MatchOption option) const {
if (impl_.Match(inst, option)) {
if (option.capture && matched_inst_) {
*matched_inst_ = inst;
}
return true;
}
if (inst != nullptr) {
EXPLAIN << "\nin " << InstToString(inst);
}
return false;
}
bool Match(::xla::HloInstruction* inst, MatchOption option,
bool explain_instruction = true) const {
if (impl_.Match(inst, option)) {
if (option.capture && matched_inst_) {
*matched_inst_ = inst;
}
return true;
}
if (explain_instruction) {
EXPLAIN << "\nin " << InstToString(inst);
}
return false;
}
auto WithName(absl::string_view name) const {
return AppendImpl(HloInstructionPatternNameImpl(name));
}
auto WithOpcode(HloOpcode opcode) const {
return AppendImpl(HloInstructionPatternOpcodeImpl(opcode, false));
}
auto WithCustomCallTarget(
absl::Span<const absl::string_view> custom_call_targets) const {
return AppendImpl(HloInstructionCustomCallTargetImpl(custom_call_targets));
}
auto WithNumOperands(int64_t num_operands) const {
return AppendImpl(HloInstructionPatternNumOperandsImpl(num_operands));
}
auto WithoutOpcode(HloOpcode opcode) const {
return AppendImpl(HloInstructionPatternOpcodeImpl(opcode, true));
}
constexpr auto Is(const HloInstruction* instr) const {
return AppendImpl(HloInstructionIsImpl(instr));
}
constexpr auto IsConstant() const { return WithOpcode(HloOpcode::kConstant); }
constexpr auto IsConstantScalar() const {
return AppendImpl(
HloConstantScalarImpl<int>(/*match_effective_scalar=*/false));
}
template <typename ScalarTy>
constexpr auto IsConstantScalar(const ScalarTy& val) const {
return AppendImpl(
HloConstantScalarImpl<ScalarTy>(val, /*match_effective_scalar=*/false));
}
constexpr auto IsConstantEffectiveScalar() const {
return AppendImpl(
HloConstantScalarImpl<int>(/*match_effective_scalar=*/true));
}
template <typename ScalarTy>
constexpr auto IsConstantEffectiveScalar(const ScalarTy& val) const {
return AppendImpl(
HloConstantScalarImpl<ScalarTy>(val, /*match_effective_scalar=*/true));
}
constexpr auto IsNonConstant() const {
return WithoutOpcode(HloOpcode::kConstant);
}
template <typename ShapeType, typename ShapeImpl>
constexpr auto WithShape(
const ShapePattern<ShapeType, ShapeImpl>& shape) const {
return AppendImpl(
HloInstructionPatternShapeImpl<ShapeType, ShapeImpl>(shape));
}
constexpr auto WithShape(PrimitiveType ty, absl::Span<const int64_t> dims) {
return WithShape(Shape().WithElementType(ty).WithDims(dims));
}
constexpr auto WithShape(PrimitiveType ty, absl::Span<const int64_t> dims,
absl::Span<const int64_t> minor_to_major) {
return WithShape(
Shape().WithElementType(ty).WithDims(dims).WithLayout(minor_to_major));
}
template <typename Dummy = void>
constexpr auto WithShapeEqualTo(const ::xla::Shape* shape) const {
return WithShape(Shape().EqualTo(shape));
}
template <typename Dummy = void>
constexpr auto WithShapeCompatibleTo(const ::xla::Shape* shape) const {
return WithShape(Shape().CompatibleTo(shape));
}
constexpr auto WithElementType(PrimitiveType ty) {
return WithShape(Shape().WithElementType(ty));
}
template <typename OperandType, typename OperandImpl>
constexpr auto WithOperand(
int64_t operand_index,
const HloInstructionPattern<OperandType, OperandImpl>& operand) const {
return AppendImpl(
HloInstructionPatternOperandImpl<OperandType, OperandImpl>(
operand_index, operand));
}
template <typename OperandType, typename OperandImpl>
constexpr auto WithOperandIfPresent(
int64_t operand_index,
const HloInstructionPattern<OperandType, OperandImpl>& operand) const {
return AppendImpl(
HloInstructionPatternOperandIfPresentImpl<OperandType, OperandImpl>(
operand_index, operand));
}
template <typename OperandType1, typename OperandImpl1, typename OperandType2,
typename OperandImpl2>
constexpr auto WithBinaryOperandsAnyOrder(
const HloInstructionPattern<OperandType1, OperandImpl1>& op1,
const HloInstructionPattern<OperandType2, OperandImpl2>& op2) const {
return AppendImpl(
HloInstructionPatternBinaryOperandsAnyOrderImpl<
OperandType1, OperandImpl1, OperandType2, OperandImpl2>(op1, op2));
}
constexpr auto WithFusionKind(HloInstruction::FusionKind kind) const {
return AppendImpl(HloInstructionPatternFusionKindImpl(kind));
}
constexpr auto WithTupleIndex(int64_t tuple_index) const {
return AppendImpl(HloInstructionPatternTupleIndexImpl(tuple_index));
}
constexpr auto WithParameterNum(int64_t parameter_num) const {
return AppendImpl(HloInstructionPatternParameterNumImpl(parameter_num));
}
constexpr auto WithOneUse() const {
return AppendImpl(HloInstructionPatternOneUseImpl());
}
constexpr auto WithOneUser() const {
return AppendImpl(HloInstructionPatternOneUserImpl());
}
constexpr auto WithNumUser(int64_t user_num) const {
return AppendImpl(HloInstructionPatternNumUserImpl(user_num));
}
constexpr auto WithAtMostNumUser(int64_t user_num) const {
return AppendImpl(HloInstructionPatternAtMostNumUserImpl(user_num));
}
auto WithComparisonDirection(ComparisonDirection direction) const {
return AppendImpl(HloInstructionPatternComparisonDirectionImpl(direction));
}
auto WithConvDnums(absl::string_view dnums) const {
return AppendImpl(HloInstructionPatternConvDnumsImpl(dnums));
}
auto WithConvDnums(ConvolutionDimensionNumbers dnums) const {
return AppendImpl(HloInstructionPatternConvDnumsImpl(dnums));
}
auto WithPredicate(HloPredicate fn) const {
return AppendImpl(HloInstructionPredicateImpl(std::move(fn)));
}
auto WithContractingDims(
absl::Span<const int64_t> lhs_contracting_dims,
absl::Span<const int64_t> rhs_contracting_dims) const {
return AppendImpl(HloInstructionContractingDimsImpl(lhs_contracting_dims,
rhs_contracting_dims));
}
auto WithReplicaGroups(
std::vector<std::vector<int64_t>> replica_groups) const {
return AppendImpl(
HloInstructionReplicaGroupsImpl(std::move(replica_groups)));
}
auto WithSharding(absl::string_view sharding) const {
return AppendImpl(
HloInstructionShardingImpl(ParseSharding(sharding).value()));
}
auto WithControlDeps(absl::Span<HloInstruction* const> preds,
absl::Span<HloInstruction* const> succs) {
return AppendImpl(HloInstructionControlDepsImpl(preds, succs));
}
void DescribeTo(std::ostream* os, int64_t indent = 0) const {
impl_.DescribeTo(os, indent);
}
private:
Impl impl_;
HloInstructionType** matched_inst_;
};
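// AnyOfImpl implements AnyOf<Item>(...). The HloInstruction specialization
// wraps the combined pattern back into an HloInstructionPattern so that
// instruction-specific chaining (e.g. .WithShape(...)) keeps working.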
template <typename Item, typename... Patterns>
struct AnyOfImpl {
auto operator()(const Patterns&... patterns) const {
return AnyOfPattern<typename std::remove_const<Item>::type, Patterns...>(
patterns...);
}
};
template <typename... Patterns>
struct AnyOfImpl<HloInstruction, Patterns...> {
auto operator()(const Patterns&... patterns) const {
auto any_of = AnyOfPattern<HloInstruction, Patterns...>(patterns...);
return HloInstructionPattern<HloInstruction, decltype(any_of)>(
std::move(any_of), nullptr);
}
};
}
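// Matches if any of the given patterns matches. Sub-patterns are tried in
// order, and captures are committed only for the first one that succeeds.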
template <typename Item, typename... Patterns>
auto AnyOf(const Patterns&... patterns) {
return detail::AnyOfImpl<Item, Patterns...>()(patterns...);
}
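// Creates an HloInstructionPattern that matches any non-null instruction and,
// when capture is enabled, stores it in *matched_inst (if non-null). A
// typical use, sketched here assuming the conventional alias
// `namespace m = match;`:
//
//   HloInstruction* x = nullptr;
//   if (Match(instr, m::Add(m::Op(&x), m::ConstantScalar(0)))) {
//     // `x` now points at operand 0 of the add.
//   }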
inline constexpr auto Op(const ::xla::HloInstruction** matched_inst = nullptr) {
return detail::HloInstructionPattern<const ::xla::HloInstruction,
detail::HloInstructionPatternBaseImpl>(
detail::HloInstructionPatternBaseImpl(), matched_inst);
}
inline constexpr auto Op(::xla::HloInstruction** matched_inst) {
return detail::HloInstructionPattern<::xla::HloInstruction,
detail::HloInstructionPatternBaseImpl>(
detail::HloInstructionPatternBaseImpl(), matched_inst);
}
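// Generates matchers for nullary ops: NAME() and NAME(&matched_inst), both
// requiring HloOpcode::kNAME.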
#define XLA_NULLOP_PATTERN(NAME) \
inline auto NAME() { return Op().WithOpcode(HloOpcode::k##NAME); } \
\
template <typename HloInstructionType> \
inline auto NAME(HloInstructionType** matched_inst) { \
return Op(matched_inst).WithOpcode(HloOpcode::k##NAME); \
}
XLA_NULLOP_PATTERN(Constant)
XLA_NULLOP_PATTERN(Parameter)
XLA_NULLOP_PATTERN(Iota)
XLA_NULLOP_PATTERN(Rng)
XLA_NULLOP_PATTERN(PartitionId)
XLA_NULLOP_PATTERN(ReplicaId)
#undef XLA_NULLOP_PATTERN
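// Generates matchers for unary ops, with optional capture and an optional
// pattern for operand 0.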
#define XLA_UNOP_PATTERN(NAME) \
inline auto NAME() { return Op().WithOpcode(HloOpcode::k##NAME); } \
\
template <typename HloInstructionType> \
inline auto NAME(HloInstructionType** matched_inst) { \
return Op(matched_inst).WithOpcode(HloOpcode::k##NAME); \
} \
\
template <typename Arg> \
inline auto NAME(Arg&& arg) { \
return Op() \
.WithOpcode(HloOpcode::k##NAME) \
.WithOperand(0, std::forward<Arg>(arg)); \
} \
\
template <typename HloInstructionType, typename Arg> \
inline auto NAME(HloInstructionType** matched_inst, Arg&& arg) { \
return Op(matched_inst) \
.WithOpcode(HloOpcode::k##NAME) \
.WithOperand(0, std::forward<Arg>(arg)); \
}
XLA_UNOP_PATTERN(Abs)
XLA_UNOP_PATTERN(RoundNearestAfz)
XLA_UNOP_PATTERN(Bitcast)
XLA_UNOP_PATTERN(BitcastConvert)
XLA_UNOP_PATTERN(Broadcast)
XLA_UNOP_PATTERN(Cbrt)
XLA_UNOP_PATTERN(Ceil)
XLA_UNOP_PATTERN(Convert)
XLA_UNOP_PATTERN(Copy)
XLA_UNOP_PATTERN(Cos)
XLA_UNOP_PATTERN(AllReduceStart)
XLA_UNOP_PATTERN(AllReduceDone)
XLA_UNOP_PATTERN(AllToAll)
XLA_UNOP_PATTERN(AsyncDone)
XLA_UNOP_PATTERN(CollectiveBroadcast)
XLA_UNOP_PATTERN(CollectivePermute)
XLA_UNOP_PATTERN(CollectivePermuteStart)
XLA_UNOP_PATTERN(CollectivePermuteDone)
XLA_UNOP_PATTERN(Domain)
XLA_UNOP_PATTERN(Erf)
XLA_UNOP_PATTERN(Exp)
XLA_UNOP_PATTERN(Expm1)
XLA_UNOP_PATTERN(Fft)
XLA_UNOP_PATTERN(Floor)
XLA_UNOP_PATTERN(GetTupleElement)
XLA_UNOP_PATTERN(Imag)
XLA_UNOP_PATTERN(Infeed)
XLA_UNOP_PATTERN(IsFinite)
XLA_UNOP_PATTERN(Log)
XLA_UNOP_PATTERN(Logistic)
XLA_UNOP_PATTERN(Not)
XLA_UNOP_PATTERN(Negate)
XLA_UNOP_PATTERN(OptimizationBarrier)
XLA_UNOP_PATTERN(Real)
XLA_UNOP_PATTERN(Recv)
XLA_UNOP_PATTERN(RecvDone)
XLA_UNOP_PATTERN(ReducePrecision)
XLA_UNOP_PATTERN(Reshape)
XLA_UNOP_PATTERN(Reverse)
XLA_UNOP_PATTERN(Rsqrt)
XLA_UNOP_PATTERN(SendDone)
XLA_UNOP_PATTERN(Sign)
XLA_UNOP_PATTERN(Sin)
XLA_UNOP_PATTERN(Slice)
XLA_UNOP_PATTERN(Sqrt)
XLA_UNOP_PATTERN(Tan)
XLA_UNOP_PATTERN(Tanh)
XLA_UNOP_PATTERN(Transpose)
XLA_UNOP_PATTERN(While)
#undef XLA_UNOP_PATTERN
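// Generates matchers for binary ops. The commutative variant additionally
// generates NAME##AnyOrder, which accepts its two operand patterns in either
// order, e.g. (a sketch) m::AddAnyOrder(m::ConstantScalar(1), m::Op(&x)).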
#define XLA_BINOP_PATTERN(NAME) \
inline auto NAME() { return Op().WithOpcode(HloOpcode::k##NAME); } \
\
template <typename Lhs, typename Rhs> \
inline auto NAME(Lhs&& lhs, Rhs&& rhs) { \
return Op() \
.WithOpcode(HloOpcode::k##NAME) \
.WithOperand(0, std::forward<Lhs>(lhs)) \
.WithOperand(1, std::forward<Rhs>(rhs)); \
} \
\
template <typename HloInstructionType, typename Lhs, typename Rhs> \
inline auto NAME(HloInstructionType** matched_inst, Lhs&& lhs, Rhs&& rhs) { \
return Op(matched_inst) \
.WithOpcode(HloOpcode::k##NAME) \
.WithOperand(0, std::forward<Lhs>(lhs)) \
.WithOperand(1, std::forward<Rhs>(rhs)); \
}
#define XLA_COMMUTATIVE_BINOP_PATTERN(NAME) \
XLA_BINOP_PATTERN(NAME) \
\
template <typename HloInstructionType, typename Lhs, typename Rhs> \
inline auto NAME##AnyOrder(HloInstructionType** matched_inst, Lhs&& lhs, \
Rhs&& rhs) { \
return Op(matched_inst) \
.WithOpcode(HloOpcode::k##NAME) \
.WithBinaryOperandsAnyOrder(std::forward<Lhs>(lhs), \
std::forward<Rhs>(rhs)); \
} \
template <typename Lhs, typename Rhs> \
inline auto NAME##AnyOrder(Lhs&& lhs, Rhs&& rhs) { \
return NAME##AnyOrder<const HloInstruction>( \
nullptr, std::forward<Lhs>(lhs), std::forward<Rhs>(rhs)); \
}
XLA_COMMUTATIVE_BINOP_PATTERN(Add)
XLA_BINOP_PATTERN(Atan2)
XLA_BINOP_PATTERN(Divide)
XLA_BINOP_PATTERN(Complex)
XLA_BINOP_PATTERN(Compare)
XLA_BINOP_PATTERN(Convolution)
XLA_BINOP_PATTERN(Dot)
XLA_BINOP_PATTERN(Gather)
XLA_COMMUTATIVE_BINOP_PATTERN(Maximum)
XLA_COMMUTATIVE_BINOP_PATTERN(Minimum)
XLA_COMMUTATIVE_BINOP_PATTERN(Multiply)
XLA_BINOP_PATTERN(Outfeed)
XLA_BINOP_PATTERN(Pad)
XLA_BINOP_PATTERN(Power)
XLA_BINOP_PATTERN(Remainder)
XLA_BINOP_PATTERN(Send)
XLA_BINOP_PATTERN(Subtract)
XLA_COMMUTATIVE_BINOP_PATTERN(And)
XLA_COMMUTATIVE_BINOP_PATTERN(Or)
XLA_BINOP_PATTERN(ShiftLeft)
XLA_BINOP_PATTERN(ShiftRightArithmetic)
XLA_BINOP_PATTERN(ShiftRightLogical)
XLA_COMMUTATIVE_BINOP_PATTERN(Xor)
#undef XLA_COMMUTATIVE_BINOP_PATTERN
#undef XLA_BINOP_PATTERN
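// Generates matchers for ternary ops, taking patterns for operands 0..2.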
#define XLA_TERNOP_PATTERN(NAME) \
inline auto NAME() { return Op().WithOpcode(HloOpcode::k##NAME); } \
\
template <typename Arg0, typename Arg1, typename Arg2> \
inline auto NAME(Arg0&& arg0, Arg1&& arg1, Arg2&& arg2) { \
return Op() \
.WithOpcode(HloOpcode::k##NAME) \
.WithOperand(0, std::forward<Arg0>(arg0)) \
.WithOperand(1, std::forward<Arg1>(arg1)) \
.WithOperand(2, std::forward<Arg2>(arg2)); \
} \
\
template <typename HloInstructionType, typename Arg0, typename Arg1, \
typename Arg2> \
inline auto NAME(HloInstructionType** matched_inst, Arg0&& arg0, \
Arg1&& arg1, Arg2&& arg2) { \
return Op(matched_inst) \
.WithOpcode(HloOpcode::k##NAME) \
.WithOperand(0, std::forward<Arg0>(arg0)) \
.WithOperand(1, std::forward<Arg1>(arg1)) \
.WithOperand(2, std::forward<Arg2>(arg2)); \
}
XLA_TERNOP_PATTERN(Clamp);
XLA_TERNOP_PATTERN(Select);
XLA_TERNOP_PATTERN(SelectAndScatter);
#undef XLA_TERNOP_PATTERN
namespace detail {
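// Recursively applies WithOperand(operand_num + k, arg_k) for each trailing
// argument, so a variadic matcher can constrain all operands in one call.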
template <typename Matcher, typename FirstArg>
inline auto WithOperands(Matcher&& m, int64_t operand_num,
FirstArg&& first_arg) {
return m.WithOperand(operand_num, std::forward<FirstArg>(first_arg));
}
template <typename Matcher, typename FirstArg, typename... Args>
inline auto WithOperands(Matcher&& m, int64_t operand_num, FirstArg&& first_arg,
Args&&... args) {
return WithOperands(
m.WithOperand(operand_num, std::forward<FirstArg>(first_arg)),
operand_num + 1, std::forward<Args>(args)...);
}
}
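// Generates matchers for variadic ops. When operand patterns are given, the
// instruction must have exactly that many operands, each matching in order.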
#define XLA_VARIADIC_OP_PATTERN(NAME) \
inline auto NAME() { return Op().WithOpcode(HloOpcode::k##NAME); } \
\
template <typename... Args> \
inline auto NAME(Args&&... args) { \
return detail::WithOperands( \
Op().WithOpcode(HloOpcode::k##NAME).WithNumOperands(sizeof...(Args)), \
0, std::forward<Args>(args)...); \
} \
\
template <typename HloInstructionType, typename... Args> \
inline auto NAME(HloInstructionType** matched_inst, Args&&... args) { \
return detail::WithOperands(Op(matched_inst) \
.WithOpcode(HloOpcode::k##NAME) \
.WithNumOperands(sizeof...(Args)), \
0, \
std::forward<Args>(args)...); \
} \
\
template <typename HloInstructionType> \
inline auto NAME(HloInstructionType** matched_inst) { \
return Op(matched_inst).WithOpcode(HloOpcode::k##NAME); \
}
XLA_VARIADIC_OP_PATTERN(AfterAll);
XLA_VARIADIC_OP_PATTERN(AllGather)
XLA_VARIADIC_OP_PATTERN(AllReduce)
XLA_VARIADIC_OP_PATTERN(AsyncStart)
XLA_VARIADIC_OP_PATTERN(Concatenate);
XLA_VARIADIC_OP_PATTERN(Conditional);
XLA_VARIADIC_OP_PATTERN(DynamicSlice)
XLA_VARIADIC_OP_PATTERN(DynamicUpdateSlice)
XLA_VARIADIC_OP_PATTERN(Fusion);
XLA_VARIADIC_OP_PATTERN(Map)
XLA_VARIADIC_OP_PATTERN(Reduce);
XLA_VARIADIC_OP_PATTERN(ReduceScatter)
XLA_VARIADIC_OP_PATTERN(ReduceWindow)
XLA_VARIADIC_OP_PATTERN(Scatter);
XLA_VARIADIC_OP_PATTERN(Sort);
XLA_VARIADIC_OP_PATTERN(Tuple);
XLA_VARIADIC_OP_PATTERN(Call);
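// CustomCall matchers. The overloads optionally capture the instruction,
// restrict the custom-call target to a given set, and/or match operands; the
// enable_if guards keep target strings and capture pointers from binding to
// the operand overloads.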
inline auto CustomCall() { return Op().WithOpcode(HloOpcode::kCustomCall); }
template <typename HloInstructionType>
auto CustomCall(HloInstructionType** matched_inst) {
return Op(matched_inst).WithOpcode(HloOpcode::kCustomCall);
}
template <
typename Arg0, typename... Args,
typename std::enable_if<
!std::is_convertible<Arg0, absl::string_view>::value &&
!std::is_convertible<Arg0, HloInstruction**>::value &&
!std::is_convertible<Arg0, const HloInstruction**>::value>::type* =
nullptr>
auto CustomCall(Arg0&& arg0, Args&&... args) {
return detail::WithOperands(CustomCall().WithNumOperands(sizeof...(Args) + 1),
0, std::forward<Arg0>(arg0),
std::forward<Args>(args)...);
}
template <typename... Args>
auto CustomCall(absl::Span<const absl::string_view> custom_call_targets,
Args&&... args) {
return CustomCall(std::forward<Args>(args)...)
.WithCustomCallTarget(custom_call_targets);
}
template <typename HloInstructionType, typename Arg0, typename... Args,
typename std::enable_if<!std::is_convertible<
Arg0, absl::string_view>::value>::type* = nullptr>
auto CustomCall(HloInstructionType** matched_inst, Arg0&& arg0,
Args&&... args) {
return detail::WithOperands(
CustomCall(matched_inst).WithNumOperands(sizeof...(Args) + 1),
0, std::forward<Arg0>(arg0), std::forward<Args>(args)...);
}
template <typename HloInstructionType, typename... Args>
auto CustomCall(HloInstructionType** matched_inst,
absl::Span<const absl::string_view> custom_call_targets,
Args&&... args) {
return CustomCall(matched_inst, std::forward<Args>(args)...)
.WithCustomCallTarget(custom_call_targets);
}
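// Generates matchers for kCompare with the given comparison direction. The
// commutative variant (Eq/Ne) also generates NAME##AnyOrder; note that the
// AnyOrder form constrains only the opcode and operands, not the direction.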
#define XLA_COMPARE_PATTERN(NAME) \
inline auto NAME() { \
return Op() \
.WithOpcode(HloOpcode::kCompare) \
.WithComparisonDirection(ComparisonDirection::k##NAME); \
} \
\
template <typename Lhs, typename Rhs> \
inline auto NAME(Lhs&& lhs, Rhs&& rhs) { \
return Op() \
.WithOpcode(HloOpcode::kCompare) \
.WithOperand(0, std::forward<Lhs>(lhs)) \
.WithOperand(1, std::forward<Rhs>(rhs)) \
.WithComparisonDirection(ComparisonDirection::k##NAME); \
} \
\
template <typename HloInstructionType, typename Lhs, typename Rhs> \
inline auto NAME(HloInstructionType** matched_inst, Lhs&& lhs, Rhs&& rhs) { \
return Op(matched_inst) \
.WithOpcode(HloOpcode::kCompare) \
.WithOperand(0, std::forward<Lhs>(lhs)) \
.WithOperand(1, std::forward<Rhs>(rhs)) \
.WithComparisonDirection(ComparisonDirection::k##NAME); \
}
#define XLA_COMMUTATIVE_COMPARE_PATTERN(NAME) \
XLA_COMPARE_PATTERN(NAME) \
\
template <typename HloInstructionType, typename Lhs, typename Rhs> \
inline auto NAME##AnyOrder(HloInstructionType** matched_inst, Lhs&& lhs, \
Rhs&& rhs) { \
return Op(matched_inst) \
.WithOpcode(HloOpcode::kCompare) \
.WithBinaryOperandsAnyOrder(std::forward<Lhs>(lhs), \
std::forward<Rhs>(rhs)); \
} \
template <typename Lhs, typename Rhs> \
inline auto NAME##AnyOrder(Lhs&& lhs, Rhs&& rhs) { \
return NAME##AnyOrder<const HloInstruction>( \
nullptr, std::forward<Lhs>(lhs), std::forward<Rhs>(rhs)); \
}
XLA_COMMUTATIVE_COMPARE_PATTERN(Eq);
XLA_COMMUTATIVE_COMPARE_PATTERN(Ne);
XLA_COMPARE_PATTERN(Ge);
XLA_COMPARE_PATTERN(Gt);
XLA_COMPARE_PATTERN(Le);
XLA_COMPARE_PATTERN(Lt);
inline auto NonConstant() { return Op().IsNonConstant(); }
template <typename HloInstructionType>
inline auto NonConstant(HloInstructionType** matched_inst) {
return Op(matched_inst).IsNonConstant();
}
template <typename Arg>
inline auto GetTupleElement(Arg&& arg, int64_t tuple_index) {
return Op()
.WithOpcode(HloOpcode::kGetTupleElement)
.WithOperand(0, std::forward<Arg>(arg))
.WithTupleIndex(tuple_index);
}
template <typename HloInstructionType, typename Arg>
inline auto GetTupleElement(HloInstructionType** matched_inst, Arg&& arg,
int64_t tuple_index) {
return Op(matched_inst)
.WithOpcode(HloOpcode::kGetTupleElement)
.WithOperand(0, std::forward<Arg>(arg))
.WithTupleIndex(tuple_index);
}
inline auto Parameter(int64_t parameter_num) {
return Op().WithOpcode(HloOpcode::kParameter).WithParameterNum(parameter_num);
}
template <typename HloInstructionType>
inline auto Parameter(HloInstructionType** matched_inst,
int64_t parameter_num) {
return Op(matched_inst)
.WithOpcode(HloOpcode::kParameter)
.WithParameterNum(parameter_num);
}
inline auto ConstantScalar() { return Op().IsConstantScalar(); }
template <typename HloInstructionType>
inline auto ConstantScalar(HloInstructionType** matched_inst) {
return Op(matched_inst).IsConstantScalar();
}
template <typename ScalarTy>
inline auto ConstantScalar(ScalarTy val) {
return Op().IsConstantScalar(val);
}
template <typename HloInstructionType, typename ScalarTy>
inline auto ConstantScalar(HloInstructionType** matched_inst, ScalarTy val) {
return Op(matched_inst).IsConstantScalar(val);
}
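// The EffectiveScalar variants below also accept constants whose shape merely
// has a single element (e.g. s32[1,1]), not just true rank-0 scalars.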
inline auto ConstantEffectiveScalar() {
return Op().IsConstantEffectiveScalar();
}
template <typename HloInstructionType>
inline auto ConstantEffectiveScalar(HloInstructionType** matched_inst) {
return Op(matched_inst).IsConstantEffectiveScalar();
}
template <typename ScalarTy>
inline auto ConstantEffectiveScalar(ScalarTy val) {
return Op().IsConstantEffectiveScalar(val);
}
template <typename HloInstructionType, typename ScalarTy>
inline auto ConstantEffectiveScalar(HloInstructionType** matched_inst,
ScalarTy val) {
return Op(matched_inst).IsConstantEffectiveScalar(val);
}
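// Type-erasure machinery for SharedSubpattern (below): a pattern of arbitrary
// concrete type is hidden behind a virtual interface and a shared_ptr so one
// subpattern object can be referenced from several enclosing patterns.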
namespace detail {
class InstructionPatternInterface {
public:
virtual ~InstructionPatternInterface() = default;
virtual bool Match(::xla::HloInstruction* instr,
MatchOption option) const = 0;
virtual void DescribeTo(std::ostream* os, int64_t indent) const = 0;
};
template <typename Pattern>
class TypedInstructionPattern : public InstructionPatternInterface {
public:
explicit TypedInstructionPattern(Pattern pattern)
: pattern_(std::move(pattern)) {}
bool Match(::xla::HloInstruction* instr, MatchOption option) const override {
return pattern_.Match(instr, option);
}
void DescribeTo(std::ostream* os, int64_t indent) const override {
pattern_.DescribeTo(os, indent);
}
private:
Pattern pattern_;
};
class HloInstructionPatternSharedImpl {
public:
template <typename Pattern>
explicit HloInstructionPatternSharedImpl(Pattern pattern)
: pattern_(std::make_shared<TypedInstructionPattern<Pattern>>(
std::move(pattern))) {}
bool Match(::xla::HloInstruction* instr, MatchOption option) const {
return pattern_->Match(instr, option);
}
void DescribeTo(std::ostream* os, int64_t indent = 0) const {
pattern_->DescribeTo(os, indent);
}
private:
std::shared_ptr<InstructionPatternInterface> pattern_;
};
}  // namespace detail
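// Wraps `pattern` in a shared, type-erased implementation so the same
// subpattern can appear in several places within a larger pattern; see the
// SharedSubpattern tests further below for usage.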
template <typename HloInstructionType, typename OriginalImpl>
inline auto SharedSubpattern(
detail::HloInstructionPattern<HloInstructionType, OriginalImpl> pattern) {
auto impl = detail::HloInstructionPatternSharedImpl(std::move(pattern));
return detail::HloInstructionPattern<HloInstructionType, decltype(impl)>(
std::move(impl), nullptr);
}
}  // namespace match
}  // namespace xla
#undef EXPLAIN
#pragma pop_macro("EXPLAIN")
#endif | #include "xla/service/pattern_matcher.h"
#include <memory>
#include <sstream>
#include <string>
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "xla/comparison_util.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/layout.h"
#include "xla/layout_util.h"
#include "xla/literal_util.h"
#include "xla/service/hlo_parser.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/util.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla {
namespace {
namespace m = match;
using PatternMatcherTest = HloTestBase;
TEST_F(PatternMatcherTest, AddOp) {
constexpr char kModuleStr[] = R"(HloModule two_plus_two_module
ENTRY %two_plus_two_computation () -> f32[] {
%two = f32[] constant(2)
ROOT %two_plus_two = f32[] add(f32[] %two, f32[] %two)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module,
ParseAndReturnVerifiedModule(kModuleStr));
const HloInstruction* matched_inst;
HloInstruction* matched_operand;
Shape* matched_shape;
ASSERT_TRUE(Match(
hlo_module->entry_computation()->root_instruction(),
match::Op(&matched_inst)
.WithName("two_plus_two")
.WithOpcode(HloOpcode::kAdd)
.WithShape(match::Shape(&matched_shape).IsDenseArray())
.WithOperand(
0,
match::Op(&matched_operand).WithOpcode(HloOpcode::kConstant))));
ASSERT_NE(matched_inst, nullptr);
EXPECT_EQ(matched_inst->name(), "two_plus_two");
EXPECT_EQ(matched_inst->opcode(), HloOpcode::kAdd);
EXPECT_TRUE(Match(hlo_module->entry_computation()->root_instruction(),
match::Add(match::Constant(), match::Constant())));
EXPECT_FALSE(Match(hlo_module->entry_computation()->root_instruction(),
match::Op().WithName("bad_name")));
matched_inst = nullptr;
EXPECT_FALSE(Match(hlo_module->entry_computation()->root_instruction(),
match::Multiply(&matched_inst, match::Op(), match::Op())));
}
TEST_F(PatternMatcherTest, ScalarShape) {
auto scalar_shape = ShapeUtil::MakeShape(F32, {});
Shape* matched_shape;
EXPECT_TRUE(Match(&scalar_shape, match::Shape(&matched_shape).IsScalar()));
EXPECT_EQ(matched_shape, &scalar_shape);
EXPECT_TRUE(Match(&scalar_shape, match::Shape().IsArray()));
EXPECT_TRUE(Match(&scalar_shape, match::Shape().IsDenseArray()));
EXPECT_FALSE(Match(&scalar_shape, match::Shape().IsTuple()));
EXPECT_TRUE(Match(&scalar_shape, match::Shape().WithElementType(F32)));
EXPECT_TRUE(Match(&scalar_shape, match::Shape().WithRank(0)));
EXPECT_FALSE(Match(
&scalar_shape,
match::Shape().WithSubshape({0}, match::Shape()).WithElementType(F32)));
}
TEST_F(PatternMatcherTest, DenseArrayShape) {
auto array_shape = ShapeUtil::MakeShape(F32, {2, 3, 4});
Shape* matched_shape;
EXPECT_TRUE(Match(&array_shape, match::Shape(&matched_shape).IsArray()));
EXPECT_EQ(matched_shape, &array_shape);
EXPECT_TRUE(Match(&array_shape, match::Shape().IsDenseArray()));
EXPECT_FALSE(Match(&array_shape, match::Shape().IsScalar()));
EXPECT_FALSE(Match(&array_shape, match::Shape().IsTuple()));
EXPECT_TRUE(Match(&array_shape, match::Shape().WithElementType(F32)));
EXPECT_TRUE(Match(&array_shape, match::Shape().WithRank(3)));
EXPECT_FALSE(
Match(&array_shape, match::Shape().WithSubshape({0}, match::Shape())));
EXPECT_TRUE(Match(&array_shape, match::Shape().WithLayout({2, 1, 0})));
EXPECT_FALSE(Match(&array_shape, match::Shape().WithLayout({0, 1, 2})));
Layout* matched_layout;
EXPECT_TRUE(Match(&array_shape,
match::Shape().WithLayout(match::Layout(&matched_layout))));
EXPECT_EQ(matched_layout, &array_shape.layout());
EXPECT_TRUE(Match(&array_shape, match::Shape().IsDenseArray()));
}
TEST_F(PatternMatcherTest, DenseArrayShapeWithLayout) {
auto array_shape =
ShapeUtil::MakeShapeWithDenseLayout(F32, {5, 2, 3}, {1, 2, 0});
Shape* matched_shape;
EXPECT_TRUE(
Match(&array_shape, match::Shape(&matched_shape).WithLayout({1, 2, 0})));
EXPECT_EQ(matched_shape, &array_shape);
EXPECT_FALSE(Match(&array_shape, match::Shape().WithLayout({2, 0, 1})));
Layout* matched_layout;
EXPECT_TRUE(
Match(&array_shape,
match::Shape().WithLayout(
match::Layout(&matched_layout).WithMinorToMajor({1, 2, 0}))));
EXPECT_EQ(matched_layout, &array_shape.layout());
}
TEST_F(PatternMatcherTest, TupleShape) {
auto tuple_shape = ShapeUtil::MakeTupleShape({
ShapeUtil::MakeShape(F32, {1, 2, 3}),
ShapeUtil::MakeShape(S32, {4, 5}),
});
EXPECT_TRUE(Match(&tuple_shape, match::Shape().IsTuple()));
EXPECT_FALSE(Match(&tuple_shape, match::Shape().IsArray()));
EXPECT_FALSE(Match(&tuple_shape, match::Shape().IsScalar()));
Shape* subshape;
ASSERT_TRUE(Match(
&tuple_shape,
match::Shape().WithSubshape(
{0}, match::Shape(&subshape).WithElementType(F32).WithRank(3))));
ASSERT_NE(subshape, nullptr);
EXPECT_TRUE(
ShapeUtil::Equal(*subshape, ShapeUtil::GetSubshape(tuple_shape, {0})));
EXPECT_TRUE(Match(&tuple_shape,
match::Shape().WithSubshape(
{0}, match::Shape().EqualTo(
&ShapeUtil::GetSubshape(tuple_shape, {0})))));
EXPECT_FALSE(Match(&tuple_shape,
match::Shape().WithSubshape(
{0}, match::Shape().EqualTo(
&ShapeUtil::GetSubshape(tuple_shape, {1})))));
ASSERT_TRUE(Match(
&tuple_shape,
match::Shape().WithSubshape(
{1}, match::Shape(&subshape).WithElementType(S32).WithRank(2))));
ASSERT_NE(subshape, nullptr);
EXPECT_TRUE(
ShapeUtil::Equal(*subshape, ShapeUtil::GetSubshape(tuple_shape, {1})));
EXPECT_TRUE(Match(&tuple_shape,
match::Shape().WithSubshape(
{1}, match::Shape().EqualTo(
&ShapeUtil::GetSubshape(tuple_shape, {1})))));
EXPECT_FALSE(Match(&tuple_shape,
match::Shape().WithSubshape(
{1}, match::Shape().EqualTo(
&ShapeUtil::GetSubshape(tuple_shape, {0})))));
EXPECT_FALSE(
Match(&tuple_shape, match::Shape().WithSubshape({2}, match::Shape())));
EXPECT_FALSE(
Match(&tuple_shape, match::Shape().WithSubshape({0, 0}, match::Shape())));
}
TEST_F(PatternMatcherTest, FusionKind) {
constexpr char kModuleStr[] = R"(
HloModule test_module
fused_computation {
ROOT fp0 = f32[] parameter(0)
}
ENTRY while.v11 {
p0 = f32[] parameter(0)
ROOT fusion = f32[] fusion(p0), kind=kLoop, calls=fused_computation
})";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module,
ParseAndReturnVerifiedModule(kModuleStr));
auto* root = hlo_module->entry_computation()->root_instruction();
EXPECT_TRUE(Match(
root, match::Op().WithFusionKind(HloInstruction::FusionKind::kLoop)));
EXPECT_FALSE(Match(
root, match::Op().WithFusionKind(HloInstruction::FusionKind::kInput)));
EXPECT_FALSE(Match(root->operand(0), match::Op().WithFusionKind(
HloInstruction::FusionKind::kLoop)));
}
TEST_F(PatternMatcherTest, GetTupleElement) {
constexpr char kModuleStr[] = R"(
HloModule test_module
ENTRY while.v11 {
p0 = (f32[], f32[], f32[]) parameter(0)
ROOT gte = f32[] get-tuple-element(p0), index=1
})";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module,
ParseAndReturnVerifiedModule(kModuleStr));
auto* root = hlo_module->entry_computation()->root_instruction();
EXPECT_FALSE(Match(root, match::Op().WithTupleIndex(0)));
EXPECT_TRUE(Match(root, match::Op().WithTupleIndex(1)));
EXPECT_FALSE(Match(root, match::Op().WithTupleIndex(2)));
EXPECT_FALSE(Match(root, match::GetTupleElement(match::Op(), 0)));
EXPECT_TRUE(Match(root, match::GetTupleElement(match::Op(), 1)));
}
TEST_F(PatternMatcherTest, AnyOf) {
constexpr char kModuleStr[] = R"(
HloModule test_module ENTRY test { ROOT constant = f16[] constant(1) })";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module,
ParseAndReturnVerifiedModule(kModuleStr));
auto* root = hlo_module->entry_computation()->root_instruction();
EXPECT_TRUE(
Match(root, match::AnyOf<HloInstruction>(match::ConstantScalar(0),
match::ConstantScalar(1))));
EXPECT_TRUE(
Match(root, match::AnyOf<HloInstruction>(match::ConstantScalar(1),
match::ConstantScalar(0))));
EXPECT_FALSE(
Match(root, match::AnyOf<HloInstruction>(match::ConstantScalar(0),
match::ConstantScalar(2))));
}
TEST_F(PatternMatcherTest, AnyOfInstructionIsInstructionPattern) {
constexpr char kModuleStr[] = R"(
HloModule test_module ENTRY test { ROOT constant = f16[] constant(1) })";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module,
ParseAndReturnVerifiedModule(kModuleStr));
auto* root = hlo_module->entry_computation()->root_instruction();
EXPECT_TRUE(
Match(root, match::AnyOf<HloInstruction>(match::ConstantScalar(0),
match::ConstantScalar(1))));
EXPECT_FALSE(
Match(root, match::AnyOf<HloInstruction>(match::ConstantScalar(0),
match::ConstantScalar(1))
.WithName("foo")));
}
TEST_F(PatternMatcherTest, ConstantScalar) {
using match::ConstantEffectiveScalar;
using match::ConstantScalar;
using match::Op;
using match::Tuple;
constexpr char kModuleStr[] = R"(
HloModule test_module
ENTRY test {
a = s32[] constant(1)
b = s32[1,1] constant({{2}})
c = s32[1,2] constant({{2,2}})
d = f32[] constant(1)
e = f32[] constant(1.25)
ROOT tuple = (s32[], s32[1,1], s32[1,2], f32[], f32[]) tuple(a,b,c,d,e)
})";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module,
ParseAndReturnVerifiedModule(kModuleStr));
auto* root = hlo_module->entry_computation()->root_instruction();
const HloInstruction* a = root->operand(0);
const HloInstruction* b = root->operand(1);
const HloInstruction* c = root->operand(2);
const HloInstruction* d = root->operand(3);
const HloInstruction* e = root->operand(4);
EXPECT_TRUE(Match(a, ConstantScalar()));
EXPECT_TRUE(Match(a, ConstantScalar(1)));
EXPECT_TRUE(Match(a, ConstantEffectiveScalar()));
EXPECT_TRUE(Match(a, ConstantEffectiveScalar(1)));
EXPECT_FALSE(Match(a, ConstantScalar(2)));
EXPECT_FALSE(Match(a, ConstantScalar(2.01)));
EXPECT_FALSE(Match(a, ConstantEffectiveScalar(2)));
EXPECT_FALSE(Match(a, ConstantEffectiveScalar(1.01)));
EXPECT_FALSE(Match(b, ConstantScalar()));
EXPECT_FALSE(Match(b, ConstantScalar(2)));
EXPECT_TRUE(Match(b, ConstantEffectiveScalar()));
EXPECT_TRUE(Match(b, ConstantEffectiveScalar(2)));
EXPECT_FALSE(Match(c, ConstantScalar()));
EXPECT_FALSE(Match(c, ConstantScalar(2)));
EXPECT_FALSE(Match(c, ConstantEffectiveScalar()));
EXPECT_FALSE(Match(c, ConstantEffectiveScalar(2)));
EXPECT_TRUE(Match(d, ConstantScalar(1)));
EXPECT_TRUE(Match(d, ConstantEffectiveScalar(1)));
EXPECT_TRUE(Match(d, ConstantScalar(1.0)));
EXPECT_TRUE(Match(d, ConstantEffectiveScalar(1.0)));
EXPECT_TRUE(Match(e, ConstantScalar(1.25f)));
EXPECT_TRUE(Match(e, ConstantScalar(1.25)));
EXPECT_TRUE(Match(e, ConstantEffectiveScalar(1.25)));
EXPECT_FALSE(Match(e, ConstantScalar(1)));
EXPECT_FALSE(Match(e, ConstantEffectiveScalar(1)));
const HloInstruction* instr = nullptr;
EXPECT_TRUE(Match(a, ConstantScalar(&instr)));
EXPECT_EQ(instr, a);
instr = nullptr;
EXPECT_TRUE(Match(a, ConstantScalar(&instr, 1)));
EXPECT_EQ(instr, a);
instr = nullptr;
EXPECT_TRUE(Match(a, ConstantEffectiveScalar(&instr)));
EXPECT_EQ(instr, a);
instr = nullptr;
EXPECT_TRUE(Match(a, ConstantEffectiveScalar(&instr, 1)));
EXPECT_EQ(instr, a);
}
TEST_F(PatternMatcherTest, MultiplyAnyOrder) {
using match::ConstantScalar;
using match::MultiplyAnyOrder;
constexpr char kModuleStr[] = R"(
HloModule test_module
ENTRY test {
lhs = f16[] constant(42)
rhs = f16[] constant(52)
ROOT multiply = f16[] multiply(lhs, rhs)
})";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module,
ParseAndReturnVerifiedModule(kModuleStr));
auto* root = hlo_module->entry_computation()->root_instruction();
const HloInstruction* instr;
EXPECT_TRUE(Match(
root, MultiplyAnyOrder(&instr, ConstantScalar(42), ConstantScalar(52))));
EXPECT_TRUE(Match(
root, MultiplyAnyOrder(&instr, ConstantScalar(52), ConstantScalar(42))));
EXPECT_TRUE(Match(
root, MultiplyAnyOrder(&instr, ConstantScalar(42), ConstantScalar(52))
.IsNonConstant()));
EXPECT_TRUE(
Match(root, MultiplyAnyOrder(ConstantScalar(42), ConstantScalar(52))
.IsNonConstant()));
}
TEST_F(PatternMatcherTest, AnyOfShortCircuit) {
using match::AnyOf;
using match::Multiply;
using match::Op;
constexpr char kModuleStr[] = R"(
HloModule test_module
ENTRY test {
lhs = f16[] constant(42)
rhs = f16[] constant(52)
ROOT multiply = f16[] multiply(lhs, rhs)
})";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module,
ParseAndReturnVerifiedModule(kModuleStr));
auto* root = hlo_module->entry_computation()->root_instruction();
{
const HloInstruction* mul = nullptr;
const HloInstruction* any = nullptr;
ASSERT_TRUE(Match(
root, AnyOf<HloInstruction>(Multiply(&mul, Op(), Op()), Op(&any))));
EXPECT_NE(nullptr, mul);
EXPECT_EQ(nullptr, any);
}
{
const HloInstruction* mul = nullptr;
const HloInstruction* any = nullptr;
ASSERT_TRUE(Match(
root, AnyOf<HloInstruction>(Op(&any), Multiply(&mul, Op(), Op()))));
EXPECT_NE(nullptr, any);
EXPECT_EQ(nullptr, mul);
}
}
TEST_F(PatternMatcherTest, AllOf) {
using match::AllOf;
using match::Broadcast;
using match::Constant;
using match::Op;
constexpr char kModuleStr[] = R"(
HloModule test_module ENTRY test { ROOT constant = f16[] constant(1) })";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module,
ParseAndReturnVerifiedModule(kModuleStr));
auto* root = hlo_module->entry_computation()->root_instruction();
auto f16_scalar = ShapeUtil::MakeShape(F16, {});
auto f16_pattern = Constant().WithShapeEqualTo(&f16_scalar);
auto f16_compatible_pattern = Constant().WithShapeCompatibleTo(&f16_scalar);
auto scalar_pattern = Constant().WithShape(match::Shape().IsScalar());
ASSERT_TRUE(Match(root, scalar_pattern));
ASSERT_TRUE(Match(root, f16_pattern));
ASSERT_TRUE(Match(root, f16_compatible_pattern));
EXPECT_TRUE(Match(root, AllOf<HloInstruction>(scalar_pattern, f16_pattern,
f16_compatible_pattern)));
EXPECT_TRUE(
Match(root, AllOf<HloInstruction>(f16_pattern, f16_compatible_pattern,
scalar_pattern)));
EXPECT_FALSE(
Match(root, AllOf<HloInstruction>(Broadcast(Op()), f16_pattern)));
EXPECT_FALSE(Match(
root, AllOf<HloInstruction>(Broadcast(Op()), f16_compatible_pattern)));
EXPECT_FALSE(
Match(root, AllOf<HloInstruction>(Broadcast(Op()), scalar_pattern)));
}
TEST_F(PatternMatcherTest, AllOfNoCaptureIfNotMatch) {
using match::AllOf;
using match::Broadcast;
using match::Constant;
using match::Op;
constexpr char kModuleStr[] = R"(
HloModule test_module
ENTRY test {
ROOT v = f16[] constant(42)
})";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module,
ParseAndReturnVerifiedModule(kModuleStr));
auto* root = hlo_module->entry_computation()->root_instruction();
const HloInstruction* constant = nullptr;
ASSERT_FALSE(
Match(root, AllOf<HloInstruction>(Constant(&constant), Broadcast(Op()))));
EXPECT_EQ(nullptr, constant);
ASSERT_TRUE(Match(root, Constant(&constant)));
EXPECT_NE(nullptr, constant);
}
TEST_F(PatternMatcherTest, TestNoCapture) {
using match::Constant;
constexpr char kModuleStr[] = R"(
HloModule test_module
ENTRY test {
ROOT v = f16[] constant(42)
})";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module,
ParseAndReturnVerifiedModule(kModuleStr));
auto* root = hlo_module->entry_computation()->root_instruction();
const HloInstruction* constant = nullptr;
ASSERT_TRUE(Match(root, Constant(&constant), {false}));
EXPECT_EQ(nullptr, constant);
}
TEST_F(PatternMatcherTest, TestCaptureMatchedSubPatternForAnyOf) {
using match::Add;
using match::AddAnyOrder;
using match::AnyOf;
using match::Op;
constexpr char kModuleStr[] = R"(
HloModule test_module
ENTRY test {
u = f16[] parameter(0)
v = f16[] parameter(1)
ROOT add = f16[] add(u, v)
})";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module,
ParseAndReturnVerifiedModule(kModuleStr));
auto* root = hlo_module->entry_computation()->root_instruction();
const HloInstruction* addend0 = nullptr;
const HloInstruction* addend1 = nullptr;
const HloInstruction* addend2 = nullptr;
auto add2_pattern = Add(Op(&addend0), Op(&addend1));
auto add3_pattern = AnyOf<HloInstruction>(
AddAnyOrder(add2_pattern, Op(&addend2)), add2_pattern, Op(&addend0));
ASSERT_TRUE(Match(root, add3_pattern));
EXPECT_NE(nullptr, addend0);
EXPECT_NE(nullptr, addend1);
EXPECT_EQ(nullptr, addend2);
}
TEST_F(PatternMatcherTest, TestConcat) {
using match::Concatenate;
using match::ConstantScalar;
using match::Op;
using match::Reshape;
constexpr char kModuleStr[] = R"(
HloModule test_module
ENTRY test {
c1 = u32[] constant(1)
c2 = u32[] constant(2)
c3 = u32[] constant(3)
c4 = u32[] constant(4)
r1 = u32[1] reshape(c1)
r2 = u32[1] reshape(c2)
r3 = u32[1] reshape(c3)
r4 = u32[1] reshape(c4)
ROOT concat = u32[4] concatenate(r1, r2, r3, r4), dimensions={0}
})";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module,
ParseAndReturnVerifiedModule(kModuleStr));
auto* root = hlo_module->entry_computation()->root_instruction();
ASSERT_TRUE(Match(
root,
Concatenate(Reshape(ConstantScalar(1)), Reshape(ConstantScalar(2)),
Reshape(ConstantScalar(3)), Reshape(ConstantScalar(4)))));
ASSERT_FALSE(Match(
root,
Concatenate(Reshape(ConstantScalar(2)), Reshape(ConstantScalar(1)),
Reshape(ConstantScalar(3)), Reshape(ConstantScalar(4)))));
ASSERT_FALSE(Match(
root, Concatenate(Reshape(ConstantScalar(1)), Reshape(ConstantScalar(2)),
Reshape(ConstantScalar(3)))));
ASSERT_FALSE(Match(
root, Concatenate(Reshape(ConstantScalar(2)), Reshape(ConstantScalar(3)),
Reshape(ConstantScalar(4)))));
}
TEST_F(PatternMatcherTest, TestWithElementType) {
constexpr char kModuleStr[] = R"(
HloModule test_module
ENTRY test {
ROOT v = f16[] constant(42)
})";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module,
ParseAndReturnVerifiedModule(kModuleStr));
auto* root = hlo_module->entry_computation()->root_instruction();
EXPECT_TRUE(Match(root, m::Op().WithElementType(F16)));
EXPECT_FALSE(Match(root, m::Op().WithElementType(F32)));
}
TEST_F(PatternMatcherTest, TestWithOperandIfPresent) {
constexpr char kModuleStr[] = R"(
HloModule test_module
ENTRY test {
a = f16[] constant(42)
b = f16[] add(a, a)
ROOT root = tuple(a, b)
})";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module,
ParseAndReturnVerifiedModule(kModuleStr));
auto* root = hlo_module->entry_computation()->root_instruction();
auto* a = root->operand(0);
auto* b = root->operand(1);
EXPECT_TRUE(Match(a, m::Op().WithOperandIfPresent(0, m::Iota())));
EXPECT_TRUE(Match(b, m::Op().WithOperandIfPresent(0, m::Constant())));
EXPECT_TRUE(Match(b, m::Op().WithOperandIfPresent(1, m::Constant())));
EXPECT_FALSE(Match(b, m::Op().WithOperandIfPresent(0, m::Iota())));
EXPECT_TRUE(Match(b, m::Op().WithOperandIfPresent(2, m::Iota())));
EXPECT_TRUE(Match(b, m::Op().WithOperandIfPresent(3, m::Iota())));
}
TEST_F(PatternMatcherTest, TestWithPredicate) {
constexpr char kModuleStr[] = R"(
HloModule test_module
ENTRY test {
ROOT a = f16[] constant(42)
})";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module,
ParseAndReturnVerifiedModule(kModuleStr));
auto* root = hlo_module->entry_computation()->root_instruction();
EXPECT_TRUE(
Match(root, m::Op().WithPredicate([&](const HloInstruction* instr) {
return instr == root;
})));
EXPECT_FALSE(
Match(root, m::Op().WithPredicate([&](const HloInstruction* instr) {
return instr != root;
})));
}
template <typename Pattern>
std::string Description(const Pattern& pattern) {
std::stringstream ss;
pattern.DescribeTo(&ss);
return ss.str();
}
template <typename Elem, typename Pattern>
std::string Explanation(Elem* elem, const Pattern& pattern,
bool single_user_only = false) {
std::stringstream ss;
  MatchOption options{/*capture=*/true,
                      /*single_user_only=*/single_user_only,
                      /*explain_os=*/&ss};
Match(elem, pattern, options);
return ss.str();
}
template <typename Elem, typename Pattern>
std::string Explanation(const std::unique_ptr<Elem>& elem,
const Pattern& pattern) {
return Explanation(elem.get(), pattern);
}
template <typename Elem, typename Pattern>
std::string Explanation(const Elem& elem, const Pattern& pattern) {
return Explanation(&elem, pattern);
}
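// Checks both the pattern's DescribeTo() output and the explanation produced
// when matching `elem` against the pattern fails.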
#define EXPECT_DESC_AND_EXPLANATION(elem, pattern, expected_desc, \
expected_explanation) \
do { \
EXPECT_EQ(Description(pattern), (expected_desc)); \
EXPECT_EQ(Explanation((elem), (pattern)), expected_explanation); \
} while (0)
TEST_F(PatternMatcherTest, LayoutDescribeToAndExplain) {
auto layout = LayoutUtil::MakeLayout({1, 2});
auto layout2 = LayoutUtil::MakeLayout({2, 2});
EXPECT_DESC_AND_EXPLANATION(static_cast<const Layout*>(nullptr), m::Layout(),
"a layout", "Layout is null");
EXPECT_DESC_AND_EXPLANATION(layout2, m::Layout().EqualTo(&layout),
"a layout equal to {1,2}",
"Layout {2,2} is not equal to expected {1,2}");
}
TEST_F(PatternMatcherTest, CustomCallTargetMatcherDescribeAndExplain) {
constexpr char kModuleStr[] = R"(
HloModule test_module
ENTRY test {
ROOT out = f32[] custom-call(), custom_call_target="test_target"
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module,
ParseAndReturnVerifiedModule(kModuleStr));
auto* root = hlo_module->entry_computation()->root_instruction();
EXPECT_TRUE(Match(root, match::Op().WithCustomCallTarget({"test_target"})));
EXPECT_TRUE(Match(
root, match::Op().WithCustomCallTarget({"test_target", "other_target"})));
EXPECT_TRUE(Match(
root, match::Op().WithCustomCallTarget({"other_target", "test_target"})));
EXPECT_FALSE(Match(root, match::Op().WithCustomCallTarget({"other_target"})));
EXPECT_FALSE(Match(root, match::Op().WithCustomCallTarget(
{"other_target", "other_target2"})));
EXPECT_DESC_AND_EXPLANATION(
root, match::Op().WithCustomCallTarget({"other_target"}),
"an HloInstruction custom call with target 'other_target'",
"HloInstruction is not a custom call with a target 'other_target'\nin "
"out = f32[] custom-call(), custom_call_target=\"test_target\"");
EXPECT_DESC_AND_EXPLANATION(
root, match::Op().WithCustomCallTarget({"other_target", "other_target2"}),
"an HloInstruction custom call with target in {other_target, "
"other_target2}",
"HloInstruction is not a custom call with a target in {other_target, "
"other_target2}\nin "
"out = f32[] custom-call(), custom_call_target=\"test_target\"");
}
TEST_F(PatternMatcherTest, ShapeDescribeToAndExplain) {
auto shape = ShapeUtil::MakeShapeWithDenseLayout(F32, {1, 2}, {0, 1});
auto layout = shape.layout();
EXPECT_DESC_AND_EXPLANATION(static_cast<const Shape*>(nullptr), m::Shape(),
"a shape", "Shape is null");
EXPECT_DESC_AND_EXPLANATION(
ShapeUtil::MakeShapeWithDenseLayout(F32, {1, 2}, {1, 0}),
m::Shape().EqualTo(&shape), "a shape equal to f32[1,2]{0,1}",
"Shape not equal to f32[1,2]{0,1}\n"
"in f32[1,2]{1,0}");
EXPECT_DESC_AND_EXPLANATION(ShapeUtil::MakeShape(F32, {2, 2}),
m::Shape().CompatibleTo(&shape),
"a shape compatible with f32[1,2]",
"Shape not compatible with f32[1,2]\n"
"in f32[2,2]{1,0}");
EXPECT_DESC_AND_EXPLANATION(shape, m::Shape().WithElementType(F16),
"a shape with element type F16",
"Shape does not have element type F16\n"
"in f32[1,2]{0,1}");
EXPECT_DESC_AND_EXPLANATION(shape, m::Shape().IsScalar(),
"a shape that represents a scalar",
"Shape is not a scalar\n"
"in f32[1,2]{0,1}");
EXPECT_DESC_AND_EXPLANATION(ShapeUtil::MakeNil(), m::Shape().IsArray(),
"a shape that represents an array",
"Shape is not an array\n"
"in ()");
EXPECT_DESC_AND_EXPLANATION(shape, m::Shape().IsTuple(),
"a shape that represents a tuple",
"Shape is not a tuple\n"
"in f32[1,2]{0,1}");
EXPECT_DESC_AND_EXPLANATION(shape, m::Shape().IsEffectiveScalar(),
"a shape that is an effective scalar",
"Shape is not an effective scalar\n"
"in f32[1,2]{0,1}");
EXPECT_DESC_AND_EXPLANATION(shape, m::Shape().WithRank(42),
"a shape that has 42 dimensions",
"Shape does not have rank 42\n"
"in f32[1,2]{0,1}");
EXPECT_DESC_AND_EXPLANATION(shape, m::Shape().WithRank(0),
"a shape that is a scalar",
"Shape is not a scalar\n"
"in f32[1,2]{0,1}");
EXPECT_DESC_AND_EXPLANATION(shape, m::Shape().WithRank(1).IsArray(),
"a shape:\n"
" * that has 1 dimension AND\n"
" * that represents an array",
"Shape does not have rank 1\n"
"in f32[1,2]{0,1}");
EXPECT_DESC_AND_EXPLANATION(ShapeUtil::MakeNil(),
m::Shape().IsArray().WithRank(1),
"a shape:\n"
" * that represents an array AND\n"
" * that has 1 dimension",
"Shape is not an array\n"
"in ()");
EXPECT_DESC_AND_EXPLANATION(
ShapeUtil::MakeShapeWithDenseLayout(F32, {1, 2}, {1, 0}),
m::Shape().WithLayoutEqualTo(&layout),
"a shape with\n a layout equal to {0,1}",
"Layout {1,0} is not equal to expected {0,1}\n"
"in f32[1,2]{1,0}");
EXPECT_DESC_AND_EXPLANATION(shape,
m::Shape().WithSubshapeEqualTo({10}, &shape),
"a shape with subshape at index {10} which is\n"
" a shape equal to f32[1,2]{0,1}",
"No subshape at {10}\n"
"in f32[1,2]{0,1}");
EXPECT_DESC_AND_EXPLANATION(
ShapeUtil::MakeTupleShape({ShapeUtil::MakeShape(F32, {2, 2})}),
m::Shape().WithSubshapeEqualTo({0}, &shape),
"a shape with subshape at index {0} which is\n"
" a shape equal to f32[1,2]{0,1}",
"Shape not equal to f32[1,2]{0,1}\n"
"in f32[2,2]{1,0}\n"
"in subshape at {0}\n"
"in (f32[2,2])");
EXPECT_DESC_AND_EXPLANATION(shape,
m::Shape().WithSubshapeCompatibleTo({10}, &shape),
"a shape with subshape at index {10} which is\n"
" a shape compatible with f32[1,2]",
"No subshape at {10}\n"
"in f32[1,2]{0,1}");
EXPECT_DESC_AND_EXPLANATION(
ShapeUtil::MakeTupleShape({ShapeUtil::MakeShape(F32, {2, 2})}),
m::Shape().WithSubshapeCompatibleTo({0}, &shape),
"a shape with subshape at index {0} which is\n"
" a shape compatible with f32[1,2]",
"Shape not compatible with f32[1,2]\n"
"in f32[2,2]{1,0}\n"
"in subshape at {0}\n"
"in (f32[2,2])");
EXPECT_DESC_AND_EXPLANATION(
ShapeUtil::MakeTupleShape({ShapeUtil::MakeTupleShape({shape})}),
m::Shape().WithSubshape({0, 0}, m::Shape().IsScalar()),
"a shape with subshape at index {0,0} which is\n"
" a shape that represents a scalar",
"Shape is not a scalar\n"
"in f32[1,2]{0,1}\n"
"in subshape at {0,0}\n"
"in ((f32[1,2]))");
}
std::unique_ptr<HloInstruction> SetName(absl::string_view name,
std::unique_ptr<HloInstruction> instr) {
instr->SetAndSanitizeName(name);
return instr;
}
TEST_F(PatternMatcherTest, HloInstructionDescribeToAndExplain) {
std::unique_ptr<HloInstruction> iota =
SetName("i", HloInstruction::CreateIota(ShapeUtil::MakeShape(S32, {42}),
0));
std::unique_ptr<HloInstruction> constant =
SetName("c", HloInstruction::CreateConstant(LiteralUtil::CreateR0(0)));
EXPECT_DESC_AND_EXPLANATION(static_cast<const HloInstruction*>(nullptr),
m::Op(), "an HloInstruction",
"HloInstruction* is null");
EXPECT_DESC_AND_EXPLANATION(iota, m::Op().WithName("foo"),
"an HloInstruction named \"foo\"",
"HloInstruction not named \"foo\"\n"
"in i = s32[42]{0} iota(), iota_dimension=0");
EXPECT_DESC_AND_EXPLANATION(iota, m::Op().WithOpcode(HloOpcode::kAdd),
"an HloInstruction with opcode add",
"HloInstruction doesn't have opcode add\n"
"in i = s32[42]{0} iota(), iota_dimension=0");
EXPECT_DESC_AND_EXPLANATION(
constant, m::Op().IsNonConstant(),
"an HloInstruction with any opcode other than constant",
"HloInstruction has opcode constant, expected anything else\n"
"in c = s32[] constant(0)");
EXPECT_DESC_AND_EXPLANATION(iota, m::Op().WithNumOperands(42),
"an HloInstruction with 42 operands",
"HloInstruction doesn't have 42 operands\n"
"in i = s32[42]{0} iota(), iota_dimension=0");
EXPECT_DESC_AND_EXPLANATION(iota, m::Op().WithShape(m::Shape().IsTuple()),
"an HloInstruction outputting\n"
" a shape that represents a tuple",
"Shape is not a tuple\n"
"in s32[42]{0}\n"
"in output shape\n"
"in i = s32[42]{0} iota(), iota_dimension=0");
EXPECT_DESC_AND_EXPLANATION(iota, m::Op().WithShape(F32, {42}),
"an HloInstruction outputting\n"
" a shape:\n"
" * with element type F32 AND\n"
" * with dimensions [42]",
"Shape does not have element type F32\n"
"in s32[42]{0}\n"
"in output shape\n"
"in i = s32[42]{0} iota(), iota_dimension=0");
EXPECT_DESC_AND_EXPLANATION(iota, m::Op().WithShape(S32, {128}),
"an HloInstruction outputting\n"
" a shape:\n"
" * with element type S32 AND\n"
" * with dimensions [128]",
"Shape does not have dimensions [128]\n"
"in s32[42]{0}\n"
"in output shape\n"
"in i = s32[42]{0} iota(), iota_dimension=0");
EXPECT_DESC_AND_EXPLANATION(
iota, m::Op().WithOperand(2, m::Op().WithOpcode(HloOpcode::kAdd)),
"an HloInstruction with operand 2 which is:\n"
" an HloInstruction with opcode add",
"desired operand index 2 is out of bounds\n"
"in i = s32[42]{0} iota(), iota_dimension=0");
EXPECT_DESC_AND_EXPLANATION(
SetName("a", HloInstruction::CreateBinary(ShapeUtil::MakeShape(S32, {}),
HloOpcode::kAdd, constant.get(),
constant.get())),
m::Op().WithOperand(1, m::Op().IsNonConstant()),
"an HloInstruction with operand 1 which is:\n"
" an HloInstruction with any opcode other than constant",
"HloInstruction has opcode constant, expected anything else\n"
"in c = s32[] constant(0)\n"
"in operand 1\n"
"in a = s32[] add(s32[] c, s32[] c)");
EXPECT_DESC_AND_EXPLANATION(
iota, m::Op().WithFusionKind(HloInstruction::FusionKind::kLoop),
"an HloInstruction with fusion kind kLoop",
"HloInstruction does not have fusion kind kLoop; it's not a fusion\n"
"in i = s32[42]{0} iota(), iota_dimension=0");
EXPECT_DESC_AND_EXPLANATION(
iota, m::Op().WithTupleIndex(42),
"an HloInstruction which is a GTE with index 42",
"HloInstruction is not a GTE with index 42; it's not a GTE at all\n"
"in i = s32[42]{0} iota(), iota_dimension=0");
EXPECT_DESC_AND_EXPLANATION(iota, m::Op().IsConstantScalar(),
"an HloInstruction which is a constant scalar",
"HloInstruction is not a constant\n"
"in i = s32[42]{0} iota(), iota_dimension=0");
EXPECT_DESC_AND_EXPLANATION(
SetName("c", HloInstruction::CreateConstant(
LiteralUtil::CreateR1<int>({1, 2}))),
m::Op().IsConstantEffectiveScalar(),
"an HloInstruction which is a constant effective scalar",
"HloInstruction is not an effective scalar\n"
"in c = s32[2]{0} constant({1, 2})");
EXPECT_DESC_AND_EXPLANATION(
SetName("c", HloInstruction::CreateConstant(LiteralUtil::CreateR0(10))),
m::Op().IsConstantScalar(42),
"an HloInstruction which is a constant scalar with value 42",
"HloInstruction's constant value 10 did not match expected value 42\n"
"in c = s32[] constant(10)");
EXPECT_DESC_AND_EXPLANATION(
SetName("c", HloInstruction::CreateConstant(LiteralUtil::CreateR0(2.25))),
m::Op().IsConstantEffectiveScalar(1.25),
"an HloInstruction which is a constant effective scalar with value 1.25",
"HloInstruction's constant value 2.25 did not match expected value 1.25\n"
"in c = f64[] constant(2.25)");
EXPECT_DESC_AND_EXPLANATION(
constant, m::Op().Is(iota.get()),
absl::StrCat("an HloInstruction which is 0x", absl::Hex(iota.get()),
" (i = s32[42]{0} iota(), iota_dimension=0)"),
absl::StrCat("HloInstruction 0x", absl::Hex(constant.get()), " is not 0x",
absl::Hex(iota.get()),
" (i = s32[42]{0} iota(), iota_dimension=0)\n"
"in c = s32[] constant(0)"));
EXPECT_DESC_AND_EXPLANATION(
SetName("a",
HloInstruction::CreateBinary(constant->shape(), HloOpcode::kAdd,
constant.get(), constant.get())),
m::Op().WithOperandIfPresent(0, m::Iota()),
"an HloInstruction either with fewer than 1 operand, or with an operand "
"0 which is:\n"
" an HloInstruction with opcode iota",
"HloInstruction doesn't have opcode iota\n"
"in c = s32[] constant(0)\n"
"in operand 0\n"
"in a = s32[] add(s32[] c, s32[] c)");
EXPECT_DESC_AND_EXPLANATION(
constant, m::Op().WithPredicate(HloPredicateFalse),
"an HloInstruction which matches a user-specified predicate",
"HloInstruction does not match user-specified predicate\n"
"in c = s32[] constant(0)");
}
TEST_F(PatternMatcherTest, HloInstructionMatcherAnyOrderDescribeTo) {
auto scalar_s32 = ShapeUtil::MakeShape(S32, {});
EXPECT_DESC_AND_EXPLANATION(
SetName("a", HloInstruction::CreateBinary(
scalar_s32, HloOpcode::kAdd,
SetName("b", HloInstruction::CreateConstant(
LiteralUtil::CreateR0(0)))
.get(),
SetName("c", HloInstruction::CreateConstant(
LiteralUtil::CreateR0(0)))
.get())),
m::AddAnyOrder(m::Op().WithName("b"), m::Op().WithName("bar")),
"an HloInstruction:\n"
" * with opcode add AND\n"
" * with two operands in either order:\n"
" - an HloInstruction named \"b\"\n"
" - an HloInstruction named \"bar\"",
"HloInstruction's operands (ignoring order) did not match second "
"matcher. Specifically,\n"
" - an HloInstruction named \"bar\"\n"
"does not match LHS:\n"
" - HloInstruction not named \"bar\"\n"
" in b = s32[] constant(0)\n"
"does not match RHS:\n"
" - HloInstruction not named \"bar\"\n"
" in c = s32[] constant(0)\n"
"in a = s32[] add(s32[] b, s32[] c)");
EXPECT_DESC_AND_EXPLANATION(
SetName("a",
HloInstruction::CreateBinary(
scalar_s32, HloOpcode::kAdd,
HloInstruction::CreateParameter(0, scalar_s32, "p").get(),
SetName("c", HloInstruction::CreateConstant(
LiteralUtil::CreateR0(0)))
.get())),
m::AddAnyOrder(m::Op().IsConstantScalar(), m::Op().IsConstant()),
"an HloInstruction:\n"
" * with opcode add AND\n"
" * with two operands in either order:\n"
" - an HloInstruction which is a constant scalar\n"
" - an HloInstruction with opcode constant",
"HloInstruction's LHS operand did not match either of the two matchers. "
"Specifically,\n"
" - an HloInstruction which is a constant scalar\n"
"does not match LHS:\n"
" - HloInstruction is not a constant\n"
" in p = s32[] parameter(0)\n"
"and\n"
" - an HloInstruction with opcode constant\n"
"does not match LHS:\n"
" - HloInstruction doesn't have opcode constant\n"
" in p = s32[] parameter(0)\n"
"in a = s32[] add(s32[] p, s32[] c)");
}
TEST_F(PatternMatcherTest, AnyOfMatcherDescribeToAndExplain) {
EXPECT_DESC_AND_EXPLANATION(
ShapeUtil::MakeScalarShape(S32),
m::AnyOf<Shape>(m::Shape().WithRank(1), m::Shape().WithElementType(F32)),
"any of:\n"
" - a shape that has 1 dimension OR\n"
" - a shape with element type F32",
"None of the following matchers succeeded:\n"
"Matcher #1\n"
" - a shape that has 1 dimension\n"
"failed with\n"
" - Shape does not have rank 1\n"
" in s32[]\n"
"Matcher #2\n"
" - a shape with element type F32\n"
"failed with\n"
" - Shape does not have element type F32\n"
" in s32[]");
}
TEST_F(PatternMatcherTest, Parameter) {
auto param =
HloInstruction::CreateParameter(1, ShapeUtil::MakeShape(F32, {}), "p1");
auto non_param =
SetName("c", HloInstruction::CreateConstant(LiteralUtil::CreateR0(0)));
EXPECT_FALSE(Match(param.get(), m::Parameter(0)));
EXPECT_TRUE(Match(param.get(), m::Parameter()));
EXPECT_TRUE(Match(param.get(), m::Parameter(1)));
EXPECT_FALSE(Match(non_param.get(), m::Parameter()));
EXPECT_FALSE(Match(non_param.get(), m::Parameter(1)));
EXPECT_DESC_AND_EXPLANATION(non_param, m::Parameter(1),
"an HloInstruction:\n"
" * with opcode parameter AND\n"
" * which is parameter 1",
"HloInstruction doesn't have opcode parameter\n"
"in c = s32[] constant(0)");
EXPECT_EQ(Explanation(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {}), "p0"),
m::Parameter(1)),
"HloInstruction is not parameter 1\n"
"in p0 = f32[] parameter(0)");
}
TEST_F(PatternMatcherTest, OneUseAndOneUser) {
auto param =
HloInstruction::CreateParameter(0, ShapeUtil::MakeShape(F32, {}), "p0");
EXPECT_FALSE(Match(param.get(), m::Op().WithOneUse()));
EXPECT_DESC_AND_EXPLANATION(
param, m::Op().WithOneUse(),
"an HloInstruction which has exactly one use",
"HloInstruction has 0 users, but expected exactly one.\n"
"in p0 = f32[] parameter(0)");
EXPECT_FALSE(Match(param.get(), m::Op().WithOneUser()));
EXPECT_DESC_AND_EXPLANATION(
param, m::Op().WithOneUser(),
"an HloInstruction which has exactly one user (but possibly is used "
"multiple times by that instruction)",
"HloInstruction has 0 users, but expected exactly one.\n"
"in p0 = f32[] parameter(0)");
{
auto reshape =
SetName("r", HloInstruction::CreateReshape(
ShapeUtil::MakeShape(F32, {1}), param.get()));
EXPECT_TRUE(Match(param.get(), m::Op().WithOneUse()));
EXPECT_TRUE(Match(param.get(), m::Op().WithOneUser()));
auto reshape1 =
SetName("r1", HloInstruction::CreateReshape(
ShapeUtil::MakeShape(F32, {1}), param.get()));
EXPECT_FALSE(Match(param.get(), m::Op().WithOneUse()));
EXPECT_FALSE(Match(param.get(), m::Op().WithOneUser()));
const char* kMultipleUserExplanation =
"HloInstruction has 2 users, but expected exactly one.\n"
"All users:\n"
" - r = f32[1]{0} reshape(f32[] p0)\n"
" - r1 = f32[1]{0} reshape(f32[] p0)\n"
"in p0 = f32[] parameter(0)";
EXPECT_EQ(Explanation(param.get(), m::Op().WithOneUse()),
kMultipleUserExplanation);
EXPECT_EQ(Explanation(param.get(), m::Op().WithOneUser()),
kMultipleUserExplanation);
}
auto add = SetName("add", HloInstruction::CreateBinary(
ShapeUtil::MakeShape(F32, {}), HloOpcode::kAdd,
param.get(), param.get()));
EXPECT_TRUE(Match(param.get(), m::Op().WithOneUser()));
EXPECT_FALSE(Match(param.get(), m::Op().WithOneUse()));
EXPECT_EQ(Explanation(param.get(), m::Op().WithOneUse()),
"HloInstruction is used 2 times by its user, but is expected to be "
"used just once: add = f32[] add(f32[] p0, f32[] p0)\n"
"in p0 = f32[] parameter(0)");
}
TEST_F(PatternMatcherTest, MatchSingleUserOnlyUnaryOpOneUser) {
auto param =
HloInstruction::CreateParameter(0, ShapeUtil::MakeShape(F32, {}), "p");
auto reshape =
SetName("reshape", HloInstruction::CreateReshape(
ShapeUtil::MakeShape(F32, {1}), param.get()));
EXPECT_TRUE(MatchSingleUserOnly(reshape.get(), m::Reshape(m::Op())));
EXPECT_TRUE(Match(reshape.get(), m::Reshape(m::Op().WithOneUser())));
}
TEST_F(PatternMatcherTest, MatchSingleUserOnlyUnaryOpTwoUsers) {
auto param =
HloInstruction::CreateParameter(0, ShapeUtil::MakeShape(F32, {}), "p");
auto reshape =
SetName("reshape", HloInstruction::CreateReshape(
ShapeUtil::MakeShape(F32, {1}), param.get()));
auto bitcast =
SetName("bitcast", HloInstruction::CreateBitcast(
ShapeUtil::MakeShape(F32, {1}), param.get()));
EXPECT_TRUE(MatchSingleUserOnly(param.get(), m::Op()));
EXPECT_TRUE(Match(param.get(), m::Op()));
EXPECT_TRUE(MatchSingleUserOnly(bitcast.get(), m::Bitcast()));
EXPECT_TRUE(Match(bitcast.get(), m::Bitcast()));
EXPECT_FALSE(MatchSingleUserOnly(bitcast.get(), m::Bitcast(m::Op())));
EXPECT_FALSE(Match(bitcast.get(), m::Bitcast(m::Op().WithOneUser())));
EXPECT_EQ(Explanation(bitcast.get(), m::Bitcast(m::Op()),
                        /*single_user_only=*/true),
"Operand 0 of HloInstruction has 2 users. Expected 1.\nin bitcast "
"= f32[1]{0} bitcast(f32[] p)");
}
TEST_F(PatternMatcherTest, MatchSingleUserOnlyBinaryOpOneUser) {
auto param0 =
HloInstruction::CreateParameter(0, ShapeUtil::MakeShape(F32, {}), "p0");
auto add = SetName("add", HloInstruction::CreateBinary(
ShapeUtil::MakeShape(F32, {}), HloOpcode::kAdd,
param0.get(), param0.get()));
EXPECT_TRUE(MatchSingleUserOnly(add.get(), m::Add(m::Op(), m::Op())));
EXPECT_TRUE(
Match(add.get(), m::Add(m::Op().WithOneUser(), m::Op().WithOneUser())));
}
TEST_F(PatternMatcherTest, MatchSingleUserOnlyBinaryOpTwoUsers) {
auto param0 =
HloInstruction::CreateParameter(0, ShapeUtil::MakeShape(F32, {}), "p0");
auto param1 =
HloInstruction::CreateParameter(0, ShapeUtil::MakeShape(F32, {}), "p1");
auto add = SetName("add", HloInstruction::CreateBinary(
ShapeUtil::MakeShape(F32, {}), HloOpcode::kAdd,
param0.get(), param0.get()));
auto mul =
SetName("mul", HloInstruction::CreateBinary(ShapeUtil::MakeShape(F32, {}),
HloOpcode::kMultiply,
param1.get(), param0.get()));
EXPECT_TRUE(MatchSingleUserOnly(mul.get(), m::Multiply()));
EXPECT_TRUE(Match(mul.get(), m::Multiply()));
EXPECT_FALSE(MatchSingleUserOnly(mul.get(), m::Multiply(m::Op(), m::Op())));
EXPECT_FALSE(Match(
mul.get(), m::Multiply(m::Op().WithOneUser(), m::Op().WithOneUser())));
EXPECT_EQ(Explanation(mul.get(), m::Multiply(m::Op(), m::Op()),
                        /*single_user_only=*/true),
"Operand 1 of HloInstruction has 2 users. Expected 1.\nin mul = "
"f32[] multiply(f32[] p1, f32[] p0)");
EXPECT_FALSE(MatchSingleUserOnly(add.get(), m::Add(m::Op(), m::Op())));
EXPECT_FALSE(
Match(add.get(), m::Add(m::Op().WithOneUser(), m::Op().WithOneUser())));
EXPECT_EQ(Explanation(add.get(), m::Add(m::Op(), m::Op()),
                        /*single_user_only=*/true),
"Operand 0 of HloInstruction has 2 users. Expected 1.\nin add = "
"f32[] add(f32[] p0, f32[] p0)");
}
TEST_F(PatternMatcherTest, MatchSingleUserOnlyBinaryOpTwoUsersLowerLevel) {
auto param0 =
HloInstruction::CreateParameter(0, ShapeUtil::MakeShape(F32, {}), "p0");
auto param1 =
HloInstruction::CreateParameter(0, ShapeUtil::MakeShape(F32, {}), "p1");
auto add = SetName("add", HloInstruction::CreateBinary(
ShapeUtil::MakeShape(F32, {}), HloOpcode::kAdd,
param0.get(), param0.get()));
auto mul =
SetName("mul", HloInstruction::CreateBinary(ShapeUtil::MakeShape(F32, {}),
HloOpcode::kMultiply,
param1.get(), param0.get()));
auto div = SetName("div", HloInstruction::CreateBinary(
ShapeUtil::MakeShape(F32, {}),
HloOpcode::kDivide, add.get(), mul.get()));
EXPECT_TRUE(
MatchSingleUserOnly(div.get(), m::Divide(m::Add(), m::Multiply())));
EXPECT_TRUE(Match(div.get(), m::Divide(m::Add().WithOneUser(),
m::Multiply().WithOneUser())));
EXPECT_FALSE(MatchSingleUserOnly(
div.get(), m::Divide(m::Add(m::Op(), m::Op()), m::Multiply())));
EXPECT_FALSE(Match(
div.get(),
m::Divide(
m::Add(m::Op().WithOneUser(), m::Op().WithOneUser()).WithOneUser(),
m::Multiply().WithOneUser())));
EXPECT_EQ(Explanation(add.get(), m::Add(m::Op(), m::Op()),
                        /*single_user_only=*/true),
"Operand 0 of HloInstruction has 2 users. Expected 1.\nin add = "
"f32[] add(f32[] p0, f32[] p0)");
}
TEST_F(PatternMatcherTest, Comparison) {
auto shape = ShapeUtil::MakeShape(F32, {1});
auto p0 = HloInstruction::CreateParameter(0, shape, "param.0");
auto p1 = HloInstruction::CreateParameter(1, shape, "param.1");
auto eq = HloInstruction::CreateCompare(shape, p0.get(), p1.get(),
ComparisonDirection::kEq);
auto ne = HloInstruction::CreateCompare(shape, p0.get(), p1.get(),
ComparisonDirection::kNe);
auto add =
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, p0.get(), p1.get());
auto le = HloInstruction::CreateCompare(shape, p0.get(), add.get(),
ComparisonDirection::kLe);
EXPECT_TRUE(Match(eq.get(), m::Compare()));
EXPECT_TRUE(Match(eq.get(), m::Eq()));
EXPECT_TRUE(Match(eq.get(), m::Eq(m::Parameter(0), m::Parameter(1))));
EXPECT_TRUE(Match(eq.get(), m::EqAnyOrder(m::Parameter(1), m::Parameter(0))));
EXPECT_TRUE(Match(ne.get(), m::Compare()));
EXPECT_TRUE(Match(ne.get(), m::Ne()));
EXPECT_TRUE(Match(
le.get(),
m::Compare(m::Parameter(0), m::Add(m::Parameter(0), m::Parameter(1)))));
EXPECT_TRUE(Match(le.get(), m::Le(m::Parameter(0),
m::Add(m::Parameter(0), m::Parameter(1)))));
EXPECT_FALSE(Match(eq.get(), m::Add()));
EXPECT_FALSE(Match(eq.get(), m::Ne()));
EXPECT_FALSE(
Match(le.get(),
m::Eq(m::Parameter(0), m::Add(m::Parameter(0), m::Parameter(1)))));
EXPECT_FALSE(Match(eq.get(), m::Eq(m::Parameter(1), m::Parameter(0))));
EXPECT_DESC_AND_EXPLANATION(
eq, m::Ne().WithOneUser(),
"an HloInstruction:\n"
" * with opcode compare AND\n"
" * which has comparison direction NE AND\n"
" * which has exactly one user (but possibly is used "
"multiple times by that instruction)",
"HloInstruction is not comparison NE\n"
"in compare = f32[1]{0} compare(f32[1]{0} param.0, f32[1]{0} param.1), "
"direction=EQ");
}
TEST_F(PatternMatcherTest, ConvDnums) {
TF_ASSERT_OK_AND_ASSIGN(ConvolutionDimensionNumbers dnums,
ParseConvolutionDimensionNumbers("bf01_oi01->bf01"));
auto param =
HloInstruction::CreateParameter(0, ShapeUtil::MakeShape(F32, {}), "p0");
auto op = HloInstruction::CreateCustomCall(ShapeUtil::MakeShape(F32, {}),
                                             /*operands=*/{},
                                             /*custom_call_target=*/"foo");
op->set_convolution_dimension_numbers(dnums);
EXPECT_TRUE(Match(op.get(), m::CustomCall().WithConvDnums(dnums)));
EXPECT_TRUE(
Match(op.get(), m::CustomCall().WithConvDnums("bf01_oi01->bf01")));
TF_ASSERT_OK_AND_ASSIGN(ConvolutionDimensionNumbers different_dnums,
ParseConvolutionDimensionNumbers("b01f_oi01->bf01"));
EXPECT_FALSE(Match(op.get(), m::CustomCall().WithConvDnums(different_dnums)));
EXPECT_FALSE(
Match(op.get(), m::CustomCall().WithConvDnums("b01f_oi01->bf01")));
EXPECT_FALSE(
Match(param.get(), m::CustomCall().WithConvDnums("b01f_oi01->bf01")));
EXPECT_DESC_AND_EXPLANATION(
op.get(), m::CustomCall().WithConvDnums("b01f_oi01->bf01"),
"an HloInstruction:\n"
" * with opcode custom-call AND\n"
" * which has convolution dimension numbers b01f_oi01->bf01",
"convolution_dimension_numbers bf01_oi01->bf01 don't match expected "
"b01f_oi01->bf01\n"
"in custom-call = f32[] custom-call(), dim_labels=bf01_oi01->bf01, "
"custom_call_target=\"foo\"");
}
TEST_F(PatternMatcherTest, CustomCallMatchers) {
constexpr char kModuleStr[] = R"(
HloModule test_module
ENTRY test {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT out = f32[] custom-call(p0, p1), custom_call_target="test_target"
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module,
ParseAndReturnVerifiedModule(kModuleStr));
auto* root = hlo_module->entry_computation()->root_instruction();
EXPECT_TRUE(Match(root, m::CustomCall()));
EXPECT_TRUE(Match(root, m::CustomCall({"test_target"})));
EXPECT_TRUE(Match(
root, m::CustomCall({"test_target"}, m::Parameter(0), m::Parameter(1))));
EXPECT_TRUE(Match(root, m::CustomCall({"test_target", "other_target"})));
EXPECT_TRUE(Match(root, m::CustomCall({"other_target", "test_target"})));
EXPECT_TRUE(Match(root, m::CustomCall({"test_target", "other_target"},
m::Parameter(0), m::Parameter(1))));
EXPECT_TRUE(Match(root, m::CustomCall({"other_target", "test_target"},
m::Parameter(0), m::Parameter(1))));
HloInstruction* instr;
EXPECT_TRUE(Match(root, m::CustomCall(&instr)));
EXPECT_TRUE(Match(root, m::CustomCall(&instr, {"test_target"})));
EXPECT_TRUE(Match(root, m::CustomCall(&instr, {"test_target"},
m::Parameter(0), m::Parameter(1))));
const HloInstruction* const_instr;
EXPECT_TRUE(Match(root, m::CustomCall(&const_instr)));
EXPECT_TRUE(Match(root, m::CustomCall(&const_instr, {"test_target"})));
EXPECT_TRUE(Match(root, m::CustomCall(&const_instr, {"test_target"},
m::Parameter(0), m::Parameter(1))));
EXPECT_FALSE(Match(root, m::CustomCall({"other_target"})));
EXPECT_FALSE(Match(root, m::CustomCall({"other_target", "other_target2"})));
EXPECT_FALSE(Match(
root, m::CustomCall({"test_target"}, m::Parameter(1), m::Parameter(0))));
}
TEST_F(PatternMatcherTest, SharedSubpatternPreservesTheSemantics) {
auto scalar0 = m::SharedSubpattern(m::ConstantScalar(0));
auto pattern0 = m::AnyOf<HloInstruction>(m::Convert(scalar0), scalar0);
auto scalar1 = m::SharedSubpattern(m::ConstantScalar(1));
auto pattern1 = m::AnyOf<HloInstruction>(m::Convert(scalar1), scalar1);
{
constexpr char kModuleStr[] = R"(
HloModule test_module ENTRY test {
ROOT constant = f16[] constant(0)
})";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module,
ParseAndReturnVerifiedModule(kModuleStr));
auto* root = hlo_module->entry_computation()->root_instruction();
EXPECT_TRUE(Match(root, pattern0));
EXPECT_FALSE(Match(root, pattern1));
}
{
constexpr char kModuleStr[] = R"(
HloModule test_module ENTRY test {
constant = f16[] constant(0)
ROOT convert = f32[] convert(constant)
})";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module,
ParseAndReturnVerifiedModule(kModuleStr));
auto* root = hlo_module->entry_computation()->root_instruction();
EXPECT_TRUE(Match(root, pattern0));
EXPECT_FALSE(Match(root, pattern1));
}
}
TEST_F(PatternMatcherTest, SharedSubpatternCanBeNested) {
auto scalar0 = m::SharedSubpattern(match::ConstantScalar(0));
auto subpattern0 = m::SharedSubpattern(
m::AnyOf<HloInstruction>(m::Convert(scalar0), scalar0));
auto pattern0 =
m::AnyOf<HloInstruction>(m::Convert(subpattern0), subpattern0);
auto scalar1 = m::SharedSubpattern(match::ConstantScalar(1));
auto subpattern1 = m::SharedSubpattern(
m::AnyOf<HloInstruction>(m::Convert(scalar1), scalar1));
auto pattern1 =
m::AnyOf<HloInstruction>(m::Convert(subpattern1), subpattern1);
{
constexpr char kModuleStr[] = R"(
HloModule test_module ENTRY test {
constant = f16[] constant(0)
convert = f32[] convert(constant)
ROOT convert1 = f32[] convert(convert)
})";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module,
ParseAndReturnVerifiedModule(kModuleStr));
auto* root = hlo_module->entry_computation()->root_instruction();
EXPECT_TRUE(Match(root, pattern0));
EXPECT_FALSE(Match(root, pattern1));
}
}
TEST_F(PatternMatcherTest, TestWithContractingDims) {
constexpr char kModuleStr[] = R"(
HloModule test_module
ENTRY test {
%param1 = f32[2048,1024] parameter(0)
%param2 = f32[1024,33708] parameter(1)
ROOT %dot1 = f32[2048,33708]{1,0} dot(f32[2048,1024]{1,0} %param1,
f32[1024,33708]{0,1} %param2),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
})";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module,
ParseAndReturnVerifiedModule(kModuleStr));
auto* root = hlo_module->entry_computation()->root_instruction();
EXPECT_TRUE(Match(root, m::Dot().WithContractingDims({1}, {0})));
EXPECT_FALSE(Match(root, m::Dot().WithContractingDims({0}, {1})));
EXPECT_FALSE(Match(root, m::Dot().WithContractingDims({1}, {0, 1})));
EXPECT_DESC_AND_EXPLANATION(
root, m::Dot().WithContractingDims({1}, {0, 1}),
"an HloInstruction:\n"
" * with opcode dot AND\n"
" * with lhs_contracting_dims {1} and rhs_contracting_dims {0,1}",
"rhs_contracting_dimensions {0} don't match expected {0,1}\n"
"in dot1 = f32[2048,33708]{1,0} dot(f32[2048,1024]{1,0} param1, "
"f32[1024,33708]{1,0} param2), lhs_contracting_dims={1}, "
"rhs_contracting_dims={0}");
}
TEST_F(PatternMatcherTest, TestWithReplicaGroups) {
constexpr char kModuleStr[] = R"(
HloModule test_module
add {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
ENTRY test {
input = f32[128,32]{0,1} parameter(0)
ROOT all-reduce = f32[128,32]{0,1} all-reduce(input),
replica_groups={{0,1},{2,3}}, to_apply=add
})";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module,
ParseAndReturnVerifiedModule(kModuleStr));
auto* root = hlo_module->entry_computation()->root_instruction();
EXPECT_TRUE(Match(root, m::AllReduce().WithReplicaGroups({{0, 1}, {2, 3}})));
EXPECT_FALSE(Match(root, m::AllReduce().WithReplicaGroups({{}, {}})));
EXPECT_FALSE(Match(root, m::AllReduce().WithReplicaGroups({{1, 0}, {3, 2}})));
EXPECT_DESC_AND_EXPLANATION(
root, m::AllReduce().WithReplicaGroups({{1, 0}, {3, 2}}),
"an HloInstruction:\n"
" * with opcode all-reduce AND\n"
" * with replica_group {{1,0},{3,2}}",
"replica_group {{0,1},{2,3}} don't match expected with replica_group "
"{{1,0},{3,2}}\n"
"in all-reduce = f32[128,32]{0,1} all-reduce(f32[128,32]{0,1} input), "
"replica_groups={{0,1},{2,3}}, to_apply=add");
}
TEST_F(PatternMatcherTest, TestWithSharding) {
constexpr char kModuleStr[] = R"(
HloModule test_module
ENTRY test {
p0 = f32[5,7,11,13]{3,2,1,0} parameter(0),
sharding={devices=[1,2,2,1]0,1,2,3},
metadata={op_name="test"}
ROOT copy = f32[5,7,11,13]{3,2,1,0} copy(p0)
})";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module,
ParseAndReturnVerifiedModule(kModuleStr));
auto* instruction = FindInstruction(hlo_module.get(), "p0");
EXPECT_TRUE(
Match(instruction, m::Op().WithSharding("{devices=[1,2,2,1]0,1,2,3}")));
EXPECT_FALSE(
Match(instruction, m::Op().WithSharding("{devices=[2,2,1,1]0,1,2,3}")));
EXPECT_DESC_AND_EXPLANATION(
instruction, m::Op().WithSharding("{devices=[2,2,1,1]0,1,2,3}"),
"an HloInstruction with sharding {devices=[2,2,1,1]0,1,2,3}",
"sharding {devices=[1,2,2,1]0,1,2,3} don't match expected "
"{devices=[2,2,1,1]0,1,2,3}\n"
"in p0 = f32[5,7,11,13]{3,2,1,0} parameter(0), "
"sharding={devices=[1,2,2,1]0,1,2,3}");
}
TEST_F(PatternMatcherTest, TestWithControlDeps) {
constexpr char kModuleStr[] = R"(
HloModule test_module
ENTRY test {
p0 = f32[4] parameter(0)
p1 = f32[4] parameter(1)
add = f32[4] add(p0, p1)
mul = f32[4] multiply(p0, p1), control-predecessors={add}
div = f32[4] divide(p0, p1), control-predecessors={mul}
ROOT t = (f32[4], f32[4], f32[4]) tuple(add, mul, div)
})";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module,
ParseAndReturnVerifiedModule(kModuleStr));
auto* add = FindInstruction(hlo_module.get(), "add");
auto* mul = FindInstruction(hlo_module.get(), "mul");
auto* div = FindInstruction(hlo_module.get(), "div");
EXPECT_TRUE(Match(add, m::Op().WithControlDeps({}, {mul})));
EXPECT_TRUE(Match(mul, m::Op().WithControlDeps({add}, {div})));
EXPECT_TRUE(Match(div, m::Op().WithControlDeps({mul}, {})));
EXPECT_FALSE(Match(div, m::Op().WithControlDeps({mul}, {div})));
EXPECT_DESC_AND_EXPLANATION(
div, m::Op().WithControlDeps({mul}, {div}),
"an HloInstruction with control predecessors {mul} and control "
"successors {div}",
"HloInstruction expected to have control successors {div} but has {}\n"
"in div = f32[4]{0} divide(f32[4]{0} p0, f32[4]{0} p1), "
"control-predecessors={mul}");
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/pattern_matcher.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/pattern_matcher_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
4c8b14a4-a2fa-45f8-ab69-619597b9724e | cpp | google/tensorstore | admission_queue | tensorstore/internal/rate_limiter/admission_queue.cc | tensorstore/internal/rate_limiter/admission_queue_test.cc | #include "tensorstore/internal/rate_limiter/admission_queue.h"
#include <stddef.h>
#include <cassert>
#include <limits>
#include "absl/synchronization/mutex.h"
#include "tensorstore/internal/container/intrusive_linked_list.h"
#include "tensorstore/internal/rate_limiter/rate_limiter.h"
namespace tensorstore {
namespace internal {
// A `limit` of 0 is interpreted as "unlimited".
AdmissionQueue::AdmissionQueue(size_t limit)
    : limit_(limit == 0 ? std::numeric_limits<size_t>::max() : limit) {}
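// Admit() runs `fn(node)` right away while under the limit; otherwise the
// node is parked on the intrusive wait list and started later by Finish().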
void AdmissionQueue::Admit(RateLimiterNode* node, RateLimiterNode::StartFn fn) {
assert(node->next_ == nullptr);
assert(node->prev_ == nullptr);
assert(node->start_fn_ == nullptr);
node->start_fn_ = fn;
{
absl::MutexLock lock(&mutex_);
if (in_flight_++ >= limit_) {
internal::intrusive_linked_list::InsertBefore(RateLimiterNodeAccessor{},
&head_, node);
return;
}
}
RunStartFunction(node);
}
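// Finish() releases one in-flight slot and, if a node is waiting, removes it
// from the list and starts it outside the lock.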
void AdmissionQueue::Finish(RateLimiterNode* node) {
assert(node->next_ == nullptr);
RateLimiterNode* next_node = nullptr;
{
absl::MutexLock lock(&mutex_);
in_flight_--;
next_node = head_.next_;
if (next_node == &head_) return;
internal::intrusive_linked_list::Remove(RateLimiterNodeAccessor{},
next_node);
}
RunStartFunction(next_node);
}
}  // namespace internal
} | #include "tensorstore/internal/rate_limiter/admission_queue.h"
#include <stddef.h>
#include <atomic>
#include <utility>
#include <gtest/gtest.h>
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/internal/rate_limiter/rate_limiter.h"
#include "tensorstore/util/executor.h"
namespace {
using ::tensorstore::Executor;
using ::tensorstore::ExecutorTask;
using ::tensorstore::internal::AdmissionQueue;
using ::tensorstore::internal::adopt_object_ref;
using ::tensorstore::internal::AtomicReferenceCount;
using ::tensorstore::internal::IntrusivePtr;
using ::tensorstore::internal::MakeIntrusivePtr;
using ::tensorstore::internal::RateLimiterNode;
struct Node : public RateLimiterNode, public AtomicReferenceCount<Node> {
AdmissionQueue* queue_;
ExecutorTask task_;
Node(AdmissionQueue* queue, ExecutorTask task)
: queue_(queue), task_(std::move(task)) {}
~Node() { queue_->Finish(this); }
static void Start(void* task) {
IntrusivePtr<Node> self(reinterpret_cast<Node*>(task), adopt_object_ref);
std::move(self->task_)();
}
};
TEST(AdmissionQueueTest, Basic) {
AdmissionQueue queue(1);
std::atomic<size_t> done{0};
EXPECT_EQ(1, queue.limit());
EXPECT_EQ(0, queue.in_flight());
{
for (int i = 0; i < 100; i++) {
auto node = MakeIntrusivePtr<Node>(&queue, [&done] { done++; });
intrusive_ptr_increment(node.get());
queue.Admit(node.get(), &Node::Start);
}
}
EXPECT_EQ(100, done);
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/rate_limiter/admission_queue.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/rate_limiter/admission_queue_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
5c54fd05-5e24-4353-98e8-4a51907c7f2c | cpp | tensorflow/tensorflow | dot_decomposer | third_party/xla/xla/service/dot_decomposer.cc | third_party/xla/xla/service/dot_decomposer_test.cc | #include "xla/service/dot_decomposer.h"
#include <algorithm>
#include <cstdint>
#include <memory>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/shape_inference.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
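// Rewrites `original_dot` into canonical form: batch dimensions leading,
// at most one non-contracting dimension per operand, and exactly one
// contracting dimension. Operands are transposed and reshaped into that
// layout, and the canonical dot's result is reshaped back to the original
// output shape.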
absl::Status CanonicalizeDot(HloDotInstruction* original_dot) {
auto computation = original_dot->parent();
const auto& original_dnums = original_dot->dot_dimension_numbers();
const int64_t num_batch_dims = original_dnums.lhs_batch_dimensions_size();
const int64_t num_contracting_dims =
original_dnums.lhs_contracting_dimensions_size();
int lhs_sparse_dim = -1, rhs_sparse_dim = -1;
for (const SparsityDescriptor& descriptor : original_dot->sparsity()) {
(descriptor.index() == 0 ? lhs_sparse_dim : rhs_sparse_dim) =
descriptor.dimension();
}
auto move_dim_to_end = [&](std::vector<int64_t>& dims, int sparse_dim) {
if (sparse_dim < 0) return;
auto it = std::remove(dims.begin(), dims.end(), sparse_dim);
*it = sparse_dim;
};
const auto& lhs_shape = original_dot->operand(0)->shape();
const int64_t lhs_rank = lhs_shape.rank();
const int64_t num_lhs_non_contracting_dims =
lhs_rank - num_batch_dims - num_contracting_dims;
std::vector<int64_t> lhs_non_contracting_dims;
lhs_non_contracting_dims.reserve(num_lhs_non_contracting_dims);
int64_t lhs_contracting_size = 1;
bool lhs_contracting_dynamic = false;
int64_t lhs_non_contracting_size = 1;
bool lhs_non_contracting_dynamic = false;
std::vector<int64_t> batch_dim_sizes;
batch_dim_sizes.reserve(num_batch_dims);
std::vector<bool> batch_dynamic_dims;
batch_dynamic_dims.reserve(num_batch_dims);
for (int64_t i = 0; i < lhs_rank; ++i) {
if (absl::c_linear_search(original_dnums.lhs_contracting_dimensions(), i)) {
lhs_contracting_size *= lhs_shape.dimensions(i);
lhs_contracting_dynamic |= lhs_shape.is_dynamic_dimension(i);
} else if (absl::c_linear_search(original_dnums.lhs_batch_dimensions(),
i)) {
batch_dim_sizes.push_back(lhs_shape.dimensions(i));
batch_dynamic_dims.push_back(lhs_shape.is_dynamic_dimension(i));
} else {
lhs_non_contracting_dims.push_back(i);
lhs_non_contracting_size *= lhs_shape.dimensions(i);
lhs_non_contracting_dynamic |= lhs_shape.is_dynamic_dimension(i);
}
}
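  // Permute the LHS to (batch, non-contracting, contracting) order, keeping
  // a sparse dimension (if any) last.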
std::vector<int64_t> lhs_transpose;
lhs_transpose.reserve(lhs_rank);
lhs_transpose.insert(lhs_transpose.end(),
original_dnums.lhs_batch_dimensions().begin(),
original_dnums.lhs_batch_dimensions().end());
lhs_transpose.insert(lhs_transpose.end(), lhs_non_contracting_dims.begin(),
lhs_non_contracting_dims.end());
lhs_transpose.insert(lhs_transpose.end(),
original_dnums.lhs_contracting_dimensions().begin(),
original_dnums.lhs_contracting_dimensions().end());
move_dim_to_end(lhs_transpose, lhs_sparse_dim);
HloInstruction* lhs_operand = original_dot->mutable_operand(0);
HloInstruction* transposed_lhs = computation->AddInstruction(
HloInstruction::CreateTranspose(
ShapeUtil::PermuteDimensions(lhs_transpose, lhs_shape), lhs_operand,
lhs_transpose),
&lhs_operand->metadata());
std::vector<int64_t> lhs_reshape_dims = batch_dim_sizes;
std::vector<bool> lhs_reshape_dynamic_dims = batch_dynamic_dims;
if (lhs_non_contracting_size > 1) {
lhs_reshape_dims.push_back(lhs_non_contracting_size);
lhs_reshape_dynamic_dims.push_back(lhs_non_contracting_dynamic);
}
lhs_reshape_dims.push_back(lhs_contracting_size);
lhs_reshape_dynamic_dims.push_back(lhs_contracting_dynamic);
HloInstruction* reshaped_lhs = computation->AddInstruction(
HloInstruction::CreateReshape(
ShapeUtil::MakeShape(lhs_shape.element_type(), lhs_reshape_dims,
lhs_reshape_dynamic_dims),
transposed_lhs),
&transposed_lhs->metadata());
const auto& rhs_shape = original_dot->operand(1)->shape();
const int64_t rhs_rank = rhs_shape.rank();
const int64_t num_rhs_non_contracting_dims =
rhs_rank - num_batch_dims - num_contracting_dims;
std::vector<int64_t> rhs_non_contracting_dims;
rhs_non_contracting_dims.reserve(num_rhs_non_contracting_dims);
int64_t rhs_non_contracting_size = 1;
bool rhs_non_contracting_dynamic = false;
int64_t rhs_contracting_size = 1;
bool rhs_contracting_dynamic = false;
for (int64_t i = 0; i < rhs_rank; ++i) {
if (absl::c_linear_search(original_dnums.rhs_contracting_dimensions(), i)) {
rhs_contracting_size *= rhs_shape.dimensions(i);
rhs_contracting_dynamic |= rhs_shape.is_dynamic_dimension(i);
} else if (!absl::c_linear_search(original_dnums.rhs_batch_dimensions(),
i)) {
rhs_non_contracting_dims.push_back(i);
rhs_non_contracting_size *= rhs_shape.dimensions(i);
rhs_non_contracting_dynamic |= rhs_shape.is_dynamic_dimension(i);
}
}
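  // Permute the RHS to (batch, contracting, non-contracting) order; a sparse
  // contracting dimension stays last within the contracting group.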
std::vector<int64_t> rhs_transpose;
rhs_transpose.reserve(rhs_rank);
rhs_transpose.insert(rhs_transpose.end(),
original_dnums.rhs_batch_dimensions().begin(),
original_dnums.rhs_batch_dimensions().end());
rhs_transpose.insert(rhs_transpose.end(),
original_dnums.rhs_contracting_dimensions().begin(),
original_dnums.rhs_contracting_dimensions().end());
move_dim_to_end(rhs_transpose, rhs_sparse_dim);
rhs_transpose.insert(rhs_transpose.end(), rhs_non_contracting_dims.begin(),
rhs_non_contracting_dims.end());
HloInstruction* rhs_operand = original_dot->mutable_operand(1);
HloInstruction* transposed_rhs = computation->AddInstruction(
HloInstruction::CreateTranspose(
ShapeUtil::PermuteDimensions(rhs_transpose, rhs_shape), rhs_operand,
rhs_transpose),
&rhs_operand->metadata());
std::vector<int64_t> rhs_reshape_dims = batch_dim_sizes;
rhs_reshape_dims.push_back(rhs_contracting_size);
std::vector<bool> rhs_reshape_dynamic_dims = batch_dynamic_dims;
rhs_reshape_dynamic_dims.push_back(rhs_contracting_dynamic);
if (rhs_non_contracting_size > 1) {
rhs_reshape_dims.push_back(rhs_non_contracting_size);
rhs_reshape_dynamic_dims.push_back(rhs_non_contracting_dynamic);
}
HloInstruction* reshaped_rhs = computation->AddInstruction(
HloInstruction::CreateReshape(
ShapeUtil::MakeShape(rhs_shape.element_type(), rhs_reshape_dims,
rhs_reshape_dynamic_dims),
transposed_rhs),
&transposed_rhs->metadata());
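  // The canonical dot produces (batch, lhs non-contracting, rhs
  // non-contracting); size-1 non-contracting dimensions are dropped.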
std::vector<int64_t> dot_dims = batch_dim_sizes;
std::vector<bool> dot_dynamic_dims = batch_dynamic_dims;
if (lhs_non_contracting_size > 1) {
dot_dims.push_back(lhs_non_contracting_size);
dot_dynamic_dims.push_back(lhs_non_contracting_dynamic);
}
if (rhs_non_contracting_size > 1) {
dot_dims.push_back(rhs_non_contracting_size);
dot_dynamic_dims.push_back(rhs_non_contracting_dynamic);
}
DotDimensionNumbers dot_dnums;
for (int64_t i = 0; i < num_batch_dims; ++i) {
dot_dnums.add_lhs_batch_dimensions(i);
dot_dnums.add_rhs_batch_dimensions(i);
}
dot_dnums.add_lhs_contracting_dimensions(
num_batch_dims + (lhs_non_contracting_size > 1 ? 1 : 0));
dot_dnums.add_rhs_contracting_dimensions(num_batch_dims);
std::vector<SparsityDescriptor> sparsity;
std::vector<HloInstruction*> sparse_meta;
sparsity.reserve(original_dot->sparse_operands());
sparse_meta.reserve(original_dot->sparse_operands());
auto transpose_meta = [&](HloInstruction* original_meta,
absl::Span<const int64_t> transpose) {
return computation->AddInstruction(
HloInstruction::CreateTranspose(
ShapeUtil::PermuteDimensions(transpose, original_meta->shape()),
original_meta, transpose),
&original_meta->metadata());
};
for (int i = 0; i < original_dot->sparse_operands(); ++i) {
SparsityDescriptor descriptor = original_dot->sparsity()[i];
descriptor.set_dimension(num_batch_dims + (descriptor.index() == 0 &&
lhs_non_contracting_size > 1));
sparsity.push_back(descriptor);
HloInstruction* meta =
original_dot->mutable_operand(HloDotInstruction::kOperands + i);
HloInstruction* meta_operand;
if (descriptor.index() == 0) {
meta = transpose_meta(meta, lhs_transpose);
meta_operand = reshaped_lhs;
} else {
meta = transpose_meta(meta, rhs_transpose);
meta_operand = reshaped_rhs;
}
TF_ASSIGN_OR_RETURN(Shape result_shape,
ShapeInference::InferSparseDotMetadataShape(
meta_operand->shape(), dot_dnums, descriptor));
meta = computation->AddInstruction(
HloInstruction::CreateReshape(result_shape, meta), &meta->metadata());
sparse_meta.push_back(meta);
}
HloInstruction* dot = computation->AddInstruction(HloInstruction::CreateDot(
ShapeUtil::MakeShape(original_dot->shape().element_type(), dot_dims,
dot_dynamic_dims),
reshaped_lhs, reshaped_rhs, dot_dnums, original_dot->precision_config(),
sparsity, sparse_meta));
original_dot->SetupDerivedInstruction(dot);
std::unique_ptr<HloInstruction> replacement =
HloInstruction::CreateReshape(original_dot->shape(), dot);
VLOG(3) << "Canonicalizing dot:\n"
<< "\t old: " << original_dot->ToString() << "\n"
<< "\t new: " << dot->ToString() << "\n"
<< "\t -> " << replacement->ToString();
return computation->ReplaceWithNewInstruction(original_dot,
std::move(replacement));
}
}
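// Finds every non-canonical dot in the module's non-fusion computations and
// rewrites it via CanonicalizeDot.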
absl::StatusOr<bool> DotDecomposer::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
std::vector<HloInstruction*> non_canonical_dots;
for (auto* computation :
module->MakeNonfusionComputations(execution_threads)) {
for (auto* instruction : computation->instructions()) {
if (instruction->opcode() != HloOpcode::kDot) {
continue;
}
const DotDimensionNumbers& dnums = instruction->dot_dimension_numbers();
if (dnums.lhs_contracting_dimensions_size() != 1) {
non_canonical_dots.push_back(instruction);
continue;
}
if (dnums.lhs_batch_dimensions_size() + 2 <
instruction->operand(0)->shape().rank() ||
dnums.rhs_batch_dimensions_size() + 2 <
instruction->operand(1)->shape().rank()) {
non_canonical_dots.push_back(instruction);
continue;
}
if (dnums.lhs_batch_dimensions().empty() &&
dnums.lhs_contracting_dimensions().empty()) {
non_canonical_dots.push_back(instruction);
continue;
}
std::vector<int64_t> canonical_batch_dims(
dnums.lhs_batch_dimensions_size());
absl::c_iota(canonical_batch_dims, 0);
if (!absl::c_equal(dnums.lhs_batch_dimensions(), canonical_batch_dims) ||
!absl::c_equal(dnums.rhs_batch_dimensions(), canonical_batch_dims)) {
non_canonical_dots.push_back(instruction);
}
}
}
bool changed = false;
for (auto* dot : non_canonical_dots) {
TF_RETURN_IF_ERROR(CanonicalizeDot(Cast<HloDotInstruction>(dot)));
changed = true;
}
return changed;
}
} | #include "xla/service/dot_decomposer.h"
#include <memory>
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla {
namespace {
namespace m = ::xla::match;
namespace op = ::xla::testing::opcode_matchers;
using DotDecomposerTest = HloTestBase;
TEST_F(DotDecomposerTest, CanonicalizeMultipleNonContractingDims) {
absl::string_view module_string = R"(
HloModule module
ENTRY main {
p0 = f32[64,63,512]{2,1,0} parameter(0)
p1 = f32[512,512]{1,0} parameter(1)
ROOT dot = f32[64,63,512]{2,1,0} dot(p0, p1), lhs_contracting_dims={2},
rhs_contracting_dims={0}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_string));
TF_ASSERT_OK_AND_ASSIGN(bool canonicalized,
DotDecomposer().Run(module.get()));
EXPECT_TRUE(canonicalized);
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Reshape(AllOf(op::Dot(op::Reshape(), op::Reshape(),
                                        /*lhs_contracting_dim=*/1,
                                        /*rhs_contracting_dim=*/0),
op::Shape("f32[4032,512]"))));
}
TEST_F(DotDecomposerTest, DontCanonicalizeIfNoNoncontractingDims) {
absl::string_view module_string = R"(
HloModule module
ENTRY main {
p0 = f32[64,4]{1,0} parameter(0)
p1 = f32[64,4]{1,0} parameter(1)
ROOT dot = f32[64]{0} dot(p0, p1), lhs_batch_dims={0},
lhs_contracting_dims={1},
rhs_batch_dims={0},
rhs_contracting_dims={1}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_string));
TF_ASSERT_OK_AND_ASSIGN(bool canonicalized,
DotDecomposer().Run(module.get()));
EXPECT_FALSE(canonicalized);
}
TEST_F(DotDecomposerTest, DontAddLhsNonContractingDimIfOne) {
absl::string_view module_string = R"(
HloModule module
ENTRY main {
p0 = f32[64,4]{1,0} parameter(0)
p1 = f32[64,4,2,1]{3,2,1,0} parameter(1)
ROOT dot = f32[64,2,1]{2,1,0} dot(p0, p1), lhs_batch_dims={0},
lhs_contracting_dims={1},
rhs_batch_dims={0},
rhs_contracting_dims={1}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_string));
TF_ASSERT_OK_AND_ASSIGN(bool canonicalized,
DotDecomposer().Run(module.get()));
EXPECT_TRUE(canonicalized);
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Reshape(AllOf(op::Dot(op::Reshape(), op::Reshape(),
                                        /*lhs_contracting_dim=*/1,
                                        /*rhs_contracting_dim=*/1),
op::Shape("f32[64,2]"))));
}
TEST_F(DotDecomposerTest, DontAddRhsNonContractingDimIfOne) {
absl::string_view module_string = R"(
HloModule module
ENTRY main {
p0 = f32[64,4,2,1]{3,2,1,0} parameter(0)
p1 = f32[64,4]{1,0} parameter(1)
ROOT dot = f32[64,2,1]{2,1,0} dot(p0, p1), lhs_batch_dims={0},
lhs_contracting_dims={1},
rhs_batch_dims={0},
rhs_contracting_dims={1}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_string));
TF_ASSERT_OK_AND_ASSIGN(bool canonicalized,
DotDecomposer().Run(module.get()));
EXPECT_TRUE(canonicalized);
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Reshape(AllOf(op::Dot(op::Reshape(), op::Reshape(),
                                        /*lhs_contracting_dim=*/2,
                                        /*rhs_contracting_dim=*/1),
op::Shape("f32[64,2]"))));
}
template <typename Arg0, typename Arg1, typename Arg2>
auto SparseDotMatcher(Arg0&& arg0, Arg1&& arg1, Arg2&& arg2) {
return match::Op()
.WithOpcode(HloOpcode::kDot)
.WithOperand(0, std::forward<Arg0>(arg0))
.WithOperand(1, std::forward<Arg1>(arg1))
.WithOperand(2, std::forward<Arg2>(arg2));
}
TEST_F(DotDecomposerTest, CanonicalizeSparseLhs) {
absl::string_view kHlo = R"(
HloModule module
ENTRY main {
lhs = f32[16,4,3,7] parameter(0)
rhs = f32[32,4,5,7] parameter(1)
meta = u16[2,4,3,7] parameter(2)
ROOT dot = f32[7,3,5] dot(lhs, rhs, meta), sparsity=L.0@2:4,
lhs_contracting_dims={0,1}, rhs_contracting_dims={0,1},
lhs_batch_dims={3}, rhs_batch_dims={3}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHlo));
TF_ASSERT_OK_AND_ASSIGN(bool canonicalized,
DotDecomposer().Run(module.get()));
EXPECT_TRUE(canonicalized);
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, GmockMatch(m::Reshape(SparseDotMatcher(
m::Reshape(m::Transpose(m::Parameter(0))),
m::Reshape(m::Transpose(m::Parameter(1))),
m::Reshape(m::Transpose(m::Parameter(2)))))));
auto dot = Cast<HloDotInstruction>(root->operand(0));
auto descriptor = dot->sparsity().front();
EXPECT_EQ(descriptor.index(), 0);
EXPECT_EQ(descriptor.dimension(), 2);
}
TEST_F(DotDecomposerTest, CanonicalizeSparseRhs) {
absl::string_view kHlo = R"(
HloModule module
ENTRY main {
lhs = f32[32,4,3,7] parameter(0)
rhs = f32[16,4,5,7] parameter(1)
meta = u16[2,4,5,7] parameter(2)
ROOT dot = f32[7,3,5] dot(lhs, rhs, meta), sparsity=R.0@2:4,
lhs_contracting_dims={0,1}, rhs_contracting_dims={0,1},
lhs_batch_dims={3}, rhs_batch_dims={3}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHlo));
TF_ASSERT_OK_AND_ASSIGN(bool canonicalized,
DotDecomposer().Run(module.get()));
EXPECT_TRUE(canonicalized);
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, GmockMatch(m::Reshape(SparseDotMatcher(
m::Reshape(m::Transpose(m::Parameter(0))),
m::Reshape(m::Transpose(m::Parameter(1))),
m::Reshape(m::Transpose(m::Parameter(2)))))));
auto dot = Cast<HloDotInstruction>(root->operand(0));
auto descriptor = dot->sparsity().front();
EXPECT_EQ(descriptor.index(), 1);
EXPECT_EQ(descriptor.dimension(), 1);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/dot_decomposer.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/dot_decomposer_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
0f523235-915f-4e72-b771-0e58ab3b8a13 | cpp | tensorflow/tensorflow | summary_op | tensorflow/c/kernels/summary_op.cc | tensorflow/c/kernels/summary_op_test.cc | #include <sstream>
#include <string>
#include "unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/c/kernels.h"
#include "tensorflow/c/kernels/tensor_shape_utils.h"
#include "tensorflow/c/tf_status.h"
#include "tensorflow/c/tf_tensor.h"
#include "tensorflow/core/framework/registration/registration.h"
#include "tensorflow/core/framework/summary.pb.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/platform/bfloat16.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/strcat.h"
#include "tensorflow/core/platform/tstring.h"
#include "tensorflow/core/platform/types.h"
namespace {
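// Holds the kernel's two inputs plus a status; the tensors and status are
// fetched on construction and released on destruction.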
struct Params {
TF_Tensor* tags;
TF_Tensor* values;
TF_Status* status;
explicit Params(TF_OpKernelContext* ctx)
: tags(nullptr), values(nullptr), status(nullptr) {
status = TF_NewStatus();
TF_GetInput(ctx, 0, &tags, status);
if (TF_GetCode(status) == TF_OK) {
TF_GetInput(ctx, 1, &values, status);
}
}
~Params() {
TF_DeleteStatus(status);
TF_DeleteTensor(tags);
TF_DeleteTensor(values);
}
};
void* ScalarSummaryOp_Create(TF_OpKernelConstruction* ctx) { return nullptr; }
void ScalarSummaryOp_Delete(void* kernel) {}
bool IsSameSize(TF_Tensor* tensor1, TF_Tensor* tensor2);
std::string SingleTag(TF_Tensor* tags);
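// Builds a Summary proto with one value per (tag, value) pair and writes the
// serialized proto into a scalar string output.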
template <typename T>
void ScalarSummaryOp_Compute(void* kernel, TF_OpKernelContext* ctx) {
Params params(ctx);
if (TF_GetCode(params.status) != TF_OK) {
TF_OpKernelContext_Failure(ctx, params.status);
return;
}
if (!IsSameSize(params.tags, params.values)) {
std::ostringstream err;
err << "tags and values are not the same shape: "
<< tensorflow::ShapeDebugString(params.tags)
<< " != " << tensorflow::ShapeDebugString(params.values)
<< SingleTag(params.tags);
TF_SetStatus(params.status, TF_INVALID_ARGUMENT, err.str().c_str());
TF_OpKernelContext_Failure(ctx, params.status);
return;
}
tensorflow::Summary s;
auto tags_array =
static_cast<tensorflow::tstring*>(TF_TensorData(params.tags));
auto values_array = static_cast<T*>(TF_TensorData(params.values));
for (int i = 0; i < TF_TensorElementCount(params.tags); ++i) {
tensorflow::Summary::Value* v = s.add_value();
const tensorflow::tstring& Ttags_i = tags_array[i];
v->set_tag(Ttags_i.data(), Ttags_i.size());
v->set_simple_value(static_cast<float>(values_array[i]));
}
TF_Tensor* summary_tensor =
TF_AllocateOutput(ctx, 0, TF_ExpectedOutputDataType(ctx, 0), nullptr, 0,
sizeof(tensorflow::tstring), params.status);
if (TF_GetCode(params.status) != TF_OK) {
TF_DeleteTensor(summary_tensor);
TF_OpKernelContext_Failure(ctx, params.status);
return;
}
tensorflow::tstring* output_tstring =
reinterpret_cast<tensorflow::tstring*>(TF_TensorData(summary_tensor));
CHECK(SerializeToTString(s, output_tstring));
TF_DeleteTensor(summary_tensor);
}
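// Returns true iff the two tensors have the same rank and dimensions.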
bool IsSameSize(TF_Tensor* tensor1, TF_Tensor* tensor2) {
if (TF_NumDims(tensor1) != TF_NumDims(tensor2)) {
return false;
}
for (int d = 0; d < TF_NumDims(tensor1); d++) {
if (TF_Dim(tensor1, d) != TF_Dim(tensor2, d)) {
return false;
}
}
return true;
}
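// Returns " (tag '<tag>')" when there is exactly one tag, for use in error
// messages; otherwise returns an empty string.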
std::string SingleTag(TF_Tensor* tags) {
if (TF_TensorElementCount(tags) == 1) {
const char* single_tag =
static_cast<tensorflow::tstring*>(TF_TensorData(tags))->c_str();
return tensorflow::strings::StrCat(" (tag '", single_tag, "')");
} else {
return "";
}
}
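// Registers the ScalarSummary kernel on CPU with a type constraint for T.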
template <typename T>
void RegisterScalarSummaryOpKernel() {
TF_Status* status = TF_NewStatus();
{
auto* builder = TF_NewKernelBuilder(
"ScalarSummary", tensorflow::DEVICE_CPU, &ScalarSummaryOp_Create,
&ScalarSummaryOp_Compute<T>, &ScalarSummaryOp_Delete);
TF_KernelBuilder_TypeConstraint(
builder, "T",
static_cast<TF_DataType>(tensorflow::DataTypeToEnum<T>::v()), status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << "Error while adding type constraint";
TF_RegisterKernelBuilder("ScalarSummary", builder, status);
CHECK_EQ(TF_OK, TF_GetCode(status))
<< "Error while registering Scalar Summmary kernel";
}
TF_DeleteStatus(status);
}
TF_ATTRIBUTE_UNUSED bool IsScalarSummaryOpKernelRegistered = []() {
if (SHOULD_REGISTER_OP_KERNEL("ScalarSummary")) {
RegisterScalarSummaryOpKernel<int64_t>();
RegisterScalarSummaryOpKernel<tensorflow::uint64>();
RegisterScalarSummaryOpKernel<tensorflow::int32>();
RegisterScalarSummaryOpKernel<tensorflow::uint32>();
RegisterScalarSummaryOpKernel<tensorflow::uint16>();
RegisterScalarSummaryOpKernel<tensorflow::int16>();
RegisterScalarSummaryOpKernel<tensorflow::int8>();
RegisterScalarSummaryOpKernel<tensorflow::uint8>();
RegisterScalarSummaryOpKernel<Eigen::half>();
RegisterScalarSummaryOpKernel<tensorflow::bfloat16>();
RegisterScalarSummaryOpKernel<float>();
RegisterScalarSummaryOpKernel<double>();
}
return true;
}();
} | #include "tensorflow/c/kernels.h"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/attr_value_util.h"
#include "tensorflow/core/framework/device_base.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_def_builder.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/summary.pb.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/lib/gtl/inlined_vector.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/strcat.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
namespace tensorflow {
namespace {
class DummyDevice : public DeviceBase {
public:
explicit DummyDevice(Env* env) : DeviceBase(env) {}
  Allocator* GetAllocator(AllocatorAttributes /*attr*/) override {
return cpu_allocator();
}
};
void ExpectSummaryMatches(const Summary& actual, const string& expected_str) {
Summary expected;
ASSERT_TRUE(protobuf::TextFormat::ParseFromString(expected_str, &expected));
EXPECT_EQ(expected.DebugString(), actual.DebugString());
}
void TestScalarSummaryOp(Tensor* tags, Tensor* values, string expected_output,
error::Code expected_code) {
Status status;
NodeDef def;
def.set_op("ScalarSummary");
def.set_device(DEVICE_CPU);
AttrValue valuesTypeAttr;
SetAttrValue(values->dtype(), &valuesTypeAttr);
(*def.mutable_attr())["T"] = valuesTypeAttr;
def.add_input(strings::StrCat("input1: ", DataTypeString(tags->dtype())));
def.add_input(strings::StrCat("input2: ", DataTypeString(values->dtype())));
std::unique_ptr<OpKernel> kernel =
CreateOpKernel(DeviceType(DEVICE_CPU), nullptr, nullptr, def, 1, &status);
ASSERT_TRUE(status.ok()) << status.ToString();
OpKernelContext::Params params;
DummyDevice dummy_device(nullptr);
params.device = &dummy_device;
params.op_kernel = kernel.get();
AllocatorAttributes alloc_attrs;
params.output_attr_array = &alloc_attrs;
absl::InlinedVector<TensorValue, 4UL> inputs;
inputs.emplace_back(tags);
inputs.emplace_back(values);
params.inputs = inputs;
OpKernelContext ctx(¶ms, 1);
kernel->Compute(&ctx);
ASSERT_EQ(expected_code, ctx.status().code());
if (expected_code == error::OK) {
Summary summary;
ASSERT_TRUE(ParseProtoUnlimited(
&summary, ctx.mutable_output(0)->scalar<tstring>()()));
ExpectSummaryMatches(summary, expected_output);
} else {
EXPECT_TRUE(absl::StrContains(ctx.status().ToString(), expected_output))
<< ctx.status();
}
}
TEST(ScalarSummaryOpTest, SimpleFloat) {
int vectorSize = 3;
Tensor tags(DT_STRING, {vectorSize});
Tensor values(DT_FLOAT, {vectorSize});
tags.vec<tstring>()(0) = "tag1";
tags.vec<tstring>()(1) = "tag2";
tags.vec<tstring>()(2) = "tag3";
values.vec<float>()(0) = 1.0f;
values.vec<float>()(1) = -0.73f;
values.vec<float>()(2) = 10000.0f;
TestScalarSummaryOp(&tags, &values, R"(
value { tag: 'tag1' simple_value: 1.0 }
value { tag: 'tag2' simple_value: -0.73}
value { tag: 'tag3' simple_value: 10000.0})",
error::OK);
}
TEST(ScalarSummaryOpTest, SimpleDouble) {
int vectorSize = 3;
Tensor tags(DT_STRING, {vectorSize});
Tensor values(DT_DOUBLE, {vectorSize});
tags.vec<tstring>()(0) = "tag1";
tags.vec<tstring>()(1) = "tag2";
tags.vec<tstring>()(2) = "tag3";
values.vec<double>()(0) = 1.0;
values.vec<double>()(1) = -0.73;
values.vec<double>()(2) = 10000.0;
TestScalarSummaryOp(&tags, &values, R"(
value { tag: 'tag1' simple_value: 1.0 }
value { tag: 'tag2' simple_value: -0.73}
value { tag: 'tag3' simple_value: 10000.0})",
error::OK);
}
TEST(ScalarSummaryOpTest, SimpleHalf) {
int vectorSize = 3;
Tensor tags(DT_STRING, {vectorSize});
Tensor values(DT_HALF, {vectorSize});
tags.vec<tstring>()(0) = "tag1";
tags.vec<tstring>()(1) = "tag2";
tags.vec<tstring>()(2) = "tag3";
values.vec<Eigen::half>()(0) = Eigen::half(1.0);
values.vec<Eigen::half>()(1) = Eigen::half(-2.0);
values.vec<Eigen::half>()(2) = Eigen::half(10000.0);
TestScalarSummaryOp(&tags, &values, R"(
value { tag: 'tag1' simple_value: 1.0 }
value { tag: 'tag2' simple_value: -2.0}
value { tag: 'tag3' simple_value: 10000.0})",
error::OK);
}
TEST(ScalarSummaryOpTest, Error_WrongDimsTags) {
Tensor tags(DT_STRING, {2, 1});
Tensor values(DT_FLOAT, {2});
tags.matrix<tstring>()(0, 0) = "tag1";
tags.matrix<tstring>()(1, 0) = "tag2";
values.vec<float>()(0) = 1.0f;
values.vec<float>()(1) = -2.0f;
TestScalarSummaryOp(&tags, &values, "tags and values are not the same shape",
error::INVALID_ARGUMENT);
}
TEST(ScalarSummaryOpTest, Error_WrongValuesTags) {
Tensor tags(DT_STRING, {2});
Tensor values(DT_FLOAT, {2, 1});
tags.vec<tstring>()(0) = "tag1";
tags.vec<tstring>()(1) = "tag2";
values.matrix<float>()(0, 0) = 1.0f;
values.matrix<float>()(1, 0) = -2.0f;
TestScalarSummaryOp(&tags, &values, "tags and values are not the same shape",
error::INVALID_ARGUMENT);
}
TEST(ScalarSummaryOpTest, Error_WrongWithSingleTag) {
Tensor tags(DT_STRING, {1});
Tensor values(DT_FLOAT, {2, 1});
tags.vec<tstring>()(0) = "tag1";
values.matrix<float>()(0, 0) = 1.0f;
values.matrix<float>()(1, 0) = -2.0f;
TestScalarSummaryOp(&tags, &values, "tags and values are not the same shape",
error::INVALID_ARGUMENT);
}
TEST(ScalarSummaryOpTest, IsRegistered) {
const OpRegistrationData* reg;
TF_CHECK_OK(OpRegistry::Global()->LookUp("ScalarSummary", ®));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/kernels/summary_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/kernels/summary_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
8585c3c1-3110-4d7f-9df7-16798a77f5b7 | cpp | tensorflow/tensorflow | stochastic_convert_decomposer | third_party/xla/xla/service/stochastic_convert_decomposer.cc | third_party/xla/xla/service/stochastic_convert_decomposer_test.cc | #include "xla/service/stochastic_convert_decomposer.h"
#include <cstdint>
#include <limits>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/primitive_util.h"
#include "xla/service/hlo_creation_utils.h"
#include "xla/service/shape_inference.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
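// Expands a stochastic-convert into elementary HLO: split the operand into
// sign, integer, and fractional parts; round up when the random bits fall
// below the scaled fraction; then clamp to the destination type's range.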
absl::Status DecomposeStochasticConvert(HloComputation* comp,
HloInstruction* instruction) {
CHECK(instruction->opcode() == HloOpcode::kStochasticConvert)
<< "requires a stochastic_convert instruction to decompose, but got: "
<< instruction->opcode();
CHECK(instruction->operand_count() == 2)
<< "requires 2 operands for stochastic convert, but got: "
<< instruction->operand_count();
HloInstruction* operand = instruction->mutable_operand(0);
HloInstruction* random = instruction->mutable_operand(1);
PrimitiveType from_type = operand->shape().element_type();
PrimitiveType random_type = random->shape().element_type();
PrimitiveType to_type = instruction->shape().element_type();
TF_RETURN_IF_ERROR(ShapeInference::InferStochasticConvertShape(
operand->shape(), random->shape(), to_type)
.status());
VLOG(1) << "Decomposing instruction: " << instruction->ToString();
if (primitive_util::IsSignedIntegralType(to_type)) {
TF_ASSIGN_OR_RETURN(HloInstruction * operand_sign,
MakeUnaryHlo(HloOpcode::kSign, operand));
TF_ASSIGN_OR_RETURN(HloInstruction * should_neg,
MakeCompareHlo(Comparison::Direction::kLt, operand_sign,
MakeScalarLike(operand_sign, 0)));
TF_ASSIGN_OR_RETURN(HloInstruction * operand_abs,
MakeUnaryHlo(HloOpcode::kAbs, operand));
TF_ASSIGN_OR_RETURN(HloInstruction * truncated_fp,
MakeUnaryHlo(HloOpcode::kFloor, operand_abs));
TF_ASSIGN_OR_RETURN(
HloInstruction * fractional,
MakeBinaryHlo(HloOpcode::kSubtract, operand_abs, truncated_fp));
if (from_type == F16) {
fractional = MakeConvertToHlo(fractional, F32);
}
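    // Scale the fraction to the full range of the random integer so the
    // comparison below rounds up with probability equal to the fraction.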
TF_ASSIGN_OR_RETURN(
HloInstruction * fixed_fractional,
MakeBinaryHlo(
HloOpcode::kMultiply, fractional,
MakeScalarLike(fractional, IPow<double>(2, primitive_util::BitWidth(
random_type)))));
TF_ASSIGN_OR_RETURN(
HloInstruction * should_round_up,
MakeCompareHlo(Comparison::Direction::kLt, random,
MakeConvertToHlo(fixed_fractional, random_type)));
HloInstruction* truncated_int = MakeConvertToHlo(truncated_fp, to_type);
TF_ASSIGN_OR_RETURN(
truncated_int,
MakeSelectHlo(should_round_up,
MakeBinaryHlo(HloOpcode::kAdd, truncated_int,
MakeScalarLike(truncated_int, 1))
.value(),
truncated_int));
TF_ASSIGN_OR_RETURN(
HloInstruction * result,
MakeSelectHlo(should_neg,
MakeUnaryHlo(HloOpcode::kNegate, truncated_int).value(),
truncated_int));
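    // Clamp to the representable range of the signed destination type.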
auto to_bits = primitive_util::BitWidth(to_type);
auto min = static_cast<int64_t>(
(static_cast<uint64_t>(1) + ~static_cast<uint64_t>(1))
<< (to_bits - 1));
TF_ASSIGN_OR_RETURN(HloInstruction * is_min,
MakeCompareHlo(Comparison::Direction::kLe, operand,
MakeScalarLike(operand, min)));
TF_ASSIGN_OR_RETURN(
result, MakeSelectHlo(is_min, MakeScalarLike(result, min), result));
auto max =
static_cast<int64_t>((static_cast<uint64_t>(1) << (to_bits - 1)) - 1);
TF_ASSIGN_OR_RETURN(HloInstruction * is_max,
MakeCompareHlo(Comparison::Direction::kGe, operand,
MakeScalarLike(operand, max)));
TF_ASSIGN_OR_RETURN(
result, MakeSelectHlo(is_max, MakeScalarLike(result, max), result));
TF_RETURN_IF_ERROR(instruction->ReplaceAllUsesWith(result));
TF_RETURN_IF_ERROR(comp->RemoveInstruction(instruction));
return absl::OkStatus();
}
return Internal("Unsupported stochastic convert: from %s to %s",
PrimitiveType_Name(from_type),
PrimitiveType_Name(to_type));
}
absl::StatusOr<bool> StochasticConvertDecomposer::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
for (HloComputation* computation :
module->MakeNonfusionComputations(execution_threads)) {
for (HloInstruction* instruction :
computation->MakeInstructionPostOrder()) {
if (instruction->opcode() != HloOpcode::kStochasticConvert) {
continue;
}
TF_RETURN_IF_ERROR(DecomposeStochasticConvert(computation, instruction));
changed = true;
}
}
return changed;
}
} | #include "xla/service/stochastic_convert_decomposer.h"
#include <string>
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/service/hlo_parser.h"
#include "xla/tests/hlo_test_base.h"
namespace xla {
namespace {
namespace op = xla::testing::opcode_matchers;
using StochasticConvertDecomposerTest = HloTestBase;
using ::testing::HasSubstr;
TEST_F(StochasticConvertDecomposerTest, DecomposeStochasticConvertF32ToS32) {
const std::string module_str = R"(
HloModule module
ENTRY entry {
%arg_param.1 = f32[65536]{0} parameter(0)
%random_param.2 = u32[65536]{0} parameter(1)
ROOT %stochastic-convert.3 = s32[65536]{0} stochastic-convert(f32[65536]{0} %arg_param.1, u32[65536]{0} %random_param.2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnUnverifiedModule(module_str));
StochasticConvertDecomposer decomposer;
TF_ASSERT_OK_AND_ASSIGN(bool changed, decomposer.Run(module.get()));
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Select(op::Compare(), op::Broadcast(),
op::Select(op::Compare(), op::Broadcast(),
op::Select(op::Compare(), op::Negate(),
op::Select()))));
}
TEST_F(StochasticConvertDecomposerTest, DecomposeStochasticConvertBF16ToS8) {
const std::string module_str = R"(
HloModule module
ENTRY entry {
%arg_param.1 = bf16[65536]{0} parameter(0)
%random_param.2 = u16[65536]{0} parameter(1)
ROOT %stochastic-convert.3 = s8[65536]{0} stochastic-convert(bf16[65536]{0} %arg_param.1, u16[65536]{0} %random_param.2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnUnverifiedModule(module_str));
StochasticConvertDecomposer decomposer;
TF_ASSERT_OK_AND_ASSIGN(bool changed, decomposer.Run(module.get()));
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Select(op::Compare(), op::Broadcast(),
op::Select(op::Compare(), op::Broadcast(),
op::Select(op::Compare(), op::Negate(),
op::Select()))));
}
TEST_F(StochasticConvertDecomposerTest, WrongRandomBitWidth) {
const std::string module_str = R"(
HloModule module
ENTRY entry {
%arg_param.1 = bf16[65536]{0} parameter(0)
%random_param.2 = u32[65536]{0} parameter(1)
ROOT %stochastic-convert.3 = s32[65536]{0} stochastic-convert(bf16[65536]{0} %arg_param.1, u32[65536]{0} %random_param.2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnUnverifiedModule(module_str));
StochasticConvertDecomposer decomposer;
auto result = decomposer.Run(module.get());
EXPECT_NE(absl::OkStatus(), result.status());
EXPECT_THAT(result.status().message(), HasSubstr("have same bits"));
}
TEST_F(StochasticConvertDecomposerTest, WrongRandomType) {
const std::string module_str = R"(
HloModule module
ENTRY entry {
%arg_param.1 = f32[65536]{0} parameter(0)
%random_param.2 = s32[65536]{0} parameter(1)
ROOT %stochastic-convert.3 = s32[65536]{0} stochastic-convert(f32[65536]{0} %arg_param.1, s32[65536]{0} %random_param.2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnUnverifiedModule(module_str));
StochasticConvertDecomposer decomposer;
auto result = decomposer.Run(module.get());
EXPECT_NE(absl::OkStatus(), result.status());
EXPECT_THAT(result.status().message(),
HasSubstr("must be unsigned integers"));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/stochastic_convert_decomposer.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/stochastic_convert_decomposer_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
59850b8a-9af1-4d1f-93e0-83f0db49b34b | cpp | google/quiche | priority_payload_decoder | quiche/http2/decoder/payload_decoders/priority_payload_decoder.cc | quiche/http2/decoder/payload_decoders/priority_payload_decoder_test.cc | #include "quiche/http2/decoder/payload_decoders/priority_payload_decoder.h"
#include "quiche/http2/decoder/decode_buffer.h"
#include "quiche/http2/decoder/http2_frame_decoder_listener.h"
#include "quiche/http2/http2_constants.h"
#include "quiche/http2/http2_structures.h"
#include "quiche/common/platform/api/quiche_logging.h"
namespace http2 {
DecodeStatus PriorityPayloadDecoder::StartDecodingPayload(
FrameDecoderState* state, DecodeBuffer* db) {
QUICHE_DVLOG(2) << "PriorityPayloadDecoder::StartDecodingPayload: "
<< state->frame_header();
QUICHE_DCHECK_EQ(Http2FrameType::PRIORITY, state->frame_header().type);
QUICHE_DCHECK_LE(db->Remaining(), state->frame_header().payload_length);
QUICHE_DCHECK_EQ(0, state->frame_header().flags);
state->InitializeRemainders();
return HandleStatus(
state, state->StartDecodingStructureInPayload(&priority_fields_, db));
}
DecodeStatus PriorityPayloadDecoder::ResumeDecodingPayload(
FrameDecoderState* state, DecodeBuffer* db) {
QUICHE_DVLOG(2) << "PriorityPayloadDecoder::ResumeDecodingPayload"
<< " remaining_payload=" << state->remaining_payload()
<< " db->Remaining=" << db->Remaining();
QUICHE_DCHECK_EQ(Http2FrameType::PRIORITY, state->frame_header().type);
QUICHE_DCHECK_LE(db->Remaining(), state->frame_header().payload_length);
return HandleStatus(
state, state->ResumeDecodingStructureInPayload(&priority_fields_, db));
}
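// On kDecodeDone with no payload remaining, reports the frame to the
// listener; leftover payload is a frame size error. In-progress and error
// statuses are passed through.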
DecodeStatus PriorityPayloadDecoder::HandleStatus(FrameDecoderState* state,
DecodeStatus status) {
if (status == DecodeStatus::kDecodeDone) {
if (state->remaining_payload() == 0) {
state->listener()->OnPriorityFrame(state->frame_header(),
priority_fields_);
return DecodeStatus::kDecodeDone;
}
return state->ReportFrameSizeError();
}
QUICHE_DCHECK(
(status == DecodeStatus::kDecodeInProgress &&
state->remaining_payload() > 0) ||
(status == DecodeStatus::kDecodeError && state->remaining_payload() == 0))
<< "\n status=" << status
<< "; remaining_payload=" << state->remaining_payload();
return status;
}
} | #include "quiche/http2/decoder/payload_decoders/priority_payload_decoder.h"
#include <stddef.h>
#include "quiche/http2/decoder/http2_frame_decoder_listener.h"
#include "quiche/http2/http2_constants.h"
#include "quiche/http2/test_tools/frame_parts.h"
#include "quiche/http2/test_tools/frame_parts_collector.h"
#include "quiche/http2/test_tools/http2_frame_builder.h"
#include "quiche/http2/test_tools/http2_random.h"
#include "quiche/http2/test_tools/http2_structures_test_util.h"
#include "quiche/http2/test_tools/payload_decoder_base_test_util.h"
#include "quiche/http2/test_tools/random_decoder_test_base.h"
#include "quiche/common/platform/api/quiche_logging.h"
#include "quiche/common/platform/api/quiche_test.h"
namespace http2 {
namespace test {
class PriorityPayloadDecoderPeer {
public:
static constexpr Http2FrameType FrameType() {
return Http2FrameType::PRIORITY;
}
static constexpr uint8_t FlagsAffectingPayloadDecoding() { return 0; }
};
namespace {
struct Listener : public FramePartsCollector {
void OnPriorityFrame(const Http2FrameHeader& header,
const Http2PriorityFields& priority_fields) override {
QUICHE_VLOG(1) << "OnPriority: " << header << "; " << priority_fields;
StartAndEndFrame(header)->OnPriorityFrame(header, priority_fields);
}
void OnFrameSizeError(const Http2FrameHeader& header) override {
QUICHE_VLOG(1) << "OnFrameSizeError: " << header;
FrameError(header)->OnFrameSizeError(header);
}
};
class PriorityPayloadDecoderTest
: public AbstractPayloadDecoderTest<PriorityPayloadDecoder,
PriorityPayloadDecoderPeer, Listener> {
protected:
Http2PriorityFields RandPriorityFields() {
Http2PriorityFields fields;
test::Randomize(&fields, RandomPtr());
return fields;
}
};
TEST_F(PriorityPayloadDecoderTest, WrongSize) {
auto approve_size = [](size_t size) {
return size != Http2PriorityFields::EncodedSize();
};
Http2FrameBuilder fb;
fb.Append(RandPriorityFields());
fb.Append(RandPriorityFields());
EXPECT_TRUE(VerifyDetectsFrameSizeError(0, fb.buffer(), approve_size));
}
TEST_F(PriorityPayloadDecoderTest, VariousPayloads) {
for (int n = 0; n < 100; ++n) {
Http2PriorityFields fields = RandPriorityFields();
Http2FrameBuilder fb;
fb.Append(fields);
Http2FrameHeader header(fb.size(), Http2FrameType::PRIORITY, RandFlags(),
RandStreamId());
set_frame_header(header);
FrameParts expected(header);
expected.SetOptPriority(fields);
EXPECT_TRUE(DecodePayloadAndValidateSeveralWays(fb.buffer(), expected));
}
}
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/http2/decoder/payload_decoders/priority_payload_decoder.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/http2/decoder/payload_decoders/priority_payload_decoder_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
0ef82f61-2c0d-4c8e-ac35-17fce5a47c6e | cpp | abseil/abseil-cpp | fixed_array | absl/container/fixed_array.h | absl/container/fixed_array_test.cc | #ifndef ABSL_CONTAINER_FIXED_ARRAY_H_
#define ABSL_CONTAINER_FIXED_ARRAY_H_
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <initializer_list>
#include <iterator>
#include <limits>
#include <memory>
#include <new>
#include <type_traits>
#include "absl/algorithm/algorithm.h"
#include "absl/base/config.h"
#include "absl/base/dynamic_annotations.h"
#include "absl/base/internal/throw_delegate.h"
#include "absl/base/macros.h"
#include "absl/base/optimization.h"
#include "absl/base/port.h"
#include "absl/container/internal/compressed_tuple.h"
#include "absl/memory/memory.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
constexpr static auto kFixedArrayUseDefault = static_cast<size_t>(-1);
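// A FixedArray<T> has a size fixed at construction. Up to `inline_elements`
// elements live in inline storage; larger arrays fall back to the allocator.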
template <typename T, size_t N = kFixedArrayUseDefault,
typename A = std::allocator<T>>
class FixedArray {
static_assert(!std::is_array<T>::value || std::extent<T>::value > 0,
"Arrays with unknown bounds cannot be used with FixedArray.");
static constexpr size_t kInlineBytesDefault = 256;
using AllocatorTraits = std::allocator_traits<A>;
template <typename Iterator>
using EnableIfForwardIterator = absl::enable_if_t<std::is_convertible<
typename std::iterator_traits<Iterator>::iterator_category,
std::forward_iterator_tag>::value>;
static constexpr bool NoexceptCopyable() {
return std::is_nothrow_copy_constructible<StorageElement>::value &&
absl::allocator_is_nothrow<allocator_type>::value;
}
static constexpr bool NoexceptMovable() {
return std::is_nothrow_move_constructible<StorageElement>::value &&
absl::allocator_is_nothrow<allocator_type>::value;
}
static constexpr bool DefaultConstructorIsNonTrivial() {
return !absl::is_trivially_default_constructible<StorageElement>::value;
}
public:
using allocator_type = typename AllocatorTraits::allocator_type;
using value_type = typename AllocatorTraits::value_type;
using pointer = typename AllocatorTraits::pointer;
using const_pointer = typename AllocatorTraits::const_pointer;
using reference = value_type&;
using const_reference = const value_type&;
using size_type = typename AllocatorTraits::size_type;
using difference_type = typename AllocatorTraits::difference_type;
using iterator = pointer;
using const_iterator = const_pointer;
using reverse_iterator = std::reverse_iterator<iterator>;
using const_reverse_iterator = std::reverse_iterator<const_iterator>;
static constexpr size_type inline_elements =
(N == kFixedArrayUseDefault ? kInlineBytesDefault / sizeof(value_type)
: static_cast<size_type>(N));
FixedArray(const FixedArray& other) noexcept(NoexceptCopyable())
: FixedArray(other,
AllocatorTraits::select_on_container_copy_construction(
other.storage_.alloc())) {}
FixedArray(const FixedArray& other,
const allocator_type& a) noexcept(NoexceptCopyable())
: FixedArray(other.begin(), other.end(), a) {}
FixedArray(FixedArray&& other) noexcept(NoexceptMovable())
: FixedArray(std::move(other), other.storage_.alloc()) {}
FixedArray(FixedArray&& other,
const allocator_type& a) noexcept(NoexceptMovable())
: FixedArray(std::make_move_iterator(other.begin()),
std::make_move_iterator(other.end()), a) {}
explicit FixedArray(size_type n, const allocator_type& a = allocator_type())
: storage_(n, a) {
if (DefaultConstructorIsNonTrivial()) {
memory_internal::ConstructRange(storage_.alloc(), storage_.begin(),
storage_.end());
}
}
FixedArray(size_type n, const value_type& val,
const allocator_type& a = allocator_type())
: storage_(n, a) {
memory_internal::ConstructRange(storage_.alloc(), storage_.begin(),
storage_.end(), val);
}
FixedArray(std::initializer_list<value_type> init_list,
const allocator_type& a = allocator_type())
: FixedArray(init_list.begin(), init_list.end(), a) {}
template <typename Iterator, EnableIfForwardIterator<Iterator>* = nullptr>
FixedArray(Iterator first, Iterator last,
const allocator_type& a = allocator_type())
: storage_(std::distance(first, last), a) {
memory_internal::CopyRange(storage_.alloc(), storage_.begin(), first, last);
}
~FixedArray() noexcept {
for (auto* cur = storage_.begin(); cur != storage_.end(); ++cur) {
AllocatorTraits::destroy(storage_.alloc(), cur);
}
}
void operator=(FixedArray&&) = delete;
void operator=(const FixedArray&) = delete;
size_type size() const { return storage_.size(); }
constexpr size_type max_size() const {
return (std::numeric_limits<difference_type>::max)() / sizeof(value_type);
}
bool empty() const { return size() == 0; }
size_t memsize() const { return size() * sizeof(value_type); }
const_pointer data() const ABSL_ATTRIBUTE_LIFETIME_BOUND {
return AsValueType(storage_.begin());
}
pointer data() ABSL_ATTRIBUTE_LIFETIME_BOUND {
return AsValueType(storage_.begin());
}
reference operator[](size_type i) ABSL_ATTRIBUTE_LIFETIME_BOUND {
ABSL_HARDENING_ASSERT(i < size());
return data()[i];
}
const_reference operator[](size_type i) const ABSL_ATTRIBUTE_LIFETIME_BOUND {
ABSL_HARDENING_ASSERT(i < size());
return data()[i];
}
reference at(size_type i) ABSL_ATTRIBUTE_LIFETIME_BOUND {
if (ABSL_PREDICT_FALSE(i >= size())) {
base_internal::ThrowStdOutOfRange("FixedArray::at failed bounds check");
}
return data()[i];
}
const_reference at(size_type i) const ABSL_ATTRIBUTE_LIFETIME_BOUND {
if (ABSL_PREDICT_FALSE(i >= size())) {
base_internal::ThrowStdOutOfRange("FixedArray::at failed bounds check");
}
return data()[i];
}
reference front() ABSL_ATTRIBUTE_LIFETIME_BOUND {
ABSL_HARDENING_ASSERT(!empty());
return data()[0];
}
const_reference front() const ABSL_ATTRIBUTE_LIFETIME_BOUND {
ABSL_HARDENING_ASSERT(!empty());
return data()[0];
}
reference back() ABSL_ATTRIBUTE_LIFETIME_BOUND {
ABSL_HARDENING_ASSERT(!empty());
return data()[size() - 1];
}
const_reference back() const ABSL_ATTRIBUTE_LIFETIME_BOUND {
ABSL_HARDENING_ASSERT(!empty());
return data()[size() - 1];
}
iterator begin() ABSL_ATTRIBUTE_LIFETIME_BOUND { return data(); }
const_iterator begin() const ABSL_ATTRIBUTE_LIFETIME_BOUND { return data(); }
const_iterator cbegin() const ABSL_ATTRIBUTE_LIFETIME_BOUND {
return begin();
}
iterator end() ABSL_ATTRIBUTE_LIFETIME_BOUND { return data() + size(); }
const_iterator end() const ABSL_ATTRIBUTE_LIFETIME_BOUND {
return data() + size();
}
const_iterator cend() const ABSL_ATTRIBUTE_LIFETIME_BOUND { return end(); }
reverse_iterator rbegin() ABSL_ATTRIBUTE_LIFETIME_BOUND {
return reverse_iterator(end());
}
const_reverse_iterator rbegin() const ABSL_ATTRIBUTE_LIFETIME_BOUND {
return const_reverse_iterator(end());
}
const_reverse_iterator crbegin() const ABSL_ATTRIBUTE_LIFETIME_BOUND {
return rbegin();
}
reverse_iterator rend() ABSL_ATTRIBUTE_LIFETIME_BOUND {
return reverse_iterator(begin());
}
const_reverse_iterator rend() const ABSL_ATTRIBUTE_LIFETIME_BOUND {
return const_reverse_iterator(begin());
}
const_reverse_iterator crend() const ABSL_ATTRIBUTE_LIFETIME_BOUND {
return rend();
}
void fill(const value_type& val) { std::fill(begin(), end(), val); }
friend bool operator==(const FixedArray& lhs, const FixedArray& rhs) {
return std::equal(lhs.begin(), lhs.end(), rhs.begin(), rhs.end());
}
friend bool operator!=(const FixedArray& lhs, const FixedArray& rhs) {
return !(lhs == rhs);
}
friend bool operator<(const FixedArray& lhs, const FixedArray& rhs) {
return std::lexicographical_compare(lhs.begin(), lhs.end(), rhs.begin(),
rhs.end());
}
friend bool operator>(const FixedArray& lhs, const FixedArray& rhs) {
return rhs < lhs;
}
friend bool operator<=(const FixedArray& lhs, const FixedArray& rhs) {
return !(rhs < lhs);
}
friend bool operator>=(const FixedArray& lhs, const FixedArray& rhs) {
return !(lhs < rhs);
}
template <typename H>
friend H AbslHashValue(H h, const FixedArray& v) {
return H::combine(H::combine_contiguous(std::move(h), v.data(), v.size()),
v.size());
}
private:
template <typename OuterT, typename InnerT = absl::remove_extent_t<OuterT>,
size_t InnerN = std::extent<OuterT>::value>
struct StorageElementWrapper {
InnerT array[InnerN];
};
using StorageElement =
absl::conditional_t<std::is_array<value_type>::value,
StorageElementWrapper<value_type>, value_type>;
static pointer AsValueType(pointer ptr) { return ptr; }
static pointer AsValueType(StorageElementWrapper<value_type>* ptr) {
return std::addressof(ptr->array);
}
static_assert(sizeof(StorageElement) == sizeof(value_type), "");
static_assert(alignof(StorageElement) == alignof(value_type), "");
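  // Inline buffer bracketed by ASan redzones so container annotations catch
  // out-of-bounds accesses to the unused tail.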
class NonEmptyInlinedStorage {
public:
StorageElement* data() { return reinterpret_cast<StorageElement*>(buff_); }
void AnnotateConstruct(size_type n);
void AnnotateDestruct(size_type n);
#ifdef ABSL_HAVE_ADDRESS_SANITIZER
void* RedzoneBegin() { return &redzone_begin_; }
void* RedzoneEnd() { return &redzone_end_ + 1; }
#endif
private:
ABSL_ADDRESS_SANITIZER_REDZONE(redzone_begin_);
alignas(StorageElement) char buff_[sizeof(StorageElement[inline_elements])];
ABSL_ADDRESS_SANITIZER_REDZONE(redzone_end_);
};
class EmptyInlinedStorage {
public:
StorageElement* data() { return nullptr; }
void AnnotateConstruct(size_type) {}
void AnnotateDestruct(size_type) {}
};
using InlinedStorage =
absl::conditional_t<inline_elements == 0, EmptyInlinedStorage,
NonEmptyInlinedStorage>;
class Storage : public InlinedStorage {
public:
Storage(size_type n, const allocator_type& a)
: size_alloc_(n, a), data_(InitializeData()) {}
~Storage() noexcept {
if (UsingInlinedStorage(size())) {
InlinedStorage::AnnotateDestruct(size());
} else {
AllocatorTraits::deallocate(alloc(), AsValueType(begin()), size());
}
}
size_type size() const { return size_alloc_.template get<0>(); }
StorageElement* begin() const { return data_; }
StorageElement* end() const { return begin() + size(); }
allocator_type& alloc() { return size_alloc_.template get<1>(); }
const allocator_type& alloc() const {
return size_alloc_.template get<1>();
}
private:
static bool UsingInlinedStorage(size_type n) {
return n <= inline_elements;
}
#ifdef ABSL_HAVE_ADDRESS_SANITIZER
ABSL_ATTRIBUTE_NOINLINE
#endif
StorageElement* InitializeData() {
if (UsingInlinedStorage(size())) {
InlinedStorage::AnnotateConstruct(size());
return InlinedStorage::data();
} else {
return reinterpret_cast<StorageElement*>(
AllocatorTraits::allocate(alloc(), size()));
}
}
container_internal::CompressedTuple<size_type, allocator_type> size_alloc_;
StorageElement* data_;
};
Storage storage_;
};
#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
template <typename T, size_t N, typename A>
constexpr size_t FixedArray<T, N, A>::kInlineBytesDefault;
template <typename T, size_t N, typename A>
constexpr typename FixedArray<T, N, A>::size_type
FixedArray<T, N, A>::inline_elements;
#endif
template <typename T, size_t N, typename A>
void FixedArray<T, N, A>::NonEmptyInlinedStorage::AnnotateConstruct(
typename FixedArray<T, N, A>::size_type n) {
#ifdef ABSL_HAVE_ADDRESS_SANITIZER
if (!n) return;
ABSL_ANNOTATE_CONTIGUOUS_CONTAINER(data(), RedzoneEnd(), RedzoneEnd(),
data() + n);
ABSL_ANNOTATE_CONTIGUOUS_CONTAINER(RedzoneBegin(), data(), data(),
RedzoneBegin());
#endif
static_cast<void>(n);
}
template <typename T, size_t N, typename A>
void FixedArray<T, N, A>::NonEmptyInlinedStorage::AnnotateDestruct(
typename FixedArray<T, N, A>::size_type n) {
#ifdef ABSL_HAVE_ADDRESS_SANITIZER
if (!n) return;
ABSL_ANNOTATE_CONTIGUOUS_CONTAINER(data(), RedzoneEnd(), data() + n,
RedzoneEnd());
ABSL_ANNOTATE_CONTIGUOUS_CONTAINER(RedzoneBegin(), data(), RedzoneBegin(),
data());
#endif
static_cast<void>(n);
}
ABSL_NAMESPACE_END
}
#endif | #include "absl/container/fixed_array.h"
#include <stdio.h>
#include <cstring>
#include <list>
#include <memory>
#include <numeric>
#include <scoped_allocator>
#include <stdexcept>
#include <string>
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/base/config.h"
#include "absl/base/internal/exception_testing.h"
#include "absl/base/options.h"
#include "absl/container/internal/test_allocator.h"
#include "absl/hash/hash_testing.h"
#include "absl/memory/memory.h"
using ::testing::ElementsAreArray;
namespace {
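// True when the array fits in its inline storage (no heap allocation).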
template <typename ArrayType>
static bool IsOnStack(const ArrayType& a) {
return a.size() <= ArrayType::inline_elements;
}
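// Counts constructions and destructions so tests can verify they balance.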
class ConstructionTester {
public:
ConstructionTester() : self_ptr_(this), value_(0) { constructions++; }
~ConstructionTester() {
assert(self_ptr_ == this);
self_ptr_ = nullptr;
destructions++;
}
static int constructions;
static int destructions;
void CheckConstructed() { assert(self_ptr_ == this); }
void set(int value) { value_ = value; }
int get() { return value_; }
private:
ConstructionTester* self_ptr_;
int value_;
};
int ConstructionTester::constructions = 0;
int ConstructionTester::destructions = 0;
class ThreeInts {
public:
ThreeInts() {
x_ = counter;
y_ = counter;
z_ = counter;
++counter;
}
static int counter;
int x_, y_, z_;
};
int ThreeInts::counter = 0;
TEST(FixedArrayTest, CopyCtor) {
absl::FixedArray<int, 10> on_stack(5);
std::iota(on_stack.begin(), on_stack.end(), 0);
absl::FixedArray<int, 10> stack_copy = on_stack;
EXPECT_THAT(stack_copy, ElementsAreArray(on_stack));
EXPECT_TRUE(IsOnStack(stack_copy));
absl::FixedArray<int, 10> allocated(15);
std::iota(allocated.begin(), allocated.end(), 0);
absl::FixedArray<int, 10> alloced_copy = allocated;
EXPECT_THAT(alloced_copy, ElementsAreArray(allocated));
EXPECT_FALSE(IsOnStack(alloced_copy));
}
TEST(FixedArrayTest, MoveCtor) {
absl::FixedArray<std::unique_ptr<int>, 10> on_stack(5);
for (int i = 0; i < 5; ++i) {
on_stack[i] = absl::make_unique<int>(i);
}
absl::FixedArray<std::unique_ptr<int>, 10> stack_copy = std::move(on_stack);
for (int i = 0; i < 5; ++i) EXPECT_EQ(*(stack_copy[i]), i);
EXPECT_EQ(stack_copy.size(), on_stack.size());
absl::FixedArray<std::unique_ptr<int>, 10> allocated(15);
for (int i = 0; i < 15; ++i) {
allocated[i] = absl::make_unique<int>(i);
}
absl::FixedArray<std::unique_ptr<int>, 10> alloced_copy =
std::move(allocated);
for (int i = 0; i < 15; ++i) EXPECT_EQ(*(alloced_copy[i]), i);
EXPECT_EQ(allocated.size(), alloced_copy.size());
}
TEST(FixedArrayTest, SmallObjects) {
{
absl::FixedArray<int> array(4);
EXPECT_TRUE(IsOnStack(array));
}
{
absl::FixedArray<int> array(1048576);
EXPECT_FALSE(IsOnStack(array));
}
{
absl::FixedArray<int, 100> array(100);
EXPECT_TRUE(IsOnStack(array));
}
{
absl::FixedArray<int, 100> array(101);
EXPECT_FALSE(IsOnStack(array));
}
{
absl::FixedArray<int> array1(0);
absl::FixedArray<char> array2(0);
EXPECT_LE(sizeof(array1), sizeof(array2) + 100);
EXPECT_LE(sizeof(array2), sizeof(array1) + 100);
}
{
absl::FixedArray<std::vector<int>> array(2);
EXPECT_EQ(0, array[0].size());
EXPECT_EQ(0, array[1].size());
}
{
ThreeInts::counter = 1;
absl::FixedArray<ThreeInts> array(2);
EXPECT_EQ(1, array[0].x_);
EXPECT_EQ(1, array[0].y_);
EXPECT_EQ(1, array[0].z_);
EXPECT_EQ(2, array[1].x_);
EXPECT_EQ(2, array[1].y_);
EXPECT_EQ(2, array[1].z_);
}
}
TEST(FixedArrayTest, AtThrows) {
absl::FixedArray<int> a = {1, 2, 3};
EXPECT_EQ(a.at(2), 3);
ABSL_BASE_INTERNAL_EXPECT_FAIL(a.at(3), std::out_of_range,
"failed bounds check");
}
TEST(FixedArrayTest, Hardened) {
#if !defined(NDEBUG) || ABSL_OPTION_HARDENED
absl::FixedArray<int> a = {1, 2, 3};
EXPECT_EQ(a[2], 3);
EXPECT_DEATH_IF_SUPPORTED(a[3], "");
EXPECT_DEATH_IF_SUPPORTED(a[-1], "");
absl::FixedArray<int> empty(0);
EXPECT_DEATH_IF_SUPPORTED(empty[0], "");
EXPECT_DEATH_IF_SUPPORTED(empty[-1], "");
EXPECT_DEATH_IF_SUPPORTED(empty.front(), "");
EXPECT_DEATH_IF_SUPPORTED(empty.back(), "");
#endif
}
TEST(FixedArrayRelationalsTest, EqualArrays) {
for (int i = 0; i < 10; ++i) {
absl::FixedArray<int, 5> a1(i);
std::iota(a1.begin(), a1.end(), 0);
absl::FixedArray<int, 5> a2(a1.begin(), a1.end());
EXPECT_TRUE(a1 == a2);
EXPECT_FALSE(a1 != a2);
EXPECT_TRUE(a2 == a1);
EXPECT_FALSE(a2 != a1);
EXPECT_FALSE(a1 < a2);
EXPECT_FALSE(a1 > a2);
EXPECT_FALSE(a2 < a1);
EXPECT_FALSE(a2 > a1);
EXPECT_TRUE(a1 <= a2);
EXPECT_TRUE(a1 >= a2);
EXPECT_TRUE(a2 <= a1);
EXPECT_TRUE(a2 >= a1);
}
}
TEST(FixedArrayRelationalsTest, UnequalArrays) {
for (int i = 1; i < 10; ++i) {
absl::FixedArray<int, 5> a1(i);
std::iota(a1.begin(), a1.end(), 0);
absl::FixedArray<int, 5> a2(a1.begin(), a1.end());
--a2[i / 2];
EXPECT_FALSE(a1 == a2);
EXPECT_TRUE(a1 != a2);
EXPECT_FALSE(a2 == a1);
EXPECT_TRUE(a2 != a1);
EXPECT_FALSE(a1 < a2);
EXPECT_TRUE(a1 > a2);
EXPECT_TRUE(a2 < a1);
EXPECT_FALSE(a2 > a1);
EXPECT_FALSE(a1 <= a2);
EXPECT_TRUE(a1 >= a2);
EXPECT_TRUE(a2 <= a1);
EXPECT_FALSE(a2 >= a1);
}
}
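// Builds a FixedArray<ConstructionTester, stack_elements> of size n and
// verifies size(), memsize(), iterator arithmetic, element access through
// both operator[] and data(), and that constructions equal destructions.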
template <int stack_elements>
static void TestArray(int n) {
SCOPED_TRACE(n);
SCOPED_TRACE(stack_elements);
ConstructionTester::constructions = 0;
ConstructionTester::destructions = 0;
{
absl::FixedArray<ConstructionTester, stack_elements> array(n);
EXPECT_THAT(array.size(), n);
EXPECT_THAT(array.memsize(), sizeof(ConstructionTester) * n);
EXPECT_THAT(array.begin() + n, array.end());
for (int i = 0; i < n; i++) {
array[i].CheckConstructed();
}
EXPECT_THAT(ConstructionTester::constructions, n);
for (int i = 0; i < n; i++) {
array[i].set(i);
}
for (int i = 0; i < n; i++) {
EXPECT_THAT(array[i].get(), i);
EXPECT_THAT(array.data()[i].get(), i);
}
for (int i = 0; i < n; i++) {
array.data()[i].set(i + 1);
}
for (int i = 0; i < n; i++) {
EXPECT_THAT(array[i].get(), i + 1);
EXPECT_THAT(array.data()[i].get(), i + 1);
}
}
EXPECT_EQ(ConstructionTester::constructions,
ConstructionTester::destructions);
}
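// Same as TestArray but with C-array elements, verifying that FixedArray
// constructs and destroys every element of every inner array exactly once.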
template <int elements_per_inner_array, int inline_elements>
static void TestArrayOfArrays(int n) {
SCOPED_TRACE(n);
SCOPED_TRACE(inline_elements);
SCOPED_TRACE(elements_per_inner_array);
ConstructionTester::constructions = 0;
ConstructionTester::destructions = 0;
{
using InnerArray = ConstructionTester[elements_per_inner_array];
auto array_ptr =
absl::make_unique<absl::FixedArray<InnerArray, inline_elements>>(n);
auto& array = *array_ptr;
ASSERT_EQ(array.size(), n);
ASSERT_EQ(array.memsize(),
sizeof(ConstructionTester) * elements_per_inner_array * n);
ASSERT_EQ(array.begin() + n, array.end());
for (int i = 0; i < n; i++) {
for (int j = 0; j < elements_per_inner_array; j++) {
(array[i])[j].CheckConstructed();
}
}
ASSERT_EQ(ConstructionTester::constructions, n * elements_per_inner_array);
for (int i = 0; i < n; i++) {
for (int j = 0; j < elements_per_inner_array; j++) {
(array[i])[j].set(i * elements_per_inner_array + j);
}
}
for (int i = 0; i < n; i++) {
for (int j = 0; j < elements_per_inner_array; j++) {
ASSERT_EQ((array[i])[j].get(), i * elements_per_inner_array + j);
ASSERT_EQ((array.data()[i])[j].get(), i * elements_per_inner_array + j);
}
}
for (int i = 0; i < n; i++) {
for (int j = 0; j < elements_per_inner_array; j++) {
(array.data()[i])[j].set((i + 1) * elements_per_inner_array + j);
}
}
for (int i = 0; i < n; i++) {
for (int j = 0; j < elements_per_inner_array; j++) {
ASSERT_EQ((array[i])[j].get(), (i + 1) * elements_per_inner_array + j);
ASSERT_EQ((array.data()[i])[j].get(),
(i + 1) * elements_per_inner_array + j);
}
}
}
EXPECT_EQ(ConstructionTester::constructions,
ConstructionTester::destructions);
}
TEST(IteratorConstructorTest, NonInline) {
int const kInput[] = {2, 3, 5, 7, 11, 13, 17};
absl::FixedArray<int, ABSL_ARRAYSIZE(kInput) - 1> const fixed(
kInput, kInput + ABSL_ARRAYSIZE(kInput));
ASSERT_EQ(ABSL_ARRAYSIZE(kInput), fixed.size());
for (size_t i = 0; i < ABSL_ARRAYSIZE(kInput); ++i) {
ASSERT_EQ(kInput[i], fixed[i]);
}
}
TEST(IteratorConstructorTest, Inline) {
int const kInput[] = {2, 3, 5, 7, 11, 13, 17};
absl::FixedArray<int, ABSL_ARRAYSIZE(kInput)> const fixed(
kInput, kInput + ABSL_ARRAYSIZE(kInput));
ASSERT_EQ(ABSL_ARRAYSIZE(kInput), fixed.size());
for (size_t i = 0; i < ABSL_ARRAYSIZE(kInput); ++i) {
ASSERT_EQ(kInput[i], fixed[i]);
}
}
TEST(IteratorConstructorTest, NonPod) {
char const* kInput[] = {"red", "orange", "yellow", "green",
"blue", "indigo", "violet"};
absl::FixedArray<std::string> const fixed(kInput,
kInput + ABSL_ARRAYSIZE(kInput));
ASSERT_EQ(ABSL_ARRAYSIZE(kInput), fixed.size());
for (size_t i = 0; i < ABSL_ARRAYSIZE(kInput); ++i) {
ASSERT_EQ(kInput[i], fixed[i]);
}
}
TEST(IteratorConstructorTest, FromEmptyVector) {
std::vector<int> const empty;
absl::FixedArray<int> const fixed(empty.begin(), empty.end());
EXPECT_EQ(0, fixed.size());
EXPECT_EQ(empty.size(), fixed.size());
}
TEST(IteratorConstructorTest, FromNonEmptyVector) {
int const kInput[] = {2, 3, 5, 7, 11, 13, 17};
std::vector<int> const items(kInput, kInput + ABSL_ARRAYSIZE(kInput));
absl::FixedArray<int> const fixed(items.begin(), items.end());
ASSERT_EQ(items.size(), fixed.size());
for (size_t i = 0; i < items.size(); ++i) {
ASSERT_EQ(items[i], fixed[i]);
}
}
TEST(IteratorConstructorTest, FromBidirectionalIteratorRange) {
int const kInput[] = {2, 3, 5, 7, 11, 13, 17};
std::list<int> const items(kInput, kInput + ABSL_ARRAYSIZE(kInput));
absl::FixedArray<int> const fixed(items.begin(), items.end());
EXPECT_THAT(fixed, testing::ElementsAreArray(kInput));
}
TEST(InitListConstructorTest, InitListConstruction) {
absl::FixedArray<int> fixed = {1, 2, 3};
EXPECT_THAT(fixed, testing::ElementsAreArray({1, 2, 3}));
}
TEST(FillConstructorTest, NonEmptyArrays) {
absl::FixedArray<int> stack_array(4, 1);
EXPECT_THAT(stack_array, testing::ElementsAreArray({1, 1, 1, 1}));
absl::FixedArray<int, 0> heap_array(4, 1);
  EXPECT_THAT(heap_array, testing::ElementsAreArray({1, 1, 1, 1}));
}
TEST(FillConstructorTest, EmptyArray) {
absl::FixedArray<int> empty_fill(0, 1);
absl::FixedArray<int> empty_size(0);
EXPECT_EQ(empty_fill, empty_size);
}
TEST(FillConstructorTest, NotTriviallyCopyable) {
std::string str = "abcd";
absl::FixedArray<std::string> strings = {str, str, str, str};
absl::FixedArray<std::string> array(4, str);
EXPECT_EQ(array, strings);
}
TEST(FillConstructorTest, Disambiguation) {
absl::FixedArray<size_t> a(1, 2);
EXPECT_THAT(a, testing::ElementsAre(2));
}
TEST(FixedArrayTest, ManySizedArrays) {
std::vector<int> sizes;
for (int i = 1; i < 100; i++) sizes.push_back(i);
for (int i = 100; i <= 1000; i += 100) sizes.push_back(i);
for (int n : sizes) {
TestArray<0>(n);
TestArray<1>(n);
TestArray<64>(n);
TestArray<1000>(n);
}
}
TEST(FixedArrayTest, ManySizedArraysOfArraysOf1) {
for (int n = 1; n < 1000; n++) {
ASSERT_NO_FATAL_FAILURE((TestArrayOfArrays<1, 0>(n)));
ASSERT_NO_FATAL_FAILURE((TestArrayOfArrays<1, 1>(n)));
ASSERT_NO_FATAL_FAILURE((TestArrayOfArrays<1, 64>(n)));
ASSERT_NO_FATAL_FAILURE((TestArrayOfArrays<1, 1000>(n)));
}
}
TEST(FixedArrayTest, ManySizedArraysOfArraysOf2) {
for (int n = 1; n < 1000; n++) {
TestArrayOfArrays<2, 0>(n);
TestArrayOfArrays<2, 1>(n);
TestArrayOfArrays<2, 64>(n);
TestArrayOfArrays<2, 1000>(n);
}
}
TEST(FixedArrayTest, AvoidParanoidDiagnostics) {
absl::FixedArray<char, 32> buf(32);
sprintf(buf.data(), "foo");
}
TEST(FixedArrayTest, TooBigInlinedSpace) {
struct TooBig {
char c[1 << 20];
};
struct Data {
TooBig* p;
size_t size;
};
static_assert(sizeof(absl::FixedArray<TooBig, 0>) == sizeof(Data),
"0-sized absl::FixedArray should have same size as Data.");
static_assert(alignof(absl::FixedArray<TooBig, 0>) == alignof(Data),
"0-sized absl::FixedArray should have same alignment as Data.");
static_assert(sizeof(absl::FixedArray<TooBig>) == sizeof(Data),
"default-sized absl::FixedArray should have same size as Data");
static_assert(
alignof(absl::FixedArray<TooBig>) == alignof(Data),
"default-sized absl::FixedArray should have same alignment as Data.");
}
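// Type whose class-specific operator delete fails the test if ever invoked;
// used below to prove FixedArray frees heap storage through the global
// allocator rather than through the element type's operator delete.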
struct PickyDelete {
PickyDelete() {}
~PickyDelete() {}
void operator delete(void* p) {
EXPECT_TRUE(false) << __FUNCTION__;
::operator delete(p);
}
void operator delete[](void* p) {
EXPECT_TRUE(false) << __FUNCTION__;
::operator delete[](p);
}
};
TEST(FixedArrayTest, UsesGlobalAlloc) { absl::FixedArray<PickyDelete, 0> a(5); }
TEST(FixedArrayTest, Data) {
static const int kInput[] = {2, 3, 5, 7, 11, 13, 17};
absl::FixedArray<int> fa(std::begin(kInput), std::end(kInput));
EXPECT_EQ(fa.data(), &*fa.begin());
EXPECT_EQ(fa.data(), &fa[0]);
const absl::FixedArray<int>& cfa = fa;
EXPECT_EQ(cfa.data(), &*cfa.begin());
EXPECT_EQ(cfa.data(), &cfa[0]);
}
TEST(FixedArrayTest, Empty) {
absl::FixedArray<int> empty(0);
absl::FixedArray<int> inline_filled(1);
absl::FixedArray<int, 0> heap_filled(1);
EXPECT_TRUE(empty.empty());
EXPECT_FALSE(inline_filled.empty());
EXPECT_FALSE(heap_filled.empty());
}
TEST(FixedArrayTest, FrontAndBack) {
absl::FixedArray<int, 3 * sizeof(int)> inlined = {1, 2, 3};
EXPECT_EQ(inlined.front(), 1);
EXPECT_EQ(inlined.back(), 3);
absl::FixedArray<int, 0> allocated = {1, 2, 3};
EXPECT_EQ(allocated.front(), 1);
EXPECT_EQ(allocated.back(), 3);
absl::FixedArray<int> one_element = {1};
EXPECT_EQ(one_element.front(), one_element.back());
}
TEST(FixedArrayTest, ReverseIteratorInlined) {
absl::FixedArray<int, 5 * sizeof(int)> a = {0, 1, 2, 3, 4};
int counter = 5;
for (absl::FixedArray<int>::reverse_iterator iter = a.rbegin();
iter != a.rend(); ++iter) {
counter--;
EXPECT_EQ(counter, *iter);
}
EXPECT_EQ(counter, 0);
counter = 5;
for (absl::FixedArray<int>::const_reverse_iterator iter = a.rbegin();
iter != a.rend(); ++iter) {
counter--;
EXPECT_EQ(counter, *iter);
}
EXPECT_EQ(counter, 0);
counter = 5;
for (auto iter = a.crbegin(); iter != a.crend(); ++iter) {
counter--;
EXPECT_EQ(counter, *iter);
}
EXPECT_EQ(counter, 0);
}
TEST(FixedArrayTest, ReverseIteratorAllocated) {
absl::FixedArray<int, 0> a = {0, 1, 2, 3, 4};
int counter = 5;
for (absl::FixedArray<int>::reverse_iterator iter = a.rbegin();
iter != a.rend(); ++iter) {
counter--;
EXPECT_EQ(counter, *iter);
}
EXPECT_EQ(counter, 0);
counter = 5;
for (absl::FixedArray<int>::const_reverse_iterator iter = a.rbegin();
iter != a.rend(); ++iter) {
counter--;
EXPECT_EQ(counter, *iter);
}
EXPECT_EQ(counter, 0);
counter = 5;
for (auto iter = a.crbegin(); iter != a.crend(); ++iter) {
counter--;
EXPECT_EQ(counter, *iter);
}
EXPECT_EQ(counter, 0);
}
TEST(FixedArrayTest, Fill) {
absl::FixedArray<int, 5 * sizeof(int)> inlined(5);
int fill_val = 42;
inlined.fill(fill_val);
for (int i : inlined) EXPECT_EQ(i, fill_val);
absl::FixedArray<int, 0> allocated(5);
allocated.fill(fill_val);
for (int i : allocated) EXPECT_EQ(i, fill_val);
absl::FixedArray<int> empty(0);
empty.fill(fill_val);
}
#ifndef __GNUC__
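// Placement-news a FixedArray over storage pre-filled with a scrub pattern
// and checks the pattern survives, showing that default construction of
// trivial elements does not value-initialize them. (Skipped under GCC.)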
TEST(FixedArrayTest, DefaultCtorDoesNotValueInit) {
using T = char;
constexpr auto capacity = 10;
using FixedArrType = absl::FixedArray<T, capacity>;
constexpr auto scrubbed_bits = 0x95;
constexpr auto length = capacity / 2;
alignas(FixedArrType) unsigned char buff[sizeof(FixedArrType)];
std::memset(std::addressof(buff), scrubbed_bits, sizeof(FixedArrType));
FixedArrType* arr =
::new (static_cast<void*>(std::addressof(buff))) FixedArrType(length);
EXPECT_THAT(*arr, testing::Each(scrubbed_bits));
arr->~FixedArrType();
}
#endif
TEST(AllocatorSupportTest, CountInlineAllocations) {
constexpr size_t inlined_size = 4;
using Alloc = absl::container_internal::CountingAllocator<int>;
using AllocFxdArr = absl::FixedArray<int, inlined_size, Alloc>;
int64_t allocated = 0;
int64_t active_instances = 0;
{
const int ia[] = {0, 1, 2, 3, 4, 5, 6, 7};
Alloc alloc(&allocated, &active_instances);
AllocFxdArr arr(ia, ia + inlined_size, alloc);
static_cast<void>(arr);
}
EXPECT_EQ(allocated, 0);
EXPECT_EQ(active_instances, 0);
}
TEST(AllocatorSupportTest, CountOutoflineAllocations) {
constexpr size_t inlined_size = 4;
using Alloc = absl::container_internal::CountingAllocator<int>;
using AllocFxdArr = absl::FixedArray<int, inlined_size, Alloc>;
int64_t allocated = 0;
int64_t active_instances = 0;
{
const int ia[] = {0, 1, 2, 3, 4, 5, 6, 7};
Alloc alloc(&allocated, &active_instances);
AllocFxdArr arr(ia, ia + ABSL_ARRAYSIZE(ia), alloc);
EXPECT_EQ(allocated, arr.size() * sizeof(int));
static_cast<void>(arr);
}
EXPECT_EQ(active_instances, 0);
}
TEST(AllocatorSupportTest, CountCopyInlineAllocations) {
constexpr size_t inlined_size = 4;
using Alloc = absl::container_internal::CountingAllocator<int>;
using AllocFxdArr = absl::FixedArray<int, inlined_size, Alloc>;
int64_t allocated1 = 0;
int64_t allocated2 = 0;
int64_t active_instances = 0;
Alloc alloc(&allocated1, &active_instances);
Alloc alloc2(&allocated2, &active_instances);
{
int initial_value = 1;
AllocFxdArr arr1(inlined_size / 2, initial_value, alloc);
EXPECT_EQ(allocated1, 0);
AllocFxdArr arr2(arr1, alloc2);
EXPECT_EQ(allocated2, 0);
static_cast<void>(arr1);
static_cast<void>(arr2);
}
EXPECT_EQ(active_instances, 0);
}
TEST(AllocatorSupportTest, CountCopyOutoflineAllocations) {
constexpr size_t inlined_size = 4;
using Alloc = absl::container_internal::CountingAllocator<int>;
using AllocFxdArr = absl::FixedArray<int, inlined_size, Alloc>;
int64_t allocated1 = 0;
int64_t allocated2 = 0;
int64_t active_instances = 0;
Alloc alloc(&allocated1, &active_instances);
Alloc alloc2(&allocated2, &active_instances);
{
int initial_value = 1;
AllocFxdArr arr1(inlined_size * 2, initial_value, alloc);
EXPECT_EQ(allocated1, arr1.size() * sizeof(int));
AllocFxdArr arr2(arr1, alloc2);
EXPECT_EQ(allocated2, inlined_size * 2 * sizeof(int));
static_cast<void>(arr1);
static_cast<void>(arr2);
}
EXPECT_EQ(active_instances, 0);
}
TEST(AllocatorSupportTest, SizeValAllocConstructor) {
using testing::AllOf;
using testing::Each;
using testing::SizeIs;
constexpr size_t inlined_size = 4;
using Alloc = absl::container_internal::CountingAllocator<int>;
using AllocFxdArr = absl::FixedArray<int, inlined_size, Alloc>;
{
auto len = inlined_size / 2;
auto val = 0;
int64_t allocated = 0;
AllocFxdArr arr(len, val, Alloc(&allocated));
EXPECT_EQ(allocated, 0);
EXPECT_THAT(arr, AllOf(SizeIs(len), Each(0)));
}
{
auto len = inlined_size * 2;
auto val = 0;
int64_t allocated = 0;
AllocFxdArr arr(len, val, Alloc(&allocated));
EXPECT_EQ(allocated, len * sizeof(int));
EXPECT_THAT(arr, AllOf(SizeIs(len), Each(0)));
}
}
TEST(AllocatorSupportTest, PropagatesStatefulAllocator) {
constexpr size_t inlined_size = 4;
using Alloc = absl::container_internal::CountingAllocator<int>;
using AllocFxdArr = absl::FixedArray<int, inlined_size, Alloc>;
auto len = inlined_size * 2;
auto val = 0;
int64_t allocated = 0;
AllocFxdArr arr(len, val, Alloc(&allocated));
EXPECT_EQ(allocated, len * sizeof(int));
AllocFxdArr copy = arr;
EXPECT_EQ(allocated, len * sizeof(int) * 2);
}
#ifdef ABSL_HAVE_ADDRESS_SANITIZER
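// Under AddressSanitizer, accesses between size() and the inline capacity of
// a FixedArray must trap as container-overflow, while in-bounds accesses
// must not.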
TEST(FixedArrayTest, AddressSanitizerAnnotations1) {
absl::FixedArray<int, 32> a(10);
int* raw = a.data();
raw[0] = 0;
raw[9] = 0;
EXPECT_DEATH_IF_SUPPORTED(raw[-2] = 0, "container-overflow");
EXPECT_DEATH_IF_SUPPORTED(raw[-1] = 0, "container-overflow");
EXPECT_DEATH_IF_SUPPORTED(raw[10] = 0, "container-overflow");
EXPECT_DEATH_IF_SUPPORTED(raw[31] = 0, "container-overflow");
}
TEST(FixedArrayTest, AddressSanitizerAnnotations2) {
absl::FixedArray<char, 17> a(12);
char* raw = a.data();
raw[0] = 0;
raw[11] = 0;
EXPECT_DEATH_IF_SUPPORTED(raw[-7] = 0, "container-overflow");
EXPECT_DEATH_IF_SUPPORTED(raw[-1] = 0, "container-overflow");
EXPECT_DEATH_IF_SUPPORTED(raw[12] = 0, "container-overflow");
EXPECT_DEATH_IF_SUPPORTED(raw[17] = 0, "container-overflow");
}
TEST(FixedArrayTest, AddressSanitizerAnnotations3) {
absl::FixedArray<uint64_t, 20> a(20);
uint64_t* raw = a.data();
raw[0] = 0;
raw[19] = 0;
EXPECT_DEATH_IF_SUPPORTED(raw[-1] = 0, "container-overflow");
EXPECT_DEATH_IF_SUPPORTED(raw[20] = 0, "container-overflow");
}
TEST(FixedArrayTest, AddressSanitizerAnnotations4) {
absl::FixedArray<ThreeInts> a(10);
ThreeInts* raw = a.data();
raw[0] = ThreeInts();
raw[9] = ThreeInts();
EXPECT_DEATH_IF_SUPPORTED(raw[-1].z_ = 0, "container-overflow");
EXPECT_DEATH_IF_SUPPORTED(raw[10] = ThreeInts(), "container-overflow");
EXPECT_DEATH_IF_SUPPORTED(raw[21] = ThreeInts(), "container-overflow");
}
#endif
TEST(FixedArrayTest, AbslHashValueWorks) {
using V = absl::FixedArray<int>;
std::vector<V> cases;
for (int i = 0; i < 10; ++i) {
V v(i);
for (int j = 0; j < i; ++j) {
v[j] = j;
}
cases.push_back(v);
}
EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(cases));
}
} | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/container/fixed_array.h | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/container/fixed_array_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4 |
19886cb3-2cfb-4460-800b-3e30336fb6f2 | cpp | tensorflow/tensorflow | arena | tensorflow/core/lib/core/arena.cc | tensorflow/core/lib/core/arena_test.cc | #include "tensorflow/core/lib/core/arena.h"
#include <assert.h>
#include <algorithm>
#include <vector>
#include "tensorflow/core/lib/math/math_util.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/mem.h"
namespace tensorflow {
namespace core {
Arena::Arena(const size_t block_size)
: remaining_(0),
block_size_(block_size),
freestart_(nullptr),
blocks_alloced_(1),
overflow_blocks_(nullptr) {
assert(block_size > kDefaultAlignment);
first_blocks_[0].mem =
reinterpret_cast<char*>(port::AlignedMalloc(block_size_, sizeof(void*)));
first_blocks_[0].size = block_size_;
Reset();
}
Arena::~Arena() {
FreeBlocks();
assert(overflow_blocks_ == nullptr);
for (size_t i = 0; i < blocks_alloced_; ++i) {
port::AlignedFree(first_blocks_[i].mem);
}
}
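// Advances freestart_ to the next address aligned to 'alignment', charging
// the skipped bytes to remaining_. Returns false if the current block is too
// small to satisfy the alignment.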
bool Arena::SatisfyAlignment(size_t alignment) {
const size_t overage = reinterpret_cast<size_t>(freestart_) & (alignment - 1);
if (overage > 0) {
const size_t waste = alignment - overage;
if (waste >= remaining_) {
return false;
}
freestart_ += waste;
remaining_ -= waste;
}
DCHECK_EQ(size_t{0}, reinterpret_cast<size_t>(freestart_) & (alignment - 1));
return true;
}
void Arena::Reset() {
FreeBlocks();
freestart_ = first_blocks_[0].mem;
remaining_ = first_blocks_[0].size;
CHECK(SatisfyAlignment(kDefaultAlignment));
freestart_when_empty_ = freestart_;
}
void Arena::MakeNewBlock(const uint32 alignment) {
AllocatedBlock* block = AllocNewBlock(block_size_, alignment);
freestart_ = block->mem;
remaining_ = block->size;
CHECK(SatisfyAlignment(alignment));
}
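// Least common multiple computed as (a / GCD(a, b)) * b, dividing before
// multiplying to reduce the risk of overflow.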
static uint32 LeastCommonMultiple(uint32 a, uint32 b) {
if (a > b) {
return (a / MathUtil::GCD<uint32>(a, b)) * b;
} else if (a < b) {
return (b / MathUtil::GCD<uint32>(b, a)) * a;
} else {
return a;
}
}
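// Allocates a fresh block whose alignment is compatible with both the request
// and kDefaultAlignment (and at least pointer-aligned), rounding the block
// size up to a multiple of that alignment. Blocks beyond the capacity of
// first_blocks_ spill into overflow_blocks_.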
Arena::AllocatedBlock* Arena::AllocNewBlock(const size_t block_size,
const uint32 alignment) {
AllocatedBlock* block;
if (blocks_alloced_ < TF_ARRAYSIZE(first_blocks_)) {
block = &first_blocks_[blocks_alloced_++];
} else {
if (overflow_blocks_ == nullptr)
overflow_blocks_ = new std::vector<AllocatedBlock>;
overflow_blocks_->resize(overflow_blocks_->size() + 1);
block = &overflow_blocks_->back();
}
uint32 adjusted_alignment =
(alignment > 1 ? LeastCommonMultiple(alignment, kDefaultAlignment) : 1);
adjusted_alignment =
std::max(adjusted_alignment, static_cast<uint32>(sizeof(void*)));
CHECK_LE(adjusted_alignment, static_cast<uint32>(1 << 20))
<< "Alignment on boundaries greater than 1MB not supported.";
size_t adjusted_block_size = block_size;
if (adjusted_block_size > adjusted_alignment) {
const uint32 excess = adjusted_block_size % adjusted_alignment;
adjusted_block_size += (excess > 0 ? adjusted_alignment - excess : 0);
}
block->mem = reinterpret_cast<char*>(
port::AlignedMalloc(adjusted_block_size, adjusted_alignment));
block->size = adjusted_block_size;
CHECK(nullptr != block->mem) << "block_size=" << block_size
<< " adjusted_block_size=" << adjusted_block_size
<< " alignment=" << alignment
<< " adjusted_alignment=" << adjusted_alignment;
return block;
}
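// Slow path behind Alloc/AllocAligned: requests larger than a quarter of the
// block size get a dedicated block; otherwise bytes are carved from the
// current block, starting a new one when alignment or space runs out.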
void* Arena::GetMemoryFallback(const size_t size, const int alignment) {
if (0 == size) {
return nullptr;
}
CHECK(alignment > 0 && 0 == (alignment & (alignment - 1)));
if (block_size_ == 0 || size > block_size_ / 4) {
return AllocNewBlock(size, alignment)->mem;
}
if (!SatisfyAlignment(alignment) || size > remaining_) {
MakeNewBlock(alignment);
}
CHECK_LE(size, remaining_);
remaining_ -= size;
void* result = freestart_;
freestart_ += size;
return result;
}
void Arena::FreeBlocks() {
for (size_t i = 1; i < blocks_alloced_; ++i) {
port::AlignedFree(first_blocks_[i].mem);
first_blocks_[i].mem = nullptr;
first_blocks_[i].size = 0;
}
blocks_alloced_ = 1;
if (overflow_blocks_ != nullptr) {
std::vector<AllocatedBlock>::iterator it;
for (it = overflow_blocks_->begin(); it != overflow_blocks_->end(); ++it) {
port::AlignedFree(it->mem);
}
delete overflow_blocks_;
overflow_blocks_ = nullptr;
}
}
}
} | #include "tensorflow/core/lib/core/arena.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace core {
namespace {
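// Writes patterns into 'mem' interleaved with unrelated heap churn so that
// corruption of arena-owned memory is more likely to be caught.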
static void TestMemory(void* mem, int size) {
memset(mem, 0xaa, size);
char* tmp[100];
for (size_t i = 0; i < TF_ARRAYSIZE(tmp); i++) {
tmp[i] = new char[i * i + 1];
}
memset(mem, 0xcc, size);
for (size_t i = 0; i < TF_ARRAYSIZE(tmp); i++) {
delete[] tmp[i];
}
memset(mem, 0xee, size);
}
TEST(ArenaTest, TestBasicArena) {
Arena a(1024);
char* memory = a.Alloc(100);
ASSERT_NE(memory, nullptr);
TestMemory(memory, 100);
memory = a.Alloc(100);
ASSERT_NE(memory, nullptr);
TestMemory(memory, 100);
}
TEST(ArenaTest, TestAlignment) {
Arena a(1024);
char* byte0 = a.Alloc(1);
char* alloc_aligned8 = a.AllocAligned(17, 8);
EXPECT_EQ(alloc_aligned8 - byte0, 8);
char* alloc_aligned8_b = a.AllocAligned(8, 8);
EXPECT_EQ(alloc_aligned8_b - alloc_aligned8, 24);
char* alloc_aligned8_c = a.AllocAligned(16, 8);
EXPECT_EQ(alloc_aligned8_c - alloc_aligned8_b, 8);
char* alloc_aligned8_d = a.AllocAligned(8, 1);
EXPECT_EQ(alloc_aligned8_d - alloc_aligned8_c, 16);
}
TEST(ArenaTest, TestVariousArenaSizes) {
{
Arena a(1024);
char* memory = a.Alloc(1024);
ASSERT_NE(memory, nullptr);
TestMemory(memory, 1024);
char* memory2 = a.Alloc(1024);
ASSERT_NE(memory2, nullptr);
TestMemory(memory2, 1024);
}
{
Arena a(1024);
char* memory = a.Alloc(768);
ASSERT_NE(memory, nullptr);
TestMemory(memory, 768);
char* memory2 = a.Alloc(768);
ASSERT_NE(memory2, nullptr);
TestMemory(memory2, 768);
}
{
Arena a(1024);
char* memory = a.Alloc(10240);
ASSERT_NE(memory, nullptr);
TestMemory(memory, 10240);
char* memory2 = a.Alloc(1234);
ASSERT_NE(memory2, nullptr);
TestMemory(memory2, 1234);
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/lib/core/arena.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/lib/core/arena_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
c282ff11-8b1f-4ce2-90b0-556c13f12219 | cpp | tensorflow/tensorflow | concurrency | third_party/xla/xla/backends/cpu/runtime/concurrency.h | third_party/xla/xla/backends/cpu/runtime/concurrency_test.cc | #ifndef XLA_BACKENDS_CPU_RUNTIME_CONCURRENCY_H_
#define XLA_BACKENDS_CPU_RUNTIME_CONCURRENCY_H_
#include <cstdint>
#include <functional>
#include <memory>
#include <type_traits>
#include "tsl/platform/logging.h"
#define EIGEN_USE_THREADS
#include "unsupported/Eigen/CXX11/Tensor"
#include "unsupported/Eigen/CXX11/ThreadPool"
namespace xla::cpu {
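// Schedules f(0), ..., f(n-1) onto the Eigen thread pool by recursively
// splitting [0, n): each task hands the upper half of its range back to the
// pool and keeps halving the lower half, so submission latency grows only
// logarithmically in n. The shared State keeps the functor alive until the
// last task has run.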
template <typename F,
std::enable_if_t<std::is_invocable_v<F, int64_t>>* = nullptr>
void ScheduleAll(const Eigen::ThreadPoolDevice* intra_op_threadpool, int64_t n,
F&& f) {
DCHECK(n >= 0) << "n must be non-negative";
if (n == 0) return;
if (n == 1) {
f(0);
return;
}
struct State {
State(const Eigen::ThreadPoolDevice* intra_op_threadpool, F&& f)
: intra_op_threadpool(intra_op_threadpool), f(std::forward<F>(f)) {}
void Execute(std::shared_ptr<State> self, int64_t start, int64_t end) {
while (end - start > 1) {
uint64_t mid = (start + end) / 2;
intra_op_threadpool->getPool()->Schedule(
std::bind(&State::Execute, this, self, mid, end));
end = mid;
}
f(start);
}
const Eigen::ThreadPoolDevice* intra_op_threadpool;
F f;
};
auto s = std::make_shared<State>(intra_op_threadpool, std::forward<F>(f));
s->Execute(std::move(s), 0, n);
}
}
#endif | #include "xla/backends/cpu/runtime/concurrency.h"
#include <cstdint>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/synchronization/blocking_counter.h"
#include "tsl/platform/env.h"
#include "tsl/platform/test.h"
#include "tsl/platform/threadpool.h"
namespace xla::cpu {
namespace {
TEST(ConcurrencyTest, ScheduleAll) {
tsl::thread::ThreadPool thread_pool(tsl::Env::Default(), "test", 10);
std::vector<int64_t> tasks(64, 0);
Eigen::ThreadPoolDevice device(thread_pool.AsEigenThreadPool(),
thread_pool.NumThreads());
absl::BlockingCounter counter(64);
ScheduleAll(&device, 64, [&](int64_t index) {
tasks[index] += 1;
counter.DecrementCount();
});
counter.Wait();
ASSERT_TRUE(absl::c_all_of(tasks, [](int64_t task) { return task == 1; }));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/backends/cpu/runtime/concurrency.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/backends/cpu/runtime/concurrency_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
2f828fe0-405a-42a5-8483-81e379c54f44 | cpp | tensorflow/tensorflow | nnapi_implementation | tensorflow/lite/nnapi/nnapi_implementation.cc | tensorflow/lite/nnapi/nnapi_implementation_test.cc | #include "tensorflow/lite/nnapi/nnapi_implementation.h"
#include <dlfcn.h>
#include <fcntl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <unistd.h>
#include <algorithm>
#include <cstdlib>
#include <memory>
#include <string>
#include "tensorflow/lite/nnapi/sl/public/NeuralNetworksSupportLibraryImpl.h"
#ifdef __ANDROID__
#include <sys/system_properties.h>
#endif
#define EXPAND_VA_ARGS(...) , ##__VA_ARGS__
#define NNAPI_LOG(format, ...) \
fprintf(stderr, format "\n" EXPAND_VA_ARGS(__VA_ARGS__));
namespace {
#ifdef __ANDROID__
const int kFirstIsolatedUid = 99000;
const int kLastIsolatedUid = 99999;
const int kFirstAppZygoteIsolatedUid = 90000;
const int kLastAppZygoteIsolatedUid = 98999;
bool IsIsolatedProcess() {
int uid = getuid();
return (uid >= kFirstIsolatedUid && uid <= kLastIsolatedUid) ||
(uid >= kFirstAppZygoteIsolatedUid &&
uid <= kLastAppZygoteIsolatedUid);
}
int32_t GetAndroidSdkVersion() {
const char* sdkProp = "ro.build.version.sdk";
char sdkVersion[PROP_VALUE_MAX];
int length = __system_property_get(sdkProp, sdkVersion);
if (length != 0) {
int32_t result = 0;
for (int i = 0; i < length; ++i) {
int digit = sdkVersion[i] - '0';
if (digit < 0 || digit > 9) {
return 0xffff;
}
result = result * 10 + digit;
}
return result;
}
return 0;
}
#endif
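// dlsym wrapper that logs a missing symbol unless it was marked optional.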
void* LoadFunction(void* handle, const char* name, bool optional) {
if (handle == nullptr) {
return nullptr;
}
void* fn = dlsym(handle, name);
if (fn == nullptr && !optional) {
NNAPI_LOG("nnapi error: unable to open function %s", name);
}
return fn;
}
#ifndef __ANDROID__
int ASharedMemory_create(const char* name, size_t size) {
int fd = shm_open(name, O_RDWR | O_CREAT | O_EXCL, 0644);
if (fd < 0) {
return fd;
}
int result = ftruncate(fd, size);
if (result < 0) {
close(fd);
return -1;
}
return fd;
}
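// On non-Android hosts, infers the equivalent Android API level (27-31) from
// which NNAPI entry points were successfully resolved.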
uint32_t CalculateAndroidSdkVersion(NnApi const& nnapi) {
bool has_10 = nnapi.ANeuralNetworksMemory_createFromFd != nullptr;
bool has_11 =
nnapi.ANeuralNetworksModel_relaxComputationFloat32toFloat16 != nullptr;
bool has_12 = nnapi.ANeuralNetworks_getDeviceCount != nullptr;
bool has_13 = nnapi.ANeuralNetworksCompilation_setTimeout != nullptr;
bool has_14 = nnapi.ANeuralNetworks_getRuntimeFeatureLevel != nullptr;
uint32_t sdk_version = 0;
if (has_10) {
sdk_version = 27;
}
if (sdk_version == 27 && has_11) {
sdk_version = 28;
}
if (sdk_version == 28 && has_12) {
sdk_version = 29;
}
if (sdk_version == 29 && has_13) {
sdk_version = 30;
}
if (sdk_version == 30 && has_14) {
sdk_version = 31;
}
return sdk_version;
}
#else
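// On Android, ASharedMemory_create lives in libandroid.so; older platforms
// fall back to ashmem_create_region from libcutils.so.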
ASharedMemory_create_fn getASharedMemory_create() {
void* libandroid = nullptr;
libandroid = dlopen("libandroid.so", RTLD_LAZY | RTLD_LOCAL);
if (libandroid != nullptr) {
return reinterpret_cast<ASharedMemory_create_fn>(
LoadFunction(libandroid, "ASharedMemory_create", false));
}
std::string libandroid_error = dlerror();
void* cutils_handle = dlopen("libcutils.so", RTLD_LAZY | RTLD_LOCAL);
if (cutils_handle != nullptr) {
return reinterpret_cast<ASharedMemory_create_fn>(
LoadFunction(cutils_handle, "ashmem_create_region", false));
}
NNAPI_LOG(
"nnapi error: unable to open both library %s (%s) and library %s "
"(%s)",
"libandroid.so", libandroid_error.c_str(), "libcutils.so", dlerror());
return nullptr;
}
#endif
#define LOAD_FUNCTION(handle, name) \
nnapi.name = reinterpret_cast<name##_fn>( \
LoadFunction(handle, #name, false));
#define LOAD_FUNCTION_OPTIONAL(handle, name) \
nnapi.name = reinterpret_cast<name##_fn>( \
LoadFunction(handle, #name, true));
#define LOAD_FUNCTION_RENAME(handle, name, symbol) \
nnapi.name = reinterpret_cast<name##_fn>( \
LoadFunction(handle, symbol, false));
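// Resolves the NNAPI function table from libneuralnetworks.so. Optional entry
// points that are absent resolve to nullptr so callers can feature-detect.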
const NnApi LoadNnApi() {
NnApi nnapi = {};
nnapi.android_sdk_version = 0;
#ifdef __ANDROID__
nnapi.android_sdk_version = GetAndroidSdkVersion();
if (nnapi.android_sdk_version < 27) {
NNAPI_LOG("nnapi error: requires android sdk version to be at least %d",
27);
nnapi.nnapi_exists = false;
return nnapi;
}
if (nnapi.android_sdk_version <= 33 && IsIsolatedProcess()) {
NNAPI_LOG("NNAPI is disabled in an isolated process");
nnapi.nnapi_exists = false;
return nnapi;
}
#endif
void* libneuralnetworks = nullptr;
static const char nnapi_library_name[] = "libneuralnetworks.so";
libneuralnetworks = dlopen(nnapi_library_name, RTLD_LAZY | RTLD_LOCAL);
#ifdef __ANDROID__
if (libneuralnetworks == nullptr) {
const char* error = dlerror();
if (error) {
NNAPI_LOG("%s\n", error);
}
NNAPI_LOG("nnapi error: unable to open library %s", nnapi_library_name);
}
#endif
nnapi.nnapi_exists = libneuralnetworks != nullptr;
LOAD_FUNCTION(libneuralnetworks, ANeuralNetworksMemory_createFromFd);
LOAD_FUNCTION(libneuralnetworks, ANeuralNetworksMemory_free);
LOAD_FUNCTION(libneuralnetworks, ANeuralNetworksModel_create);
LOAD_FUNCTION(libneuralnetworks, ANeuralNetworksModel_free);
LOAD_FUNCTION(libneuralnetworks, ANeuralNetworksModel_finish);
LOAD_FUNCTION(libneuralnetworks, ANeuralNetworksModel_addOperand);
LOAD_FUNCTION(libneuralnetworks, ANeuralNetworksModel_setOperandValue);
LOAD_FUNCTION_OPTIONAL(
libneuralnetworks,
ANeuralNetworksModel_setOperandSymmPerChannelQuantParams);
LOAD_FUNCTION(libneuralnetworks,
ANeuralNetworksModel_setOperandValueFromMemory);
LOAD_FUNCTION(libneuralnetworks, ANeuralNetworksModel_addOperation);
LOAD_FUNCTION(libneuralnetworks,
ANeuralNetworksModel_identifyInputsAndOutputs);
LOAD_FUNCTION(libneuralnetworks, ANeuralNetworksCompilation_create);
LOAD_FUNCTION(libneuralnetworks, ANeuralNetworksCompilation_free);
LOAD_FUNCTION(libneuralnetworks, ANeuralNetworksCompilation_setPreference);
LOAD_FUNCTION(libneuralnetworks, ANeuralNetworksCompilation_finish);
LOAD_FUNCTION(libneuralnetworks, ANeuralNetworksExecution_create);
LOAD_FUNCTION(libneuralnetworks, ANeuralNetworksExecution_free);
LOAD_FUNCTION(libneuralnetworks, ANeuralNetworksExecution_setInput);
LOAD_FUNCTION(libneuralnetworks, ANeuralNetworksExecution_setInputFromMemory);
LOAD_FUNCTION(libneuralnetworks, ANeuralNetworksExecution_setOutput);
LOAD_FUNCTION(libneuralnetworks,
ANeuralNetworksExecution_setOutputFromMemory);
LOAD_FUNCTION(libneuralnetworks, ANeuralNetworksExecution_startCompute);
LOAD_FUNCTION(libneuralnetworks, ANeuralNetworksEvent_wait);
LOAD_FUNCTION(libneuralnetworks, ANeuralNetworksEvent_free);
#ifdef __ANDROID__
nnapi.ASharedMemory_create = getASharedMemory_create();
#else
if (libneuralnetworks != nullptr) {
nnapi.ASharedMemory_create = ASharedMemory_create;
}
#endif
LOAD_FUNCTION_OPTIONAL(libneuralnetworks,
ANeuralNetworksModel_relaxComputationFloat32toFloat16);
LOAD_FUNCTION_OPTIONAL(libneuralnetworks, ANeuralNetworks_getDeviceCount);
LOAD_FUNCTION_OPTIONAL(libneuralnetworks, ANeuralNetworks_getDevice);
LOAD_FUNCTION_OPTIONAL(libneuralnetworks, ANeuralNetworksDevice_getName);
LOAD_FUNCTION_OPTIONAL(libneuralnetworks, ANeuralNetworksDevice_getVersion);
LOAD_FUNCTION_OPTIONAL(libneuralnetworks,
ANeuralNetworksDevice_getFeatureLevel);
LOAD_FUNCTION_OPTIONAL(libneuralnetworks, ANeuralNetworksDevice_getType);
LOAD_FUNCTION_OPTIONAL(libneuralnetworks,
ANeuralNetworksModel_getSupportedOperationsForDevices);
LOAD_FUNCTION_OPTIONAL(libneuralnetworks,
ANeuralNetworksCompilation_createForDevices);
LOAD_FUNCTION_OPTIONAL(libneuralnetworks,
ANeuralNetworksCompilation_setCaching);
LOAD_FUNCTION_OPTIONAL(libneuralnetworks, ANeuralNetworksExecution_compute);
LOAD_FUNCTION_OPTIONAL(libneuralnetworks,
ANeuralNetworksExecution_getOutputOperandRank);
LOAD_FUNCTION_OPTIONAL(libneuralnetworks,
ANeuralNetworksExecution_getOutputOperandDimensions);
LOAD_FUNCTION_OPTIONAL(libneuralnetworks, ANeuralNetworksBurst_create);
LOAD_FUNCTION_OPTIONAL(libneuralnetworks, ANeuralNetworksBurst_free);
LOAD_FUNCTION_OPTIONAL(libneuralnetworks,
ANeuralNetworksExecution_burstCompute);
LOAD_FUNCTION_OPTIONAL(libneuralnetworks,
ANeuralNetworksMemory_createFromAHardwareBuffer);
LOAD_FUNCTION_OPTIONAL(libneuralnetworks,
ANeuralNetworksExecution_setMeasureTiming);
LOAD_FUNCTION_OPTIONAL(libneuralnetworks,
ANeuralNetworksExecution_getDuration);
LOAD_FUNCTION_OPTIONAL(libneuralnetworks,
ANeuralNetworksDevice_getExtensionSupport);
LOAD_FUNCTION_OPTIONAL(libneuralnetworks,
ANeuralNetworksModel_getExtensionOperandType);
LOAD_FUNCTION_OPTIONAL(libneuralnetworks,
ANeuralNetworksModel_getExtensionOperationType);
LOAD_FUNCTION_OPTIONAL(libneuralnetworks,
ANeuralNetworksModel_setOperandExtensionData);
LOAD_FUNCTION_OPTIONAL(libneuralnetworks,
ANeuralNetworksCompilation_setTimeout);
LOAD_FUNCTION_OPTIONAL(libneuralnetworks,
ANeuralNetworksCompilation_setPriority);
LOAD_FUNCTION_OPTIONAL(libneuralnetworks,
ANeuralNetworksExecution_setTimeout);
LOAD_FUNCTION_OPTIONAL(libneuralnetworks,
ANeuralNetworksExecution_setLoopTimeout);
LOAD_FUNCTION_OPTIONAL(libneuralnetworks, ANeuralNetworksMemoryDesc_create);
LOAD_FUNCTION_OPTIONAL(libneuralnetworks, ANeuralNetworksMemoryDesc_free);
LOAD_FUNCTION_OPTIONAL(libneuralnetworks,
ANeuralNetworksMemoryDesc_addInputRole);
LOAD_FUNCTION_OPTIONAL(libneuralnetworks,
ANeuralNetworksMemoryDesc_addOutputRole);
LOAD_FUNCTION_OPTIONAL(libneuralnetworks,
ANeuralNetworksMemoryDesc_setDimensions);
LOAD_FUNCTION_OPTIONAL(libneuralnetworks, ANeuralNetworksMemoryDesc_finish);
LOAD_FUNCTION_OPTIONAL(libneuralnetworks,
ANeuralNetworksMemory_createFromDesc);
LOAD_FUNCTION_OPTIONAL(libneuralnetworks, ANeuralNetworksMemory_copy);
LOAD_FUNCTION_OPTIONAL(libneuralnetworks,
ANeuralNetworksEvent_createFromSyncFenceFd);
LOAD_FUNCTION_OPTIONAL(libneuralnetworks,
ANeuralNetworksEvent_getSyncFenceFd);
LOAD_FUNCTION_OPTIONAL(libneuralnetworks,
ANeuralNetworksExecution_startComputeWithDependencies);
LOAD_FUNCTION_OPTIONAL(libneuralnetworks,
ANeuralNetworks_getRuntimeFeatureLevel);
LOAD_FUNCTION_OPTIONAL(libneuralnetworks,
ANeuralNetworksExecution_enableInputAndOutputPadding);
LOAD_FUNCTION_OPTIONAL(libneuralnetworks,
ANeuralNetworksExecution_setReusable);
LOAD_FUNCTION_OPTIONAL(
libneuralnetworks,
SL_ANeuralNetworksDiagnosticCompilationInfo_getSessionId);
LOAD_FUNCTION_OPTIONAL(
libneuralnetworks,
SL_ANeuralNetworksDiagnosticCompilationInfo_getNnApiVersion);
LOAD_FUNCTION_OPTIONAL(
libneuralnetworks,
SL_ANeuralNetworksDiagnosticCompilationInfo_getModelArchHash);
LOAD_FUNCTION_OPTIONAL(
libneuralnetworks,
SL_ANeuralNetworksDiagnosticCompilationInfo_getDeviceIds);
LOAD_FUNCTION_OPTIONAL(
libneuralnetworks,
SL_ANeuralNetworksDiagnosticCompilationInfo_getErrorCode);
LOAD_FUNCTION_OPTIONAL(
libneuralnetworks,
SL_ANeuralNetworksDiagnosticCompilationInfo_getInputDataClass);
LOAD_FUNCTION_OPTIONAL(
libneuralnetworks,
SL_ANeuralNetworksDiagnosticCompilationInfo_getOutputDataClass);
LOAD_FUNCTION_OPTIONAL(
libneuralnetworks,
SL_ANeuralNetworksDiagnosticCompilationInfo_getCompilationTimeNanos);
LOAD_FUNCTION_OPTIONAL(
libneuralnetworks,
SL_ANeuralNetworksDiagnosticCompilationInfo_isCachingEnabled);
LOAD_FUNCTION_OPTIONAL(
libneuralnetworks,
SL_ANeuralNetworksDiagnosticCompilationInfo_isControlFlowUsed);
LOAD_FUNCTION_OPTIONAL(
libneuralnetworks,
SL_ANeuralNetworksDiagnosticCompilationInfo_areDynamicTensorsUsed);
LOAD_FUNCTION_OPTIONAL(
libneuralnetworks,
SL_ANeuralNetworksDiagnosticExecutionInfo_getSessionId);
LOAD_FUNCTION_OPTIONAL(
libneuralnetworks,
SL_ANeuralNetworksDiagnosticExecutionInfo_getNnApiVersion);
LOAD_FUNCTION_OPTIONAL(
libneuralnetworks,
SL_ANeuralNetworksDiagnosticExecutionInfo_getModelArchHash);
LOAD_FUNCTION_OPTIONAL(
libneuralnetworks,
SL_ANeuralNetworksDiagnosticExecutionInfo_getDeviceIds);
LOAD_FUNCTION_OPTIONAL(
libneuralnetworks,
SL_ANeuralNetworksDiagnosticExecutionInfo_getExecutionMode);
LOAD_FUNCTION_OPTIONAL(
libneuralnetworks,
SL_ANeuralNetworksDiagnosticExecutionInfo_getInputDataClass);
LOAD_FUNCTION_OPTIONAL(
libneuralnetworks,
SL_ANeuralNetworksDiagnosticExecutionInfo_getOutputDataClass);
LOAD_FUNCTION_OPTIONAL(
libneuralnetworks,
SL_ANeuralNetworksDiagnosticExecutionInfo_getErrorCode);
LOAD_FUNCTION_OPTIONAL(
libneuralnetworks,
SL_ANeuralNetworksDiagnosticExecutionInfo_getRuntimeExecutionTimeNanos);
LOAD_FUNCTION_OPTIONAL(
libneuralnetworks,
SL_ANeuralNetworksDiagnosticExecutionInfo_getDriverExecutionTimeNanos);
LOAD_FUNCTION_OPTIONAL(
libneuralnetworks,
SL_ANeuralNetworksDiagnosticExecutionInfo_getHardwareExecutionTimeNanos);
LOAD_FUNCTION_OPTIONAL(
libneuralnetworks,
SL_ANeuralNetworksDiagnosticExecutionInfo_isCachingEnabled);
LOAD_FUNCTION_OPTIONAL(
libneuralnetworks,
SL_ANeuralNetworksDiagnosticExecutionInfo_isControlFlowUsed);
LOAD_FUNCTION_OPTIONAL(
libneuralnetworks,
SL_ANeuralNetworksDiagnosticExecutionInfo_areDynamicTensorsUsed);
LOAD_FUNCTION_OPTIONAL(libneuralnetworks,
SL_ANeuralNetworksDiagnostic_registerCallbacks);
#ifndef __ANDROID__
if (nnapi.nnapi_exists && nnapi.android_sdk_version == 0) {
nnapi.android_sdk_version = CalculateAndroidSdkVersion(nnapi);
}
#endif
if (nnapi.ANeuralNetworks_getRuntimeFeatureLevel) {
nnapi.nnapi_runtime_feature_level =
nnapi.ANeuralNetworks_getRuntimeFeatureLevel();
} else {
nnapi.nnapi_runtime_feature_level = nnapi.android_sdk_version;
}
return nnapi;
}
}
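// Builds an NnApi table backed by an NNAPI Support Library driver rather than
// the platform runtime; entry points that only exist on the platform runtime
// (plain Compilation_create, Execution_startCompute) are deliberately null.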
std::unique_ptr<const NnApi> CreateNnApiFromSupportLibrary(
const NnApiSLDriverImplFL5* nnapi_support_library_driver) {
auto nnapi = std::make_unique<NnApi>();
nnapi->nnapi_exists = true;
nnapi->android_sdk_version = ANEURALNETWORKS_FEATURE_LEVEL_5;
nnapi->nnapi_runtime_feature_level =
nnapi_support_library_driver->base.implFeatureLevel;
#define ASSIGN_SL_FUNCTION_TO_NNAPI(name) \
nnapi->name = nnapi_support_library_driver->name;
ASSIGN_SL_FUNCTION_TO_NNAPI(ANeuralNetworksMemory_createFromFd);
ASSIGN_SL_FUNCTION_TO_NNAPI(ANeuralNetworksMemory_free);
ASSIGN_SL_FUNCTION_TO_NNAPI(ANeuralNetworksModel_create);
ASSIGN_SL_FUNCTION_TO_NNAPI(ANeuralNetworksModel_free);
ASSIGN_SL_FUNCTION_TO_NNAPI(ANeuralNetworksModel_finish);
ASSIGN_SL_FUNCTION_TO_NNAPI(ANeuralNetworksModel_addOperand);
ASSIGN_SL_FUNCTION_TO_NNAPI(ANeuralNetworksModel_setOperandValue);
ASSIGN_SL_FUNCTION_TO_NNAPI(
ANeuralNetworksModel_setOperandSymmPerChannelQuantParams);
ASSIGN_SL_FUNCTION_TO_NNAPI(ANeuralNetworksModel_setOperandValueFromMemory);
ASSIGN_SL_FUNCTION_TO_NNAPI(ANeuralNetworksModel_addOperation);
ASSIGN_SL_FUNCTION_TO_NNAPI(ANeuralNetworksModel_identifyInputsAndOutputs);
ASSIGN_SL_FUNCTION_TO_NNAPI(
ANeuralNetworksModel_relaxComputationFloat32toFloat16);
nnapi->ANeuralNetworksCompilation_create = nullptr;
ASSIGN_SL_FUNCTION_TO_NNAPI(ANeuralNetworksCompilation_free);
ASSIGN_SL_FUNCTION_TO_NNAPI(ANeuralNetworksCompilation_setPreference);
ASSIGN_SL_FUNCTION_TO_NNAPI(ANeuralNetworksCompilation_finish);
ASSIGN_SL_FUNCTION_TO_NNAPI(ANeuralNetworksExecution_create);
ASSIGN_SL_FUNCTION_TO_NNAPI(ANeuralNetworksExecution_free);
ASSIGN_SL_FUNCTION_TO_NNAPI(ANeuralNetworksExecution_setInput);
ASSIGN_SL_FUNCTION_TO_NNAPI(ANeuralNetworksExecution_setInputFromMemory);
ASSIGN_SL_FUNCTION_TO_NNAPI(ANeuralNetworksExecution_setOutput);
ASSIGN_SL_FUNCTION_TO_NNAPI(ANeuralNetworksExecution_setOutputFromMemory);
nnapi->ANeuralNetworksExecution_startCompute = nullptr;
ASSIGN_SL_FUNCTION_TO_NNAPI(ANeuralNetworksEvent_wait);
ASSIGN_SL_FUNCTION_TO_NNAPI(ANeuralNetworksEvent_free);
#ifdef __ANDROID__
nnapi->ASharedMemory_create = getASharedMemory_create();
#else
nnapi->ASharedMemory_create = ASharedMemory_create;
#endif
ASSIGN_SL_FUNCTION_TO_NNAPI(ANeuralNetworks_getDeviceCount);
ASSIGN_SL_FUNCTION_TO_NNAPI(ANeuralNetworks_getDevice);
ASSIGN_SL_FUNCTION_TO_NNAPI(ANeuralNetworksDevice_getName);
ASSIGN_SL_FUNCTION_TO_NNAPI(ANeuralNetworksDevice_getVersion);
ASSIGN_SL_FUNCTION_TO_NNAPI(ANeuralNetworksDevice_getFeatureLevel);
ASSIGN_SL_FUNCTION_TO_NNAPI(ANeuralNetworksDevice_getType);
ASSIGN_SL_FUNCTION_TO_NNAPI(
ANeuralNetworksModel_getSupportedOperationsForDevices);
ASSIGN_SL_FUNCTION_TO_NNAPI(ANeuralNetworksCompilation_createForDevices);
ASSIGN_SL_FUNCTION_TO_NNAPI(ANeuralNetworksCompilation_setCaching);
ASSIGN_SL_FUNCTION_TO_NNAPI(ANeuralNetworksCompilation_setTimeout);
ASSIGN_SL_FUNCTION_TO_NNAPI(ANeuralNetworksCompilation_setPriority);
ASSIGN_SL_FUNCTION_TO_NNAPI(ANeuralNetworksExecution_compute);
ASSIGN_SL_FUNCTION_TO_NNAPI(ANeuralNetworksExecution_setTimeout);
ASSIGN_SL_FUNCTION_TO_NNAPI(ANeuralNetworksExecution_setLoopTimeout);
ASSIGN_SL_FUNCTION_TO_NNAPI(ANeuralNetworksExecution_getOutputOperandRank);
ASSIGN_SL_FUNCTION_TO_NNAPI(
ANeuralNetworksExecution_getOutputOperandDimensions);
ASSIGN_SL_FUNCTION_TO_NNAPI(ANeuralNetworksBurst_create);
ASSIGN_SL_FUNCTION_TO_NNAPI(ANeuralNetworksBurst_free);
ASSIGN_SL_FUNCTION_TO_NNAPI(ANeuralNetworksExecution_burstCompute);
ASSIGN_SL_FUNCTION_TO_NNAPI(ANeuralNetworksMemory_createFromAHardwareBuffer);
ASSIGN_SL_FUNCTION_TO_NNAPI(ANeuralNetworksExecution_setMeasureTiming);
ASSIGN_SL_FUNCTION_TO_NNAPI(ANeuralNetworksExecution_getDuration);
ASSIGN_SL_FUNCTION_TO_NNAPI(ANeuralNetworksDevice_getExtensionSupport);
ASSIGN_SL_FUNCTION_TO_NNAPI(ANeuralNetworksModel_getExtensionOperandType);
ASSIGN_SL_FUNCTION_TO_NNAPI(ANeuralNetworksModel_getExtensionOperationType);
ASSIGN_SL_FUNCTION_TO_NNAPI(ANeuralNetworksModel_setOperandExtensionData);
ASSIGN_SL_FUNCTION_TO_NNAPI(ANeuralNetworksMemoryDesc_create);
ASSIGN_SL_FUNCTION_TO_NNAPI(ANeuralNetworksMemoryDesc_free);
ASSIGN_SL_FUNCTION_TO_NNAPI(ANeuralNetworksMemoryDesc_addInputRole);
ASSIGN_SL_FUNCTION_TO_NNAPI(ANeuralNetworksMemoryDesc_addOutputRole);
ASSIGN_SL_FUNCTION_TO_NNAPI(ANeuralNetworksMemoryDesc_setDimensions);
ASSIGN_SL_FUNCTION_TO_NNAPI(ANeuralNetworksMemoryDesc_finish);
ASSIGN_SL_FUNCTION_TO_NNAPI(ANeuralNetworksMemory_createFromDesc);
ASSIGN_SL_FUNCTION_TO_NNAPI(ANeuralNetworksMemory_copy);
ASSIGN_SL_FUNCTION_TO_NNAPI(ANeuralNetworksEvent_createFromSyncFenceFd);
ASSIGN_SL_FUNCTION_TO_NNAPI(ANeuralNetworksEvent_getSyncFenceFd);
ASSIGN_SL_FUNCTION_TO_NNAPI(
ANeuralNetworksExecution_startComputeWithDependencies);
ASSIGN_SL_FUNCTION_TO_NNAPI(
ANeuralNetworksExecution_enableInputAndOutputPadding);
ASSIGN_SL_FUNCTION_TO_NNAPI(ANeuralNetworksExecution_setReusable);
ASSIGN_SL_FUNCTION_TO_NNAPI(ANeuralNetworks_getRuntimeFeatureLevel);
ASSIGN_SL_FUNCTION_TO_NNAPI(
SL_ANeuralNetworksDiagnosticCompilationInfo_getSessionId);
ASSIGN_SL_FUNCTION_TO_NNAPI(
SL_ANeuralNetworksDiagnosticCompilationInfo_getNnApiVersion);
ASSIGN_SL_FUNCTION_TO_NNAPI(
SL_ANeuralNetworksDiagnosticCompilationInfo_getModelArchHash);
ASSIGN_SL_FUNCTION_TO_NNAPI(
SL_ANeuralNetworksDiagnosticCompilationInfo_getDeviceIds);
ASSIGN_SL_FUNCTION_TO_NNAPI(
SL_ANeuralNetworksDiagnosticCompilationInfo_getErrorCode);
ASSIGN_SL_FUNCTION_TO_NNAPI(
SL_ANeuralNetworksDiagnosticCompilationInfo_getInputDataClass);
ASSIGN_SL_FUNCTION_TO_NNAPI(
SL_ANeuralNetworksDiagnosticCompilationInfo_getOutputDataClass);
ASSIGN_SL_FUNCTION_TO_NNAPI(
SL_ANeuralNetworksDiagnosticCompilationInfo_getCompilationTimeNanos);
ASSIGN_SL_FUNCTION_TO_NNAPI(
SL_ANeuralNetworksDiagnosticCompilationInfo_isCachingEnabled);
ASSIGN_SL_FUNCTION_TO_NNAPI(
SL_ANeuralNetworksDiagnosticCompilationInfo_isControlFlowUsed);
ASSIGN_SL_FUNCTION_TO_NNAPI(
SL_ANeuralNetworksDiagnosticCompilationInfo_areDynamicTensorsUsed);
ASSIGN_SL_FUNCTION_TO_NNAPI(
SL_ANeuralNetworksDiagnosticExecutionInfo_getSessionId);
ASSIGN_SL_FUNCTION_TO_NNAPI(
SL_ANeuralNetworksDiagnosticExecutionInfo_getNnApiVersion);
ASSIGN_SL_FUNCTION_TO_NNAPI(
SL_ANeuralNetworksDiagnosticExecutionInfo_getModelArchHash);
ASSIGN_SL_FUNCTION_TO_NNAPI(
SL_ANeuralNetworksDiagnosticExecutionInfo_getDeviceIds);
ASSIGN_SL_FUNCTION_TO_NNAPI(
SL_ANeuralNetworksDiagnosticExecutionInfo_getExecutionMode);
ASSIGN_SL_FUNCTION_TO_NNAPI(
SL_ANeuralNetworksDiagnosticExecutionInfo_getInputDataClass);
ASSIGN_SL_FUNCTION_TO_NNAPI(
SL_ANeuralNetworksDiagnosticExecutionInfo_getOutputDataClass);
ASSIGN_SL_FUNCTION_TO_NNAPI(
SL_ANeuralNetworksDiagnosticExecutionInfo_getErrorCode);
ASSIGN_SL_FUNCTION_TO_NNAPI(
SL_ANeuralNetworksDiagnosticExecutionInfo_getRuntimeExecutionTimeNanos);
ASSIGN_SL_FUNCTION_TO_NNAPI(
SL_ANeuralNetworksDiagnosticExecutionInfo_getDriverExecutionTimeNanos);
ASSIGN_SL_FUNCTION_TO_NNAPI(
SL_ANeuralNetworksDiagnosticExecutionInfo_getHardwareExecutionTimeNanos);
ASSIGN_SL_FUNCTION_TO_NNAPI(
SL_ANeuralNetworksDiagnosticExecutionInfo_isCachingEnabled);
ASSIGN_SL_FUNCTION_TO_NNAPI(
SL_ANeuralNetworksDiagnosticExecutionInfo_isControlFlowUsed);
ASSIGN_SL_FUNCTION_TO_NNAPI(
SL_ANeuralNetworksDiagnosticExecutionInfo_areDynamicTensorsUsed);
ASSIGN_SL_FUNCTION_TO_NNAPI(SL_ANeuralNetworksDiagnostic_registerCallbacks);
return nnapi;
}
const NnApi* NnApiImplementation() {
static const NnApi nnapi = LoadNnApi();
return &nnapi;
} | #include "tensorflow/lite/nnapi/nnapi_implementation.h"
#include <gtest/gtest.h>
namespace {
TEST(NnapiLibTest, NnApiImplementation) {
const NnApi* nnapi = NnApiImplementation();
EXPECT_NE(nnapi, nullptr);
#ifdef __ANDROID__
EXPECT_GT(nnapi->android_sdk_version, 0);
  if (nnapi->android_sdk_version < 27) {
EXPECT_FALSE(nnapi->nnapi_exists);
EXPECT_EQ(nnapi->ANeuralNetworksMemory_createFromFd, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksMemory_free, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksModel_create, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksModel_free, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksModel_finish, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksModel_addOperand, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksModel_setOperandValue, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksModel_setOperandValueFromMemory, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksModel_addOperation, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksModel_identifyInputsAndOutputs, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksModel_relaxComputationFloat32toFloat16,
nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksCompilation_create, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksCompilation_free, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksCompilation_setPreference, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksCompilation_finish, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksExecution_create, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksExecution_free, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksExecution_setInput, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksExecution_setInputFromMemory, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksExecution_setOutput, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksExecution_setOutputFromMemory, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksExecution_startCompute, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksEvent_wait, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksEvent_free, nullptr);
EXPECT_EQ(nnapi->ASharedMemory_create, nullptr);
} else {
EXPECT_TRUE(nnapi->nnapi_exists);
EXPECT_NE(nnapi->ANeuralNetworksMemory_createFromFd, nullptr);
EXPECT_NE(nnapi->ANeuralNetworksMemory_free, nullptr);
EXPECT_NE(nnapi->ANeuralNetworksModel_create, nullptr);
EXPECT_NE(nnapi->ANeuralNetworksModel_free, nullptr);
EXPECT_NE(nnapi->ANeuralNetworksModel_finish, nullptr);
EXPECT_NE(nnapi->ANeuralNetworksModel_addOperand, nullptr);
EXPECT_NE(nnapi->ANeuralNetworksModel_setOperandValue, nullptr);
EXPECT_NE(nnapi->ANeuralNetworksModel_setOperandValueFromMemory, nullptr);
EXPECT_NE(nnapi->ANeuralNetworksModel_addOperation, nullptr);
EXPECT_NE(nnapi->ANeuralNetworksModel_identifyInputsAndOutputs, nullptr);
if (nnapi->android_sdk_version >= 28) {
EXPECT_NE(nnapi->ANeuralNetworksModel_relaxComputationFloat32toFloat16,
nullptr);
} else {
EXPECT_EQ(nnapi->ANeuralNetworksModel_relaxComputationFloat32toFloat16,
nullptr);
}
EXPECT_NE(nnapi->ANeuralNetworksCompilation_create, nullptr);
EXPECT_NE(nnapi->ANeuralNetworksCompilation_free, nullptr);
EXPECT_NE(nnapi->ANeuralNetworksCompilation_setPreference, nullptr);
EXPECT_NE(nnapi->ANeuralNetworksCompilation_finish, nullptr);
EXPECT_NE(nnapi->ANeuralNetworksExecution_create, nullptr);
EXPECT_NE(nnapi->ANeuralNetworksExecution_free, nullptr);
EXPECT_NE(nnapi->ANeuralNetworksExecution_setInput, nullptr);
EXPECT_NE(nnapi->ANeuralNetworksExecution_setInputFromMemory, nullptr);
EXPECT_NE(nnapi->ANeuralNetworksExecution_setOutput, nullptr);
EXPECT_NE(nnapi->ANeuralNetworksExecution_setOutputFromMemory, nullptr);
EXPECT_NE(nnapi->ANeuralNetworksExecution_startCompute, nullptr);
EXPECT_NE(nnapi->ANeuralNetworksEvent_wait, nullptr);
EXPECT_NE(nnapi->ANeuralNetworksEvent_free, nullptr);
EXPECT_NE(nnapi->ASharedMemory_create, nullptr);
}
#else
EXPECT_FALSE(nnapi->nnapi_exists);
EXPECT_EQ(nnapi->android_sdk_version, 0);
EXPECT_EQ(nnapi->ANeuralNetworksMemory_createFromFd, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksMemory_free, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksModel_create, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksModel_free, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksModel_finish, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksModel_addOperand, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksModel_setOperandValue, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksModel_setOperandSymmPerChannelQuantParams,
nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksModel_setOperandValueFromMemory, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksModel_addOperation, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksModel_identifyInputsAndOutputs, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksModel_relaxComputationFloat32toFloat16,
nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksCompilation_create, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksCompilation_free, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksCompilation_setPreference, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksCompilation_finish, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksExecution_create, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksExecution_free, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksExecution_setInput, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksExecution_setInputFromMemory, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksExecution_setOutput, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksExecution_setOutputFromMemory, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksExecution_startCompute, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksEvent_wait, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksEvent_free, nullptr);
EXPECT_EQ(nnapi->ASharedMemory_create, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworks_getDeviceCount, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworks_getDevice, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksDevice_getName, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksDevice_getVersion, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksDevice_getFeatureLevel, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksModel_getSupportedOperationsForDevices,
nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksCompilation_createForDevices, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksCompilation_setCaching, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksExecution_compute, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksExecution_getOutputOperandRank, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksExecution_getOutputOperandDimensions,
nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksBurst_create, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksBurst_free, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksExecution_burstCompute, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksMemory_createFromAHardwareBuffer, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksExecution_setMeasureTiming, nullptr);
EXPECT_EQ(nnapi->ANeuralNetworksExecution_getDuration, nullptr);
#endif
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/nnapi/nnapi_implementation.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/nnapi/nnapi_implementation_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
bbf401b8-740f-4cd0-9b82-b7205cea4db8 | cpp | google/quiche | null_encrypter | quiche/quic/core/crypto/null_encrypter.cc | quiche/quic/core/crypto/null_encrypter_test.cc | #include "quiche/quic/core/crypto/null_encrypter.h"
#include <algorithm>
#include <limits>
#include <string>
#include "absl/numeric/int128.h"
#include "absl/strings/string_view.h"
#include "quiche/quic/core/quic_data_writer.h"
#include "quiche/quic/core/quic_utils.h"
namespace quic {
const size_t kHashSizeShort = 12;
NullEncrypter::NullEncrypter(Perspective perspective)
: perspective_(perspective) {}
bool NullEncrypter::SetKey(absl::string_view key) { return key.empty(); }
bool NullEncrypter::SetNoncePrefix(absl::string_view nonce_prefix) {
return nonce_prefix.empty();
}
bool NullEncrypter::SetIV(absl::string_view iv) { return iv.empty(); }
bool NullEncrypter::SetHeaderProtectionKey(absl::string_view key) {
return key.empty();
}
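// Null "encryption" provides integrity only: the output is a truncated
// 12-byte FNV-1a-128 hash of the associated data, the plaintext, and a
// perspective label, followed by the plaintext itself.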
bool NullEncrypter::EncryptPacket(uint64_t /*packet_number*/,
absl::string_view associated_data,
absl::string_view plaintext, char* output,
size_t* output_length,
size_t max_output_length) {
const size_t len = plaintext.size() + GetHashLength();
if (max_output_length < len) {
return false;
}
absl::uint128 hash;
if (perspective_ == Perspective::IS_SERVER) {
hash =
QuicUtils::FNV1a_128_Hash_Three(associated_data, plaintext, "Server");
} else {
hash =
QuicUtils::FNV1a_128_Hash_Three(associated_data, plaintext, "Client");
}
memmove(output + GetHashLength(), plaintext.data(), plaintext.length());
QuicUtils::SerializeUint128Short(hash,
reinterpret_cast<unsigned char*>(output));
*output_length = len;
return true;
}
std::string NullEncrypter::GenerateHeaderProtectionMask(
    absl::string_view /*sample*/) {
return std::string(5, 0);
}
size_t NullEncrypter::GetKeySize() const { return 0; }
size_t NullEncrypter::GetNoncePrefixSize() const { return 0; }
size_t NullEncrypter::GetIVSize() const { return 0; }
size_t NullEncrypter::GetMaxPlaintextSize(size_t ciphertext_size) const {
return ciphertext_size - std::min(ciphertext_size, GetHashLength());
}
size_t NullEncrypter::GetCiphertextSize(size_t plaintext_size) const {
return plaintext_size + GetHashLength();
}
QuicPacketCount NullEncrypter::GetConfidentialityLimit() const {
return std::numeric_limits<QuicPacketCount>::max();
}
absl::string_view NullEncrypter::GetKey() const { return absl::string_view(); }
absl::string_view NullEncrypter::GetNoncePrefix() const {
return absl::string_view();
}
size_t NullEncrypter::GetHashLength() const { return kHashSizeShort; }
} | #include "quiche/quic/core/crypto/null_encrypter.h"
#include "absl/base/macros.h"
#include "quiche/quic/platform/api/quic_test.h"
#include "quiche/quic/test_tools/quic_test_utils.h"
#include "quiche/common/test_tools/quiche_test_utils.h"
namespace quic {
namespace test {
class NullEncrypterTest : public QuicTestWithParam<bool> {};
TEST_F(NullEncrypterTest, EncryptClient) {
unsigned char expected[] = {
0x97,
0xdc,
0x27,
0x2f,
0x18,
0xa8,
0x56,
0x73,
0xdf,
0x8d,
0x1d,
0xd0,
'g',
'o',
'o',
'd',
'b',
'y',
'e',
'!',
};
char encrypted[256];
size_t encrypted_len = 0;
NullEncrypter encrypter(Perspective::IS_CLIENT);
ASSERT_TRUE(encrypter.EncryptPacket(0, "hello world!", "goodbye!", encrypted,
&encrypted_len, 256));
quiche::test::CompareCharArraysWithHexError(
"encrypted data", encrypted, encrypted_len,
reinterpret_cast<const char*>(expected), ABSL_ARRAYSIZE(expected));
}
TEST_F(NullEncrypterTest, EncryptServer) {
unsigned char expected[] = {
0x63,
0x5e,
0x08,
0x03,
0x32,
0x80,
0x8f,
0x73,
0xdf,
0x8d,
0x1d,
0x1a,
'g',
'o',
'o',
'd',
'b',
'y',
'e',
'!',
};
char encrypted[256];
size_t encrypted_len = 0;
NullEncrypter encrypter(Perspective::IS_SERVER);
ASSERT_TRUE(encrypter.EncryptPacket(0, "hello world!", "goodbye!", encrypted,
&encrypted_len, 256));
quiche::test::CompareCharArraysWithHexError(
"encrypted data", encrypted, encrypted_len,
reinterpret_cast<const char*>(expected), ABSL_ARRAYSIZE(expected));
}
TEST_F(NullEncrypterTest, GetMaxPlaintextSize) {
NullEncrypter encrypter(Perspective::IS_CLIENT);
EXPECT_EQ(1000u, encrypter.GetMaxPlaintextSize(1012));
EXPECT_EQ(100u, encrypter.GetMaxPlaintextSize(112));
EXPECT_EQ(10u, encrypter.GetMaxPlaintextSize(22));
EXPECT_EQ(0u, encrypter.GetMaxPlaintextSize(11));
}
TEST_F(NullEncrypterTest, GetCiphertextSize) {
NullEncrypter encrypter(Perspective::IS_CLIENT);
EXPECT_EQ(1012u, encrypter.GetCiphertextSize(1000));
EXPECT_EQ(112u, encrypter.GetCiphertextSize(100));
EXPECT_EQ(22u, encrypter.GetCiphertextSize(10));
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/crypto/null_encrypter.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/crypto/null_encrypter_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
0b6ab60b-4ff4-4235-aa67-57e09a1db005 | cpp | tensorflow/tensorflow | device_base | tensorflow/core/framework/device_base.cc | tensorflow/core/framework/device_base_test.cc | #define EIGEN_USE_THREADS
#include "tensorflow/core/framework/device_base.h"
#include <algorithm>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/synchronization/notification.h"
#include "unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/util/work_sharder.h"
namespace tensorflow {
DeviceBase::~DeviceBase() {
for (auto& temp : eigen_cpu_devices_) {
delete temp;
}
eigen_cpu_devices_.clear();
}
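// Synchronous copy wrappers: issue the asynchronous copy and block on an
// absl::Notification until the callback delivers its status.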
Status DeviceContext::CopyDeviceTensorToCPUSync(const Tensor* device_tensor,
StringPiece tensor_name,
Device* device,
Tensor* cpu_tensor) {
absl::Notification n;
Status status;
CopyDeviceTensorToCPU(device_tensor, tensor_name, device, cpu_tensor,
[&](const Status& s) {
status = s;
n.Notify();
});
n.WaitForNotification();
return status;
}
Status DeviceContext::CopyCPUTensorToDeviceSync(const Tensor* cpu_tensor,
Device* device,
Tensor* device_tensor) const {
absl::Notification n;
Status status;
CopyCPUTensorToDevice(cpu_tensor, device, device_tensor,
[&](const Status& s) {
status = s;
n.Notify();
});
n.WaitForNotification();
return status;
}
const DeviceAttributes& DeviceBase::attributes() const {
LOG(FATAL) << "DeviceBase does not implement attributes()";
std::abort();
}
const string& DeviceBase::name() const {
LOG(FATAL) << "DeviceBase does not implement name()";
std::abort();
}
const DeviceNameUtils::ParsedName& DeviceBase::parsed_name() const {
LOG(FATAL) << "DeviceBase does not implement parsed_name()";
std::abort();
}
const std::string& DeviceBase::device_type() const {
LOG(FATAL) << "DeviceBase does not implement device_type()";
std::abort();
}
void DeviceBase::set_eigen_cpu_device(Eigen::ThreadPoolDevice* d) {
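  // Pre-build one Eigen device per parallelism level (1..numThreads) so that
  // eigen_cpu_device() can hand one out without synchronization.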
for (int i = 1; i <= d->numThreads(); ++i) {
eigen_cpu_devices_.push_back(new Eigen::ThreadPoolDevice(
d->getPool(), i , d->allocator()));
}
}
const Eigen::ThreadPoolDevice* DeviceBase::eigen_cpu_device() {
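  // Clamp the caller's per-thread max parallelism into [1, numThreads] and
  // return the matching pre-built device.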
const int parallelism = std::max<int>(
1,
std::min<int>(GetPerThreadMaxParallelism(), eigen_cpu_devices_.size()));
return eigen_cpu_devices_[parallelism - 1];
}
namespace {
absl::flat_hash_set<std::string>* GetSymbolicDeviceList() {
static absl::flat_hash_set<std::string>* symbolic_device_list =
new absl::flat_hash_set<std::string>();
return symbolic_device_list;
}
}
void AddSymbolicExecutionDevice(const absl::string_view device_name) {
GetSymbolicDeviceList()->insert(std::string(device_name));
}
bool IsSymbolicExecutionDevice(const absl::string_view device_name) {
absl::flat_hash_set<std::string>* symbolic_devices = GetSymbolicDeviceList();
if (symbolic_devices->contains(device_name)) {
return true;
} else {
return false;
}
}
} | #define EIGEN_USE_THREADS
#include "tensorflow/core/framework/device_base.h"
#include "unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/lib/core/threadpool.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/util/work_sharder.h"
namespace tensorflow {
TEST(DeviceBaseTest, CpuDevice) {
DeviceBase dbase(Env::Default());
thread::ThreadPool pool(Env::Default(), "test", 16);
Eigen::ThreadPoolDevice eigen_device(pool.AsEigenThreadPool(),
pool.NumThreads());
ASSERT_FALSE(dbase.has_eigen_cpu_device());
dbase.set_eigen_cpu_device(&eigen_device);
ASSERT_TRUE(dbase.has_eigen_cpu_device());
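  // Each scope below checks that ScopedPerThreadMaxParallelism selects the
  // pre-built device whose thread count matches the (clamped) limit.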
{
auto d = dbase.eigen_cpu_device();
EXPECT_EQ(d->numThreads(), 16);
}
{
ScopedPerThreadMaxParallelism maxp(4);
auto d = dbase.eigen_cpu_device();
EXPECT_EQ(d->numThreads(), 4);
}
{
ScopedPerThreadMaxParallelism maxp(1);
auto d = dbase.eigen_cpu_device();
EXPECT_EQ(d->numThreads(), 1);
}
{
ScopedPerThreadMaxParallelism maxp(1000);
auto d = dbase.eigen_cpu_device();
EXPECT_EQ(d->numThreads(), 16);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/device_base.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/device_base_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b88e1308-3599-460c-8a88-6e076d5b4ee0 | cpp | google/cel-cpp | any | common/any.cc | common/any_test.cc | #include "common/any.h"
#include "absl/base/nullability.h"
#include "absl/strings/string_view.h"
namespace cel {
bool ParseTypeUrl(absl::string_view type_url,
absl::Nullable<absl::string_view*> prefix,
absl::Nullable<absl::string_view*> type_name) {
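  // A valid type URL contains a '/' with a non-empty type name after the last
  // one, e.g. "type.googleapis.com/bar.Baz".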
auto pos = type_url.find_last_of('/');
if (pos == absl::string_view::npos || pos + 1 == type_url.size()) {
return false;
}
if (prefix) {
*prefix = type_url.substr(0, pos + 1);
}
if (type_name) {
*type_name = type_url.substr(pos + 1);
}
return true;
}
} | #include "common/any.h"
#include <string>
#include "google/protobuf/any.pb.h"
#include "absl/strings/cord.h"
#include "absl/strings/string_view.h"
#include "internal/testing.h"
namespace cel {
namespace {
TEST(Any, Value) {
google::protobuf::Any any;
std::string scratch;
SetAnyValueFromCord(&any, absl::Cord("Hello World!"));
EXPECT_EQ(GetAnyValueAsCord(any), "Hello World!");
EXPECT_EQ(GetAnyValueAsString(any), "Hello World!");
EXPECT_EQ(GetAnyValueAsStringView(any, scratch), "Hello World!");
}
TEST(MakeTypeUrlWithPrefix, Basic) {
EXPECT_EQ(MakeTypeUrlWithPrefix("foo", "bar.Baz"), "foo/bar.Baz");
EXPECT_EQ(MakeTypeUrlWithPrefix("foo/", "bar.Baz"), "foo/bar.Baz");
}
TEST(MakeTypeUrl, Basic) {
EXPECT_EQ(MakeTypeUrl("bar.Baz"), "type.googleapis.com/bar.Baz");
}
TEST(ParseTypeUrl, Valid) {
EXPECT_TRUE(ParseTypeUrl("type.googleapis.com/bar.Baz"));
EXPECT_FALSE(ParseTypeUrl("type.googleapis.com"));
EXPECT_FALSE(ParseTypeUrl("type.googleapis.com/"));
EXPECT_FALSE(ParseTypeUrl("type.googleapis.com/foo/"));
}
TEST(ParseTypeUrl, TypeName) {
absl::string_view type_name;
EXPECT_TRUE(ParseTypeUrl("type.googleapis.com/bar.Baz", &type_name));
EXPECT_EQ(type_name, "bar.Baz");
EXPECT_FALSE(ParseTypeUrl("type.googleapis.com", &type_name));
EXPECT_FALSE(ParseTypeUrl("type.googleapis.com/", &type_name));
EXPECT_FALSE(ParseTypeUrl("type.googleapis.com/foo/", &type_name));
}
TEST(ParseTypeUrl, PrefixAndTypeName) {
absl::string_view prefix;
absl::string_view type_name;
EXPECT_TRUE(ParseTypeUrl("type.googleapis.com/bar.Baz", &prefix, &type_name));
EXPECT_EQ(prefix, "type.googleapis.com/");
EXPECT_EQ(type_name, "bar.Baz");
EXPECT_FALSE(ParseTypeUrl("type.googleapis.com", &prefix, &type_name));
EXPECT_FALSE(ParseTypeUrl("type.googleapis.com/", &prefix, &type_name));
EXPECT_FALSE(ParseTypeUrl("type.googleapis.com/foo/", &prefix, &type_name));
}
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/common/any.cc | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/common/any_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
d40f4bec-05dc-4d5e-acb1-12eb713ff1ff | cpp | tensorflow/tensorflow | uniform_quant_ops | tensorflow/core/ops/uniform_quant_ops.cc | tensorflow/core/ops/uniform_quant_ops_test.cc | #include "tensorflow/core/framework/common_shape_fns.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/shape_inference.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/util/quantization/uniform_quant_ops_params.h"
namespace tensorflow {
namespace {
using shape_inference::DimensionHandle;
using shape_inference::ShapeHandle;
using tensorflow::errors::InvalidArgument;
using tensorflow::errors::Unknown;
absl::StatusOr<TensorShape> ToTensorShape(ShapeHandle shape_handle,
int64_t rank) {
TensorShape shape;
for (int i = 0; i < rank; ++i) {
int64_t dim_size = shape_inference::InferenceContext::Value(
shape_inference::InferenceContext::DimKnownRank(shape_handle, i));
if (dim_size == shape_inference::InferenceContext::kUnknownDim) {
return Unknown("Dim size unknown.");
}
shape.AddDim(dim_size);
}
return shape;
}
Status ScalesZeroPointsShapeValid(shape_inference::InferenceContext* context,
DimensionHandle match_dimension_handle,
ShapeHandle scales, ShapeHandle zero_points) {
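  // scales and zero_points must both be scalars (per-tensor quantization) or
  // both be 1-D with size equal to match_dimension_handle (per-channel).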
const int32_t scales_rank = shape_inference::InferenceContext::Rank(scales);
const int32_t zero_points_rank =
shape_inference::InferenceContext::Rank(zero_points);
if (scales_rank == shape_inference::InferenceContext::kUnknownRank ||
zero_points_rank == shape_inference::InferenceContext::kUnknownRank) {
return absl::OkStatus();
}
if (scales_rank != zero_points_rank) {
return InvalidArgument("scales and zero_points must have same rank.");
}
if (scales_rank == 0) {
return absl::OkStatus();
}
DimensionHandle scales_size = context->Dim(scales, 0);
DimensionHandle zero_points_size = context->Dim(zero_points, 0);
DimensionHandle merged_scales;
TF_RETURN_IF_ERROR(
context->Merge(scales_size, match_dimension_handle, &merged_scales));
DimensionHandle merged_zero_points;
TF_RETURN_IF_ERROR(context->Merge(zero_points_size, match_dimension_handle,
&merged_zero_points));
return absl::OkStatus();
}
Status DotShape(shape_inference::InferenceContext* context) {
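  // lhs [m,k] x rhs [k,n] -> output [m,n]. lhs quantization is per-tensor
  // (scalar scales); rhs/output may be per-channel along the n dimension.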
ShapeHandle lhs;
TF_RETURN_IF_ERROR(context->WithRank(context->input(0), 2, &lhs));
ShapeHandle rhs;
TF_RETURN_IF_ERROR(context->WithRank(context->input(1), 2, &rhs));
ShapeHandle lhs_scales;
TF_RETURN_IF_ERROR(
context->WithRankAtMost(context->input(2), 0, &lhs_scales));
ShapeHandle lhs_zero_points;
TF_RETURN_IF_ERROR(
context->WithRankAtMost(context->input(3), 0, &lhs_zero_points));
ShapeHandle rhs_scales;
TF_RETURN_IF_ERROR(
context->WithRankAtMost(context->input(4), 1, &rhs_scales));
ShapeHandle rhs_zero_points;
TF_RETURN_IF_ERROR(
context->WithRankAtMost(context->input(5), 1, &rhs_zero_points));
ShapeHandle output_scales;
TF_RETURN_IF_ERROR(
context->WithRankAtMost(context->input(6), 1, &output_scales));
ShapeHandle output_zero_points;
TF_RETURN_IF_ERROR(
context->WithRankAtMost(context->input(7), 1, &output_zero_points));
DimensionHandle inner_lhs = context->Dim(lhs, 1);
DimensionHandle inner_rhs = context->Dim(rhs, 0);
DimensionHandle merged;
TF_RETURN_IF_ERROR(context->Merge(inner_lhs, inner_rhs, &merged));
DimensionHandle output_rows = context->Dim(lhs, 0);
DimensionHandle output_cols = context->Dim(rhs, 1);
TF_RETURN_IF_ERROR(ScalesZeroPointsShapeValid(context, output_cols,
rhs_scales, rhs_zero_points));
TF_RETURN_IF_ERROR(ScalesZeroPointsShapeValid(
context, output_cols, output_scales, output_zero_points));
context->set_output(0, context->Matrix(output_rows, output_cols));
return absl::OkStatus();
}
Status DotHybridShape(shape_inference::InferenceContext* context) {
ShapeHandle lhs;
TF_RETURN_IF_ERROR(context->WithRank(context->input(0), 2, &lhs));
ShapeHandle rhs;
TF_RETURN_IF_ERROR(context->WithRank(context->input(1), 2, &rhs));
ShapeHandle rhs_scales;
TF_RETURN_IF_ERROR(
context->WithRankAtMost(context->input(2), 1, &rhs_scales));
ShapeHandle rhs_zero_points;
TF_RETURN_IF_ERROR(
context->WithRankAtMost(context->input(3), 1, &rhs_zero_points));
DimensionHandle inner_lhs = context->Dim(lhs, 1);
DimensionHandle inner_rhs = context->Dim(rhs, 0);
DimensionHandle merged;
TF_RETURN_IF_ERROR(context->Merge(inner_lhs, inner_rhs, &merged));
DimensionHandle output_rows = context->Dim(lhs, 0);
DimensionHandle output_cols = context->Dim(rhs, 1);
TF_RETURN_IF_ERROR(ScalesZeroPointsShapeValid(context, output_cols,
rhs_scales, rhs_zero_points));
context->set_output(0, context->Matrix(output_rows, output_cols));
return absl::OkStatus();
}
struct ShapeCommonParams {
ShapeHandle lhs;
ShapeHandle rhs;
ShapeHandle lhs_scales;
ShapeHandle lhs_zero_points;
ShapeHandle rhs_scales;
ShapeHandle rhs_zero_points;
ShapeHandle output_scales;
ShapeHandle output_zero_points;
bool is_output_scales_zero_points_set;
ShapeCommonParams(ShapeHandle lhs, ShapeHandle rhs, ShapeHandle lhs_scales,
ShapeHandle lhs_zero_points, ShapeHandle rhs_scales,
ShapeHandle rhs_zero_points, ShapeHandle output_scales,
ShapeHandle output_zero_points)
: lhs(lhs),
rhs(rhs),
lhs_scales(lhs_scales),
lhs_zero_points(lhs_zero_points),
rhs_scales(rhs_scales),
rhs_zero_points(rhs_zero_points),
output_scales(output_scales),
output_zero_points(output_zero_points),
is_output_scales_zero_points_set(true) {}
ShapeCommonParams(ShapeHandle lhs, ShapeHandle rhs, ShapeHandle rhs_scales,
ShapeHandle rhs_zero_points)
: lhs(lhs),
rhs(rhs),
rhs_scales(rhs_scales),
rhs_zero_points(rhs_zero_points),
is_output_scales_zero_points_set(false) {}
};
Status ConvolutionShapeCommon(shape_inference::InferenceContext* context,
const ShapeCommonParams& params) {
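  // Shared shape inference for quantized convolutions: fall back to unknown
  // shapes when a rank is unknown, otherwise validate per-channel
  // scales/zero_points against the kernel output feature dimension and derive
  // the output shape from the convolution parameters.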
const int32_t lhs_rank = shape_inference::InferenceContext::Rank(params.lhs);
const int32_t rhs_rank = shape_inference::InferenceContext::Rank(params.rhs);
if (lhs_rank == shape_inference::InferenceContext::kUnknownRank &&
rhs_rank == shape_inference::InferenceContext::kUnknownRank) {
context->set_output(0, context->UnknownShape());
return absl::OkStatus();
} else if (lhs_rank == shape_inference::InferenceContext::kUnknownRank ||
rhs_rank == shape_inference::InferenceContext::kUnknownRank) {
context->set_output(
0, context->UnknownShapeOfRank(
lhs_rank == shape_inference::InferenceContext::kUnknownRank
? rhs_rank
: lhs_rank));
return absl::OkStatus();
} else if (lhs_rank != rhs_rank) {
return InvalidArgument("lhs and rhs must have same rank.");
}
auto lhs_shape = ToTensorShape(params.lhs, lhs_rank);
auto rhs_shape = ToTensorShape(params.rhs, rhs_rank);
if (!lhs_shape.ok() || !rhs_shape.ok()) {
context->set_output(0, context->UnknownShapeOfRank(lhs_rank));
return absl::OkStatus();
}
UniformQuantizedConvolutionParams convolution_params;
TF_RETURN_IF_ERROR(convolution_params.LoadFromAttrs(*context));
TF_RETURN_IF_ERROR(convolution_params.ValidateOrFillParamsAndValidateShape(
lhs_shape.value(), rhs_shape.value()));
DimensionHandle output_feature = context->Dim(
params.rhs,
convolution_params.dimension_numbers().kernel_output_feature_dimension());
TF_RETURN_IF_ERROR(ScalesZeroPointsShapeValid(
context, output_feature, params.rhs_scales, params.rhs_zero_points));
if (params.is_output_scales_zero_points_set) {
TF_RETURN_IF_ERROR(ScalesZeroPointsShapeValid(context, output_feature,
params.output_scales,
params.output_zero_points));
if (shape_inference::InferenceContext::Rank(params.output_scales) > 0) {
DimensionHandle scales_merged;
TF_RETURN_IF_ERROR(context->Merge(context->Dim(params.rhs_scales, 0),
context->Dim(params.output_scales, 0),
&scales_merged));
}
}
TF_ASSIGN_OR_RETURN(const auto& out_shape,
convolution_params.CalculateOutputShape(
lhs_shape.value(), rhs_shape.value()));
ShapeHandle out_shape_handle;
TF_RETURN_IF_ERROR(
context->MakeShapeFromTensorShape(out_shape, &out_shape_handle));
context->set_output(0, out_shape_handle);
return absl::OkStatus();
}
Status ConvolutionShape(shape_inference::InferenceContext* context) {
ShapeHandle lhs;
TF_RETURN_IF_ERROR(context->WithRankAtLeast(context->input(0), 2, &lhs));
ShapeHandle rhs;
TF_RETURN_IF_ERROR(context->WithRankAtLeast(context->input(1), 2, &rhs));
ShapeHandle lhs_scales;
TF_RETURN_IF_ERROR(
context->WithRankAtMost(context->input(2), 0, &lhs_scales));
ShapeHandle lhs_zero_points;
TF_RETURN_IF_ERROR(
context->WithRankAtMost(context->input(3), 0, &lhs_zero_points));
ShapeHandle rhs_scales;
TF_RETURN_IF_ERROR(
context->WithRankAtMost(context->input(4), 1, &rhs_scales));
ShapeHandle rhs_zero_points;
TF_RETURN_IF_ERROR(
context->WithRankAtMost(context->input(5), 1, &rhs_zero_points));
ShapeHandle output_scales;
TF_RETURN_IF_ERROR(
context->WithRankAtMost(context->input(6), 1, &output_scales));
ShapeHandle output_zero_points;
TF_RETURN_IF_ERROR(
context->WithRankAtMost(context->input(7), 1, &output_zero_points));
return ConvolutionShapeCommon(
context,
ShapeCommonParams(lhs, rhs, lhs_scales, lhs_zero_points, rhs_scales,
rhs_zero_points, output_scales, output_zero_points));
}
Status ConvolutionHybridShape(shape_inference::InferenceContext* context) {
ShapeHandle lhs;
TF_RETURN_IF_ERROR(context->WithRankAtLeast(context->input(0), 2, &lhs));
ShapeHandle rhs;
TF_RETURN_IF_ERROR(context->WithRankAtLeast(context->input(1), 2, &rhs));
ShapeHandle rhs_scales;
TF_RETURN_IF_ERROR(
context->WithRankAtMost(context->input(2), 1, &rhs_scales));
ShapeHandle rhs_zero_points;
TF_RETURN_IF_ERROR(
context->WithRankAtMost(context->input(3), 1, &rhs_zero_points));
return ConvolutionShapeCommon(
context, ShapeCommonParams(lhs, rhs, rhs_scales, rhs_zero_points));
}
}
REGISTER_OP("UniformQuantize")
.Input("input: Tin")
.Input("scales: float")
.Input("zero_points: int32")
.Output("output: Tout")
.Attr("Tin: {float}")
.Attr("Tout: {qint8, qint32}")
.Attr("quantization_axis: int = -1")
.Attr("quantization_min_val: int")
.Attr("quantization_max_val: int")
.SetShapeFn(shape_inference::UnchangedShape);
REGISTER_OP("UniformRequantize")
.Input("input: Tin")
.Input("input_scales: float")
.Input("input_zero_points: int32")
.Input("output_scales: float")
.Input("output_zero_points: int32")
.Output("output: Tout")
.Attr("Tin: {qint8, qint32}")
.Attr("Tout: {qint8, qint32}")
.Attr("input_quantization_axis: int = -1")
.Attr("input_quantization_min_val: int")
.Attr("input_quantization_max_val: int")
.Attr("output_quantization_axis: int = -1")
.Attr("output_quantization_min_val: int")
.Attr("output_quantization_max_val: int")
.SetShapeFn(shape_inference::UnchangedShape);
REGISTER_OP("UniformDequantize")
.Input("input: Tin")
.Input("scales: float")
.Input("zero_points: int32")
.Output("output: Tout")
.Attr("Tin: {qint8, qint32}")
.Attr("Tout: {float}")
.Attr("quantization_axis: int = -1")
.Attr("quantization_min_val: int")
.Attr("quantization_max_val: int")
.SetShapeFn(shape_inference::UnchangedShape);
REGISTER_OP("UniformQuantizedDot")
.Input("lhs: Tin")
.Input("rhs: Tin")
.Input("lhs_scales: float")
.Input("lhs_zero_points: int32")
.Input("rhs_scales: float")
.Input("rhs_zero_points: int32")
.Input("output_scales: float")
.Input("output_zero_points: int32")
.Output("output: Tout")
.Attr("Tin: {qint8}")
.Attr("Tout: {qint32}")
.Attr("lhs_quantization_axis: int = -1")
.Attr("lhs_quantization_min_val: int")
.Attr("lhs_quantization_max_val: int")
.Attr("rhs_quantization_axis: int = -1")
.Attr("rhs_quantization_min_val: int")
.Attr("rhs_quantization_max_val: int")
.Attr("output_quantization_axis: int = -1")
.Attr("output_quantization_min_val: int")
.Attr("output_quantization_max_val: int")
.SetShapeFn(DotShape);
REGISTER_OP("UniformQuantizedDotHybrid")
.Input("lhs: Tlhs")
.Input("rhs: Trhs")
.Input("rhs_scales: float")
.Input("rhs_zero_points: int32")
.Output("output: Tout")
.Attr("Tlhs: {float}")
.Attr("Trhs: {qint8}")
.Attr("Tout: {float}")
.Attr("rhs_quantization_axis: int = -1")
.Attr("rhs_quantization_min_val: int")
.Attr("rhs_quantization_max_val: int")
.SetShapeFn(DotHybridShape);
REGISTER_OP("UniformQuantizedConvolution")
.Input("lhs: Tin")
.Input("rhs: Tin")
.Input("lhs_scales: float")
.Input("lhs_zero_points: int32")
.Input("rhs_scales: float")
.Input("rhs_zero_points: int32")
.Input("output_scales: float")
.Input("output_zero_points: int32")
.Output("output: Tout")
.Attr("Tin: {qint8}")
.Attr("Tout: {qint32}")
.Attr("window_strides: list(int) = []")
.Attr("padding: string")
.Attr("explicit_padding: list(int) = []")
.Attr("lhs_dilation: list(int) = []")
.Attr("rhs_dilation: list(int) = []")
.Attr("batch_group_count: int = 1")
.Attr("feature_group_count: int = 1")
.Attr("dimension_numbers: string = ''")
.Attr("lhs_quantization_axis: int = -1")
.Attr("lhs_quantization_min_val: int")
.Attr("lhs_quantization_max_val: int")
.Attr("rhs_quantization_axis: int = -1")
.Attr("rhs_quantization_min_val: int")
.Attr("rhs_quantization_max_val: int")
.Attr("output_quantization_axis: int = -1")
.Attr("output_quantization_min_val: int")
.Attr("output_quantization_max_val: int")
.SetShapeFn(ConvolutionShape);
REGISTER_OP("UniformQuantizedConvolutionHybrid")
.Input("lhs: Tlhs")
.Input("rhs: Trhs")
.Input("rhs_scales: float")
.Input("rhs_zero_points: int32")
.Output("output: Tout")
.Attr("Tlhs: {float}")
.Attr("Trhs: {qint8}")
.Attr("Tout: {float}")
.Attr("window_strides: list(int) = []")
.Attr("padding: string")
.Attr("explicit_padding: list(int) = []")
.Attr("lhs_dilation: list(int) = []")
.Attr("rhs_dilation: list(int) = []")
.Attr("batch_group_count: int = 1")
.Attr("feature_group_count: int = 1")
.Attr("dimension_numbers: string = ''")
.Attr("rhs_quantization_axis: int = -1")
.Attr("rhs_quantization_min_val: int")
.Attr("rhs_quantization_max_val: int")
.SetShapeFn(ConvolutionHybridShape);
REGISTER_OP("UniformQuantizedAdd")
.Input("lhs: T")
.Input("rhs: T")
.Input("lhs_scales: float")
.Input("lhs_zero_points: int32")
.Input("rhs_scales: float")
.Input("rhs_zero_points: int32")
.Input("output_scales: float")
.Input("output_zero_points: int32")
.Output("output: T")
.Attr("lhs_quantization_axis: int = -1")
.Attr("lhs_quantization_min_val: int")
.Attr("lhs_quantization_max_val: int")
.Attr("rhs_quantization_axis: int = -1")
.Attr("rhs_quantization_min_val: int")
.Attr("rhs_quantization_max_val: int")
.Attr("output_quantization_axis: int = -1")
.Attr("output_quantization_min_val: int")
.Attr("output_quantization_max_val: int")
.Attr("T: {qint32}")
.SetShapeFn(shape_inference::BroadcastBinaryOpShapeFn);
REGISTER_OP("UniformQuantizedClipByValue")
.Input("operand: T")
.Input("min: T")
.Input("max: T")
.Input("scales: float")
.Input("zero_points: int32")
.Output("output: T")
.Attr("T: {qint32}")
.Attr("quantization_axis: int = -1")
.Attr("quantization_min_val: int")
.Attr("quantization_max_val: int")
.SetShapeFn(shape_inference::UnchangedShape);
} | #include <limits>
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/shape_inference_testutil.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/util/quantization/uniform_quant_ops_attr.pb.h"
namespace tensorflow {
namespace {
constexpr int32_t kInt8Min = std::numeric_limits<int8_t>::min();
constexpr int32_t kInt8Max = std::numeric_limits<int8_t>::max();
constexpr int32_t kInt32Min = std::numeric_limits<int32_t>::min();
constexpr int32_t kInt32Max = std::numeric_limits<int32_t>::max();
}
TEST(UniformQuantizedOpsTest, UniformQuantizedDotShapeInference) {
ShapeInferenceTestOp op("UniformQuantizedDot");
INFER_OK(op, "[4,2];[2,3];[];[];[];[];[];[]", "[d0_0,d1_1]");
INFER_OK(op, "[4,2];[2,3];[];[];[3];[3];[];[]", "[d0_0,d1_1]");
INFER_OK(op, "[4,2];[2,3];[];[];[3];[3];[3];[3]", "[d0_0,d1_1]");
INFER_ERROR("", op, "[4,2];[6,3];[];[];[];[];[];[]");
INFER_ERROR("", op, "[4,2];[2,3];[4];[4];[];[];[];[]");
INFER_ERROR("scales and zero_points must have same rank.", op,
"[4,2];[2,3];[];[];[3];[];[];[]");
INFER_ERROR("", op, "[4,2];[2,3];[];[];[6];[6];[];[]");
INFER_ERROR("", op, "[4,2];[2,3];[];[];[];[];[6];[6]");
}
TEST(UniformQuantizedOpsTest, UniformQuantizedDotHybridShapeInference) {
ShapeInferenceTestOp op("UniformQuantizedDotHybrid");
INFER_OK(op, "[4,2];[2,3];[];[]", "[d0_0,d1_1]");
INFER_OK(op, "[4,2];[2,3];[3];[3]", "[d0_0,d1_1]");
INFER_ERROR("", op, "[4,2];[6,3];[];[]");
INFER_ERROR("scales and zero_points must have same rank.", op,
"[4,2];[2,3];[3];[]");
INFER_ERROR("", op, "[4,2];[2,3];[6];[6]");
}
TEST(UniformQuantizedOpsTest,
UniformQuantizedConvolutionShapeInferencePerTensor) {
ShapeInferenceTestOp op("UniformQuantizedConvolution");
TF_ASSERT_OK(NodeDefBuilder("test", "UniformQuantizedConvolution")
.Input(FakeInput(DT_QINT8))
.Input(FakeInput(DT_QINT8))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Attr("Tin", DT_QINT8)
.Attr("Tout", DT_QINT32)
.Attr("lhs_quantization_min_val", kInt8Min)
.Attr("lhs_quantization_max_val", kInt8Max)
.Attr("rhs_quantization_min_val", kInt8Min)
.Attr("rhs_quantization_max_val", kInt8Max)
.Attr("output_quantization_min_val", kInt32Min)
.Attr("output_quantization_max_val", kInt32Max)
.Attr("padding", "VALID")
.Finalize(&op.node_def));
INFER_OK(op, "[2,3,40,50];[6,3,4,5];[];[];[];[];[];[]", "[2,6,37,46]");
INFER_ERROR("", op, "[2,3,40,50];[6,9,4,5];[];[];[];[];[];[]");
INFER_ERROR("", op, "[2,3,40,50];[6,3,4,5];[2];[2];[];[];[];[]");
INFER_ERROR("scales and zero_points must have same rank.", op,
"[2,3,40,50];[6,3,4,5];[];[];[6];[];[];[]");
INFER_ERROR("", op, "[2,3,40,50];[6,3,4,5];[];[];[];[];[12];[12]");
}
TEST(UniformQuantizedOpsTest,
UniformQuantizedConvolutionShapeInferencePerChannelRhs) {
ShapeInferenceTestOp op("UniformQuantizedConvolution");
TF_ASSERT_OK(NodeDefBuilder("test", "UniformQuantizedConvolution")
.Input(FakeInput(DT_QINT8))
.Input(FakeInput(DT_QINT8))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Attr("Tin", DT_QINT8)
.Attr("Tout", DT_QINT32)
.Attr("rhs_quantization_axis", 0)
.Attr("lhs_quantization_min_val", kInt8Min)
.Attr("lhs_quantization_max_val", kInt8Max)
.Attr("rhs_quantization_min_val", kInt8Min)
.Attr("rhs_quantization_max_val", kInt8Max)
.Attr("output_quantization_min_val", kInt32Min)
.Attr("output_quantization_max_val", kInt32Max)
.Attr("padding", "VALID")
.Finalize(&op.node_def));
INFER_OK(op, "[2,3,40,50];[6,3,4,5];[];[];[6];[6];[];[]", "[2,6,37,46]");
INFER_ERROR("", op, "[2,3,40,50];[6,3,4,5];[];[];[12];[12];[];[]");
}
TEST(UniformQuantizedOpsTest,
UniformQuantizedConvolutionShapeInferencePerChannelRhsAndOutput) {
ShapeInferenceTestOp op("UniformQuantizedConvolution");
TF_ASSERT_OK(NodeDefBuilder("test", "UniformQuantizedConvolution")
.Input(FakeInput(DT_QINT8))
.Input(FakeInput(DT_QINT8))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Attr("Tin", DT_QINT8)
.Attr("Tout", DT_QINT32)
.Attr("rhs_quantization_axis", 0)
.Attr("output_quantization_axis", 1)
.Attr("lhs_quantization_min_val", kInt8Min)
.Attr("lhs_quantization_max_val", kInt8Max)
.Attr("rhs_quantization_min_val", kInt8Min)
.Attr("rhs_quantization_max_val", kInt8Max)
.Attr("output_quantization_min_val", kInt32Min)
.Attr("output_quantization_max_val", kInt32Max)
.Attr("padding", "VALID")
.Finalize(&op.node_def));
INFER_OK(op, "[2,3,40,50];[6,3,4,5];[];[];[6];[6];[6];[6]", "[2,6,37,46]");
}
TEST(UniformQuantizedOpsTest,
UniformQuantizedConvolutionHybridShapeInferencePerChannel) {
ShapeInferenceTestOp op("UniformQuantizedConvolutionHybrid");
TF_ASSERT_OK(NodeDefBuilder("test", "UniformQuantizedConvolutionHybrid")
.Input(FakeInput(DT_QINT8))
.Input(FakeInput(DT_QINT8))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Attr("Tlhs", DT_QINT8)
.Attr("Trhs", DT_QINT8)
.Attr("Tout", DT_QINT32)
.Attr("rhs_quantization_axis", 0)
.Attr("rhs_quantization_min_val", kInt8Min)
.Attr("rhs_quantization_max_val", kInt8Max)
.Attr("padding", "VALID")
.Finalize(&op.node_def));
INFER_OK(op, "[2,3,40,50];[6,3,4,5];[6];[6]", "[2,6,37,46]");
INFER_ERROR("", op, "[2,3,40,50];[6,3,4,5];[12];[12]");
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/ops/uniform_quant_ops.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/ops/uniform_quant_ops_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
9c45de3c-81f9-4c01-82ae-09a5a486e55a | cpp | tensorflow/tensorflow | crop | tensorflow/lite/experimental/ml_adjacent/algo/crop.cc | tensorflow/lite/experimental/ml_adjacent/algo/crop_test.cc | #include "tensorflow/lite/experimental/ml_adjacent/algo/crop.h"
#include <cstring>
#include "tensorflow/lite/experimental/ml_adjacent/lib.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
namespace ml_adj {
namespace crop {
namespace {
using ::ml_adj::algo::Algo;
using ::ml_adj::algo::InputPack;
using ::ml_adj::algo::OutputPack;
using ::ml_adj::data::DataRef;
using ::ml_adj::data::MutableDataRef;
using ::ml_adj::data::TypeWidth;
inline void CropToBoundingBox(dim_t offset_height, dim_t offset_width,
dim_t out_height, dim_t out_width,
const DataRef* input, MutableDataRef* output) {
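  // Per batch, copy out_height rows of out_width pixels (chunk = bytes per
  // pixel across all channels) starting at (offset_height, offset_width).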
const dim_t in_height = input->Dims()[1];
const dim_t in_width = input->Dims()[2];
const dim_t num_channels = input->Dims()[3];
const dim_t chunk = TypeWidth(input->Type()) * num_channels;
const dim_t in_img_size = in_height * in_width;
const dim_t out_img_size = out_height * out_width;
for (int b = 0; b < input->Dims()[0]; ++b) {
for (int i = 0; i < out_height; ++i) {
const dim_t read_byte_ofs =
(in_img_size * b + (i + offset_height) * in_width + offset_width) *
chunk;
const void* read_start_addr =
reinterpret_cast<const char*>(input->Data()) + read_byte_ofs;
const dim_t write_byte_ofs = chunk * (out_img_size * b + i * out_width);
void* write_addr =
reinterpret_cast<char*>(output->Data()) + write_byte_ofs;
std::memcpy(write_addr, read_start_addr, chunk * out_width);
}
}
}
void ComputeCenterCrop(const InputPack& inputs, const OutputPack& outputs) {
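  // Center crop: derive symmetric top/left offsets from the crop fraction and
  // keep the remaining central region (offsets truncate toward zero).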
#ifndef NDEBUG
TFLITE_CHECK(inputs.size() == 2);
TFLITE_CHECK(outputs.size() == 1);
#endif
const DataRef* img = inputs[0];
const DataRef* frac = inputs[1];
const double frac_data = *reinterpret_cast<const double*>(frac->Data());
const dim_t in_height = img->Dims()[1];
const dim_t out_height_offset = (in_height - in_height * frac_data) / 2;
const dim_t out_height = in_height - (2 * out_height_offset);
const dim_t in_width = img->Dims()[2];
const dim_t out_width_offset = (in_width - in_width * frac_data) / 2;
const dim_t out_width = in_width - (2 * out_width_offset);
MutableDataRef* output = outputs[0];
output->Resize({img->Dims()[0], out_height, out_width, img->Dims()[3]});
CropToBoundingBox(out_height_offset, out_width_offset, out_height, out_width,
img, output);
}
void ComputeCropToBoundingBox(const InputPack& inputs,
const OutputPack& outputs) {
TFLITE_DCHECK(inputs.size() == 5);
TFLITE_DCHECK(outputs.size() == 1);
const DataRef* img = inputs[0];
const DataRef* offset_height = inputs[1];
const dim_t offset_height_data =
*reinterpret_cast<const dim_t*>(offset_height->Data());
const DataRef* offset_width = inputs[2];
const dim_t offset_width_data =
*reinterpret_cast<const dim_t*>(offset_width->Data());
const DataRef* target_height = inputs[3];
const dim_t target_height_data =
*reinterpret_cast<const dim_t*>(target_height->Data());
const DataRef* target_width = inputs[4];
const dim_t target_width_data =
*reinterpret_cast<const dim_t*>(target_width->Data());
MutableDataRef* output = outputs[0];
output->Resize(
{img->Dims()[0], target_height_data, target_width_data, img->Dims()[3]});
CropToBoundingBox(offset_height_data, offset_width_data, target_height_data,
target_width_data, img, output);
}
}
const Algo* Impl_CenterCrop() {
static const Algo center_crop = {&ComputeCenterCrop, nullptr};
  return &center_crop;
}
const Algo* Impl_CropToBoundingBox() {
static const Algo crop_to_bounding_box = {&ComputeCropToBoundingBox, nullptr};
return &crop_to_bounding_box;
}
}
} | #include "tensorflow/lite/experimental/ml_adjacent/algo/crop.h"
#include <cstring>
#include <numeric>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/types/span.h"
#include "tensorflow/lite/experimental/ml_adjacent/data/owning_vector_ref.h"
#include "tensorflow/lite/experimental/ml_adjacent/lib.h"
using ::ml_adj::algo::Algo;
using ::ml_adj::data::OwningVectorRef;
using ::testing::ElementsAreArray;
namespace ml_adj {
namespace crop {
namespace {
std::vector<float> GetIotaVec(dim_t d1, dim_t d2, dim_t d3, dim_t d4) {
std::vector<float> res;
res.resize(d1 * d2 * d3 * d4);
std::iota(res.begin(), res.end(), 0);
return res;
}
struct CropCenterTestParams {
std::vector<dim_t> img_dims;
std::vector<float> img_data;
double frac;
std::vector<float> expected_data;
std::vector<dim_t> expected_shape;
};
class CropCenterTest : public testing::TestWithParam<CropCenterTestParams> {};
TEST_P(CropCenterTest, FloatPixelType) {
const CropCenterTestParams& params = GetParam();
OwningVectorRef img(etype_t::f32);
img.Resize(dims_t(params.img_dims));
ASSERT_EQ(img.Bytes(), params.img_data.size() * sizeof(float));
std::memcpy(img.Data(), params.img_data.data(), img.Bytes());
OwningVectorRef frac(etype_t::f64);
frac.Resize({1});
ASSERT_EQ(frac.Bytes(), sizeof(double));
  std::memcpy(frac.Data(), &params.frac, frac.Bytes());
OwningVectorRef output(etype_t::f32);
const Algo* center_crop = Impl_CenterCrop();
center_crop->process({&img, &frac}, {&output});
ASSERT_EQ(output.Bytes(), params.expected_data.size() * sizeof(float));
ASSERT_EQ(output.Dims(), params.expected_shape);
const float* out_data = reinterpret_cast<float*>(output.Data());
EXPECT_THAT(absl::MakeSpan(out_data, output.NumElements()),
ElementsAreArray(params.expected_data));
}
INSTANTIATE_TEST_SUITE_P(
CropTests, CropCenterTest,
testing::ValuesIn({
CropCenterTestParams{{1, 4, 4, 1},
GetIotaVec(1, 4, 4, 1),
0.5,
{5, 6, 9, 10},
{1, 2, 2, 1}},
CropCenterTestParams{{1, 5, 5, 1},
GetIotaVec(1, 5, 5, 1),
0.5,
{6, 7, 8, 11, 12, 13, 16, 17, 18},
{1, 3, 3, 1}},
CropCenterTestParams{{1, 3, 3, 1},
GetIotaVec(1, 3, 3, 1),
0.5,
{0, 1, 2, 3, 4, 5, 6, 7, 8},
{1, 3, 3, 1}},
CropCenterTestParams{{1, 5, 5, 1},
GetIotaVec(1, 5, 5, 1),
0.9,
GetIotaVec(1, 5, 5, 1),
{1, 5, 5, 1}},
CropCenterTestParams{
{1, 5, 5, 1}, GetIotaVec(1, 5, 5, 1), 0.2, {12}, {1, 1, 1, 1}},
CropCenterTestParams{{1, 2, 2, 2},
GetIotaVec(1, 2, 2, 2),
.7,
{0, 1, 2, 3, 4, 5, 6, 7},
{1, 2, 2, 2}},
CropCenterTestParams{
{1, 3, 3, 2}, GetIotaVec(1, 3, 3, 2), .1, {8, 9}, {1, 1, 1, 2}},
CropCenterTestParams{
{2, 3, 3, 1}, GetIotaVec(2, 3, 3, 1), .1, {4, 13}, {2, 1, 1, 1}},
CropCenterTestParams{{2, 3, 3, 2},
GetIotaVec(2, 3, 3, 2),
.1,
{8, 9, 26, 27},
{2, 1, 1, 2}},
}));
struct CropToBoundingBoxTestParams {
const std::vector<dim_t> img_dims;
const std::vector<float> img_data;
dim_t offset_height;
dim_t offset_width;
dim_t target_height;
dim_t target_width;
const std::vector<dim_t> expected_shape;
const std::vector<float> expected_data;
};
class CropToBoundingBoxTest
: public testing::TestWithParam<CropToBoundingBoxTestParams> {};
TEST_P(CropToBoundingBoxTest, FloatPixelType) {
const CropToBoundingBoxTestParams& params = GetParam();
OwningVectorRef img(etype_t::f32);
img.Resize(dims_t(params.img_dims));
ASSERT_EQ(img.Bytes(), params.img_data.size() * sizeof(float));
std::memcpy(img.Data(), params.img_data.data(), img.Bytes());
OwningVectorRef offset_height(etype_t::i32);
offset_height.Resize({1});
ASSERT_EQ(offset_height.Bytes(), sizeof(int));
  std::memcpy(offset_height.Data(), &params.offset_height,
offset_height.Bytes());
OwningVectorRef offset_width(etype_t::i32);
offset_width.Resize({1});
ASSERT_EQ(offset_width.Bytes(), sizeof(int));
  std::memcpy(offset_width.Data(), &params.offset_width, offset_width.Bytes());
OwningVectorRef target_height(etype_t::i32);
target_height.Resize({1});
ASSERT_EQ(target_height.Bytes(), sizeof(int));
  std::memcpy(target_height.Data(), &params.target_height,
target_height.Bytes());
OwningVectorRef target_width(etype_t::i32);
target_width.Resize({1});
ASSERT_EQ(target_width.Bytes(), sizeof(int));
  std::memcpy(target_width.Data(), &params.target_width, target_width.Bytes());
OwningVectorRef output(etype_t::f32);
const Algo* crop_to_bounding_box = Impl_CropToBoundingBox();
crop_to_bounding_box->process(
{&img, &offset_height, &offset_width, &target_height, &target_width},
{&output});
ASSERT_EQ(output.Bytes(), params.expected_data.size() * sizeof(float));
ASSERT_EQ(output.Dims(), params.expected_shape);
const float* out_data = reinterpret_cast<float*>(output.Data());
EXPECT_THAT(absl::MakeSpan(out_data, output.NumElements()),
ElementsAreArray(params.expected_data));
}
INSTANTIATE_TEST_SUITE_P(
CropTests, CropToBoundingBoxTest,
testing::ValuesIn({
CropToBoundingBoxTestParams{{1, 5, 5, 1},
GetIotaVec(1, 5, 5, 1),
0,
0,
2,
2,
{1, 2, 2, 1},
{0, 1,
5, 6}},
CropToBoundingBoxTestParams{{1, 5, 5, 1},
GetIotaVec(1, 5, 5, 1),
3,
3,
2,
2,
{1, 2, 2, 1},
{18, 19,
23, 24}},
CropToBoundingBoxTestParams{{1, 5, 5, 1},
GetIotaVec(1, 5, 5, 1),
0,
3,
2,
2,
{1, 2, 2, 1},
{3, 4,
8, 9}},
CropToBoundingBoxTestParams{{1, 5, 5, 1},
GetIotaVec(1, 5, 5, 1),
2,
1,
3,
3,
{1, 3, 3, 1},
{11, 12, 13,
16, 17, 18,
21, 22, 23}},
CropToBoundingBoxTestParams{{1, 3, 3, 1},
GetIotaVec(1, 3, 3, 1),
0,
0,
3,
3,
{1, 3, 3, 1},
{0, 1, 2,
3, 4, 5,
6, 7, 8}},
CropToBoundingBoxTestParams{{1, 3, 3, 1},
GetIotaVec(1, 3, 3, 1),
1,
1,
1,
1,
{1, 1, 1, 1},
{4}},
CropToBoundingBoxTestParams{{1, 5, 5, 3},
GetIotaVec(1, 5, 5, 3),
2,
2,
2,
2,
{1, 2, 2, 3},
{36, 37, 38, 39, 40, 41,
51, 52, 53, 54, 55, 56}},
CropToBoundingBoxTestParams{{2, 5, 5, 2},
GetIotaVec(2, 5, 5, 2),
2,
2,
2,
2,
{2, 2, 2, 2},
{24, 25, 26, 27, 34, 35, 36, 37,
74, 75, 76, 77, 84, 85, 86, 87}},
}));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/ml_adjacent/algo/crop.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/ml_adjacent/algo/crop_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
101e352c-b661-4e10-bbba-e7e6d5172d99 | cpp | tensorflow/tensorflow | image_grad | tensorflow/cc/gradients/image_grad.cc | tensorflow/cc/gradients/image_grad_test.cc | #include <vector>
#include "tensorflow/cc/framework/grad_op_registry.h"
#include "tensorflow/cc/framework/gradients.h"
#include "tensorflow/cc/ops/image_ops_internal.h"
#include "tensorflow/cc/ops/standard_ops.h"
namespace tensorflow {
namespace ops {
namespace {
REGISTER_NO_GRADIENT_OP("NonMaxSuppression");
REGISTER_NO_GRADIENT_OP("NonMaxSuppressionV2");
REGISTER_NO_GRADIENT_OP("NonMaxSuppressionV3");
REGISTER_NO_GRADIENT_OP("NonMaxSuppressionV4");
REGISTER_NO_GRADIENT_OP("NonMaxSuppressionV5");
Status ResizeNearestNeighborGradHelper(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
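  // Resize gradients apply the matching internal *Grad op to the incoming
  // gradient, restoring the input's spatial dims; the size argument itself is
  // not differentiable (NoGradient).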
bool align_corners;
TF_RETURN_IF_ERROR(
GetNodeAttr(op.node()->attrs(), "align_corners", &align_corners));
bool half_pixel_centers;
TF_RETURN_IF_ERROR(GetNodeAttr(op.node()->attrs(), "half_pixel_centers",
&half_pixel_centers));
auto x_shape = Slice(scope, Shape(scope, op.input(0)), {1}, {2});
grad_outputs->push_back(internal::ResizeNearestNeighborGrad(
scope, grad_inputs[0], x_shape,
internal::ResizeNearestNeighborGrad::AlignCorners(align_corners)
.HalfPixelCenters(half_pixel_centers)));
grad_outputs->push_back(NoGradient());
return scope.status();
}
REGISTER_GRADIENT_OP("ResizeNearestNeighbor", ResizeNearestNeighborGradHelper);
Status ResizeBilinearGradHelper(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
bool align_corners;
TF_RETURN_IF_ERROR(
GetNodeAttr(op.node()->attrs(), "align_corners", &align_corners));
bool half_pixel_centers;
TF_RETURN_IF_ERROR(GetNodeAttr(op.node()->attrs(), "half_pixel_centers",
&half_pixel_centers));
grad_outputs->push_back(internal::ResizeBilinearGrad(
scope, grad_inputs[0], op.input(0),
internal::ResizeBilinearGrad::AlignCorners(align_corners)
.HalfPixelCenters(half_pixel_centers)));
grad_outputs->push_back(NoGradient());
return scope.status();
}
REGISTER_GRADIENT_OP("ResizeBilinear", ResizeBilinearGradHelper);
Status ResizeBicubicGradHelper(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
bool align_corners;
TF_RETURN_IF_ERROR(
GetNodeAttr(op.node()->attrs(), "align_corners", &align_corners));
bool half_pixel_centers;
TF_RETURN_IF_ERROR(GetNodeAttr(op.node()->attrs(), "half_pixel_centers",
&half_pixel_centers));
grad_outputs->push_back(internal::ResizeBicubicGrad(
scope, grad_inputs[0], op.input(0),
internal::ResizeBicubicGrad::AlignCorners(align_corners)
.HalfPixelCenters(half_pixel_centers)));
grad_outputs->push_back(NoGradient());
return scope.status();
}
REGISTER_GRADIENT_OP("ResizeBicubic", ResizeBicubicGradHelper);
Status ScaleAndTranslateGradHelper(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
string kernel_type;
TF_RETURN_IF_ERROR(
GetNodeAttr(op.node()->attrs(), "kernel_type", &kernel_type));
bool antialias;
TF_RETURN_IF_ERROR(GetNodeAttr(op.node()->attrs(), "antialias", &antialias));
grad_outputs->push_back(internal::ScaleAndTranslateGrad(
scope, grad_inputs[0], op.input(0), op.input(2), op.input(3),
internal::ScaleAndTranslateGrad::KernelType(kernel_type)
.Antialias(antialias)));
grad_outputs->push_back(NoGradient());
grad_outputs->push_back(NoGradient());
grad_outputs->push_back(NoGradient());
return scope.status();
}
REGISTER_GRADIENT_OP("ScaleAndTranslate", ScaleAndTranslateGradHelper);
Status CropAndResizeGradHelper(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
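  // CropAndResize is differentiable w.r.t. the image and the boxes; the box
  // indices and crop size inputs get NoGradient.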
DataType input_type;
string method;
TF_RETURN_IF_ERROR(GetNodeAttr(op.node()->attrs(), "method", &method));
TF_RETURN_IF_ERROR(GetNodeAttr(op.node()->attrs(), "T", &input_type));
auto image_shape = Shape(scope, op.input(0));
grad_outputs->push_back(CropAndResizeGradImage(
scope, grad_inputs[0], op.input(1), op.input(2), image_shape, input_type,
CropAndResizeGradImage::Method(method)));
grad_outputs->push_back(CropAndResizeGradBoxes(
scope, grad_inputs[0], op.input(0), op.input(1), op.input(2)));
grad_outputs->push_back(NoGradient());
grad_outputs->push_back(NoGradient());
return scope.status();
}
REGISTER_GRADIENT_OP("CropAndResize", CropAndResizeGradHelper);
}
}
} | #include "tensorflow/cc/client/client_session.h"
#include "tensorflow/cc/framework/grad_op_registry.h"
#include "tensorflow/cc/framework/gradient_checker.h"
#include "tensorflow/cc/framework/testutil.h"
#include "tensorflow/cc/gradients/grad_testutil.h"
#include "tensorflow/cc/ops/image_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
namespace tensorflow {
namespace {
using ops::Const;
using ops::CropAndResize;
using ops::ResizeBicubic;
using ops::ResizeBilinear;
using ops::ResizeNearestNeighbor;
using ops::ScaleAndTranslate;
class ImageGradTest : public ::testing::Test {
protected:
ImageGradTest() : scope_(Scope::NewRootScope()) {}
enum OpType { RESIZE_NEAREST, RESIZE_BILINEAR, RESIZE_BICUBIC };
template <typename T>
Tensor MakeData(const TensorShape& data_shape) {
DataType data_type = DataTypeToEnum<T>::v();
Tensor data(data_type, data_shape);
auto data_flat = data.flat<T>();
for (int i = 0; i < data_flat.size(); ++i) {
data_flat(i) = T(i);
}
return data;
}
template <typename T>
void MakeOp(const OpType op_type, const Tensor& x_data, const Input& y_shape,
const bool align_corners, const bool half_pixel_centers,
Output* x, Output* y) {
*x = Const<T>(scope_, x_data);
switch (op_type) {
case RESIZE_NEAREST:
*y = ResizeNearestNeighbor(
scope_, *x, y_shape,
ResizeNearestNeighbor::AlignCorners(align_corners));
return;
case RESIZE_BILINEAR:
*y = ResizeBilinear(scope_, *x, y_shape,
ResizeBilinear::AlignCorners(align_corners)
.HalfPixelCenters(half_pixel_centers));
return;
case RESIZE_BICUBIC:
*y = ResizeBicubic(scope_, *x, y_shape,
ResizeBicubic::AlignCorners(align_corners)
.HalfPixelCenters(half_pixel_centers));
return;
}
assert(false);
}
template <typename T>
void TestResizedShapeForType(const OpType op_type, const bool align_corners,
const bool half_pixel_centers) {
TensorShape x_shape({1, 2, 2, 1});
Tensor x_data = MakeData<T>(x_shape);
Output x, y;
MakeOp<T>(op_type, x_data, {4, 6}, align_corners, half_pixel_centers, &x,
&y);
ClientSession session(scope_);
std::vector<Tensor> outputs;
TF_ASSERT_OK(session.Run({y}, &outputs));
EXPECT_EQ(outputs.size(), 1);
EXPECT_EQ(outputs[0].shape(), TensorShape({1, 4, 6, 1}));
}
void TestResizedShape(OpType op_type) {
for (const bool half_pixel_centers : {true, false}) {
for (const bool align_corners : {true, false}) {
if (half_pixel_centers && align_corners) {
continue;
}
TestResizedShapeForType<Eigen::half>(op_type, align_corners,
half_pixel_centers);
TestResizedShapeForType<float>(op_type, align_corners,
half_pixel_centers);
TestResizedShapeForType<double>(op_type, align_corners,
half_pixel_centers);
}
}
}
template <typename X_T, typename Y_T, typename JAC_T>
void TestResizeToSmallerAndAlign(const OpType op_type,
const bool align_corners,
const bool half_pixel_centers) {
TensorShape x_shape({1, 4, 6, 1});
Tensor x_data = MakeData<X_T>(x_shape);
Output x, y;
MakeOp<X_T>(op_type, x_data, {2, 3}, align_corners, half_pixel_centers, &x,
&y);
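    // Compare the registered symbolic gradient against a numeric Jacobian;
    // max_error is the largest elementwise mismatch.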
JAC_T max_error;
TF_ASSERT_OK((ComputeGradientError<X_T, Y_T, JAC_T>(
scope_, x, x_data, y, {1, 2, 3, 1}, &max_error)));
EXPECT_LT(max_error, 1.5e-3);
}
template <typename X_T, typename Y_T, typename JAC_T>
void TestResizeToLargerAndAlign(const OpType op_type,
const bool align_corners,
const bool half_pixel_centers) {
TensorShape x_shape({1, 2, 3, 1});
Tensor x_data = MakeData<X_T>(x_shape);
Output x, y;
MakeOp<X_T>(op_type, x_data, {4, 6}, align_corners, half_pixel_centers, &x,
&y);
JAC_T max_error;
TF_ASSERT_OK((ComputeGradientError<X_T, Y_T, JAC_T>(
scope_, x, x_data, y, {1, 4, 6, 1}, &max_error)));
EXPECT_LT(max_error, 1.5e-3);
}
template <typename X_T, typename Y_T, typename JAC_T>
void TestResize(OpType op_type) {
for (const bool half_pixel_centers : {true, false}) {
for (const bool align_corners : {true, false}) {
if (half_pixel_centers && align_corners) {
continue;
}
TestResizeToSmallerAndAlign<X_T, Y_T, JAC_T>(op_type, align_corners,
half_pixel_centers);
TestResizeToLargerAndAlign<X_T, Y_T, JAC_T>(op_type, align_corners,
half_pixel_centers);
}
}
}
Scope scope_;
};
TEST_F(ImageGradTest, TestNearestNeighbor) {
TestResizedShape(RESIZE_NEAREST);
TestResize<float, float, float>(RESIZE_NEAREST);
TestResize<double, double, double>(RESIZE_NEAREST);
}
TEST_F(ImageGradTest, TestBilinear) {
TestResizedShape(RESIZE_BILINEAR);
TestResize<float, float, float>(RESIZE_BILINEAR);
TestResize<double, float, double>(RESIZE_BILINEAR);
}
TEST_F(ImageGradTest, TestBicubic) {
TestResizedShape(RESIZE_BICUBIC);
TestResize<float, float, float>(RESIZE_BICUBIC);
TestResize<double, float, double>(RESIZE_BICUBIC);
}
class ScaleAndTranslateGradTest : public ::testing::Test {
protected:
ScaleAndTranslateGradTest() : scope_(Scope::NewRootScope()) {}
template <typename T>
Tensor MakeData(const TensorShape& data_shape) {
DataType data_type = DataTypeToEnum<T>::v();
Tensor data(data_type, data_shape);
auto data_flat = data.flat<T>();
for (int i = 0; i < data_flat.size(); ++i) {
data_flat(i) = T(i);
}
return data;
}
template <typename T>
void MakeOp(const Tensor& x_data, const Input& y_shape, Input scale,
Input translation, const string& kernel_type, bool antialias,
Output* x, Output* y) {
*x = Const<T>(scope_, x_data);
*y = ScaleAndTranslate(scope_, *x, y_shape, scale, translation,
ScaleAndTranslate::KernelType(kernel_type)
                               .Antialias(antialias));
TF_ASSERT_OK(scope_.status());
}
template <typename X_T, typename Y_T, typename JAC_T>
void TestScaleAndTranslate(const TensorShape x_shape, const int out_height,
const int out_width, Input scale,
Input translation, const string& kernel_type,
bool antialias) {
Tensor x_data = MakeData<X_T>(x_shape);
Output x, y;
MakeOp<X_T>(x_data, {out_height, out_width}, scale, translation,
kernel_type, antialias, &x, &y);
JAC_T max_error;
TF_ASSERT_OK((ComputeGradientError<X_T, Y_T, JAC_T>(
scope_, x, x_data, y, {1, out_height, out_width, 1}, &max_error)));
EXPECT_LT(max_error, 2e-3);
}
const std::vector<Input> kScales = {Input{1.0f, 1.0f}, Input{0.37f, 0.47f},
Input{2.1f, 2.1f}};
const std::vector<Input> kTranslations = {
Input{0.0f, 0.0f}, Input{3.14f, 1.19f}, Input{2.1f, 3.1f},
Input{100.0f, 200.0f}};
Scope scope_;
};
TEST_F(ScaleAndTranslateGradTest, TestGrads) {
const std::vector<std::string> kKernelTypes = {"lanczos1", "lanczos3",
"lanczos5", "gaussian"};
constexpr int kOutHeight = 4;
constexpr int kOutWidth = 6;
const TensorShape kXShape = TensorShape({1, 2, 3, 1});
for (const Input scale : kScales) {
for (const Input translation : kTranslations) {
for (const std::string& kernel_type : kKernelTypes) {
TestScaleAndTranslate<float, float, float>(
kXShape, kOutHeight, kOutWidth, scale, translation, kernel_type,
true);
}
}
}
}
TEST_F(ScaleAndTranslateGradTest, TestGradsWithoutAntialias) {
constexpr int kOutHeight = 4;
constexpr int kOutWidth = 6;
const TensorShape kXShape = TensorShape({1, 2, 3, 1});
for (const Input scale : kScales) {
for (const Input translation : kTranslations) {
TestScaleAndTranslate<float, float, float>(kXShape, kOutHeight, kOutWidth,
scale, translation, "lanczos3",
false);
}
}
}
TEST_F(ScaleAndTranslateGradTest, TestGradsWithSameShape) {
const std::vector<std::string> kKernelTypes = {"lanczos3", "gaussian"};
constexpr int kOutHeight = 2;
constexpr int kOutWidth = 3;
const TensorShape kXShape = TensorShape({1, 2, 3, 1});
for (const Input scale : kScales) {
for (const Input translation : kTranslations) {
for (const std::string& kernel_type : kKernelTypes) {
TestScaleAndTranslate<float, float, float>(
kXShape, kOutHeight, kOutWidth, scale, translation, kernel_type,
true);
}
}
}
}
TEST_F(ScaleAndTranslateGradTest, TestGradsWithSmallerShape) {
const std::vector<std::string> kKernelTypes = {"lanczos3", "gaussian"};
constexpr int kOutHeight = 2;
constexpr int kOutWidth = 3;
const TensorShape kXShape = TensorShape({1, 4, 6, 1});
for (const Input scale : kScales) {
for (const Input translation : kTranslations) {
for (const std::string& kernel_type : kKernelTypes) {
TestScaleAndTranslate<float, float, float>(
kXShape, kOutHeight, kOutWidth, scale, translation, kernel_type,
true);
}
}
}
}
class CropAndResizeGradTest : public ::testing::Test {
protected:
CropAndResizeGradTest() : scope_(Scope::NewRootScope()) {}
template <typename T>
Tensor MakeData(const TensorShape& data_shape) {
DataType data_type = DataTypeToEnum<T>::v();
Tensor data(data_type, data_shape);
auto data_flat = data.flat<T>();
for (int i = 0; i < data_flat.size(); ++i) {
data_flat(i) = T(i);
}
return data;
}
template <typename T>
void MakeOp(const Tensor& x_data, const Input& boxes, const Input& box_ind,
const Input& crop_size, Output* x, Output* y) {
*x = Const<T>(scope_, x_data);
*y = CropAndResize(scope_, *x, boxes, box_ind, crop_size,
CropAndResize::Method("bilinear"));
TF_ASSERT_OK(scope_.status());
}
template <typename X_T, typename Y_T, typename JAC_T>
void TestCropAndResize() {
TensorShape x_shape({1, 4, 2, 1});
Tensor x_data = MakeData<X_T>(x_shape);
TensorShape box_shape({1, 4});
Tensor boxes = MakeData<X_T>(box_shape);
Output x, y;
MakeOp<X_T>(x_data, boxes, {0}, {1, 1}, &x, &y);
JAC_T max_error;
TF_ASSERT_OK((ComputeGradientError<X_T, Y_T, JAC_T>(
scope_, x, x_data, y, {1, 1, 1, 1}, &max_error)));
EXPECT_LT(max_error, 1e-3);
}
Scope scope_;
};
TEST_F(CropAndResizeGradTest, TestCrop) {
TestCropAndResize<float, float, float>();
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/cc/gradients/image_grad.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/cc/gradients/image_grad_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
457750a6-4192-455a-a546-396945d39be3 | cpp | google/quiche | http2_frame_builder | quiche/http2/test_tools/http2_frame_builder.cc | quiche/http2/test_tools/http2_frame_builder_test.cc | #include "quiche/http2/test_tools/http2_frame_builder.h"
#ifdef WIN32
#include <winsock2.h>
#else
#include <arpa/inet.h>
#include <netinet/in.h>
#endif
#include "absl/strings/str_cat.h"
#include "quiche/common/platform/api/quiche_test.h"
namespace http2 {
namespace test {
Http2FrameBuilder::Http2FrameBuilder(Http2FrameType type, uint8_t flags,
uint32_t stream_id) {
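  // Writes a frame header with a zero payload-length placeholder; append the
  // payload, then call SetPayloadLength() to backfill it.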
AppendUInt24(0);
Append(type);
AppendUInt8(flags);
AppendUInt31(stream_id);
}
Http2FrameBuilder::Http2FrameBuilder(const Http2FrameHeader& v) { Append(v); }
void Http2FrameBuilder::Append(absl::string_view s) {
absl::StrAppend(&buffer_, s);
}
void Http2FrameBuilder::AppendBytes(const void* data, uint32_t num_bytes) {
Append(absl::string_view(static_cast<const char*>(data), num_bytes));
}
void Http2FrameBuilder::AppendZeroes(size_t num_zero_bytes) {
char zero = 0;
buffer_.append(num_zero_bytes, zero);
}
void Http2FrameBuilder::AppendUInt8(uint8_t value) { AppendBytes(&value, 1); }
void Http2FrameBuilder::AppendUInt16(uint16_t value) {
value = htons(value);
AppendBytes(&value, 2);
}
void Http2FrameBuilder::AppendUInt24(uint32_t value) {
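  // Serialize big-endian and drop the leading byte: HTTP/2 lengths are 24-bit.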
EXPECT_EQ(value, value & 0xffffff);
value = htonl(value);
AppendBytes(reinterpret_cast<char*>(&value) + 1, 3);
}
void Http2FrameBuilder::AppendUInt31(uint32_t value) {
uint32_t tmp = value & StreamIdMask();
EXPECT_EQ(value, value & StreamIdMask())
<< "High-bit of uint32_t should be clear.";
value = htonl(tmp);
AppendBytes(&value, 4);
}
void Http2FrameBuilder::AppendUInt32(uint32_t value) {
value = htonl(value);
AppendBytes(&value, sizeof(value));
}
void Http2FrameBuilder::Append(Http2ErrorCode error_code) {
AppendUInt32(static_cast<uint32_t>(error_code));
}
void Http2FrameBuilder::Append(Http2FrameType type) {
AppendUInt8(static_cast<uint8_t>(type));
}
void Http2FrameBuilder::Append(Http2SettingsParameter parameter) {
AppendUInt16(static_cast<uint16_t>(parameter));
}
void Http2FrameBuilder::Append(const Http2FrameHeader& v) {
AppendUInt24(v.payload_length);
Append(v.type);
AppendUInt8(v.flags);
AppendUInt31(v.stream_id);
}
void Http2FrameBuilder::Append(const Http2PriorityFields& v) {
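  // The exclusive flag rides in the high bit of the stream-dependency word;
  // the wire weight is weight - 1 (1..256 encoded as 0..255).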
uint32_t tmp = v.stream_dependency & StreamIdMask();
EXPECT_EQ(tmp, v.stream_dependency);
if (v.is_exclusive) {
tmp |= 0x80000000;
}
AppendUInt32(tmp);
ASSERT_LE(1u, v.weight);
ASSERT_LE(v.weight, 256u);
AppendUInt8(v.weight - 1);
}
void Http2FrameBuilder::Append(const Http2RstStreamFields& v) {
Append(v.error_code);
}
void Http2FrameBuilder::Append(const Http2SettingFields& v) {
Append(v.parameter);
AppendUInt32(v.value);
}
void Http2FrameBuilder::Append(const Http2PushPromiseFields& v) {
AppendUInt31(v.promised_stream_id);
}
void Http2FrameBuilder::Append(const Http2PingFields& v) {
AppendBytes(v.opaque_bytes, sizeof Http2PingFields::opaque_bytes);
}
void Http2FrameBuilder::Append(const Http2GoAwayFields& v) {
AppendUInt31(v.last_stream_id);
Append(v.error_code);
}
void Http2FrameBuilder::Append(const Http2WindowUpdateFields& v) {
EXPECT_NE(0u, v.window_size_increment) << "Increment must be non-zero.";
AppendUInt31(v.window_size_increment);
}
void Http2FrameBuilder::Append(const Http2AltSvcFields& v) {
AppendUInt16(v.origin_length);
}
void Http2FrameBuilder::Append(const Http2PriorityUpdateFields& v) {
AppendUInt31(v.prioritized_stream_id);
}
void Http2FrameBuilder::WriteAt(absl::string_view s, size_t offset) {
ASSERT_LE(offset, buffer_.size());
size_t len = offset + s.size();
if (len > buffer_.size()) {
buffer_.resize(len);
}
for (size_t ndx = 0; ndx < s.size(); ++ndx) {
buffer_[offset + ndx] = s[ndx];
}
}
void Http2FrameBuilder::WriteBytesAt(const void* data, uint32_t num_bytes,
size_t offset) {
WriteAt(absl::string_view(static_cast<const char*>(data), num_bytes), offset);
}
void Http2FrameBuilder::WriteUInt24At(uint32_t value, size_t offset) {
ASSERT_LT(value, static_cast<uint32_t>(1 << 24));
value = htonl(value);
WriteBytesAt(reinterpret_cast<char*>(&value) + 1, sizeof(value) - 1, offset);
}
void Http2FrameBuilder::SetPayloadLength(uint32_t payload_length) {
WriteUInt24At(payload_length, 0);
}
size_t Http2FrameBuilder::SetPayloadLength() {
EXPECT_GE(size(), Http2FrameHeader::EncodedSize());
uint32_t payload_length = size() - Http2FrameHeader::EncodedSize();
SetPayloadLength(payload_length);
return payload_length;
}
}
} | #include "quiche/http2/test_tools/http2_frame_builder.h"
#include <string>
#include "absl/strings/escaping.h"
#include "quiche/common/platform/api/quiche_test.h"
namespace http2 {
namespace test {
namespace {
const char kHighBitSetMsg[] = "High-bit of uint32_t should be clear";
TEST(Http2FrameBuilderTest, Constructors) {
{
Http2FrameBuilder fb;
EXPECT_EQ(0u, fb.size());
}
{
Http2FrameBuilder fb(Http2FrameType::DATA, 0, 123);
EXPECT_EQ(9u, fb.size());
std::string expected_data;
ASSERT_TRUE(
absl::HexStringToBytes("000000"
"00"
"00"
"0000007b",
&expected_data));
EXPECT_EQ(expected_data, fb.buffer());
}
{
Http2FrameHeader header;
header.payload_length = (1 << 24) - 1;
header.type = Http2FrameType::HEADERS;
header.flags = Http2FrameFlag::END_HEADERS;
header.stream_id = StreamIdMask();
Http2FrameBuilder fb(header);
EXPECT_EQ(9u, fb.size());
std::string expected_data;
ASSERT_TRUE(absl::HexStringToBytes(
"ffffff"
"01"
"04"
"7fffffff",
&expected_data));
EXPECT_EQ(expected_data, fb.buffer());
}
}
TEST(Http2FrameBuilderTest, SetPayloadLength) {
Http2FrameBuilder fb(Http2FrameType::DATA, PADDED, 20000);
EXPECT_EQ(9u, fb.size());
fb.AppendUInt8(50);
EXPECT_EQ(10u, fb.size());
fb.Append("ten bytes.");
EXPECT_EQ(20u, fb.size());
fb.AppendZeroes(50);
EXPECT_EQ(70u, fb.size());
fb.SetPayloadLength();
EXPECT_EQ(70u, fb.size());
std::string expected_data;
ASSERT_TRUE(
absl::HexStringToBytes("00003d"
"00"
"08"
"00004e20"
"32"
"74656e2062797465732e"
"00000000000000000000"
"00000000000000000000"
"00000000000000000000"
"00000000000000000000"
"00000000000000000000",
&expected_data));
EXPECT_EQ(expected_data, fb.buffer());
}
TEST(Http2FrameBuilderTest, Settings) {
Http2FrameBuilder fb(Http2FrameType::SETTINGS, 0, 0);
Http2SettingFields sf;
sf.parameter = Http2SettingsParameter::HEADER_TABLE_SIZE;
sf.value = 1 << 12;
fb.Append(sf);
sf.parameter = Http2SettingsParameter::ENABLE_PUSH;
sf.value = 0;
fb.Append(sf);
sf.parameter = Http2SettingsParameter::MAX_CONCURRENT_STREAMS;
sf.value = ~0;
fb.Append(sf);
sf.parameter = Http2SettingsParameter::INITIAL_WINDOW_SIZE;
sf.value = 1 << 16;
fb.Append(sf);
sf.parameter = Http2SettingsParameter::MAX_FRAME_SIZE;
sf.value = 1 << 14;
fb.Append(sf);
sf.parameter = Http2SettingsParameter::MAX_HEADER_LIST_SIZE;
sf.value = 1 << 10;
fb.Append(sf);
size_t payload_size = 6 * Http2SettingFields::EncodedSize();
EXPECT_EQ(Http2FrameHeader::EncodedSize() + payload_size, fb.size());
fb.SetPayloadLength(payload_size);
std::string expected_data;
ASSERT_TRUE(
absl::HexStringToBytes("000024"
"04"
"00"
"00000000"
"0001"
"00001000"
"0002"
"00000000"
"0003"
"ffffffff"
"0004"
"00010000"
"0005"
"00004000"
"0006"
"00000400",
&expected_data));
EXPECT_EQ(expected_data, fb.buffer());
}
TEST(Http2FrameBuilderTest, EnhanceYourCalm) {
std::string expected_data;
ASSERT_TRUE(absl::HexStringToBytes("0000000b", &expected_data));
{
Http2FrameBuilder fb;
fb.Append(Http2ErrorCode::ENHANCE_YOUR_CALM);
EXPECT_EQ(expected_data, fb.buffer());
}
{
Http2FrameBuilder fb;
Http2RstStreamFields rsp;
rsp.error_code = Http2ErrorCode::ENHANCE_YOUR_CALM;
fb.Append(rsp);
EXPECT_EQ(expected_data, fb.buffer());
}
}
TEST(Http2FrameBuilderTest, PushPromise) {
std::string expected_data;
ASSERT_TRUE(absl::HexStringToBytes("7fffffff", &expected_data));
{
Http2FrameBuilder fb;
fb.Append(Http2PushPromiseFields{0x7fffffff});
EXPECT_EQ(expected_data, fb.buffer());
}
{
Http2FrameBuilder fb;
EXPECT_NONFATAL_FAILURE(fb.Append(Http2PushPromiseFields{0xffffffff}),
kHighBitSetMsg);
EXPECT_EQ(expected_data, fb.buffer());
}
}
TEST(Http2FrameBuilderTest, Ping) {
Http2FrameBuilder fb;
Http2PingFields ping{"8 bytes"};
fb.Append(ping);
const absl::string_view kData{"8 bytes\0", 8};
EXPECT_EQ(kData.size(), Http2PingFields::EncodedSize());
EXPECT_EQ(kData, fb.buffer());
}
TEST(Http2FrameBuilderTest, GoAway) {
std::string expected_data;
ASSERT_TRUE(
absl::HexStringToBytes("12345678"
"00000001",
&expected_data));
EXPECT_EQ(expected_data.size(), Http2GoAwayFields::EncodedSize());
{
Http2FrameBuilder fb;
Http2GoAwayFields ga(0x12345678, Http2ErrorCode::PROTOCOL_ERROR);
fb.Append(ga);
EXPECT_EQ(expected_data, fb.buffer());
}
{
Http2FrameBuilder fb;
Http2GoAwayFields ga(0x92345678, Http2ErrorCode::PROTOCOL_ERROR);
EXPECT_NONFATAL_FAILURE(fb.Append(ga), kHighBitSetMsg);
EXPECT_EQ(expected_data, fb.buffer());
}
}
TEST(Http2FrameBuilderTest, WindowUpdate) {
Http2FrameBuilder fb;
fb.Append(Http2WindowUpdateFields{123456});
EXPECT_NONFATAL_FAILURE(fb.Append(Http2WindowUpdateFields{0x80000001}),
kHighBitSetMsg);
EXPECT_NONFATAL_FAILURE(fb.Append(Http2WindowUpdateFields{0}), "non-zero");
std::string expected_data;
ASSERT_TRUE(
absl::HexStringToBytes("0001e240"
"00000001"
"00000000",
&expected_data));
EXPECT_EQ(expected_data.size(), 3 * Http2WindowUpdateFields::EncodedSize());
EXPECT_EQ(expected_data, fb.buffer());
}
TEST(Http2FrameBuilderTest, AltSvc) {
Http2FrameBuilder fb;
fb.Append(Http2AltSvcFields{99});
fb.Append(Http2AltSvcFields{0});
std::string expected_data;
ASSERT_TRUE(
absl::HexStringToBytes("0063"
"0000",
&expected_data));
EXPECT_EQ(expected_data.size(), 2 * Http2AltSvcFields::EncodedSize());
EXPECT_EQ(expected_data, fb.buffer());
}
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/http2/test_tools/http2_frame_builder.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/http2/test_tools/http2_frame_builder_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
547914af-97d2-4cfc-bdfe-0b2d0b6bf109 | cpp | google/quiche | hpack_huffman_decoder | quiche/http2/hpack/huffman/hpack_huffman_decoder.cc | quiche/http2/hpack/huffman/hpack_huffman_decoder_test.cc | #include "quiche/http2/hpack/huffman/hpack_huffman_decoder.h"
#include <bitset>
#include <limits>
#include <ostream>
#include <sstream>
#include <string>
#include "quiche/common/platform/api/quiche_logging.h"
namespace http2 {
namespace {
typedef uint32_t HuffmanCode;
typedef uint16_t HuffmanCodeBitCount;
typedef std::bitset<32> HuffmanCodeBitSet;
typedef std::bitset<64> HuffmanAccumulatorBitSet;
static constexpr HuffmanCodeBitCount kMinCodeBitCount = 5;
static constexpr HuffmanCodeBitCount kMaxCodeBitCount = 30;
static constexpr HuffmanCodeBitCount kHuffmanCodeBitCount =
std::numeric_limits<HuffmanCode>::digits;
static_assert(std::numeric_limits<HuffmanCode>::digits >= kMaxCodeBitCount,
"HuffmanCode isn't big enough.");
static_assert(std::numeric_limits<HuffmanAccumulator>::digits >=
kMaxCodeBitCount,
"HuffmanAccumulator isn't big enough.");
static constexpr HuffmanAccumulatorBitCount kHuffmanAccumulatorBitCount =
std::numeric_limits<HuffmanAccumulator>::digits;
static constexpr HuffmanAccumulatorBitCount kExtraAccumulatorBitCount =
kHuffmanAccumulatorBitCount - kHuffmanCodeBitCount;
struct PrefixInfo {
uint32_t DecodeToCanonical(HuffmanCode bits) const {
HuffmanCode ordinal_in_length =
((bits - first_code) >> (kHuffmanCodeBitCount - code_length));
return first_canonical + ordinal_in_length;
}
const HuffmanCode first_code;
const uint16_t code_length;
const uint16_t first_canonical;
};
inline std::ostream& operator<<(std::ostream& out, const PrefixInfo& v) {
return out << "{first_code: " << HuffmanCodeBitSet(v.first_code)
<< ", code_length: " << v.code_length
<< ", first_canonical: " << v.first_canonical << "}";
}
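// Given the left-aligned high bits of the decoding buffer, locates the
// code-length bucket the leading Huffman code falls into and returns that
// bucket's first code, bit length, and first canonical symbol number. The
// nested branches are a hand-written binary search over the canonical HPACK
// Huffman table (RFC 7541, Appendix B).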
PrefixInfo PrefixToInfo(HuffmanCode value) {
if (value < 0b10111000000000000000000000000000) {
if (value < 0b01010000000000000000000000000000) {
return {0b00000000000000000000000000000000, 5, 0};
} else {
return {0b01010000000000000000000000000000, 6, 10};
}
} else {
if (value < 0b11111110000000000000000000000000) {
if (value < 0b11111000000000000000000000000000) {
return {0b10111000000000000000000000000000, 7, 36};
} else {
return {0b11111000000000000000000000000000, 8, 68};
}
} else {
if (value < 0b11111111110000000000000000000000) {
if (value < 0b11111111101000000000000000000000) {
if (value < 0b11111111010000000000000000000000) {
return {0b11111110000000000000000000000000, 10, 74};
} else {
return {0b11111111010000000000000000000000, 11, 79};
}
} else {
return {0b11111111101000000000000000000000, 12, 82};
}
} else {
if (value < 0b11111111111111100000000000000000) {
if (value < 0b11111111111110000000000000000000) {
if (value < 0b11111111111100000000000000000000) {
return {0b11111111110000000000000000000000, 13, 84};
} else {
return {0b11111111111100000000000000000000, 14, 90};
}
} else {
return {0b11111111111110000000000000000000, 15, 92};
}
} else {
if (value < 0b11111111111111110100100000000000) {
if (value < 0b11111111111111101110000000000000) {
if (value < 0b11111111111111100110000000000000) {
return {0b11111111111111100000000000000000, 19, 95};
} else {
return {0b11111111111111100110000000000000, 20, 98};
}
} else {
return {0b11111111111111101110000000000000, 21, 106};
}
} else {
if (value < 0b11111111111111111110101000000000) {
if (value < 0b11111111111111111011000000000000) {
return {0b11111111111111110100100000000000, 22, 119};
} else {
return {0b11111111111111111011000000000000, 23, 145};
}
} else {
if (value < 0b11111111111111111111101111000000) {
if (value < 0b11111111111111111111100000000000) {
if (value < 0b11111111111111111111011000000000) {
return {0b11111111111111111110101000000000, 24, 174};
} else {
return {0b11111111111111111111011000000000, 25, 186};
}
} else {
return {0b11111111111111111111100000000000, 26, 190};
}
} else {
if (value < 0b11111111111111111111111111110000) {
if (value < 0b11111111111111111111111000100000) {
return {0b11111111111111111111101111000000, 27, 205};
} else {
return {0b11111111111111111111111000100000, 28, 224};
}
} else {
return {0b11111111111111111111111111110000, 30, 253};
}
}
}
}
}
}
}
}
}
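// Maps a canonical symbol number (codes ordered by length, then by symbol
// value) to the byte that it decodes to.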
constexpr unsigned char kCanonicalToSymbol[] = {
'0', '1', '2', 'a', 'c', 'e', 'i', 'o',
's', 't', 0x20, '%', '-', '.', '/', '3',
'4', '5', '6', '7', '8', '9', '=', 'A',
'_', 'b', 'd', 'f', 'g', 'h', 'l', 'm',
'n', 'p', 'r', 'u', ':', 'B', 'C', 'D',
'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L',
'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T',
'U', 'V', 'W', 'Y', 'j', 'k', 'q', 'v',
'w', 'x', 'y', 'z', '&', '*', ',', ';',
'X', 'Z', '!', '\"', '(', ')', '?', '\'',
'+', '|', '#', '>', 0x00, '$', '@', '[',
']', '~', '^', '}', '<', '`', '{', '\\',
0xc3, 0xd0, 0x80, 0x82, 0x83, 0xa2, 0xb8, 0xc2,
0xe0, 0xe2, 0x99, 0xa1, 0xa7, 0xac, 0xb0, 0xb1,
0xb3, 0xd1, 0xd8, 0xd9, 0xe3, 0xe5, 0xe6, 0x81,
0x84, 0x85, 0x86, 0x88, 0x92, 0x9a, 0x9c, 0xa0,
0xa3, 0xa4, 0xa9, 0xaa, 0xad, 0xb2, 0xb5, 0xb9,
0xba, 0xbb, 0xbd, 0xbe, 0xc4, 0xc6, 0xe4, 0xe8,
0xe9, 0x01, 0x87, 0x89, 0x8a, 0x8b, 0x8c, 0x8d,
0x8f, 0x93, 0x95, 0x96, 0x97, 0x98, 0x9b, 0x9d,
0x9e, 0xa5, 0xa6, 0xa8, 0xae, 0xaf, 0xb4, 0xb6,
0xb7, 0xbc, 0xbf, 0xc5, 0xe7, 0xef, 0x09, 0x8e,
0x90, 0x91, 0x94, 0x9f, 0xab, 0xce, 0xd7, 0xe1,
0xec, 0xed, 0xc7, 0xcf, 0xea, 0xeb, 0xc0, 0xc1,
0xc8, 0xc9, 0xca, 0xcd, 0xd2, 0xd5, 0xda, 0xdb,
0xee, 0xf0, 0xf2, 0xf3, 0xff, 0xcb, 0xcc, 0xd3,
0xd4, 0xd6, 0xdd, 0xde, 0xdf, 0xf1, 0xf4, 0xf5,
0xf6, 0xf7, 0xf8, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe,
0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x0b,
0x0c, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14,
0x15, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d,
0x1e, 0x1f, 0x7f, 0xdc, 0xf9, 0x0a, 0x0d, 0x16,
};
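// Fast path for codes at most 7 bits long: indexed by the leading 7 bits of
// the decoding buffer, each entry gives the decoded symbol and the true code
// length to consume (5, 6, or 7 bits).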
constexpr size_t kShortCodeTableSize = 124;
struct ShortCodeInfo {
uint8_t symbol;
uint8_t length;
} kShortCodeTable[kShortCodeTableSize] = {
{0x30, 5},
{0x30, 5},
{0x30, 5},
{0x30, 5},
{0x31, 5},
{0x31, 5},
{0x31, 5},
{0x31, 5},
{0x32, 5},
{0x32, 5},
{0x32, 5},
{0x32, 5},
{0x61, 5},
{0x61, 5},
{0x61, 5},
{0x61, 5},
{0x63, 5},
{0x63, 5},
{0x63, 5},
{0x63, 5},
{0x65, 5},
{0x65, 5},
{0x65, 5},
{0x65, 5},
{0x69, 5},
{0x69, 5},
{0x69, 5},
{0x69, 5},
{0x6f, 5},
{0x6f, 5},
{0x6f, 5},
{0x6f, 5},
{0x73, 5},
{0x73, 5},
{0x73, 5},
{0x73, 5},
{0x74, 5},
{0x74, 5},
{0x74, 5},
{0x74, 5},
{0x20, 6},
{0x20, 6},
{0x25, 6},
{0x25, 6},
{0x2d, 6},
{0x2d, 6},
{0x2e, 6},
{0x2e, 6},
{0x2f, 6},
{0x2f, 6},
{0x33, 6},
{0x33, 6},
{0x34, 6},
{0x34, 6},
{0x35, 6},
{0x35, 6},
{0x36, 6},
{0x36, 6},
{0x37, 6},
{0x37, 6},
{0x38, 6},
{0x38, 6},
{0x39, 6},
{0x39, 6},
{0x3d, 6},
{0x3d, 6},
{0x41, 6},
{0x41, 6},
{0x5f, 6},
{0x5f, 6},
{0x62, 6},
{0x62, 6},
{0x64, 6},
{0x64, 6},
{0x66, 6},
{0x66, 6},
{0x67, 6},
{0x67, 6},
{0x68, 6},
{0x68, 6},
{0x6c, 6},
{0x6c, 6},
{0x6d, 6},
{0x6d, 6},
{0x6e, 6},
{0x6e, 6},
{0x70, 6},
{0x70, 6},
{0x72, 6},
{0x72, 6},
{0x75, 6},
{0x75, 6},
{0x3a, 7},
{0x42, 7},
{0x43, 7},
{0x44, 7},
{0x45, 7},
{0x46, 7},
{0x47, 7},
{0x48, 7},
{0x49, 7},
{0x4a, 7},
{0x4b, 7},
{0x4c, 7},
{0x4d, 7},
{0x4e, 7},
{0x4f, 7},
{0x50, 7},
{0x51, 7},
{0x52, 7},
{0x53, 7},
{0x54, 7},
{0x55, 7},
{0x56, 7},
{0x57, 7},
{0x59, 7},
{0x6a, 7},
{0x6b, 7},
{0x71, 7},
{0x76, 7},
{0x77, 7},
{0x78, 7},
{0x79, 7},
{0x7a, 7},
};
}
HuffmanBitBuffer::HuffmanBitBuffer() { Reset(); }
void HuffmanBitBuffer::Reset() {
accumulator_ = 0;
count_ = 0;
}
size_t HuffmanBitBuffer::AppendBytes(absl::string_view input) {
HuffmanAccumulatorBitCount free_cnt = free_count();
size_t bytes_available = input.size();
if (free_cnt < 8 || bytes_available == 0) {
return 0;
}
size_t bytes_used = 0;
auto* ptr = reinterpret_cast<const uint8_t*>(input.data());
do {
auto b = static_cast<HuffmanAccumulator>(*ptr++);
free_cnt -= 8;
accumulator_ |= (b << free_cnt);
++bytes_used;
} while (free_cnt >= 8 && bytes_used < bytes_available);
count_ += (bytes_used * 8);
return bytes_used;
}
HuffmanAccumulatorBitCount HuffmanBitBuffer::free_count() const {
return kHuffmanAccumulatorBitCount - count_;
}
void HuffmanBitBuffer::ConsumeBits(HuffmanAccumulatorBitCount code_length) {
QUICHE_DCHECK_LE(code_length, count_);
accumulator_ <<= code_length;
count_ -= code_length;
}
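// Per RFC 7541 section 5.2, a Huffman-encoded string must end with a
// (possibly empty) prefix of the EOS symbol: fewer than 8 bits, all ones.
// Returns true iff the leftover buffered bits satisfy that requirement.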
bool HuffmanBitBuffer::InputProperlyTerminated() const {
auto cnt = count();
if (cnt < 8) {
if (cnt == 0) {
return true;
}
HuffmanAccumulator expected = ~(~HuffmanAccumulator() >> cnt);
QUICHE_DCHECK_EQ(accumulator_ & ~expected, 0u)
<< "\n expected: " << HuffmanAccumulatorBitSet(expected) << "\n "
<< *this;
return accumulator_ == expected;
}
return false;
}
std::string HuffmanBitBuffer::DebugString() const {
std::stringstream ss;
ss << "{accumulator: " << HuffmanAccumulatorBitSet(accumulator_)
<< "; count: " << count_ << "}";
return ss.str();
}
HpackHuffmanDecoder::HpackHuffmanDecoder() = default;
HpackHuffmanDecoder::~HpackHuffmanDecoder() = default;
bool HpackHuffmanDecoder::Decode(absl::string_view input, std::string* output) {
QUICHE_DVLOG(1) << "HpackHuffmanDecoder::Decode";
input.remove_prefix(bit_buffer_.AppendBytes(input));
while (true) {
QUICHE_DVLOG(3) << "Enter Decode Loop, bit_buffer_: " << bit_buffer_;
if (bit_buffer_.count() >= 7) {
uint8_t short_code =
bit_buffer_.value() >> (kHuffmanAccumulatorBitCount - 7);
QUICHE_DCHECK_LT(short_code, 128);
if (short_code < kShortCodeTableSize) {
ShortCodeInfo info = kShortCodeTable[short_code];
bit_buffer_.ConsumeBits(info.length);
output->push_back(static_cast<char>(info.symbol));
continue;
}
} else {
size_t byte_count = bit_buffer_.AppendBytes(input);
if (byte_count > 0) {
input.remove_prefix(byte_count);
continue;
}
}
HuffmanCode code_prefix = bit_buffer_.value() >> kExtraAccumulatorBitCount;
QUICHE_DVLOG(3) << "code_prefix: " << HuffmanCodeBitSet(code_prefix);
PrefixInfo prefix_info = PrefixToInfo(code_prefix);
QUICHE_DVLOG(3) << "prefix_info: " << prefix_info;
QUICHE_DCHECK_LE(kMinCodeBitCount, prefix_info.code_length);
QUICHE_DCHECK_LE(prefix_info.code_length, kMaxCodeBitCount);
if (prefix_info.code_length <= bit_buffer_.count()) {
uint32_t canonical = prefix_info.DecodeToCanonical(code_prefix);
if (canonical < 256) {
char c = kCanonicalToSymbol[canonical];
output->push_back(c);
bit_buffer_.ConsumeBits(prefix_info.code_length);
continue;
}
QUICHE_DLOG(ERROR) << "EOS explicitly encoded!\n " << bit_buffer_ << "\n "
<< prefix_info;
return false;
}
size_t byte_count = bit_buffer_.AppendBytes(input);
if (byte_count == 0) {
QUICHE_DCHECK_EQ(input.size(), 0u);
return true;
}
input.remove_prefix(byte_count);
}
}
std::string HpackHuffmanDecoder::DebugString() const {
return bit_buffer_.DebugString();
}
} | #include "quiche/http2/hpack/huffman/hpack_huffman_decoder.h"
#include <cstddef>
#include <iostream>
#include <string>
#include "absl/base/macros.h"
#include "absl/strings/escaping.h"
#include "quiche/http2/decoder/decode_buffer.h"
#include "quiche/http2/decoder/decode_status.h"
#include "quiche/http2/test_tools/random_decoder_test_base.h"
#include "quiche/common/platform/api/quiche_expect_bug.h"
#include "quiche/common/platform/api/quiche_test.h"
namespace http2 {
namespace test {
namespace {
TEST(HuffmanBitBufferTest, Reset) {
HuffmanBitBuffer bb;
EXPECT_TRUE(bb.IsEmpty());
EXPECT_TRUE(bb.InputProperlyTerminated());
EXPECT_EQ(bb.count(), 0u);
EXPECT_EQ(bb.free_count(), 64u);
EXPECT_EQ(bb.value(), 0u);
}
TEST(HuffmanBitBufferTest, AppendBytesAligned) {
std::string s;
s.push_back('\x11');
s.push_back('\x22');
s.push_back('\x33');
absl::string_view sp(s);
HuffmanBitBuffer bb;
sp.remove_prefix(bb.AppendBytes(sp));
EXPECT_TRUE(sp.empty());
EXPECT_FALSE(bb.IsEmpty()) << bb;
EXPECT_FALSE(bb.InputProperlyTerminated());
EXPECT_EQ(bb.count(), 24u) << bb;
EXPECT_EQ(bb.free_count(), 40u) << bb;
EXPECT_EQ(bb.value(), HuffmanAccumulator(0x112233) << 40) << bb;
s.clear();
s.push_back('\x44');
sp = s;
sp.remove_prefix(bb.AppendBytes(sp));
EXPECT_TRUE(sp.empty());
EXPECT_EQ(bb.count(), 32u) << bb;
EXPECT_EQ(bb.free_count(), 32u) << bb;
EXPECT_EQ(bb.value(), HuffmanAccumulator(0x11223344) << 32) << bb;
s.clear();
s.push_back('\x55');
s.push_back('\x66');
s.push_back('\x77');
s.push_back('\x88');
s.push_back('\x99');
sp = s;
sp.remove_prefix(bb.AppendBytes(sp));
EXPECT_EQ(sp.size(), 1u);
EXPECT_EQ('\x99', sp[0]);
EXPECT_EQ(bb.count(), 64u) << bb;
EXPECT_EQ(bb.free_count(), 0u) << bb;
EXPECT_EQ(bb.value(), HuffmanAccumulator(0x1122334455667788LL)) << bb;
sp.remove_prefix(bb.AppendBytes(sp));
EXPECT_EQ(sp.size(), 1u);
EXPECT_EQ('\x99', sp[0]);
EXPECT_EQ(bb.count(), 64u) << bb;
EXPECT_EQ(bb.free_count(), 0u) << bb;
EXPECT_EQ(bb.value(), HuffmanAccumulator(0x1122334455667788LL)) << bb;
}
TEST(HuffmanBitBufferTest, ConsumeBits) {
std::string s;
s.push_back('\x11');
s.push_back('\x22');
s.push_back('\x33');
absl::string_view sp(s);
HuffmanBitBuffer bb;
sp.remove_prefix(bb.AppendBytes(sp));
EXPECT_TRUE(sp.empty());
bb.ConsumeBits(1);
EXPECT_EQ(bb.count(), 23u) << bb;
EXPECT_EQ(bb.free_count(), 41u) << bb;
EXPECT_EQ(bb.value(), HuffmanAccumulator(0x112233) << 41) << bb;
bb.ConsumeBits(20);
EXPECT_EQ(bb.count(), 3u) << bb;
EXPECT_EQ(bb.free_count(), 61u) << bb;
EXPECT_EQ(bb.value(), HuffmanAccumulator(0x3) << 61) << bb;
}
TEST(HuffmanBitBufferTest, AppendBytesUnaligned) {
std::string s;
s.push_back('\x11');
s.push_back('\x22');
s.push_back('\x33');
s.push_back('\x44');
s.push_back('\x55');
s.push_back('\x66');
s.push_back('\x77');
s.push_back('\x88');
s.push_back('\x99');
s.push_back('\xaa');
s.push_back('\xbb');
s.push_back('\xcc');
s.push_back('\xdd');
absl::string_view sp(s);
HuffmanBitBuffer bb;
sp.remove_prefix(bb.AppendBytes(sp));
EXPECT_EQ(sp.size(), 5u);
EXPECT_FALSE(bb.InputProperlyTerminated());
bb.ConsumeBits(15);
EXPECT_EQ(bb.count(), 49u) << bb;
EXPECT_EQ(bb.free_count(), 15u) << bb;
HuffmanAccumulator expected(0x1122334455667788);
expected <<= 15;
EXPECT_EQ(bb.value(), expected);
sp.remove_prefix(bb.AppendBytes(sp));
EXPECT_EQ(sp.size(), 4u);
EXPECT_EQ(bb.count(), 57u) << bb;
EXPECT_EQ(bb.free_count(), 7u) << bb;
expected |= (HuffmanAccumulator(0x99) << 7);
EXPECT_EQ(bb.value(), expected)
<< bb << std::hex << "\n actual: " << bb.value()
<< "\n expected: " << expected;
}
class HpackHuffmanDecoderTest : public RandomDecoderTest {
protected:
HpackHuffmanDecoderTest() {
stop_decode_on_done_ = false;
}
DecodeStatus StartDecoding(DecodeBuffer* b) override {
input_bytes_seen_ = 0;
output_buffer_.clear();
decoder_.Reset();
return ResumeDecoding(b);
}
DecodeStatus ResumeDecoding(DecodeBuffer* b) override {
input_bytes_seen_ += b->Remaining();
absl::string_view sp(b->cursor(), b->Remaining());
if (decoder_.Decode(sp, &output_buffer_)) {
b->AdvanceCursor(b->Remaining());
EXPECT_LE(input_bytes_seen_, input_bytes_expected_);
if (input_bytes_expected_ == input_bytes_seen_) {
if (decoder_.InputProperlyTerminated()) {
return DecodeStatus::kDecodeDone;
} else {
return DecodeStatus::kDecodeError;
}
}
return DecodeStatus::kDecodeInProgress;
}
return DecodeStatus::kDecodeError;
}
HpackHuffmanDecoder decoder_;
std::string output_buffer_;
size_t input_bytes_seen_;
size_t input_bytes_expected_;
};
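// Huffman-encoded / decoded pairs taken from RFC 7541, Appendix C.4.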
TEST_F(HpackHuffmanDecoderTest, SpecRequestExamples) {
HpackHuffmanDecoder decoder;
std::string test_table[] = {
"f1e3c2e5f23a6ba0ab90f4ff",
"www.example.com",
"a8eb10649cbf",
"no-cache",
"25a849e95ba97d7f",
"custom-key",
"25a849e95bb8e8b4bf",
"custom-value",
};
for (size_t i = 0; i != ABSL_ARRAYSIZE(test_table); i += 2) {
std::string huffman_encoded;
ASSERT_TRUE(absl::HexStringToBytes(test_table[i], &huffman_encoded));
const std::string& plain_string(test_table[i + 1]);
std::string buffer;
decoder.Reset();
EXPECT_TRUE(decoder.Decode(huffman_encoded, &buffer)) << decoder;
EXPECT_TRUE(decoder.InputProperlyTerminated()) << decoder;
EXPECT_EQ(buffer, plain_string);
}
}
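// Huffman-encoded / decoded pairs taken from RFC 7541, Appendix C.6.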
TEST_F(HpackHuffmanDecoderTest, SpecResponseExamples) {
HpackHuffmanDecoder decoder;
std::string test_table[] = {
"6402",
"302",
"aec3771a4b",
"private",
"d07abe941054d444a8200595040b8166e082a62d1bff",
"Mon, 21 Oct 2013 20:13:21 GMT",
"9d29ad171863c78f0b97c8e9ae82ae43d3",
"https:
"94e7821dd7f2e6c7b335dfdfcd5b3960d5af27087f3672c1ab270fb5291f9587316065c0"
"03ed4ee5b1063d5007",
"foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age=3600; version=1",
};
for (size_t i = 0; i != ABSL_ARRAYSIZE(test_table); i += 2) {
std::string huffman_encoded;
ASSERT_TRUE(absl::HexStringToBytes(test_table[i], &huffman_encoded));
const std::string& plain_string(test_table[i + 1]);
std::string buffer;
decoder.Reset();
EXPECT_TRUE(decoder.Decode(huffman_encoded, &buffer)) << decoder;
EXPECT_TRUE(decoder.InputProperlyTerminated()) << decoder;
EXPECT_EQ(buffer, plain_string);
}
}
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/http2/hpack/huffman/hpack_huffman_decoder.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/http2/hpack/huffman/hpack_huffman_decoder_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
92726b70-ffc0-4529-89cd-67ffa266f16e | cpp | google/tensorstore | json | tensorstore/index_space/json.cc | tensorstore/index_space/json_test.cc | #include "tensorstore/index_space/json.h"
#include <stddef.h>
#include <algorithm>
#include <cassert>
#include <optional>
#include <string>
#include <type_traits>
#include <utility>
#include "absl/base/attributes.h"
#include "absl/container/inlined_vector.h"
#include "absl/meta/type_traits.h"
#include "absl/status/status.h"
#include <nlohmann/json.hpp>
#include "tensorstore/array.h"
#include "tensorstore/index.h"
#include "tensorstore/index_interval.h"
#include "tensorstore/index_space/index_domain.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/index_space/index_transform_builder.h"
#include "tensorstore/index_space/internal/transform_rep.h"
#include "tensorstore/index_space/output_index_method.h"
#include "tensorstore/internal/json/array.h"
#include "tensorstore/internal/json/json.h"
#include "tensorstore/internal/json/value_as.h"
#include "tensorstore/internal/json_binding/array.h"
#include "tensorstore/internal/json_binding/bindable.h"
#include "tensorstore/internal/json_binding/dimension_indexed.h"
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/internal/json_binding/std_array.h"
#include "tensorstore/internal/json_binding/std_optional.h"
#include "tensorstore/internal/type_traits.h"
#include "tensorstore/json_serialization_options.h"
#include "tensorstore/json_serialization_options_base.h"
#include "tensorstore/rank.h"
#include "tensorstore/util/dimension_set.h"
#include "tensorstore/util/iterate.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace {
namespace jb = tensorstore::internal_json_binding;
using ::tensorstore::internal_index_space::BuilderFlags;
using ::tensorstore::internal_index_space::OutputIndexMapInitializer;
using ::tensorstore::internal_index_space::TransformRep;
struct DomainJsonKeys {
const char* rank;
const char* inclusive_min;
const char* inclusive_max;
const char* shape;
const char* exclusive_max;
const char* labels;
};
constexpr DomainJsonKeys kIndexDomainJsonKeys = {
"rank", "inclusive_min", "inclusive_max",
"shape", "exclusive_max", "labels",
};
constexpr DomainJsonKeys kIndexTransformJsonKeys = {
"input_rank", "input_inclusive_min", "input_inclusive_max",
"input_shape", "input_exclusive_max", "input_labels",
};
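// Binds a (value, is_implicit) pair: in JSON, an implicit bound is written as
// a one-element array [value], while an explicit bound is the bare value.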
template <typename ElementBinder>
struct ImplicitPairBinder {
ABSL_ATTRIBUTE_NO_UNIQUE_ADDRESS ElementBinder element_binder;
template <typename Options, typename Obj>
absl::Status operator()(std::true_type is_loading, const Options& options,
Obj* obj, ::nlohmann::json* j) const {
auto&& [element, is_implicit] = *obj;
if (const auto* k = j->get_ptr<const ::nlohmann::json::array_t*>()) {
if (k->size() != 1) {
return internal_json::ExpectedError(
*k, "array of size 1 indicating an implicit value");
}
is_implicit = true;
return element_binder(is_loading, options, &element, &(*k)[0]);
} else {
is_implicit = false;
return element_binder(is_loading, options, &element, j);
}
}
template <typename Options, typename Obj>
absl::Status operator()(std::false_type is_loading, const Options& options,
const Obj* obj, ::nlohmann::json* j) const {
auto&& [element, is_implicit] = *obj;
if (is_implicit) {
::nlohmann::json::array_t k(1);
TENSORSTORE_RETURN_IF_ERROR(
element_binder(is_loading, options, &element, &k[0]));
*j = std::move(k);
} else {
return element_binder(is_loading, options, &element, j);
}
return absl::OkStatus();
}
};
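// Binds an entire bounds array together with the corresponding per-dimension
// implicit bits: the rank is checked against (or inferred from) the array
// length, and each element is handled as a (bound, implicit) pair by
// |element_binder|.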
template <typename RankProjection, typename ValuesProjection,
typename ImplicitProjection, typename ElementBinder>
struct ImplicitArrayBinderImpl {
RankProjection rank_ptr;
ValuesProjection values_ptr;
ImplicitProjection implicit_ptr;
ABSL_ATTRIBUTE_NO_UNIQUE_ADDRESS ElementBinder element_binder;
template <typename Loading, typename Options, typename Obj>
absl::Status operator()(Loading is_loading, const Options& options, Obj* obj,
::nlohmann::json* j) const {
return jb::OptionalArray(
[this](const auto& obj) { return std::invoke(values_ptr, obj).size(); },
[this](auto& obj, size_t size) {
TENSORSTORE_RETURN_IF_ERROR(ValidateRank(size));
auto&& rank = std::invoke(rank_ptr, obj);
if (rank == dynamic_rank) {
rank = size;
} else if (rank != static_cast<DimensionIndex>(size)) {
return internal_json::JsonValidateArrayLength(size, rank);
}
std::invoke(values_ptr, obj).resize(size);
return absl::OkStatus();
},
[this](auto& obj, size_t i) {
auto& value = std::invoke(values_ptr, obj)[i];
auto implicit_value = std::invoke(implicit_ptr, obj)[i];
return std::pair<decltype(value), decltype(implicit_value)>(
value, implicit_value);
},
element_binder)(is_loading, options, obj, j);
}
};
template <typename T>
using InlinedVector = absl::InlinedVector<T, internal::kNumInlinedDims>;
struct TransformParserOutput {
Index offset = 0;
Index stride = 1;
std::optional<DimensionIndex> input_dimension;
IndexInterval index_array_bounds;
SharedArray<const Index, dynamic_rank> index_array;
};
struct TransformParserData {
IntervalForm interval_form = IntervalForm::half_open;
BuilderFlags flags{0};
DimensionIndex rank = dynamic_rank;
InlinedVector<Index> lower_bounds;
InlinedVector<Index> upper_bounds;
DimensionSet implicit_lower_bounds;
DimensionSet implicit_upper_bounds;
InlinedVector<std::string> labels;
std::optional<InlinedVector<TransformParserOutput>> output;
Result<TransformRep::Ptr<>> Finalize();
};
constexpr auto TransformParserOutputBinder = jb::Object(
jb::Member("offset",
jb::Projection(&TransformParserOutput::offset,
jb::DefaultValue([](Index* o) { *o = 0; }))),
jb::AtMostOne("input_dimension", "index_array"),
jb::Member("input_dimension",
jb::Projection(&TransformParserOutput::input_dimension,
jb::Optional())),
jb::OptionalMember(
"index_array",
jb::Projection(&TransformParserOutput::index_array, jb::NestedArray())),
jb::OptionalMember(
"index_array_bounds",
jb::Sequence(jb::Initialize([](auto* obj) {
if (!obj->index_array.data()) {
return absl::InvalidArgumentError(
"\"index_array_bounds\" is only valid with "
"\"index_array\"");
}
return absl::OkStatus();
}),
jb::Projection(&TransformParserOutput::index_array_bounds,
jb::DefaultValue(
[](auto* obj) {
*obj = IndexInterval::Infinite();
},
jb::IndexIntervalBinder)))),
jb::OptionalMember(
"stride",
jb::Sequence(
jb::Initialize([](auto* obj) {
if (!obj->input_dimension && !obj->index_array.data()) {
return absl::InvalidArgumentError(
"Either \"input_dimension\" or \"index_array\" must be "
"specified in "
"conjunction with \"stride\"");
}
return absl::OkStatus();
}),
jb::Projection(&TransformParserOutput::stride,
jb::DefaultValue([](Index* s) { *s = 1; }))))
);
template <typename T, typename ElementBinder>
constexpr auto LowerBoundsBinder(ElementBinder element_binder) {
using Binder = ImplicitPairBinder<absl::remove_cvref_t<ElementBinder>>;
auto rank_ptr = &T::rank;
auto value_ptr = &T::lower_bounds;
auto implicit_ptr = &T::implicit_lower_bounds;
return ImplicitArrayBinderImpl<decltype(rank_ptr), decltype(value_ptr),
decltype(implicit_ptr), Binder>{
std::move(rank_ptr), std::move(value_ptr), std::move(implicit_ptr),
Binder{std::move(element_binder)}};
}
template <typename T, typename ElementBinder>
constexpr auto UpperBoundsBinder(ElementBinder element_binder) {
using Binder = ImplicitPairBinder<absl::remove_cvref_t<ElementBinder>>;
auto rank_ptr = &T::rank;
auto value_ptr = &T::upper_bounds;
auto implicit_ptr = &T::implicit_upper_bounds;
return ImplicitArrayBinderImpl<decltype(rank_ptr), decltype(value_ptr),
decltype(implicit_ptr), Binder>{
std::move(rank_ptr), std::move(value_ptr), std::move(implicit_ptr),
Binder{std::move(element_binder)}};
}
constexpr auto IndexTransformParser(
bool is_transform, DimensionIndex input_rank_constraint = dynamic_rank) {
return [=](auto is_loading, const auto& options, auto* obj,
::nlohmann::json::object_t* j) -> absl::Status {
using T = TransformParserData;
auto* keys =
is_transform ? &kIndexTransformJsonKeys : &kIndexDomainJsonKeys;
DimensionIndex* rank = is_loading ? &obj->rank : nullptr;
return jb::Sequence(
jb::AtLeastOne(keys->rank, keys->inclusive_min, keys->shape,
keys->inclusive_max, keys->exclusive_max, keys->labels),
[=](auto is_loading, const auto& options, auto* obj,
::nlohmann::json::object_t* j) -> absl::Status {
if constexpr (!is_loading) {
if (j->count(keys->inclusive_min) ||
j->count(keys->exclusive_max) || j->count(keys->labels)) {
return absl::OkStatus();
}
}
return jb::Member(
keys->rank,
jb::Projection(&T::rank,
jb::DefaultValue(
[](DimensionIndex* o) { *o = dynamic_rank; },
jb::Integer<DimensionIndex>(0, kMaxRank)))
)(is_loading, options, obj, j);
},
jb::OptionalMember(keys->inclusive_min,
jb::Sequence(LowerBoundsBinder<T>(
jb::BoundsBinder<-kInfIndex, 0>()),
jb::Initialize([](auto* obj) {
obj->flags |=
(BuilderFlags::kSetLower |
BuilderFlags::kSetImplicitLower);
}))),
jb::AtMostOne(keys->shape, keys->inclusive_max, keys->exclusive_max),
jb::OptionalMember(
keys->shape,
jb::LoadSave(jb::Sequence(
UpperBoundsBinder<T>(jb::BoundsBinder<0, +kInfSize>()),
jb::Initialize([](auto* obj) {
obj->interval_form = IntervalForm::sized;
obj->flags |= (BuilderFlags::kSetUpper |
BuilderFlags::kSetImplicitUpper);
})))),
jb::OptionalMember(
keys->inclusive_max,
jb::LoadSave(jb::Sequence(
UpperBoundsBinder<T>(jb::BoundsBinder<0, +kInfIndex>()),
jb::Initialize([](auto* obj) {
obj->interval_form = IntervalForm::closed;
obj->flags |= (BuilderFlags::kSetUpper |
BuilderFlags::kSetImplicitUpper);
})))),
jb::OptionalMember(
keys->exclusive_max,
jb::Sequence(
UpperBoundsBinder<T>(jb::BoundsBinder<0, +kInfIndex + 1>()),
jb::Initialize([](auto* obj) {
obj->interval_form = IntervalForm::half_open;
obj->flags |= (BuilderFlags::kSetUpper |
BuilderFlags::kSetImplicitUpper);
}))),
jb::OptionalMember(
keys->labels,
jb::Projection(&T::labels, jb::DimensionLabelVector(rank))),
jb::Initialize([=](auto* obj) {
if (!RankConstraint::EqualOrUnspecified(input_rank_constraint,
obj->rank)) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Expected ", keys->rank, " to be ", input_rank_constraint,
", but is: ", obj->rank));
}
return absl::OkStatus();
})
)(is_loading, options, obj, j);
};
}
constexpr auto IndexTransformOutputParser(
DimensionIndex output_rank_constraint = dynamic_rank) {
return [=](auto is_loading, const auto& options, auto* obj,
::nlohmann::json::object_t* j) -> absl::Status {
return jb::Sequence(
jb::Member("output", jb::Projection(&TransformParserData::output,
jb::Optional(jb::Array(
TransformParserOutputBinder)))),
jb::Initialize([=](auto* obj) {
if (obj->output) {
if (output_rank_constraint != dynamic_rank &&
obj->output->size() != output_rank_constraint) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Expected output rank to be ", output_rank_constraint,
", but is: ", obj->output->size()));
}
return absl::OkStatus();
}
const DimensionIndex rank = obj->rank;
if (output_rank_constraint != dynamic_rank &&
output_rank_constraint != rank) {
return absl::InvalidArgumentError("Missing \"output\" member");
}
return absl::OkStatus();
        }))(is_loading, options, obj, j);
};
}
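// Builds the TransformRep from the parsed fields; when the "output" member
// was omitted, an identity output map is synthesized.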
Result<TransformRep::Ptr<>> TransformParserData::Finalize() {
if (!output) {
output.emplace(rank);
for (DimensionIndex i = 0; i < rank; ++i) {
(*output)[i].input_dimension = i;
}
}
const DimensionIndex output_rank = output->size();
auto transform = TransformRep::Allocate(rank, output_rank);
transform->input_rank = rank;
transform->output_rank = output_rank;
if ((flags & BuilderFlags::kSetLower) != BuilderFlags::kDefault) {
std::copy(lower_bounds.begin(), lower_bounds.end(),
transform->input_origin().begin());
transform->implicit_lower_bounds = implicit_lower_bounds;
}
if ((flags & BuilderFlags::kSetUpper) != BuilderFlags::kDefault) {
std::copy(upper_bounds.begin(), upper_bounds.end(),
transform->input_shape().begin());
transform->implicit_upper_bounds = implicit_upper_bounds;
}
if (!labels.empty()) {
std::copy(labels.begin(), labels.end(), transform->input_labels().begin());
}
InlinedVector<OutputIndexMapInitializer> output_maps;
output_maps.reserve(output_rank);
auto maps = transform->output_index_maps();
for (DimensionIndex output_dim = 0; output_dim < output_rank; ++output_dim) {
auto& out = (*output)[output_dim];
auto& map = maps[output_dim];
map.offset() = out.offset;
map.stride() = out.stride;
output_maps.emplace_back(
out.input_dimension
? OutputIndexMapInitializer(out.input_dimension.value())
: OutputIndexMapInitializer(out.index_array,
out.index_array_bounds));
}
TENSORSTORE_RETURN_IF_ERROR(SetOutputIndexMapsAndValidateTransformRep(
transform.get(), output_maps, interval_form, flags));
return transform;
}
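// Collects domain data for serialization; bounds arrays that are entirely
// implicit and infinite are dropped so the emitted JSON stays minimal.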
TransformParserData MakeIndexDomainViewDataForSaving(IndexDomainView<> domain) {
const DimensionIndex rank = domain.rank();
TransformParserData tmp;
tmp.rank = rank;
tmp.lower_bounds.resize(rank);
tmp.upper_bounds.resize(rank);
tmp.labels.assign(domain.labels().begin(), domain.labels().end());
tmp.implicit_lower_bounds = domain.implicit_lower_bounds();
tmp.implicit_upper_bounds = domain.implicit_upper_bounds();
bool all_implicit_lower = true;
bool all_implicit_upper = true;
for (DimensionIndex i = 0; i < rank; ++i) {
tmp.lower_bounds[i] = domain[i].inclusive_min();
tmp.upper_bounds[i] = domain[i].exclusive_max();
all_implicit_lower = all_implicit_lower && tmp.implicit_lower_bounds[i] &&
(tmp.lower_bounds[i] == -kInfIndex);
all_implicit_upper = all_implicit_upper && tmp.implicit_upper_bounds[i] &&
(tmp.upper_bounds[i] == (+kInfIndex + 1));
}
if (all_implicit_lower) {
tmp.lower_bounds.resize(0);
}
if (all_implicit_upper) {
tmp.upper_bounds.resize(0);
}
return tmp;
}
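// Collects transform data for serialization; an output mapping that is the
// identity is omitted entirely.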
TransformParserData MakeIndexTransformViewDataForSaving(
IndexTransformView<> transform) {
auto input_domain = transform.input_domain();
TransformParserData tmp = MakeIndexDomainViewDataForSaving(input_domain);
const DimensionIndex input_rank = transform.input_rank();
const DimensionIndex output_rank = transform.output_rank();
bool all_identity = (output_rank == input_rank);
tmp.output.emplace(output_rank);
auto maps = transform.output_index_maps();
for (DimensionIndex i = 0; i < output_rank; ++i) {
auto& output = (*tmp.output)[i];
const auto map = maps[i];
if (map.offset() != 0) {
output.offset = map.offset();
all_identity = false;
}
if (map.method() != OutputIndexMethod::constant && map.stride() != 1) {
output.stride = map.stride();
all_identity = false;
}
switch (map.method()) {
case OutputIndexMethod::constant:
all_identity = false;
break;
case OutputIndexMethod::single_input_dimension: {
const DimensionIndex input_dim = map.input_dimension();
output.input_dimension = input_dim;
if (input_dim != i) all_identity = false;
break;
}
case OutputIndexMethod::array: {
all_identity = false;
const auto index_array_data = map.index_array();
output.index_array = UnbroadcastArrayPreserveRank(
UnownedToShared(index_array_data.array_ref()));
IndexInterval index_range = index_array_data.index_range();
if (index_range != IndexInterval::Infinite() &&
!ValidateIndexArrayBounds(index_range, output.index_array).ok()) {
output.index_array_bounds = index_range;
}
break;
}
}
}
if (all_identity) {
tmp.output = std::nullopt;
}
return tmp;
}
}
void to_json(::nlohmann::json& j,
IndexTransformView<> transform) {
if (!transform.valid()) {
j = ::nlohmann::json(::nlohmann::json::value_t::discarded);
return;
}
auto binder = jb::Object(IndexTransformParser(true),
IndexTransformOutputParser());
auto tmp = MakeIndexTransformViewDataForSaving(transform);
::nlohmann::json::object_t obj;
auto status = binder(std::false_type{}, IncludeDefaults{false}, &tmp, &obj);
status.IgnoreError();
assert(status.ok());
j = std::move(obj);
}
void to_json(::nlohmann::json& j,
IndexDomainView<> domain) {
if (!domain.valid()) {
j = ::nlohmann::json(::nlohmann::json::value_t::discarded);
return;
}
auto binder = jb::Object(IndexTransformParser(false));
auto tmp = MakeIndexDomainViewDataForSaving(domain);
::nlohmann::json::object_t obj;
auto status = binder(std::false_type{}, IncludeDefaults{false}, &tmp, &obj);
status.IgnoreError();
assert(status.ok());
j = std::move(obj);
}
void to_json(::nlohmann::json& j,
IndexInterval interval) {
auto status = jb::IndexIntervalBinder(std::false_type{},
IncludeDefaults{false}, &interval, &j);
status.IgnoreError();
assert(status.ok());
}
namespace internal_index_space {
Result<TransformRep::Ptr<>> ParseIndexTransformFromJson(
const ::nlohmann::json& j, DimensionIndex input_rank_constraint,
DimensionIndex output_rank_constraint) {
if (j.is_discarded()) return TransformRep::Ptr<>(nullptr);
auto result = [&]() -> Result<TransformRep::Ptr<>> {
auto binder = jb::Object(IndexTransformParser(true, input_rank_constraint),
IndexTransformOutputParser(output_rank_constraint)
);
TENSORSTORE_ASSIGN_OR_RETURN(auto parser_data,
jb::FromJson<TransformParserData>(j, binder));
return parser_data.Finalize();
}();
if (result) return result;
return MaybeAnnotateStatus(result.status(),
"Error parsing index transform from JSON");
}
Result<TransformRep::Ptr<>> ParseIndexDomainFromJson(
const ::nlohmann::json& j, DimensionIndex rank_constraint) {
if (j.is_discarded()) return TransformRep::Ptr<>(nullptr);
auto result = [&]() -> Result<TransformRep::Ptr<>> {
auto binder = jb::Object(IndexTransformParser(false, rank_constraint));
TENSORSTORE_ASSIGN_OR_RETURN(auto parser_data,
                                 jb::FromJson<TransformParserData>(j, binder));
return parser_data.Finalize();
}();
if (result) return result;
return MaybeAnnotateStatus(result.status(),
"Error parsing index domain from JSON");
}
}
namespace internal_json_binding {
TENSORSTORE_DEFINE_JSON_BINDER(
ConstrainedRankJsonBinder,
[](auto is_loading, const auto& options, auto* obj, auto* j) {
if constexpr (is_loading) {
if (j->is_discarded()) {
*obj = options.rank().rank;
return absl::OkStatus();
}
TENSORSTORE_RETURN_IF_ERROR(
Integer<DimensionIndex>(0, kMaxRank)(is_loading, options, obj, j));
} else {
if ((!IncludeDefaults(options).include_defaults() &&
options.rank().rank != dynamic_rank) ||
*obj == dynamic_rank) {
*j = ::nlohmann::json::value_t::discarded;
} else {
*j = *obj;
}
}
if (!RankConstraint::EqualOrUnspecified(options.rank().rank, *obj)) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Expected ", options.rank().rank, ", but received: ", *obj));
}
return absl::OkStatus();
})
}
} | #include "tensorstore/index_space/json.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/index_space/dim_expression.h"
#include "tensorstore/index_space/index_domain_builder.h"
#include "tensorstore/index_space/index_transform_builder.h"
#include "tensorstore/internal/json_binding/gtest.h"
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/internal/json_gtest.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::DimensionIndex;
using ::tensorstore::dynamic_rank;
using ::tensorstore::Index;
using ::tensorstore::IndexInterval;
using ::tensorstore::IndexTransform;
using ::tensorstore::IndexTransformBuilder;
using ::tensorstore::kInfIndex;
using ::tensorstore::kInfSize;
using ::tensorstore::MatchesJson;
using ::tensorstore::MatchesStatus;
using ::tensorstore::Result;
using ::tensorstore::internal::ParseJson;
IndexTransform<> MakeExampleTransform() {
return tensorstore::IndexTransformBuilder<4, 3>()
.input_origin({-kInfIndex, 7, -kInfIndex, 8})
.input_exclusive_max({kInfIndex + 1, 10, kInfIndex + 1, 17})
.implicit_lower_bounds({0, 0, 1, 1})
.implicit_upper_bounds({0, 0, 1, 1})
.input_labels({"x", "y", "z", "t"})
.output_constant(0, 3)
.output_single_input_dimension(1, 0, 2, 2)
.output_index_array(2, 7, 1,
tensorstore::MakeArray<Index>({{
{{1}},
{{2}},
{{3}},
}}))
.Finalize()
.value();
}
IndexTransform<> MakeUnlabeledExampleTransform() {
return tensorstore::IndexTransformBuilder<4, 3>()
.input_origin({-kInfIndex, 7, -kInfIndex, 8})
.input_exclusive_max({kInfIndex + 1, 10, kInfIndex + 1, 17})
.implicit_lower_bounds({0, 0, 1, 1})
.implicit_upper_bounds({0, 0, 1, 1})
.output_constant(0, 3)
.output_single_input_dimension(1, 0, 2, 2)
.output_index_array(2, 7, 1,
tensorstore::MakeArray<Index>({{
{{1}},
{{2}},
{{3}},
}}),
IndexInterval::Closed(1, 2))
.Finalize()
.value();
}
::nlohmann::json MakeUnlabeledExampleJson() {
return ParseJson(R"(
{
"input_inclusive_min": ["-inf", 7, ["-inf"], [8]],
"input_exclusive_max": ["+inf", 10, ["+inf"], [17]],
"output": [
{"offset": 3},
{"stride": 2, "input_dimension": 2},
{
"offset": 7,
"index_array": [[ [[1]], [[2]], [[3]] ]],
"index_array_bounds": [1, 2]
}
]
}
)");
}
::nlohmann::json MakeLabeledExampleJson() {
return ParseJson(R"(
{
"input_inclusive_min": ["-inf", 7, ["-inf"], [8]],
"input_exclusive_max": ["+inf", 10, ["+inf"], [17]],
"input_labels": ["x", "y", "z", "t"],
"output": [
{"offset": 3},
{"stride": 2, "input_dimension": 2},
{"offset": 7, "index_array": [[ [[1]], [[2]], [[3]] ]]}
]
}
)");
}
TEST(ToJsonTest, Unlabeled) {
EXPECT_EQ(MakeUnlabeledExampleJson(),
::nlohmann::json(MakeUnlabeledExampleTransform()));
}
TEST(ToJsonTest, Labeled) {
EXPECT_EQ(MakeLabeledExampleJson(), ::nlohmann::json(MakeExampleTransform()));
}
TEST(IndexTransformJsonBinderTest, IndexArrayOutOfBounds) {
tensorstore::TestJsonBinderRoundTrip<IndexTransform<>>({
{IndexTransformBuilder(1, 1)
.input_shape({3})
.output_index_array(0, 0, 1,
tensorstore::MakeArray<Index>({1, 2, 3}))
.Finalize()
.value(),
{
{"input_inclusive_min", {0}},
{"input_exclusive_max", {3}},
{"output",
{
{{"index_array", {1, 2, 3}}},
}},
}},
{IndexTransformBuilder(1, 1)
.input_shape({3})
.output_index_array(0, 0, 1,
tensorstore::MakeArray<Index>({1, 2, 3}),
IndexInterval::UncheckedClosed(1, 2))
.Finalize()
.value(),
{
{"input_inclusive_min", {0}},
{"input_exclusive_max", {3}},
{"output",
{
{{"index_array", {1, 2, 3}}, {"index_array_bounds", {1, 2}}},
}},
}},
{IndexTransformBuilder(1, 1)
.input_shape({3})
.output_index_array(
0, 0, 1, tensorstore::MakeArray<Index>({1, kInfIndex + 1, 3}))
.Finalize()
.value(),
{
{"input_inclusive_min", {0}},
{"input_exclusive_max", {3}},
{"output",
{
{{"index_array", {1, kInfIndex + 1, 3}}},
}},
}},
});
tensorstore::TestJsonBinderToJson<IndexTransform<>>({
{IndexTransformBuilder(1, 1)
.input_shape({3})
.output_index_array(0, 0, 1,
tensorstore::MakeArray<Index>({1, 2, 3}),
IndexInterval::Closed(1, 3))
.Finalize()
.value(),
::testing::Optional(MatchesJson(::nlohmann::json{
{"input_inclusive_min", {0}},
{"input_exclusive_max", {3}},
{"output",
{
{{"index_array", {1, 2, 3}}},
}},
}))},
});
}
TEST(ToJsonTest, NullTransform) {
EXPECT_TRUE(::nlohmann::json(tensorstore::IndexTransform<>()).is_discarded());
}
TEST(ToJsonTest, IdentityTransform) {
EXPECT_EQ(ParseJson(R"(
{
"input_inclusive_min": [1, 2],
"input_exclusive_max": [4, 6]
}
)")
.dump(),
::nlohmann::json(tensorstore::IdentityTransform(
tensorstore::BoxView({1, 2}, {3, 4})))
.dump());
}
TEST(ToJsonTest, Translation) {
EXPECT_EQ(
::nlohmann::json({
{"input_inclusive_min", {1, 2}},
{"input_exclusive_max", {4, 6}},
{"output",
{
{{"offset", -1}, {"input_dimension", 0}},
{{"offset", -2}, {"input_dimension", 1}},
}},
}),
::nlohmann::json(ChainResult(tensorstore::IdentityTransform(
tensorstore::BoxView({3, 4})),
tensorstore::AllDims().TranslateTo({1, 2}))
.value()));
}
void TestRoundTripJson(const ::nlohmann::json& json) {
SCOPED_TRACE(json.dump());
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto parsed,
tensorstore::ParseIndexTransform(json));
EXPECT_EQ(json, ::nlohmann::json(parsed));
}
TEST(RoundTripJsonTest, Labels) {
TestRoundTripJson({
{"input_inclusive_min", {1}},
{"input_exclusive_max", {3}},
});
TestRoundTripJson({
{"input_inclusive_min", {1}},
{"input_exclusive_max", {3}},
{"input_labels", {"x"}},
});
}
TEST(RoundTripJsonTest, Rank0) {
TestRoundTripJson({
{"input_rank", 0},
});
}
TEST(RoundTripJsonTest, Input1Output0) {
TestRoundTripJson({
{"input_rank", 1},
{"output", ::nlohmann::json::array_t()},
});
}
TEST(RoundTripJsonTest, LabelsOnly) {
TestRoundTripJson({
{"input_labels", {"x", "y", "z"}},
});
}
TEST(RoundTripJsonTest, MinOnlyNotImplicit) {
TestRoundTripJson({
{"input_inclusive_min", {"-inf"}},
});
}
TEST(RoundTripJsonTest, MaxOnlyNotImplicit) {
TestRoundTripJson({
{"input_exclusive_max", {"+inf"}},
});
}
TEST(ParseIndexTransformTest, Null) {
EXPECT_EQ(IndexTransform<>(),
tensorstore::ParseIndexTransform(
::nlohmann::json(::nlohmann::json::value_t::discarded)));
}
TEST(ParseIndexTransformTest, DynamicFromLabeled) {
EXPECT_EQ(MakeExampleTransform(),
tensorstore::ParseIndexTransform(MakeLabeledExampleJson()));
}
TEST(ParseIndexTransformTest, DynamicFromUnlabeled) {
EXPECT_EQ(MakeUnlabeledExampleTransform(),
tensorstore::ParseIndexTransform(MakeUnlabeledExampleJson()));
}
TEST(ParseIndexTransformTest, Static) {
auto t = tensorstore::ParseIndexTransform<4, 3>(MakeLabeledExampleJson());
static_assert(
std::is_same_v<decltype(t), Result<tensorstore::IndexTransform<4, 3>>>);
EXPECT_EQ(MakeExampleTransform(), t);
}
TEST(ParseIndexTransformTest, IdentityTransformExclusiveMax) {
EXPECT_EQ(tensorstore::IndexTransformBuilder<>(2, 2)
.input_origin({1, 2})
.input_exclusive_max({5, kInfIndex + 1})
.output_identity_transform()
.Finalize()
.value(),
tensorstore::ParseIndexTransform(ParseJson(R"(
{
"input_inclusive_min": [1, 2],
"input_exclusive_max": [5, "+inf"]
}
)")));
}
TEST(ParseIndexTransformTest, IdentityTransformInclusiveMax) {
EXPECT_EQ(tensorstore::IndexTransformBuilder<>(2, 2)
.input_origin({1, 2})
.input_inclusive_max({5, kInfIndex})
.output_identity_transform()
.Finalize()
.value(),
tensorstore::ParseIndexTransform(ParseJson(R"(
{
"input_inclusive_min": [1, 2],
"input_inclusive_max": [5, "+inf"]
}
)")));
}
TEST(ParseIndexTransformTest, IdentityTransformShape) {
EXPECT_EQ(tensorstore::IndexTransformBuilder<>(2, 2)
.input_origin({1, 2})
.input_shape({5, kInfSize})
.output_identity_transform()
.Finalize()
.value(),
tensorstore::ParseIndexTransform(ParseJson(R"(
{
"input_inclusive_min": [1, 2],
"input_shape": [5, "+inf"]
}
)")));
}
TEST(ParseIndexTransformTest, IdentityTransformInputRank) {
EXPECT_EQ(tensorstore::IndexTransformBuilder<>(2, 2)
.output_identity_transform()
.Finalize()
.value(),
tensorstore::ParseIndexTransform(ParseJson(R"(
{
"input_rank": 2
}
)")));
}
TEST(ParseIndexTransformTest, StaticInputRankMismatch) {
EXPECT_THAT(
(tensorstore::ParseIndexTransform<3, 3>(MakeLabeledExampleJson())),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Error parsing index transform from JSON: "
"Expected input_rank to be 3, but is: 4"));
}
TEST(ParseIndexTransformTest, StaticOutputRankMismatch) {
EXPECT_THAT(
(tensorstore::ParseIndexTransform<4, 2>(MakeLabeledExampleJson())),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Error parsing index transform from JSON: "
"Expected output rank to be 2, but is: 3"));
}
TEST(ParseIndexTransformTest, MissingInputRank) {
EXPECT_THAT(
tensorstore::ParseIndexTransform(ParseJson(R"(
{
"output": [
{"offset": 3},
{"stride": 2, "input_dimension": 2},
{"offset": 7, "index_array": [[ [[1]], [[2]], [[3]] ]]}
]
}
)")),
MatchesStatus(
absl::StatusCode::kInvalidArgument,
"Error parsing index transform from JSON: "
"At least one of \"input_rank\", \"input_inclusive_min\", "
"\"input_shape\", \"input_inclusive_max\", \"input_exclusive_max\", "
"\"input_labels\" members must be specified"));
}
TEST(ParseIndexTransformTest, InvalidInputRank) {
EXPECT_THAT(tensorstore::ParseIndexTransform(ParseJson(R"(
{
"input_rank": -3
}
)")),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Error parsing index transform from JSON: "
"Error parsing object member \"input_rank\": "
"Expected integer .*, but received: -3"));
}
TEST(ParseIndexTransformTest, InvalidShape) {
EXPECT_THAT(tensorstore::ParseIndexTransform(ParseJson(R"(
{
"input_inclusive_min": [1, 2],
"input_shape": [1, 2, 3]
}
)")),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Error parsing index transform from JSON: "
"Error parsing object member \"input_shape\": "
"Array has length 3 but should have length 2"));
}
TEST(ParseIndexTransformTest, ExclusiveMaxAndInclusiveMax) {
EXPECT_THAT(
tensorstore::ParseIndexTransform(ParseJson(R"(
{
"input_inclusive_min": [1, 2],
"input_exclusive_max": [5, 10],
"input_inclusive_max": [5, 10]
}
)")),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Error parsing index transform from JSON: "
"At most one of \"input_shape\", \"input_inclusive_max\", "
"\"input_exclusive_max\" members is allowed"));
}
TEST(ParseIndexTransformTest, ExclusiveMaxAndShape) {
EXPECT_THAT(
tensorstore::ParseIndexTransform(ParseJson(R"(
{
"input_inclusive_min": [1, 2],
"input_exclusive_max": [5, 10],
"input_shape": [5, 10]
}
)")),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Error parsing index transform from JSON: "
"At most one of \"input_shape\", \"input_inclusive_max\", "
"\"input_exclusive_max\" members is allowed"));
}
TEST(ParseIndexTransformTest, InclusiveMaxAndShape) {
EXPECT_THAT(
tensorstore::ParseIndexTransform(ParseJson(R"(
{
"input_inclusive_min": [1, 2],
"input_inclusive_max": [5, 10],
"input_shape": [5, 10]
}
)")),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Error parsing index transform from JSON: "
"At most one of \"input_shape\", \"input_inclusive_max\", "
"\"input_exclusive_max\" members is allowed"));
}
TEST(ParseIndexTransformTest, MissingOutputs) {
auto json = ParseJson(R"(
{
"input_inclusive_min": [1, 2],
"input_exclusive_max": [5, 10]
}
)");
EXPECT_EQ((tensorstore::IndexTransformBuilder<2, 2>()
.input_origin({1, 2})
.input_exclusive_max({5, 10})
.output_identity_transform()
.Finalize()
.value()),
(tensorstore::ParseIndexTransform<dynamic_rank, 2>(json)));
EXPECT_THAT((tensorstore::ParseIndexTransform<dynamic_rank, 3>(json)),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Error parsing index transform from JSON: "
"Missing \"output\" member"));
}
TEST(ParseIndexTransformTest, InvalidInterval) {
EXPECT_THAT(tensorstore::ParseIndexTransform(ParseJson(R"(
{
"input_inclusive_min": [1, 11],
"input_exclusive_max": [5, 10]
}
)")),
MatchesStatus(absl::StatusCode::kInvalidArgument));
}
TEST(ParseIndexTransformTest, UnexpectedTopLevelMember) {
EXPECT_THAT((tensorstore::ParseIndexTransform(ParseJson(R"(
{
"input_inclusive_min": [1, 2],
"input_exclusive_max": [5, 10],
"extra": "value"
}
)"))),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Error parsing index transform from JSON: "
"Object includes extra members: \"extra\""));
}
TEST(ParseIndexTransformTest, UnexpectedOutputMember) {
EXPECT_THAT((tensorstore::ParseIndexTransform(ParseJson(R"(
{
"input_inclusive_min": [1],
"input_exclusive_max": [2],
"output": [
{"extra": "value"}
]
}
)"))),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Error parsing index transform from JSON: "
"Error parsing object member \"output\": "
"Error parsing value at position 0: "
"Object includes extra members: \"extra\""));
}
TEST(ParseIndexTransformTest, InvalidLabel) {
EXPECT_THAT(tensorstore::ParseIndexTransform(ParseJson(R"(
{
"input_inclusive_min": [1, 2],
"input_exclusive_max": [5, 10],
"input_labels": [1, 2]
}
)")),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Error parsing index transform from JSON: "
"Error parsing object member \"input_labels\": "
"Error parsing value at position 0: "
"Expected string, but received: 1"));
}
TEST(ParseIndexTransformTest, InvalidBound) {
EXPECT_THAT(
tensorstore::ParseIndexTransform(ParseJson(R"(
{
"input_inclusive_min": [1, "a"],
"input_exclusive_max": [5, 10]
}
)")),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Error parsing index transform from JSON: "
"Error parsing object member \"input_inclusive_min\": "
"Error parsing value at position 1: "
"Expected 64-bit signed integer or \"-inf\", "
"but received: \"a\""));
}
TEST(ParseIndexTransformTest, InvalidBoundPositiveInfinity) {
EXPECT_THAT(
tensorstore::ParseIndexTransform(ParseJson(R"(
{
"input_inclusive_min": [1, "+inf"],
"input_exclusive_max": [5, 10]
}
)")),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Error parsing index transform from JSON: "
"Error parsing object member \"input_inclusive_min\": "
"Error parsing value at position 1: "
"Expected 64-bit signed integer or \"-inf\", "
"but received: \"\\+inf\""));
}
TEST(ParseIndexTransformTest, InvalidBoundNegativeInfinity) {
EXPECT_THAT(
tensorstore::ParseIndexTransform(ParseJson(R"(
{
"input_inclusive_min": [1, "-inf"],
"input_exclusive_max": [5, "-inf"]
}
)")),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Error parsing index transform from JSON: "
"Error parsing object member \"input_exclusive_max\": "
"Error parsing value at position 1: "
"Expected 64-bit signed integer or \"\\+inf\", "
"but received: \"-inf\""));
}
TEST(ParseIndexTransformTest, InvalidOutputOffset) {
EXPECT_THAT(
tensorstore::ParseIndexTransform(ParseJson(R"(
{
"input_inclusive_min": [1],
"input_exclusive_max": [5],
"output": [
{"offset": "a"}
]
}
)")),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Error parsing index transform from JSON: "
"Error parsing object member \"output\": "
"Error parsing value at position 0: "
"Error parsing object member \"offset\": "
"Expected 64-bit signed integer, but received: \"a\""));
}
TEST(ParseIndexTransformTest, InvalidOutputStride) {
EXPECT_THAT(
tensorstore::ParseIndexTransform(ParseJson(R"(
{
"input_inclusive_min": [1],
"input_exclusive_max": [5],
"output": [
{"stride": "a", "input_dimension": 0}
]
}
)")),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Error parsing index transform from JSON: "
"Error parsing object member \"output\": "
"Error parsing value at position 0: "
"Error parsing object member \"stride\": "
"Expected 64-bit signed integer, but received: \"a\""));
}
TEST(ParseIndexTransformTest, UnexpectedStride) {
EXPECT_THAT(
tensorstore::ParseIndexTransform(ParseJson(R"(
{
"input_inclusive_min": [1],
"input_exclusive_max": [5],
"output": [
{"stride": 1}
]
}
)")),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Error parsing index transform from JSON: "
"Error parsing object member \"output\": "
"Error parsing value at position 0: "
"Error parsing object member \"stride\": "
"Either \"input_dimension\" or \"index_array\" must be "
"specified in conjunction with \"stride\""));
}
TEST(ParseIndexTransformTest, InvalidOutputInput) {
EXPECT_THAT(
tensorstore::ParseIndexTransform(ParseJson(R"(
{
"input_inclusive_min": [1],
"input_exclusive_max": [5],
"output": [
{"input_dimension": "a"}
]
}
)")),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Error parsing index transform from JSON: "
"Error parsing object member \"output\": "
"Error parsing value at position 0: "
"Error parsing object member \"input_dimension\": "
"Expected 64-bit signed integer, but received: \"a\""));
}
TEST(ParseIndexTransformTest, InvalidOutputArray) {
EXPECT_THAT(
tensorstore::ParseIndexTransform(ParseJson(R"(
{
"input_inclusive_min": [1],
"input_exclusive_max": [5],
"output": [
{"index_array": "a"}
]
}
)")),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Error parsing index transform from JSON: "
"Error parsing object member \"output\": "
"Error parsing value at position 0: "
"Error parsing object member \"index_array\": "
"Error parsing array element at position \\{\\}: "
".* received: \"a\""));
}
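// "input_dimension" and "index_array" are mutually exclusive within a single
// output map.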
TEST(ParseIndexTransformTest, InvalidOutputInputAndArray) {
EXPECT_THAT(
tensorstore::ParseIndexTransform(ParseJson(R"(
{
"input_inclusive_min": [1],
"input_exclusive_max": [5],
"output": [
{"input_dimension": 0, "index_array": [1]}
]
}
)")),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Error parsing index transform from JSON: "
"Error parsing object member \"output\": "
"Error parsing value at position 0: "
"At most one of \"input_dimension\", \"index_array\" "
"members is allowed"));
}
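// Dimension labels must be unique.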
TEST(ParseIndexTransformTest, DuplicateLabels) {
EXPECT_THAT(tensorstore::ParseIndexTransform(ParseJson(R"(
{
"input_labels": ["x", "x"]
}
)")),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Error parsing index transform from JSON: "
"Error parsing object member \"input_labels\": "
"Dimension label.*"));
}
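// Round-trips an IndexDomain through JSON. Implicit bounds are encoded by
// wrapping the bound in a single-element array, and ranks above 32 are
// rejected.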
TEST(IndexDomainJsonBinderTest, Simple) {
tensorstore::TestJsonBinderRoundTrip<tensorstore::IndexDomain<>>({
{tensorstore::IndexDomainBuilder<4>()
.origin({-kInfIndex, 7, -kInfIndex, 8})
.exclusive_max({kInfIndex + 1, 10, kInfIndex + 1, 17})
.implicit_lower_bounds({0, 0, 1, 1})
.implicit_upper_bounds({0, 0, 1, 1})
.labels({"x", "y", "z", "t"})
.Finalize()
.value(),
{
{"inclusive_min", {"-inf", 7, {"-inf"}, {8}}},
{"exclusive_max", {"+inf", 10, {"+inf"}, {17}}},
{"labels", {"x", "y", "z", "t"}},
}},
});
tensorstore::TestJsonBinderFromJson<tensorstore::IndexDomain<>>({
{{
{"rank", 33},
},
MatchesStatus(
absl::StatusCode::kInvalidArgument,
"Error parsing index domain from JSON: "
"Error parsing object member \"rank\": "
"Expected integer in the range \\[0, 32\\], but received: 33")},
{{
{"shape", {1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1}},
},
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Error parsing index domain from JSON: "
"Error parsing object member \"shape\": "
"Rank 33 is outside valid range \\[0, 32\\]")},
{{
{"labels", {"", "", "", "", "", "", "", "", "", "",
"", "", "", "", "", "", "", "", "", "",
"", "", "", "", "", "", "", "", "", "",
"", "", ""}},
},
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Error parsing index domain from JSON: "
"Error parsing object member \"labels\": "
"Rank 33 is outside valid range \\[0, 32\\]")},
{{
{"inclusive_min", {"-inf", 7, {"-inf"}, {8}}},
{"exclusive_max", {"+inf", 10, {"+inf"}, {17}}},
{"labels", {"x", "y", "z", "t"}},
{"output", "abc"},
},
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Error parsing index domain from JSON: "
"Object includes extra members: \"output\"")},
});
}
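// IndexBinder maps -/+kInfIndex to the "-inf"/"+inf" string literals and
// rejects indices outside the valid range.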
TEST(IndexBinderTest, Basic) {
using ::tensorstore::kMaxFiniteIndex;
tensorstore::TestJsonBinderRoundTrip<Index>(
{
{-kInfIndex, "-inf"},
{+kInfIndex, "+inf"},
{-kMaxFiniteIndex, -kMaxFiniteIndex},
{0, 0},
{5, 5},
{+kMaxFiniteIndex, +kMaxFiniteIndex},
},
tensorstore::internal_json_binding::IndexBinder);
tensorstore::TestJsonBinderFromJson<Index>(
{
{"abc", MatchesStatus(absl::StatusCode::kInvalidArgument)},
{-kInfIndex, ::testing::Optional(MatchesJson(-kInfIndex))},
{+kInfIndex, ::testing::Optional(MatchesJson(+kInfIndex))},
{-kInfIndex - 1, MatchesStatus(absl::StatusCode::kInvalidArgument)},
{kInfIndex + 1, MatchesStatus(absl::StatusCode::kInvalidArgument)},
},
tensorstore::internal_json_binding::IndexBinder);
}
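// An IndexInterval serializes as a two-element [inclusive_min, inclusive_max]
// array; infinite bounds use the "-inf"/"+inf" string literals.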
TEST(IndexIntervalBinderTest, Basic) {
using ::tensorstore::IndexInterval;
tensorstore::TestJsonBinderRoundTrip<IndexInterval>({
{IndexInterval::UncheckedClosed(5, 10), {5, 10}},
{IndexInterval(), {"-inf", "+inf"}},
{IndexInterval::UncheckedClosed(5, 4), {5, 4}},
{IndexInterval::UncheckedClosed(-kInfIndex, 20), {"-inf", 20}},
{IndexInterval::UncheckedClosed(20, +kInfIndex), {20, "+inf"}},
});
tensorstore::TestJsonBinderFromJson<IndexInterval>({
{"abc", MatchesStatus(absl::StatusCode::kInvalidArgument)},
{{-kInfIndex - 1, 10}, MatchesStatus(absl::StatusCode::kInvalidArgument)},
{{10, 5}, MatchesStatus(absl::StatusCode::kInvalidArgument)},
});
}
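// ConstrainedRankJsonBinder encodes dynamic_rank as a discarded JSON value
// and, when a rank constraint is supplied, rejects any other rank.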
TEST(ConstrainedRankJsonBinderTest, RoundTripNoConstraintIncludeDefaults) {
tensorstore::TestJsonBinderRoundTrip<DimensionIndex>(
{
{5, 5},
{dynamic_rank, ::nlohmann::json::value_t::discarded},
},
tensorstore::internal_json_binding::ConstrainedRankJsonBinder);
}
TEST(ConstrainedRankJsonBinderTest, RoundTripNoConstraintExcludeDefaults) {
tensorstore::TestJsonBinderRoundTrip<DimensionIndex>(
{
{5, 5},
{dynamic_rank, ::nlohmann::json::value_t::discarded},
},
tensorstore::internal_json_binding::ConstrainedRankJsonBinder,
tensorstore::IncludeDefaults{false});
}
TEST(ConstrainedRankJsonBinderTest, RoundTripRankConstraintIncludeDefaults) {
tensorstore::TestJsonBinderRoundTrip<DimensionIndex>(
{
{30, 30},
},
tensorstore::internal_json_binding::ConstrainedRankJsonBinder,
tensorstore::JsonSerializationOptions{tensorstore::RankConstraint{30},
tensorstore::IncludeDefaults{true}},
tensorstore::RankConstraint{30});
}
TEST(ConstrainedRankJsonBinderTest, FromJsonRankConstraint) {
tensorstore::TestJsonBinderFromJson<DimensionIndex>(
{
{30, ::testing::Optional(30)},
{::nlohmann::json::value_t::discarded, ::testing::Optional(30)},
{5, MatchesStatus(absl::StatusCode::kInvalidArgument,
"Expected 30, but received: 5")},
},
tensorstore::internal_json_binding::ConstrainedRankJsonBinder,
tensorstore::RankConstraint{30});
}
TEST(ConstrainedRankJsonBinderTest, ToJsonRankConstraintIncludeDefaults) {
tensorstore::TestJsonBinderToJson<DimensionIndex>(
{
{30, ::testing::Optional(MatchesJson(30))},
{dynamic_rank, ::testing::Optional(MatchesJson(
::nlohmann::json::value_t::discarded))},
{5, MatchesStatus(absl::StatusCode::kInvalidArgument,
"Expected 30, but received: 5")},
},
tensorstore::internal_json_binding::ConstrainedRankJsonBinder,
tensorstore::JsonSerializationOptions{
tensorstore::RankConstraint{30}, tensorstore::IncludeDefaults{true}});
}
TEST(ConstrainedRankJsonBinderTest, ToJsonRankConstraintExcludeDefaults) {
tensorstore::TestJsonBinderToJson<DimensionIndex>(
{
{30, ::testing::Optional(
MatchesJson(::nlohmann::json::value_t::discarded))},
{dynamic_rank, ::testing::Optional(MatchesJson(
::nlohmann::json::value_t::discarded))},
{5, MatchesStatus(absl::StatusCode::kInvalidArgument,
"Expected 30, but received: 5")},
},
tensorstore::internal_json_binding::ConstrainedRankJsonBinder,
tensorstore::JsonSerializationOptions{tensorstore::IncludeDefaults{false},
tensorstore::RankConstraint{30}});
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/index_space/json.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/index_space/json_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
f8a22cdd-a9f2-4a8b-9332-c4770305a759 | cpp | tensorflow/tensorflow | fill_quantization_options | tensorflow/compiler/mlir/quantization/stablehlo/utils/fill_quantization_options.cc | tensorflow/compiler/mlir/quantization/stablehlo/tests/fill_quantization_options_test.cc | #include "mlir/Support/LogicalResult.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/quantization_options.pb.h"
namespace mlir::quant::stablehlo {
using ::stablehlo::quantization::CustomQuantizationMethod;
using ::stablehlo::quantization::PresetQuantizationMethod;
using ::stablehlo::quantization::QuantizationComponentSpec;
using ::stablehlo::quantization::QuantizationOptions;
using QuantizationComponent =
::stablehlo::quantization::QuantizationComponentSpec_QuantizationComponent;
using BitType = ::stablehlo::quantization::QuantizationComponentSpec_BitType;
using BitWidth = ::stablehlo::quantization::QuantizationComponentSpec_BitWidth;
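// Convenience helper that sets the component, bit type, and bit width fields
// of a QuantizationComponentSpec in one call.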
void SetQuantizationComponentSpec(QuantizationComponentSpec* spec,
const QuantizationComponent& component,
const BitType bit_type,
const BitWidth bit_width) {
spec->set_quantization_component(component);
spec->set_bit_type(bit_type);
spec->set_bit_width(bit_width);
}
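// Expands the preset quantization method in `quantization_options_` into an
// explicit CustomQuantizationMethod with per-component bit types and widths.
// Returns the options unchanged if no preset method is set.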
::stablehlo::quantization::QuantizationOptions FillPresetQuantizationOptions(
::stablehlo::quantization::QuantizationOptions quantization_options_) {
CustomQuantizationMethod custom_method =
quantization_options_.quantization_method().custom_quantization_method();
QuantizationComponentSpec *activation_component, *weight_component,
*bias_component;
const auto preset_method = quantization_options_.quantization_method()
.preset_quantization_method()
.preset_method();
if (!preset_method) return quantization_options_;
switch (preset_method) {
case PresetQuantizationMethod::FLOAT16:
weight_component = custom_method.add_quantization_component_spec();
SetQuantizationComponentSpec(weight_component,
QuantizationComponentSpec::COMPONENT_WEIGHT,
QuantizationComponentSpec::BIT_TYPE_FLOAT,
QuantizationComponentSpec::BIT_WIDTH_16);
bias_component = custom_method.add_quantization_component_spec();
SetQuantizationComponentSpec(bias_component,
QuantizationComponentSpec::COMPONENT_BIAS,
QuantizationComponentSpec::BIT_TYPE_FLOAT,
QuantizationComponentSpec::BIT_WIDTH_16);
break;
case PresetQuantizationMethod::WEIGHT_ONLY:
weight_component = custom_method.add_quantization_component_spec();
SetQuantizationComponentSpec(weight_component,
QuantizationComponentSpec::COMPONENT_WEIGHT,
QuantizationComponentSpec::BIT_TYPE_INT,
QuantizationComponentSpec::BIT_WIDTH_8);
break;
case PresetQuantizationMethod::POST_TRAINING_QUANTIZATION_STATIC_RANGE:
activation_component = custom_method.add_quantization_component_spec();
SetQuantizationComponentSpec(
activation_component, QuantizationComponentSpec::COMPONENT_ACTIVATION,
QuantizationComponentSpec::BIT_TYPE_INT,
QuantizationComponentSpec::BIT_WIDTH_8);
weight_component = custom_method.add_quantization_component_spec();
SetQuantizationComponentSpec(weight_component,
QuantizationComponentSpec::COMPONENT_WEIGHT,
QuantizationComponentSpec::BIT_TYPE_INT,
QuantizationComponentSpec::BIT_WIDTH_8);
bias_component = custom_method.add_quantization_component_spec();
SetQuantizationComponentSpec(bias_component,
QuantizationComponentSpec::COMPONENT_BIAS,
QuantizationComponentSpec::BIT_TYPE_INT,
QuantizationComponentSpec::BIT_WIDTH_32);
break;
default:
break;
}
*quantization_options_.mutable_quantization_method()
->mutable_custom_quantization_method() = custom_method;
return quantization_options_;
}
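// Looks up the activation component spec in the custom quantization method
// and writes its bit width (4, 8, 16, or 32) to `bit_width`. Returns failure
// if no activation spec with a recognized bit width is present.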
LogicalResult GetActivationBitWidth(QuantizationOptions quantization_options,
int* bit_width) {
CustomQuantizationMethod custom_method =
quantization_options.quantization_method().custom_quantization_method();
for (const auto& component : custom_method.quantization_component_spec()) {
if (component.quantization_component() ==
QuantizationComponentSpec::COMPONENT_ACTIVATION) {
      switch (component.bit_width()) {
        case QuantizationComponentSpec::BIT_WIDTH_4:
          *bit_width = 4;
          return success();
        case QuantizationComponentSpec::BIT_WIDTH_8:
          *bit_width = 8;
          return success();
        case QuantizationComponentSpec::BIT_WIDTH_16:
          *bit_width = 16;
          return success();
        case QuantizationComponentSpec::BIT_WIDTH_32:
          *bit_width = 32;
          return success();
        default:
          break;
      }
}
}
return failure();
}
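// A minimal usage sketch (hypothetical caller, not part of this file): fill a
// preset, then query the resulting activation bit width. For the
// POST_TRAINING_QUANTIZATION_STATIC_RANGE preset this yields 8.
//
//   QuantizationOptions options;
//   options.mutable_quantization_method()
//       ->mutable_preset_quantization_method()
//       ->set_preset_method(
//           PresetQuantizationMethod::POST_TRAINING_QUANTIZATION_STATIC_RANGE);
//   QuantizationOptions filled = FillPresetQuantizationOptions(options);
//   int bit_width = 0;
//   if (succeeded(GetActivationBitWidth(filled, &bit_width))) {
//     // bit_width == 8 here.
//   }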
} | #include "tensorflow/compiler/mlir/quantization/stablehlo/utils/fill_quantization_options.h"
#include <ostream>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/compiler/mlir/quantization/stablehlo/quantization_options.pb.h"
#include "tsl/platform/protobuf.h"
namespace mlir::quant::stablehlo {
namespace {
using ::stablehlo::quantization::PresetQuantizationMethod;
using ::stablehlo::quantization::QuantizationComponentSpec;
using ::stablehlo::quantization::QuantizationOptions;
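// Matches a proto against an expected message by exact serialized-string
// equality.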
class ProtoStringMatcher {
public:
explicit ProtoStringMatcher(const tsl::protobuf::Message& expected)
: expected_(expected.SerializeAsString()) {}
template <typename Message>
bool MatchAndExplain(const Message& p, testing::MatchResultListener*) const {
return p.SerializeAsString() == expected_;
}
void DescribeTo(::std::ostream* os) const { *os << expected_; }
void DescribeNegationTo(::std::ostream* os) const {
*os << "not equal to expected message: " << expected_;
}
private:
const std::string expected_;
};
inline ::testing::PolymorphicMatcher<ProtoStringMatcher> EqualsProto(
const tsl::protobuf::Message& x) {
return ::testing::MakePolymorphicMatcher(ProtoStringMatcher(x));
}
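// Applies `preset_quantization_options` to a fresh QuantizationOptions and
// verifies each component spec produced by FillPresetQuantizationOptions
// against the expected activation/weight/bias protos.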
void FillPresetQuantizationOptionsTestHelper(
const PresetQuantizationMethod::PresetMethod preset_quantization_options,
const QuantizationComponentSpec expected_activation_component,
const QuantizationComponentSpec expected_weight_component,
const QuantizationComponentSpec expected_bias_component) {
QuantizationOptions quantization_options;
quantization_options.mutable_quantization_method()
->mutable_preset_quantization_method()
->set_preset_method(preset_quantization_options);
QuantizationOptions filled_quantization_options =
quant::stablehlo::FillPresetQuantizationOptions(quantization_options);
  for (const QuantizationComponentSpec& component :
       filled_quantization_options.quantization_method()
           .custom_quantization_method()
           .quantization_component_spec()) {
switch (component.quantization_component()) {
case (QuantizationComponentSpec::COMPONENT_ACTIVATION):
EXPECT_THAT(component, EqualsProto(expected_activation_component));
break;
case (QuantizationComponentSpec::COMPONENT_WEIGHT):
EXPECT_THAT(component, EqualsProto(expected_weight_component));
break;
case (QuantizationComponentSpec::COMPONENT_BIAS):
EXPECT_THAT(component, EqualsProto(expected_bias_component));
break;
default:
break;
}
}
}
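// The FLOAT16 preset should emit float16 weight and bias specs; the
// default-constructed activation_component is never compared because the
// preset adds no activation spec.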
TEST(FillQuantizationOptionsTest, PresetFloat16) {
QuantizationComponentSpec activation_component, weight_component,
bias_component;
weight_component.set_quantization_component(
QuantizationComponentSpec::COMPONENT_WEIGHT);
weight_component.set_bit_width(QuantizationComponentSpec::BIT_WIDTH_16);
weight_component.set_bit_type(QuantizationComponentSpec::BIT_TYPE_FLOAT);
bias_component.set_quantization_component(
QuantizationComponentSpec::COMPONENT_BIAS);
bias_component.set_bit_width(QuantizationComponentSpec::BIT_WIDTH_16);
bias_component.set_bit_type(QuantizationComponentSpec::BIT_TYPE_FLOAT);
  FillPresetQuantizationOptionsTestHelper(
      /*preset_quantization_options=*/PresetQuantizationMethod::FLOAT16,
      /*expected_activation_component=*/activation_component,
      /*expected_weight_component=*/weight_component,
      /*expected_bias_component=*/bias_component);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/stablehlo/utils/fill_quantization_options.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/stablehlo/tests/fill_quantization_options_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |